diff --git a/.github/workflows/submit.yml b/.github/workflows/submit.yml
index c22f840b410..01e4c511379 100644
--- a/.github/workflows/submit.yml
+++ b/.github/workflows/submit.yml
@@ -1365,8 +1365,8 @@ jobs:
with:
name: transient_jdk-macos-x64${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }}
path: |
- jdk/build/macos-x64/bundles/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin${{ matrix.artifact }}.tar.gz
- jdk/build/macos-x64/bundles/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin-tests${{ matrix.artifact }}.tar.gz
+ jdk/build/macos-x64/bundles/jdk-${{ env.JDK_VERSION }}-internal+0_macos-x64_bin${{ matrix.artifact }}.tar.gz
+ jdk/build/macos-x64/bundles/jdk-${{ env.JDK_VERSION }}-internal+0_macos-x64_bin-tests${{ matrix.artifact }}.tar.gz
macos_aarch64_build:
name: macOS aarch64
@@ -1468,8 +1468,8 @@ jobs:
with:
name: transient_jdk-macos-aarch64${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }}
path: |
- jdk/build/macos-aarch64/bundles/jdk-${{ env.JDK_VERSION }}-internal+0_osx-aarch64_bin${{ matrix.artifact }}.tar.gz
- jdk/build/macos-aarch64/bundles/jdk-${{ env.JDK_VERSION }}-internal+0_osx-aarch64_bin-tests${{ matrix.artifact }}.tar.gz
+ jdk/build/macos-aarch64/bundles/jdk-${{ env.JDK_VERSION }}-internal+0_macos-aarch64_bin${{ matrix.artifact }}.tar.gz
+ jdk/build/macos-aarch64/bundles/jdk-${{ env.JDK_VERSION }}-internal+0_macos-aarch64_bin-tests${{ matrix.artifact }}.tar.gz
macos_x64_test:
@@ -1576,13 +1576,13 @@ jobs:
- name: Unpack jdk
run: |
- mkdir -p "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin${{ matrix.artifact }}"
- tar -xf "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin${{ matrix.artifact }}.tar.gz" -C "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin${{ matrix.artifact }}"
+ mkdir -p "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_macos-x64_bin${{ matrix.artifact }}"
+ tar -xf "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_macos-x64_bin${{ matrix.artifact }}.tar.gz" -C "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_macos-x64_bin${{ matrix.artifact }}"
- name: Unpack tests
run: |
- mkdir -p "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin-tests${{ matrix.artifact }}"
- tar -xf "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin-tests${{ matrix.artifact }}.tar.gz" -C "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin-tests${{ matrix.artifact }}"
+ mkdir -p "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_macos-x64_bin-tests${{ matrix.artifact }}"
+ tar -xf "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_macos-x64_bin-tests${{ matrix.artifact }}.tar.gz" -C "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_macos-x64_bin-tests${{ matrix.artifact }}"
- name: Install dependencies
run: brew install make
@@ -1592,13 +1592,13 @@ jobs:
- name: Find root of jdk image dir
run: |
- imageroot=`find ${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin${{ matrix.artifact }} -name release -type f`
+ imageroot=`find ${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_macos-x64_bin${{ matrix.artifact }} -name release -type f`
echo "imageroot=`dirname ${imageroot}`" >> $GITHUB_ENV
- name: Run tests
run: >
JDK_IMAGE_DIR=${{ env.imageroot }}
- TEST_IMAGE_DIR=${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin-tests${{ matrix.artifact }}
+ TEST_IMAGE_DIR=${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_macos-x64_bin-tests${{ matrix.artifact }}
BOOT_JDK=${HOME}/bootjdk/${BOOT_JDK_VERSION}/Contents/Home
JT_HOME=${HOME}/jtreg
gmake test-prebuilt
diff --git a/doc/building.html b/doc/building.html
index 4d5cdf67c3a..71c3710c9ae 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -76,7 +76,7 @@
Building the JDK
Specifying the Target Platform
Toolchain Considerations
Native Libraries
-Creating And Using Sysroots With qemu-deboostrap
+Cross compiling with Debian sysroots
Building for ARM/aarch64
Building for musl
Verifying the Build
@@ -320,6 +320,7 @@ FreeType
To install on an apt-based Linux, try running sudo apt-get install libfreetype6-dev
.
To install on an rpm-based Linux, try running sudo yum install freetype-devel
.
To install on Alpine Linux, try running sudo apk add freetype-dev
.
+To install on macOS, try running brew install freetype
.
Use --with-freetype-include=<path>
and --with-freetype-lib=<path>
if configure
does not automatically locate the platform FreeType files.
CUPS
@@ -628,7 +629,7 @@ X11
cp: cannot stat `arm-linux-gnueabihf/libXt.so': No such file or directory
If the X11 libraries are not properly detected by configure
, you can point them out by --with-x
.
-Creating And Using Sysroots With qemu-deboostrap
+Cross compiling with Debian sysroots
Fortunately, you can create sysroots for foreign architectures with tools provided by your OS. On Debian/Ubuntu systems, one could use qemu-deboostrap
to create the target system chroot, which would have the native libraries and headers specific to that target system. After that, we can use the cross-compiler on the build system, pointing into chroot to get the build dependencies right. This allows building for foreign architectures with native compilation speed.
For example, cross-compiling to AArch64 from x86_64 could be done like this:
@@ -638,7 +639,7 @@ Creating And Using Sys
sudo qemu-debootstrap \
--arch=arm64 \
--verbose \
- --include=fakeroot,symlinks,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxrandr-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng-dev \
+ --include=fakeroot,symlinks,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxrandr-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng-dev,libffi-dev \
--resolve-deps \
buster \
~/sysroot-arm64 \
@@ -646,67 +647,125 @@ Creating And Using Sys
Make sure the symlinks inside the newly created chroot point to proper locations:
sudo chroot ~/sysroot-arm64 symlinks -cr .
Configure and build with newly created chroot as sysroot/toolchain-path:
-CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ sh ./configure \
- --openjdk-target=aarch64-linux-gnu \
- --with-sysroot=~/sysroot-arm64 \
- --with-toolchain-path=~/sysroot-arm64 \
- --with-freetype-lib=~/sysroot-arm64/usr/lib/aarch64-linux-gnu/ \
- --with-freetype-include=~/sysroot-arm64/usr/include/freetype2/ \
- --x-libraries=~/sysroot-arm64/usr/lib/aarch64-linux-gnu/
+sh ./configure \
+ --openjdk-target=aarch64-linux-gnu \
+ --with-sysroot=~/sysroot-arm64
make images
ls build/linux-aarch64-server-release/
The build does not create new files in that chroot, so it can be reused for multiple builds without additional cleanup.
+The build system should automatically detect the toolchain paths and dependencies, but sometimes it might require a little nudge with:
+
+Native compilers: override CC
or CXX
for ./configure
+Freetype lib location: override --with-freetype-lib
, for example ${sysroot}/usr/lib/${target}/
+Freetype includes location: override --with-freetype-include
for example ${sysroot}/usr/include/freetype2/
+X11 libraries location: override --x-libraries
, for example ${sysroot}/usr/lib/${target}/
+
Architectures that are known to successfully cross-compile like this are:
x86 |
-default |
-default |
+buster |
i386 |
i386-linux-gnu |
+(all) |
-armhf |
-gcc-arm-linux-gnueabihf |
-g++-arm-linux-gnueabihf |
+arm |
+buster |
armhf |
arm-linux-gnueabihf |
+(all) |
aarch64 |
-gcc-aarch64-linux-gnu |
-g++-aarch64-linux-gnu |
+buster |
arm64 |
aarch64-linux-gnu |
+(all) |
-ppc64el |
-gcc-powerpc64le-linux-gnu |
-g++-powerpc64le-linux-gnu |
+ppc64le |
+buster |
ppc64el |
powerpc64le-linux-gnu |
+(all) |
s390x |
-gcc-s390x-linux-gnu |
-g++-s390x-linux-gnu |
+buster |
s390x |
s390x-linux-gnu |
+(all) |
+
+
+mipsle |
+buster |
+mipsel |
+mipsel-linux-gnu |
+zero |
+
+
+mips64le |
+buster |
+mips64el |
+mips64el-linux-gnuabi64 |
+zero |
+
+
+armel |
+buster |
+arm |
+arm-linux-gnueabi |
+zero |
+
+
+ppc |
+sid |
+powerpc |
+powerpc-linux-gnu |
+zero |
+
+
+ppc64be |
+sid |
+ppc64 |
+powerpc64-linux-gnu |
+(all) |
+
+
+m68k |
+sid |
+m68k |
+m68k-linux-gnu |
+zero |
+
+
+alpha |
+sid |
+alpha |
+alpha-linux-gnu |
+zero |
+
+
+sh4 |
+sid |
+sh4 |
+sh4-linux-gnu |
+zero |
-Additional architectures might be supported by Debian/Ubuntu Ports.
Building for ARM/aarch64
A common cross-compilation target is the ARM CPU. When building for ARM, it is useful to set the ABI profile. A number of pre-defined ABI profiles are available using --with-abi-profile
: arm-vfp-sflt, arm-vfp-hflt, arm-sflt, armv5-vfp-sflt, armv6-vfp-hflt. Note that soft-float ABIs are no longer properly supported by the JDK.
Building for musl
diff --git a/doc/building.md b/doc/building.md
index 69b7fe640e8..2f9a0026e28 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -454,6 +454,7 @@ rather than bundling the JDK's own copy.
* To install on an rpm-based Linux, try running `sudo yum install
freetype-devel`.
* To install on Alpine Linux, try running `sudo apk add freetype-dev`.
+ * To install on macOS, try running `brew install freetype`.
Use `--with-freetype-include=` and `--with-freetype-lib=`
if `configure` does not automatically locate the platform FreeType files.
@@ -1089,7 +1090,7 @@ Note that X11 is needed even if you only want to build a headless JDK.
* If the X11 libraries are not properly detected by `configure`, you can
point them out by `--with-x`.
-### Creating And Using Sysroots With qemu-deboostrap
+### Cross compiling with Debian sysroots
Fortunately, you can create sysroots for foreign architectures with tools
provided by your OS. On Debian/Ubuntu systems, one could use `qemu-deboostrap` to
@@ -1110,7 +1111,7 @@ For example, cross-compiling to AArch64 from x86_64 could be done like this:
sudo qemu-debootstrap \
--arch=arm64 \
--verbose \
- --include=fakeroot,symlinks,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxrandr-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng-dev \
+ --include=fakeroot,symlinks,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxrandr-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng-dev,libffi-dev \
--resolve-deps \
buster \
~/sysroot-arm64 \
@@ -1124,13 +1125,9 @@ For example, cross-compiling to AArch64 from x86_64 could be done like this:
* Configure and build with newly created chroot as sysroot/toolchain-path:
```
- CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ sh ./configure \
- --openjdk-target=aarch64-linux-gnu \
- --with-sysroot=~/sysroot-arm64 \
- --with-toolchain-path=~/sysroot-arm64 \
- --with-freetype-lib=~/sysroot-arm64/usr/lib/aarch64-linux-gnu/ \
- --with-freetype-include=~/sysroot-arm64/usr/include/freetype2/ \
- --x-libraries=~/sysroot-arm64/usr/lib/aarch64-linux-gnu/
+ sh ./configure \
+ --openjdk-target=aarch64-linux-gnu \
+ --with-sysroot=~/sysroot-arm64
make images
ls build/linux-aarch64-server-release/
```
@@ -1138,17 +1135,34 @@ For example, cross-compiling to AArch64 from x86_64 could be done like this:
The build does not create new files in that chroot, so it can be reused for multiple builds
without additional cleanup.
-Architectures that are known to successfully cross-compile like this are:
+The build system should automatically detect the toolchain paths and dependencies, but sometimes
+it might require a little nudge with:
+
+ * Native compilers: override `CC` or `CXX` for `./configure`
+
+ * Freetype lib location: override `--with-freetype-lib`, for example `${sysroot}/usr/lib/${target}/`
- Target `CC` `CXX` `--arch=...` `--openjdk-target=...`
- ------------ ------------------------- --------------------------- ------------- -----------------------
- x86 default default i386 i386-linux-gnu
- armhf gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf armhf arm-linux-gnueabihf
- aarch64 gcc-aarch64-linux-gnu g++-aarch64-linux-gnu arm64 aarch64-linux-gnu
- ppc64el gcc-powerpc64le-linux-gnu g++-powerpc64le-linux-gnu ppc64el powerpc64le-linux-gnu
- s390x gcc-s390x-linux-gnu g++-s390x-linux-gnu s390x s390x-linux-gnu
+ * Freetype includes location: override `--with-freetype-include` for example `${sysroot}/usr/include/freetype2/`
+
+ * X11 libraries location: override `--x-libraries`, for example `${sysroot}/usr/lib/${target}/`
+
+Architectures that are known to successfully cross-compile like this are:
-Additional architectures might be supported by Debian/Ubuntu Ports.
+ Target Debian tree Debian arch `--openjdk-target=...` `--with-jvm-variants=...`
+ ------------ ------------ ------------- ------------------------ --------------
+ x86 buster i386 i386-linux-gnu (all)
+ arm buster armhf arm-linux-gnueabihf (all)
+ aarch64 buster arm64 aarch64-linux-gnu (all)
+ ppc64le buster ppc64el powerpc64le-linux-gnu (all)
+ s390x buster s390x s390x-linux-gnu (all)
+ mipsle buster mipsel mipsel-linux-gnu zero
+ mips64le buster mips64el mips64el-linux-gnuabi64 zero
+ armel buster arm arm-linux-gnueabi zero
+ ppc sid powerpc powerpc-linux-gnu zero
+ ppc64be sid ppc64 powerpc64-linux-gnu (all)
+ m68k sid m68k m68k-linux-gnu zero
+ alpha sid alpha alpha-linux-gnu zero
+ sh4 sid sh4 sh4-linux-gnu zero
### Building for ARM/aarch64
diff --git a/make/CompileToolsHotspot.gmk b/make/CompileToolsHotspot.gmk
index 1603ef93114..3fd3e5e8b88 100644
--- a/make/CompileToolsHotspot.gmk
+++ b/make/CompileToolsHotspot.gmk
@@ -52,94 +52,6 @@ $(eval $(call SetupJavaCompilation, BUILD_TOOLS_HOTSPOT, \
TARGETS += $(BUILD_TOOLS_HOTSPOT)
-################################################################################
-# Graal build tools
-ifeq ($(INCLUDE_GRAAL), true)
- VM_CI_SRC_DIR := $(TOPDIR)/src/jdk.internal.vm.ci/share/classes
-
- SRC_DIR := $(TOPDIR)/src/jdk.internal.vm.compiler/share/classes
-
- ##############################################################################
- # Compile the annotation processors
- $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
- TARGET_RELEASE := $(TARGET_RELEASE_BOOTJDK), \
- SRC := \
- $(SRC_DIR)/org.graalvm.compiler.processor/src \
- $(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
- , \
- EXCLUDE_FILES := $(EXCLUDE_FILES), \
- BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.match.processor, \
- JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.match.processor.jar, \
- DISABLED_WARNINGS := options, \
- ))
-
- TARGETS += $(BUILD_VM_COMPILER_MATCH_PROCESSOR)
-
- ##############################################################################
-
- $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_NODEINFO_PROCESSOR, \
- TARGET_RELEASE := $(TARGET_RELEASE_BOOTJDK), \
- SRC := \
- $(SRC_DIR)/org.graalvm.compiler.processor/src \
- $(SRC_DIR)/org.graalvm.compiler.nodeinfo.processor/src \
- , \
- BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.nodeinfo.processor, \
- JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.nodeinfo.processor.jar, \
- DISABLED_WARNINGS := options, \
- ))
-
- TARGETS += $(BUILD_VM_COMPILER_NODEINFO_PROCESSOR)
-
- ##############################################################################
-
- $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_OPTIONS_PROCESSOR, \
- TARGET_RELEASE := $(TARGET_RELEASE_BOOTJDK), \
- DISABLED_WARNINGS := options, \
- SRC := \
- $(SRC_DIR)/org.graalvm.compiler.processor/src \
- $(SRC_DIR)/org.graalvm.compiler.options.processor/src \
- , \
- BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor, \
- JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor.jar, \
- ))
-
- TARGETS += $(BUILD_VM_COMPILER_OPTIONS_PROCESSOR)
-
- ##############################################################################
-
- $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_PROCESSOR, \
- TARGET_RELEASE := $(TARGET_RELEASE_BOOTJDK), \
- SRC := \
- $(SRC_DIR)/org.graalvm.compiler.processor/src \
- $(SRC_DIR)/org.graalvm.compiler.replacements.processor/src \
- , \
- EXCLUDE_FILES := $(EXCLUDE_FILES), \
- BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier, \
- JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier.jar, \
- DISABLED_WARNINGS := options, \
- ))
-
- TARGETS += $(BUILD_VM_COMPILER_REPLACEMENTS_PROCESSOR)
-
- ##############################################################################
-
- $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_SERVICEPROVIDER_PROCESSOR, \
- TARGET_RELEASE := $(TARGET_RELEASE_BOOTJDK), \
- SRC := \
- $(SRC_DIR)/org.graalvm.compiler.processor/src \
- $(SRC_DIR)/org.graalvm.compiler.serviceprovider.processor/src \
- , \
- EXCLUDE_FILES := $(EXCLUDE_FILES), \
- BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.serviceprovider.processor, \
- JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.serviceprovider.processor.jar, \
- DISABLED_WARNINGS := options, \
- ))
-
- TARGETS += $(BUILD_VM_COMPILER_SERVICEPROVIDER_PROCESSOR)
-
- ##############################################################################
-endif
-
all: $(TARGETS)
.PHONY: all
diff --git a/make/Main.gmk b/make/Main.gmk
index 687ba36c4c4..a6b606e1b45 100644
--- a/make/Main.gmk
+++ b/make/Main.gmk
@@ -662,18 +662,6 @@ $(eval $(call SetupTarget, test-image-libtest-jtreg-native, \
DEPS := build-test-libtest-jtreg-native, \
))
-$(eval $(call SetupTarget, build-test-hotspot-jtreg-graal, \
- MAKEFILE := test/JtregGraalUnit, \
- TARGET := build-test-hotspot-jtreg-graal, \
- DEPS := exploded-image, \
-))
-
-$(eval $(call SetupTarget, test-image-hotspot-jtreg-graal, \
- MAKEFILE := test/JtregGraalUnit, \
- TARGET := test-image-hotspot-jtreg-graal, \
- DEPS := build-test-hotspot-jtreg-graal, \
-))
-
ifneq ($(GTEST_FRAMEWORK_SRC), )
$(eval $(call SetupTarget, test-image-hotspot-gtest, \
MAKEFILE := hotspot/test/GtestImage, \
@@ -877,18 +865,6 @@ else
# copied and processed.
java.desktop-gensrc-src: java.base-gensrc java.base-copy
- # The annotation processing for jdk.internal.vm.compiler
- # and jdk.internal.vm.compiler.management needs classes from the current JDK.
- jdk.internal.vm.compiler-gensrc-src: $(addsuffix -java, \
- $(call FindTransitiveDepsForModule, jdk.internal.vm.compiler))
- jdk.internal.vm.compiler.management-gensrc-src: $(addsuffix -java, \
- $(call FindTransitiveDepsForModule, jdk.internal.vm.compiler.management))
-
- # For these modules, the gensrc step is generating a module-info.java.extra
- # file to be processed by the gensrc-moduleinfo target.
- jdk.internal.vm.compiler-gensrc-moduleinfo: jdk.internal.vm.compiler-gensrc-src
- jdk.internal.vm.compiler.management-gensrc-moduleinfo: jdk.internal.vm.compiler.management-gensrc-src
-
jdk.jdeps-gendata: java
# The ct.sym generation uses all the moduleinfos as input
@@ -1176,10 +1152,6 @@ else
ifneq ($(GTEST_FRAMEWORK_SRC), )
test-image: test-image-hotspot-gtest
endif
-
- ifeq ($(INCLUDE_GRAAL), true)
- test-image: test-image-hotspot-jtreg-graal
- endif
endif
ifeq ($(BUILD_FAILURE_HANDLER), true)
diff --git a/make/RunTests.gmk b/make/RunTests.gmk
index 4eeef218b08..546a85f1659 100644
--- a/make/RunTests.gmk
+++ b/make/RunTests.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@ endif
$(eval $(call ParseKeywordVariable, TEST_OPTS, \
SINGLE_KEYWORDS := JOBS TIMEOUT_FACTOR JCOV JCOV_DIFF_CHANGESET, \
- STRING_KEYWORDS := VM_OPTIONS JAVA_OPTIONS AOT_MODULES, \
+ STRING_KEYWORDS := VM_OPTIONS JAVA_OPTIONS, \
))
# Helper function to propagate TEST_OPTS values.
@@ -134,96 +134,6 @@ ifeq ($(GCOV_ENABLED), true)
JTREG_COV_OPTIONS += -e:GCOV_PREFIX="$(GCOV_OUTPUT_DIR)"
endif
-################################################################################
-# Optionally create AOT libraries for specified modules before running tests.
-# Note, this could not be done during JDK build time.
-################################################################################
-# Parameter 1 is the name of the rule.
-#
-# Remaining parameters are named arguments.
-# MODULE The module to generate a library for
-# BIN Output directory in which to put the library
-# VM_OPTIONS List of JVM arguments to use when creating library
-# OPTIONS_VAR Name of variable to put AOT java options in
-# PREREQS_VAR Name of variable to put all AOT prerequisite rule targets in
-# for test rules to depend on
-#
-SetupAotModule = $(NamedParamsMacroTemplate)
-define SetupAotModuleBody
- $1_AOT_LIB := $$($1_BIN)/$$(call SHARED_LIBRARY,$$($1_MODULE))
- $1_AOT_CCLIST := $$(wildcard $$(TOPDIR)/test/hotspot/jtreg/compiler/aot/scripts/$$($1_MODULE)-list.txt)
-
- # Create jaotc flags.
- # VM flags which don't affect AOT code generation are filtered out:
- # -Xcomp, -XX:+-TieredCompilation
- $1_JAOTC_OPTS := \
- -J-Xmx4g --info \
- $$(addprefix -J, $$(filter-out -Xcomp %TieredCompilation, $$($1_VM_OPTIONS))) \
- $$(addprefix --compile-commands$(SPACE), $$($1_AOT_CCLIST)) \
- --linker-path $$(LD_JAOTC) \
- #
-
- ifneq ($$(filter -ea, $$($1_VM_OPTIONS)), )
- $1_JAOTC_OPTS += --compile-with-assertions
- endif
-
- ifneq ($$(filter -XX:+VerifyOops, $$($1_VM_OPTIONS)), )
- $1_JAOTC_OPTS += -J-Dgraal.AOTVerifyOops=true
- endif
-
- $$($1_AOT_LIB): $$(JDK_UNDER_TEST)/release \
- $$(call DependOnVariable, $1_JAOTC_OPTS) \
- $$(call DependOnVariable, JDK_UNDER_TEST)
- $$(call LogWarn, Generating $$(patsubst $$(OUTPUTDIR)/%, %, $$@))
- $$(call MakeTargetDir)
- $$(call ExecuteWithLog, $$@, \
- $((COV_ENVIRONMENT) \
- $$(FIXPATH) $$(JDK_UNDER_TEST)/bin/jaotc \
- $$($1_JAOTC_OPTS) --output $$@ --module $$($1_MODULE) \
- )
- $$(call ExecuteWithLog, $$@.check, ( \
- $$(FIXPATH) $$(JDK_UNDER_TEST)/bin/java \
- $$($1_VM_OPTIONS) -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions \
- -XX:+PrintAOT -XX:+UseAOTStrictLoading \
- -XX:AOTLibrary=$$@ -version \
- > $$@.verify-aot \
- ))
-
- $1_AOT_OPTIONS += -XX:+UnlockExperimentalVMOptions
- $1_AOT_OPTIONS += -XX:AOTLibrary=$$($1_AOT_LIB)
- $1_AOT_TARGETS += $$($1_AOT_LIB)
-endef
-
-################################################################################
-# Optionally create AOT libraries before running tests.
-# Note, this could not be done during JDK build time.
-################################################################################
-# Parameter 1 is the name of the rule.
-#
-# Remaining parameters are named arguments.
-# MODULES The modules to generate a library for
-# VM_OPTIONS List of JVM arguments to use when creating libraries
-#
-# After calling this, the following variables are defined
-# $1_AOT_OPTIONS List of all java options needed to use the AOT libraries
-# $1_AOT_TARGETS List of all targets that the test rule will need to depend on
-#
-SetupAot = $(NamedParamsMacroTemplate)
-define SetupAotBody
- $$(info Running with AOTd libraries for $$($1_MODULES))
- # Put aot libraries in a separate directory so they are not deleted between
- # test runs and may be reused between make invocations.
- $$(foreach m, $$($1_MODULES), \
- $$(eval $$(call SetupAotModule, $1_$$m, \
- MODULE := $$m, \
- BIN := $$(TEST_SUPPORT_DIR)/aot/$1, \
- VM_OPTIONS := $$($1_VM_OPTIONS), \
- )) \
- $$(eval $1_AOT_OPTIONS += $$($1_$$m_AOT_OPTIONS)) \
- $$(eval $1_AOT_TARGETS += $$($1_$$m_AOT_TARGETS)) \
- )
-endef
-
################################################################################
# Setup global test running parameters
################################################################################
@@ -282,7 +192,6 @@ endif
$(eval $(call SetTestOpt,VM_OPTIONS,JTREG))
$(eval $(call SetTestOpt,JAVA_OPTIONS,JTREG))
-$(eval $(call SetTestOpt,AOT_MODULES,JTREG))
$(eval $(call SetTestOpt,JOBS,JTREG))
$(eval $(call SetTestOpt,TIMEOUT_FACTOR,JTREG))
@@ -293,7 +202,7 @@ $(eval $(call ParseKeywordVariable, JTREG, \
TEST_MODE ASSERT VERBOSE RETAIN MAX_MEM RUN_PROBLEM_LISTS \
RETRY_COUNT MAX_OUTPUT, \
STRING_KEYWORDS := OPTIONS JAVA_OPTIONS VM_OPTIONS KEYWORDS \
- EXTRA_PROBLEM_LISTS AOT_MODULES LAUNCHER_OPTIONS, \
+ EXTRA_PROBLEM_LISTS LAUNCHER_OPTIONS, \
))
ifneq ($(JTREG), )
@@ -305,11 +214,10 @@ endif
$(eval $(call SetTestOpt,VM_OPTIONS,GTEST))
$(eval $(call SetTestOpt,JAVA_OPTIONS,GTEST))
-$(eval $(call SetTestOpt,AOT_MODULES,GTEST))
$(eval $(call ParseKeywordVariable, GTEST, \
SINGLE_KEYWORDS := REPEAT, \
- STRING_KEYWORDS := OPTIONS VM_OPTIONS JAVA_OPTIONS AOT_MODULES, \
+ STRING_KEYWORDS := OPTIONS VM_OPTIONS JAVA_OPTIONS, \
))
ifneq ($(GTEST), )
@@ -592,14 +500,7 @@ define SetupRunGtestTestBody
$1_GTEST_REPEAT :=--gtest_repeat=$$(GTEST_REPEAT)
endif
- ifneq ($$(GTEST_AOT_MODULES), )
- $$(eval $$(call SetupAot, $1, \
- MODULES := $$(GTEST_AOT_MODULES), \
- VM_OPTIONS := $$(GTEST_VM_OPTIONS) $$(GTEST_JAVA_OPTIONS), \
- ))
- endif
-
- run-test-$1: pre-run-test $$($1_AOT_TARGETS)
+ run-test-$1: pre-run-test
$$(call LogWarn)
$$(call LogWarn, Running test '$$($1_TEST)')
$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
@@ -610,7 +511,7 @@ define SetupRunGtestTestBody
--gtest_output=xml:$$($1_TEST_RESULTS_DIR)/gtest.xml \
--gtest_catch_exceptions=0 \
$$($1_GTEST_REPEAT) $$(GTEST_OPTIONS) $$(GTEST_VM_OPTIONS) \
- $$(GTEST_JAVA_OPTIONS) $$($1_AOT_OPTIONS) \
+ $$(GTEST_JAVA_OPTIONS) \
> >($(TEE) $$($1_TEST_RESULTS_DIR)/gtest.txt) \
&& $$(ECHO) $$$$? > $$($1_EXITCODE) \
|| $$(ECHO) $$$$? > $$($1_EXITCODE) \
@@ -917,7 +818,6 @@ define SetupRunJtregTestBody
endif
$1_JTREG_BASIC_OPTIONS += -e:TEST_IMAGE_DIR=$(TEST_IMAGE_DIR)
- $1_JTREG_BASIC_OPTIONS += -e:TEST_IMAGE_GRAAL_DIR=$(TEST_IMAGE_DIR)/hotspot/jtreg/graal
ifneq ($$(JTREG_FAILURE_HANDLER_OPTIONS), )
$1_JTREG_LAUNCHER_OPTIONS += -Djava.library.path="$(JTREG_FAILURE_HANDLER_DIR)"
@@ -934,17 +834,6 @@ define SetupRunJtregTestBody
endif
endif
- ifneq ($$(JTREG_AOT_MODULES), )
- $$(eval $$(call SetupAot, $1, \
- MODULES := $$(JTREG_AOT_MODULES), \
- VM_OPTIONS := $$(JTREG_VM_OPTIONS) $$(JTREG_JAVA_OPTIONS), \
- ))
- endif
-
- ifneq ($$($1_AOT_OPTIONS), )
- $1_JTREG_BASIC_OPTIONS += -vmoptions:"$$($1_AOT_OPTIONS)"
- endif
-
clean-workdir-$1:
$$(RM) -r $$($1_TEST_SUPPORT_DIR)
@@ -979,7 +868,7 @@ define SetupRunJtregTestBody
done
endif
- run-test-$1: pre-run-test clean-workdir-$1 $$($1_AOT_TARGETS)
+ run-test-$1: pre-run-test clean-workdir-$1
$$(call LogWarn)
$$(call LogWarn, Running test '$$($1_TEST)')
$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR) \
diff --git a/make/RunTestsPrebuilt.gmk b/make/RunTestsPrebuilt.gmk
index 1afc152ea89..85c6bae6399 100644
--- a/make/RunTestsPrebuilt.gmk
+++ b/make/RunTestsPrebuilt.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -222,25 +222,6 @@ ifeq ($(MEMORY_SIZE), )
MEMORY_SIZE := 1024
endif
-# Setup LD for AOT support
-ifneq ($(DEVKIT_HOME), )
- ifeq ($(OPENJDK_TARGET_OS), windows)
- LD_JAOTC := $(DEVKIT_HOME)/VC/bin/x64/link.exe
- LIBRARY_PREFIX :=
- SHARED_LIBRARY_SUFFIX := .dll
- else ifeq ($(OPENJDK_TARGET_OS), linux)
- LD_JAOTC := $(DEVKIT_HOME)/bin/ld
- LIBRARY_PREFIX := lib
- SHARED_LIBRARY_SUFFIX := .so
- else ifeq ($(OPENJDK_TARGET_OS), macosx)
- LD_JAOTC := $(DEVKIT_HOME)/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/ld
- LIBRARY_PREFIX := lib
- SHARED_LIBRARY_SUFFIX := .dylib
- endif
-else
- LD := ld
-endif
-
ifneq ($(wildcard $(JDK_IMAGE_DIR)/template.xml), )
TEST_OPTS_JCOV := true
JCOV_IMAGE_DIR := $(JDK_IMAGE_DIR)
@@ -286,9 +267,6 @@ $(call CreateNewSpec, $(NEW_SPEC), \
OPENJDK_TARGET_CPU_ENDIAN := $(OPENJDK_TARGET_CPU_ENDIAN), \
NUM_CORES := $(NUM_CORES), \
MEMORY_SIZE := $(MEMORY_SIZE), \
- LD_JAOTC := $(LD_JAOTC), \
- LIBRARY_PREFIX := $(LIBRARY_PREFIX), \
- SHARED_LIBRARY_SUFFIX := $(SHARED_LIBRARY_SUFFIX), \
include $(TOPDIR)/make/RunTestsPrebuiltSpec.gmk, \
TEST_OPTS_JCOV := $(TEST_OPTS_JCOV), \
$(CUSTOM_NEW_SPEC_LINE), \
diff --git a/make/autoconf/flags-other.m4 b/make/autoconf/flags-other.m4
index 9a294f58e8e..14bb3f5b52f 100644
--- a/make/autoconf/flags-other.m4
+++ b/make/autoconf/flags-other.m4
@@ -76,8 +76,16 @@ AC_DEFUN([FLAGS_SETUP_RCFLAGS],
# platform independent
AC_DEFUN([FLAGS_SETUP_ASFLAGS],
[
+ if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then
+ # Force preprocessor to run, just to make sure
+ BASIC_ASFLAGS="-x assembler-with-cpp"
+ elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
+ BASIC_ASFLAGS="-nologo -c"
+ fi
+ AC_SUBST(BASIC_ASFLAGS)
+
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
- JVM_BASIC_ASFLAGS="-x assembler-with-cpp -mno-omit-leaf-frame-pointer -mstack-alignment=16"
+ JVM_BASIC_ASFLAGS="-mno-omit-leaf-frame-pointer -mstack-alignment=16"
# Fix linker warning.
# Code taken from make/autoconf/flags-cflags.m4 and adapted.
diff --git a/make/autoconf/flags.m4 b/make/autoconf/flags.m4
index e2f29d7930b..2171ca10ba2 100644
--- a/make/autoconf/flags.m4
+++ b/make/autoconf/flags.m4
@@ -374,15 +374,13 @@ AC_DEFUN([FLAGS_SETUP_TOOLCHAIN_CONTROL],
# Generate make dependency files
if test "x$TOOLCHAIN_TYPE" = xgcc; then
- C_FLAG_DEPS="-MMD -MF"
+ GENDEPS_FLAGS="-MMD -MF"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
- C_FLAG_DEPS="-MMD -MF"
+ GENDEPS_FLAGS="-MMD -MF"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
- C_FLAG_DEPS="-qmakedep=gcc -MF"
+ GENDEPS_FLAGS="-qmakedep=gcc -MF"
fi
- CXX_FLAG_DEPS="$C_FLAG_DEPS"
- AC_SUBST(C_FLAG_DEPS)
- AC_SUBST(CXX_FLAG_DEPS)
+ AC_SUBST(GENDEPS_FLAGS)
])
AC_DEFUN_ONCE([FLAGS_POST_TOOLCHAIN],
diff --git a/make/autoconf/jdk-options.m4 b/make/autoconf/jdk-options.m4
index 96b5d2df5bf..299f76bd1e6 100644
--- a/make/autoconf/jdk-options.m4
+++ b/make/autoconf/jdk-options.m4
@@ -240,11 +240,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_SYMBOLS],
[AS_HELP_STRING([--with-native-debug-symbols],
[set the native debug symbol configuration (none, internal, external, zipped) @<:@varying@:>@])],
[
- if test "x$OPENJDK_TARGET_OS" = xaix; then
- if test "x$withval" = xexternal || test "x$withval" = xzipped; then
- AC_MSG_ERROR([AIX only supports the parameters 'none' and 'internal' for --with-native-debug-symbols])
- fi
- elif test "x$OPENJDK_TARGET_OS" = xwindows; then
+ if test "x$OPENJDK_TARGET_OS" = xwindows; then
if test "x$withval" = xinternal; then
AC_MSG_ERROR([Windows does not support the parameter 'internal' for --with-native-debug-symbols])
fi
@@ -254,12 +250,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_SYMBOLS],
if test "x$STATIC_BUILD" = xtrue; then
with_native_debug_symbols="none"
else
- if test "x$OPENJDK_TARGET_OS" = xaix; then
- # AIX doesn't support 'external' so use 'internal' as default
- with_native_debug_symbols="internal"
- else
- with_native_debug_symbols="external"
- fi
+ with_native_debug_symbols="external"
fi
])
AC_MSG_RESULT([$with_native_debug_symbols])
diff --git a/make/autoconf/jvm-features.m4 b/make/autoconf/jvm-features.m4
index cb3ed433693..fd9ac811b53 100644
--- a/make/autoconf/jvm-features.m4
+++ b/make/autoconf/jvm-features.m4
@@ -44,7 +44,7 @@
m4_define(jvm_features_valid, m4_normalize( \
ifdef([custom_jvm_features_valid], custom_jvm_features_valid) \
\
- aot cds compiler1 compiler2 dtrace epsilongc g1gc graal jfr jni-check \
+ cds compiler1 compiler2 dtrace epsilongc g1gc jfr jni-check \
jvmci jvmti link-time-opt management minimal nmt opt-size parallelgc \
serialgc services shenandoahgc static-build vm-structs zero zgc \
))
@@ -55,14 +55,12 @@ m4_define(jvm_features_deprecated, m4_normalize(
))
# Feature descriptions
-m4_define(jvm_feature_desc_aot, [enable ahead of time compilation (AOT)])
m4_define(jvm_feature_desc_cds, [enable class data sharing (CDS)])
m4_define(jvm_feature_desc_compiler1, [enable hotspot compiler C1])
m4_define(jvm_feature_desc_compiler2, [enable hotspot compiler C2])
m4_define(jvm_feature_desc_dtrace, [enable dtrace support])
m4_define(jvm_feature_desc_epsilongc, [include the epsilon (no-op) garbage collector])
m4_define(jvm_feature_desc_g1gc, [include the G1 garbage collector])
-m4_define(jvm_feature_desc_graal, [enable Graal (jdk.internal.vm.compiler)])
m4_define(jvm_feature_desc_jfr, [enable JDK Flight Recorder (JFR)])
m4_define(jvm_feature_desc_jni_check, [enable -Xcheck:jni support])
m4_define(jvm_feature_desc_jvmci, [enable JVM Compiler Interface (JVMCI)])
@@ -94,7 +92,6 @@ AC_DEFUN_ONCE([JVM_FEATURES_PARSE_OPTIONS],
# For historical reasons, some jvm features have their own, shorter names.
# Keep those as aliases for the --enable-jvm-feature-* style arguments.
- UTIL_ALIASED_ARG_ENABLE(aot, --enable-jvm-feature-aot)
UTIL_ALIASED_ARG_ENABLE(cds, --enable-jvm-feature-cds)
UTIL_ALIASED_ARG_ENABLE(dtrace, --enable-jvm-feature-dtrace)
@@ -229,34 +226,6 @@ AC_DEFUN([JVM_FEATURES_CHECK_AVAILABILITY],
fi
])
-###############################################################################
-# Check if the feature 'aot' is available on this platform.
-#
-AC_DEFUN_ONCE([JVM_FEATURES_CHECK_AOT],
-[
- JVM_FEATURES_CHECK_AVAILABILITY(aot, [
- AC_MSG_CHECKING([if platform is supported by AOT])
- # AOT is only available where JVMCI is available since it requires JVMCI.
- if test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
- AC_MSG_RESULT([yes])
- elif test "x$OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU" = "xlinux-aarch64"; then
- AC_MSG_RESULT([yes])
- else
- AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU])
- AVAILABLE=false
- fi
-
- AC_MSG_CHECKING([if AOT source code is present])
- if test -e "${TOPDIR}/src/jdk.internal.vm.compiler" && \
- test -e "${TOPDIR}/src/jdk.aot"; then
- AC_MSG_RESULT([yes])
- else
- AC_MSG_RESULT([no, missing src/jdk.internal.vm.compiler or src/jdk.aot])
- AVAILABLE=false
- fi
- ])
-])
-
###############################################################################
# Check if the feature 'cds' is available on this platform.
#
@@ -296,25 +265,6 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_DTRACE],
])
])
-###############################################################################
-# Check if the feature 'graal' is available on this platform.
-#
-AC_DEFUN_ONCE([JVM_FEATURES_CHECK_GRAAL],
-[
- JVM_FEATURES_CHECK_AVAILABILITY(graal, [
- AC_MSG_CHECKING([if platform is supported by Graal])
- # Graal is only available where JVMCI is available since it requires JVMCI.
- if test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
- AC_MSG_RESULT([yes])
- elif test "x$OPENJDK_TARGET_CPU" = "xaarch64"; then
- AC_MSG_RESULT([yes])
- else
- AC_MSG_RESULT([no, $OPENJDK_TARGET_CPU])
- AVAILABLE=false
- fi
- ])
-])
-
###############################################################################
# Check if the feature 'jfr' is available on this platform.
#
@@ -400,7 +350,8 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_ZGC],
fi
elif test "x$OPENJDK_TARGET_CPU" = "xaarch64"; then
if test "x$OPENJDK_TARGET_OS" = "xlinux" || \
- test "x$OPENJDK_TARGET_OS" = "xwindows"; then
+ test "x$OPENJDK_TARGET_OS" = "xwindows" || \
+ test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU])
@@ -439,10 +390,8 @@ AC_DEFUN_ONCE([JVM_FEATURES_PREPARE_PLATFORM],
# The checks below should add unavailable features to
# JVM_FEATURES_PLATFORM_UNAVAILABLE.
- JVM_FEATURES_CHECK_AOT
JVM_FEATURES_CHECK_CDS
JVM_FEATURES_CHECK_DTRACE
- JVM_FEATURES_CHECK_GRAAL
JVM_FEATURES_CHECK_JFR
JVM_FEATURES_CHECK_JVMCI
JVM_FEATURES_CHECK_SHENANDOAHGC
@@ -475,18 +424,18 @@ AC_DEFUN([JVM_FEATURES_PREPARE_VARIANT],
elif test "x$variant" = "xcore"; then
JVM_FEATURES_VARIANT_UNAVAILABLE="cds minimal zero"
elif test "x$variant" = "xzero"; then
- JVM_FEATURES_VARIANT_UNAVAILABLE="aot cds compiler1 compiler2 \
- graal jvmci minimal zgc"
+ JVM_FEATURES_VARIANT_UNAVAILABLE="cds compiler1 compiler2 \
+ jvmci minimal zgc"
else
JVM_FEATURES_VARIANT_UNAVAILABLE="minimal zero"
fi
# Check which features should be off by default for this JVM variant.
if test "x$variant" = "xclient"; then
- JVM_FEATURES_VARIANT_FILTER="aot compiler2 graal jvmci link-time-opt opt-size"
+ JVM_FEATURES_VARIANT_FILTER="compiler2 jvmci link-time-opt opt-size"
elif test "x$variant" = "xminimal"; then
- JVM_FEATURES_VARIANT_FILTER="aot cds compiler2 dtrace epsilongc g1gc \
- graal jfr jni-check jvmci jvmti management nmt parallelgc services \
+ JVM_FEATURES_VARIANT_FILTER="cds compiler2 dtrace epsilongc g1gc \
+ jfr jni-check jvmci jvmti management nmt parallelgc services \
shenandoahgc vm-structs zgc"
if test "x$OPENJDK_TARGET_CPU" = xarm ; then
JVM_FEATURES_VARIANT_FILTER="$JVM_FEATURES_VARIANT_FILTER opt-size"
@@ -496,7 +445,7 @@ AC_DEFUN([JVM_FEATURES_PREPARE_VARIANT],
link-time-opt"
fi
elif test "x$variant" = "xcore"; then
- JVM_FEATURES_VARIANT_FILTER="aot compiler1 compiler2 graal jvmci \
+ JVM_FEATURES_VARIANT_FILTER="compiler1 compiler2 jvmci \
link-time-opt opt-size"
elif test "x$variant" = "xzero"; then
JVM_FEATURES_VARIANT_FILTER="jfr link-time-opt opt-size"
@@ -572,15 +521,6 @@ AC_DEFUN([JVM_FEATURES_VERIFY],
[
variant=$1
- # Verify that dependencies are met for inter-feature relations.
- if JVM_FEATURES_IS_ACTIVE(aot) && ! JVM_FEATURES_IS_ACTIVE(graal); then
- AC_MSG_ERROR([Specified JVM feature 'aot' requires feature 'graal' for variant '$variant'])
- fi
-
- if JVM_FEATURES_IS_ACTIVE(graal) && ! JVM_FEATURES_IS_ACTIVE(jvmci); then
- AC_MSG_ERROR([Specified JVM feature 'graal' requires feature 'jvmci' for variant '$variant'])
- fi
-
if JVM_FEATURES_IS_ACTIVE(jvmci) && ! (JVM_FEATURES_IS_ACTIVE(compiler1) || \
JVM_FEATURES_IS_ACTIVE(compiler2)); then
AC_MSG_ERROR([Specified JVM feature 'jvmci' requires feature 'compiler2' or 'compiler1' for variant '$variant'])
@@ -596,15 +536,9 @@ AC_DEFUN([JVM_FEATURES_VERIFY],
# For backwards compatibility, disable a feature "globally" if one variant
# is missing the feature.
- if ! JVM_FEATURES_IS_ACTIVE(aot); then
- ENABLE_AOT="false"
- fi
if ! JVM_FEATURES_IS_ACTIVE(cds); then
ENABLE_CDS="false"
fi
- if ! JVM_FEATURES_IS_ACTIVE(graal); then
- INCLUDE_GRAAL="false"
- fi
if ! JVM_FEATURES_IS_ACTIVE(jvmci); then
INCLUDE_JVMCI="false"
fi
@@ -629,9 +563,7 @@ AC_DEFUN_ONCE([JVM_FEATURES_SETUP],
# For backwards compatibility, tentatively enable these features "globally",
# and disable them in JVM_FEATURES_VERIFY if a variant is found that are
# missing any of them.
- ENABLE_AOT="true"
ENABLE_CDS="true"
- INCLUDE_GRAAL="true"
INCLUDE_JVMCI="true"
for variant in $JVM_VARIANTS; do
@@ -668,8 +600,6 @@ AC_DEFUN_ONCE([JVM_FEATURES_SETUP],
AC_SUBST(JVM_FEATURES_zero)
AC_SUBST(JVM_FEATURES_custom)
- AC_SUBST(ENABLE_AOT)
- AC_SUBST(INCLUDE_GRAAL)
AC_SUBST(INCLUDE_JVMCI)
])
diff --git a/make/autoconf/lib-tests.m4 b/make/autoconf/lib-tests.m4
index ff41329afb5..c431341f4d8 100644
--- a/make/autoconf/lib-tests.m4
+++ b/make/autoconf/lib-tests.m4
@@ -23,38 +23,6 @@
# questions.
#
-###############################################################################
-#
-# Check for graalunit libs, needed for running graalunit tests.
-#
-AC_DEFUN_ONCE([LIB_TESTS_SETUP_GRAALUNIT],
-[
- AC_ARG_WITH(graalunit-lib, [AS_HELP_STRING([--with-graalunit-lib],
- [specify location of 3rd party libraries used by Graal unit tests])])
-
- GRAALUNIT_LIB=
- if test "x${with_graalunit_lib}" != x; then
- AC_MSG_CHECKING([for graalunit libs])
- if test "x${with_graalunit_lib}" = xno; then
- AC_MSG_RESULT([disabled, graalunit tests can not be run])
- elif test "x${with_graalunit_lib}" = xyes; then
- AC_MSG_RESULT([not specified])
- AC_MSG_ERROR([You must specify the path to 3rd party libraries used by Graal unit tests])
- else
- GRAALUNIT_LIB="${with_graalunit_lib}"
- if test ! -d "${GRAALUNIT_LIB}"; then
- AC_MSG_RESULT([no])
- AC_MSG_ERROR([Could not find graalunit 3rd party libraries as specified. (${with_graalunit_lib})])
- else
- AC_MSG_RESULT([$GRAALUNIT_LIB])
- fi
- fi
- fi
-
- UTIL_FIXUP_PATH([GRAALUNIT_LIB])
- AC_SUBST(GRAALUNIT_LIB)
-])
-
###############################################################################
#
# Setup and check for gtest framework source files
diff --git a/make/autoconf/libraries.m4 b/make/autoconf/libraries.m4
index a12459b5917..a65d91ee974 100644
--- a/make/autoconf/libraries.m4
+++ b/make/autoconf/libraries.m4
@@ -103,7 +103,6 @@ AC_DEFUN_ONCE([LIB_SETUP_LIBRARIES],
LIB_SETUP_LIBFFI
LIB_SETUP_BUNDLED_LIBS
LIB_SETUP_MISC_LIBS
- LIB_TESTS_SETUP_GRAALUNIT
LIB_TESTS_SETUP_GTEST
BASIC_JDKLIB_LIBS=""
diff --git a/make/autoconf/platform.m4 b/make/autoconf/platform.m4
index 181fdbf701d..2dd13d0d5e2 100644
--- a/make/autoconf/platform.m4
+++ b/make/autoconf/platform.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -480,9 +480,11 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HELPER],
fi
# The new version string in JDK 9 also defined new naming of OS and ARCH for bundles
- # Macosx is osx and x86_64 is x64
+ # The macOS bundle name was revised in JDK 17
+ #
+ # macosx is macos and x86_64 is x64
if test "x$OPENJDK_$1_OS" = xmacosx; then
- OPENJDK_$1_OS_BUNDLE="osx"
+ OPENJDK_$1_OS_BUNDLE="macos"
else
OPENJDK_$1_OS_BUNDLE="$OPENJDK_TARGET_OS"
fi
diff --git a/make/autoconf/spec.gmk.in b/make/autoconf/spec.gmk.in
index 3681688105e..b8ca7972f76 100644
--- a/make/autoconf/spec.gmk.in
+++ b/make/autoconf/spec.gmk.in
@@ -392,7 +392,6 @@ LIBFFI_LIBS:=@LIBFFI_LIBS@
LIBFFI_CFLAGS:=@LIBFFI_CFLAGS@
ENABLE_LIBFFI_BUNDLING:=@ENABLE_LIBFFI_BUNDLING@
LIBFFI_LIB_FILE:=@LIBFFI_LIB_FILE@
-GRAALUNIT_LIB := @GRAALUNIT_LIB@
FILE_MACRO_CFLAGS := @FILE_MACRO_CFLAGS@
STATIC_LIBS_CFLAGS := @STATIC_LIBS_CFLAGS@
@@ -477,8 +476,7 @@ CXX_O_FLAG_NORM:=@CXX_O_FLAG_NORM@
CXX_O_FLAG_NONE:=@CXX_O_FLAG_NONE@
CXX_O_FLAG_SIZE:=@CXX_O_FLAG_SIZE@
-C_FLAG_DEPS:=@C_FLAG_DEPS@
-CXX_FLAG_DEPS:=@CXX_FLAG_DEPS@
+GENDEPS_FLAGS := @GENDEPS_FLAGS@
DISABLE_WARNING_PREFIX := @DISABLE_WARNING_PREFIX@
CFLAGS_WARNINGS_ARE_ERRORS:=@CFLAGS_WARNINGS_ARE_ERRORS@
@@ -510,6 +508,8 @@ JVM_LDFLAGS := @JVM_LDFLAGS@
JVM_ASFLAGS := @JVM_ASFLAGS@
JVM_LIBS := @JVM_LIBS@
+BASIC_ASFLAGS := @BASIC_ASFLAGS@
+
# These flags might contain variables set by a custom extension that is included later.
EXTRA_CFLAGS = @EXTRA_CFLAGS@
EXTRA_CXXFLAGS = @EXTRA_CXXFLAGS@
@@ -523,9 +523,6 @@ CPP := @CPP@
# The linker can be gcc or ld on unix systems, or link.exe on windows systems.
LD := @LD@
-# Linker used by the jaotc tool for AOT compilation.
-LD_JAOTC:=@LD_JAOTC@
-
# Xcode SDK path
SDKROOT:=@SDKROOT@
@@ -765,7 +762,6 @@ TAR_INCLUDE_PARAM:=@TAR_INCLUDE_PARAM@
TAR_SUPPORTS_TRANSFORM:=@TAR_SUPPORTS_TRANSFORM@
# Build setup
-ENABLE_AOT:=@ENABLE_AOT@
USE_EXTERNAL_LIBJPEG:=@USE_EXTERNAL_LIBJPEG@
USE_EXTERNAL_LIBGIF:=@USE_EXTERNAL_LIBGIF@
USE_EXTERNAL_LIBZ:=@USE_EXTERNAL_LIBZ@
@@ -853,7 +849,6 @@ PNG_CFLAGS:=@PNG_CFLAGS@
#
INCLUDE_SA=@INCLUDE_SA@
-INCLUDE_GRAAL=@INCLUDE_GRAAL@
INCLUDE_JVMCI=@INCLUDE_JVMCI@
OS_VERSION_MAJOR:=@OS_VERSION_MAJOR@
diff --git a/make/autoconf/toolchain.m4 b/make/autoconf/toolchain.m4
index d9404d26eb4..78895888095 100644
--- a/make/autoconf/toolchain.m4
+++ b/make/autoconf/toolchain.m4
@@ -664,18 +664,12 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_CORE],
UTIL_LOOKUP_TOOLCHAIN_PROGS(LD, link)
TOOLCHAIN_VERIFY_LINK_BINARY(LD)
LDCXX="$LD"
- # jaotc being a windows program expects the linker to be supplied with exe suffix.but without
- # fixpath
- LD_JAOTC="${LD##$FIXPATH }"
else
# All other toolchains use the compiler to link.
LD="$CC"
LDCXX="$CXX"
- # jaotc expects 'ld' as the linker rather than the compiler.
- UTIL_LOOKUP_TOOLCHAIN_PROGS(LD_JAOTC, ld)
fi
AC_SUBST(LD)
- AC_SUBST(LD_JAOTC)
# FIXME: it should be CXXLD, according to standard (cf CXXCPP)
AC_SUBST(LDCXX)
@@ -696,8 +690,13 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_CORE],
if test "x$TOOLCHAIN_TYPE" != xmicrosoft; then
AS="$CC -c"
else
- # On windows, the assember is "ml.exe"
- UTIL_LOOKUP_TOOLCHAIN_PROGS(AS, ml)
+ if test "x$OPENJDK_TARGET_CPU_BITS" = "x64"; then
+ # On 64 bit windows, the assembler is "ml64.exe"
+ UTIL_LOOKUP_TOOLCHAIN_PROGS(AS, ml64)
+ else
+ # otherwise, the assembler is "ml.exe"
+ UTIL_LOOKUP_TOOLCHAIN_PROGS(AS, ml)
+ fi
fi
AC_SUBST(AS)
@@ -879,7 +878,13 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_BUILD_COMPILERS],
# On windows, the assember is "ml.exe". We currently don't need this so
# do not require.
- UTIL_LOOKUP_PROGS(BUILD_AS, ml, [$VS_PATH])
+ if test "x$OPENJDK_BUILD_CPU_BITS" = "x64"; then
+ # On 64 bit windows, the assembler is "ml64.exe"
+ UTIL_LOOKUP_PROGS(BUILD_AS, ml64, [$VS_PATH])
+ else
+ # otherwise the assembler is "ml.exe"
+ UTIL_LOOKUP_PROGS(BUILD_AS, ml, [$VS_PATH])
+ fi
# On windows, the ar tool is lib.exe (used to create static libraries).
# We currently don't need this so do not require.
diff --git a/make/common/Modules.gmk b/make/common/Modules.gmk
index 1bc71c316ef..2f05e39da82 100644
--- a/make/common/Modules.gmk
+++ b/make/common/Modules.gmk
@@ -64,16 +64,9 @@ ifeq ($(INCLUDE_JVMCI), false)
MODULES_FILTER += jdk.internal.vm.ci
endif
-# Filter out Graal specific modules if Graal is disabled
-ifeq ($(INCLUDE_GRAAL), false)
- MODULES_FILTER += jdk.internal.vm.compiler
- MODULES_FILTER += jdk.internal.vm.compiler.management
-endif
-
-# Filter out aot specific modules if aot is disabled
-ifeq ($(ENABLE_AOT), false)
- MODULES_FILTER += jdk.aot
-endif
+# Filter out Graal specific modules
+MODULES_FILTER += jdk.internal.vm.compiler
+MODULES_FILTER += jdk.internal.vm.compiler.management
# jpackage is only on windows, macosx, and linux
ifeq ($(call isTargetOs, windows macosx linux), false)
diff --git a/make/common/NativeCompilation.gmk b/make/common/NativeCompilation.gmk
index 794f7f1889d..1e2b1703d23 100644
--- a/make/common/NativeCompilation.gmk
+++ b/make/common/NativeCompilation.gmk
@@ -163,7 +163,7 @@ $(eval $(call DefineNativeToolchain, TOOLCHAIN_BUILD_LINK_CXX, \
################################################################################
# Extensions of files handled by this macro.
-NATIVE_SOURCE_EXTENSIONS := %.s %.S %.c %.cpp %.cc %.m %.mm
+NATIVE_SOURCE_EXTENSIONS := %.S %.c %.cpp %.cc %.m %.mm
# Replaces native source extensions with the object file extension in a string.
# Param 1: the string containing source file names with extensions
@@ -349,42 +349,36 @@ define SetupCompileNativeFileBody
$1_FLAGS := $(CFLAGS_CCACHE) $$($1_USE_PCH_FLAGS) $$($1_BASE_CFLAGS) \
$$($1_OPT_CFLAGS) $$($1_CFLAGS) -c
$1_COMPILER := $$($$($1_BASE)_CC)
- $1_DEP_FLAG := $(C_FLAG_DEPS)
else ifneq ($$(filter %.m, $$($1_FILENAME)), )
# Compile as an Objective-C file
$1_FLAGS := -x objective-c $(CFLAGS_CCACHE) $$($1_USE_PCH_FLAGS) \
$$($1_BASE_CFLAGS) $$($1_OPT_CFLAGS) $$($1_CFLAGS) -c
$1_COMPILER := $$($$($1_BASE)_CC)
- $1_DEP_FLAG := $(C_FLAG_DEPS)
- else ifneq ($$(filter %.s %.S, $$($1_FILENAME)), )
- # Compile as assembler file
- $1_FLAGS := $$($1_BASE_ASFLAGS)
+ else ifneq ($$(filter %.S, $$($1_FILENAME)), )
+ # Compile as preprocessed assembler file
+ $1_FLAGS := $(BASIC_ASFLAGS) $$($1_BASE_ASFLAGS)
$1_COMPILER := $(AS)
- $1_DEP_FLAG :=
else ifneq ($$(filter %.cpp %.cc %.mm, $$($1_FILENAME)), )
# Compile as a C++ or Objective-C++ file
$1_FLAGS := $(CFLAGS_CCACHE) $$($1_USE_PCH_FLAGS) $$($1_BASE_CXXFLAGS) \
$$($1_OPT_CXXFLAGS) $$($1_CXXFLAGS) -c
$1_COMPILER := $$($$($1_BASE)_CXX)
- $1_DEP_FLAG := $(CXX_FLAG_DEPS)
else
$$(error Internal error in NativeCompilation.gmk: no compiler for file $$($1_FILENAME))
endif
- ifeq ($$(filter %.s %.S, $$($1_FILENAME)), )
- # And this is the dependency file for this obj file.
- $1_DEPS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d,$$($1_OBJ))
- # The dependency target file lists all dependencies as empty targets to
- # avoid make error "No rule to make target" for removed files
- $1_DEPS_TARGETS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d.targets,$$($1_OBJ))
-
- # Only try to load individual dependency information files if the global
- # file hasn't been loaded (could happen if make was interrupted).
- ifneq ($$($$($1_BASE)_DEPS_FILE_LOADED), true)
- # Include previously generated dependency information. (if it exists)
- -include $$($1_DEPS_FILE)
- -include $$($1_DEPS_TARGETS_FILE)
- endif
+ # And this is the dependency file for this obj file.
+ $1_DEPS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d,$$($1_OBJ))
+ # The dependency target file lists all dependencies as empty targets to
+ # avoid make error "No rule to make target" for removed files
+ $1_DEPS_TARGETS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d.targets,$$($1_OBJ))
+
+ # Only try to load individual dependency information files if the global
+ # file hasn't been loaded (could happen if make was interrupted).
+ ifneq ($$($$($1_BASE)_DEPS_FILE_LOADED), true)
+ # Include previously generated dependency information. (if it exists)
+ -include $$($1_DEPS_FILE)
+ -include $$($1_DEPS_TARGETS_FILE)
endif
ifneq ($$(strip $$($1_CFLAGS) $$($1_CXXFLAGS) $$($1_OPTIMIZATION)), )
@@ -405,7 +399,7 @@ define SetupCompileNativeFileBody
$$(call MakeDir, $$(@D))
ifneq ($(TOOLCHAIN_TYPE), microsoft)
$$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
- $$($1_COMPILER) $$($1_DEP_FLAG) \
+ $$($1_COMPILER) $$(GENDEPS_FLAGS) \
$$(addsuffix .tmp, $$($1_DEPS_FILE)) \
$$($1_COMPILE_OPTIONS)))
ifneq ($$($1_DEPS_FILE), )
@@ -424,15 +418,25 @@ define SetupCompileNativeFileBody
# Keep as much as possible on one execution line for best performance
# on Windows. No need to save exit code from compilation since
# pipefail is always active on Windows.
- $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
- $$($1_COMPILER) -showIncludes $$($1_COMPILE_OPTIONS))) \
- | $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \
- -e "^$$($1_FILENAME)$$$$" || test "$$$$?" = "1" ; \
- $(ECHO) $$@: \\ > $$($1_DEPS_FILE) ; \
- $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_OBJ).log \
- | $(SORT) -u >> $$($1_DEPS_FILE) ; \
- $(ECHO) >> $$($1_DEPS_FILE) ; \
- $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) > $$($1_DEPS_TARGETS_FILE)
+ ifeq ($$(filter %.S, $$($1_FILENAME)), )
+ $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
+ $$($1_COMPILER) -showIncludes $$($1_COMPILE_OPTIONS))) \
+ | $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \
+ -e "^$$($1_FILENAME)$$$$" || test "$$$$?" = "1" ; \
+ $(ECHO) $$@: \\ > $$($1_DEPS_FILE) ; \
+ $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_OBJ).log \
+ | $(SORT) -u >> $$($1_DEPS_FILE) ; \
+ $(ECHO) >> $$($1_DEPS_FILE) ; \
+ $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) > $$($1_DEPS_TARGETS_FILE)
+ else
+ # For assembler calls just create empty dependency lists
+ $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
+ $$($1_COMPILER) $$($1_FLAGS) \
+ $(CC_OUT_OPTION)$$($1_OBJ) -Ta $$($1_SRC_FILE))) \
+ | $(TR) -d '\r' | $(GREP) -v -e "Assembling:" || test "$$$$?" = "1" ; \
+ $(ECHO) > $$($1_DEPS_FILE) ; \
+ $(ECHO) > $$($1_DEPS_TARGETS_FILE)
+ endif
endif
endif
endef
@@ -814,7 +818,7 @@ define SetupNativeCompilationBody
-include $$($1_PCH_DEPS_TARGETS_FILE)
$1_PCH_COMMAND := $$($1_CC) $$($1_CFLAGS) $$($1_EXTRA_CFLAGS) $$($1_SYSROOT_CFLAGS) \
- $$($1_OPT_CFLAGS) -x c++-header -c $(C_FLAG_DEPS) \
+ $$($1_OPT_CFLAGS) -x c++-header -c $(GENDEPS_FLAGS) \
$$(addsuffix .tmp, $$($1_PCH_DEPS_FILE))
$$($1_PCH_FILE): $$($1_PRECOMPILED_HEADER) $$($1_COMPILE_VARDEPS_FILE)
@@ -974,6 +978,13 @@ define SetupNativeCompilationBody
$(CD) $$($1_SYMBOLS_DIR) && \
$$($1_OBJCOPY) --add-gnu-debuglink=$$($1_DEBUGINFO_FILES) $$($1_TARGET)
+ else ifeq ($(call isTargetOs, aix), true)
+ # AIX does not provide the equivalent of OBJCOPY to extract debug symbols,
+ # so we copy the compiled object with symbols to the .debuginfo file, which
+ # happens prior to the STRIP_CMD on the original target object file.
+ $1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).debuginfo
+ $1_CREATE_DEBUGINFO_CMDS := $(CP) $$($1_TARGET) $$($1_DEBUGINFO_FILES)
+
else ifeq ($(call isTargetOs, macosx), true)
$1_DEBUGINFO_FILES := \
$$($1_SYMBOLS_DIR)/$$($1_BASENAME).dSYM/Contents/Info.plist \
diff --git a/make/conf/build-module-sets.conf b/make/conf/build-module-sets.conf
index cb96c3ede7a..06c879659d1 100644
--- a/make/conf/build-module-sets.conf
+++ b/make/conf/build-module-sets.conf
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -44,9 +44,6 @@ LANGTOOLS_MODULES= \
# These models require buildtools-hotspot to process for gensrc
HOTSPOT_MODULES= \
- jdk.aot \
jdk.hotspot.agent \
jdk.internal.vm.ci \
- jdk.internal.vm.compiler \
- jdk.internal.vm.compiler.management \
#
diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js
index 8c1ced2e3f0..a5e44e91db7 100644
--- a/make/conf/jib-profiles.js
+++ b/make/conf/jib-profiles.js
@@ -205,7 +205,7 @@ var getJibProfiles = function (input) {
// Exclude list to use when Jib creates a source bundle
data.src_bundle_excludes = [
- "build", "{,**/}webrev*", "{,**/}.hg", "{,**/}JTwork", "{,**/}JTreport",
+ "build", "{,**/}webrev*", "{,**/}.hg", "{,**/}JTwork*", "{,**/}JTreport*",
"{,**/}.git"
];
// Include list to use when creating a minimal jib source bundle which
@@ -251,8 +251,6 @@ var getJibProfilesCommon = function (input, data) {
configure_args: concat("--enable-jtreg-failure-handler",
"--with-exclude-translations=de,es,fr,it,ko,pt_BR,sv,ca,tr,cs,sk,ja_JP_A,ja_JP_HA,ja_JP_HI,ja_JP_I,zh_TW,zh_HK",
"--disable-manpages",
- "--disable-jvm-feature-aot",
- "--disable-jvm-feature-graal",
"--disable-jvm-feature-shenandoahgc",
versionArgs(input, common))
};
@@ -396,8 +394,13 @@ var getJibProfilesCommon = function (input, data) {
};
};
- common.boot_jdk_version = "16";
- common.boot_jdk_build_number = "36";
+ if (input.build_os == 'macosx' && input.build_cpu == 'aarch64') {
+ common.boot_jdk_version = "17";
+ common.boot_jdk_build_number = "19";
+ } else {
+ common.boot_jdk_version = "16";
+ common.boot_jdk_build_number = "36";
+ }
common.boot_jdk_home = input.get("boot_jdk", "install_path") + "/jdk-"
+ common.boot_jdk_version
+ (input.build_os == "macosx" ? ".jdk/Contents/Home" : "");
@@ -669,11 +672,7 @@ var getJibProfilesProfiles = function (input, common, data) {
["--with-jcov-input-jdk=" + input.get(name + ".jdk", "home_path")]);
});
- //
// Define artifacts for profiles
- //
- // Macosx bundles are named osx
- // tar.gz.
var artifactData = {
"linux-x64": {
platform: "linux-x64",
@@ -682,11 +681,11 @@ var getJibProfilesProfiles = function (input, common, data) {
platform: "linux-x86",
},
"macosx-x64": {
- platform: "osx-x64",
+ platform: "macos-x64",
jdk_subdir: "jdk-" + data.version + ".jdk/Contents/Home",
},
"macosx-aarch64": {
- platform: "osx-aarch64",
+ platform: "macos-aarch64",
jdk_subdir: "jdk-" + data.version + ".jdk/Contents/Home",
},
"windows-x64": {
@@ -1080,27 +1079,15 @@ var getJibProfilesDependencies = function (input, common) {
boot_jdk_platform = "windows-" + input.build_cpu;
boot_jdk_ext = ".zip";
}
- var boot_jdk;
- if (boot_jdk_platform == 'osx-aarch64') {
- boot_jdk = {
- organization: common.organization,
- ext: "tar.gz",
- module: "jdk-macosx_aarch64",
- revision: "16+1.0-beta1",
- configure_args: "--with-boot-jdk=" + common.boot_jdk_home,
- environment_path: common.boot_jdk_home + "/bin"
- }
- } else {
- boot_jdk = {
- server: "jpg",
- product: "jdk",
- version: common.boot_jdk_version,
- build_number: common.boot_jdk_build_number,
- file: "bundles/" + boot_jdk_platform + "/jdk-" + common.boot_jdk_version + "_"
- + boot_jdk_platform + "_bin" + boot_jdk_ext,
- configure_args: "--with-boot-jdk=" + common.boot_jdk_home,
- environment_path: common.boot_jdk_home + "/bin"
- }
+ var boot_jdk = {
+ server: "jpg",
+ product: "jdk",
+ version: common.boot_jdk_version,
+ build_number: common.boot_jdk_build_number,
+ file: "bundles/" + boot_jdk_platform + "/jdk-" + common.boot_jdk_version + "_"
+ + boot_jdk_platform + "_bin" + boot_jdk_ext,
+ configure_args: "--with-boot-jdk=" + common.boot_jdk_home,
+ environment_path: common.boot_jdk_home + "/bin"
}
var makeBinDir = (input.build_os == "windows"
diff --git a/make/conf/module-loader-map.conf b/make/conf/module-loader-map.conf
index 1904c340d58..b096c238898 100644
--- a/make/conf/module-loader-map.conf
+++ b/make/conf/module-loader-map.conf
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -61,9 +61,6 @@ BOOT_MODULES= \
# should carefully be considered if it should be upgradeable or not.
UPGRADEABLE_PLATFORM_MODULES= \
java.compiler \
- jdk.aot \
- jdk.internal.vm.compiler \
- jdk.internal.vm.compiler.management \
#
PLATFORM_MODULES= \
diff --git a/make/hotspot/lib/JvmFeatures.gmk b/make/hotspot/lib/JvmFeatures.gmk
index ec67795b7fd..1f16d0a9118 100644
--- a/make/hotspot/lib/JvmFeatures.gmk
+++ b/make/hotspot/lib/JvmFeatures.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -116,23 +116,10 @@ endif
ifneq ($(call check-jvm-feature, cds), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_CDS=0
JVM_EXCLUDE_FILES += \
- archiveBuilder.cpp \
- archiveUtils.cpp \
- classListParser.cpp \
classLoaderDataShared.cpp \
classLoaderExt.cpp \
- cppVtables.cpp \
- dumpAllocStats.cpp \
- dynamicArchive.cpp \
- filemap.cpp \
- heapShared.cpp \
- lambdaFormInvokers.cpp \
- metaspaceShared.cpp \
- metaspaceShared_$(HOTSPOT_TARGET_CPU).cpp \
- metaspaceShared_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
- sharedClassUtil.cpp \
- sharedPathsMiscInfo.cpp \
systemDictionaryShared.cpp
+ JVM_EXCLUDE_PATTERNS += cds/
endif
ifneq ($(call check-jvm-feature, nmt), true)
@@ -142,14 +129,6 @@ ifneq ($(call check-jvm-feature, nmt), true)
memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp threadStackTracker.cpp
endif
-ifneq ($(call check-jvm-feature, aot), true)
- JVM_CFLAGS_FEATURES += -DINCLUDE_AOT=0
- JVM_EXCLUDE_FILES += \
- compiledIC_aot_x86_64.cpp compiledIC_aot_aarch64.cpp \
- compilerRuntime.cpp aotCodeHeap.cpp aotCompiledMethod.cpp \
- aotLoader.cpp compiledIC_aot.cpp
-endif
-
ifneq ($(call check-jvm-feature, g1gc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_G1GC=0
JVM_EXCLUDE_PATTERNS += gc/g1
diff --git a/make/ide/visualstudio/hotspot/CreateVSProject.gmk b/make/ide/visualstudio/hotspot/CreateVSProject.gmk
index f60f7c62489..3db28db63be 100644
--- a/make/ide/visualstudio/hotspot/CreateVSProject.gmk
+++ b/make/ide/visualstudio/hotspot/CreateVSProject.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -111,10 +111,8 @@ ifeq ($(call isTargetOs, windows), true)
-relativeSrcInclude hotspot \
-hidePath .hg \
-hidePath .jcheck \
- -hidePath jdk.aot \
-hidePath jdk.hotspot.agent \
-hidePath jdk.internal.vm.ci \
- -hidePath jdk.internal.vm.compiler \
-hidePath jdk.jfr \
-compiler VC10 \
-jdkTargetRoot $(call FixPath, $(JDK_OUTPUTDIR)) \
diff --git a/make/jdk/src/classes/build/tools/generatecharacter/GenerateCharacter.java b/make/jdk/src/classes/build/tools/generatecharacter/GenerateCharacter.java
index e6c8ff53ed8..ad690048e55 100644
--- a/make/jdk/src/classes/build/tools/generatecharacter/GenerateCharacter.java
+++ b/make/jdk/src/classes/build/tools/generatecharacter/GenerateCharacter.java
@@ -409,7 +409,7 @@ static long buildOne(int c, UnicodeSpec us, SpecialCaseMap[] specialMaps) {
// extract and record the uppercase letter / lowercase letter property into the
// maskOtherUppercase/-Lowercase bit so that Character.isLower|UpperCase
- // can use a one-step lookup (this property includes
+ // can use a one-step lookup
if (resultA == Character.UPPERCASE_LETTER) {
resultA |= maskOtherUppercase;
} else if (resultA == Character.LOWERCASE_LETTER) {
@@ -1897,7 +1897,6 @@ public static void main(String[] args) {
hex8(maxOffset));
}
}
- catch (FileNotFoundException e) { FAIL(e.toString()); }
catch (IOException e) { FAIL(e.toString()); }
catch (Throwable e) {
System.out.println("Unexpected exception:");
diff --git a/make/modules/java.desktop/lib/Awt2dLibraries.gmk b/make/modules/java.desktop/lib/Awt2dLibraries.gmk
index cf00b51b665..ff5fa00c720 100644
--- a/make/modules/java.desktop/lib/Awt2dLibraries.gmk
+++ b/make/modules/java.desktop/lib/Awt2dLibraries.gmk
@@ -339,7 +339,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJAVAJPEG, \
OPTIMIZATION := HIGHEST, \
CFLAGS := $(CFLAGS_JDKLIB), \
HEADERS_FROM_SRC := $(LIBJPEG_HEADERS_FROM_SRC), \
- DISABLED_WARNINGS_gcc := clobbered implicit-fallthrough shift-negative-value, \
+ DISABLED_WARNINGS_gcc := clobbered implicit-fallthrough shift-negative-value array-bounds, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := $(LIBJPEG_LIBS) $(JDKLIB_LIBS), \
@@ -465,7 +465,7 @@ else
HARFBUZZ_DISABLED_WARNINGS_gcc := type-limits missing-field-initializers strict-aliasing
HARFBUZZ_DISABLED_WARNINGS_CXX_gcc := reorder delete-non-virtual-dtor strict-overflow \
- maybe-uninitialized class-memaccess
+ maybe-uninitialized class-memaccess unused-result
HARFBUZZ_DISABLED_WARNINGS_clang := unused-value incompatible-pointer-types \
tautological-constant-out-of-range-compare int-to-pointer-cast \
undef missing-field-initializers range-loop-analysis \
diff --git a/make/modules/jdk.aot/Java.gmk b/make/modules/jdk.aot/Java.gmk
deleted file mode 100644
index af920f0411d..00000000000
--- a/make/modules/jdk.aot/Java.gmk
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-# Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation. Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-# -parameters provides method's parameters information in class file,
-# JVMCI compilers make use of that information for various sanity checks.
-# Don't use Indy strings concatenation to have good JAOTC startup performance.
-# The exports are needed since JVMCI is dynamically exported (see
-# jdk.vm.ci.services.internal.ReflectionAccessJDK::openJVMCITo).
-
-JAVAC_FLAGS += -parameters -XDstringConcat=inline \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.aarch64=jdk.internal.vm.compiler,jdk.aot \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.amd64=jdk.internal.vm.compiler,jdk.aot \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.code=jdk.internal.vm.compiler,jdk.aot \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.code.site=jdk.internal.vm.compiler,jdk.aot \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.code.stack=jdk.internal.vm.compiler,jdk.aot \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.common=jdk.internal.vm.compiler,jdk.aot \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot=jdk.internal.vm.compiler,jdk.aot \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.aarch64=jdk.internal.vm.compiler,jdk.aot \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.amd64=jdk.internal.vm.compiler,jdk.aot \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.meta=jdk.internal.vm.compiler,jdk.aot \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.runtime=jdk.internal.vm.compiler,jdk.aot \
- #
-
-EXCLUDES += \
- jdk.tools.jaotc.test
- #
-
-## WORKAROUND jdk.aot source structure issue
-AOT_MODULESOURCEPATH := $(MODULESOURCEPATH) \
- $(subst /$(MODULE)/,/*/, $(filter-out %processor/src, \
- $(wildcard $(TOPDIR)/src/$(MODULE)/share/classes/*/src)))
-MODULESOURCEPATH := $(call PathList, $(AOT_MODULESOURCEPATH))
diff --git a/make/modules/jdk.aot/Launcher.gmk b/make/modules/jdk.aot/Launcher.gmk
deleted file mode 100644
index 5025ceaf7f1..00000000000
--- a/make/modules/jdk.aot/Launcher.gmk
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation. Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-include LauncherCommon.gmk
-
-# The JVMCI exports are needed since JVMCI is normally dynamically exported
-# (see jdk.vm.ci.services.internal.ReflectionAccessJDK::openJVMCITo).
-
-$(eval $(call SetupBuildLauncher, jaotc, \
- MAIN_CLASS := jdk.tools.jaotc.Main, \
- EXTRA_JAVA_ARGS := -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI \
- --add-exports=jdk.internal.vm.ci/jdk.vm.ci.aarch64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- --add-exports=jdk.internal.vm.ci/jdk.vm.ci.amd64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- --add-exports=jdk.internal.vm.ci/jdk.vm.ci.code=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- --add-exports=jdk.internal.vm.ci/jdk.vm.ci.code.site=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- --add-exports=jdk.internal.vm.ci/jdk.vm.ci.code.stack=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- --add-exports=jdk.internal.vm.ci/jdk.vm.ci.common=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- --add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- , \
- JAVA_ARGS := --add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.aarch64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- --add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.amd64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- --add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.aarch64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- --add-exports=jdk.internal.vm.ci/jdk.vm.ci.meta=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- --add-exports=jdk.internal.vm.ci/jdk.vm.ci.runtime=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
- -XX:+UnlockExperimentalVMOptions -XX:+UseAOT \
- -XX:+CalculateClassFingerprint \
- -Djvmci.UseProfilingInformation=false \
- -Dgraal.UseExceptionProbability=false \
- -Djvmci.Compiler=graal \
- --add-modules ALL-DEFAULT \
- , \
-))
diff --git a/make/modules/jdk.internal.vm.compiler.management/Gensrc.gmk b/make/modules/jdk.internal.vm.compiler.management/Gensrc.gmk
deleted file mode 100644
index 8a842997d76..00000000000
--- a/make/modules/jdk.internal.vm.compiler.management/Gensrc.gmk
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation. Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-GENSRC_DIR := $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)
-SRC_DIR := $(TOPDIR)/src/$(MODULE)/share/classes
-
-################################################################################
-
-PROC_SRC_SUBDIRS := \
- org.graalvm.compiler.hotspot.management \
- #
-
-PROC_SRC_DIRS := $(patsubst %, $(SRC_DIR)/%/src, $(PROC_SRC_SUBDIRS))
-
-PROC_SRCS := $(filter %.java, $(call FindFiles, $(PROC_SRC_DIRS)))
-
-ALL_SRC_DIRS := $(SRC_DIR) $(wildcard $(SRC_DIR)/*/src)
-SOURCEPATH := $(call PathList, $(ALL_SRC_DIRS))
-
-PROCESSOR_JARS := \
- $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.serviceprovider.processor.jar \
- #
-PROCESSOR_PATH := $(call PathList, $(PROCESSOR_JARS))
-
-$(GENSRC_DIR)/_gensrc_proc_done: $(PROC_SRCS) $(PROCESSOR_JARS)
- $(call MakeDir, $(@D))
- $(eval $(call ListPathsSafely,PROC_SRCS,$(@D)/_gensrc_proc_files))
- $(JAVA) $(NEW_JAVAC) \
- -XDignore.symbol.file \
- --upgrade-module-path $(JDK_OUTPUTDIR)/modules --system none \
- -sourcepath $(SOURCEPATH) \
- -implicit:none \
- -proc:only \
- -processorpath $(PROCESSOR_PATH) \
- -d $(GENSRC_DIR) \
- -s $(GENSRC_DIR) \
- @$(@D)/_gensrc_proc_files
- $(TOUCH) $@
-
-TARGETS += $(GENSRC_DIR)/_gensrc_proc_done
-
-################################################################################
-
-$(GENSRC_DIR)/module-info.java.extra: $(GENSRC_DIR)/_gensrc_proc_done
- $(ECHO) "" > $@;
- ($(CD) $(GENSRC_DIR)/META-INF/providers && \
- p=""; \
- impl=""; \
- for i in $$($(AWK) '$$0=FILENAME" "$$0' * | $(SORT) -k 2 | $(SED) 's/ .*//'); do \
- c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
- if test x$$p != x$$c; then \
- if test x$$p != x; then \
- $(ECHO) " ;" >> $@; \
- fi; \
- $(ECHO) "provides $$c with" >> $@; \
- p=$$c; \
- impl=""; \
- fi; \
- if test x$$impl != x; then \
- $(ECHO) " , $$i" >> $@; \
- else \
- $(ECHO) " $$i" >> $@; \
- fi; \
- impl=$$i; \
- done); \
- $(ECHO) " ;" >> $@;
-
-TARGETS += $(GENSRC_DIR)/module-info.java.extra
diff --git a/make/modules/jdk.internal.vm.compiler/Gensrc.gmk b/make/modules/jdk.internal.vm.compiler/Gensrc.gmk
deleted file mode 100644
index d4479e96cc8..00000000000
--- a/make/modules/jdk.internal.vm.compiler/Gensrc.gmk
+++ /dev/null
@@ -1,150 +0,0 @@
-#
-# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation. Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-GENSRC_DIR := $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)
-SRC_DIR := $(TOPDIR)/src/$(MODULE)/share/classes
-
-################################################################################
-
-PROC_SRC_SUBDIRS := \
- org.graalvm.compiler.asm.amd64 \
- org.graalvm.compiler.code \
- org.graalvm.compiler.core \
- org.graalvm.compiler.core.aarch64 \
- org.graalvm.compiler.core.amd64 \
- org.graalvm.compiler.core.common \
- org.graalvm.compiler.debug \
- org.graalvm.compiler.hotspot \
- org.graalvm.compiler.hotspot.aarch64 \
- org.graalvm.compiler.hotspot.amd64 \
- org.graalvm.compiler.graph \
- org.graalvm.compiler.java \
- org.graalvm.compiler.lir \
- org.graalvm.compiler.lir.amd64 \
- org.graalvm.compiler.loop \
- org.graalvm.compiler.loop.phases \
- org.graalvm.compiler.nodes \
- org.graalvm.compiler.replacements \
- org.graalvm.compiler.replacements.aarch64 \
- org.graalvm.compiler.replacements.amd64 \
- org.graalvm.compiler.phases \
- org.graalvm.compiler.phases.common \
- org.graalvm.compiler.printer \
- org.graalvm.compiler.virtual \
- #
-
-PROC_SRC_DIRS := $(patsubst %, $(SRC_DIR)/%/src, $(PROC_SRC_SUBDIRS))
-
-PROC_SRCS := $(filter %.java, $(call FindFiles, $(PROC_SRC_DIRS)))
-
-ALL_SRC_DIRS := $(SRC_DIR) $(wildcard $(SRC_DIR)/*/src)
-SOURCEPATH := $(call PathList, $(ALL_SRC_DIRS))
-
-PROCESSOR_JARS := \
- $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.match.processor.jar \
- $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.nodeinfo.processor.jar \
- $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor.jar \
- $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier.jar \
- $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.serviceprovider.processor.jar \
- #
-PROCESSOR_PATH := $(call PathList, $(PROCESSOR_JARS))
-
-ADD_EXPORTS := \
- --add-modules jdk.internal.vm.ci \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.aarch64=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.amd64=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.code=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.code.site=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.code.stack=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.common=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.aarch64=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.amd64=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.events=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspotvmconfig=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.inittimer=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.meta=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.runtime=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.services=jdk.internal.vm.compiler \
- #
-
-$(GENSRC_DIR)/_gensrc_proc_done: $(PROC_SRCS) $(PROCESSOR_JARS)
- $(call MakeDir, $(@D))
- $(eval $(call ListPathsSafely,PROC_SRCS,$(@D)/_gensrc_proc_files))
- $(JAVA) $(NEW_JAVAC) \
- -XDignore.symbol.file \
- --upgrade-module-path $(JDK_OUTPUTDIR)/modules --system none \
- $(ADD_EXPORTS) \
- -sourcepath $(SOURCEPATH) \
- -implicit:none \
- -proc:only \
- -processorpath $(PROCESSOR_PATH) \
- -d $(GENSRC_DIR) \
- -s $(GENSRC_DIR) \
- @$(@D)/_gensrc_proc_files
- $(TOUCH) $@
-
-TARGETS += $(GENSRC_DIR)/_gensrc_proc_done
-
-################################################################################
-
-$(GENSRC_DIR)/module-info.java.extra: $(GENSRC_DIR)/_gensrc_proc_done
- $(ECHO) "" > $@;
- ($(CD) $(GENSRC_DIR)/META-INF/providers && \
- p=""; \
- impl=""; \
- for i in $$($(GREP) '^' * | $(SORT) -t ':' -k 2 | $(SED) 's/:.*//'); do \
- c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
- if test x$$p != x$$c; then \
- if test x$$p != x; then \
- $(ECHO) " ;" >> $@; \
- fi; \
- $(ECHO) "provides $$c with" >> $@; \
- p=$$c; \
- impl=""; \
- fi; \
- if test x$$impl != x; then \
- $(ECHO) " , $$i" >> $@; \
- else \
- $(ECHO) " $$i" >> $@; \
- fi; \
- impl=$$i; \
- done); \
- $(ECHO) " ;" >> $@; \
- $(ECHO) "uses org.graalvm.compiler.options.OptionDescriptors;" >> $@; \
- $(ECHO) "provides org.graalvm.compiler.options.OptionDescriptors with" >> $@; \
- impl=""; \
- for i in $$($(FIND) $(GENSRC_DIR) -name '*_OptionDescriptors.java' | $(SORT)); do \
- c=$$($(ECHO) $$i | $(SED) 's:.*/jdk\.internal\.vm\.compiler/\(.*\)\.java:\1:' | $(TR) '/' '.'); \
- if test x$$impl != x; then \
- $(ECHO) " , $$c" >> $@; \
- else \
- $(ECHO) " $$c" >> $@; \
- fi; \
- impl=$$c; \
- done; \
- $(ECHO) " ;" >> $@;
-
-TARGETS += $(GENSRC_DIR)/module-info.java.extra
diff --git a/make/modules/jdk.internal.vm.compiler/Java.gmk b/make/modules/jdk.internal.vm.compiler/Java.gmk
deleted file mode 100644
index fd45f8170ca..00000000000
--- a/make/modules/jdk.internal.vm.compiler/Java.gmk
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation. Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-JAVAC_FLAGS += -parameters -XDstringConcat=inline \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.aarch64=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.amd64=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.code=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.code.site=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.code.stack=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.common=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.aarch64=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.amd64=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.meta=jdk.internal.vm.compiler \
- --add-exports jdk.internal.vm.ci/jdk.vm.ci.runtime=jdk.internal.vm.compiler \
- #
-
-EXCLUDES += \
- jdk.internal.vm.compiler.collections.test \
- jdk.tools.jaotc.test \
- org.graalvm.compiler.api.directives.test \
- org.graalvm.compiler.api.test \
- org.graalvm.compiler.asm.aarch64.test \
- org.graalvm.compiler.asm.amd64.test \
- org.graalvm.compiler.asm.test \
- org.graalvm.compiler.core.aarch64.test \
- org.graalvm.compiler.core.amd64.test \
- org.graalvm.compiler.core.jdk9.test \
- org.graalvm.compiler.core.match.processor \
- org.graalvm.compiler.core.test \
- org.graalvm.compiler.debug.test \
- org.graalvm.compiler.graph.test \
- org.graalvm.compiler.hotspot.aarch64.test \
- org.graalvm.compiler.hotspot.amd64.test \
- org.graalvm.compiler.hotspot.jdk15.test \
- org.graalvm.compiler.hotspot.jdk9.test \
- org.graalvm.compiler.hotspot.lir.test \
- org.graalvm.compiler.hotspot.test \
- org.graalvm.compiler.jtt \
- org.graalvm.compiler.lir.jtt \
- org.graalvm.compiler.lir.test \
- org.graalvm.compiler.loop.test \
- org.graalvm.compiler.microbenchmarks \
- org.graalvm.compiler.nodeinfo.processor \
- org.graalvm.compiler.nodes.test \
- org.graalvm.compiler.options.processor \
- org.graalvm.compiler.options.test \
- org.graalvm.compiler.phases.common.test \
- org.graalvm.compiler.processor \
- org.graalvm.compiler.replacements.jdk10.test \
- org.graalvm.compiler.replacements.jdk12.test \
- org.graalvm.compiler.replacements.jdk9.test \
- org.graalvm.compiler.replacements.processor \
- org.graalvm.compiler.replacements.test \
- org.graalvm.compiler.serviceprovider.processor \
- org.graalvm.compiler.test \
- org.graalvm.compiler.virtual.bench \
- org.graalvm.micro.benchmarks \
- org.graalvm.util.test \
- #
-
-## WORKAROUND jdk.internal.vm.compiler source structure issue
-VM_COMPILER_MODULESOURCEPATH := $(MODULESOURCEPATH) \
- $(subst /$(MODULE)/,/*/, $(filter-out %processor/src %test/src %jtt/src %bench/src %microbenchmarks/src, \
- $(wildcard $(TOPDIR)/src/$(MODULE)/share/classes/*/src)))
-MODULESOURCEPATH := $(call PathList, $(VM_COMPILER_MODULESOURCEPATH))
diff --git a/make/test/JtregGraalUnit.gmk b/make/test/JtregGraalUnit.gmk
deleted file mode 100644
index 08426e69456..00000000000
--- a/make/test/JtregGraalUnit.gmk
+++ /dev/null
@@ -1,188 +0,0 @@
-#
-# Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation. Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-################################################################################
-# This file builds Graal component of the JTReg tests for Hotspot.
-# It also covers the test-image part, where the built files are copied to the
-# test image.
-################################################################################
-
-default: all
-
-include $(SPEC)
-include MakeBase.gmk
-include JavaCompilation.gmk
-
-TARGETS_BUILD :=
-TARGETS_IMAGE :=
-TARGETS_EXTRA_LIB :=
-
-ifeq ($(INCLUDE_GRAAL), true)
- ifneq ($(GRAALUNIT_LIB), )
-
- SRC_DIR := $(TOPDIR)/src/jdk.internal.vm.compiler/share/classes
- TEST_DIR := $(TOPDIR)/test/hotspot/jtreg/compiler/graalunit
- COMPILE_OUTPUTDIR := $(SUPPORT_OUTPUTDIR)/test/graalunit
- LIB_OUTPUTDIR := $(TEST_IMAGE_DIR)/hotspot/jtreg/graal
-
- # This evaluation is expensive and should only be done if this target was
- # explicitly called.
- ifneq ($(filter build-test-hotspot-jtreg-graal, $(MAKECMDGOALS)), )
-
- TEST_COMPILE_CP := \
- $(JDK_OUTPUTDIR)/modules/jdk.internal.vm.compiler \
- $(JDK_OUTPUTDIR)/modules/jdk.internal.vm.ci \
- $(LIB_OUTPUTDIR)/junit-4.12.jar \
- $(LIB_OUTPUTDIR)/asm-5.0.4.jar \
- $(LIB_OUTPUTDIR)/asm-tree-5.0.4.jar \
- $(LIB_OUTPUTDIR)/java-allocation-instrumenter.jar \
- $(LIB_OUTPUTDIR)/hamcrest-core-1.3.jar
-
- TEST_JAVAC_FLAGS := \
- -processorpath $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier.jar \
- --add-exports jdk.unsupported/sun.misc=ALL-UNNAMED \
- --add-exports java.base/jdk.internal.misc=ALL-UNNAMED \
- --add-exports java.base/jdk.internal.module=ALL-UNNAMED \
- -Xprefer:newer \
- #
-
- ### Copy 3rd party libs
- $(eval $(call SetupCopyFiles, COPY_GRAALUNIT_LIBS, \
- FILES := $(wildcard $(GRAALUNIT_LIB)/*.jar), \
- DEST := $(LIB_OUTPUTDIR), \
- ))
-
- TARGETS_EXTRA_LIB += $(COPY_GRAALUNIT_LIBS)
-
- ### Compile graalunit tests
- $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_TESTS, \
- TARGET_RELEASE := $(TARGET_RELEASE_NEWJDK_UPGRADED), \
- SRC := \
- $(SRC_DIR)/jdk.internal.vm.compiler.collections.test/src \
- $(SRC_DIR)/org.graalvm.compiler.api.directives.test/src \
- $(SRC_DIR)/org.graalvm.compiler.api.test/src \
- $(SRC_DIR)/org.graalvm.compiler.asm.aarch64.test/src \
- $(SRC_DIR)/org.graalvm.compiler.asm.amd64.test/src \
- $(SRC_DIR)/org.graalvm.compiler.asm.test/src \
- $(SRC_DIR)/org.graalvm.compiler.core.aarch64.test/src \
- $(SRC_DIR)/org.graalvm.compiler.core.amd64.test/src \
- $(SRC_DIR)/org.graalvm.compiler.core.jdk9.test/src \
- $(SRC_DIR)/org.graalvm.compiler.core.test/src \
- $(SRC_DIR)/org.graalvm.compiler.debug.test/src \
- $(SRC_DIR)/org.graalvm.compiler.graph.test/src \
- $(SRC_DIR)/org.graalvm.compiler.hotspot.aarch64.test/src \
- $(SRC_DIR)/org.graalvm.compiler.hotspot.amd64.test/src \
- $(SRC_DIR)/org.graalvm.compiler.hotspot.jdk15.test/src \
- $(SRC_DIR)/org.graalvm.compiler.hotspot.jdk9.test/src \
- $(SRC_DIR)/org.graalvm.compiler.hotspot.lir.test/src \
- $(SRC_DIR)/org.graalvm.compiler.hotspot.test/src \
- $(SRC_DIR)/org.graalvm.compiler.jtt/src \
- $(SRC_DIR)/org.graalvm.compiler.lir.jtt/src \
- $(SRC_DIR)/org.graalvm.compiler.lir.test/src \
- $(SRC_DIR)/org.graalvm.compiler.loop.test/src \
- $(SRC_DIR)/org.graalvm.compiler.nodes.test/src \
- $(SRC_DIR)/org.graalvm.compiler.options.test/src \
- $(SRC_DIR)/org.graalvm.compiler.phases.common.test/src \
- $(SRC_DIR)/org.graalvm.compiler.replacements.jdk10.test/src \
- $(SRC_DIR)/org.graalvm.compiler.replacements.jdk12.test/src \
- $(SRC_DIR)/org.graalvm.compiler.replacements.jdk9.test/src \
- $(SRC_DIR)/org.graalvm.compiler.replacements.test/src \
- $(SRC_DIR)/org.graalvm.compiler.test/src \
- $(SRC_DIR)/org.graalvm.util.test/src \
- , \
- EXCLUDE_FILES := org/graalvm/compiler/core/test/VerifyDebugUsageTest.java, \
- BIN := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \
- CLASSPATH := $(TEST_COMPILE_CP), \
- DISABLED_WARNINGS := processing, \
- JAVAC_FLAGS := $(TEST_JAVAC_FLAGS), \
- COPY := .input, \
- ))
-
- TARGETS_BUILD += $(BUILD_VM_COMPILER_TESTS)
-
- ### Compile graalunit tests which require -XDstringConcat=inline
- $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_TESTS_SET2, \
- TARGET_RELEASE := $(TARGET_RELEASE_NEWJDK_UPGRADED), \
- DEPENDS := $(BUILD_VM_COMPILER_TESTS), \
- SRC := $(SRC_DIR)/org.graalvm.compiler.core.test/src, \
- INCLUDE_FILES := org/graalvm/compiler/core/test/VerifyDebugUsageTest.java, \
- BIN := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \
- CLASSPATH := \
- $(TEST_COMPILE_CP) \
- $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \
- DISABLED_WARNINGS := processing, \
- JAVAC_FLAGS := $(TEST_JAVAC_FLAGS) -XDstringConcat=inline, \
- ))
-
- TARGETS_BUILD += $(BUILD_VM_COMPILER_TESTS_SET2)
-
- ### Generate jdk.vm.compiler.tests.jar
- $(eval $(call SetupJarArchive, BUILD_VM_COMPILER_TESTS_JAR, \
- DEPENDENCIES := $(BUILD_VM_COMPILER_TESTS) $(BUILD_VM_COMPILER_TESTS_SET2), \
- SRCS := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \
- SUFFIXES:=.class .input, \
- JAR := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests.jar, \
- ))
-
- TARGETS_BUILD += $(BUILD_VM_COMPILER_TESTS_JAR)
-
- ### Compile and build mxtool
- $(eval $(call SetupJavaCompilation, BUILD_MXTOOL, \
- TARGET_RELEASE := $(TARGET_RELEASE_NEWJDK_UPGRADED), \
- SRC := $(TEST_DIR)/com.oracle.mxtool.junit, \
- BIN := $(COMPILE_OUTPUTDIR)/com.oracle.mxtool.junit, \
- JAR := $(COMPILE_OUTPUTDIR)/com.oracle.mxtool.junit.jar, \
- DISABLED_WARNINGS := processing, \
- CLASSPATH := $(LIB_OUTPUTDIR)/junit-4.12.jar, \
- ))
-
- TARGETS_BUILD += $(BUILD_MXTOOL)
-
- $(TARGETS_BUILD): $(TARGETS_EXTRA_LIB)
-
- endif # build-test-hotspot-jtreg-graal
-
- ################################################################################
- # Targets for building test-image.
- ################################################################################
-
- # Copy to hotspot jtreg test image
- $(eval $(call SetupCopyFiles, COPY_HOTSPOT_JTREG_GRAAL, \
- SRC := $(COMPILE_OUTPUTDIR), \
- DEST := $(LIB_OUTPUTDIR), \
- FILES := jdk.vm.compiler.tests.jar com.oracle.mxtool.junit.jar, \
- ))
-
- TARGETS_IMAGE += $(COPY_HOTSPOT_JTREG_GRAAL)
- endif
-endif
-
-build-test-hotspot-jtreg-graal: $(TARGETS_BUILD)
-test-image-hotspot-jtreg-graal: $(TARGETS_IMAGE)
-
-all: build-test-hotspot-jtreg-graal
-test-image: test-image-hotspot-jtreg-graal
-
-.PHONY: default all build-test-hotspot-jtreg-graal test-image-hotspot-jtreg-graal test-image
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index e2947fe7868..5d5afbbfc63 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -1360,7 +1360,7 @@ source %{
// r27 is not allocatable when compressed oops is on and heapbase is not
// zero, compressed klass pointers doesn't use r27 after JDK-8234794
- if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL || UseAOT)) {
+ if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL)) {
_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
_NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
_NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
@@ -3124,32 +3124,6 @@ encode %{
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
- // This encoding class is generated automatically from ad_encode.m4.
- // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
- enc_class aarch64_enc_strw_immn(immN src, memory1 mem) %{
- C2_MacroAssembler _masm(&cbuf);
- address con = (address)$src$$constant;
- // need to do this the hard way until we can manage relocs
- // for 32 bit constants
- __ movoop(rscratch2, (jobject)con);
- if (con) __ encode_heap_oop_not_null(rscratch2);
- loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
- as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
- %}
-
- // This encoding class is generated automatically from ad_encode.m4.
- // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
- enc_class aarch64_enc_strw_immnk(immN src, memory4 mem) %{
- C2_MacroAssembler _masm(&cbuf);
- address con = (address)$src$$constant;
- // need to do this the hard way until we can manage relocs
- // for 32 bit constants
- __ movoop(rscratch2, (jobject)con);
- __ encode_klass_not_null(rscratch2);
- loadStore(_masm, &MacroAssembler::strw, rscratch2, $mem->opcode(),
- as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
- %}
-
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
diff --git a/src/hotspot/cpu/aarch64/ad_encode.m4 b/src/hotspot/cpu/aarch64/ad_encode.m4
index c8a80b001cb..e6c87cf5b05 100644
--- a/src/hotspot/cpu/aarch64/ad_encode.m4
+++ b/src/hotspot/cpu/aarch64/ad_encode.m4
@@ -29,7 +29,7 @@ define(choose, `loadStore($1, &MacroAssembler::$3, $2, $4,
%}')dnl
define(access, `
$3Register $1_reg = as_$3Register($$1$$reg);
- $4choose(MacroAssembler(&cbuf), $1_reg,$2,$mem->opcode(),
+ $4choose(C2_MacroAssembler(&cbuf), $1_reg,$2,$mem->opcode(),
as_Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp,$5)')dnl
define(load,`
// This encoding class is generated automatically from ad_encode.m4.
@@ -59,7 +59,7 @@ define(STORE0,`
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_$2`'0(memory$4 mem) %{
- MacroAssembler _masm(&cbuf);
+ C2_MacroAssembler _masm(&cbuf);
choose(_masm,zr,$2,$mem->opcode(),
as_$3Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp,$4)')dnl
STORE(iRegI,strb,,,1)
@@ -72,7 +72,7 @@ STORE(iRegL,str,,
`// we sometimes get asked to store the stack pointer into the
// current thread -- we cannot do that directly on AArch64
if (src_reg == r31_sp) {
- MacroAssembler _masm(&cbuf);
+ C2_MacroAssembler _masm(&cbuf);
assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
__ mov(rscratch2, sp);
src_reg = rscratch2;
@@ -82,34 +82,10 @@ STORE0(iRegL,str,,8)
STORE(vRegF,strs,Float,,4)
STORE(vRegD,strd,Float,,8)
- // This encoding class is generated automatically from ad_encode.m4.
- // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
- enc_class aarch64_enc_strw_immn(immN src, memory1 mem) %{
- MacroAssembler _masm(&cbuf);
- address con = (address)$src$$constant;
- // need to do this the hard way until we can manage relocs
- // for 32 bit constants
- __ movoop(rscratch2, (jobject)con);
- if (con) __ encode_heap_oop_not_null(rscratch2);
- choose(_masm,rscratch2,strw,$mem->opcode(),
- as_Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp, 4)
-
- // This encoding class is generated automatically from ad_encode.m4.
- // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
- enc_class aarch64_enc_strw_immnk(immN src, memory4 mem) %{
- MacroAssembler _masm(&cbuf);
- address con = (address)$src$$constant;
- // need to do this the hard way until we can manage relocs
- // for 32 bit constants
- __ movoop(rscratch2, (jobject)con);
- __ encode_klass_not_null(rscratch2);
- choose(_masm,rscratch2,strw,$mem->opcode(),
- as_Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp, 4)
-
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
- MacroAssembler _masm(&cbuf);
+ C2_MacroAssembler _masm(&cbuf);
__ membar(Assembler::StoreStore);
loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 31c6b8a5b9b..bb840eb9cdb 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -2444,9 +2444,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Register length = op->length()->as_register();
Register tmp = op->tmp()->as_register();
- __ resolve(ACCESS_READ, src);
- __ resolve(ACCESS_WRITE, dst);
-
CodeStub* stub = op->stub();
int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
@@ -2804,7 +2801,6 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
scratch = op->scratch_opr()->as_register();
}
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
- __ resolve(ACCESS_READ | ACCESS_WRITE, obj);
// add debug info for NullPointerException only if one is possible
int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
if (op->info() != NULL) {
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
index 8e6f951fd87..573d7b4116c 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
@@ -74,7 +74,6 @@ friend class ArrayCopyStub;
// call stub: CompiledStaticCall::to_interp_stub_size() +
// CompiledStaticCall::to_trampoline_stub_size()
_call_stub_size = 13 * NativeInstruction::instruction_size,
- _call_aot_stub_size = 0,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
_deopt_handler_size = 7 * NativeInstruction::instruction_size
};
diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
index f65e6514e71..8e5c8843bae 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
@@ -981,10 +981,6 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
index = tmp;
}
- if (is_updateBytes) {
- base_op = access_resolve(ACCESS_READ, base_op);
- }
-
if (offset) {
LIR_Opr tmp = new_pointer_register();
__ add(base_op, LIR_OprFact::intConst(offset), tmp);
@@ -1063,10 +1059,6 @@ void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
index = tmp;
}
- if (is_updateBytes) {
- base_op = access_resolve(ACCESS_READ, base_op);
- }
-
if (offset) {
LIR_Opr tmp = new_pointer_register();
__ add(base_op, LIR_OprFact::intConst(offset), tmp);
@@ -1458,7 +1450,7 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
// membar it's possible for a simple Dekker test to fail if loads
// use LD;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and C1 compiles the loads in another.
- if (!CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
+ if (!CompilerConfig::is_c1_only_no_jvmci()) {
__ membar();
}
__ volatile_load_mem_reg(address, result, info);
diff --git a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
index 2e89960778e..e922fc1cd22 100644
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -60,15 +60,6 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
// static stub relocation stores the instruction address of the call
__ relocate(static_stub_Relocation::spec(mark));
-#if INCLUDE_AOT
- // Don't create a Metadata reloc if we're generating immutable PIC.
- if (cbuf.immutable_PIC()) {
- __ movptr(rmethod, 0);
- __ movptr(rscratch1, 0);
- __ br(rscratch1);
-
- } else
-#endif
{
__ emit_static_call_stub();
}
@@ -96,63 +87,8 @@ int CompiledStaticCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
-#if INCLUDE_AOT
-#define __ _masm.
-void CompiledStaticCall::emit_to_aot_stub(CodeBuffer &cbuf, address mark) {
- if (!UseAOT) {
- return;
- }
- // Stub is fixed up when the corresponding call is converted from
- // calling compiled code to calling aot code.
- // mov r, imm64_aot_code_address
- // jmp r
-
- if (mark == NULL) {
- mark = cbuf.insts_mark(); // Get mark within main instrs section.
- }
-
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a stub.
- MacroAssembler _masm(&cbuf);
-
- address base =
- __ start_a_stub(to_aot_stub_size());
- guarantee(base != NULL, "out of space");
-
- // Static stub relocation stores the instruction address of the call.
- __ relocate(static_stub_Relocation::spec(mark, true /* is_aot */));
- // Load destination AOT code address.
- __ movptr(rscratch1, 0); // address is zapped till fixup time.
- // This is recognized as unresolved by relocs/nativeinst/ic code.
- __ br(rscratch1);
-
- assert(__ pc() - base <= to_aot_stub_size(), "wrong stub size");
-
- // Update current stubs pointer and restore insts_end.
- __ end_a_stub();
-}
-#undef __
-
-int CompiledStaticCall::to_aot_stub_size() {
- if (UseAOT) {
- return 5 * 4; // movz; movk; movk; movk; br
- } else {
- return 0;
- }
-}
-
-// Relocation entries for call stub, compiled java to aot.
-int CompiledStaticCall::reloc_to_aot_stub() {
- if (UseAOT) {
- return 5 * 4; // movz; movk; movk; movk; br
- } else {
- return 0;
- }
-}
-#endif // INCLUDE_AOT
-
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
- address stub = find_stub(false /* is_aot */);
+ address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
@@ -188,10 +124,8 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
method_holder->set_data(0);
- if (!static_stub->is_aot()) {
- NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
- jump->set_jump_destination((address)-1);
- }
+ NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
+ jump->set_jump_destination((address)-1);
}
//-----------------------------------------------------------------------------
@@ -204,7 +138,7 @@ void CompiledDirectStaticCall::verify() {
_call->verify_alignment();
// Verify stub.
- address stub = find_stub(false /* is_aot */);
+ address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder
diff --git a/src/hotspot/cpu/aarch64/compiledIC_aot_aarch64.cpp b/src/hotspot/cpu/aarch64/compiledIC_aot_aarch64.cpp
deleted file mode 100644
index 2a884654aa2..00000000000
--- a/src/hotspot/cpu/aarch64/compiledIC_aot_aarch64.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "aot/compiledIC_aot.hpp"
-#include "code/codeCache.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-
-void CompiledDirectStaticCall::set_to_far(const methodHandle& callee, address entry) {
- if (TraceICs) {
- ResourceMark rm;
- tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_far %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
- set_destination_mt_safe(entry);
-}
-
-void CompiledPltStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
- address stub = find_stub();
- guarantee(stub != NULL, "stub not found");
- if (TraceICs) {
- ResourceMark rm;
- tty->print_cr("CompiledPltStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
- // Creation also verifies the object.
- NativeLoadGot* method_loader = nativeLoadGot_at(stub);
- NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
-
- intptr_t data = method_loader->data();
- address destination = jump->destination();
- assert(data == 0 || data == (intptr_t)callee(),
- "a) MT-unsafe modification of inline cache");
- assert(destination == (address)Universe::non_oop_word()
- || destination == entry,
- "b) MT-unsafe modification of inline cache");
-
- // Update stub.
- method_loader->set_data((intptr_t)callee());
- jump->set_jump_destination(entry);
-
- // Update jump to call.
- set_destination_mt_safe(stub);
-}
-
-#ifdef NEVER_CALLED
-void CompiledPltStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
- // Reset stub.
- address stub = static_stub->addr();
- assert(stub != NULL, "stub not found");
- assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
- // Creation also verifies the object.
- NativeLoadGot* method_loader = nativeLoadGot_at(stub);
- NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
- method_loader->set_data(0);
- jump->set_jump_destination((address)-1);
-}
-#endif
-
-#ifndef PRODUCT
-void CompiledPltStaticCall::verify() {
- // Verify call.
- _call->verify();
-
-#ifdef ASSERT
- CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call);
- assert(cb && cb->is_aot(), "CompiledPltStaticCall can only be used on AOTCompiledMethod");
-#endif
-
- // Verify stub.
- address stub = find_stub();
- assert(stub != NULL, "no stub found for static call");
- // Creation also verifies the object.
- NativeLoadGot* method_loader = nativeLoadGot_at(stub);
- NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
- // Verify state.
- assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
-}
-#endif // !PRODUCT
diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
index d2b2bca13c0..35d3360b263 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
@@ -149,11 +149,6 @@ void BarrierSetAssembler::value_copy(MacroAssembler* masm, DecoratorSet decorato
}
}
-void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
- Register obj1, Register obj2) {
- __ cmp(obj1, obj2);
-}
-
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath) {
// If mask changes we need to ensure that the inverse is still encodable as an immediate
diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp
index b017d65c8aa..5280c2c3a6f 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp
@@ -50,13 +50,6 @@ class BarrierSetAssembler: public CHeapObj {
virtual void value_copy(MacroAssembler* masm, DecoratorSet decorators,
Register src, Register dst, Register value_klass);
- virtual void obj_equals(MacroAssembler* masm,
- Register obj1, Register obj2);
-
- virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
- // Default implementation does not need to do anything.
- }
-
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp
index e4f19f791f6..a56b1326463 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp
@@ -51,7 +51,7 @@ void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, result);
- if (CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
+ if (CompilerConfig::is_c1_only_no_jvmci()) {
// The membar here is necessary to prevent reordering between the
// release store in the CAS above and a subsequent volatile load.
// However for tiered compilation C1 inserts a full barrier before
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
index 57a742f3fb9..f53f501a350 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
@@ -281,7 +281,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
} else {
assert(is_phantom, "only remaining strength");
assert(!is_narrow, "phantom access cannot be narrow");
- __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
+ __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
}
__ blr(lr);
__ mov(rscratch1, r0);
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index 4761ad012fb..1621185a1b5 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -543,7 +543,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
if (needs_thread_local_poll) {
NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
- ldr(rscratch2, Address(rthread, Thread::polling_word_offset()));
+ ldr(rscratch2, Address(rthread, JavaThread::polling_word_offset()));
tbnz(rscratch2, exact_log2(SafepointMechanism::poll_bit()), safepoint);
}
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 1ad3e212f4d..b0a7eeb3353 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -297,10 +297,10 @@ address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod) {
if (acquire) {
- lea(rscratch1, Address(rthread, Thread::polling_word_offset()));
+ lea(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
ldar(rscratch1, rscratch1);
} else {
- ldr(rscratch1, Address(rthread, Thread::polling_word_offset()));
+ ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
}
if (at_return) {
// Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
@@ -3919,8 +3919,7 @@ void MacroAssembler::cmpptr(Register src1, Address src2) {
}
void MacroAssembler::cmpoop(Register obj1, Register obj2) {
- BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->obj_equals(this, obj1, obj2);
+ cmp(obj1, obj2);
}
void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
@@ -4371,15 +4370,6 @@ void MacroAssembler::data_for_value_array_index(Register array, Register array_k
add(data, data, arrayOopDesc::base_offset_in_bytes(T_INLINE_TYPE));
}
-void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
- // Use stronger ACCESS_WRITE|ACCESS_READ by default.
- if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
- decorators |= ACCESS_READ | ACCESS_WRITE;
- }
- BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- return bs->resolve(this, decorators, obj);
-}
-
void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
Register thread_tmp, DecoratorSet decorators) {
access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
@@ -4736,7 +4726,7 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
// Move the address of the polling page into dest.
void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
- ldr(dest, Address(rthread, Thread::polling_page_offset()));
+ ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
}
// Read the polling page. The address of the polling page must
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index 32d473bdfce..6809b7c591e 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -881,10 +881,6 @@ class MacroAssembler: public Assembler {
void data_for_value_array_index(Register array, Register array_klass,
Register index, Register data);
- // Resolves obj for access. Result is placed in the same register.
- // All other registers are preserved.
- void resolve(DecoratorSet decorators, Register obj);
-
void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
Register thread_tmp = noreg, DecoratorSet decorators = 0);
@@ -1143,7 +1139,7 @@ class MacroAssembler: public Assembler {
address trampoline_call(Address entry, CodeBuffer* cbuf = NULL);
static bool far_branches() {
- return ReservedCodeCacheSize > branch_range || UseAOT;
+ return ReservedCodeCacheSize > branch_range;
}
// Jumps that can reach anywhere in the code cache.
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
index 734963145c5..d808e4b5b53 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
@@ -476,16 +476,9 @@ bool NativeInstruction::is_stop() {
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
-
-#ifdef ASSERT
- // This may be the temporary nmethod generated while we're AOT
- // compiling. Such an nmethod doesn't begin with a NOP but with an ADRP.
- if (! (CalculateClassFingerprint && UseAOT && is_adrp_at(verified_entry))) {
- assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
- || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
- "Aarch64 cannot replace non-jump with jump");
- }
-#endif
+ assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
+ || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
+ "Aarch64 cannot replace non-jump with jump");
// Patch this nmethod atomically.
if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
index dcf87913a88..75f2797c326 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2108, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -230,16 +230,6 @@ class NativeCall: public NativeInstruction {
return is_call_at(return_address - NativeCall::return_address_offset);
}
-#if INCLUDE_AOT
- // Return true iff a call from instr to target is out of range.
- // Used for calls from JIT- to AOT-compiled code.
- static bool is_far_call(address instr, address target) {
- // On AArch64 we use trampolines which can reach anywhere in the
- // address space, so calls are never out of range.
- return false;
- }
-#endif
-
// MT-safe patching of a call instruction.
static void insert(address code_pos, address entry);
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index d4bee707fdf..523bbed21b9 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -811,7 +811,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int comp_args_on_stack
__ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
#if INCLUDE_JVMCI
- if (EnableJVMCI || UseAOT) {
+ if (EnableJVMCI) {
// check if this call should be routed towards a specific entry point
__ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
Label no_alternative_target;
@@ -2029,8 +2029,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Load the oop from the handle
__ ldr(obj_reg, Address(oop_handle_reg, 0));
- __ resolve(IS_NOT_NULL, obj_reg);
-
if (UseBiasedLocking) {
__ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
}
@@ -2187,8 +2185,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Get locked oop from the handle we passed to jni
__ ldr(obj_reg, Address(oop_handle_reg, 0));
- __ resolve(IS_NOT_NULL, obj_reg);
-
Label done;
if (UseBiasedLocking) {
@@ -2446,7 +2442,7 @@ void SharedRuntime::generate_deopt_blob() {
// Setup code generation tools
int pad = 0;
#if INCLUDE_JVMCI
- if (EnableJVMCI || UseAOT) {
+ if (EnableJVMCI) {
pad += 512; // Increase the buffer size when compiling for JVMCI
}
#endif
@@ -2521,7 +2517,7 @@ void SharedRuntime::generate_deopt_blob() {
int implicit_exception_uncommon_trap_offset = 0;
int uncommon_trap_offset = 0;
- if (EnableJVMCI || UseAOT) {
+ if (EnableJVMCI) {
implicit_exception_uncommon_trap_offset = __ pc() - start;
__ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
@@ -2647,7 +2643,7 @@ void SharedRuntime::generate_deopt_blob() {
__ reset_last_Java_frame(false);
#if INCLUDE_JVMCI
- if (EnableJVMCI || UseAOT) {
+ if (EnableJVMCI) {
__ bind(after_fetch_unroll_info_call);
}
#endif
@@ -2810,7 +2806,7 @@ void SharedRuntime::generate_deopt_blob() {
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
- if (EnableJVMCI || UseAOT) {
+ if (EnableJVMCI) {
_deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
_deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
}
diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
index 94e8679e56c..a59bb8e46dc 100644
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
@@ -508,7 +508,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
#if INCLUDE_JVMCI
// Check if we need to take lock at entry of synchronized method. This can
// only occur on method entry so emit it only for vtos with step 0.
- if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
+ if (EnableJVMCI && state == vtos && step == 0) {
Label L;
__ ldrb(rscratch1, Address(rthread, JavaThread::pending_monitorenter_offset()));
__ cbz(rscratch1, L);
@@ -782,7 +782,6 @@ void TemplateInterpreterGenerator::lock_method() {
#endif // ASSERT
__ bind(done);
- __ resolve(IS_NOT_NULL, r0);
}
// add space for monitor & lock
@@ -1008,7 +1007,6 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
__ ldrw(crc, Address(esp, 4*wordSize)); // Initial CRC
} else {
__ ldr(buf, Address(esp, 2*wordSize)); // byte[] array
- __ resolve(IS_NOT_NULL | ACCESS_READ, buf);
__ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
__ ldrw(off, Address(esp, wordSize)); // offset
__ add(buf, buf, off); // + offset
@@ -1053,9 +1051,6 @@ address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(Abstract
__ ldrw(off, Address(esp, wordSize)); // int offset
__ sub(len, end, off);
__ ldr(buf, Address(esp, 2*wordSize)); // byte[] buf | long buf
- if (kind == Interpreter::java_util_zip_CRC32C_updateBytes) {
- __ resolve(IS_NOT_NULL | ACCESS_READ, buf);
- }
__ add(buf, buf, off); // + offset
if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
__ ldrw(crc, Address(esp, 4*wordSize)); // long crc
@@ -1544,27 +1539,30 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
__ add(rlocals, esp, r2, ext::uxtx, 3);
__ sub(rlocals, rlocals, wordSize);
- // Make room for locals
- __ sub(rscratch1, esp, r3, ext::uxtx, 3);
-
- // Padding between locals and fixed part of activation frame to ensure
- // SP is always 16-byte aligned.
- __ andr(sp, rscratch1, -16);
+ __ mov(rscratch1, esp);
// r3 - # of additional locals
// allocate space for locals
// explicitly initialize locals
+ // Initializing memory allocated for locals in the same direction as
+ // the stack grows to ensure page initialization order according
+ // to windows-aarch64 stack page growth requirement (see
+ // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-160#stack)
{
Label exit, loop;
__ ands(zr, r3, r3);
__ br(Assembler::LE, exit); // do nothing if r3 <= 0
__ bind(loop);
- __ str(zr, Address(__ post(rscratch1, wordSize)));
+ __ str(zr, Address(__ pre(rscratch1, -wordSize)));
__ sub(r3, r3, 1); // until everything initialized
__ cbnz(r3, loop);
__ bind(exit);
}
+ // Padding between locals and fixed part of activation frame to ensure
+ // SP is always 16-byte aligned.
+ __ andr(sp, rscratch1, -16);
+
// And the base dispatch table
__ get_dispatch();
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index a4631979044..d6cc3839ad8 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -2554,7 +2554,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
// membar it's possible for a simple Dekker test to fail if loads
// use LDR;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and we interpret the loads in another.
- if (!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci()){
+ if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()){
Label notVolatile;
__ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
__ membar(MacroAssembler::AnyAny);
@@ -3295,7 +3295,7 @@ void TemplateTable::fast_accessfield(TosState state)
// membar it's possible for a simple Dekker test to fail if loads
// use LDR;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and we interpret the loads in another.
- if (!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci()) {
+ if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
Label notVolatile;
__ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
__ membar(MacroAssembler::AnyAny);
@@ -3383,7 +3383,7 @@ void TemplateTable::fast_xaccess(TosState state)
// membar it's possible for a simple Dekker test to fail if loads
// use LDR;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and we interpret the loads in another.
- if (!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci()) {
+ if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
Label notVolatile;
__ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset())));
@@ -4046,8 +4046,6 @@ void TemplateTable::monitorenter()
// check for NULL object
__ null_check(r0);
- __ resolve(IS_NOT_NULL, r0);
-
Label is_inline_type;
__ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
__ test_markword_is_inline_type(rscratch1, is_inline_type);
@@ -4156,8 +4154,6 @@ void TemplateTable::monitorexit()
// check for NULL object
__ null_check(r0);
- __ resolve(IS_NOT_NULL, r0);
-
const int is_inline_type_mask = markWord::inline_type_pattern;
Label has_identity;
__ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
diff --git a/src/hotspot/cpu/aarch64/vmStructs_aarch64.hpp b/src/hotspot/cpu/aarch64/vmStructs_aarch64.hpp
index b1bdccfc3cb..cf79ac81df8 100644
--- a/src/hotspot/cpu/aarch64/vmStructs_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/vmStructs_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,4 +39,7 @@
#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
+#define DECLARE_INT_CPU_FEATURE_CONSTANT(id, name, bit) GENERATE_VM_INT_CONSTANT_ENTRY(VM_Version::CPU_##id)
+#define VM_INT_CPU_FEATURE_CONSTANTS CPU_FEATURE_FLAGS(DECLARE_INT_CPU_FEATURE_CONSTANT)
+
#endif // CPU_AARCH64_VMSTRUCTS_AARCH64_HPP
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
index 2b2f3dba071..123c429fa0f 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -195,16 +195,9 @@ void VM_Version::initialize() {
char buf[512];
sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2);
- if (_features & CPU_ASIMD) strcat(buf, ", simd");
- if (_features & CPU_CRC32) strcat(buf, ", crc");
- if (_features & CPU_AES) strcat(buf, ", aes");
- if (_features & CPU_SHA1) strcat(buf, ", sha1");
- if (_features & CPU_SHA2) strcat(buf, ", sha256");
- if (_features & CPU_SHA3) strcat(buf, ", sha3");
- if (_features & CPU_SHA512) strcat(buf, ", sha512");
- if (_features & CPU_LSE) strcat(buf, ", lse");
- if (_features & CPU_SVE) strcat(buf, ", sve");
- if (_features & CPU_SVE2) strcat(buf, ", sve2");
+#define ADD_FEATURE_IF_SUPPORTED(id, name, bit) if (_features & CPU_##id) strcat(buf, ", " name);
+ CPU_FEATURE_FLAGS(ADD_FEATURE_IF_SUPPORTED)
+#undef ADD_FEATURE_IF_SUPPORTED
_features_string = os::strdup(buf);
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
index 37ea7549a11..6817eed08e9 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
@@ -97,23 +97,28 @@ class VM_Version : public Abstract_VM_Version {
};
enum Feature_Flag {
- CPU_FP = (1<<0),
- CPU_ASIMD = (1<<1),
- CPU_EVTSTRM = (1<<2),
- CPU_AES = (1<<3),
- CPU_PMULL = (1<<4),
- CPU_SHA1 = (1<<5),
- CPU_SHA2 = (1<<6),
- CPU_CRC32 = (1<<7),
- CPU_LSE = (1<<8),
- CPU_DCPOP = (1<<16),
- CPU_SHA3 = (1<<17),
- CPU_SHA512 = (1<<21),
- CPU_SVE = (1<<22),
- // flags above must follow Linux HWCAP
- CPU_SVE2 = (1<<28),
- CPU_STXR_PREFETCH= (1<<29),
- CPU_A53MAC = (1<<30),
+#define CPU_FEATURE_FLAGS(decl) \
+ decl(FP, "fp", 0) \
+ decl(ASIMD, "simd", 1) \
+ decl(EVTSTRM, "evtstrm", 2) \
+ decl(AES, "aes", 3) \
+ decl(PMULL, "pmull", 4) \
+ decl(SHA1, "sha1", 5) \
+ decl(SHA2, "sha256", 6) \
+ decl(CRC32, "crc", 7) \
+ decl(LSE, "lse", 8) \
+ decl(DCPOP, "dcpop", 16) \
+ decl(SHA3, "sha3", 17) \
+ decl(SHA512, "sha512", 21) \
+ decl(SVE, "sve", 22) \
+ /* flags above must follow Linux HWCAP */ \
+ decl(SVE2, "sve2", 28) \
+ decl(STXR_PREFETCH, "stxr_prefetch", 29) \
+ decl(A53MAC, "a53mac", 30)
+
+#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (1 << bit),
+ CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG)
+#undef DECLARE_CPU_FEATURE_FLAG
};
static int cpu_family() { return _cpu; }
diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
index 36dc4766b5b..c07dc8e7a37 100644
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
@@ -2101,9 +2101,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption");
- __ resolve(ACCESS_READ, src);
- __ resolve(ACCESS_WRITE, dst);
-
CodeStub* stub = op->stub();
int flags = op->flags();
@@ -2443,7 +2440,6 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
__ b(*op->stub()->entry());
} else if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
- __ resolve(ACCESS_READ | ACCESS_WRITE, obj);
int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
if (op->info() != NULL) {
add_debug_info_for_null_check(null_check_offset, op->info());
diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.hpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.hpp
index 4cd1f0d08ae..77d13532685 100644
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.hpp
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,6 @@
enum {
_call_stub_size = 16,
- _call_aot_stub_size = 0,
_exception_handler_size = PRODUCT_ONLY(68) NOT_PRODUCT(68+60),
_deopt_handler_size = 16
};
diff --git a/src/hotspot/cpu/arm/compiledIC_arm.cpp b/src/hotspot/cpu/arm/compiledIC_arm.cpp
index e7b512993a5..f0272db02ff 100644
--- a/src/hotspot/cpu/arm/compiledIC_arm.cpp
+++ b/src/hotspot/cpu/arm/compiledIC_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -102,7 +102,7 @@ int CompiledStaticCall::to_interp_stub_size() {
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
- address stub = find_stub(/*is_aot*/ false);
+ address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
@@ -149,7 +149,7 @@ void CompiledDirectStaticCall::verify() {
_call->verify_alignment();
// Verify stub.
- address stub = find_stub(/*is_aot*/ false);
+ address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
diff --git a/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp
index e24c6ca93f9..e2ffb6c3670 100644
--- a/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp
@@ -142,11 +142,6 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
}
}
-void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
- Register obj1, Register obj2) {
- __ cmp(obj1, obj2);
-}
-
// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj, Register obj_end, Register tmp1, Register tmp2,
RegisterOrConstant size_expression, Label& slow_case) {
diff --git a/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp
index 41070878d3c..6f8ce6bf835 100644
--- a/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp
+++ b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp
@@ -47,9 +47,6 @@ class BarrierSetAssembler: public CHeapObj {
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null);
- virtual void obj_equals(MacroAssembler* masm,
- Register obj1, Register obj2);
-
virtual void eden_allocate(MacroAssembler* masm,
Register obj, // result: pointer to object after successful allocation
Register obj_end, // result: pointer to end of object after successful allocation
@@ -68,10 +65,6 @@ class BarrierSetAssembler: public CHeapObj {
);
virtual void barrier_stubs_init() {}
-
- virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
- // Default implementation does not need to do anything.
- }
};
#endif // CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP
diff --git a/src/hotspot/cpu/arm/interp_masm_arm.cpp b/src/hotspot/cpu/arm/interp_masm_arm.cpp
index f754b36a27f..48efb6a8036 100644
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -580,7 +580,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
if (needs_thread_local_poll) {
NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
- ldr(Rtemp, Address(Rthread, Thread::polling_word_offset()));
+ ldr(Rtemp, Address(Rthread, JavaThread::polling_word_offset()));
tbnz(Rtemp, exact_log2(SafepointMechanism::poll_bit()), safepoint);
}
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.cpp b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
index 5ee719d6d71..1896a940b19 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
@@ -735,8 +735,7 @@ void MacroAssembler::sign_extend(Register rd, Register rn, int bits) {
void MacroAssembler::cmpoop(Register obj1, Register obj2) {
- BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->obj_equals(this, obj1, obj2);
+ cmp(obj1, obj2);
}
void MacroAssembler::long_move(Register rd_lo, Register rd_hi,
@@ -1888,23 +1887,14 @@ void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
}
}
-void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
- // Use stronger ACCESS_WRITE|ACCESS_READ by default.
- if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
- decorators |= ACCESS_READ | ACCESS_WRITE;
- }
- BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- return bs->resolve(this, decorators, obj);
-}
-
void MacroAssembler::safepoint_poll(Register tmp1, Label& slow_path) {
- ldr_u32(tmp1, Address(Rthread, Thread::polling_word_offset()));
+ ldr_u32(tmp1, Address(Rthread, JavaThread::polling_word_offset()));
tst(tmp1, exact_log2(SafepointMechanism::poll_bit()));
b(slow_path, eq);
}
void MacroAssembler::get_polling_page(Register dest) {
- ldr(dest, Address(Rthread, Thread::polling_page_offset()));
+ ldr(dest, Address(Rthread, JavaThread::polling_page_offset()));
}
void MacroAssembler::read_polling_page(Register dest, relocInfo::relocType rtype) {
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.hpp b/src/hotspot/cpu/arm/macroAssembler_arm.hpp
index a07ca65d99e..a27a54e1c71 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp
@@ -878,11 +878,6 @@ class MacroAssembler: public Assembler {
void access_load_at(BasicType type, DecoratorSet decorators, Address src, Register dst, Register tmp1, Register tmp2, Register tmp3);
void access_store_at(BasicType type, DecoratorSet decorators, Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null);
- // Resolves obj for access. Result is placed in the same register.
- // All other registers are preserved.
- void resolve(DecoratorSet decorators, Register obj);
-
-
void ldr_global_ptr(Register reg, address address_of_global);
void ldr_global_s32(Register reg, address address_of_global);
void ldrb_global(Register reg, address address_of_global);
diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
index 0082b477598..027ffd29c2d 100644
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
@@ -1152,8 +1152,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Remember the handle for the unlocking code
__ mov(sync_handle, R1);
- __ resolve(IS_NOT_NULL, sync_obj);
-
if(UseBiasedLocking) {
__ biased_locking_enter(sync_obj, tmp, disp_hdr/*scratched*/, false, Rtemp, lock_done, slow_lock_biased);
}
@@ -1238,8 +1236,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
if (method->is_synchronized()) {
__ ldr(sync_obj, Address(sync_handle));
- __ resolve(IS_NOT_NULL, sync_obj);
-
if(UseBiasedLocking) {
__ biased_locking_exit(sync_obj, Rtemp, unlock_done);
// disp_hdr may not have been saved on entry with biased locking
diff --git a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp
index f38fe198c95..2891532d9bb 100644
--- a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp
+++ b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp
@@ -483,7 +483,6 @@ void TemplateInterpreterGenerator::lock_method() {
__ b(done, eq);
__ load_mirror(R0, Rmethod, Rtemp);
__ bind(done);
- __ resolve(IS_NOT_NULL, R0);
}
// add space for monitor & lock
diff --git a/src/hotspot/cpu/arm/templateTable_arm.cpp b/src/hotspot/cpu/arm/templateTable_arm.cpp
index a95842ebb29..30649a5e104 100644
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp
@@ -4249,8 +4249,6 @@ void TemplateTable::monitorenter() {
// check for NULL object
__ null_check(Robj, Rtemp);
- __ resolve(IS_NOT_NULL, Robj);
-
const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
Label allocate_monitor, allocated;
@@ -4362,8 +4360,6 @@ void TemplateTable::monitorexit() {
// check for NULL object
__ null_check(Robj, Rtemp);
- __ resolve(IS_NOT_NULL, Robj);
-
const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
Label found, throw_exception;
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
index ef1d6374151..7117a382a96 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
@@ -1355,7 +1355,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
const Register poll_addr = tmp->as_register();
- __ ld(poll_addr, in_bytes(Thread::polling_page_offset()), R16_thread);
+ __ ld(poll_addr, in_bytes(JavaThread::polling_page_offset()), R16_thread);
if (info != NULL) {
add_debug_info_for_branch(info);
}
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.hpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.hpp
index 6eef210dccc..861430b79eb 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -62,7 +62,6 @@
enum {
_static_call_stub_size = 4 * BytesPerInstWord + MacroAssembler::b64_patchable_size, // or smaller
_call_stub_size = _static_call_stub_size + MacroAssembler::trampoline_stub_size, // or smaller
- _call_aot_stub_size = 0,
_exception_handler_size = MacroAssembler::b64_patchable_size, // or smaller
_deopt_handler_size = MacroAssembler::bl64_patchable_size
};
@@ -70,11 +69,7 @@ enum {
// '_static_call_stub_size' is only used on ppc (see LIR_Assembler::emit_static_call_stub()
// in c1_LIRAssembler_ppc.cpp. The other, shared getters are defined in c1_LIRAssembler.hpp
static int static_call_stub_size() {
- if (UseAOT) {
- return _static_call_stub_size + _call_aot_stub_size;
- } else {
- return _static_call_stub_size;
- }
+ return _static_call_stub_size;
}
#endif // CPU_PPC_C1_LIRASSEMBLER_PPC_HPP
diff --git a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
index d3cf5ed4766..a112e472e4b 100644
--- a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
+++ b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -164,7 +164,7 @@ int CompiledStaticCall::reloc_to_interp_stub() {
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
- address stub = find_stub(/*is_aot*/ false);
+ address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
@@ -210,7 +210,7 @@ void CompiledDirectStaticCall::verify() {
_call->verify_alignment();
// Verify stub.
- address stub = find_stub(/*is_aot*/ false);
+ address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
index d3b16448d8b..5810e12139f 100644
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
@@ -225,7 +225,7 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, Register byt
address *sfpt_tbl = Interpreter::safept_table(state);
if (table != sfpt_tbl) {
Label dispatch;
- ld(R0, in_bytes(Thread::polling_word_offset()), R16_thread);
+ ld(R0, in_bytes(JavaThread::polling_word_offset()), R16_thread);
// Armed page has poll_bit set, if poll bit is cleared just continue.
andi_(R0, R0, SafepointMechanism::poll_bit());
beq(CCR0, dispatch);
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index 7c1232be6a8..918ad1d7c89 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -3058,7 +3058,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
}
void MacroAssembler::safepoint_poll(Label& slow_path, Register temp, bool at_return, bool in_nmethod) {
- ld(temp, in_bytes(Thread::polling_word_offset()), R16_thread);
+ ld(temp, in_bytes(JavaThread::polling_word_offset()), R16_thread);
if (at_return) {
if (in_nmethod) {
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index 70a805d3484..2210e05a410 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -2142,7 +2142,7 @@ void TemplateTable::_return(TosState state) {
if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
Label no_safepoint;
- __ ld(R11_scratch1, in_bytes(Thread::polling_word_offset()), R16_thread);
+ __ ld(R11_scratch1, in_bytes(JavaThread::polling_word_offset()), R16_thread);
__ andi_(R11_scratch1, R11_scratch1, SafepointMechanism::poll_bit());
__ beq(CCR0, no_safepoint);
__ push(state);
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
index e428c6135b3..c5fbd5d3c9c 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
@@ -1209,7 +1209,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
(result->is_single_fpu() && result->as_float_reg() == Z_F0) ||
(result->is_double_fpu() && result->as_double_reg() == Z_F0), "convention");
- __ z_lg(Z_R1_scratch, Address(Z_thread, Thread::polling_page_offset()));
+ __ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::polling_page_offset()));
// Pop the frame before the safepoint code.
__ pop_frame_restore_retPC(initial_frame_size_in_bytes());
@@ -1228,7 +1228,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
const Register poll_addr = tmp->as_register_lo();
- __ z_lg(poll_addr, Address(Z_thread, Thread::polling_page_offset()));
+ __ z_lg(poll_addr, Address(Z_thread, JavaThread::polling_page_offset()));
guarantee(info != NULL, "Shouldn't be NULL");
add_debug_info_for_branch(info);
int offset = __ offset();
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp
index d7bf1de6bd3..7ca94d2d97a 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -47,7 +47,6 @@
enum {
_call_stub_size = 512, // See Compile::MAX_stubs_size and CompiledStaticCall::emit_to_interp_stub.
- _call_aot_stub_size = 0,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
_deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)
};
diff --git a/src/hotspot/cpu/s390/compiledIC_s390.cpp b/src/hotspot/cpu/s390/compiledIC_s390.cpp
index c9ea4500d78..6660a34aeff 100644
--- a/src/hotspot/cpu/s390/compiledIC_s390.cpp
+++ b/src/hotspot/cpu/s390/compiledIC_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -92,7 +92,7 @@ int CompiledStaticCall::reloc_to_interp_stub() {
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
- address stub = find_stub(/*is_aot*/ false);
+ address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
@@ -137,7 +137,7 @@ void CompiledDirectStaticCall::verify() {
_call->verify_alignment();
// Verify stub.
- address stub = find_stub(/*is_aot*/ false);
+ address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp
index dcdfe491339..98f16b0f004 100644
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -121,7 +121,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state, address* table, bo
address *sfpt_tbl = Interpreter::safept_table(state);
if (table != sfpt_tbl) {
Label dispatch;
- const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_word_offset()) + 7 /* Big Endian */);
+ const Address poll_byte_addr(Z_thread, in_bytes(JavaThread::polling_word_offset()) + 7 /* Big Endian */);
// Armed page has poll_bit set, if poll bit is cleared just continue.
z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
z_braz(dispatch);
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index 7554a3f00e8..c098cf09604 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2652,7 +2652,7 @@ uint MacroAssembler::get_poll_register(address instr_loc) {
}
void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
- const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_word_offset()) + 7 /* Big Endian */);
+ const Address poll_byte_addr(Z_thread, in_bytes(JavaThread::polling_word_offset()) + 7 /* Big Endian */);
// Armed page has poll_bit set.
z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
z_brnaz(slow_path);
diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad
index 9490685ebf6..a5deeda463d 100644
--- a/src/hotspot/cpu/s390/s390.ad
+++ b/src/hotspot/cpu/s390/s390.ad
@@ -957,7 +957,7 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// Touch the polling page.
if (need_polling) {
- __ z_lg(Z_R1_scratch, Address(Z_thread, Thread::polling_page_offset()));
+ __ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::polling_page_offset()));
// We need to mark the code position where the load from the safepoint
// polling page was emitted as relocInfo::poll_return_type here.
__ relocate(relocInfo::poll_return_type);
diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp
index 8698e53ef40..e28481ddd86 100644
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp
@@ -2319,7 +2319,7 @@ void TemplateTable::_return(TosState state) {
if (_desc->bytecode() != Bytecodes::_return_register_finalizer) {
Label no_safepoint;
- const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_word_offset()) + 7 /* Big Endian */);
+ const Address poll_byte_addr(Z_thread, in_bytes(JavaThread::polling_word_offset()) + 7 /* Big Endian */);
__ z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
__ z_braz(no_safepoint);
__ push(state);
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index 14091f5982e..25cb354f0a2 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -580,12 +580,12 @@ int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
int offset = __ offset();
#ifdef _LP64
const Register poll_addr = rscratch1;
- __ movptr(poll_addr, Address(r15_thread, Thread::polling_page_offset()));
+ __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
#else
assert(tmp->is_cpu_register(), "needed");
const Register poll_addr = tmp->as_register();
__ get_thread(poll_addr);
- __ movptr(poll_addr, Address(poll_addr, in_bytes(Thread::polling_page_offset())));
+ __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
#endif
add_debug_info_for_branch(info);
__ relocate(relocInfo::poll_type);
@@ -3090,23 +3090,13 @@ void LIR_Assembler::emit_static_call_stub() {
// make sure that the displacement word of the call ends up word aligned
__ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
- __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
+ __ relocate(static_stub_Relocation::spec(call_pc));
__ mov_metadata(rbx, (Metadata*)NULL);
// must be set to -1 at code generation time
assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
// On 64bit this will die since it will take a movq & jmp, must be only a jmp
__ jump(RuntimeAddress(__ pc()));
- if (UseAOT) {
- // Trampoline to aot code
- __ relocate(static_stub_Relocation::spec(call_pc, true /* is_aot */));
-#ifdef _LP64
- __ mov64(rax, CONST64(0)); // address is zapped till fixup time.
-#else
- __ movl(rax, 0xdeadffff); // address is zapped till fixup time.
-#endif
- __ jmp(rax);
- }
assert(__ offset() - start <= call_stub_size(), "stub too big");
__ end_a_stub();
}
@@ -3301,9 +3291,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Register tmp = op->tmp()->as_register();
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
- __ resolve(ACCESS_READ, src);
- __ resolve(ACCESS_WRITE, dst);
-
CodeStub* stub = op->stub();
int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
@@ -3755,7 +3742,6 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
scratch = op->scratch_opr()->as_register();
}
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
- __ resolve(ACCESS_READ | ACCESS_WRITE, obj);
// add debug info for NullPointerException only if one is possible
int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
if (op->info() != NULL) {
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp
index acacf520b3a..71723a30ac3 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,6 @@
enum {
_call_stub_size = NOT_LP64(15) LP64_ONLY(28),
- _call_aot_stub_size = NOT_LP64(7) LP64_ONLY(12),
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
_deopt_handler_size = NOT_LP64(10) LP64_ONLY(17)
};
diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
index ba8902df635..a17e3896ebd 100644
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
@@ -1086,10 +1086,6 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
}
#endif
- if (is_updateBytes) {
- base_op = access_resolve(IS_NOT_NULL | ACCESS_READ, base_op);
- }
-
LIR_Address* a = new LIR_Address(base_op,
index,
offset,
@@ -1147,7 +1143,7 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
constant_aOffset = result_aOffset->as_jlong();
result_aOffset = LIR_OprFact::illegalOpr;
}
- LIR_Opr result_a = access_resolve(ACCESS_READ, a.result());
+ LIR_Opr result_a = a.result();
long constant_bOffset = 0;
LIR_Opr result_bOffset = bOffset.result();
@@ -1155,7 +1151,7 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
constant_bOffset = result_bOffset->as_jlong();
result_bOffset = LIR_OprFact::illegalOpr;
}
- LIR_Opr result_b = access_resolve(ACCESS_READ, b.result());
+ LIR_Opr result_b = b.result();
#ifndef _LP64
result_a = new_register(T_INT);
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
index 1f0c7f72868..e37b8bd1b41 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -334,7 +334,7 @@ void C1_MacroAssembler::build_frame_helper(int frame_size_in_bytes, int sp_inc,
mov(rbp, rsp);
}
#if !defined(_LP64) && defined(COMPILER2)
- if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
+ if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
// c2 leaves fpu stack dirty. Clean it on entry
empty_FPU_stack();
}
diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
index a517ce15bc0..9fb3f2f6518 100644
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
@@ -723,7 +723,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
}
#if !defined(_LP64) && defined(COMPILER2)
- if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
+ if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
// C2 can leave the fpu stack dirty
__ empty_FPU_stack();
}
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
index 5dbc4e91d49..4be0fd64fc2 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
@@ -1050,6 +1050,35 @@ void C2_MacroAssembler::evminmax_fp(int opcode, BasicType elem_bt,
}
}
+// Float/Double signum
+void C2_MacroAssembler::signum_fp(int opcode, XMMRegister dst,
+ XMMRegister zero, XMMRegister one,
+ Register scratch) {
+ assert(opcode == Op_SignumF || opcode == Op_SignumD, "sanity");
+
+ Label DONE_LABEL;
+
+ if (opcode == Op_SignumF) {
+ assert(UseSSE > 0, "required");
+ ucomiss(dst, zero);
+ jcc(Assembler::equal, DONE_LABEL); // handle special case +0.0/-0.0, if argument is +0.0/-0.0, return argument
+ jcc(Assembler::parity, DONE_LABEL); // handle special case NaN, if argument NaN, return NaN
+ movflt(dst, one);
+ jcc(Assembler::above, DONE_LABEL);
+ xorps(dst, ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), scratch);
+ } else if (opcode == Op_SignumD) {
+ assert(UseSSE > 1, "required");
+ ucomisd(dst, zero);
+ jcc(Assembler::equal, DONE_LABEL); // handle special case +0.0/-0.0, if argument is +0.0/-0.0, return argument
+ jcc(Assembler::parity, DONE_LABEL); // handle special case NaN, if argument NaN, return NaN
+ movdbl(dst, one);
+ jcc(Assembler::above, DONE_LABEL);
+ xorpd(dst, ExternalAddress(StubRoutines::x86::vector_double_sign_flip()), scratch);
+ }
+
+ bind(DONE_LABEL);
+}
+
void C2_MacroAssembler::vextendbw(bool sign, XMMRegister dst, XMMRegister src) {
if (sign) {
pmovsxbw(dst, src);
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
index d9f409e3942..d1892c54c90 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
@@ -89,6 +89,10 @@
KRegister ktmp, XMMRegister atmp, XMMRegister btmp,
int vlen_enc);
+ void signum_fp(int opcode, XMMRegister dst,
+ XMMRegister zero, XMMRegister one,
+ Register scratch);
+
void vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
void vextendbw(bool sign, XMMRegister dst, XMMRegister src);
void vextendbd(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
diff --git a/src/hotspot/cpu/x86/compiledIC_aot_x86_64.cpp b/src/hotspot/cpu/x86/compiledIC_aot_x86_64.cpp
deleted file mode 100644
index bf98dd3dea0..00000000000
--- a/src/hotspot/cpu/x86/compiledIC_aot_x86_64.cpp
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-
-#include "aot/compiledIC_aot.hpp"
-#include "code/codeCache.hpp"
-#include "memory/resourceArea.hpp"
-
-void CompiledDirectStaticCall::set_to_far(const methodHandle& callee, address entry) {
- address stub = find_stub(true /* is_far */);
- guarantee(stub != NULL, "stub not found");
-
- if (TraceICs) {
- ResourceMark rm;
- tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_far %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
- // Creation also verifies the object.
- // mov rax,imm_aot_addr
- // jmp rax
- NativeMovConstReg* destination_holder = nativeMovConstReg_at(stub);
-
-#ifdef ASSERT
- // read the value once
- intptr_t data = destination_holder->data();
- assert(data == 0 || data == (intptr_t)entry,
- "MT-unsafe modification of inline cache");
-#endif
-
- // Update stub.
- destination_holder->set_data((intptr_t)entry);
-
- // Update jump to call.
- set_destination_mt_safe(stub);
-}
-
-void CompiledPltStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
- address stub = find_stub();
- guarantee(stub != NULL, "stub not found");
- if (TraceICs) {
- ResourceMark rm;
- tty->print_cr("CompiledPltStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
- // Creation also verifies the object.
- NativeLoadGot* method_loader = nativeLoadGot_at(stub);
- NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
-
- intptr_t data = method_loader->data();
- address destination = jump->destination();
- assert(data == 0 || data == (intptr_t)callee(),
- "a) MT-unsafe modification of inline cache");
- assert(destination == (address)-1 || destination == entry,
- "b) MT-unsafe modification of inline cache");
-
- // Update stub.
- method_loader->set_data((intptr_t)callee());
- jump->set_jump_destination(entry);
-
- // Update jump to call.
- set_destination_mt_safe(stub);
-}
-
-#ifdef NEVER_CALLED
-void CompiledPltStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
- // Reset stub.
- address stub = static_stub->addr();
- assert(stub != NULL, "stub not found");
- assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
- // Creation also verifies the object.
- NativeLoadGot* method_loader = nativeLoadGot_at(stub);
- NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
- method_loader->set_data(0);
- jump->set_jump_destination((address)-1);
-}
-#endif
-
-#ifndef PRODUCT
-void CompiledPltStaticCall::verify() {
- // Verify call.
- _call->verify();
-
-#ifdef ASSERT
- CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call);
- assert(cb && cb->is_aot(), "CompiledPltStaticCall can only be used on AOTCompiledMethod");
-#endif
-
- // Verify stub.
- address stub = find_stub();
- assert(stub != NULL, "no stub found for static call");
- // Creation also verifies the object.
- NativeLoadGot* method_loader = nativeLoadGot_at(stub);
- NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
- // Verify state.
- assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
-}
-#endif // !PRODUCT
diff --git a/src/hotspot/cpu/x86/compiledIC_x86.cpp b/src/hotspot/cpu/x86/compiledIC_x86.cpp
index a6b48da8921..e898e523c0f 100644
--- a/src/hotspot/cpu/x86/compiledIC_x86.cpp
+++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
return NULL; // CodeBuffer::expand failed.
}
// Static stub relocation stores the instruction address of the call.
- __ relocate(static_stub_Relocation::spec(mark, false), Assembler::imm_operand);
+ __ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand);
// Static stub relocation also tags the Method* in the code-stream.
__ mov_metadata(rbx, (Metadata*) NULL); // Method is zapped till fixup time.
// This is recognized as unresolved by relocs/nativeinst/ic code.
@@ -83,68 +83,8 @@ int CompiledStaticCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
-#if INCLUDE_AOT
-#define __ _masm.
-void CompiledStaticCall::emit_to_aot_stub(CodeBuffer &cbuf, address mark) {
- if (!UseAOT) {
- return;
- }
- // Stub is fixed up when the corresponding call is converted from
- // calling compiled code to calling aot code.
- // movq rax, imm64_aot_code_address
- // jmp rax
-
- if (mark == NULL) {
- mark = cbuf.insts_mark(); // Get mark within main instrs section.
- }
-
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a stub.
- MacroAssembler _masm(&cbuf);
-
- address base =
- __ start_a_stub(to_aot_stub_size());
- guarantee(base != NULL, "out of space");
-
- // Static stub relocation stores the instruction address of the call.
- __ relocate(static_stub_Relocation::spec(mark, true /* is_aot */), Assembler::imm_operand);
- // Load destination AOT code address.
-#ifdef _LP64
- __ mov64(rax, CONST64(0)); // address is zapped till fixup time.
-#else
- __ movl(rax, 0); // address is zapped till fixup time.
-#endif
- // This is recognized as unresolved by relocs/nativeinst/ic code.
- __ jmp(rax);
-
- assert(__ pc() - base <= to_aot_stub_size(), "wrong stub size");
-
- // Update current stubs pointer and restore insts_end.
- __ end_a_stub();
-}
-#undef __
-
-int CompiledStaticCall::to_aot_stub_size() {
- if (UseAOT) {
- return NOT_LP64(7) // movl; jmp
- LP64_ONLY(12); // movq (1+1+8); jmp (2)
- } else {
- return 0;
- }
-}
-
-// Relocation entries for call stub, compiled java to aot.
-int CompiledStaticCall::reloc_to_aot_stub() {
- if (UseAOT) {
- return 2; // 1 in emit_to_aot_stub + 1 in emit_call
- } else {
- return 0;
- }
-}
-#endif // INCLUDE_AOT
-
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
- address stub = find_stub(false /* is_aot */);
+ address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
@@ -175,10 +115,8 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
method_holder->set_data(0);
- if (!static_stub->is_aot()) {
- NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
- jump->set_jump_destination((address)-1);
- }
+ NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
+ jump->set_jump_destination((address)-1);
}
@@ -193,11 +131,11 @@ void CompiledDirectStaticCall::verify() {
#ifdef ASSERT
CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call);
- assert(cb && !cb->is_aot(), "CompiledDirectStaticCall cannot be used on AOTCompiledMethod");
+ assert(cb != NULL, "sanity");
#endif
// Verify stub.
- address stub = find_stub(false /* is_aot */);
+ address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
diff --git a/src/hotspot/cpu/x86/frame_x86.cpp b/src/hotspot/cpu/x86/frame_x86.cpp
index 75d4693178e..91d6e894193 100644
--- a/src/hotspot/cpu/x86/frame_x86.cpp
+++ b/src/hotspot/cpu/x86/frame_x86.cpp
@@ -35,7 +35,6 @@
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
-#include "runtime/os.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubCodeGenerator.hpp"
diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
index c46f6742ef2..ccf128666ac 100644
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
@@ -212,28 +212,6 @@ void BarrierSetAssembler::value_copy(MacroAssembler* masm, DecoratorSet decorato
}
}
-
-#ifndef _LP64
-void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
- Address obj1, jobject obj2) {
- __ cmpoop_raw(obj1, obj2);
-}
-
-void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
- Register obj1, jobject obj2) {
- __ cmpoop_raw(obj1, obj2);
-}
-#endif
-void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
- Register obj1, Address obj2) {
- __ cmpptr(obj1, obj2);
-}
-
-void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
- Register obj1, Register obj2) {
- __ cmpptr(obj1, obj2);
-}
-
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath) {
__ clear_jweak_tag(obj);
diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp
index aa0cd991440..f2c1bf00ced 100644
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp
@@ -52,22 +52,6 @@ class BarrierSetAssembler: public CHeapObj {
virtual void value_copy(MacroAssembler* masm, DecoratorSet decorators,
Register src, Register dst, Register value_klass);
-#ifndef _LP64
- virtual void obj_equals(MacroAssembler* masm,
- Address obj1, jobject obj2);
- virtual void obj_equals(MacroAssembler* masm,
- Register obj1, jobject obj2);
-#endif
-
- virtual void obj_equals(MacroAssembler* masm,
- Register obj1, Register obj2);
- virtual void obj_equals(MacroAssembler* masm,
- Register obj1, Address obj2);
-
- virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
- // Default implementation does not need to do anything.
- }
-
// Support for jniFastGetField to try resolving a jobject/jweak in native
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
diff --git a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp
index a2ab61c675a..60e8b0d2232 100644
--- a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp
+++ b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp
@@ -70,7 +70,7 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#endif
#if INCLUDE_JVMCI
-#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS (EnableJVMCI || UseAOT)
+#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS EnableJVMCI
#else
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
#endif
diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp
index eb81c8edfb3..9a162d20086 100644
--- a/src/hotspot/cpu/x86/interp_masm_x86.cpp
+++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -864,7 +864,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
Label no_safepoint, dispatch;
if (table != safepoint_table && generate_poll) {
NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
- testb(Address(r15_thread, Thread::polling_word_offset()), SafepointMechanism::poll_bit());
+ testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
jccb(Assembler::zero, no_safepoint);
lea(rscratch1, ExternalAddress((address)safepoint_table));
@@ -883,7 +883,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
Label no_safepoint;
const Register thread = rcx;
get_thread(thread);
- testb(Address(thread, Thread::polling_word_offset()), SafepointMechanism::poll_bit());
+ testb(Address(thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
jccb(Assembler::zero, no_safepoint);
ArrayAddress dispatch_addr(ExternalAddress((address)safepoint_table), index);
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index 755ee5e4396..316ead39e1d 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -127,22 +127,12 @@ void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}
-void MacroAssembler::cmpoop_raw(Address src1, jobject obj) {
- cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::cmpoop_raw(Register src1, jobject obj) {
- cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
void MacroAssembler::cmpoop(Address src1, jobject obj) {
- BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->obj_equals(this, src1, obj);
+ cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
void MacroAssembler::cmpoop(Register src1, jobject obj) {
- BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->obj_equals(this, src1, obj);
+ cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
void MacroAssembler::extend_sign(Register hi, Register lo) {
@@ -1804,20 +1794,17 @@ void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
}
void MacroAssembler::cmpoop(Register src1, Register src2) {
- BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->obj_equals(this, src1, src2);
+ cmpptr(src1, src2);
}
void MacroAssembler::cmpoop(Register src1, Address src2) {
- BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->obj_equals(this, src1, src2);
+ cmpptr(src1, src2);
}
#ifdef _LP64
void MacroAssembler::cmpoop(Register src1, jobject src2) {
movoop(rscratch1, src2);
- BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->obj_equals(this, src1, rscratch1);
+ cmpptr(src1, rscratch1);
}
#endif
@@ -2958,11 +2945,11 @@ void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool
if (at_return) {
// Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
// we may safely use rsp instead to perform the stack watermark check.
- cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, Thread::polling_word_offset()));
+ cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, JavaThread::polling_word_offset()));
jcc(Assembler::above, slow_path);
return;
}
- testb(Address(thread_reg, Thread::polling_word_offset()), SafepointMechanism::poll_bit());
+ testb(Address(thread_reg, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
}
@@ -4946,15 +4933,6 @@ void MacroAssembler::data_for_value_array_index(Register array, Register array_k
lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_INLINE_TYPE)));
}
-void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
- // Use stronger ACCESS_WRITE|ACCESS_READ by default.
- if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
- decorators |= ACCESS_READ | ACCESS_WRITE;
- }
- BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- return bs->resolve(this, decorators, obj);
-}
-
void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
Register thread_tmp, DecoratorSet decorators) {
access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
index e935236054c..5f1c8ba4987 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -372,10 +372,6 @@ class MacroAssembler: public Assembler {
Register index, Register data);
- // Resolves obj access. Result is placed in the same register.
- // All other registers are preserved.
- void resolve(DecoratorSet decorators, Register obj);
-
void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
Register thread_tmp = noreg, DecoratorSet decorators = 0);
void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
@@ -771,13 +767,11 @@ class MacroAssembler: public Assembler {
void cmpklass(Address dst, Metadata* obj);
void cmpklass(Register dst, Metadata* obj);
void cmpoop(Address dst, jobject obj);
- void cmpoop_raw(Address dst, jobject obj);
#endif // _LP64
void cmpoop(Register src1, Register src2);
void cmpoop(Register src1, Address src2);
void cmpoop(Register dst, jobject obj);
- void cmpoop_raw(Register dst, jobject obj);
// NOTE src2 must be the lval. This is NOT an mem-mem compare
void cmpptr(Address src1, AddressLiteral src2);
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86_exp.cpp b/src/hotspot/cpu/x86/macroAssembler_x86_exp.cpp
index 8527c288567..313c5b514b5 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86_exp.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86_exp.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2016, Intel Corporation.
+* Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
* Intel Math Library (LIBM) Source Code
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -197,7 +198,6 @@ void MacroAssembler::fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xm
Label L_2TAG_PACKET_12_0_2, B1_3, B1_5, start;
assert_different_registers(tmp, eax, ecx, edx);
- jmp(start);
address cv = (address)_cv;
address Shifter = (address)_shifter;
address mmask = (address)_mmask;
@@ -486,7 +486,6 @@ void MacroAssembler::fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xm
Label L_2TAG_PACKET_12_0_2, start;
assert_different_registers(tmp, eax, ecx, edx);
- jmp(start);
address static_const_table = (address)_static_const_table;
bind(start);
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86_log.cpp b/src/hotspot/cpu/x86/macroAssembler_x86_log.cpp
index b1edeaf9d6f..9c28d9e510d 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86_log.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86_log.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2016, Intel Corporation.
+* Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
* Intel Math Library (LIBM) Source Code
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -189,7 +190,6 @@ void MacroAssembler::fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xm
Label B1_3, B1_5, start;
assert_different_registers(tmp1, tmp2, eax, ecx, edx);
- jmp(start);
address L_tbl = (address)_L_tbl;
address log2 = (address)_log2;
address coeff = (address)_coeff;
@@ -485,7 +485,6 @@ void MacroAssembler::fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xm
Label L_2TAG_PACKET_10_0_2, start;
assert_different_registers(tmp, eax, ecx, edx);
- jmp(start);
address static_const_table = (address)_static_const_table_log;
bind(start);
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86_pow.cpp b/src/hotspot/cpu/x86/macroAssembler_x86_pow.cpp
index af8c85b08be..d1c405ba3c8 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86_pow.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86_pow.cpp
@@ -803,7 +803,6 @@ void MacroAssembler::fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xm
Label L_POW;
assert_different_registers(tmp1, tmp2, eax, ecx, edx);
- jmp(start);
address HIGHSIGMASK = (address)_HIGHSIGMASK;
address LOG2_E = (address)_LOG2_E;
address coeff = (address)_coeff_pow;
@@ -2515,6 +2514,10 @@ ATTRIBUTE_ALIGNED(16) juint _static_const_table_pow[] =
};
+ATTRIBUTE_ALIGNED(8) double _DOUBLE2 = 2.0;
+ATTRIBUTE_ALIGNED(8) double _DOUBLE0 = 0.0;
+ATTRIBUTE_ALIGNED(8) double _DOUBLE0DOT5 = 0.5;
+
//registers,
// input: xmm0, xmm1
// scratch: xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
@@ -2538,10 +2541,14 @@ void MacroAssembler::fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xm
Label L_2TAG_PACKET_48_0_2, L_2TAG_PACKET_49_0_2, L_2TAG_PACKET_50_0_2, L_2TAG_PACKET_51_0_2;
Label L_2TAG_PACKET_52_0_2, L_2TAG_PACKET_53_0_2, L_2TAG_PACKET_54_0_2, L_2TAG_PACKET_55_0_2;
Label L_2TAG_PACKET_56_0_2, L_2TAG_PACKET_57_0_2, L_2TAG_PACKET_58_0_2, start;
+ Label L_NOT_DOUBLE2, L_NOT_DOUBLE0DOT5;
assert_different_registers(tmp, eax, ecx, edx);
address static_const_table_pow = (address)_static_const_table_pow;
+ address DOUBLE2 = (address) &_DOUBLE2;
+ address DOUBLE0 = (address) &_DOUBLE0;
+ address DOUBLE0DOT5 = (address) &_DOUBLE0DOT5;
bind(start);
subl(rsp, 120);
@@ -2549,6 +2556,28 @@ void MacroAssembler::fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xm
lea(tmp, ExternalAddress(static_const_table_pow));
movsd(xmm0, Address(rsp, 128));
movsd(xmm1, Address(rsp, 136));
+
+ // Special case: pow(x, 2.0) => x * x
+ ucomisd(xmm1, ExternalAddress(DOUBLE2));
+ jccb(Assembler::notEqual, L_NOT_DOUBLE2);
+ jccb(Assembler::parity, L_NOT_DOUBLE2);
+ mulsd(xmm0, xmm0);
+ jmp(L_2TAG_PACKET_21_0_2);
+
+ bind(L_NOT_DOUBLE2);
+ // Special case: pow(x, 0.5) => sqrt(x)
+ ucomisd(xmm1, ExternalAddress(DOUBLE0DOT5)); // For pow(x, y), check whether y == 0.5
+ jccb(Assembler::notEqual, L_NOT_DOUBLE0DOT5);
+ jccb(Assembler::parity, L_NOT_DOUBLE0DOT5);
+ ucomisd(xmm0, ExternalAddress(DOUBLE0));
+ // According to the API specs, pow(-0.0, 0.5) = 0.0 and sqrt(-0.0) = -0.0.
+ // So pow(-0.0, 0.5) shouldn't be replaced with sqrt(-0.0).
+ // -0.0/+0.0 are both excluded since floating-point comparison doesn't distinguish -0.0 from +0.0.
+ jccb(Assembler::belowEqual, L_NOT_DOUBLE0DOT5); // pow(x, 0.5) => sqrt(x) only for x > 0.0
+ sqrtsd(xmm0, xmm0);
+ jmp(L_2TAG_PACKET_21_0_2);
+
+ bind(L_NOT_DOUBLE0DOT5);
xorpd(xmm2, xmm2);
movl(eax, 16368);
pinsrw(xmm2, eax, 3);
diff --git a/src/hotspot/cpu/x86/methodHandles_x86.cpp b/src/hotspot/cpu/x86/methodHandles_x86.cpp
index 337c218ed0c..1c73032acba 100644
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp
+++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp
@@ -559,18 +559,25 @@ void trace_method_handle_stub(const char* adaptername,
PreserveExceptionMark pem(Thread::current());
FrameValues values;
- // Current C frame
frame cur_frame = os::current_frame();
if (cur_frame.fp() != 0) { // not walkable
// Robust search of trace_calling_frame (independent of inlining).
// Assumes saved_regs comes from a pusha in the trace_calling_frame.
+ //
+ // We have to start the search from cur_frame, because trace_calling_frame may be it.
+ // It is guaranteed that trace_calling_frame is different from the top frame.
+ // But os::current_frame() does NOT return the top frame: it returns the next frame under it (caller's frame).
+ // (Due to inlining and tail call optimizations, caller's frame doesn't necessarily correspond to the immediate
+ // caller in the source code.)
assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
- frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
+ frame trace_calling_frame = cur_frame;
while (trace_calling_frame.fp() < saved_regs) {
+ assert(trace_calling_frame.cb() == NULL, "not a C frame");
trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
}
+ assert(trace_calling_frame.sp() < saved_regs, "wrong frame");
// safely create a frame and call frame::describe
intptr_t *dump_sp = trace_calling_frame.sender_sp();
diff --git a/src/hotspot/cpu/x86/nativeInst_x86.hpp b/src/hotspot/cpu/x86/nativeInst_x86.hpp
index 56296f86a56..94f8b5e637c 100644
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
#include "asm/assembler.hpp"
#include "runtime/icache.hpp"
-#include "runtime/os.hpp"
#include "runtime/safepointMechanism.hpp"
// We have interfaces for the following instructions:
@@ -199,13 +198,6 @@ class NativeCall: public NativeInstruction {
nativeCall_at(instr)->destination() == target;
}
-#if INCLUDE_AOT
- static bool is_far_call(address instr, address target) {
- intptr_t disp = target - (instr + sizeof(int32_t));
- return !Assembler::is_simm32(disp);
- }
-#endif
-
// MT-safe patching of a call instruction.
static void insert(address code_pos, address entry);
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index 5cc7cc091ed..09de53e585d 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -1071,7 +1071,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
__ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_inline_offset())));
#if INCLUDE_JVMCI
- if (EnableJVMCI || UseAOT) {
+ if (EnableJVMCI) {
// check if this call should be routed towards a specific entry point
__ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
Label no_alternative_target;
@@ -2517,7 +2517,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Load the oop from the handle
__ movptr(obj_reg, Address(oop_handle_reg, 0));
- __ resolve(IS_NOT_NULL, obj_reg);
if (UseBiasedLocking) {
__ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock);
}
@@ -2677,7 +2676,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Get locked oop from the handle we passed to jni
__ movptr(obj_reg, Address(oop_handle_reg, 0));
- __ resolve(IS_NOT_NULL, obj_reg);
Label done;
@@ -2911,7 +2909,7 @@ void SharedRuntime::generate_deopt_blob() {
pad += 1024;
}
#if INCLUDE_JVMCI
- if (EnableJVMCI || UseAOT) {
+ if (EnableJVMCI) {
pad += 512; // Increase the buffer size when compiling for JVMCI
}
#endif
@@ -2985,7 +2983,7 @@ void SharedRuntime::generate_deopt_blob() {
int implicit_exception_uncommon_trap_offset = 0;
int uncommon_trap_offset = 0;
- if (EnableJVMCI || UseAOT) {
+ if (EnableJVMCI) {
implicit_exception_uncommon_trap_offset = __ pc() - start;
__ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
@@ -3100,7 +3098,7 @@ void SharedRuntime::generate_deopt_blob() {
__ reset_last_Java_frame(false);
#if INCLUDE_JVMCI
- if (EnableJVMCI || UseAOT) {
+ if (EnableJVMCI) {
__ bind(after_fetch_unroll_info_call);
}
#endif
@@ -3263,7 +3261,7 @@ void SharedRuntime::generate_deopt_blob() {
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
- if (EnableJVMCI || UseAOT) {
+ if (EnableJVMCI) {
_deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
_deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
}
diff --git a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp
index 4266ecd2674..31ca53a80d1 100644
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp
@@ -266,7 +266,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
#if INCLUDE_JVMCI
// Check if we need to take lock at entry of synchronized method. This can
// only occur on method entry so emit it only for vtos with step 0.
- if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
+ if (EnableJVMCI && state == vtos && step == 0) {
Label L;
__ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
__ jcc(Assembler::zero, L);
@@ -597,7 +597,6 @@ void TemplateInterpreterGenerator::lock_method() {
#endif // ASSERT
__ bind(done);
- __ resolve(IS_NOT_NULL, rax);
}
// add space for monitor & lock
diff --git a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_64.cpp b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_64.cpp
index 12a177a1434..75dc0535d2a 100644
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_64.cpp
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_64.cpp
@@ -257,7 +257,6 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
__ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC
} else {
__ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
- __ resolve(IS_NOT_NULL | ACCESS_READ, buf);
__ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
__ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
__ addq(buf, off); // + offset
@@ -313,7 +312,6 @@ address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(Abstract
// "When calculating operand stack length, values of type long and double have length two."
} else {
__ movptr(buf, Address(rsp, 3 * wordSize)); // byte[] array
- __ resolve(IS_NOT_NULL | ACCESS_READ, buf);
__ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
__ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
__ addq(buf, off); // + offset
diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp
index 6dce6fc24c7..ed83bed2531 100644
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp
@@ -2727,11 +2727,11 @@ void TemplateTable::_return(TosState state) {
Label no_safepoint;
NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
#ifdef _LP64
- __ testb(Address(r15_thread, Thread::polling_word_offset()), SafepointMechanism::poll_bit());
+ __ testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
#else
const Register thread = rdi;
__ get_thread(thread);
- __ testb(Address(thread, Thread::polling_word_offset()), SafepointMechanism::poll_bit());
+ __ testb(Address(thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
#endif
__ jcc(Assembler::zero, no_safepoint);
__ push(state);
@@ -4611,8 +4611,6 @@ void TemplateTable::monitorenter() {
// check for NULL object
__ null_check(rax);
- __ resolve(IS_NOT_NULL, rax);
-
Label is_inline_type;
__ movptr(rbx, Address(rax, oopDesc::mark_offset_in_bytes()));
__ test_markword_is_inline_type(rbx, is_inline_type);
@@ -4719,8 +4717,6 @@ void TemplateTable::monitorexit() {
// check for NULL object
__ null_check(rax);
- __ resolve(IS_NOT_NULL, rax);
-
const int is_inline_type_mask = markWord::inline_type_pattern;
Label has_identity;
__ movptr(rbx, Address(rax, oopDesc::mark_offset_in_bytes()));
diff --git a/src/hotspot/cpu/x86/vmStructs_x86.hpp b/src/hotspot/cpu/x86/vmStructs_x86.hpp
index 8b84e4ee649..4569bd9a216 100644
--- a/src/hotspot/cpu/x86/vmStructs_x86.hpp
+++ b/src/hotspot/cpu/x86/vmStructs_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,52 +39,9 @@
declare_constant(frame::interpreter_frame_sender_sp_offset) \
declare_constant(frame::interpreter_frame_last_sp_offset)
-#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
- declare_constant(VM_Version::CPU_CX8) \
- declare_constant(VM_Version::CPU_CMOV) \
- declare_constant(VM_Version::CPU_FXSR) \
- declare_constant(VM_Version::CPU_HT) \
- declare_constant(VM_Version::CPU_MMX) \
- declare_constant(VM_Version::CPU_3DNOW_PREFETCH) \
- declare_constant(VM_Version::CPU_SSE) \
- declare_constant(VM_Version::CPU_SSE2) \
- declare_constant(VM_Version::CPU_SSE3) \
- declare_constant(VM_Version::CPU_SSSE3) \
- declare_constant(VM_Version::CPU_SSE4A) \
- declare_constant(VM_Version::CPU_SSE4_1) \
- declare_constant(VM_Version::CPU_SSE4_2) \
- declare_constant(VM_Version::CPU_POPCNT) \
- declare_constant(VM_Version::CPU_LZCNT) \
- declare_constant(VM_Version::CPU_TSC) \
- declare_constant(VM_Version::CPU_TSCINV) \
- declare_constant(VM_Version::CPU_AVX) \
- declare_constant(VM_Version::CPU_AVX2) \
- declare_constant(VM_Version::CPU_AES) \
- declare_constant(VM_Version::CPU_ERMS) \
- declare_constant(VM_Version::CPU_CLMUL) \
- declare_constant(VM_Version::CPU_BMI1) \
- declare_constant(VM_Version::CPU_BMI2) \
- declare_constant(VM_Version::CPU_RTM) \
- declare_constant(VM_Version::CPU_ADX) \
- declare_constant(VM_Version::CPU_AVX512F) \
- declare_constant(VM_Version::CPU_AVX512DQ) \
- declare_constant(VM_Version::CPU_AVX512PF) \
- declare_constant(VM_Version::CPU_AVX512ER) \
- declare_constant(VM_Version::CPU_AVX512CD) \
- declare_constant(VM_Version::CPU_AVX512BW) \
- declare_constant(VM_Version::CPU_AVX512VL) \
- declare_constant(VM_Version::CPU_SHA) \
- declare_constant(VM_Version::CPU_FMA) \
- declare_constant(VM_Version::CPU_VZEROUPPER) \
- declare_constant(VM_Version::CPU_AVX512_VPOPCNTDQ) \
- declare_constant(VM_Version::CPU_AVX512_VPCLMULQDQ) \
- declare_constant(VM_Version::CPU_AVX512_VAES) \
- declare_constant(VM_Version::CPU_AVX512_VNNI) \
- declare_constant(VM_Version::CPU_FLUSH) \
- declare_constant(VM_Version::CPU_FLUSHOPT) \
- declare_constant(VM_Version::CPU_CLWB) \
- declare_constant(VM_Version::CPU_AVX512_VBMI2) \
- declare_constant(VM_Version::CPU_AVX512_VBMI) \
- declare_constant(VM_Version::CPU_HV)
+#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
+
+#define DECLARE_LONG_CPU_FEATURE_CONSTANT(id, name, bit) GENERATE_VM_LONG_CONSTANT_ENTRY(VM_Version::CPU_##id)
+#define VM_LONG_CPU_FEATURE_CONSTANTS CPU_FEATURE_FLAGS(DECLARE_LONG_CPU_FEATURE_CONSTANT)
#endif // CPU_X86_VMSTRUCTS_X86_HPP
diff --git a/src/hotspot/cpu/x86/vm_version_x86.cpp b/src/hotspot/cpu/x86/vm_version_x86.cpp
index 0367effc3c4..eb3a3ad52f4 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp
@@ -45,7 +45,10 @@ int VM_Version::_model;
int VM_Version::_stepping;
bool VM_Version::_has_intel_jcc_erratum;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
-const char* VM_Version::_features_names[] = { FEATURES_NAMES };
+
+#define DECLARE_CPU_FEATURE_NAME(id, name, bit) name,
+const char* VM_Version::_features_names[] = { CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_NAME)};
+#undef DECLARE_CPU_FEATURE_FLAG
// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
@@ -782,7 +785,6 @@ void VM_Version::get_processor_features() {
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping, os::cpu_microcode_revision());
assert(res > 0, "not enough temporary space allocated");
- assert(log2i_exact((uint64_t)CPU_MAX_FEATURE) + 1 == sizeof(_features_names) / sizeof(char*), "wrong size features_names");
insert_features_names(buf + res, sizeof(buf) - res, _features_names);
_features_string = os::strdup(buf);
@@ -1700,6 +1702,9 @@ void VM_Version::get_processor_features() {
}
}
#endif // !PRODUCT
+ if (FLAG_IS_DEFAULT(UseSignumIntrinsic)) {
+ FLAG_SET_DEFAULT(UseSignumIntrinsic, true);
+ }
}
void VM_Version::print_platform_virtualization_info(outputStream* st) {
diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp
index ab5e35b5479..88334ec8c6f 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -300,87 +300,74 @@ class VM_Version : public Abstract_VM_Version {
static address _cpuinfo_cont_addr; // address of instruction after the one which causes SEGV
enum Feature_Flag : uint64_t {
- CPU_CX8 = (1ULL << 0), // next bits are from cpuid 1 (EDX)
- CPU_CMOV = (1ULL << 1),
- CPU_FXSR = (1ULL << 2),
- CPU_HT = (1ULL << 3),
-
- CPU_MMX = (1ULL << 4),
- CPU_3DNOW_PREFETCH = (1ULL << 5), // Processor supports 3dnow prefetch and prefetchw instructions
- // may not necessarily support other 3dnow instructions
- CPU_SSE = (1ULL << 6),
- CPU_SSE2 = (1ULL << 7),
-
- CPU_SSE3 = (1ULL << 8), // SSE3 comes from cpuid 1 (ECX)
- CPU_SSSE3 = (1ULL << 9),
- CPU_SSE4A = (1ULL << 10),
- CPU_SSE4_1 = (1ULL << 11),
-
- CPU_SSE4_2 = (1ULL << 12),
- CPU_POPCNT = (1ULL << 13),
- CPU_LZCNT = (1ULL << 14),
- CPU_TSC = (1ULL << 15),
-
- CPU_TSCINV_BIT = (1ULL << 16),
- CPU_TSCINV = (1ULL << 17),
- CPU_AVX = (1ULL << 18),
- CPU_AVX2 = (1ULL << 19),
-
- CPU_AES = (1ULL << 20),
- CPU_ERMS = (1ULL << 21), // enhanced 'rep movsb/stosb' instructions
- CPU_CLMUL = (1ULL << 22), // carryless multiply for CRC
- CPU_BMI1 = (1ULL << 23),
-
- CPU_BMI2 = (1ULL << 24),
- CPU_RTM = (1ULL << 25), // Restricted Transactional Memory instructions
- CPU_ADX = (1ULL << 26),
- CPU_AVX512F = (1ULL << 27), // AVX 512bit foundation instructions
-
- CPU_AVX512DQ = (1ULL << 28),
- CPU_AVX512PF = (1ULL << 29),
- CPU_AVX512ER = (1ULL << 30),
- CPU_AVX512CD = (1ULL << 31),
-
- CPU_AVX512BW = (1ULL << 32), // Byte and word vector instructions
- CPU_AVX512VL = (1ULL << 33), // EVEX instructions with smaller vector length
- CPU_SHA = (1ULL << 34), // SHA instructions
- CPU_FMA = (1ULL << 35), // FMA instructions
-
- CPU_VZEROUPPER = (1ULL << 36), // Vzeroupper instruction
- CPU_AVX512_VPOPCNTDQ = (1ULL << 37), // Vector popcount
- CPU_AVX512_VPCLMULQDQ = (1ULL << 38), // Vector carryless multiplication
- CPU_AVX512_VAES = (1ULL << 39), // Vector AES instruction
-
- CPU_AVX512_VNNI = (1ULL << 40), // Vector Neural Network Instructions
- CPU_FLUSH = (1ULL << 41), // flush instruction
- CPU_FLUSHOPT = (1ULL << 42), // flusopth instruction
- CPU_CLWB = (1ULL << 43), // clwb instruction
-
- CPU_AVX512_VBMI2 = (1ULL << 44), // VBMI2 shift left double instructions
- CPU_AVX512_VBMI = (1ULL << 45), // Vector BMI instructions
- CPU_HV = (1ULL << 46), // Hypervisor instructions
-
- CPU_MAX_FEATURE = CPU_HV
+#define CPU_FEATURE_FLAGS(decl) \
+ decl(CX8, "cx8", 0) /* next bits are from cpuid 1 (EDX) */ \
+ decl(CMOV, "cmov", 1) \
+ decl(FXSR, "fxsr", 2) \
+ decl(HT, "ht", 3) \
+ \
+ decl(MMX, "mmx", 4) \
+ decl(3DNOW_PREFETCH, "3dnowpref", 5) /* Processor supports 3dnow prefetch and prefetchw instructions */ \
+ /* may not necessarily support other 3dnow instructions */ \
+ decl(SSE, "sse", 6) \
+ decl(SSE2, "sse2", 7) \
+ \
+ decl(SSE3, "sse3", 8 ) /* SSE3 comes from cpuid 1 (ECX) */ \
+ decl(SSSE3, "ssse3", 9 ) \
+ decl(SSE4A, "sse4a", 10) \
+ decl(SSE4_1, "sse4.1", 11) \
+ \
+ decl(SSE4_2, "sse4.2", 12) \
+ decl(POPCNT, "popcnt", 13) \
+ decl(LZCNT, "lzcnt", 14) \
+ decl(TSC, "tsc", 15) \
+ \
+ decl(TSCINV_BIT, "tscinvbit", 16) \
+ decl(TSCINV, "tscinv", 17) \
+ decl(AVX, "avx", 18) \
+ decl(AVX2, "avx2", 19) \
+ \
+ decl(AES, "aes", 20) \
+ decl(ERMS, "erms", 21) /* enhanced 'rep movsb/stosb' instructions */ \
+ decl(CLMUL, "clmul", 22) /* carryless multiply for CRC */ \
+ decl(BMI1, "bmi1", 23) \
+ \
+ decl(BMI2, "bmi2", 24) \
+ decl(RTM, "rtm", 25) /* Restricted Transactional Memory instructions */ \
+ decl(ADX, "adx", 26) \
+ decl(AVX512F, "avx512f", 27) /* AVX 512bit foundation instructions */ \
+ \
+ decl(AVX512DQ, "avx512dq", 28) \
+ decl(AVX512PF, "avx512pf", 29) \
+ decl(AVX512ER, "avx512er", 30) \
+ decl(AVX512CD, "avx512cd", 31) \
+ \
+ decl(AVX512BW, "avx512bw", 32) /* Byte and word vector instructions */ \
+ decl(AVX512VL, "avx512vl", 33) /* EVEX instructions with smaller vector length */ \
+ decl(SHA, "sha", 34) /* SHA instructions */ \
+ decl(FMA, "fma", 35) /* FMA instructions */ \
+ \
+ decl(VZEROUPPER, "vzeroupper", 36) /* Vzeroupper instruction */ \
+ decl(AVX512_VPOPCNTDQ, "avx512_vpopcntdq", 37) /* Vector popcount */ \
+ decl(AVX512_VPCLMULQDQ, "avx512_vpclmulqdq", 38) /* Vector carryless multiplication */ \
+ decl(AVX512_VAES, "avx512_vaes", 39) /* Vector AES instruction */ \
+ \
+ decl(AVX512_VNNI, "avx512_vnni", 40) /* Vector Neural Network Instructions */ \
+ decl(FLUSH, "clflush", 41) /* flush instruction */ \
+ decl(FLUSHOPT, "clflushopt", 42) /* flusopth instruction */ \
+ decl(CLWB, "clwb", 43) /* clwb instruction */ \
+ \
+ decl(AVX512_VBMI2, "avx512_vbmi2", 44) /* VBMI2 shift left double instructions */ \
+ decl(AVX512_VBMI, "avx512_vbmi", 45) /* Vector BMI instructions */ \
+ decl(HV, "hv", 46) /* Hypervisor instructions */
+
+#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (1ULL << bit),
+ CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG)
+#undef DECLARE_CPU_FEATURE_FLAG
};
-#define FEATURES_NAMES \
- "cx8", "cmov", "fxsr", "ht", \
- "mmx", "3dnowpref", "sse", "sse2", \
- "sse3", "ssse3", "sse4a", "sse4.1", \
- "sse4.2", "popcnt", "lzcnt", "tsc", \
- "tscinvbit", "tscinv", "avx", "avx2", \
- "aes", "erms", "clmul", "bmi1", \
- "bmi2", "rtm", "adx", "avx512f", \
- "avx512dq", "avx512pf", "avx512er", "avx512cd", \
- "avx512bw", "avx512vl", "sha", "fma", \
- "vzeroupper", "avx512_vpopcntdq", "avx512_vpclmulqdq", "avx512_vaes", \
- "avx512_vnni", "clflush", "clflushopt", "clwb", \
- "avx512_vmbi2", "avx512_vmbi", "hv"
-
static const char* _features_names[];
- // NB! When adding new CPU feature detection consider updating vmStructs_x86.hpp, vmStructs_jvmci.hpp, and VM_Version::get_processor_features().
-
enum Extended_Family {
// AMD
CPU_FAMILY_AMD_11H = 0x11,
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index 376bfc91c79..0fb5d381e98 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -1599,6 +1599,16 @@ const bool Matcher::match_rule_supported(int opcode) {
}
break;
#endif // !LP64
+ case Op_SignumF:
+ if (UseSSE < 1) {
+ return false;
+ }
+ break;
+ case Op_SignumD:
+ if (UseSSE < 2) {
+ return false;
+ }
+ break;
}
return true; // Match rules are supported by default.
}
@@ -2119,6 +2129,10 @@ bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
mstack.push(m, Visit);
return true;
}
+ if (is_vshift_con_pattern(n, m)) { // ShiftV src (ShiftCntV con)
+ mstack.push(m, Visit); // m = ShiftCntV
+ return true;
+ }
return false;
}
@@ -5787,6 +5801,30 @@ instruct evminmaxFP_reg_eavx(vec dst, vec a, vec b, vec atmp, vec btmp, kReg ktm
ins_pipe( pipe_slow );
%}
+// --------------------------------- Signum ---------------------------
+
+instruct signumF_reg(regF dst, regF zero, regF one, rRegP scratch, rFlagsReg cr) %{
+ match(Set dst (SignumF dst (Binary zero one)));
+ effect(TEMP scratch, KILL cr);
+ format %{ "signumF $dst, $dst\t! using $scratch as TEMP" %}
+ ins_encode %{
+ int opcode = this->ideal_Opcode();
+ __ signum_fp(opcode, $dst$$XMMRegister, $zero$$XMMRegister, $one$$XMMRegister, $scratch$$Register);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct signumD_reg(regD dst, regD zero, regD one, rRegP scratch, rFlagsReg cr) %{
+ match(Set dst (SignumD dst (Binary zero one)));
+ effect(TEMP scratch, KILL cr);
+ format %{ "signumD $dst, $dst\t! using $scratch as TEMP" %}
+ ins_encode %{
+ int opcode = this->ideal_Opcode();
+ __ signum_fp(opcode, $dst$$XMMRegister, $zero$$XMMRegister, $one$$XMMRegister, $scratch$$Register);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
// --------------------------------- Sqrt --------------------------------------
instruct vsqrtF_reg(vec dst, vec src) %{
@@ -7528,6 +7566,18 @@ instruct vstoreMask8B_evex(vec dst, vec src, immI_8 size) %{
ins_pipe( pipe_slow );
%}
+instruct vmaskcast(vec dst) %{
+ predicate((vector_length(n) == vector_length(n->in(1))) &&
+ (vector_length_in_bytes(n) == vector_length_in_bytes(n->in(1))));
+ match(Set dst (VectorMaskCast dst));
+ ins_cost(0);
+ format %{ "vector_mask_cast $dst" %}
+ ins_encode %{
+ // empty
+ %}
+ ins_pipe(empty);
+%}
+
//-------------------------------- Load Iota Indices ----------------------------------
instruct loadIotaIndices(vec dst, immI_0 src, rRegP scratch) %{
diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad
index 374e7bc2e17..d0b7656ac83 100644
--- a/src/hotspot/cpu/x86/x86_64.ad
+++ b/src/hotspot/cpu/x86/x86_64.ad
@@ -2228,9 +2228,6 @@ encode %{
ciEnv::current()->record_failure("CodeCache is full");
return;
}
-#if INCLUDE_AOT
- CompiledStaticCall::emit_to_aot_stub(cbuf, mark);
-#endif
}
%}
diff --git a/src/hotspot/cpu/zero/vm_version_ext_zero.cpp b/src/hotspot/cpu/zero/vm_version_ext_zero.cpp
index 1564edb8850..9e4f45fd679 100644
--- a/src/hotspot/cpu/zero/vm_version_ext_zero.cpp
+++ b/src/hotspot/cpu/zero/vm_version_ext_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "vm_version_ext_zero.hpp"
// VM_Version_Ext statics
diff --git a/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp b/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp
index 39bedd0fde3..16e49e5ae60 100644
--- a/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp
+++ b/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -551,8 +551,9 @@ int ZeroInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
}
int ZeroInterpreter::getter_entry(Method* method, intptr_t UNUSED, TRAPS) {
+ JavaThread* thread = THREAD->as_Java_thread();
// Drop into the slow path if we need a safepoint check
- if (SafepointMechanism::should_process(THREAD)) {
+ if (SafepointMechanism::should_process(thread)) {
return normal_entry(method, 0, THREAD);
}
@@ -577,7 +578,6 @@ int ZeroInterpreter::getter_entry(Method* method, intptr_t UNUSED, TRAPS) {
return normal_entry(method, 0, THREAD);
}
- JavaThread* thread = THREAD->as_Java_thread();
ZeroStack* stack = thread->zero_stack();
intptr_t* topOfStack = stack->sp();
@@ -639,8 +639,9 @@ int ZeroInterpreter::getter_entry(Method* method, intptr_t UNUSED, TRAPS) {
}
int ZeroInterpreter::setter_entry(Method* method, intptr_t UNUSED, TRAPS) {
+ JavaThread* thread = THREAD->as_Java_thread();
// Drop into the slow path if we need a safepoint check
- if (SafepointMechanism::should_process(THREAD)) {
+ if (SafepointMechanism::should_process(thread)) {
return normal_entry(method, 0, THREAD);
}
@@ -666,7 +667,6 @@ int ZeroInterpreter::setter_entry(Method* method, intptr_t UNUSED, TRAPS) {
return normal_entry(method, 0, THREAD);
}
- JavaThread* thread = THREAD->as_Java_thread();
ZeroStack* stack = thread->zero_stack();
intptr_t* topOfStack = stack->sp();
@@ -734,7 +734,7 @@ int ZeroInterpreter::empty_entry(Method* method, intptr_t UNUSED, TRAPS) {
ZeroStack *stack = thread->zero_stack();
// Drop into the slow path if we need a safepoint check
- if (SafepointMechanism::should_process(THREAD)) {
+ if (SafepointMechanism::should_process(thread)) {
return normal_entry(method, 0, THREAD);
}
diff --git a/src/hotspot/os/aix/attachListener_aix.cpp b/src/hotspot/os/aix/attachListener_aix.cpp
index ab81becae7c..25dfe8d816b 100644
--- a/src/hotspot/os/aix/attachListener_aix.cpp
+++ b/src/hotspot/os/aix/attachListener_aix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -440,10 +440,6 @@ void AixAttachOperation::complete(jint result, bufferedStream* st) {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
// write operation result
char msg[32];
sprintf(msg, "%d\n", result);
@@ -459,9 +455,6 @@ void AixAttachOperation::complete(jint result, bufferedStream* st) {
// done
::close(this->socket());
- // were we externally suspended while we were waiting?
- thread->check_and_wait_while_suspended();
-
delete this;
}
@@ -472,15 +465,8 @@ AttachOperation* AttachListener::dequeue() {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
AttachOperation* op = AixAttachListener::dequeue();
- // were we externally suspended while we were waiting?
- thread->check_and_wait_while_suspended();
-
return op;
}
@@ -511,15 +497,8 @@ int AttachListener::pd_init() {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
int ret_code = AixAttachListener::init();
- // were we externally suspended while we were waiting?
- thread->check_and_wait_while_suspended();
-
return ret_code;
}
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index 1a668a9a6a9..d857ce0fcd8 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -42,7 +42,6 @@
#include "libodm_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/filemap.hpp"
#include "misc_aix.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
@@ -1575,10 +1574,10 @@ static char* reserve_shmated_memory (size_t bytes, char* requested_addr) {
// We must prevent anyone from attaching too close to the
// BRK because that may cause malloc OOM.
if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
- trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
- "Will attach anywhere.", p2i(requested_addr));
- // Act like the OS refused to attach there.
- requested_addr = NULL;
+ trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment.", p2i(requested_addr));
+ // Since we treat an attach to the wrong address as an error later anyway,
+ // we return NULL here
+ return NULL;
}
// For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
@@ -1704,10 +1703,10 @@ static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
// We must prevent anyone from attaching too close to the
// BRK because that may cause malloc OOM.
if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
- trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
- "Will attach anywhere.", p2i(requested_addr));
- // Act like the OS refused to attach there.
- requested_addr = NULL;
+ trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment.", p2i(requested_addr));
+ // Since we treat an attach to the wrong address as an error later anyway,
+ // we return NULL here
+ return NULL;
}
// In 64K mode, we lie and claim the global page size (os::vm_page_size()) is 64K
@@ -1749,6 +1748,11 @@ static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
if (addr == MAP_FAILED) {
trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", p2i(requested_addr), size, errno);
return NULL;
+ } else if (requested_addr != NULL && addr != requested_addr) {
+ trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) succeeded, but at a different address than requested (" PTR_FORMAT "), will unmap",
+ p2i(requested_addr), size, p2i(addr));
+ ::munmap(addr, extra_size);
+ return NULL;
}
// Handle alignment.
@@ -1764,16 +1768,8 @@ static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
}
addr = addr_aligned;
- if (addr) {
- trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
- p2i(addr), p2i(addr + bytes), bytes);
- } else {
- if (requested_addr != NULL) {
- trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, p2i(requested_addr));
- } else {
- trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
- }
- }
+ trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
+ p2i(addr), p2i(addr + bytes), bytes);
// bookkeeping
vmembk_add(addr, size, 4*K, VMEM_MAPPED);
@@ -1990,6 +1986,7 @@ bool os::pd_release_memory(char* addr, size_t size) {
// Dynamically do different things for mmap/shmat.
vmembk_t* const vmi = vmembk_find(addr);
guarantee0(vmi);
+ vmi->assert_is_valid_subrange(addr, size);
// Always round to os::vm_page_size(), which may be larger than 4K.
size = align_up(size, os::vm_page_size());
@@ -2003,7 +2000,6 @@ bool os::pd_release_memory(char* addr, size_t size) {
// - If user only wants to release a partial range, uncommit (disclaim) that
// range. That way, at least, we do not use memory anymore (bust still page
// table space).
- vmi->assert_is_valid_subrange(addr, size);
if (addr == vmi->addr && size == vmi->size) {
rc = release_shmated_memory(addr, size);
remove_bookkeeping = true;
@@ -2011,12 +2007,30 @@ bool os::pd_release_memory(char* addr, size_t size) {
rc = uncommit_shmated_memory(addr, size);
}
} else {
- // User may unmap partial regions but region has to be fully contained.
-#ifdef ASSERT
- vmi->assert_is_valid_subrange(addr, size);
-#endif
+ // In mmap-mode:
+ // - If the user wants to release the full range, we do that and remove the mapping.
+ // - If the user wants to release part of the range, we release that part, but need
+ // to adjust bookkeeping.
+ assert(is_aligned(size, 4 * K), "Sanity");
rc = release_mmaped_memory(addr, size);
- remove_bookkeeping = true;
+ if (addr == vmi->addr && size == vmi->size) {
+ remove_bookkeeping = true;
+ } else {
+ if (addr == vmi->addr && size < vmi->size) {
+ // Chopped from head
+ vmi->addr += size;
+ vmi->size -= size;
+ } else if (addr + size == vmi->addr + vmi->size) {
+ // Chopped from tail
+ vmi->size -= size;
+ } else {
+ // releasing a mapping in the middle of the original mapping:
+ // For now we forbid this, since this is an invalid scenario
+ // (the bookkeeping is easy enough to fix if needed but there
+ // is no use case for it; any occurrence is likely an error.
+ ShouldNotReachHere();
+ }
+ }
}
// update bookkeeping
diff --git a/src/hotspot/os/bsd/attachListener_bsd.cpp b/src/hotspot/os/bsd/attachListener_bsd.cpp
index c951aee42c4..9daad43dc7a 100644
--- a/src/hotspot/os/bsd/attachListener_bsd.cpp
+++ b/src/hotspot/os/bsd/attachListener_bsd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -409,10 +409,6 @@ void BsdAttachOperation::complete(jint result, bufferedStream* st) {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
// write operation result
char msg[32];
sprintf(msg, "%d\n", result);
@@ -427,9 +423,6 @@ void BsdAttachOperation::complete(jint result, bufferedStream* st) {
// done
::close(this->socket());
- // were we externally suspended while we were waiting?
- thread->check_and_wait_while_suspended();
-
delete this;
}
@@ -440,15 +433,8 @@ AttachOperation* AttachListener::dequeue() {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
AttachOperation* op = BsdAttachListener::dequeue();
- // were we externally suspended while we were waiting?
- thread->check_and_wait_while_suspended();
-
return op;
}
@@ -479,15 +465,8 @@ int AttachListener::pd_init() {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
int ret_code = BsdAttachListener::init();
- // were we externally suspended while we were waiting?
- thread->check_and_wait_while_suspended();
-
return ret_code;
}
diff --git a/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp b/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp
index a0fe34c6504..4dc46651f92 100644
--- a/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp
+++ b/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
#include "gc/z/zNUMA.hpp"
void ZNUMA::pd_initialize() {
- _enabled = false;
+ _state = Disabled;
}
uint32_t ZNUMA::count() {
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 6c419a2fa13..52b7e1496cc 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -34,7 +34,6 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/filemap.hpp"
#include "oops/oop.inline.hpp"
#include "os_bsd.inline.hpp"
#include "os_posix.inline.hpp"
@@ -128,7 +127,7 @@ static jlong initial_time_count=0;
static int clock_tics_per_sec = 100;
-#ifdef __APPLE__
+#if defined(__APPLE__) && defined(__x86_64__)
static const int processor_id_unassigned = -1;
static const int processor_id_assigning = -2;
static const int processor_id_map_size = 256;
@@ -238,7 +237,7 @@ void os::Bsd::initialize_system_info() {
set_processor_count(1); // fallback
}
-#ifdef __APPLE__
+#if defined(__APPLE__) && defined(__x86_64__)
// initialize processor id map
for (int i = 0; i < processor_id_map_size; i++) {
processor_id_map[i] = processor_id_unassigned;
@@ -2114,8 +2113,8 @@ int os::active_processor_count() {
return _processor_count;
}
-#if defined(__APPLE__) && defined(__x86_64__)
uint os::processor_id() {
+#if defined(__APPLE__) && defined(__x86_64__)
// Get the initial APIC id and return the associated processor id. The initial APIC
// id is limited to 8-bits, which means we can have at most 256 unique APIC ids. If
// the system has more processors (or the initial APIC ids are discontiguous) the
@@ -2146,8 +2145,13 @@ uint os::processor_id() {
assert(processor_id >= 0 && processor_id < os::processor_count(), "invalid processor id");
return (uint)processor_id;
-}
+#else // defined(__APPLE__) && defined(__x86_64__)
+ // Return 0 until we find a good way to get the current processor id on
+ // the platform. Returning 0 is safe, since there is always at least one
+ // processor, but might not be optimal for performance in some cases.
+ return 0;
#endif
+}
void os::set_native_thread_name(const char *name) {
#if defined(__APPLE__) && MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
diff --git a/src/hotspot/os/linux/attachListener_linux.cpp b/src/hotspot/os/linux/attachListener_linux.cpp
index aa114d91232..628c3f1c462 100644
--- a/src/hotspot/os/linux/attachListener_linux.cpp
+++ b/src/hotspot/os/linux/attachListener_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -409,10 +409,6 @@ void LinuxAttachOperation::complete(jint result, bufferedStream* st) {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
// write operation result
char msg[32];
sprintf(msg, "%d\n", result);
@@ -427,9 +423,6 @@ void LinuxAttachOperation::complete(jint result, bufferedStream* st) {
// done
::close(this->socket());
- // were we externally suspended while we were waiting?
- thread->check_and_wait_while_suspended();
-
delete this;
}
@@ -440,15 +433,8 @@ AttachOperation* AttachListener::dequeue() {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
AttachOperation* op = LinuxAttachListener::dequeue();
- // were we externally suspended while we were waiting?
- thread->check_and_wait_while_suspended();
-
return op;
}
@@ -479,15 +465,8 @@ int AttachListener::pd_init() {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
int ret_code = LinuxAttachListener::init();
- // were we externally suspended while we were waiting?
- thread->check_and_wait_while_suspended();
-
return ret_code;
}
diff --git a/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp b/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp
index cfe25549ffc..82395201252 100644
--- a/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp
+++ b/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,17 +24,39 @@
#include "gc/z/zCPU.inline.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zNUMA.hpp"
+#include "gc/z/zNUMA.inline.hpp"
#include "gc/z/zSyscall_linux.hpp"
#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
+static bool numa_memory_id(void* addr, uint32_t* id) {
+ return ZSyscall::get_mempolicy((int*)id, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) != -1;
+}
+
+static bool is_numa_supported() {
+ // Test if syscall is available
+ uint32_t dummy = 0;
+ const bool available = numa_memory_id(&dummy, &dummy);
+
+ if (!available && !FLAG_IS_DEFAULT(UseNUMA)) {
+ warning("NUMA support disabled, system call get_mempolicy not available");
+ }
+
+ return available;
+}
+
void ZNUMA::pd_initialize() {
- _enabled = UseNUMA;
+ if (!UseNUMA) {
+ _state = Disabled;
+ } else {
+ _state = is_numa_supported() ? Enabled : Unsupported;
+ }
}
uint32_t ZNUMA::count() {
- if (!_enabled) {
+ if (!is_enabled()) {
// NUMA support not enabled
return 1;
}
@@ -43,7 +65,7 @@ uint32_t ZNUMA::count() {
}
uint32_t ZNUMA::id() {
- if (!_enabled) {
+ if (!is_enabled()) {
// NUMA support not enabled
return 0;
}
@@ -52,14 +74,14 @@ uint32_t ZNUMA::id() {
}
uint32_t ZNUMA::memory_id(uintptr_t addr) {
- if (!_enabled) {
+ if (!is_enabled()) {
// NUMA support not enabled, assume everything belongs to node zero
return 0;
}
uint32_t id = (uint32_t)-1;
- if (ZSyscall::get_mempolicy((int*)&id, NULL, 0, (void*)addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
+ if (!numa_memory_id((void*)addr, &id)) {
ZErrno err;
fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
}
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index b4c062c9c50..e5154f4bf58 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -34,7 +34,6 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/filemap.hpp"
#include "oops/oop.inline.hpp"
#include "os_linux.inline.hpp"
#include "os_posix.inline.hpp"
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index 719e2fe9ce3..ae058dd345b 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -53,11 +53,13 @@
#include
#include
#include
+#include
#include
#include
#include
#include
#include
+#include
#include
#include
#include
@@ -631,6 +633,22 @@ bool os::has_allocatable_memory_limit(size_t* limit) {
#endif
}
+void os::dll_unload(void *lib) {
+ ::dlclose(lib);
+}
+
+jlong os::lseek(int fd, jlong offset, int whence) {
+ return (jlong) BSD_ONLY(::lseek) NOT_BSD(::lseek64)(fd, offset, whence);
+}
+
+int os::fsync(int fd) {
+ return ::fsync(fd);
+}
+
+int os::ftruncate(int fd, jlong length) {
+ return BSD_ONLY(::ftruncate) NOT_BSD(::ftruncate64)(fd, length);
+}
+
const char* os::get_current_directory(char *buf, size_t buflen) {
return getcwd(buf, buflen);
}
@@ -639,10 +657,20 @@ FILE* os::open(int fd, const char* mode) {
return ::fdopen(fd, mode);
}
+size_t os::write(int fd, const void *buf, unsigned int nBytes) {
+ size_t res;
+ RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
+ return res;
+}
+
ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
return ::pread(fd, buf, nBytes, offset);
}
+int os::close(int fd) {
+ return ::close(fd);
+}
+
void os::flockfile(FILE* fp) {
::flockfile(fp);
}
@@ -666,6 +694,38 @@ int os::closedir(DIR *dirp) {
return ::closedir(dirp);
}
+int os::socket_close(int fd) {
+ return ::close(fd);
+}
+
+int os::socket(int domain, int type, int protocol) {
+ return ::socket(domain, type, protocol);
+}
+
+int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
+ RESTARTABLE_RETURN_INT(::recv(fd, buf, nBytes, flags));
+}
+
+int os::send(int fd, char* buf, size_t nBytes, uint flags) {
+ RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
+}
+
+int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
+ return os::send(fd, buf, nBytes, flags);
+}
+
+int os::connect(int fd, struct sockaddr* him, socklen_t len) {
+ RESTARTABLE_RETURN_INT(::connect(fd, him, len));
+}
+
+struct hostent* os::get_host_by_name(char* name) {
+ return ::gethostbyname(name);
+}
+
+void os::exit(int num) {
+ ::exit(num);
+}
+
// Builds a platform dependent Agent_OnLoad_ function name
// which is used to find statically linked in agents.
// Parameters:
@@ -792,6 +852,14 @@ char * os::native_path(char *path) {
}
bool os::same_files(const char* file1, const char* file2) {
+ if (file1 == nullptr && file2 == nullptr) {
+ return true;
+ }
+
+ if (file1 == nullptr || file2 == nullptr) {
+ return false;
+ }
+
if (strcmp(file1, file2) == 0) {
return true;
}
@@ -1604,8 +1672,6 @@ void Parker::park(bool isAbsolute, jlong time) {
}
OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
- jt->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
assert(_cur_index == -1, "invariant");
if (time == 0) {
@@ -1628,11 +1694,6 @@ void Parker::park(bool isAbsolute, jlong time) {
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
-
- // If externally suspended while waiting, re-suspend
- if (jt->handle_special_suspend_equivalent_condition()) {
- jt->java_suspend_self();
- }
}
void Parker::unpark() {
diff --git a/src/hotspot/os/posix/os_posix.hpp b/src/hotspot/os/posix/os_posix.hpp
index 45c647c4f78..af2c158511b 100644
--- a/src/hotspot/os/posix/os_posix.hpp
+++ b/src/hotspot/os/posix/os_posix.hpp
@@ -22,8 +22,6 @@
*
*/
-#include "runtime/os.hpp"
-
#ifndef OS_POSIX_OS_POSIX_HPP
#define OS_POSIX_OS_POSIX_HPP
diff --git a/src/hotspot/os/posix/os_posix.inline.hpp b/src/hotspot/os/posix/os_posix.inline.hpp
index 600ade67032..33ee5da3bc9 100644
--- a/src/hotspot/os/posix/os_posix.inline.hpp
+++ b/src/hotspot/os/posix/os_posix.inline.hpp
@@ -43,69 +43,10 @@
return _result; \
} while(false)
-
-inline void os::dll_unload(void *lib) {
- ::dlclose(lib);
-}
-
-inline jlong os::lseek(int fd, jlong offset, int whence) {
- return (jlong) BSD_ONLY(::lseek) NOT_BSD(::lseek64)(fd, offset, whence);
-}
-
-inline int os::fsync(int fd) {
- return ::fsync(fd);
-}
-
-inline int os::ftruncate(int fd, jlong length) {
- return BSD_ONLY(::ftruncate) NOT_BSD(::ftruncate64)(fd, length);
-}
-
// Aix does not have NUMA support but need these for compilation.
inline bool os::numa_has_static_binding() { AIX_ONLY(ShouldNotReachHere();) return true; }
inline bool os::numa_has_group_homing() { AIX_ONLY(ShouldNotReachHere();) return false; }
-inline size_t os::write(int fd, const void *buf, unsigned int nBytes) {
- size_t res;
- RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
- return res;
-}
-
-inline int os::close(int fd) {
- return ::close(fd);
-}
-
-inline int os::socket_close(int fd) {
- return ::close(fd);
-}
-
-inline int os::socket(int domain, int type, int protocol) {
- return ::socket(domain, type, protocol);
-}
-
-inline int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
- RESTARTABLE_RETURN_INT(::recv(fd, buf, nBytes, flags));
-}
-
-inline int os::send(int fd, char* buf, size_t nBytes, uint flags) {
- RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
-}
-
-inline int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
- return os::send(fd, buf, nBytes, flags);
-}
-
-inline int os::connect(int fd, struct sockaddr* him, socklen_t len) {
- RESTARTABLE_RETURN_INT(::connect(fd, him, len));
-}
-
-inline struct hostent* os::get_host_by_name(char* name) {
- return ::gethostbyname(name);
-}
-
-inline void os::exit(int num) {
- ::exit(num);
-}
-
// Platform Mutex/Monitor implementation
inline void os::PlatformMutex::lock() {
diff --git a/src/hotspot/os/posix/perfMemory_posix.cpp b/src/hotspot/os/posix/perfMemory_posix.cpp
index 943a3b30299..8919afffb2c 100644
--- a/src/hotspot/os/posix/perfMemory_posix.cpp
+++ b/src/hotspot/os/posix/perfMemory_posix.cpp
@@ -530,6 +530,7 @@ static char* get_user_name_slow(int vmid, int nspid, TRAPS) {
int searchpid;
char* tmpdirname = (char *)os::get_temp_directory();
#if defined(LINUX)
+ char buffer[MAXPATHLEN + 1];
assert(strlen(tmpdirname) == 4, "No longer using /tmp - update buffer size");
// On Linux, if nspid != -1, look in /proc/{vmid}/root/tmp for directories
@@ -537,7 +538,6 @@ static char* get_user_name_slow(int vmid, int nspid, TRAPS) {
if (nspid == -1) {
searchpid = vmid;
} else {
- char buffer[MAXPATHLEN + 1];
jio_snprintf(buffer, MAXPATHLEN, "/proc/%d/root%s", vmid, tmpdirname);
tmpdirname = buffer;
searchpid = nspid;
diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp
index a2bc9d2d544..c2e5d50bb95 100644
--- a/src/hotspot/os/posix/signals_posix.cpp
+++ b/src/hotspot/os/posix/signals_posix.cpp
@@ -32,6 +32,7 @@
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
+#include "runtime/semaphore.inline.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "signals_posix.hpp"
@@ -369,27 +370,7 @@ static int check_pending_signals() {
return i;
}
}
- JavaThread *thread = JavaThread::current();
- ThreadBlockInVM tbivm(thread);
-
- bool threadIsSuspended;
- do {
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
- sig_semaphore->wait();
-
- // were we externally suspended while we were waiting?
- threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
- if (threadIsSuspended) {
- // The semaphore has been incremented, but while we were waiting
- // another thread suspended us. We don't want to continue running
- // while suspended because that would surprise the thread that
- // suspended us.
- sig_semaphore->signal();
-
- thread->java_suspend_self();
- }
- } while (threadIsSuspended);
+ sig_semaphore->wait_with_safepoint_check(JavaThread::current());
}
ShouldNotReachHere();
return 0; // Satisfy compiler
@@ -1555,10 +1536,6 @@ void PosixSignals::hotspot_sigmask(Thread* thread) {
// - sets target osthread state to continue
// - sends signal to end the sigsuspend loop in the SR_handler
//
-// Note that the SR_lock plays no role in this suspend/resume protocol,
-// but is checked for NULL in SR_handler as a thread termination indicator.
-// The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs.
-//
// Note that resume_clear_context() and suspend_save_context() are needed
// by SR_handler(), so that fetch_frame_from_context() works,
// which in part is used by:
@@ -1603,11 +1580,11 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
// On some systems we have seen signal delivery get "stuck" until the signal
// mask is changed as part of thread termination. Check that the current thread
- // has not already terminated (via SR_lock()) - else the following assertion
+ // has not already terminated - else the following assertion
// will fail because the thread is no longer a JavaThread as the ~JavaThread
// destructor has completed.
- if (thread->SR_lock() == NULL) {
+ if (thread->has_terminated()) {
return;
}
diff --git a/src/hotspot/os/posix/vmError_posix.cpp b/src/hotspot/os/posix/vmError_posix.cpp
index 68c14db929a..845dda67f75 100644
--- a/src/hotspot/os/posix/vmError_posix.cpp
+++ b/src/hotspot/os/posix/vmError_posix.cpp
@@ -24,7 +24,7 @@
*/
#include "precompiled.hpp"
-#include "memory/metaspaceShared.hpp"
+#include "cds/metaspaceShared.hpp"
#include "runtime/os.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
diff --git a/src/hotspot/os/windows/attachListener_windows.cpp b/src/hotspot/os/windows/attachListener_windows.cpp
index 6a5de964072..8b5a2cb7ab4 100644
--- a/src/hotspot/os/windows/attachListener_windows.cpp
+++ b/src/hotspot/os/windows/attachListener_windows.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -310,10 +310,6 @@ void Win32AttachOperation::complete(jint result, bufferedStream* result_stream)
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
HANDLE hPipe = open_pipe();
int lastError = (int)::GetLastError();
if (hPipe != INVALID_HANDLE_VALUE) {
@@ -351,9 +347,6 @@ void Win32AttachOperation::complete(jint result, bufferedStream* result_stream)
::ReleaseMutex(Win32AttachListener::mutex());
}
-
- // were we externally suspended while we were waiting?
- thread->check_and_wait_while_suspended();
}
@@ -363,15 +356,8 @@ AttachOperation* AttachListener::dequeue() {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
AttachOperation* op = Win32AttachListener::dequeue();
- // were we externally suspended while we were waiting?
- thread->check_and_wait_while_suspended();
-
return op;
}
diff --git a/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp b/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp
index a0fe34c6504..4dc46651f92 100644
--- a/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp
+++ b/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
#include "gc/z/zNUMA.hpp"
void ZNUMA::pd_initialize() {
- _enabled = false;
+ _state = Disabled;
}
uint32_t ZNUMA::count() {
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index 52fd6ef0152..43c66b0dfc0 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -39,7 +39,6 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/filemap.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_windows.hpp"
#include "os_windows.inline.hpp"
@@ -59,6 +58,7 @@
#include "runtime/perfMemory.hpp"
#include "runtime/safefetch.inline.hpp"
#include "runtime/safepointMechanism.hpp"
+#include "runtime/semaphore.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/thread.inline.hpp"
@@ -130,9 +130,6 @@ static FILETIME process_kernel_time;
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
PVOID topLevelVectoredExceptionHandler = NULL;
LPTOP_LEVEL_EXCEPTION_FILTER previousUnhandledExceptionFilter = NULL;
-#elif INCLUDE_AOT
-PVOID topLevelVectoredExceptionHandler = NULL;
-LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
#endif
// save DLL module handle, used by GetModuleFileName
@@ -153,7 +150,7 @@ BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
if (ForceTimeHighResolution) {
timeEndPeriod(1L);
}
-#if defined(USE_VECTORED_EXCEPTION_HANDLING) || INCLUDE_AOT
+#if defined(USE_VECTORED_EXCEPTION_HANDLING)
if (topLevelVectoredExceptionHandler != NULL) {
RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
topLevelVectoredExceptionHandler = NULL;
@@ -1190,6 +1187,16 @@ void os::die() {
win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}
+const char* os::dll_file_extension() { return ".dll"; }
+
+void os::dll_unload(void *lib) {
+ ::FreeLibrary((HMODULE)lib);
+}
+
+void* os::dll_lookup(void *lib, const char *name) {
+ return (void*)::GetProcAddress((HMODULE)lib, name);
+}
+
// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c 1.15 00/02/02
//
@@ -2197,28 +2204,7 @@ static int check_pending_signals() {
return i;
}
}
- JavaThread *thread = JavaThread::current();
-
- ThreadBlockInVM tbivm(thread);
-
- bool threadIsSuspended;
- do {
- thread->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
- sig_sem->wait();
-
- // were we externally suspended while we were waiting?
- threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
- if (threadIsSuspended) {
- // The semaphore has been incremented, but while we were waiting
- // another thread suspended us. We don't want to continue running
- // while suspended because that would surprise the thread that
- // suspended us.
- sig_sem->signal();
-
- thread->java_suspend_self();
- }
- } while (threadIsSuspended);
+ sig_sem->wait_with_safepoint_check(JavaThread::current());
}
ShouldNotReachHere();
return 0; // Satisfy compiler
@@ -2695,7 +2681,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
return EXCEPTION_CONTINUE_SEARCH;
}
-#if defined(USE_VECTORED_EXCEPTION_HANDLING) || INCLUDE_AOT
+#if defined(USE_VECTORED_EXCEPTION_HANDLING)
LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
#if defined(_M_ARM64)
@@ -2711,9 +2697,7 @@ LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptio
return topLevelExceptionFilter(exceptionInfo);
}
- // Handle the case where we get an implicit exception in AOT generated
- // code. AOT DLL's loaded are not registered for structured exceptions.
- // If the exception occurred in the codeCache or AOT code, pass control
+ // If the exception occurred in the codeCache, pass control
// to our normal exception handler.
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb != NULL) {
@@ -4162,14 +4146,6 @@ jint os::init_2(void) {
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
topLevelVectoredExceptionHandler = AddVectoredExceptionHandler(1, topLevelVectoredExceptionFilter);
previousUnhandledExceptionFilter = SetUnhandledExceptionFilter(topLevelUnhandledExceptionFilter);
-#elif INCLUDE_AOT
- // If AOT is enabled we need to install a vectored exception handler
- // in order to forward implicit exceptions from code in AOT
- // generated DLLs. This is necessary since these DLLs are not
- // registered for structured exceptions like codecache methods are.
- if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
- topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
- }
#endif
// for debugging float code generation bugs
@@ -4643,6 +4619,18 @@ FILE* os::open(int fd, const char* mode) {
return ::_fdopen(fd, mode);
}
+size_t os::write(int fd, const void *buf, unsigned int nBytes) {
+ return ::write(fd, buf, nBytes);
+}
+
+int os::close(int fd) {
+ return ::close(fd);
+}
+
+void os::exit(int num) {
+ win32::exit_process_or_thread(win32::EPT_PROCESS, num);
+}
+
// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
errno_t err;
@@ -5465,15 +5453,9 @@ void Parker::park(bool isAbsolute, jlong time) {
} else {
ThreadBlockInVM tbivm(thread);
OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
- thread->set_suspend_equivalent();
WaitForSingleObject(_ParkHandle, time);
ResetEvent(_ParkHandle);
-
- // If externally suspended while waiting, re-suspend
- if (thread->handle_special_suspend_equivalent_condition()) {
- thread->java_suspend_self();
- }
}
}
diff --git a/src/hotspot/os/windows/os_windows.inline.hpp b/src/hotspot/os/windows/os_windows.inline.hpp
index 026bd6de198..70c8246ec98 100644
--- a/src/hotspot/os/windows/os_windows.inline.hpp
+++ b/src/hotspot/os/windows/os_windows.inline.hpp
@@ -28,16 +28,6 @@
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
-inline const char* os::dll_file_extension() { return ".dll"; }
-
-inline void os::dll_unload(void *lib) {
- ::FreeLibrary((HMODULE)lib);
-}
-
-inline void* os::dll_lookup(void *lib, const char *name) {
- return (void*)::GetProcAddress((HMODULE)lib, name);
-}
-
inline bool os::uses_stack_guard_pages() {
return true;
}
@@ -63,18 +53,6 @@ inline void os::map_stack_shadow_pages(address sp) {
inline bool os::numa_has_static_binding() { return true; }
inline bool os::numa_has_group_homing() { return false; }
-inline size_t os::write(int fd, const void *buf, unsigned int nBytes) {
- return ::write(fd, buf, nBytes);
-}
-
-inline int os::close(int fd) {
- return ::close(fd);
-}
-
-inline void os::exit(int num) {
- win32::exit_process_or_thread(win32::EPT_PROCESS, num);
-}
-
// Platform Mutex/Monitor implementation
inline os::PlatformMutex::PlatformMutex() {
diff --git a/src/hotspot/os/windows/vmError_windows.cpp b/src/hotspot/os/windows/vmError_windows.cpp
index 1a0a947a749..853a148d9b1 100644
--- a/src/hotspot/os/windows/vmError_windows.cpp
+++ b/src/hotspot/os/windows/vmError_windows.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "memory/metaspaceShared.hpp"
+#include "cds/metaspaceShared.hpp"
#include "runtime/arguments.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
diff --git a/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.s b/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.S
similarity index 100%
rename from src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.s
rename to src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.S
diff --git a/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.cpp
index 054f07f9f6d..d8b1a0b20ef 100644
--- a/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.cpp
+++ b/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.cpp
@@ -25,7 +25,7 @@
*/
#include "precompiled.hpp"
-#include "memory/metaspaceShared.hpp"
+#include "cds/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
diff --git a/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s b/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.S
similarity index 100%
rename from src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s
rename to src/hotspot/os_cpu/bsd_x86/bsd_x86_32.S
diff --git a/src/hotspot/os_cpu/bsd_x86/bsd_x86_64.s b/src/hotspot/os_cpu/bsd_x86/bsd_x86_64.S
similarity index 100%
rename from src/hotspot/os_cpu/bsd_x86/bsd_x86_64.s
rename to src/hotspot/os_cpu/bsd_x86/bsd_x86_64.S
diff --git a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp
index 4564ddc5b92..8d580c79b22 100644
--- a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp
+++ b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
diff --git a/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.s b/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.S
similarity index 100%
rename from src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.s
rename to src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.S
diff --git a/src/hotspot/os_cpu/linux_aarch64/threadLS_linux_aarch64.s b/src/hotspot/os_cpu/linux_aarch64/threadLS_linux_aarch64.S
similarity index 100%
rename from src/hotspot/os_cpu/linux_aarch64/threadLS_linux_aarch64.s
rename to src/hotspot/os_cpu/linux_aarch64/threadLS_linux_aarch64.S
diff --git a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp
index 702d6f6dcd5..fca22629bc5 100644
--- a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp
@@ -24,7 +24,6 @@
*/
#include "precompiled.hpp"
-#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
diff --git a/src/hotspot/os_cpu/linux_arm/linux_arm_32.s b/src/hotspot/os_cpu/linux_arm/linux_arm_32.S
similarity index 100%
rename from src/hotspot/os_cpu/linux_arm/linux_arm_32.s
rename to src/hotspot/os_cpu/linux_arm/linux_arm_32.S
diff --git a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp
index 66cb5e7f845..23aef4f2e33 100644
--- a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp
+++ b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp
@@ -27,7 +27,6 @@
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/universe.hpp"
#include "runtime/frame.inline.hpp"
diff --git a/src/hotspot/os_cpu/linux_x86/linux_x86_32.S b/src/hotspot/os_cpu/linux_x86/linux_x86_32.S
new file mode 100644
index 00000000000..620179d99dd
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/linux_x86_32.S
@@ -0,0 +1,647 @@
+#
+# Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+
+ # NOTE WELL! The _Copy functions are called directly
+ # from server-compiler-generated code via CallLeafNoFP,
+ # which means that they *must* either not use floating
+ # point or use it in the same manner as does the server
+ # compiler.
+
+ .globl _Copy_conjoint_bytes
+ .globl _Copy_arrayof_conjoint_bytes
+ .globl _Copy_conjoint_jshorts_atomic
+ .globl _Copy_arrayof_conjoint_jshorts
+ .globl _Copy_conjoint_jints_atomic
+ .globl _Copy_arrayof_conjoint_jints
+ .globl _Copy_conjoint_jlongs_atomic
+ .globl _mmx_Copy_arrayof_conjoint_jshorts
+
+ .globl _Atomic_cmpxchg_long
+ .globl _Atomic_move_long
+
+ .text
+
+ .globl SpinPause
+ .type SpinPause,@function
+ .p2align 4,,15
+SpinPause:
+ rep
+ nop
+ movl $1, %eax
+ ret
+
+ # Support for void Copy::conjoint_bytes(void* from,
+ # void* to,
+ # size_t count)
+ .p2align 4,,15
+ .type _Copy_conjoint_bytes,@function
+_Copy_conjoint_bytes:
+ pushl %esi
+ movl 4+12(%esp),%ecx # count
+ pushl %edi
+ movl 8+ 4(%esp),%esi # from
+ movl 8+ 8(%esp),%edi # to
+ cmpl %esi,%edi
+ leal -1(%esi,%ecx),%eax # from + count - 1
+ jbe cb_CopyRight
+ cmpl %eax,%edi
+ jbe cb_CopyLeft
+ # copy from low to high
+cb_CopyRight:
+ cmpl $3,%ecx
+ jbe 5f # <= 3 bytes
+ # align source address at dword address boundary
+ movl %ecx,%eax # original count
+ movl $4,%ecx
+ subl %esi,%ecx
+ andl $3,%ecx # prefix byte count
+ jz 1f # no prefix
+ subl %ecx,%eax # byte count less prefix
+ # copy prefix
+ subl %esi,%edi
+0: movb (%esi),%dl
+ movb %dl,(%edi,%esi,1)
+ addl $1,%esi
+ subl $1,%ecx
+ jnz 0b
+ addl %esi,%edi
+1: movl %eax,%ecx # byte count less prefix
+ shrl $2,%ecx # dword count
+ jz 4f # no dwords to move
+ cmpl $32,%ecx
+ jbe 2f # <= 32 dwords
+ # copy aligned dwords
+ rep; smovl
+ jmp 4f
+ # copy aligned dwords
+2: subl %esi,%edi
+ .p2align 4,,15
+3: movl (%esi),%edx
+ movl %edx,(%edi,%esi,1)
+ addl $4,%esi
+ subl $1,%ecx
+ jnz 3b
+ addl %esi,%edi
+4: movl %eax,%ecx # byte count less prefix
+5: andl $3,%ecx # suffix byte count
+ jz 7f # no suffix
+ # copy suffix
+ xorl %eax,%eax
+6: movb (%esi,%eax,1),%dl
+ movb %dl,(%edi,%eax,1)
+ addl $1,%eax
+ subl $1,%ecx
+ jnz 6b
+7: popl %edi
+ popl %esi
+ ret
+ # copy from high to low
+cb_CopyLeft:
+ std
+ leal -4(%edi,%ecx),%edi # to + count - 4
+ movl %eax,%esi # from + count - 1
+ movl %ecx,%eax
+ subl $3,%esi # from + count - 4
+ cmpl $3,%ecx
+ jbe 5f # <= 3 bytes
+1: shrl $2,%ecx # dword count
+ jz 4f # no dwords to move
+ cmpl $32,%ecx
+ ja 3f # > 32 dwords
+ # copy dwords, aligned or not
+ subl %esi,%edi
+ .p2align 4,,15
+2: movl (%esi),%edx
+ movl %edx,(%edi,%esi,1)
+ subl $4,%esi
+ subl $1,%ecx
+ jnz 2b
+ addl %esi,%edi
+ jmp 4f
+ # copy dwords, aligned or not
+3: rep; smovl
+4: movl %eax,%ecx # byte count
+5: andl $3,%ecx # suffix byte count
+ jz 7f # no suffix
+ # copy suffix
+ subl %esi,%edi
+ addl $3,%esi
+6: movb (%esi),%dl
+ movb %dl,(%edi,%esi,1)
+ subl $1,%esi
+ subl $1,%ecx
+ jnz 6b
+7: cld
+ popl %edi
+ popl %esi
+ ret
+
+ # Support for void Copy::arrayof_conjoint_bytes(void* from,
+ # void* to,
+ # size_t count)
+ #
+ # Same as _Copy_conjoint_bytes, except no source alignment check.
+ .p2align 4,,15
+ .type _Copy_arrayof_conjoint_bytes,@function
+_Copy_arrayof_conjoint_bytes:
+ pushl %esi
+ movl 4+12(%esp),%ecx # count
+ pushl %edi
+ movl 8+ 4(%esp),%esi # from
+ movl 8+ 8(%esp),%edi # to
+ cmpl %esi,%edi
+ leal -1(%esi,%ecx),%eax # from + count - 1
+ jbe acb_CopyRight
+ cmpl %eax,%edi
+ jbe acb_CopyLeft
+ # copy from low to high
+acb_CopyRight:
+ cmpl $3,%ecx
+ jbe 5f
+1: movl %ecx,%eax
+ shrl $2,%ecx
+ jz 4f
+ cmpl $32,%ecx
+ ja 3f
+ # copy aligned dwords
+ subl %esi,%edi
+ .p2align 4,,15
+2: movl (%esi),%edx
+ movl %edx,(%edi,%esi,1)
+ addl $4,%esi
+ subl $1,%ecx
+ jnz 2b
+ addl %esi,%edi
+ jmp 4f
+ # copy aligned dwords
+3: rep; smovl
+4: movl %eax,%ecx
+5: andl $3,%ecx
+ jz 7f
+ # copy suffix
+ xorl %eax,%eax
+6: movb (%esi,%eax,1),%dl
+ movb %dl,(%edi,%eax,1)
+ addl $1,%eax
+ subl $1,%ecx
+ jnz 6b
+7: popl %edi
+ popl %esi
+ ret
+acb_CopyLeft:
+ std
+ leal -4(%edi,%ecx),%edi # to + count - 4
+ movl %eax,%esi # from + count - 1
+ movl %ecx,%eax
+ subl $3,%esi # from + count - 4
+ cmpl $3,%ecx
+ jbe 5f
+1: shrl $2,%ecx
+ jz 4f
+ cmpl $32,%ecx
+ jbe 2f # <= 32 dwords
+ rep; smovl
+ jmp 4f
+ .space 8
+2: subl %esi,%edi
+ .p2align 4,,15
+3: movl (%esi),%edx
+ movl %edx,(%edi,%esi,1)
+ subl $4,%esi
+ subl $1,%ecx
+ jnz 3b
+ addl %esi,%edi
+4: movl %eax,%ecx
+5: andl $3,%ecx
+ jz 7f
+ subl %esi,%edi
+ addl $3,%esi
+6: movb (%esi),%dl
+ movb %dl,(%edi,%esi,1)
+ subl $1,%esi
+ subl $1,%ecx
+ jnz 6b
+7: cld
+ popl %edi
+ popl %esi
+ ret
+
+ # Support for void Copy::conjoint_jshorts_atomic(void* from,
+ # void* to,
+ # size_t count)
+ .p2align 4,,15
+ .type _Copy_conjoint_jshorts_atomic,@function
+_Copy_conjoint_jshorts_atomic:
+ pushl %esi
+ movl 4+12(%esp),%ecx # count
+ pushl %edi
+ movl 8+ 4(%esp),%esi # from
+ movl 8+ 8(%esp),%edi # to
+ cmpl %esi,%edi
+ leal -2(%esi,%ecx,2),%eax # from + count*2 - 2
+ jbe cs_CopyRight
+ cmpl %eax,%edi
+ jbe cs_CopyLeft
+ # copy from low to high
+cs_CopyRight:
+ # align source address at dword address boundary
+ movl %esi,%eax # original from
+ andl $3,%eax # either 0 or 2
+ jz 1f # no prefix
+ # copy prefix
+ subl $1,%ecx
+ jl 5f # zero count
+ movw (%esi),%dx
+ movw %dx,(%edi)
+ addl %eax,%esi # %eax == 2
+ addl %eax,%edi
+1: movl %ecx,%eax # word count less prefix
+ sarl %ecx # dword count
+ jz 4f # no dwords to move
+ cmpl $32,%ecx
+ jbe 2f # <= 32 dwords
+ # copy aligned dwords
+ rep; smovl
+ jmp 4f
+ # copy aligned dwords
+2: subl %esi,%edi
+ .p2align 4,,15
+3: movl (%esi),%edx
+ movl %edx,(%edi,%esi,1)
+ addl $4,%esi
+ subl $1,%ecx
+ jnz 3b
+ addl %esi,%edi
+4: andl $1,%eax # suffix count
+ jz 5f # no suffix
+ # copy suffix
+ movw (%esi),%dx
+ movw %dx,(%edi)
+5: popl %edi
+ popl %esi
+ ret
+ # copy from high to low
+cs_CopyLeft:
+ std
+ leal -4(%edi,%ecx,2),%edi # to + count*2 - 4
+ movl %eax,%esi # from + count*2 - 2
+ movl %ecx,%eax
+ subl $2,%esi # from + count*2 - 4
+1: sarl %ecx # dword count
+ jz 4f # no dwords to move
+ cmpl $32,%ecx
+ ja 3f # > 32 dwords
+ subl %esi,%edi
+ .p2align 4,,15
+2: movl (%esi),%edx
+ movl %edx,(%edi,%esi,1)
+ subl $4,%esi
+ subl $1,%ecx
+ jnz 2b
+ addl %esi,%edi
+ jmp 4f
+3: rep; smovl
+4: andl $1,%eax # suffix count
+ jz 5f # no suffix
+ # copy suffix
+ addl $2,%esi
+ addl $2,%edi
+ movw (%esi),%dx
+ movw %dx,(%edi)
+5: cld
+ popl %edi
+ popl %esi
+ ret
+
+ # Support for void Copy::arrayof_conjoint_jshorts(void* from,
+ # void* to,
+ # size_t count)
+ .p2align 4,,15
+ .type _Copy_arrayof_conjoint_jshorts,@function
+_Copy_arrayof_conjoint_jshorts:
+ pushl %esi
+ movl 4+12(%esp),%ecx # count
+ pushl %edi
+ movl 8+ 4(%esp),%esi # from
+ movl 8+ 8(%esp),%edi # to
+ cmpl %esi,%edi
+ leal -2(%esi,%ecx,2),%eax # from + count*2 - 2
+ jbe acs_CopyRight
+ cmpl %eax,%edi
+ jbe acs_CopyLeft
+acs_CopyRight:
+ movl %ecx,%eax # word count
+ sarl %ecx # dword count
+ jz 4f # no dwords to move
+ cmpl $32,%ecx
+ jbe 2f # <= 32 dwords
+ # copy aligned dwords
+ rep; smovl
+ jmp 4f
+ # copy aligned dwords
+ .space 5
+2: subl %esi,%edi
+ .p2align 4,,15
+3: movl (%esi),%edx
+ movl %edx,(%edi,%esi,1)
+ addl $4,%esi
+ subl $1,%ecx
+ jnz 3b
+ addl %esi,%edi
+4: andl $1,%eax # suffix count
+ jz 5f # no suffix
+ # copy suffix
+ movw (%esi),%dx
+ movw %dx,(%edi)
+5: popl %edi
+ popl %esi
+ ret
+acs_CopyLeft:
+ std
+ leal -4(%edi,%ecx,2),%edi # to + count*2 - 4
+ movl %eax,%esi # from + count*2 - 2
+ movl %ecx,%eax
+ subl $2,%esi # from + count*2 - 4
+ sarl %ecx # dword count
+ jz 4f # no dwords to move
+ cmpl $32,%ecx
+ ja 3f # > 32 dwords
+ subl %esi,%edi
+ .p2align 4,,15
+2: movl (%esi),%edx
+ movl %edx,(%edi,%esi,1)
+ subl $4,%esi
+ subl $1,%ecx
+ jnz 2b
+ addl %esi,%edi
+ jmp 4f
+3: rep; smovl
+4: andl $1,%eax # suffix count
+ jz 5f # no suffix
+ # copy suffix
+ addl $2,%esi
+ addl $2,%edi
+ movw (%esi),%dx
+ movw %dx,(%edi)
+5: cld
+ popl %edi
+ popl %esi
+ ret
+
+ # Support for void Copy::conjoint_jints_atomic(void* from,
+ # void* to,
+ # size_t count)
+ # Equivalent to
+ # arrayof_conjoint_jints
+ .p2align 4,,15
+ .type _Copy_conjoint_jints_atomic,@function
+ .type _Copy_arrayof_conjoint_jints,@function
+_Copy_conjoint_jints_atomic:
+_Copy_arrayof_conjoint_jints:
+ pushl %esi
+ movl 4+12(%esp),%ecx # count
+ pushl %edi
+ movl 8+ 4(%esp),%esi # from
+ movl 8+ 8(%esp),%edi # to
+ cmpl %esi,%edi
+ leal -4(%esi,%ecx,4),%eax # from + count*4 - 4
+ jbe ci_CopyRight
+ cmpl %eax,%edi
+ jbe ci_CopyLeft
+ci_CopyRight:
+ cmpl $32,%ecx
+ jbe 2f # <= 32 dwords
+ rep; smovl
+ popl %edi
+ popl %esi
+ ret
+ .space 10
+2: subl %esi,%edi
+ jmp 4f
+ .p2align 4,,15
+3: movl (%esi),%edx
+ movl %edx,(%edi,%esi,1)
+ addl $4,%esi
+4: subl $1,%ecx
+ jge 3b
+ popl %edi
+ popl %esi
+ ret
+ci_CopyLeft:
+ std
+ leal -4(%edi,%ecx,4),%edi # to + count*4 - 4
+ cmpl $32,%ecx
+ ja 4f # > 32 dwords
+ subl %eax,%edi # eax == from + count*4 - 4
+ jmp 3f
+ .p2align 4,,15
+2: movl (%eax),%edx
+ movl %edx,(%edi,%eax,1)
+ subl $4,%eax
+3: subl $1,%ecx
+ jge 2b
+ cld
+ popl %edi
+ popl %esi
+ ret
+4: movl %eax,%esi # from + count*4 - 4
+ rep; smovl
+ cld
+ popl %edi
+ popl %esi
+ ret
+
+ # Support for void Copy::conjoint_jlongs_atomic(jlong* from,
+ # jlong* to,
+ # size_t count)
+ #
+ # 32-bit
+ #
+ # count treated as signed
+ /*
+ #
+ # if (from > to) {
+ # while (--count >= 0) {
+ # *to++ = *from++;
+ # }
+ # } else {
+ # while (--count >= 0) {
+ # to[count] = from[count];
+ # }
+ # }
+ */
+ .p2align 4,,15
+ .type _Copy_conjoint_jlongs_atomic,@function
+_Copy_conjoint_jlongs_atomic:
+ movl 4+8(%esp),%ecx # count
+ movl 4+0(%esp),%eax # from
+ movl 4+4(%esp),%edx # to
+ cmpl %eax,%edx
+ jae cla_CopyLeft
+cla_CopyRight:
+ subl %eax,%edx
+ jmp 2f
+ .p2align 4,,15
+1: fildll (%eax)
+ fistpll (%edx,%eax,1)
+ addl $8,%eax
+2: subl $1,%ecx
+ jge 1b
+ ret
+ .p2align 4,,15
+3: fildll (%eax,%ecx,8)
+ fistpll (%edx,%ecx,8)
+cla_CopyLeft:
+ subl $1,%ecx
+ jge 3b
+ ret
+
+ # Support for void Copy::arrayof_conjoint_jshorts(void* from,
+ # void* to,
+ # size_t count)
+ .p2align 4,,15
+ .type _mmx_Copy_arrayof_conjoint_jshorts,@function
+_mmx_Copy_arrayof_conjoint_jshorts:
+ pushl %esi
+ movl 4+12(%esp),%ecx
+ pushl %edi
+ movl 8+ 4(%esp),%esi
+ movl 8+ 8(%esp),%edi
+ cmpl %esi,%edi
+ leal -2(%esi,%ecx,2),%eax
+ jbe mmx_acs_CopyRight
+ cmpl %eax,%edi
+ jbe mmx_acs_CopyLeft
+mmx_acs_CopyRight:
+ movl %ecx,%eax
+ sarl %ecx
+ je 5f
+ cmpl $33,%ecx
+ jae 3f
+1: subl %esi,%edi
+ .p2align 4,,15
+2: movl (%esi),%edx
+ movl %edx,(%edi,%esi,1)
+ addl $4,%esi
+ subl $1,%ecx
+ jnz 2b
+ addl %esi,%edi
+ jmp 5f
+3: smovl # align to 8 bytes, we know we are 4 byte aligned to start
+ subl $1,%ecx
+4: .p2align 4,,15
+ movq 0(%esi),%mm0
+ addl $64,%edi
+ movq 8(%esi),%mm1
+ subl $16,%ecx
+ movq 16(%esi),%mm2
+ movq %mm0,-64(%edi)
+ movq 24(%esi),%mm0
+ movq %mm1,-56(%edi)
+ movq 32(%esi),%mm1
+ movq %mm2,-48(%edi)
+ movq 40(%esi),%mm2
+ movq %mm0,-40(%edi)
+ movq 48(%esi),%mm0
+ movq %mm1,-32(%edi)
+ movq 56(%esi),%mm1
+ movq %mm2,-24(%edi)
+ movq %mm0,-16(%edi)
+ addl $64,%esi
+ movq %mm1,-8(%edi)
+ cmpl $16,%ecx
+ jge 4b
+ emms
+ testl %ecx,%ecx
+ ja 1b
+5: andl $1,%eax
+ je 7f
+6: movw (%esi),%dx
+ movw %dx,(%edi)
+7: popl %edi
+ popl %esi
+ ret
+mmx_acs_CopyLeft:
+ std
+ leal -4(%edi,%ecx,2),%edi
+ movl %eax,%esi
+ movl %ecx,%eax
+ subl $2,%esi
+ sarl %ecx
+ je 4f
+ cmpl $32,%ecx
+ ja 3f
+ subl %esi,%edi
+ .p2align 4,,15
+2: movl (%esi),%edx
+ movl %edx,(%edi,%esi,1)
+ subl $4,%esi
+ subl $1,%ecx
+ jnz 2b
+ addl %esi,%edi
+ jmp 4f
+3: rep; smovl
+4: andl $1,%eax
+ je 6f
+ addl $2,%esi
+ addl $2,%edi
+5: movw (%esi),%dx
+ movw %dx,(%edi)
+6: cld
+ popl %edi
+ popl %esi
+ ret
+
+
+ # Support for jlong Atomic::cmpxchg(volatile jlong* dest,
+ # jlong compare_value,
+ # jlong exchange_value)
+ #
+ .p2align 4,,15
+ .type _Atomic_cmpxchg_long,@function
+_Atomic_cmpxchg_long:
+ # 8(%esp) : return PC
+ pushl %ebx # 4(%esp) : old %ebx
+ pushl %edi # 0(%esp) : old %edi
+ movl 12(%esp), %ebx # 12(%esp) : exchange_value (low)
+ movl 16(%esp), %ecx # 16(%esp) : exchange_value (high)
+ movl 24(%esp), %eax # 24(%esp) : compare_value (low)
+ movl 28(%esp), %edx # 28(%esp) : compare_value (high)
+ movl 20(%esp), %edi # 20(%esp) : dest
+ lock cmpxchg8b (%edi)
+ popl %edi
+ popl %ebx
+ ret
+
+
+ # Support for jlong Atomic::load and Atomic::store.
+ # void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst)
+ .p2align 4,,15
+ .type _Atomic_move_long,@function
+_Atomic_move_long:
+ movl 4(%esp), %eax # src
+ fildll (%eax)
+ movl 8(%esp), %eax # dest
+ fistpll (%eax)
+ ret
diff --git a/src/hotspot/os_cpu/linux_x86/linux_x86_32.s b/src/hotspot/os_cpu/linux_x86/linux_x86_32.s
deleted file mode 100644
index b1961848451..00000000000
--- a/src/hotspot/os_cpu/linux_x86/linux_x86_32.s
+++ /dev/null
@@ -1,645 +0,0 @@
-#
-# Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-
- # NOTE WELL! The _Copy functions are called directly
- # from server-compiler-generated code via CallLeafNoFP,
- # which means that they *must* either not use floating
- # point or use it in the same manner as does the server
- # compiler.
-
- .globl _Copy_conjoint_bytes
- .globl _Copy_arrayof_conjoint_bytes
- .globl _Copy_conjoint_jshorts_atomic
- .globl _Copy_arrayof_conjoint_jshorts
- .globl _Copy_conjoint_jints_atomic
- .globl _Copy_arrayof_conjoint_jints
- .globl _Copy_conjoint_jlongs_atomic
- .globl _mmx_Copy_arrayof_conjoint_jshorts
-
- .globl _Atomic_cmpxchg_long
- .globl _Atomic_move_long
-
- .text
-
- .globl SpinPause
- .type SpinPause,@function
- .p2align 4,,15
-SpinPause:
- rep
- nop
- movl $1, %eax
- ret
-
- # Support for void Copy::conjoint_bytes(void* from,
- # void* to,
- # size_t count)
- .p2align 4,,15
- .type _Copy_conjoint_bytes,@function
-_Copy_conjoint_bytes:
- pushl %esi
- movl 4+12(%esp),%ecx # count
- pushl %edi
- movl 8+ 4(%esp),%esi # from
- movl 8+ 8(%esp),%edi # to
- cmpl %esi,%edi
- leal -1(%esi,%ecx),%eax # from + count - 1
- jbe cb_CopyRight
- cmpl %eax,%edi
- jbe cb_CopyLeft
- # copy from low to high
-cb_CopyRight:
- cmpl $3,%ecx
- jbe 5f # <= 3 bytes
- # align source address at dword address boundary
- movl %ecx,%eax # original count
- movl $4,%ecx
- subl %esi,%ecx
- andl $3,%ecx # prefix byte count
- jz 1f # no prefix
- subl %ecx,%eax # byte count less prefix
- # copy prefix
- subl %esi,%edi
-0: movb (%esi),%dl
- movb %dl,(%edi,%esi,1)
- addl $1,%esi
- subl $1,%ecx
- jnz 0b
- addl %esi,%edi
-1: movl %eax,%ecx # byte count less prefix
- shrl $2,%ecx # dword count
- jz 4f # no dwords to move
- cmpl $32,%ecx
- jbe 2f # <= 32 dwords
- # copy aligned dwords
- rep; smovl
- jmp 4f
- # copy aligned dwords
-2: subl %esi,%edi
- .p2align 4,,15
-3: movl (%esi),%edx
- movl %edx,(%edi,%esi,1)
- addl $4,%esi
- subl $1,%ecx
- jnz 3b
- addl %esi,%edi
-4: movl %eax,%ecx # byte count less prefix
-5: andl $3,%ecx # suffix byte count
- jz 7f # no suffix
- # copy suffix
- xorl %eax,%eax
-6: movb (%esi,%eax,1),%dl
- movb %dl,(%edi,%eax,1)
- addl $1,%eax
- subl $1,%ecx
- jnz 6b
-7: popl %edi
- popl %esi
- ret
- # copy from high to low
-cb_CopyLeft:
- std
- leal -4(%edi,%ecx),%edi # to + count - 4
- movl %eax,%esi # from + count - 1
- movl %ecx,%eax
- subl $3,%esi # from + count - 4
- cmpl $3,%ecx
- jbe 5f # <= 3 bytes
-1: shrl $2,%ecx # dword count
- jz 4f # no dwords to move
- cmpl $32,%ecx
- ja 3f # > 32 dwords
- # copy dwords, aligned or not
- subl %esi,%edi
- .p2align 4,,15
-2: movl (%esi),%edx
- movl %edx,(%edi,%esi,1)
- subl $4,%esi
- subl $1,%ecx
- jnz 2b
- addl %esi,%edi
- jmp 4f
- # copy dwords, aligned or not
-3: rep; smovl
-4: movl %eax,%ecx # byte count
-5: andl $3,%ecx # suffix byte count
- jz 7f # no suffix
- # copy suffix
- subl %esi,%edi
- addl $3,%esi
-6: movb (%esi),%dl
- movb %dl,(%edi,%esi,1)
- subl $1,%esi
- subl $1,%ecx
- jnz 6b
-7: cld
- popl %edi
- popl %esi
- ret
-
- # Support for void Copy::arrayof_conjoint_bytes(void* from,
- # void* to,
- # size_t count)
- #
- # Same as _Copy_conjoint_bytes, except no source alignment check.
- .p2align 4,,15
- .type _Copy_arrayof_conjoint_bytes,@function
-_Copy_arrayof_conjoint_bytes:
- pushl %esi
- movl 4+12(%esp),%ecx # count
- pushl %edi
- movl 8+ 4(%esp),%esi # from
- movl 8+ 8(%esp),%edi # to
- cmpl %esi,%edi
- leal -1(%esi,%ecx),%eax # from + count - 1
- jbe acb_CopyRight
- cmpl %eax,%edi
- jbe acb_CopyLeft
- # copy from low to high
-acb_CopyRight:
- cmpl $3,%ecx
- jbe 5f
-1: movl %ecx,%eax
- shrl $2,%ecx
- jz 4f
- cmpl $32,%ecx
- ja 3f
- # copy aligned dwords
- subl %esi,%edi
- .p2align 4,,15
-2: movl (%esi),%edx
- movl %edx,(%edi,%esi,1)
- addl $4,%esi
- subl $1,%ecx
- jnz 2b
- addl %esi,%edi
- jmp 4f
- # copy aligned dwords
-3: rep; smovl
-4: movl %eax,%ecx
-5: andl $3,%ecx
- jz 7f
- # copy suffix
- xorl %eax,%eax
-6: movb (%esi,%eax,1),%dl
- movb %dl,(%edi,%eax,1)
- addl $1,%eax
- subl $1,%ecx
- jnz 6b
-7: popl %edi
- popl %esi
- ret
-acb_CopyLeft:
- std
- leal -4(%edi,%ecx),%edi # to + count - 4
- movl %eax,%esi # from + count - 1
- movl %ecx,%eax
- subl $3,%esi # from + count - 4
- cmpl $3,%ecx
- jbe 5f
-1: shrl $2,%ecx
- jz 4f
- cmpl $32,%ecx
- jbe 2f # <= 32 dwords
- rep; smovl
- jmp 4f
- .space 8
-2: subl %esi,%edi
- .p2align 4,,15
-3: movl (%esi),%edx
- movl %edx,(%edi,%esi,1)
- subl $4,%esi
- subl $1,%ecx
- jnz 3b
- addl %esi,%edi
-4: movl %eax,%ecx
-5: andl $3,%ecx
- jz 7f
- subl %esi,%edi
- addl $3,%esi
-6: movb (%esi),%dl
- movb %dl,(%edi,%esi,1)
- subl $1,%esi
- subl $1,%ecx
- jnz 6b
-7: cld
- popl %edi
- popl %esi
- ret
-
- # Support for void Copy::conjoint_jshorts_atomic(void* from,
- # void* to,
- # size_t count)
- .p2align 4,,15
- .type _Copy_conjoint_jshorts_atomic,@function
-_Copy_conjoint_jshorts_atomic:
- pushl %esi
- movl 4+12(%esp),%ecx # count
- pushl %edi
- movl 8+ 4(%esp),%esi # from
- movl 8+ 8(%esp),%edi # to
- cmpl %esi,%edi
- leal -2(%esi,%ecx,2),%eax # from + count*2 - 2
- jbe cs_CopyRight
- cmpl %eax,%edi
- jbe cs_CopyLeft
- # copy from low to high
-cs_CopyRight:
- # align source address at dword address boundary
- movl %esi,%eax # original from
- andl $3,%eax # either 0 or 2
- jz 1f # no prefix
- # copy prefix
- subl $1,%ecx
- jl 5f # zero count
- movw (%esi),%dx
- movw %dx,(%edi)
- addl %eax,%esi # %eax == 2
- addl %eax,%edi
-1: movl %ecx,%eax # word count less prefix
- sarl %ecx # dword count
- jz 4f # no dwords to move
- cmpl $32,%ecx
- jbe 2f # <= 32 dwords
- # copy aligned dwords
- rep; smovl
- jmp 4f
- # copy aligned dwords
-2: subl %esi,%edi
- .p2align 4,,15
-3: movl (%esi),%edx
- movl %edx,(%edi,%esi,1)
- addl $4,%esi
- subl $1,%ecx
- jnz 3b
- addl %esi,%edi
-4: andl $1,%eax # suffix count
- jz 5f # no suffix
- # copy suffix
- movw (%esi),%dx
- movw %dx,(%edi)
-5: popl %edi
- popl %esi
- ret
- # copy from high to low
-cs_CopyLeft:
- std
- leal -4(%edi,%ecx,2),%edi # to + count*2 - 4
- movl %eax,%esi # from + count*2 - 2
- movl %ecx,%eax
- subl $2,%esi # from + count*2 - 4
-1: sarl %ecx # dword count
- jz 4f # no dwords to move
- cmpl $32,%ecx
- ja 3f # > 32 dwords
- subl %esi,%edi
- .p2align 4,,15
-2: movl (%esi),%edx
- movl %edx,(%edi,%esi,1)
- subl $4,%esi
- subl $1,%ecx
- jnz 2b
- addl %esi,%edi
- jmp 4f
-3: rep; smovl
-4: andl $1,%eax # suffix count
- jz 5f # no suffix
- # copy suffix
- addl $2,%esi
- addl $2,%edi
- movw (%esi),%dx
- movw %dx,(%edi)
-5: cld
- popl %edi
- popl %esi
- ret
-
- # Support for void Copy::arrayof_conjoint_jshorts(void* from,
- # void* to,
- # size_t count)
- .p2align 4,,15
- .type _Copy_arrayof_conjoint_jshorts,@function
-_Copy_arrayof_conjoint_jshorts:
- pushl %esi
- movl 4+12(%esp),%ecx # count
- pushl %edi
- movl 8+ 4(%esp),%esi # from
- movl 8+ 8(%esp),%edi # to
- cmpl %esi,%edi
- leal -2(%esi,%ecx,2),%eax # from + count*2 - 2
- jbe acs_CopyRight
- cmpl %eax,%edi
- jbe acs_CopyLeft
-acs_CopyRight:
- movl %ecx,%eax # word count
- sarl %ecx # dword count
- jz 4f # no dwords to move
- cmpl $32,%ecx
- jbe 2f # <= 32 dwords
- # copy aligned dwords
- rep; smovl
- jmp 4f
- # copy aligned dwords
- .space 5
-2: subl %esi,%edi
- .p2align 4,,15
-3: movl (%esi),%edx
- movl %edx,(%edi,%esi,1)
- addl $4,%esi
- subl $1,%ecx
- jnz 3b
- addl %esi,%edi
-4: andl $1,%eax # suffix count
- jz 5f # no suffix
- # copy suffix
- movw (%esi),%dx
- movw %dx,(%edi)
-5: popl %edi
- popl %esi
- ret
-acs_CopyLeft:
- std
- leal -4(%edi,%ecx,2),%edi # to + count*2 - 4
- movl %eax,%esi # from + count*2 - 2
- movl %ecx,%eax
- subl $2,%esi # from + count*2 - 4
- sarl %ecx # dword count
- jz 4f # no dwords to move
- cmpl $32,%ecx
- ja 3f # > 32 dwords
- subl %esi,%edi
- .p2align 4,,15
-2: movl (%esi),%edx
- movl %edx,(%edi,%esi,1)
- subl $4,%esi
- subl $1,%ecx
- jnz 2b
- addl %esi,%edi
- jmp 4f
-3: rep; smovl
-4: andl $1,%eax # suffix count
- jz 5f # no suffix
- # copy suffix
- addl $2,%esi
- addl $2,%edi
- movw (%esi),%dx
- movw %dx,(%edi)
-5: cld
- popl %edi
- popl %esi
- ret
-
- # Support for void Copy::conjoint_jints_atomic(void* from,
- # void* to,
- # size_t count)
- # Equivalent to
- # arrayof_conjoint_jints
- .p2align 4,,15
- .type _Copy_conjoint_jints_atomic,@function
- .type _Copy_arrayof_conjoint_jints,@function
-_Copy_conjoint_jints_atomic:
-_Copy_arrayof_conjoint_jints:
- pushl %esi
- movl 4+12(%esp),%ecx # count
- pushl %edi
- movl 8+ 4(%esp),%esi # from
- movl 8+ 8(%esp),%edi # to
- cmpl %esi,%edi
- leal -4(%esi,%ecx,4),%eax # from + count*4 - 4
- jbe ci_CopyRight
- cmpl %eax,%edi
- jbe ci_CopyLeft
-ci_CopyRight:
- cmpl $32,%ecx
- jbe 2f # <= 32 dwords
- rep; smovl
- popl %edi
- popl %esi
- ret
- .space 10
-2: subl %esi,%edi
- jmp 4f
- .p2align 4,,15
-3: movl (%esi),%edx
- movl %edx,(%edi,%esi,1)
- addl $4,%esi
-4: subl $1,%ecx
- jge 3b
- popl %edi
- popl %esi
- ret
-ci_CopyLeft:
- std
- leal -4(%edi,%ecx,4),%edi # to + count*4 - 4
- cmpl $32,%ecx
- ja 4f # > 32 dwords
- subl %eax,%edi # eax == from + count*4 - 4
- jmp 3f
- .p2align 4,,15
-2: movl (%eax),%edx
- movl %edx,(%edi,%eax,1)
- subl $4,%eax
-3: subl $1,%ecx
- jge 2b
- cld
- popl %edi
- popl %esi
- ret
-4: movl %eax,%esi # from + count*4 - 4
- rep; smovl
- cld
- popl %edi
- popl %esi
- ret
-
- # Support for void Copy::conjoint_jlongs_atomic(jlong* from,
- # jlong* to,
- # size_t count)
- #
- # 32-bit
- #
- # count treated as signed
- #
- # if (from > to) {
- # while (--count >= 0) {
- # *to++ = *from++;
- # }
- # } else {
- # while (--count >= 0) {
- # to[count] = from[count];
- # }
- # }
- .p2align 4,,15
- .type _Copy_conjoint_jlongs_atomic,@function
-_Copy_conjoint_jlongs_atomic:
- movl 4+8(%esp),%ecx # count
- movl 4+0(%esp),%eax # from
- movl 4+4(%esp),%edx # to
- cmpl %eax,%edx
- jae cla_CopyLeft
-cla_CopyRight:
- subl %eax,%edx
- jmp 2f
- .p2align 4,,15
-1: fildll (%eax)
- fistpll (%edx,%eax,1)
- addl $8,%eax
-2: subl $1,%ecx
- jge 1b
- ret
- .p2align 4,,15
-3: fildll (%eax,%ecx,8)
- fistpll (%edx,%ecx,8)
-cla_CopyLeft:
- subl $1,%ecx
- jge 3b
- ret
-
- # Support for void Copy::arrayof_conjoint_jshorts(void* from,
- # void* to,
- # size_t count)
- .p2align 4,,15
- .type _mmx_Copy_arrayof_conjoint_jshorts,@function
-_mmx_Copy_arrayof_conjoint_jshorts:
- pushl %esi
- movl 4+12(%esp),%ecx
- pushl %edi
- movl 8+ 4(%esp),%esi
- movl 8+ 8(%esp),%edi
- cmpl %esi,%edi
- leal -2(%esi,%ecx,2),%eax
- jbe mmx_acs_CopyRight
- cmpl %eax,%edi
- jbe mmx_acs_CopyLeft
-mmx_acs_CopyRight:
- movl %ecx,%eax
- sarl %ecx
- je 5f
- cmpl $33,%ecx
- jae 3f
-1: subl %esi,%edi
- .p2align 4,,15
-2: movl (%esi),%edx
- movl %edx,(%edi,%esi,1)
- addl $4,%esi
- subl $1,%ecx
- jnz 2b
- addl %esi,%edi
- jmp 5f
-3: smovl # align to 8 bytes, we know we are 4 byte aligned to start
- subl $1,%ecx
-4: .p2align 4,,15
- movq 0(%esi),%mm0
- addl $64,%edi
- movq 8(%esi),%mm1
- subl $16,%ecx
- movq 16(%esi),%mm2
- movq %mm0,-64(%edi)
- movq 24(%esi),%mm0
- movq %mm1,-56(%edi)
- movq 32(%esi),%mm1
- movq %mm2,-48(%edi)
- movq 40(%esi),%mm2
- movq %mm0,-40(%edi)
- movq 48(%esi),%mm0
- movq %mm1,-32(%edi)
- movq 56(%esi),%mm1
- movq %mm2,-24(%edi)
- movq %mm0,-16(%edi)
- addl $64,%esi
- movq %mm1,-8(%edi)
- cmpl $16,%ecx
- jge 4b
- emms
- testl %ecx,%ecx
- ja 1b
-5: andl $1,%eax
- je 7f
-6: movw (%esi),%dx
- movw %dx,(%edi)
-7: popl %edi
- popl %esi
- ret
-mmx_acs_CopyLeft:
- std
- leal -4(%edi,%ecx,2),%edi
- movl %eax,%esi
- movl %ecx,%eax
- subl $2,%esi
- sarl %ecx
- je 4f
- cmpl $32,%ecx
- ja 3f
- subl %esi,%edi
- .p2align 4,,15
-2: movl (%esi),%edx
- movl %edx,(%edi,%esi,1)
- subl $4,%esi
- subl $1,%ecx
- jnz 2b
- addl %esi,%edi
- jmp 4f
-3: rep; smovl
-4: andl $1,%eax
- je 6f
- addl $2,%esi
- addl $2,%edi
-5: movw (%esi),%dx
- movw %dx,(%edi)
-6: cld
- popl %edi
- popl %esi
- ret
-
-
- # Support for jlong Atomic::cmpxchg(volatile jlong* dest,
- # jlong compare_value,
- # jlong exchange_value)
- #
- .p2align 4,,15
- .type _Atomic_cmpxchg_long,@function
-_Atomic_cmpxchg_long:
- # 8(%esp) : return PC
- pushl %ebx # 4(%esp) : old %ebx
- pushl %edi # 0(%esp) : old %edi
- movl 12(%esp), %ebx # 12(%esp) : exchange_value (low)
- movl 16(%esp), %ecx # 16(%esp) : exchange_value (high)
- movl 24(%esp), %eax # 24(%esp) : compare_value (low)
- movl 28(%esp), %edx # 28(%esp) : compare_value (high)
- movl 20(%esp), %edi # 20(%esp) : dest
- lock cmpxchg8b (%edi)
- popl %edi
- popl %ebx
- ret
-
-
- # Support for jlong Atomic::load and Atomic::store.
- # void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst)
- .p2align 4,,15
- .type _Atomic_move_long,@function
-_Atomic_move_long:
- movl 4(%esp), %eax # src
- fildll (%eax)
- movl 8(%esp), %eax # dest
- fistpll (%eax)
- ret
diff --git a/src/hotspot/os_cpu/linux_x86/linux_x86_64.s b/src/hotspot/os_cpu/linux_x86/linux_x86_64.S
similarity index 100%
rename from src/hotspot/os_cpu/linux_x86/linux_x86_64.s
rename to src/hotspot/os_cpu/linux_x86/linux_x86_64.S
diff --git a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp
index b030abe5b2b..28982d00f4b 100644
--- a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp
+++ b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
diff --git a/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp b/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp
index 677e810b78c..6512df3e7b9 100644
--- a/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp
+++ b/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
diff --git a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp
index 8cf064e0613..8eed73ec46e 100644
--- a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp
+++ b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
diff --git a/src/hotspot/share/aot/aotCodeHeap.cpp b/src/hotspot/share/aot/aotCodeHeap.cpp
deleted file mode 100644
index 462665002ec..00000000000
--- a/src/hotspot/share/aot/aotCodeHeap.cpp
+++ /dev/null
@@ -1,1116 +0,0 @@
-/*
- * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "jvm_io.h"
-#include "aot/aotCodeHeap.hpp"
-#include "aot/aotLoader.hpp"
-#include "ci/ciUtilities.inline.hpp"
-#include "classfile/javaAssertions.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "classfile/vmClasses.hpp"
-#include "classfile/vmSymbols.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableBarrierSet.hpp"
-#include "gc/shared/gcConfig.hpp"
-#include "gc/shared/tlab_globals.hpp"
-#include "gc/g1/heapRegion.hpp"
-#include "interpreter/abstractInterpreter.hpp"
-#include "jvmci/compilerRuntime.hpp"
-#include "jvmci/jvmciRuntime.hpp"
-#include "logging/log.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/universe.hpp"
-#include "oops/compressedOops.hpp"
-#include "oops/klass.inline.hpp"
-#include "oops/method.inline.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/java.hpp"
-#include "runtime/safepointVerifiers.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/vmOperations.hpp"
-#include "utilities/powerOfTwo.hpp"
-#include "utilities/sizes.hpp"
-
-bool AOTLib::_narrow_oop_shift_initialized = false;
-int AOTLib::_narrow_oop_shift = 0;
-int AOTLib::_narrow_klass_shift = 0;
-
-address AOTLib::load_symbol(const char *name) {
- address symbol = (address) os::dll_lookup(_dl_handle, name);
- if (symbol == NULL) {
- tty->print_cr("Shared file %s error: missing %s", _name, name);
- vm_exit(1);
- }
- return symbol;
-}
-
-Klass* AOTCodeHeap::get_klass_from_got(const char* klass_name, int klass_len, const Method* method) {
- AOTKlassData* klass_data = (AOTKlassData*)_lib->load_symbol(klass_name);
- Klass* k = (Klass*)_klasses_got[klass_data->_got_index];
- if (k == NULL) {
- Thread* thread = Thread::current();
- k = lookup_klass(klass_name, klass_len, method, thread);
- // Note, exceptions are cleared.
- if (k == NULL) {
- fatal("Shared file %s error: klass %s should be resolved already", _lib->name(), klass_name);
- vm_exit(1);
- }
- // Patch now to avoid extra runtime lookup
- _klasses_got[klass_data->_got_index] = k;
- if (k->is_instance_klass()) {
- InstanceKlass* ik = InstanceKlass::cast(k);
- if (ik->is_initialized()) {
- _klasses_got[klass_data->_got_index - 1] = ik;
- }
- }
- }
- return k;
-}
-
-Klass* AOTCodeHeap::lookup_klass(const char* name, int len, const Method* method, Thread* thread) {
- ResourceMark rm(thread);
- assert(method != NULL, "incorrect call parameter");
- methodHandle caller(thread, (Method*)method);
-
- // Use class loader of aot method.
- Handle loader(thread, caller->method_holder()->class_loader());
- Handle protection_domain(thread, caller->method_holder()->protection_domain());
-
- // Ignore wrapping L and ;
- if (name[0] == JVM_SIGNATURE_CLASS) {
- assert(len > 2, "small name %s", name);
- name++;
- len -= 2;
- }
- TempNewSymbol sym = SymbolTable::probe(name, len);
- if (sym == NULL) {
- log_debug(aot, class, resolve)("Probe failed for AOT class %s", name);
- return NULL;
- }
- Klass* k = SystemDictionary::find_instance_or_array_klass(sym, loader, protection_domain);
-
- if (k != NULL) {
- log_info(aot, class, resolve)("%s %s (lookup)", caller->method_holder()->external_name(), k->external_name());
- }
- return k;
-}
-
-void AOTLib::handle_config_error(const char* format, ...) {
- if (PrintAOT) {
- va_list ap;
- va_start(ap, format);
- tty->vprint_cr(format, ap);
- va_end(ap);
- }
- if (UseAOTStrictLoading) {
- vm_exit(1);
- }
- _valid = false;
-}
-
-void AOTLib::verify_flag(bool aot_flag, bool flag, const char* name) {
- if (_valid && aot_flag != flag) {
- handle_config_error("Shared file %s error: %s has different value '%s' from current '%s'", _name, name , (aot_flag ? "true" : "false"), (flag ? "true" : "false"));
- }
-}
-
-void AOTLib::verify_flag(int aot_flag, int flag, const char* name) {
- if (_valid && aot_flag != flag) {
- handle_config_error("Shared file %s error: %s has different value '%d' from current '%d'", _name, name , aot_flag, flag);
- }
-}
-
-void AOTLib::verify_config() {
- GrowableArray* libraries = AOTLoader::libraries();
- for (GrowableArrayIterator lib = libraries->begin(); lib != libraries->end(); ++lib) {
- if ((*lib)->_config == _config) {
- handle_config_error("AOT library %s already loaded.", (*lib)->_name);
- return;
- }
- }
-
- if (_header->_version != AOTHeader::AOT_SHARED_VERSION) {
- handle_config_error("Invalid version of the shared file %s. Expected %d but was %d", _name, _header->_version, AOTHeader::AOT_SHARED_VERSION);
- return;
- }
-
- const char* aot_jvm_version = (const char*)_header + _header->_jvm_version_offset + 2;
- if (strcmp(aot_jvm_version, VM_Version::jre_release_version()) != 0) {
- handle_config_error("JVM version '%s' recorded in the shared file %s does not match current version '%s'", aot_jvm_version, _name, VM_Version::jre_release_version());
- return;
- }
-
- // Debug VM has different layout of runtime and metadata structures
-#ifdef ASSERT
- verify_flag(_config->_debug_VM, true, "Debug VM version");
-#else
- verify_flag(!(_config->_debug_VM), true, "Product VM version");
-#endif
- // Check configuration size
- verify_flag(_config->_config_size, AOTConfiguration::CONFIG_SIZE, "AOT configuration size");
-
- // Check GC
- CollectedHeap::Name gc = (CollectedHeap::Name)_config->_gc;
- if (_valid && !GCConfig::is_gc_selected(gc)) {
- handle_config_error("Shared file %s error: used '%s' is different from current '%s'", _name, GCConfig::hs_err_name(gc), GCConfig::hs_err_name());
- }
-
- // Check flags
- verify_flag(_config->_useCompressedOops, UseCompressedOops, "UseCompressedOops");
- verify_flag(_config->_useCompressedClassPointers, UseCompressedClassPointers, "UseCompressedClassPointers");
- verify_flag(_config->_useTLAB, UseTLAB, "UseTLAB");
- verify_flag(_config->_useBiasedLocking, UseBiasedLocking, "UseBiasedLocking");
- verify_flag(_config->_objectAlignment, ObjectAlignmentInBytes, "ObjectAlignmentInBytes");
- verify_flag(_config->_contendedPaddingWidth, ContendedPaddingWidth, "ContendedPaddingWidth");
- verify_flag(_config->_enableContended, EnableContended, "EnableContended");
- verify_flag(_config->_restrictContended, RestrictContended, "RestrictContended");
-
- // Shifts are static values which initialized by 0 until java heap initialization.
- // AOT libs are loaded before heap initialized so shift values are not set.
- // It is okay since ObjectAlignmentInBytes flag which defines shifts value is set before AOT libs are loaded.
- // Set shifts value based on first AOT library config.
- if (UseCompressedOops && _valid) {
- if (!_narrow_oop_shift_initialized) {
- _narrow_oop_shift = _config->_narrowOopShift;
- if (UseCompressedClassPointers) { // It is set only if UseCompressedOops is set
- _narrow_klass_shift = _config->_narrowKlassShift;
- }
- _narrow_oop_shift_initialized = true;
- } else {
- verify_flag(_config->_narrowOopShift, _narrow_oop_shift, "aot_config->_narrowOopShift");
- if (UseCompressedClassPointers) { // It is set only if UseCompressedOops is set
- verify_flag(_config->_narrowKlassShift, _narrow_klass_shift, "aot_config->_narrowKlassShift");
- }
- }
- }
-}
-
-AOTLib::~AOTLib() {
- os::free((void*) _name);
-}
-
-AOTCodeHeap::~AOTCodeHeap() {
- FREE_C_HEAP_ARRAY(AOTClass, _classes);
- FREE_C_HEAP_ARRAY(CodeToAMethod, _code_to_aot);
-}
-
-AOTLib::AOTLib(void* handle, const char* name, int dso_id) : _valid(true), _dl_handle(handle), _dso_id(dso_id) {
- _name = (const char*) os::strdup(name);
-
- // Verify that VM runs with the same parameters as AOT tool.
- _config = (AOTConfiguration*) load_symbol("A.config");
- _header = (AOTHeader*) load_symbol("A.header");
-
- verify_config();
-
- if (!_valid && PrintAOT) {
- tty->print("%7d ", (int) tty->time_stamp().milliseconds());
- tty->print_cr("%4d skipped %s aot library", _dso_id, _name);
- }
-}
-
-AOTCodeHeap::AOTCodeHeap(AOTLib* lib) :
- CodeHeap("CodeHeap 'AOT'", CodeBlobType::AOT), _lib(lib), _classes(NULL), _code_to_aot(NULL) {
- assert(_lib->is_valid(), "invalid library");
-
- _lib_symbols_initialized = false;
- _aot_id = 0;
-
- _class_count = _lib->header()->_class_count;
- _method_count = _lib->header()->_method_count;
-
- // Collect metaspace info: names -> address in .got section
- _metaspace_names = (const char*) _lib->load_symbol("A.meta.names");
- _method_metadata = (address) _lib->load_symbol("A.meth.metadata");
- _methods_offsets = (address) _lib->load_symbol("A.meth.offsets");
- _klasses_offsets = (address) _lib->load_symbol("A.kls.offsets");
- _dependencies = (address) _lib->load_symbol("A.kls.dependencies");
- _code_space = (address) _lib->load_symbol("A.text");
-
- // First cell is number of elements.
- _klasses_got = (Metadata**) _lib->load_symbol("A.kls.got");
- _klasses_got_size = _lib->header()->_klasses_got_size;
-
- _metadata_got = (Metadata**) _lib->load_symbol("A.meta.got");
- _metadata_got_size = _lib->header()->_metadata_got_size;
-
- _oop_got = (oop*) _lib->load_symbol("A.oop.got");
- _oop_got_size = _lib->header()->_oop_got_size;
-
- // Collect stubs info
- _stubs_offsets = (int*) _lib->load_symbol("A.stubs.offsets");
-
- // code segments table
- _code_segments = (address) _lib->load_symbol("A.code.segments");
-
- // method state
- _method_state = (jlong*) _lib->load_symbol("A.meth.state");
-
- // Create a table for mapping classes
- _classes = NEW_C_HEAP_ARRAY(AOTClass, _class_count, mtCode);
- memset(_classes, 0, _class_count * sizeof(AOTClass));
-
- // Create table for searching AOTCompiledMethod based on pc.
- _code_to_aot = NEW_C_HEAP_ARRAY(CodeToAMethod, _method_count, mtCode);
- memset(_code_to_aot, 0, _method_count * sizeof(CodeToAMethod));
-
- _memory.set_low_boundary((char *)_code_space);
- _memory.set_high_boundary((char *)_code_space);
- _memory.set_low((char *)_code_space);
- _memory.set_high((char *)_code_space);
-
- _segmap.set_low_boundary((char *)_code_segments);
- _segmap.set_low((char *)_code_segments);
-
- _log2_segment_size = exact_log2(_lib->config()->_codeSegmentSize);
-
- // Register aot stubs
- register_stubs();
-
- if (PrintAOT || (PrintCompilation && PrintAOT)) {
- tty->print("%7d ", (int) tty->time_stamp().milliseconds());
- tty->print_cr("%4d loaded %s aot library", _lib->id(), _lib->name());
- }
-}
-
-void AOTCodeHeap::publish_aot(const methodHandle& mh, AOTMethodData* method_data, int code_id) {
- // The method may be explicitly excluded by the user.
- // Or Interpreter uses an intrinsic for this method.
- // Or method has breakpoints.
- if (CompilerOracle::should_exclude(mh) ||
- !AbstractInterpreter::can_be_compiled(mh) ||
- (mh->number_of_breakpoints() > 0)) {
- return;
- }
- // Make sure no break points were set in the method in case of a safepoint
- // in the following code until aot code is registered.
- NoSafepointVerifier nsv;
-
- address code = method_data->_code;
- const char* name = method_data->_name;
- aot_metadata* meta = method_data->_meta;
-
- if (meta->scopes_pcs_begin() == meta->scopes_pcs_end()) {
- // Switch off NoSafepointVerifier because log_info() may cause safepoint
- // and it is fine because aot code will not be registered here.
- PauseNoSafepointVerifier pnsv(&nsv);
-
- // When the AOT compiler compiles something big we fail to generate metadata
- // in CodeInstaller::gather_metadata. In that case the scopes_pcs_begin == scopes_pcs_end.
- // In all successful cases we always have 2 entries of scope pcs.
- log_info(aot, class, resolve)("Failed to load %s (no metadata available)", mh->name_and_sig_as_C_string());
- _code_to_aot[code_id]._state = invalid;
- return;
- }
-
- jlong* state_adr = &_method_state[code_id];
- address metadata_table = method_data->_metadata_table;
- int metadata_size = method_data->_metadata_size;
- assert(code_id < _method_count, "sanity");
- _aot_id++;
-
-#ifdef ASSERT
- if (_aot_id > CIStop || _aot_id < CIStart) {
- // Skip compilation
- return;
- }
-#endif
- // Check one more time.
- if (_code_to_aot[code_id]._state == invalid) {
- return;
- }
- AOTCompiledMethod *aot = new AOTCompiledMethod(code, mh(), meta, metadata_table, metadata_size, state_adr, this, name, code_id, _aot_id);
- assert(_code_to_aot[code_id]._aot == NULL, "should be not initialized");
- _code_to_aot[code_id]._aot = aot; // Should set this first
- if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, in_use) != not_set) {
- _code_to_aot[code_id]._aot = NULL; // Clean
- } else { // success
- // Publish method
-#if COMPILER1_OR_COMPILER2
- mh->set_aot_code(aot);
-#endif
- {
- MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
- Method::set_code(mh, aot);
- }
- if (PrintAOT || (PrintCompilation && PrintAOT)) {
- PauseNoSafepointVerifier pnsv(&nsv); // aot code is registered already
- aot->print_on(tty, NULL);
- }
- // Publish oop only after we are visible to CompiledMethodIterator
- aot->set_oop(mh()->method_holder()->klass_holder());
- }
-}
-
-void AOTCodeHeap::link_klass(const Klass* klass) {
- ResourceMark rm;
- assert(klass != NULL, "Should be given a klass");
- AOTKlassData* klass_data = (AOTKlassData*) os::dll_lookup(_lib->dl_handle(), klass->signature_name());
- if (klass_data != NULL) {
- // Set both GOT cells, resolved and initialized klass pointers.
- // _got_index points to second cell - resolved klass pointer.
- _klasses_got[klass_data->_got_index-1] = (Metadata*)klass; // Initialized
- _klasses_got[klass_data->_got_index ] = (Metadata*)klass; // Resolved
- if (PrintAOT) {
- tty->print_cr("[Found %s in %s]", klass->internal_name(), _lib->name());
- }
- }
-}
-
-void AOTCodeHeap::link_known_klasses() {
- for (int i = T_BOOLEAN; i <= T_CONFLICT; i++) {
- BasicType t = (BasicType)i;
- if (is_java_primitive(t)) {
- const Klass* arr_klass = Universe::typeArrayKlassObj(t);
- link_klass(arr_klass);
- }
- }
- link_klass(vmClasses::Reference_klass());
-}
-
-void AOTCodeHeap::register_stubs() {
- int stubs_count = _stubs_offsets[0]; // contains number
- _stubs_offsets++;
- AOTMethodOffsets* stub_offsets = (AOTMethodOffsets*)_stubs_offsets;
- for (int i = 0; i < stubs_count; ++i) {
- const char* stub_name = _metaspace_names + stub_offsets[i]._name_offset;
- address entry = _code_space + stub_offsets[i]._code_offset;
- aot_metadata* meta = (aot_metadata *) (_method_metadata + stub_offsets[i]._meta_offset);
- address metadata_table = (address)_metadata_got + stub_offsets[i]._metadata_got_offset;
- int metadata_size = stub_offsets[i]._metadata_got_size;
- int code_id = stub_offsets[i]._code_id;
- assert(code_id < _method_count, "sanity");
- jlong* state_adr = &_method_state[code_id];
- int len = Bytes::get_Java_u2((address)stub_name);
- stub_name += 2;
- char* full_name = NEW_C_HEAP_ARRAY(char, len+5, mtCode);
- memcpy(full_name, "AOT ", 4);
- memcpy(full_name+4, stub_name, len);
- full_name[len+4] = 0;
- guarantee(_code_to_aot[code_id]._state != invalid, "stub %s can't be invalidated", full_name);
- AOTCompiledMethod* aot = new AOTCompiledMethod(entry, NULL, meta, metadata_table, metadata_size, state_adr, this, full_name, code_id, i);
- assert(_code_to_aot[code_id]._aot == NULL, "should be not initialized");
- _code_to_aot[code_id]._aot = aot;
- if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, in_use) != not_set) {
- fatal("stab '%s' code state is %d", full_name, _code_to_aot[code_id]._state);
- }
- // Adjust code buffer boundaries only for stubs because they are last in the buffer.
- adjust_boundaries(aot);
- if (PrintAOT && Verbose) {
- aot->print_on(tty, NULL);
- }
- }
-}
-
-#define SET_AOT_GLOBAL_SYMBOL_VALUE(AOTSYMNAME, AOTSYMTYPE, VMSYMVAL) \
- { \
- AOTSYMTYPE * adr = (AOTSYMTYPE *) os::dll_lookup(_lib->dl_handle(), AOTSYMNAME); \
- /* Check for a lookup error */ \
- guarantee(adr != NULL, "AOT Symbol not found %s", AOTSYMNAME); \
- *adr = (AOTSYMTYPE) VMSYMVAL; \
- }
-
-void AOTCodeHeap::link_graal_runtime_symbols() {
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_monitorenter", address, JVMCIRuntime::monitorenter);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_monitorexit", address, JVMCIRuntime::monitorexit);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_log_object", address, JVMCIRuntime::log_object);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_log_printf", address, JVMCIRuntime::log_printf);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_log_primitive", address, JVMCIRuntime::log_primitive);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_instance", address, JVMCIRuntime::new_instance);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_array", address, JVMCIRuntime::new_array);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_multi_array", address, JVMCIRuntime::new_multi_array);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_instance", address, JVMCIRuntime::dynamic_new_instance);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_array", address, JVMCIRuntime::dynamic_new_array);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_instance_or_null", address, JVMCIRuntime::new_instance_or_null);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_array_or_null", address, JVMCIRuntime::new_array_or_null);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_multi_array_or_null", address, JVMCIRuntime::new_multi_array_or_null);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_instance_or_null", address, JVMCIRuntime::dynamic_new_instance_or_null);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_array_or_null", address, JVMCIRuntime::dynamic_new_array_or_null);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_validate_object", address, JVMCIRuntime::validate_object);
-#if INCLUDE_G1GC
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_pre", address, JVMCIRuntime::write_barrier_pre);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_post", address, JVMCIRuntime::write_barrier_post);
-#endif
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_identity_hash_code", address, JVMCIRuntime::identity_hash_code);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_exception_handler_for_pc", address, JVMCIRuntime::exception_handler_for_pc);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_test_deoptimize_call_int", address, JVMCIRuntime::test_deoptimize_call_int);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_throw_and_post_jvmti_exception", address, JVMCIRuntime::throw_and_post_jvmti_exception);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_throw_klass_external_name_exception", address, JVMCIRuntime::throw_klass_external_name_exception);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_throw_class_cast_exception", address, JVMCIRuntime::throw_class_cast_exception);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_vm_message", address, JVMCIRuntime::vm_message);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_vm_error", address, JVMCIRuntime::vm_error);
-}
-
-void AOTCodeHeap::link_shared_runtime_symbols() {
- SET_AOT_GLOBAL_SYMBOL_VALUE("_resolve_static_entry", address, SharedRuntime::get_resolve_static_call_stub());
- SET_AOT_GLOBAL_SYMBOL_VALUE("_resolve_virtual_entry", address, SharedRuntime::get_resolve_virtual_call_stub());
- SET_AOT_GLOBAL_SYMBOL_VALUE("_resolve_opt_virtual_entry", address, SharedRuntime::get_resolve_opt_virtual_call_stub());
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_deopt_blob_unpack", address, SharedRuntime::deopt_blob()->unpack());
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_deopt_blob_unpack_with_exception_in_tls", address, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_deopt_blob_uncommon_trap", address, SharedRuntime::deopt_blob()->uncommon_trap());
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_ic_miss_stub", address, SharedRuntime::get_ic_miss_stub());
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_handle_wrong_method_stub", address, SharedRuntime::get_handle_wrong_method_stub());
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_exception_handler_for_return_address", address, SharedRuntime::exception_handler_for_return_address);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_register_finalizer", address, SharedRuntime::register_finalizer);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_object_notify", address, JVMCIRuntime::object_notify);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_object_notifyAll", address, JVMCIRuntime::object_notifyAll);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_OSR_migration_end", address, SharedRuntime::OSR_migration_end);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_enable_stack_reserved_zone", address, SharedRuntime::enable_stack_reserved_zone);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_dynamic_invoke", address, CompilerRuntime::resolve_dynamic_invoke);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_string_by_symbol", address, CompilerRuntime::resolve_string_by_symbol);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_klass_by_symbol", address, CompilerRuntime::resolve_klass_by_symbol);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_method_by_symbol_and_load_counters", address, CompilerRuntime::resolve_method_by_symbol_and_load_counters);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_initialize_klass_by_symbol", address, CompilerRuntime::initialize_klass_by_symbol);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_invocation_event", address, CompilerRuntime::invocation_event);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_backedge_event", address, CompilerRuntime::backedge_event);
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dpow", address, SharedRuntime::dpow);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dexp", address, SharedRuntime::dexp);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dcos", address, SharedRuntime::dcos);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dsin", address, SharedRuntime::dsin);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dtan", address, SharedRuntime::dtan);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dlog", address, SharedRuntime::dlog);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dlog10", address, SharedRuntime::dlog10);
-}
-
-void AOTCodeHeap::link_stub_routines_symbols() {
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jbyte_arraycopy", address, StubRoutines::_jbyte_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jshort_arraycopy", address, StubRoutines::_jshort_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jint_arraycopy", address, StubRoutines::_jint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jlong_arraycopy", address, StubRoutines::_jlong_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_oop_arraycopy", address, StubRoutines::_oop_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_oop_arraycopy_uninit", address, StubRoutines::_oop_arraycopy_uninit);
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jbyte_disjoint_arraycopy", address, StubRoutines::_jbyte_disjoint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jshort_disjoint_arraycopy", address, StubRoutines::_jshort_disjoint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jint_disjoint_arraycopy", address, StubRoutines::_jint_disjoint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jlong_disjoint_arraycopy", address, StubRoutines::_jlong_disjoint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_oop_disjoint_arraycopy", address, StubRoutines::_oop_disjoint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_oop_disjoint_arraycopy_uninit", address, StubRoutines::_oop_disjoint_arraycopy_uninit);
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jbyte_arraycopy", address, StubRoutines::_arrayof_jbyte_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jshort_arraycopy", address, StubRoutines::_arrayof_jshort_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jint_arraycopy", address, StubRoutines::_arrayof_jint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jlong_arraycopy", address, StubRoutines::_arrayof_jlong_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_oop_arraycopy", address, StubRoutines::_arrayof_oop_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_oop_arraycopy_uninit", address, StubRoutines::_arrayof_oop_arraycopy_uninit);
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jbyte_disjoint_arraycopy", address, StubRoutines::_arrayof_jbyte_disjoint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jshort_disjoint_arraycopy", address, StubRoutines::_arrayof_jshort_disjoint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jint_disjoint_arraycopy", address, StubRoutines::_arrayof_jint_disjoint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jlong_disjoint_arraycopy", address, StubRoutines::_arrayof_jlong_disjoint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_oop_disjoint_arraycopy", address, StubRoutines::_arrayof_oop_disjoint_arraycopy);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_oop_disjoint_arraycopy_uninit", address, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_unsafe_arraycopy", address, StubRoutines::_unsafe_arraycopy);
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_checkcast_arraycopy", address, StubRoutines::_checkcast_arraycopy);
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_generic_arraycopy", address, StubRoutines::_generic_arraycopy);
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_aescrypt_encryptBlock", address, StubRoutines::_aescrypt_encryptBlock);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_aescrypt_decryptBlock", address, StubRoutines::_aescrypt_decryptBlock);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_cipherBlockChaining_encryptAESCrypt", address, StubRoutines::_cipherBlockChaining_encryptAESCrypt);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_cipherBlockChaining_decryptAESCrypt", address, StubRoutines::_cipherBlockChaining_decryptAESCrypt);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_electronicCodeBook_encryptAESCrypt", address, StubRoutines::_electronicCodeBook_encryptAESCrypt);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_electronicCodeBook_decryptAESCrypt", address, StubRoutines::_electronicCodeBook_decryptAESCrypt);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_update_bytes_crc32", address, StubRoutines::_updateBytesCRC32);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_crc_table_adr", address, StubRoutines::_crc_table_adr);
-
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_sha1_implCompress", address, StubRoutines::_sha1_implCompress);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_sha1_implCompressMB", address, StubRoutines::_sha1_implCompressMB);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_sha256_implCompress", address, StubRoutines::_sha256_implCompress);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_sha256_implCompressMB", address, StubRoutines::_sha256_implCompressMB);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_sha512_implCompress", address, StubRoutines::_sha512_implCompress);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_sha512_implCompressMB", address, StubRoutines::_sha512_implCompressMB);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_multiplyToLen", address, StubRoutines::_multiplyToLen);
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_counterMode_AESCrypt", address, StubRoutines::_counterMode_AESCrypt);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_ghash_processBlocks", address, StubRoutines::_ghash_processBlocks);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_base64_encodeBlock", address, StubRoutines::_base64_encodeBlock);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_crc32c_table_addr", address, StubRoutines::_crc32c_table_addr);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_updateBytesCRC32C", address, StubRoutines::_updateBytesCRC32C);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_updateBytesAdler32", address, StubRoutines::_updateBytesAdler32);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_squareToLen", address, StubRoutines::_squareToLen);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_mulAdd", address, StubRoutines::_mulAdd);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_montgomeryMultiply", address, StubRoutines::_montgomeryMultiply);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_montgomerySquare", address, StubRoutines::_montgomerySquare);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_vectorizedMismatch", address, StubRoutines::_vectorizedMismatch);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_bigIntegerRightShiftWorker", address, StubRoutines::_bigIntegerRightShiftWorker);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_bigIntegerLeftShiftWorker", address, StubRoutines::_bigIntegerLeftShiftWorker);
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_throw_delayed_StackOverflowError_entry", address, StubRoutines::_throw_delayed_StackOverflowError_entry);
-
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_verify_oops", intptr_t, VerifyOops);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_verify_oop_count_address", jint *, &StubRoutines::_verify_oop_count);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_verify_oop_bits", intptr_t, Universe::verify_oop_bits());
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_verify_oop_mask", intptr_t, Universe::verify_oop_mask());
-}
-
-void AOTCodeHeap::link_os_symbols() {
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_os_javaTimeMillis", address, os::javaTimeMillis);
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_os_javaTimeNanos", address, os::javaTimeNanos);
-}
-
-/*
- * Link any global symbols in precompiled DSO with dlopen() _dl_handle
- * dso_handle.
- */
-
-void AOTCodeHeap::link_global_lib_symbols() {
- if (!_lib_symbols_initialized) {
- _lib_symbols_initialized = true;
-
- CollectedHeap* heap = Universe::heap();
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, (BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) ? ci_card_table_address() : NULL));
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_top_address", address, (heap->supports_inline_contig_alloc() ? heap->top_addr() : NULL));
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_end_address", address, (heap->supports_inline_contig_alloc() ? heap->end_addr() : NULL));
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_klass_base_address", address, CompressedKlassPointers::base());
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_oop_base_address", address, CompressedOops::base());
-#if INCLUDE_G1GC
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_log_of_heap_region_grain_bytes", int, HeapRegion::LogOfHRGrainBytes);
-#endif
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_inline_contiguous_allocation_supported", bool, heap->supports_inline_contig_alloc());
- link_shared_runtime_symbols();
- link_stub_routines_symbols();
- link_os_symbols();
- link_graal_runtime_symbols();
- link_known_klasses();
- }
-}
-
-#ifndef PRODUCT
-int AOTCodeHeap::klasses_seen = 0;
-int AOTCodeHeap::aot_klasses_found = 0;
-int AOTCodeHeap::aot_klasses_fp_miss = 0;
-int AOTCodeHeap::aot_klasses_cl_miss = 0;
-int AOTCodeHeap::aot_methods_found = 0;
-
-void AOTCodeHeap::print_statistics() {
- tty->print_cr("Classes seen: %d AOT classes found: %d AOT methods found: %d", klasses_seen, aot_klasses_found, aot_methods_found);
- tty->print_cr("AOT fingerprint mismatches: %d AOT class loader mismatches: %d", aot_klasses_fp_miss, aot_klasses_cl_miss);
-}
-#endif
-
-Method* AOTCodeHeap::find_method(Klass* klass, Thread* thread, const char* method_name) {
- int method_name_len = Bytes::get_Java_u2((address)method_name);
- method_name += 2;
- const char* signature_name = method_name + method_name_len;
- int signature_name_len = Bytes::get_Java_u2((address)signature_name);
- signature_name += 2;
- // The class should have been loaded so the method and signature should already be
- // in the symbol table. If they're not there, the method doesn't exist.
- TempNewSymbol name = SymbolTable::probe(method_name, method_name_len);
- TempNewSymbol signature = SymbolTable::probe(signature_name, signature_name_len);
-
- Method* m;
- if (name == NULL || signature == NULL) {
- m = NULL;
- } else if (name == vmSymbols::object_initializer_name() ||
- name == vmSymbols::class_initializer_name()) {
- // Never search superclasses for constructors
- if (klass->is_instance_klass()) {
- m = InstanceKlass::cast(klass)->find_method(name, signature);
- } else {
- m = NULL;
- }
- } else {
- m = klass->lookup_method(name, signature);
- if (m == NULL && klass->is_instance_klass()) {
- m = InstanceKlass::cast(klass)->lookup_method_in_ordered_interfaces(name, signature);
- }
- }
- if (m == NULL) {
- // Fatal error because we assume classes and methods should not be changed since aot compilation.
- const char* klass_name = klass->external_name();
- int klass_len = (int)strlen(klass_name);
- char* meta_name = NEW_RESOURCE_ARRAY(char, klass_len + 1 + method_name_len + signature_name_len + 1);
- memcpy(meta_name, klass_name, klass_len);
- meta_name[klass_len] = '.';
- memcpy(&meta_name[klass_len + 1], method_name, method_name_len);
- memcpy(&meta_name[klass_len + 1 + method_name_len], signature_name, signature_name_len);
- meta_name[klass_len + 1 + method_name_len + signature_name_len] = '\0';
- Handle exception = Exceptions::new_exception(thread, vmSymbols::java_lang_NoSuchMethodError(), meta_name);
- java_lang_Throwable::print(exception(), tty);
- tty->cr();
- java_lang_Throwable::print_stack_trace(exception, tty);
- tty->cr();
- fatal("Failed to find method '%s'", meta_name);
- }
- NOT_PRODUCT( aot_methods_found++; )
- return m;
-}
-
-AOTKlassData* AOTCodeHeap::find_klass(const char *name) {
- return (AOTKlassData*) os::dll_lookup(_lib->dl_handle(), name);
-}
-
-AOTKlassData* AOTCodeHeap::find_klass(InstanceKlass* ik) {
- ResourceMark rm;
- AOTKlassData* klass_data = find_klass(ik->signature_name());
- return klass_data;
-}
-
-bool AOTCodeHeap::is_dependent_method(Klass* dependee, AOTCompiledMethod* aot) {
- InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
- AOTKlassData* klass_data = find_klass(dependee_ik);
- if (klass_data == NULL) {
- return false; // no AOT records for this class - no dependencies
- }
- if (!dependee_ik->has_passed_fingerprint_check()) {
- return false; // different class
- }
-
- int methods_offset = klass_data->_dependent_methods_offset;
- if (methods_offset >= 0) {
- address methods_cnt_adr = _dependencies + methods_offset;
- int methods_cnt = *(int*)methods_cnt_adr;
- int* indexes = (int*)(methods_cnt_adr + 4);
- for (int i = 0; i < methods_cnt; ++i) {
- int code_id = indexes[i];
- if (_code_to_aot[code_id]._aot == aot) {
- return true; // found dependent method
- }
- }
- }
- return false;
-}
-
-void AOTCodeHeap::mark_evol_dependent_methods(InstanceKlass* dependee) {
- AOTKlassData* klass_data = find_klass(dependee);
- if (klass_data == NULL) {
- return; // no AOT records for this class - no dependencies
- }
- if (!dependee->has_passed_fingerprint_check()) {
- return; // different class
- }
-
- int methods_offset = klass_data->_dependent_methods_offset;
- if (methods_offset >= 0) {
- address methods_cnt_adr = _dependencies + methods_offset;
- int methods_cnt = *(int*)methods_cnt_adr;
- int* indexes = (int*)(methods_cnt_adr + 4);
- for (int i = 0; i < methods_cnt; ++i) {
- int code_id = indexes[i];
- AOTCompiledMethod* aot = _code_to_aot[code_id]._aot;
- if (aot != NULL) {
- aot->mark_for_deoptimization(false);
- }
- }
- }
-}
-
-void AOTCodeHeap::sweep_dependent_methods(int* indexes, int methods_cnt) {
- int marked = 0;
- for (int i = 0; i < methods_cnt; ++i) {
- int code_id = indexes[i];
- // Invalidate aot code.
- if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, invalid) != not_set) {
- if (_code_to_aot[code_id]._state == in_use) {
- AOTCompiledMethod* aot = _code_to_aot[code_id]._aot;
- assert(aot != NULL, "aot should be set");
- if (!aot->is_runtime_stub()) { // Something is wrong - should not invalidate stubs.
- aot->mark_for_deoptimization(false);
- marked++;
- }
- }
- }
- }
- if (marked > 0) {
- Deoptimization::deoptimize_all_marked();
- }
-}
-
-void AOTCodeHeap::sweep_dependent_methods(AOTKlassData* klass_data) {
- // Make dependent methods non_entrant forever.
- int methods_offset = klass_data->_dependent_methods_offset;
- if (methods_offset >= 0) {
- address methods_cnt_adr = _dependencies + methods_offset;
- int methods_cnt = *(int*)methods_cnt_adr;
- int* indexes = (int*)(methods_cnt_adr + 4);
- sweep_dependent_methods(indexes, methods_cnt);
- }
-}
-
-void AOTCodeHeap::sweep_dependent_methods(InstanceKlass* ik) {
- AOTKlassData* klass_data = find_klass(ik);
- vmassert(klass_data != NULL, "dependency data missing");
- sweep_dependent_methods(klass_data);
-}
-
-void AOTCodeHeap::sweep_method(AOTCompiledMethod *aot) {
- int indexes[] = {aot->method_index()};
- sweep_dependent_methods(indexes, 1);
- vmassert(aot->method()->code() != aot COMPILER1_OR_COMPILER2_PRESENT( && aot->method()->aot_code() == NULL), "method still active");
-}
-
-
-bool AOTCodeHeap::load_klass_data(InstanceKlass* ik, Thread* thread) {
- ResourceMark rm;
-
- NOT_PRODUCT( klasses_seen++; )
-
- // AOT does not support custom class loaders.
- ClassLoaderData* cld = ik->class_loader_data();
- if (!cld->is_builtin_class_loader_data()) {
- log_trace(aot, class, load)("skip class %s for custom classloader %s (%p) tid=" INTPTR_FORMAT,
- ik->internal_name(), cld->loader_name(), cld, p2i(thread));
- return false;
- }
-
- AOTKlassData* klass_data = find_klass(ik);
- if (klass_data == NULL) {
- return false;
- }
-
- if (!ik->has_passed_fingerprint_check()) {
- NOT_PRODUCT( aot_klasses_fp_miss++; )
- log_trace(aot, class, fingerprint)("class %s%s has bad fingerprint in %s tid=" INTPTR_FORMAT,
- ik->internal_name(), ik->is_shared() ? " (shared)" : "",
- _lib->name(), p2i(thread));
- sweep_dependent_methods(klass_data);
- return false;
- }
-
- if (ik->has_been_redefined()) {
- log_trace(aot, class, load)("class %s%s in %s has been redefined tid=" INTPTR_FORMAT,
- ik->internal_name(), ik->is_shared() ? " (shared)" : "",
- _lib->name(), p2i(thread));
- sweep_dependent_methods(klass_data);
- return false;
- }
-
- assert(klass_data->_class_id < _class_count, "invalid class id");
- AOTClass* aot_class = &_classes[klass_data->_class_id];
- ClassLoaderData* aot_cld = aot_class->_classloader;
- if (aot_cld != NULL && aot_cld != cld) {
- log_trace(aot, class, load)("class %s in %s already loaded for classloader %s (%p) vs %s (%p) tid=" INTPTR_FORMAT,
- ik->internal_name(), _lib->name(), aot_cld->loader_name(), aot_cld, cld->loader_name(), cld, p2i(thread));
- NOT_PRODUCT( aot_klasses_cl_miss++; )
- return false;
- }
-
- if (_lib->config()->_omitAssertions && JavaAssertions::enabled(ik->name()->as_C_string(), ik->class_loader() == NULL)) {
- log_trace(aot, class, load)("class %s in %s does not have java assertions in compiled code, but assertions are enabled for this execution.", ik->internal_name(), _lib->name());
- sweep_dependent_methods(klass_data);
- return false;
- }
-
- NOT_PRODUCT( aot_klasses_found++; )
-
- log_trace(aot, class, load)("found %s in %s for classloader %s (%p) tid=" INTPTR_FORMAT, ik->internal_name(), _lib->name(), cld->loader_name(), cld, p2i(thread));
-
- aot_class->_classloader = cld;
- // Set klass's Resolve (second) got cell.
- _klasses_got[klass_data->_got_index] = ik;
- if (ik->is_initialized()) {
- _klasses_got[klass_data->_got_index - 1] = ik;
- }
-
- // Initialize global symbols of the DSO to the corresponding VM symbol values.
- link_global_lib_symbols();
-
- int methods_offset = klass_data->_compiled_methods_offset;
- if (methods_offset >= 0) {
- address methods_cnt_adr = _methods_offsets + methods_offset;
- int methods_cnt = *(int*)methods_cnt_adr;
- // Collect data about compiled methods
- AOTMethodData* methods_data = NEW_RESOURCE_ARRAY(AOTMethodData, methods_cnt);
- AOTMethodOffsets* methods_offsets = (AOTMethodOffsets*)(methods_cnt_adr + 4);
- for (int i = 0; i < methods_cnt; ++i) {
- AOTMethodOffsets* method_offsets = &methods_offsets[i];
- int code_id = method_offsets->_code_id;
- if (_code_to_aot[code_id]._state == invalid) {
- continue; // skip AOT methods slots which have been invalidated
- }
- AOTMethodData* method_data = &methods_data[i];
- const char* aot_name = _metaspace_names + method_offsets->_name_offset;
- method_data->_name = aot_name;
- method_data->_code = _code_space + method_offsets->_code_offset;
- method_data->_meta = (aot_metadata*)(_method_metadata + method_offsets->_meta_offset);
- method_data->_metadata_table = (address)_metadata_got + method_offsets->_metadata_got_offset;
- method_data->_metadata_size = method_offsets->_metadata_got_size;
- // aot_name format: "Ljava/lang/ThreadGroup;addUnstarted()V"
- int klass_len = Bytes::get_Java_u2((address)aot_name);
- const char* method_name = aot_name + 2 + klass_len;
- Method* m = AOTCodeHeap::find_method(ik, thread, method_name);
- methodHandle mh(thread, m);
- if (mh->code() != NULL) { // Does it have already compiled code?
- continue; // Don't overwrite
- }
- publish_aot(mh, method_data, code_id);
- }
- }
- return true;
-}
-
-AOTCompiledMethod* AOTCodeHeap::next_in_use_at(int start) const {
- for (int index = start; index < _method_count; index++) {
- if (_code_to_aot[index]._state != in_use) {
- continue; // Skip uninitialized entries.
- }
- AOTCompiledMethod* aot = _code_to_aot[index]._aot;
- return aot;
- }
- return NULL;
-}
-
-void* AOTCodeHeap::first() const {
- return next_in_use_at(0);
-}
-
-void* AOTCodeHeap::next(void* p) const {
- AOTCompiledMethod *aot = (AOTCompiledMethod *)p;
- int next_index = aot->method_index() + 1;
- assert(next_index <= _method_count, "");
- if (next_index == _method_count) {
- return NULL;
- }
- return next_in_use_at(next_index);
-}
-
-void* AOTCodeHeap::find_start(void* p) const {
- if (!contains(p)) {
- return NULL;
- }
- size_t offset = pointer_delta(p, low_boundary(), 1);
- // Use segments table
- size_t seg_idx = offset / _lib->config()->_codeSegmentSize;
- if ((int)(_code_segments[seg_idx]) == 0xff) {
- return NULL;
- }
- while (_code_segments[seg_idx] > 0) {
- seg_idx -= (int)_code_segments[seg_idx];
- }
- int code_offset = (int)seg_idx * _lib->config()->_codeSegmentSize;
- int aot_index = *(int*)(_code_space + code_offset);
- AOTCompiledMethod* aot = _code_to_aot[aot_index]._aot;
- assert(aot != NULL, "should find registered aot method");
- return aot;
-}
-
-AOTCompiledMethod* AOTCodeHeap::find_aot(address p) const {
- assert(contains(p), "should be here");
- return (AOTCompiledMethod *)find_start(p);
-}
-
-CodeBlob* AOTCodeHeap::find_blob_unsafe(void* start) const {
- return (CodeBlob*)AOTCodeHeap::find_start(start);
-}
-
-void AOTCodeHeap::oops_do(OopClosure* f) {
- for (int i = 0; i < _oop_got_size; i++) {
- oop* p = &_oop_got[i];
- if (*p == NULL) continue; // skip non-oops
- f->do_oop(p);
- }
- for (int index = 0; index < _method_count; index++) {
- if (_code_to_aot[index]._state != in_use) {
- continue; // Skip uninitialized entries.
- }
- AOTCompiledMethod* aot = _code_to_aot[index]._aot;
- aot->do_oops(f);
- }
-}
-
-// Scan only klasses_got cells which should have only Klass*,
-// metadata_got cells are scanned only for alive AOT methods
-// by AOTCompiledMethod::metadata_do().
-void AOTCodeHeap::got_metadata_do(MetadataClosure* f) {
- for (int i = 1; i < _klasses_got_size; i++) {
- Metadata** p = &_klasses_got[i];
- Metadata* md = *p;
- if (md == NULL) continue; // skip non-oops
- if (Metaspace::contains(md)) {
- f->do_metadata(md);
- } else {
- intptr_t meta = (intptr_t)md;
- fatal("Invalid value in _klasses_got[%d] = " INTPTR_FORMAT, i, meta);
- }
- }
-}
-
-void AOTCodeHeap::cleanup_inline_caches() {
- for (int index = 0; index < _method_count; index++) {
- if (_code_to_aot[index]._state != in_use) {
- continue; // Skip uninitialized entries.
- }
- AOTCompiledMethod* aot = _code_to_aot[index]._aot;
- aot->cleanup_inline_caches(false);
- }
-}
-
-#ifdef ASSERT
-int AOTCodeHeap::verify_icholder_relocations() {
- int count = 0;
- for (int index = 0; index < _method_count; index++) {
- if (_code_to_aot[index]._state != in_use) {
- continue; // Skip uninitialized entries.
- }
- AOTCompiledMethod* aot = _code_to_aot[index]._aot;
- count += aot->verify_icholder_relocations();
- }
- return count;
-}
-#endif
-
-void AOTCodeHeap::metadata_do(MetadataClosure* f) {
- for (int index = 0; index < _method_count; index++) {
- if (_code_to_aot[index]._state != in_use) {
- continue; // Skip uninitialized entries.
- }
- AOTCompiledMethod* aot = _code_to_aot[index]._aot;
- if (aot->_is_alive()) {
- aot->metadata_do(f);
- }
- }
- // Scan klasses_got cells.
- got_metadata_do(f);
-}
-
-bool AOTCodeHeap::reconcile_dynamic_klass(AOTCompiledMethod *caller, InstanceKlass* holder, int index, Klass *dyno_klass, const char *descriptor1, const char *descriptor2) {
- const char * const descriptors[2] = {descriptor1, descriptor2};
- JavaThread *thread = JavaThread::current();
- ResourceMark rm(thread);
-
- AOTKlassData* holder_data = find_klass(holder);
- vmassert(holder_data != NULL, "klass %s not found", holder->signature_name());
- vmassert(is_dependent_method(holder, caller), "sanity");
-
- AOTKlassData* dyno_data = NULL;
- bool adapter_failed = false;
- char buf[64];
- int descriptor_index = 0;
- // descriptors[0] specific name ("adapter:") for matching
- // descriptors[1] fall-back name ("adapter") for depdencies
- while (descriptor_index < 2) {
- const char *descriptor = descriptors[descriptor_index];
- if (descriptor == NULL) {
- break;
- }
- jio_snprintf(buf, sizeof buf, "%s<%d:%d>", descriptor, holder_data->_class_id, index);
- dyno_data = find_klass(buf);
- if (dyno_data != NULL) {
- break;
- }
- // If match failed then try fall-back for dependencies
- ++descriptor_index;
- adapter_failed = true;
- }
-
- if (dyno_data == NULL && dyno_klass == NULL) {
- // all is well, no (appendix) at compile-time, and still none
- return true;
- }
-
- if (dyno_data == NULL) {
- // no (appendix) at build-time, but now there is
- sweep_dependent_methods(holder_data);
- return false;
- }
-
- if (adapter_failed) {
- // adapter method mismatch
- sweep_dependent_methods(holder_data);
- sweep_dependent_methods(dyno_data);
- return false;
- }
-
- if (dyno_klass == NULL) {
- // (appendix) at build-time, none now
- sweep_dependent_methods(holder_data);
- sweep_dependent_methods(dyno_data);
- return false;
- }
-
- // TODO: support array appendix object
- if (!dyno_klass->is_instance_klass()) {
- sweep_dependent_methods(holder_data);
- sweep_dependent_methods(dyno_data);
- return false;
- }
-
- InstanceKlass* dyno = InstanceKlass::cast(dyno_klass);
-
- if (!dyno->is_hidden() && !dyno->is_unsafe_anonymous()) {
- if (_klasses_got[dyno_data->_got_index] != dyno) {
- // compile-time class different from runtime class, fail and deoptimize
- sweep_dependent_methods(holder_data);
- sweep_dependent_methods(dyno_data);
- return false;
- }
-
- if (dyno->is_initialized()) {
- _klasses_got[dyno_data->_got_index - 1] = dyno;
- }
- return true;
- }
-
- // TODO: support anonymous supers
- if (!dyno->supers_have_passed_fingerprint_checks() || dyno->get_stored_fingerprint() != dyno_data->_fingerprint) {
- NOT_PRODUCT( aot_klasses_fp_miss++; )
- log_trace(aot, class, fingerprint)("class %s%s has bad fingerprint in %s tid=" INTPTR_FORMAT,
- dyno->internal_name(), dyno->is_shared() ? " (shared)" : "",
- _lib->name(), p2i(thread));
- sweep_dependent_methods(holder_data);
- sweep_dependent_methods(dyno_data);
- return false;
- }
-
- _klasses_got[dyno_data->_got_index] = dyno;
- if (dyno->is_initialized()) {
- _klasses_got[dyno_data->_got_index - 1] = dyno;
- }
-
- // TODO: hook up any AOT code
- // load_klass_data(dyno_data, thread);
- return true;
-}
-
-bool AOTCodeHeap::reconcile_dynamic_method(AOTCompiledMethod *caller, InstanceKlass* holder, int index, Method *adapter_method) {
- InstanceKlass *adapter_klass = adapter_method->method_holder();
- char buf[64];
- jio_snprintf(buf, sizeof buf, "adapter:%d", adapter_method->method_idnum());
- if (!reconcile_dynamic_klass(caller, holder, index, adapter_klass, buf, "adapter")) {
- return false;
- }
- return true;
-}
-
-bool AOTCodeHeap::reconcile_dynamic_invoke(AOTCompiledMethod* caller, InstanceKlass* holder, int index, Method* adapter_method, Klass *appendix_klass) {
- if (!reconcile_dynamic_klass(caller, holder, index, appendix_klass, "appendix")) {
- return false;
- }
-
- if (!reconcile_dynamic_method(caller, holder, index, adapter_method)) {
- return false;
- }
-
- return true;
-}
diff --git a/src/hotspot/share/aot/aotCodeHeap.hpp b/src/hotspot/share/aot/aotCodeHeap.hpp
deleted file mode 100644
index a1be80cbb6a..00000000000
--- a/src/hotspot/share/aot/aotCodeHeap.hpp
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_AOT_AOTCODEHEAP_HPP
-#define SHARE_AOT_AOTCODEHEAP_HPP
-
-#include "aot/aotCompiledMethod.hpp"
-#include "classfile/symbolTable.hpp"
-#include "metaprogramming/integralConstant.hpp"
-#include "oops/metadata.hpp"
-#include "oops/method.hpp"
-
-enum CodeState {
- not_set = 0, // _aot fields is not set yet
- in_use = 1, // _aot field is set to corresponding AOTCompiledMethod
- invalid = 2 // AOT code is invalidated because dependencies failed
-};
-
-typedef struct {
- AOTCompiledMethod* _aot;
- CodeState _state; // State change cases: not_set->in_use, not_set->invalid
-} CodeToAMethod;
-
-class ClassLoaderData;
-
-class AOTClass {
-public:
- ClassLoaderData* _classloader;
-};
-
-typedef struct {
- int _name_offset;
- int _code_offset;
- int _meta_offset;
- int _metadata_got_offset;
- int _metadata_got_size;
- int _code_id;
-} AOTMethodOffsets;
-
-typedef struct {
- const char* _name;
- address _code;
- aot_metadata* _meta;
- jlong* _state_adr;
- address _metadata_table;
- int _metadata_size;
-} AOTMethodData;
-
-typedef struct {
- int _got_index;
- int _class_id;
- int _compiled_methods_offset;
- int _dependent_methods_offset;
- uint64_t _fingerprint;
-} AOTKlassData;
-
-typedef struct {
- int _version;
- int _class_count;
- int _method_count;
- int _klasses_got_size;
- int _metadata_got_size;
- int _oop_got_size;
- int _jvm_version_offset;
-
- enum {
- AOT_SHARED_VERSION = 1
- };
-} AOTHeader;
-
-typedef struct {
- enum { CONFIG_SIZE = 7 * jintSize + 9 };
- // 7 int values
- int _config_size;
- int _narrowOopShift;
- int _narrowKlassShift;
- int _contendedPaddingWidth;
- int _objectAlignment;
- int _codeSegmentSize;
- int _gc;
- // byte[9] array map to boolean values here
- bool _debug_VM;
- bool _useCompressedOops;
- bool _useCompressedClassPointers;
- bool _useTLAB;
- bool _useBiasedLocking;
- bool _tieredAOT;
- bool _enableContended;
- bool _restrictContended;
- bool _omitAssertions;
-} AOTConfiguration;
-
-class AOTLib : public CHeapObj {
- static bool _narrow_oop_shift_initialized;
- static int _narrow_oop_shift;
- static int _narrow_klass_shift;
-
- bool _valid;
- void* _dl_handle;
- const int _dso_id;
- const char* _name;
- // VM configuration during AOT compilation
- AOTConfiguration* _config;
- AOTHeader* _header;
-
- void handle_config_error(const char* format, ...) ATTRIBUTE_PRINTF(2, 3);
-public:
- AOTLib(void* handle, const char* name, int dso_id);
- virtual ~AOTLib();
- static int narrow_oop_shift() { return _narrow_oop_shift; }
- static int narrow_klass_shift() { return _narrow_klass_shift; }
- static bool narrow_oop_shift_initialized() { return _narrow_oop_shift_initialized; }
-
- bool is_valid() const {
- return _valid;
- }
- const char* name() const {
- return _name;
- }
- void* dl_handle() const {
- return _dl_handle;
- }
- int id() const {
- return _dso_id;
- }
- AOTHeader* header() const {
- return _header;
- }
- AOTConfiguration* config() const {
- return _config;
- }
- void verify_config();
- void verify_flag(bool aot_flag, bool flag, const char* name);
- void verify_flag(int aot_flag, int flag, const char* name);
-
- address load_symbol(const char *name);
-};
-
-
-class AOTCodeHeap : public CodeHeap {
- AOTLib* _lib;
- int _aot_id;
-
- int _class_count;
- int _method_count;
- AOTClass* _classes;
- CodeToAMethod* _code_to_aot;
-
- address _code_space;
- address _code_segments;
- jlong* _method_state;
-
-
- // Collect metaspace info: names -> address in .got section
- const char* _metaspace_names;
- address _method_metadata;
-
- address _methods_offsets;
- address _klasses_offsets;
- address _dependencies;
-
- Metadata** _klasses_got;
- Metadata** _metadata_got;
- oop* _oop_got;
-
- int _klasses_got_size;
- int _metadata_got_size;
- int _oop_got_size;
-
- // Collect stubs info
- int* _stubs_offsets;
-
- bool _lib_symbols_initialized;
-
- void adjust_boundaries(AOTCompiledMethod* method) {
- char* low = (char*)method->code_begin();
- if (low < low_boundary()) {
- _memory.set_low_boundary(low);
- _memory.set_low(low);
- }
- char* high = (char *)method->code_end();
- if (high > high_boundary()) {
- _memory.set_high_boundary(high);
- _memory.set_high(high);
- }
- assert(_method_count > 0, "methods count should be set already");
- }
-
- void register_stubs();
-
- void link_shared_runtime_symbols();
- void link_stub_routines_symbols();
- void link_os_symbols();
- void link_graal_runtime_symbols();
-
- void link_global_lib_symbols();
- void link_klass(const Klass* klass);
- void link_known_klasses();
- void publish_aot(const methodHandle& mh, AOTMethodData* method_data, int code_id);
-
-
- AOTCompiledMethod* next_in_use_at(int index) const;
-
- // Find klass in SystemDictionary for aot metadata.
- static Klass* lookup_klass(const char* name, int len, const Method* method, Thread* THREAD);
-public:
- AOTCodeHeap(AOTLib* lib);
- virtual ~AOTCodeHeap();
-
- AOTCompiledMethod* find_aot(address p) const;
-
- virtual void* find_start(void* p) const;
- virtual CodeBlob* find_blob_unsafe(void* start) const;
- virtual void* first() const;
- virtual void* next(void *p) const;
-
- AOTKlassData* find_klass(InstanceKlass* ik);
- bool load_klass_data(InstanceKlass* ik, Thread* thread);
- Klass* get_klass_from_got(const char* klass_name, int klass_len, const Method* method);
-
- bool is_dependent_method(Klass* dependee, AOTCompiledMethod* aot);
- void mark_evol_dependent_methods(InstanceKlass* dependee);
-
- const char* get_name_at(int offset) {
- return _metaspace_names + offset;
- }
-
-
- void oops_do(OopClosure* f);
- void metadata_do(MetadataClosure* f);
- void got_metadata_do(MetadataClosure* f);
-
-#ifdef ASSERT
- bool got_contains(Metadata **p) {
- return (p >= &_metadata_got[0] && p < &_metadata_got[_metadata_got_size]) ||
- (p >= &_klasses_got[0] && p < &_klasses_got[_klasses_got_size]);
- }
-#endif
-
- int dso_id() const { return _lib->id(); }
- int aot_id() const { return _aot_id; }
-
- int method_count() { return _method_count; }
-
- AOTCompiledMethod* get_code_desc_at_index(int index) {
- if (index < _method_count && _code_to_aot[index]._state == in_use) {
- AOTCompiledMethod* m = _code_to_aot[index]._aot;
- assert(m != NULL, "AOT method should be set");
- if (!m->is_runtime_stub()) {
- return m;
- }
- }
- return NULL;
- }
-
- static Method* find_method(Klass* klass, Thread* thread, const char* method_name);
-
- void cleanup_inline_caches();
-
- DEBUG_ONLY( int verify_icholder_relocations(); )
-
- void alive_methods_do(void f(CompiledMethod* nm));
-
-#ifndef PRODUCT
- static int klasses_seen;
- static int aot_klasses_found;
- static int aot_klasses_fp_miss;
- static int aot_klasses_cl_miss;
- static int aot_methods_found;
-
- static void print_statistics();
-#endif
-
- bool reconcile_dynamic_invoke(AOTCompiledMethod* caller, InstanceKlass* holder, int index, Method* adapter_method, Klass *appendix_klass);
-
-private:
- AOTKlassData* find_klass(const char* name);
-
- void sweep_dependent_methods(int* indexes, int methods_cnt);
- void sweep_dependent_methods(AOTKlassData* klass_data);
- void sweep_dependent_methods(InstanceKlass* ik);
- void sweep_method(AOTCompiledMethod* aot);
-
- bool reconcile_dynamic_klass(AOTCompiledMethod *caller, InstanceKlass* holder, int index, Klass *dyno, const char *descriptor1, const char *descriptor2 = NULL);
-
- bool reconcile_dynamic_method(AOTCompiledMethod *caller, InstanceKlass* holder, int index, Method *adapter_method);
-
-};
-
-#endif // SHARE_AOT_AOTCODEHEAP_HPP
diff --git a/src/hotspot/share/aot/aotCompiledMethod.cpp b/src/hotspot/share/aot/aotCompiledMethod.cpp
deleted file mode 100644
index 25841bf5954..00000000000
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp
+++ /dev/null
@@ -1,441 +0,0 @@
-/*
- * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-
-#include "aot/aotCodeHeap.hpp"
-#include "aot/aotLoader.hpp"
-#include "aot/compiledIC_aot.hpp"
-#include "code/codeCache.hpp"
-#include "code/compiledIC.hpp"
-#include "code/nativeInst.hpp"
-#include "compiler/compilerOracle.hpp"
-#include "gc/shared/cardTableBarrierSet.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "oops/klass.inline.hpp"
-#include "oops/method.inline.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-#include "runtime/safepointVerifiers.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "utilities/sizes.hpp"
-#include "utilities/xmlstream.hpp"
-
-#include
-
-#if 0
-static void metadata_oops_do(Metadata** metadata_begin, Metadata **metadata_end, OopClosure* f) {
- // Visit the metadata/oops section
- for (Metadata** p = metadata_begin; p < metadata_end; p++) {
- Metadata* m = *p;
-
- intptr_t meta = (intptr_t)m;
- if ((meta & 1) == 1) {
- // already resolved
- m = (Metadata*)(meta & ~1);
- } else {
- continue;
- }
- assert(Metaspace::contains(m), "");
- if (m->is_method()) {
- m = ((Method*)m)->method_holder();
- }
- assert(m->is_klass(), "must be");
- oop o = ((Klass*)m)->klass_holder();
- if (o != NULL) {
- f->do_oop(&o);
- }
- }
-}
-#endif
-
-address* AOTCompiledMethod::orig_pc_addr(const frame* fr) {
- return (address*) ((address)fr->unextended_sp() + _meta->orig_pc_offset());
-}
-
-oop AOTCompiledMethod::oop_at(int index) const {
- if (index == 0) { // 0 is reserved
- return NULL;
- }
- Metadata** entry = _metadata_got + (index - 1);
- intptr_t meta = (intptr_t)*entry;
- if ((meta & 1) == 1) {
- // already resolved
- Klass* k = (Klass*)(meta & ~1);
- return k->java_mirror();
- }
- // The entry is string which we need to resolve.
- const char* meta_name = _heap->get_name_at((int)meta);
- int klass_len = Bytes::get_Java_u2((address)meta_name);
- const char* klass_name = meta_name + 2;
- // Quick check the current method's holder.
- Klass* k = _method->method_holder();
-
- ResourceMark rm; // for signature_name()
- if (strncmp(k->signature_name(), klass_name, klass_len) != 0) { // Does not match?
- // Search klass in got cells in DSO which have this compiled method.
- k = _heap->get_klass_from_got(klass_name, klass_len, _method);
- }
- int method_name_len = Bytes::get_Java_u2((address)klass_name + klass_len);
- guarantee(method_name_len == 0, "only klass is expected here");
- meta = ((intptr_t)k) | 1;
- *entry = (Metadata*)meta; // Should be atomic on x64
- return k->java_mirror();
-}
-
-Metadata* AOTCompiledMethod::metadata_at(int index) const {
- if (index == 0) { // 0 is reserved
- return NULL;
- }
- assert(index - 1 < _metadata_size, "");
- {
- Metadata** entry = _metadata_got + (index - 1);
- intptr_t meta = (intptr_t)*entry;
- if ((meta & 1) == 1) {
- // already resolved
- Metadata *m = (Metadata*)(meta & ~1);
- return m;
- }
- // The entry is string which we need to resolve.
- const char* meta_name = _heap->get_name_at((int)meta);
- int klass_len = Bytes::get_Java_u2((address)meta_name);
- const char* klass_name = meta_name + 2;
- // Quick check the current method's holder.
- Klass* k = _method->method_holder();
- bool klass_matched = true;
-
- ResourceMark rm; // for signature_name() and find_method()
- if (strncmp(k->signature_name(), klass_name, klass_len) != 0) { // Does not match?
- // Search klass in got cells in DSO which have this compiled method.
- k = _heap->get_klass_from_got(klass_name, klass_len, _method);
- klass_matched = false;
- }
- int method_name_len = Bytes::get_Java_u2((address)klass_name + klass_len);
- if (method_name_len == 0) { // Array or Klass name only?
- meta = ((intptr_t)k) | 1;
- *entry = (Metadata*)meta; // Should be atomic on x64
- return (Metadata*)k;
- } else { // Method
- // Quick check the current method's name.
- Method* m = _method;
- int signature_len = Bytes::get_Java_u2((address)klass_name + klass_len + 2 + method_name_len);
- int full_len = 2 + klass_len + 2 + method_name_len + 2 + signature_len;
- if (!klass_matched || memcmp(_name, meta_name, full_len) != 0) { // Does not match?
- Thread* thread = Thread::current();
- const char* method_name = klass_name + klass_len;
- m = AOTCodeHeap::find_method(k, thread, method_name);
- }
- meta = ((intptr_t)m) | 1;
- *entry = (Metadata*)meta; // Should be atomic on x64
- return (Metadata*)m;
- }
- }
- ShouldNotReachHere(); return NULL;
-}
-
-void AOTCompiledMethod::do_unloading(bool unloading_occurred) {
- unload_nmethod_caches(unloading_occurred);
-}
-
-bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
- NoSafepointVerifier nsv;
-
- {
- // Enter critical section. Does not block for safepoint.
- MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
-
- if (*_state_adr == new_state) {
- // another thread already performed this transition so nothing
- // to do, but return false to indicate this.
- return false;
- }
-
- // Change state
- OrderAccess::storestore();
- *_state_adr = new_state;
-
- // Log the transition once
- log_state_change();
-
-#if COMPILER1_OR_COMPILER2
- // Remain non-entrant forever
- if (new_state == not_entrant && method() != NULL) {
- method()->set_aot_code(NULL);
- }
-#endif // COMPILER1_OR_COMPILER2
-
- // Remove AOTCompiledMethod from method.
- if (method() != NULL) {
- method()->unlink_code(this);
- }
- } // leave critical region under CompiledMethod_lock
-
-
- if (TraceCreateZombies) {
- ResourceMark m;
- const char *new_state_str = (new_state == not_entrant) ? "not entrant" : "not used";
- tty->print_cr("aot method <" INTPTR_FORMAT "> %s code made %s", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null", new_state_str);
- }
-
- return true;
-}
-
-bool AOTCompiledMethod::make_entrant() {
-#if COMPILER1_OR_COMPILER2
- assert(!method()->is_old(), "reviving evolved method!");
-
- NoSafepointVerifier nsv;
- {
- // Enter critical section. Does not block for safepoint.
- MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
-
- if (*_state_adr == in_use || *_state_adr == not_entrant) {
- // another thread already performed this transition so nothing
- // to do, but return false to indicate this.
- return false;
- }
-
- // Change state
- OrderAccess::storestore();
- *_state_adr = in_use;
-
- // Log the transition once
- log_state_change();
- } // leave critical region under CompiledMethod_lock
-
-
- if (TraceCreateZombies) {
- ResourceMark m;
- tty->print_cr("aot method <" INTPTR_FORMAT "> %s code made entrant", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null");
- }
-
- return true;
-#else
- return false;
-#endif // COMPILER1_OR_COMPILER2
-}
-
-// Iterate over metadata calling this function. Used by RedefineClasses
-// Copied from nmethod::metadata_do
-void AOTCompiledMethod::metadata_do(MetadataClosure* f) {
- address low_boundary = verified_entry_point();
- {
- // Visit all immediate references that are embedded in the instruction stream.
- RelocIterator iter(this, low_boundary);
- while (iter.next()) {
- if (iter.type() == relocInfo::metadata_type ) {
- metadata_Relocation* r = iter.metadata_reloc();
- // In this metadata, we must only follow those metadatas directly embedded in
- // the code. Other metadatas (oop_index>0) are seen as part of
- // the metadata section below.
- assert(1 == (r->metadata_is_immediate()) +
- (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
- "metadata must be found in exactly one place");
- if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
- Metadata* md = r->metadata_value();
- if (md != _method) f->do_metadata(md);
- }
- } else if (iter.type() == relocInfo::virtual_call_type) {
- ResourceMark rm;
- // Check compiledIC holders associated with this nmethod
- CompiledIC *ic = CompiledIC_at(&iter);
- if (ic->is_icholder_call()) {
- CompiledICHolder* cichk = ic->cached_icholder();
- f->do_metadata(cichk->holder_metadata());
- f->do_metadata(cichk->holder_klass());
- } else {
- // Get Klass* or NULL (if value is -1) from GOT cell of virtual call PLT stub.
- Metadata* ic_oop = ic->cached_metadata();
- if (ic_oop != NULL) {
- f->do_metadata(ic_oop);
- }
- }
- } else if (iter.type() == relocInfo::static_call_type ||
- iter.type() == relocInfo::opt_virtual_call_type) {
- // Check Method* in AOT c2i stub for other calls.
- Metadata* meta = (Metadata*)nativeLoadGot_at(nativePltCall_at(iter.addr())->plt_c2i_stub())->data();
- if (meta != NULL) {
- f->do_metadata(meta);
- }
- }
- }
- }
-
- // Visit the metadata section
- for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
- Metadata* m = *p;
-
- intptr_t meta = (intptr_t)m;
- if ((meta & 1) == 1) {
- // already resolved
- m = (Metadata*)(meta & ~1);
- } else {
- continue;
- }
- assert(Metaspace::contains(m), "");
- f->do_metadata(m);
- }
-
- // Visit metadata not embedded in the other places.
- if (_method != NULL) f->do_metadata(_method);
-}
-
-void AOTCompiledMethod::print() const {
- print_on(tty, "AOTCompiledMethod");
-}
-
-void AOTCompiledMethod::print_on(outputStream* st) const {
- print_on(st, "AOTCompiledMethod");
-}
-
-// Print out more verbose output usually for a newly created aot method.
-void AOTCompiledMethod::print_on(outputStream* st, const char* msg) const {
- if (st != NULL) {
- ttyLocker ttyl;
- st->print("%7d ", (int) tty->time_stamp().milliseconds());
- st->print("%4d ", _aot_id); // print compilation number
- st->print(" aot[%2d]", _heap->dso_id());
- // Stubs have _method == NULL
- if (_method == NULL) {
- st->print(" %s", _name);
- } else {
- ResourceMark m;
- st->print(" %s", _method->name_and_sig_as_C_string());
- }
- if (Verbose) {
- st->print(" entry at " INTPTR_FORMAT, p2i(_code));
- }
- if (msg != NULL) {
- st->print(" %s", msg);
- }
- st->cr();
- }
-}
-
-void AOTCompiledMethod::print_value_on(outputStream* st) const {
- st->print("AOTCompiledMethod ");
- print_on(st, NULL);
-}
-
-// Print a short set of xml attributes to identify this aot method. The
-// output should be embedded in some other element.
-void AOTCompiledMethod::log_identity(xmlStream* log) const {
- log->print(" aot_id='%d'", _aot_id);
- log->print(" aot='%2d'", _heap->dso_id());
-}
-
-void AOTCompiledMethod::log_state_change() const {
- if (LogCompilation) {
- ResourceMark m;
- if (xtty != NULL) {
- ttyLocker ttyl; // keep the following output all in one block
- if (*_state_adr == not_entrant) {
- xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'",
- os::current_thread_id());
- } else if (*_state_adr == not_used) {
- xtty->begin_elem("make_not_used thread='" UINTX_FORMAT "'",
- os::current_thread_id());
- } else if (*_state_adr == in_use) {
- xtty->begin_elem("make_entrant thread='" UINTX_FORMAT "'",
- os::current_thread_id());
- }
- log_identity(xtty);
- xtty->stamp();
- xtty->end_elem();
- }
- }
- if (PrintCompilation) {
- ResourceMark m;
- if (*_state_adr == not_entrant) {
- print_on(tty, "made not entrant");
- } else if (*_state_adr == not_used) {
- print_on(tty, "made not used");
- } else if (*_state_adr == in_use) {
- print_on(tty, "made entrant");
- }
- }
-}
-
-
-NativeInstruction* PltNativeCallWrapper::get_load_instruction(virtual_call_Relocation* r) const {
- return nativeLoadGot_at(_call->plt_load_got());
-}
-
-void PltNativeCallWrapper::verify_resolve_call(address dest) const {
- CodeBlob* db = CodeCache::find_blob_unsafe(dest);
- if (db == NULL) {
- assert(dest == _call->plt_resolve_call(), "sanity");
- }
-}
-
-void PltNativeCallWrapper::set_to_interpreted(const methodHandle& method, CompiledICInfo& info) {
- assert(!info.to_aot(), "only for nmethod");
- CompiledPltStaticCall* csc = CompiledPltStaticCall::at(instruction_address());
- csc->set_to_interpreted(method, info.entry());
-}
-
-NativeCallWrapper* AOTCompiledMethod::call_wrapper_at(address call) const {
- return new PltNativeCallWrapper((NativePltCall*) call);
-}
-
-NativeCallWrapper* AOTCompiledMethod::call_wrapper_before(address return_pc) const {
- return new PltNativeCallWrapper(nativePltCall_before(return_pc));
-}
-
-CompiledStaticCall* AOTCompiledMethod::compiledStaticCall_at(Relocation* call_site) const {
- return CompiledPltStaticCall::at(call_site);
-}
-
-CompiledStaticCall* AOTCompiledMethod::compiledStaticCall_at(address call_site) const {
- return CompiledPltStaticCall::at(call_site);
-}
-
-CompiledStaticCall* AOTCompiledMethod::compiledStaticCall_before(address return_addr) const {
- return CompiledPltStaticCall::before(return_addr);
-}
-
-address AOTCompiledMethod::call_instruction_address(address pc) const {
- NativePltCall* pltcall = nativePltCall_before(pc);
- return pltcall->instruction_address();
-}
-
-void AOTCompiledMethod::clear_inline_caches() {
- assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
- if (is_zombie()) {
- return;
- }
-
- ResourceMark rm;
- RelocIterator iter(this);
- while (iter.next()) {
- iter.reloc()->clear_inline_cache();
- if (iter.type() == relocInfo::opt_virtual_call_type) {
- CompiledIC* cic = CompiledIC_at(&iter);
- assert(cic->is_clean(), "!");
- nativePltCall_at(iter.addr())->set_stub_to_clean();
- }
- }
-}
diff --git a/src/hotspot/share/aot/aotCompiledMethod.hpp b/src/hotspot/share/aot/aotCompiledMethod.hpp
deleted file mode 100644
index ce636995274..00000000000
--- a/src/hotspot/share/aot/aotCompiledMethod.hpp
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_AOT_AOTCOMPILEDMETHOD_HPP
-#define SHARE_AOT_AOTCOMPILEDMETHOD_HPP
-
-#include "code/codeCache.hpp"
-#include "code/compiledIC.hpp"
-#include "code/compiledMethod.hpp"
-#include "code/pcDesc.hpp"
-#include "code/relocInfo.hpp"
-
-class AOTCodeHeap;
-
-class aot_metadata {
-private:
- int _size;
- int _code_size;
- int _entry;
- int _verified_entry;
- int _exception_handler_offset;
- int _deopt_handler_offset;
- int _deopt_mh_handler_offset;
- int _stubs_offset;
- int _frame_size;
- // location in frame (offset for sp) that deopt can store the original
- // pc during a deopt.
- int _orig_pc_offset;
- int _unsafe_access;
-
- int _pc_desc_begin;
- int _scopes_begin;
- int _reloc_begin;
- int _exception_table_begin;
- int _nul_chk_table_begin;
- int _oopmap_begin;
- address at_offset(size_t offset) const { return ((address) this) + offset; }
-public:
- int code_size() const { return _code_size; }
- int frame_size() const { return _frame_size / HeapWordSize; }
- PcDesc *scopes_pcs_begin() const { return (PcDesc *) at_offset(_pc_desc_begin); }
- PcDesc *scopes_pcs_end() const { return (PcDesc *) at_offset(_scopes_begin); }
- address scopes_data_begin() const { return at_offset(_scopes_begin); }
- address scopes_data_end() const { return at_offset(_reloc_begin); }
- relocInfo* relocation_begin() const { return (relocInfo*) at_offset(_reloc_begin); }
- relocInfo* relocation_end() const { return (relocInfo*) at_offset(_exception_table_begin); }
- address handler_table_begin () const { return at_offset(_exception_table_begin); }
- address handler_table_end() const { return at_offset(_nul_chk_table_begin); }
-
- address nul_chk_table_begin() const { return at_offset(_nul_chk_table_begin); }
- address nul_chk_table_end() const { return at_offset(_oopmap_begin); }
-
- ImmutableOopMapSet* oopmap_set() const { return (ImmutableOopMapSet*) at_offset(_oopmap_begin); }
-
- address consts_begin() const { return at_offset(_size); }
- address consts_end() const { return at_offset(_size); }
- int stub_offset() const { return _stubs_offset; }
- int entry_offset() const { return _entry; }
- int verified_entry_offset() const { return _verified_entry; }
- int exception_handler_offset() const { return _exception_handler_offset; }
- int deopt_handler_offset() const { return _deopt_handler_offset; }
- int deopt_mh_handler_offset() const { return _deopt_mh_handler_offset; }
- int orig_pc_offset() const { return _orig_pc_offset; }
-
- int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
- int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
- bool has_unsafe_access() const { return _unsafe_access != 0; }
-
-};
-
-/*
- * Use this for AOTCompiledMethods since a lot of the fields in CodeBlob gets the same
- * value when they come from AOT. code_begin == content_begin, etc... */
-class AOTCompiledMethodLayout : public CodeBlobLayout {
-public:
- AOTCompiledMethodLayout(address code_begin, address code_end, address relocation_begin, address relocation_end) :
- CodeBlobLayout(
- code_begin, // code_begin
- code_end, // code_end
- code_begin, // content_begin
- code_end, // content_end
- code_end, // data_end
- relocation_begin, // relocation_begin
- relocation_end
- ) {
- }
-};
-
-class AOTCompiledMethod : public CompiledMethod, public CHeapObj {
-private:
- address _code;
- aot_metadata* _meta;
- Metadata** _metadata_got;
- jlong* _state_adr; // Address of cell to indicate aot method state (in_use or not_entrant)
- AOTCodeHeap* _heap; // code heap which has this method
- const char* _name; // For stub: "AOT Stub" for stub,
- // For nmethod: "Ljava/lang/ThreadGroup;addUnstarted()V"
- const int _metadata_size; // size of _metadata_got
- const int _aot_id;
- const int _method_index;
- oop _oop; // method()->method_holder()->klass_holder()
-
- address* orig_pc_addr(const frame* fr);
- bool make_not_entrant_helper(int new_state);
-
- public:
- using CHeapObj::operator new;
- using CHeapObj::operator delete;
-
- int method_index() const { return _method_index; }
- void set_oop(oop o) { _oop = o; }
-
- AOTCompiledMethod(address code, Method* method, aot_metadata* meta, address metadata_got, int metadata_size, jlong* state_adr, AOTCodeHeap* heap, const char* name, int method_index, int aot_id) :
- CompiledMethod(method, name, compiler_jvmci, // AOT code is generated by JVMCI compiler
- AOTCompiledMethodLayout(code, code + meta->code_size(), (address) meta->relocation_begin(), (address) meta->relocation_end()),
- 0 /* frame_complete_offset */, meta->frame_size() /* frame_size */, meta->oopmap_set(), false /* caller_must_gc_arguments */),
- _code(code),
- _meta(meta),
- _metadata_got((Metadata**) metadata_got),
- _state_adr(state_adr),
- _heap(heap),
- _name(name),
- _metadata_size(metadata_size),
- _aot_id(aot_id),
- _method_index(method_index) {
-
- _is_far_code = CodeCache::is_far_target(code) ||
- CodeCache::is_far_target(code + meta->code_size());
- _exception_cache = NULL;
-
- _scopes_data_begin = (address) _meta->scopes_data_begin();
- _deopt_handler_begin = (address) _code + _meta->deopt_handler_offset();
- if (_meta->deopt_mh_handler_offset() != -1) {
- _deopt_mh_handler_begin = (address) _code + _meta->deopt_mh_handler_offset();
- } else {
- _deopt_mh_handler_begin = (address) this;
- }
-
- _pc_desc_container.reset_to(scopes_pcs_begin());
-
- // Mark the AOTCompiledMethod as in_use
- *_state_adr = nmethod::in_use;
- set_has_unsafe_access(_meta->has_unsafe_access());
- _oop = NULL;
- }
-
- virtual bool is_aot() const { return true; }
- virtual bool is_runtime_stub() const { return is_aot_runtime_stub(); }
-
- virtual bool is_compiled() const { return !is_aot_runtime_stub(); }
-
- virtual bool is_locked_by_vm() const { return false; }
-
- int state() const { return *_state_adr; }
-
- // Non-virtual for speed
- bool _is_alive() const { return state() < unloaded; }
-
- virtual bool is_zombie() const { return state() == zombie; }
- virtual bool is_unloaded() const { return state() == unloaded; }
- virtual bool is_not_entrant() const { return state() == not_entrant ||
- state() == not_used; }
- virtual bool is_alive() const { return _is_alive(); }
- virtual bool is_in_use() const { return state() == in_use; }
-
- virtual bool is_unloading() { return false; }
-
- address exception_begin() const { return (address) _code + _meta->exception_handler_offset(); }
-
- virtual const char* name() const { return _name; }
-
- virtual int compile_id() const { return _aot_id; }
-
- void print_on(outputStream* st) const;
- void print_on(outputStream* st, const char* msg) const;
- void print() const;
-
- virtual void print_value_on(outputStream *stream) const;
- virtual void print_block_comment(outputStream *stream, address block_begin) const { }
- virtual void verify() {}
-
- virtual int comp_level() const { return CompLevel_aot; }
- virtual address verified_entry_point() const { return _code + _meta->verified_entry_offset(); }
- virtual address inline_entry_point() const { return _code + _meta->verified_entry_offset(); }
- virtual address verified_inline_entry_point() const { return _code + _meta->verified_entry_offset(); }
- virtual address verified_inline_ro_entry_point() const { return _code + _meta->verified_entry_offset(); }
- virtual void log_identity(xmlStream* stream) const;
- virtual void log_state_change() const;
- virtual bool make_entrant();
- virtual bool make_not_entrant() { return make_not_entrant_helper(not_entrant); }
- virtual bool make_not_used() { return make_not_entrant_helper(not_used); }
- virtual address entry_point() const { return _code + _meta->entry_offset(); }
- virtual bool make_zombie() { ShouldNotReachHere(); return false; }
- virtual bool is_osr_method() const { return false; }
- virtual int osr_entry_bci() const { ShouldNotReachHere(); return -1; }
- // AOT compiled methods do not get into zombie state
- virtual bool can_convert_to_zombie() { return false; }
-
- virtual bool is_dependent_on_method(Method* dependee) { return true; }
-
- virtual void clear_inline_caches();
-
- virtual void print_pcs() {}
-
- virtual address scopes_data_end() const { return _meta->scopes_data_end(); }
-
- virtual oop oop_at(int index) const;
- virtual Metadata* metadata_at(int index) const;
-
- virtual PcDesc* scopes_pcs_begin() const { return _meta->scopes_pcs_begin(); }
- virtual PcDesc* scopes_pcs_end() const { return _meta->scopes_pcs_end(); }
-
- virtual address handler_table_begin() const { return _meta->handler_table_begin(); }
- virtual address handler_table_end() const { return _meta->handler_table_end(); }
-
- virtual address nul_chk_table_begin() const { return _meta->nul_chk_table_begin(); }
- virtual address nul_chk_table_end() const { return _meta->nul_chk_table_end(); }
-
- virtual address consts_begin() const { return _meta->consts_begin(); }
- virtual address consts_end() const { return _meta->consts_end(); }
-
- virtual address stub_begin() const { return code_begin() + _meta->stub_offset(); }
- virtual address stub_end() const { return code_end(); }
-
- virtual oop* oop_addr_at(int index) const { ShouldNotReachHere(); return NULL; }
- virtual Metadata** metadata_addr_at(int index) const { ShouldNotReachHere(); return NULL; }
-
- // Accessor/mutator for the original pc of a frame before a frame was deopted.
- address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
- void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
-
- virtual void metadata_do(MetadataClosure* f);
-
- bool metadata_got_contains(Metadata **p) {
- return p >= &_metadata_got[0] && p < &_metadata_got[_metadata_size];
- }
-
- Metadata** metadata_begin() const { return &_metadata_got[0] ; }
- Metadata** metadata_end() const { return &_metadata_got[_metadata_size] ; }
- const char* compile_kind() const { return "AOT"; }
-
- int get_state() const {
- return (int) (*_state_adr);
- }
-
- // inlined and non-virtual for AOTCodeHeap::oops_do
- void do_oops(OopClosure* f) {
- assert(_is_alive(), "");
- if (_oop != NULL) {
- f->do_oop(&_oop);
- }
-#if 0
- metadata_oops_do(metadata_begin(), metadata_end(), f);
-#endif
- }
-
- virtual void do_unloading(bool unloading_occurred);
-
-protected:
- // AOT compiled methods are not flushed
- void flush() {};
-
- NativeCallWrapper* call_wrapper_at(address call) const;
- NativeCallWrapper* call_wrapper_before(address return_pc) const;
- address call_instruction_address(address pc) const;
-
- CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
- CompiledStaticCall* compiledStaticCall_at(address addr) const;
- CompiledStaticCall* compiledStaticCall_before(address addr) const;
-private:
- bool is_aot_runtime_stub() const { return _method == NULL; }
-};
-
-class PltNativeCallWrapper: public NativeCallWrapper {
-private:
- NativePltCall* _call;
-
-public:
- PltNativeCallWrapper(NativePltCall* call) : _call(call) {}
-
- virtual address destination() const { return _call->destination(); }
- virtual address instruction_address() const { return _call->instruction_address(); }
- virtual address next_instruction_address() const { return _call->next_instruction_address(); }
- virtual address return_address() const { return _call->return_address(); }
- virtual address get_resolve_call_stub(bool is_optimized) const { return _call->plt_resolve_call(); }
- virtual void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); }
- virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info);
- virtual void verify() const { _call->verify(); }
- virtual void verify_resolve_call(address dest) const;
-
- virtual bool is_call_to_interpreted(address dest) const { return (dest == _call->plt_c2i_stub()); }
- // TODO: assume for now that patching of aot code (got cell) is safe.
- virtual bool is_safe_for_patching() const { return true; }
-
- virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const;
-
- virtual void *get_data(NativeInstruction* instruction) const {
- return (void*)((NativeLoadGot*) instruction)->data();
- }
-
- virtual void set_data(NativeInstruction* instruction, intptr_t data) {
- ((NativeLoadGot*) instruction)->set_data(data);
- }
-};
-
-#endif // SHARE_AOT_AOTCOMPILEDMETHOD_HPP
diff --git a/src/hotspot/share/aot/aotLoader.cpp b/src/hotspot/share/aot/aotLoader.cpp
deleted file mode 100644
index ada9ddee89b..00000000000
--- a/src/hotspot/share/aot/aotLoader.cpp
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "aot/aotCodeHeap.hpp"
-#include "aot/aotLoader.inline.hpp"
-#include "classfile/javaClasses.hpp"
-#include "jvm.h"
-#include "jvmci/jvmci.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "oops/compressedOops.hpp"
-#include "oops/method.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/os.inline.hpp"
-#include "runtime/registerMap.hpp"
-#include "runtime/timerTrace.hpp"
-
-GrowableArray* AOTLoader::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray (2, mtCode);
-GrowableArray* AOTLoader::_libraries = new(ResourceObj::C_HEAP, mtCode) GrowableArray (2, mtCode);
-
-// Iterate over all AOT CodeHeaps
-#define FOR_ALL_AOT_HEAPS(heap) for (GrowableArrayIterator heap = heaps()->begin(); heap != heaps()->end(); ++heap)
-// Iterate over all AOT Libraries
-#define FOR_ALL_AOT_LIBRARIES(lib) for (GrowableArrayIterator lib = libraries()->begin(); lib != libraries()->end(); ++lib)
-
-void AOTLoader::load_for_klass(InstanceKlass* ik, Thread* thread) {
- if (ik->is_hidden() || ik->is_unsafe_anonymous()) {
- // don't even bother
- return;
- }
- if (UseAOT) {
- // We allow hotswap to be enabled after the onload phase, but not breakpoints
- assert(!JvmtiExport::can_post_breakpoint(), "AOT should have been disabled.");
- FOR_ALL_AOT_HEAPS(heap) {
- (*heap)->load_klass_data(ik, thread);
- }
- }
-}
-
-uint64_t AOTLoader::get_saved_fingerprint(InstanceKlass* ik) {
- assert(UseAOT, "called only when AOT is enabled");
- if (ik->is_hidden() || ik->is_unsafe_anonymous()) {
- // don't even bother
- return 0;
- }
- FOR_ALL_AOT_HEAPS(heap) {
- AOTKlassData* klass_data = (*heap)->find_klass(ik);
- if (klass_data != NULL) {
- return klass_data->_fingerprint;
- }
- }
- return 0;
-}
-
-void AOTLoader::oops_do(OopClosure* f) {
- if (UseAOT) {
- FOR_ALL_AOT_HEAPS(heap) {
- (*heap)->oops_do(f);
- }
- }
-}
-
-void AOTLoader::metadata_do(MetadataClosure* f) {
- if (UseAOT) {
- FOR_ALL_AOT_HEAPS(heap) {
- (*heap)->metadata_do(f);
- }
- }
-}
-
-void AOTLoader::mark_evol_dependent_methods(InstanceKlass* dependee) {
- if (UseAOT) {
- FOR_ALL_AOT_HEAPS(heap) {
- (*heap)->mark_evol_dependent_methods(dependee);
- }
- }
-}
-
-/**
- * List of core modules for which we search for shared libraries.
- */
-static const char* modules[] = {
- "java.base",
- "java.logging",
- "jdk.compiler",
- "jdk.internal.vm.ci",
- "jdk.internal.vm.compiler"
-};
-
-void AOTLoader::initialize() {
- TraceTime timer("AOT initialization", TRACETIME_LOG(Info, aot, startuptime));
-
- if (FLAG_IS_DEFAULT(UseAOT) && AOTLibrary != NULL) {
- // Don't need to set UseAOT on command line when AOTLibrary is specified
- FLAG_SET_DEFAULT(UseAOT, true);
- }
- if (UseAOT) {
- // EagerInitialization is not compatible with AOT
- if (EagerInitialization) {
- if (PrintAOT) {
- warning("EagerInitialization is not compatible with AOT (switching AOT off)");
- }
- FLAG_SET_DEFAULT(UseAOT, false);
- return;
- }
-
- if (JvmtiExport::can_post_breakpoint()) {
- if (PrintAOT) {
- warning("JVMTI capability to post breakpoint is not compatible with AOT (switching AOT off)");
- }
- FLAG_SET_DEFAULT(UseAOT, false);
- return;
- }
-
- // -Xint is not compatible with AOT
- if (Arguments::is_interpreter_only()) {
- if (PrintAOT) {
- warning("-Xint is not compatible with AOT (switching AOT off)");
- }
- FLAG_SET_DEFAULT(UseAOT, false);
- return;
- }
-
-#ifdef _WINDOWS
- const char pathSep = ';';
-#else
- const char pathSep = ':';
-#endif
-
- // Scan the AOTLibrary option.
- if (AOTLibrary != NULL) {
- const int len = (int)strlen(AOTLibrary);
- char* cp = NEW_C_HEAP_ARRAY(char, len+1, mtCode);
- memcpy(cp, AOTLibrary, len);
- cp[len] = '\0';
- char* end = cp + len;
- while (cp < end) {
- const char* name = cp;
- while ((*cp) != '\0' && (*cp) != '\n' && (*cp) != ',' && (*cp) != pathSep) cp++;
- cp[0] = '\0'; // Terminate name
- cp++;
- load_library(name, true);
- }
- }
-
- // Load well-know AOT libraries from Java installation directory.
- const char* home = Arguments::get_java_home();
- const char* file_separator = os::file_separator();
-
- for (int i = 0; i < (int) (sizeof(modules) / sizeof(const char*)); i++) {
- char library[JVM_MAXPATHLEN];
- jio_snprintf(library, sizeof(library), "%s%slib%slib%s%s%s%s", home, file_separator, file_separator, modules[i], UseCompressedOops ? "-coop" : "", UseG1GC ? "" : "-nong1", os::dll_file_extension());
- load_library(library, false);
- }
- }
-}
-
-void AOTLoader::universe_init() {
- if (UseAOT && libraries_count() > 0) {
- // Shifts are static values which initialized by 0 until java heap initialization.
- // AOT libs are loaded before heap initialized so shift values are not set.
- // It is okay since ObjectAlignmentInBytes flag which defines shifts value is set before AOT libs are loaded.
- // AOT sets shift values during heap and metaspace initialization.
- // Check shifts value to make sure thay did not change.
- if (UseCompressedOops && AOTLib::narrow_oop_shift_initialized()) {
- int oop_shift = CompressedOops::shift();
- FOR_ALL_AOT_LIBRARIES(lib) {
- (*lib)->verify_flag((*lib)->config()->_narrowOopShift, oop_shift, "CompressedOops::shift");
- }
- if (UseCompressedClassPointers) { // It is set only if UseCompressedOops is set
- int klass_shift = CompressedKlassPointers::shift();
- FOR_ALL_AOT_LIBRARIES(lib) {
- (*lib)->verify_flag((*lib)->config()->_narrowKlassShift, klass_shift, "CompressedKlassPointers::shift");
- }
- }
- }
- // Create heaps for all valid libraries
- FOR_ALL_AOT_LIBRARIES(lib) {
- if ((*lib)->is_valid()) {
- AOTCodeHeap* heap = new AOTCodeHeap(*lib);
- {
- MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- add_heap(heap);
- CodeCache::add_heap(heap);
- }
- } else {
- // Unload invalid libraries
- os::dll_unload((*lib)->dl_handle());
- }
- }
- }
- if (heaps_count() == 0) {
- if (FLAG_IS_DEFAULT(UseAOT)) {
- FLAG_SET_DEFAULT(UseAOT, false);
- }
- }
-}
-
-// Set shift value for compressed oops and classes based on first AOT library config.
-// AOTLoader::universe_init(), which is called later, will check the shift value again to make sure nobody change it.
-// This code is not executed during CDS dump because it runs in Interpreter mode and AOT is disabled in this mode.
-
-void AOTLoader::set_narrow_oop_shift() {
- // This method is called from Universe::initialize_heap().
- if (UseAOT && libraries_count() > 0 &&
- UseCompressedOops && AOTLib::narrow_oop_shift_initialized()) {
- if (CompressedOops::shift() == 0) {
- // 0 is valid shift value for small heap but we can safely increase it
- // at this point when nobody used it yet.
- CompressedOops::set_shift(AOTLib::narrow_oop_shift());
- }
- }
-}
-
-void AOTLoader::set_narrow_klass_shift() {
- // This method is called from Metaspace::set_narrow_klass_base_and_shift().
- if (UseAOT && libraries_count() > 0 &&
- UseCompressedOops && AOTLib::narrow_oop_shift_initialized() &&
- UseCompressedClassPointers) {
- if (CompressedKlassPointers::shift() == 0) {
- CompressedKlassPointers::set_shift(AOTLib::narrow_klass_shift());
- }
- }
-}
-
-void AOTLoader::load_library(const char* name, bool exit_on_error) {
- // Skip library if a library with the same name is already loaded.
- const int file_separator = *os::file_separator();
- const char* start = strrchr(name, file_separator);
- const char* new_name = (start == NULL) ? name : (start + 1);
- FOR_ALL_AOT_LIBRARIES(lib) {
- const char* lib_name = (*lib)->name();
- start = strrchr(lib_name, file_separator);
- const char* old_name = (start == NULL) ? lib_name : (start + 1);
- if (strcmp(old_name, new_name) == 0) {
- if (PrintAOT) {
- warning("AOT library %s is already loaded as %s.", name, lib_name);
- }
- return;
- }
- }
- char ebuf[1024];
- void* handle = os::dll_load(name, ebuf, sizeof ebuf);
- if (handle == NULL) {
- if (exit_on_error) {
- tty->print_cr("error opening file: %s", ebuf);
- vm_exit(1);
- }
- return;
- }
- const int dso_id = libraries_count() + 1;
- AOTLib* lib = new AOTLib(handle, name, dso_id);
- if (!lib->is_valid()) {
- delete lib;
- os::dll_unload(handle);
- return;
- }
- add_library(lib);
-}
-
-#ifndef PRODUCT
-void AOTLoader::print_statistics() {
- { ttyLocker ttyl;
- tty->print_cr("--- AOT Statistics ---");
- tty->print_cr("AOT libraries loaded: %d", heaps_count());
- AOTCodeHeap::print_statistics();
- }
-}
-#endif
-
-
-bool AOTLoader::reconcile_dynamic_invoke(InstanceKlass* holder, int index, Method* adapter_method, Klass* appendix_klass) {
- if (!UseAOT) {
- return true;
- }
- JavaThread* thread = JavaThread::current();
- ResourceMark rm(thread);
- RegisterMap map(thread, false);
- frame caller_frame = thread->last_frame().sender(&map); // Skip stub
- CodeBlob* caller_cb = caller_frame.cb();
- guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
- CompiledMethod* cm = caller_cb->as_compiled_method();
-
- if (!cm->is_aot()) {
- return true;
- }
- AOTCompiledMethod* aot = (AOTCompiledMethod*)cm;
-
- AOTCodeHeap* caller_heap = NULL;
- FOR_ALL_AOT_HEAPS(heap) {
- if ((*heap)->contains_blob(aot)) {
- caller_heap = *heap;
- break;
- }
- }
- guarantee(caller_heap != NULL, "CodeHeap not found");
- bool success = caller_heap->reconcile_dynamic_invoke(aot, holder, index, adapter_method, appendix_klass);
- vmassert(success || thread->last_frame().sender(&map).is_deoptimized_frame(), "caller not deoptimized on failure");
- return success;
-}
-
-
-// This should be called very early during startup before any of the AOTed methods that use boxes can deoptimize.
-// Deoptimization machinery expects the caches to be present and populated.
-void AOTLoader::initialize_box_caches(TRAPS) {
- if (!UseAOT || libraries_count() == 0) {
- return;
- }
- TraceTime timer("AOT initialization of box caches", TRACETIME_LOG(Info, aot, startuptime));
- JVMCI::ensure_box_caches_initialized(CHECK);
-}
diff --git a/src/hotspot/share/aot/aotLoader.hpp b/src/hotspot/share/aot/aotLoader.hpp
deleted file mode 100644
index df6dbed2f08..00000000000
--- a/src/hotspot/share/aot/aotLoader.hpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_AOT_AOTLOADER_HPP
-#define SHARE_AOT_AOTLOADER_HPP
-
-#include "runtime/globals_extension.hpp"
-#include "runtime/handles.hpp"
-
-class AOTCodeHeap;
-class AOTCompiledMethod;
-class AOTLib;
-class CodeBlob;
-template class GrowableArray;
-class InstanceKlass;
-class JavaThread;
-class Metadata;
-class OopClosure;
-
-class AOTLoader {
-private:
-#if INCLUDE_AOT
- static GrowableArray* _heaps;
- static GrowableArray* _libraries;
-#endif
- static void load_library(const char* name, bool exit_on_error);
-
-public:
-#if INCLUDE_AOT
- static GrowableArray* heaps();
- static GrowableArray* libraries();
- static int heaps_count();
- static int libraries_count();
- static void add_heap(AOTCodeHeap *heap);
- static void add_library(AOTLib *lib);
-#endif
- static void initialize() NOT_AOT({ FLAG_SET_ERGO(UseAOT, false); });
-
- static void universe_init() NOT_AOT_RETURN;
- static void set_narrow_oop_shift() NOT_AOT_RETURN;
- static void set_narrow_klass_shift() NOT_AOT_RETURN;
- static void load_for_klass(InstanceKlass* ik, Thread* thread) NOT_AOT_RETURN;
- static uint64_t get_saved_fingerprint(InstanceKlass* ik) NOT_AOT({ return 0; });
- static void oops_do(OopClosure* f) NOT_AOT_RETURN;
- static void metadata_do(MetadataClosure* f) NOT_AOT_RETURN;
- static void mark_evol_dependent_methods(InstanceKlass* dependee) NOT_AOT_RETURN;
- static void initialize_box_caches(TRAPS) NOT_AOT_RETURN;
-
- NOT_PRODUCT( static void print_statistics() NOT_AOT_RETURN; )
-
- static bool reconcile_dynamic_invoke(InstanceKlass* holder, int index, Method* adapter_method, Klass *appendix_klass) NOT_AOT({ return true; });
-};
-
-#endif // SHARE_AOT_AOTLOADER_HPP
diff --git a/src/hotspot/share/aot/aotLoader.inline.hpp b/src/hotspot/share/aot/aotLoader.inline.hpp
deleted file mode 100644
index 7044e3a4aec..00000000000
--- a/src/hotspot/share/aot/aotLoader.inline.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_AOT_AOTLOADER_INLINE_HPP
-#define SHARE_AOT_AOTLOADER_INLINE_HPP
-
-#include "aot/aotLoader.hpp"
-#include "utilities/growableArray.hpp"
-
-#if INCLUDE_AOT
-GrowableArray* AOTLoader::heaps() { return _heaps; }
-GrowableArray* AOTLoader::libraries() { return _libraries; }
-int AOTLoader::heaps_count() { return heaps()->length(); }
-int AOTLoader::libraries_count() { return libraries()->length(); }
-void AOTLoader::add_heap(AOTCodeHeap *heap) { heaps()->append(heap); }
-void AOTLoader::add_library(AOTLib *lib) { libraries()->append(lib); }
-#endif
-
-#endif // SHARE_AOT_AOTLOADER_INLINE_HPP
diff --git a/src/hotspot/share/aot/compiledIC_aot.cpp b/src/hotspot/share/aot/compiledIC_aot.cpp
deleted file mode 100644
index 5961298e7be..00000000000
--- a/src/hotspot/share/aot/compiledIC_aot.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-
-#include "aot/compiledIC_aot.hpp"
-
-bool CompiledPltStaticCall::is_call_to_interpreted() const {
- // It is a call to interpreted, if it calls to a stub. Hence, the destination
- // must be in the stub part of the nmethod that contains the call
- return destination() == _call->plt_c2i_stub();
-}
-
-address CompiledPltStaticCall::find_stub() {
- // It is static NativePltCall. Return c2i stub address.
- return _call->plt_c2i_stub();
-}
diff --git a/src/hotspot/share/aot/compiledIC_aot.hpp b/src/hotspot/share/aot/compiledIC_aot.hpp
deleted file mode 100644
index e46fc97d117..00000000000
--- a/src/hotspot/share/aot/compiledIC_aot.hpp
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_AOT_COMPILEDIC_AOT_HPP
-#define SHARE_AOT_COMPILEDIC_AOT_HPP
-
-#include "code/compiledIC.hpp"
-#include "code/nativeInst.hpp"
-#include "interpreter/linkResolver.hpp"
-#include "oops/compiledICHolder.hpp"
-
-class CompiledPltStaticCall: public CompiledStaticCall {
- friend class CompiledIC;
- friend class PltNativeCallWrapper;
-
- // Also used by CompiledIC
- void set_to_interpreted(const methodHandle& callee, address entry);
-
- address instruction_address() const { return _call->instruction_address(); }
- void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); }
-
- NativePltCall* _call;
-
- CompiledPltStaticCall(NativePltCall* call) : _call(call) {}
-
- public:
-
- inline static CompiledPltStaticCall* before(address return_addr) {
- CompiledPltStaticCall* st = new CompiledPltStaticCall(nativePltCall_before(return_addr));
- st->verify();
- return st;
- }
-
- static inline CompiledPltStaticCall* at(address native_call) {
- CompiledPltStaticCall* st = new CompiledPltStaticCall(nativePltCall_at(native_call));
- st->verify();
- return st;
- }
-
- static inline CompiledPltStaticCall* at(Relocation* call_site) {
- return at(call_site->addr());
- }
-
- // Delegation
- address destination() const { return _call->destination(); }
-
- virtual bool is_call_to_interpreted() const;
-
- // Stub support
- address find_stub();
- static void set_stub_to_clean(static_stub_Relocation* static_stub);
-
- // Misc.
- void print() PRODUCT_RETURN;
- void verify() PRODUCT_RETURN;
-
- protected:
- virtual address resolve_call_stub() const { return _call->plt_resolve_call(); }
- virtual void set_to_far(const methodHandle& callee, address entry) { set_to_compiled(entry); }
- virtual const char* name() const { return "CompiledPltStaticCall"; }
-};
-
-#endif // SHARE_AOT_COMPILEDIC_AOT_HPP
diff --git a/src/hotspot/share/asm/codeBuffer.hpp b/src/hotspot/share/asm/codeBuffer.hpp
index b81974ed51c..770439d2996 100644
--- a/src/hotspot/share/asm/codeBuffer.hpp
+++ b/src/hotspot/share/asm/codeBuffer.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -413,10 +413,6 @@ class CodeBuffer: public StackObj {
address _last_insn; // used to merge consecutive memory barriers, loads or stores.
-#if INCLUDE_AOT
- bool _immutable_PIC;
-#endif
-
#ifndef PRODUCT
CodeStrings _code_strings;
bool _collect_comments; // Indicate if we need to collect block comments at all.
@@ -433,9 +429,6 @@ class CodeBuffer: public StackObj {
_oop_recorder = NULL;
_overflow_arena = NULL;
_last_insn = NULL;
-#if INCLUDE_AOT
- _immutable_PIC = false;
-#endif
#ifndef PRODUCT
_decode_begin = NULL;
@@ -682,13 +675,6 @@ class CodeBuffer: public StackObj {
// Log a little info about section usage in the CodeBuffer
void log_section_sizes(const char* name);
-#if INCLUDE_AOT
- // True if this is a code buffer used for immutable PIC, i.e. AOT
- // compilation.
- bool immutable_PIC() { return _immutable_PIC; }
- void set_immutable_PIC(bool pic) { _immutable_PIC = pic; }
-#endif
-
#ifndef PRODUCT
public:
// Printing / Decoding
diff --git a/src/hotspot/share/c1/c1_Compilation.hpp b/src/hotspot/share/c1/c1_Compilation.hpp
index a1c5d65ae51..4c518fd48e2 100644
--- a/src/hotspot/share/c1/c1_Compilation.hpp
+++ b/src/hotspot/share/c1/c1_Compilation.hpp
@@ -272,7 +272,7 @@ class Compilation: public StackObj {
// will compilation make optimistic assumptions that might lead to
// deoptimization and that the runtime will account for?
bool is_optimistic() {
- return CompilerConfig::is_c1_only_no_aot_or_jvmci() && !is_profiling() &&
+ return CompilerConfig::is_c1_only_no_jvmci() && !is_profiling() &&
(RangeCheckElimination || UseLoopInvariantCodeMotion) &&
method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
}
diff --git a/src/hotspot/share/c1/c1_Compiler.cpp b/src/hotspot/share/c1/c1_Compiler.cpp
index 96db6740862..e0fbafaac14 100644
--- a/src/hotspot/share/c1/c1_Compiler.cpp
+++ b/src/hotspot/share/c1/c1_Compiler.cpp
@@ -229,6 +229,8 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) {
break;
case vmIntrinsics::_getObjectSize:
break;
+ case vmIntrinsics::_blackhole:
+ break;
default:
return false; // Intrinsics not on the previous list are not available.
}
diff --git a/src/hotspot/share/c1/c1_GraphBuilder.cpp b/src/hotspot/share/c1/c1_GraphBuilder.cpp
index 851d3dc6bb9..0a8211e35e1 100644
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp
@@ -3812,7 +3812,7 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, bool ignore_r
// handle intrinsics
if (callee->intrinsic_id() != vmIntrinsics::_none &&
- (CheckIntrinsics ? callee->intrinsic_candidate() : true)) {
+ callee->check_intrinsic_candidate()) {
if (try_inline_intrinsics(callee, ignore_return)) {
print_inlining(callee, "intrinsic");
if (callee->has_reserved_stack_access()) {
diff --git a/src/hotspot/share/c1/c1_LIRAssembler.cpp b/src/hotspot/share/c1/c1_LIRAssembler.cpp
index 491a1733176..746dbf0547e 100644
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp
@@ -491,7 +491,7 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
#if defined(IA32) && defined(COMPILER2)
// C2 leave fpu stack dirty clean it
- if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
+ if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
int i;
for ( i = 1; i <= 7 ; i++ ) {
ffree(i);
diff --git a/src/hotspot/share/c1/c1_LIRAssembler.hpp b/src/hotspot/share/c1/c1_LIRAssembler.hpp
index 7ed93cf5663..6297a6c290a 100644
--- a/src/hotspot/share/c1/c1_LIRAssembler.hpp
+++ b/src/hotspot/share/c1/c1_LIRAssembler.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -274,11 +274,7 @@ class LIR_Assembler: public CompilationResourceObj {
public:
static int call_stub_size() {
- if (UseAOT) {
- return _call_stub_size + _call_aot_stub_size;
- } else {
- return _call_stub_size;
- }
+ return _call_stub_size;
}
static int exception_handler_size() {
diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp
index 8427c0f0211..699191ed29b 100644
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp
@@ -465,7 +465,7 @@ void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitI
/* C2 relies on constant pool entries being resolved (ciTypeFlow), so if tiered compilation
* is active and the class hasn't yet been resolved we need to emit a patch that resolves
* the class. */
- if ((!CompilerConfig::is_c1_only_no_aot_or_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
+ if ((!CompilerConfig::is_c1_only_no_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
assert(info != NULL, "info must be set if class is not loaded");
__ klass2reg_patch(NULL, r, info);
} else {
@@ -666,7 +666,7 @@ void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, L
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
- } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_aot_or_jvmci() && new_instance->is_unresolved())) {
+ } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
}
}
@@ -2043,15 +2043,6 @@ LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType ty
}
}
-LIR_Opr LIRGenerator::access_resolve(DecoratorSet decorators, LIR_Opr obj) {
- // Use stronger ACCESS_WRITE|ACCESS_READ by default.
- if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
- decorators |= ACCESS_READ | ACCESS_WRITE;
- }
-
- return _barrier_set->resolve(this, decorators, obj);
-}
-
bool LIRGenerator::inline_type_field_access_prolog(AccessField* x, CodeEmitInfo* info) {
ciField* field = x->field();
assert(!field->is_flattened(), "Flattened field access should have been expanded");
@@ -2190,12 +2181,11 @@ void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
if (GenerateRangeChecks) {
CodeEmitInfo* info = state_for(x);
CodeStub* stub = new RangeCheckStub(info, index.result());
- LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result());
if (index.result()->is_constant()) {
- cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
+ cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
__ branch(lir_cond_belowEqual, stub);
} else {
- cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf_obj,
+ cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
java_nio_Buffer::limit_offset(), T_INT, info);
__ branch(lir_cond_aboveEqual, stub);
}
@@ -3711,6 +3701,10 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
do_vectorizedMismatch(x);
break;
+ case vmIntrinsics::_blackhole:
+ do_blackhole(x);
+ break;
+
default: ShouldNotReachHere(); break;
}
}
@@ -4177,6 +4171,15 @@ void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
}
}
+void LIRGenerator::do_blackhole(Intrinsic *x) {
+ assert(!x->has_receiver(), "Should have been checked before: only static methods here");
+ for (int c = 0; c < x->number_of_arguments(); c++) {
+ // Load the argument
+ LIRItem vitem(x->argument_at(c), this);
+ vitem.load_item();
+ // ...and leave it unused.
+ }
+}
LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
LIRItemList args(1);
diff --git a/src/hotspot/share/c1/c1_LIRGenerator.hpp b/src/hotspot/share/c1/c1_LIRGenerator.hpp
index 0699308622d..ee8c1c29d08 100644
--- a/src/hotspot/share/c1/c1_LIRGenerator.hpp
+++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -268,6 +268,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_update_CRC32(Intrinsic* x);
void do_update_CRC32C(Intrinsic* x);
void do_vectorizedMismatch(Intrinsic* x);
+ void do_blackhole(Intrinsic* x);
bool inline_type_field_access_prolog(AccessField* x, CodeEmitInfo* info);
void access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item, ciField* field = NULL, int offset = 0);
@@ -317,8 +318,6 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& value);
- LIR_Opr access_resolve(DecoratorSet decorators, LIR_Opr obj);
-
// These need to guarantee JMM volatile semantics are preserved on each platform
// and requires one implementation per architecture.
LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp
index e375ca66479..559611fde39 100644
--- a/src/hotspot/share/c1/c1_Runtime1.cpp
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp
@@ -305,12 +305,6 @@ const char* Runtime1::name_for_address(address entry) {
if (entry == entry_for((StubID)id)) return name_for((StubID)id);
}
- BarrierSetC1* bsc1 = BarrierSet::barrier_set()->barrier_set_c1();
- const char* name = bsc1->rtcall_name_for_address(entry);
- if (name != NULL) {
- return name;
- }
-
#define FUNCTION_CASE(a, f) \
if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f
diff --git a/src/hotspot/share/memory/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp
similarity index 98%
rename from src/hotspot/share/memory/archiveBuilder.cpp
rename to src/hotspot/share/cds/archiveBuilder.cpp
index bb0d7ec23a3..fe187073fa7 100644
--- a/src/hotspot/share/memory/archiveBuilder.cpp
+++ b/src/hotspot/share/cds/archiveBuilder.cpp
@@ -23,6 +23,11 @@
*/
#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/archiveUtils.hpp"
+#include "cds/cppVtables.hpp"
+#include "cds/dumpAllocStats.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
@@ -31,12 +36,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allStatic.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/archiveUtils.hpp"
-#include "memory/cppVtables.hpp"
-#include "memory/dumpAllocStats.hpp"
#include "memory/memRegion.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
@@ -259,6 +259,7 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
log_info(cds)(" instance classes = %5d", _num_instance_klasses);
log_info(cds)(" obj array classes = %5d", _num_obj_array_klasses);
log_info(cds)(" type array classes = %5d", _num_type_array_klasses);
+ log_info(cds)(" symbols = %5d", _symbols->length());
if (DumpSharedSpaces) {
// To ensure deterministic contents in the static archive, we need to ensure that
@@ -341,13 +342,13 @@ size_t ArchiveBuilder::estimate_archive_size() {
address ArchiveBuilder::reserve_buffer() {
size_t buffer_size = estimate_archive_size();
- ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), false);
+ ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size());
if (!rs.is_reserved()) {
log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
vm_direct_exit(0);
}
- // buffer_bottom is the lowest address of the 3 core regions (mc, rw, ro) when
+ // buffer_bottom is the lowest address of the 2 core regions (rw, ro) when
// we are copying the class metadata into the buffer.
address buffer_bottom = (address)rs.base();
log_info(cds)("Reserved output buffer space at " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
@@ -789,11 +790,11 @@ void ArchiveBuilder::relocate_klass_ptr(oop o) {
o->set_narrow_klass(nk);
}
-// RelocateBufferToRequested --- Relocate all the pointers in mc/rw/ro,
+// RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
// so that the archive can be mapped to the "requested" location without runtime relocation.
//
// - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
-// - ArchivePtrMarker::ptrmap() marks all the pointers in the mc/rw/ro regions
+// - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
// - Every pointer must have one of the following values:
// [a] NULL:
// No relocation is needed. Remove this pointer from ptrmap so we don't need to
@@ -896,12 +897,12 @@ void ArchiveBuilder::relocate_to_requested() {
// consistency, we log everything using runtime addresses.
class ArchiveBuilder::CDSMapLogger : AllStatic {
static intx buffer_to_runtime_delta() {
- // Translate the buffers used by the MC/RW/RO regions to their eventual (requested) locations
+ // Translate the buffers used by the RW/RO regions to their eventual (requested) locations
// at runtime.
return ArchiveBuilder::current()->buffer_to_requested_delta();
}
- // mc/rw/ro regions only
+ // rw/ro regions only
static void write_dump_region(const char* name, DumpRegion* region) {
address region_base = address(region->base());
address region_top = address(region->top());
diff --git a/src/hotspot/share/memory/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp
similarity index 98%
rename from src/hotspot/share/memory/archiveBuilder.hpp
rename to src/hotspot/share/cds/archiveBuilder.hpp
index cc59da01d96..6c69c61c6ed 100644
--- a/src/hotspot/share/memory/archiveBuilder.hpp
+++ b/src/hotspot/share/cds/archiveBuilder.hpp
@@ -22,11 +22,11 @@
*
*/
-#ifndef SHARE_MEMORY_ARCHIVEBUILDER_HPP
-#define SHARE_MEMORY_ARCHIVEBUILDER_HPP
+#ifndef SHARE_CDS_ARCHIVEBUILDER_HPP
+#define SHARE_CDS_ARCHIVEBUILDER_HPP
-#include "memory/archiveUtils.hpp"
-#include "memory/dumpAllocStats.hpp"
+#include "cds/archiveUtils.hpp"
+#include "cds/dumpAllocStats.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/array.hpp"
#include "oops/klass.hpp"
@@ -437,4 +437,4 @@ class ArchiveBuilder : public StackObj {
void report_out_of_space(const char* name, size_t needed_bytes);
};
-#endif // SHARE_MEMORY_ARCHIVEBUILDER_HPP
+#endif // SHARE_CDS_ARCHIVEBUILDER_HPP
diff --git a/src/hotspot/share/memory/archiveUtils.cpp b/src/hotspot/share/cds/archiveUtils.cpp
similarity index 97%
rename from src/hotspot/share/memory/archiveUtils.cpp
rename to src/hotspot/share/cds/archiveUtils.cpp
index 65d168ffd01..bd279d35777 100644
--- a/src/hotspot/share/memory/archiveUtils.cpp
+++ b/src/hotspot/share/cds/archiveUtils.cpp
@@ -23,17 +23,17 @@
*/
#include "precompiled.hpp"
-#include "classfile/classListParser.hpp"
-#include "classfile/classListWriter.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/archiveUtils.hpp"
+#include "cds/classListParser.hpp"
+#include "cds/classListWriter.hpp"
+#include "cds/dynamicArchive.hpp"
+#include "cds/filemap.hpp"
+#include "cds/heapShared.inline.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/bootstrapInfo.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/archiveUtils.hpp"
-#include "memory/dynamicArchive.hpp"
-#include "memory/filemap.hpp"
-#include "memory/heapShared.inline.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
diff --git a/src/hotspot/share/memory/archiveUtils.hpp b/src/hotspot/share/cds/archiveUtils.hpp
similarity index 98%
rename from src/hotspot/share/memory/archiveUtils.hpp
rename to src/hotspot/share/cds/archiveUtils.hpp
index e0dcc3dc4c2..cdb3d99ab53 100644
--- a/src/hotspot/share/memory/archiveUtils.hpp
+++ b/src/hotspot/share/cds/archiveUtils.hpp
@@ -22,13 +22,14 @@
*
*/
-#ifndef SHARE_MEMORY_ARCHIVEUTILS_HPP
-#define SHARE_MEMORY_ARCHIVEUTILS_HPP
+#ifndef SHARE_CDS_ARCHIVEUTILS_HPP
+#define SHARE_CDS_ARCHIVEUTILS_HPP
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/virtualspace.hpp"
#include "utilities/bitMap.hpp"
+#include "utilities/exceptions.hpp"
class BootstrapInfo;
class ReservedSpace;
@@ -226,4 +227,4 @@ class ArchiveUtils {
static void log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) NOT_CDS_RETURN;
};
-#endif // SHARE_MEMORY_ARCHIVEUTILS_HPP
+#endif // SHARE_CDS_ARCHIVEUTILS_HPP
diff --git a/src/hotspot/share/memory/archiveUtils.inline.hpp b/src/hotspot/share/cds/archiveUtils.inline.hpp
similarity index 91%
rename from src/hotspot/share/memory/archiveUtils.inline.hpp
rename to src/hotspot/share/cds/archiveUtils.inline.hpp
index 5e65e9ba278..81d5a9243d7 100644
--- a/src/hotspot/share/memory/archiveUtils.inline.hpp
+++ b/src/hotspot/share/cds/archiveUtils.inline.hpp
@@ -22,10 +22,10 @@
*
*/
-#ifndef SHARE_MEMORY_ARCHIVEUTILS_INLINE_HPP
-#define SHARE_MEMORY_ARCHIVEUTILS_INLINE_HPP
+#ifndef SHARE_CDS_ARCHIVEUTILS_INLINE_HPP
+#define SHARE_CDS_ARCHIVEUTILS_INLINE_HPP
-#include "memory/archiveUtils.hpp"
+#include "cds/archiveUtils.hpp"
#include "utilities/bitMap.inline.hpp"
inline bool SharedDataRelocator::do_bit(size_t offset) {
@@ -46,4 +46,4 @@ inline bool SharedDataRelocator::do_bit(size_t offset) {
return true; // keep iterating
}
-#endif // SHARE_MEMORY_ARCHIVEUTILS_INLINE_HPP
+#endif // SHARE_CDS_ARCHIVEUTILS_INLINE_HPP
diff --git a/src/hotspot/share/prims/cdsoffsets.cpp b/src/hotspot/share/cds/cdsoffsets.cpp
similarity index 93%
rename from src/hotspot/share/prims/cdsoffsets.cpp
rename to src/hotspot/share/cds/cdsoffsets.cpp
index 6111e47626d..9d93900aa67 100644
--- a/src/hotspot/share/prims/cdsoffsets.cpp
+++ b/src/hotspot/share/cds/cdsoffsets.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,14 +23,13 @@
*/
#include "precompiled.hpp"
-#include "utilities/macros.hpp"
-#if INCLUDE_CDS
+#include "cds/cdsoffsets.hpp"
+#include "cds/dynamicArchive.hpp"
+#include "cds/filemap.hpp"
#include "runtime/os.hpp"
-#include "memory/dynamicArchive.hpp"
-#include "memory/filemap.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
-#include "prims/cdsoffsets.hpp"
+#include "utilities/macros.hpp"
CDSOffsets::CDSOffsets(const char* name, int offset, CDSOffsets* next) {
_name = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtInternal);
@@ -76,4 +75,3 @@ void CDSOffsets::add_end(CDSOffsets* n) {
while(p && p->_next) { p = p->_next; }
p->_next = n;
}
-#endif // INCLUDE_CDS
diff --git a/src/hotspot/share/prims/cdsoffsets.hpp b/src/hotspot/share/cds/cdsoffsets.hpp
similarity index 87%
rename from src/hotspot/share/prims/cdsoffsets.hpp
rename to src/hotspot/share/cds/cdsoffsets.hpp
index 8fdda9dab66..4598404791b 100644
--- a/src/hotspot/share/prims/cdsoffsets.hpp
+++ b/src/hotspot/share/cds/cdsoffsets.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,8 +22,10 @@
*
*/
-#ifndef SHARE_PRIMS_CDSOFFSETS_HPP
-#define SHARE_PRIMS_CDSOFFSETS_HPP
+#ifndef SHARE_CDS_CDSOFFSETS_HPP
+#define SHARE_CDS_CDSOFFSETS_HPP
+
+#include "memory/allocation.hpp"
class CDSOffsets: public CHeapObj {
private:
@@ -42,4 +44,4 @@ class CDSOffsets: public CHeapObj {
static int find_offset(const char* name);
};
-#endif // SHARE_PRIMS_CDSOFFSETS_HPP
+#endif // SHARE_CDS_CDSOFFSETS_HPP
diff --git a/src/hotspot/share/classfile/classListParser.cpp b/src/hotspot/share/cds/classListParser.cpp
similarity index 99%
rename from src/hotspot/share/classfile/classListParser.cpp
rename to src/hotspot/share/cds/classListParser.cpp
index c7d784e9db9..d9d0a545b1f 100644
--- a/src/hotspot/share/classfile/classListParser.cpp
+++ b/src/hotspot/share/cds/classListParser.cpp
@@ -25,10 +25,12 @@
#include "precompiled.hpp"
#include "jvm.h"
#include "jimage.hpp"
-#include "classfile/classListParser.hpp"
+#include "cds/archiveUtils.hpp"
+#include "cds/classListParser.hpp"
+#include "cds/lambdaFormInvokers.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/javaClasses.inline.hpp"
-#include "classfile/lambdaFormInvokers.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
@@ -39,8 +41,6 @@
#include "interpreter/linkResolver.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
-#include "memory/archiveUtils.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/constantPool.hpp"
#include "runtime/atomic.hpp"
diff --git a/src/hotspot/share/classfile/classListParser.hpp b/src/hotspot/share/cds/classListParser.hpp
similarity index 98%
rename from src/hotspot/share/classfile/classListParser.hpp
rename to src/hotspot/share/cds/classListParser.hpp
index 95e5f54f226..56d77c09ac6 100644
--- a/src/hotspot/share/classfile/classListParser.hpp
+++ b/src/hotspot/share/cds/classListParser.hpp
@@ -22,8 +22,8 @@
*
*/
-#ifndef SHARE_CLASSFILE_CLASSLISTPARSER_HPP
-#define SHARE_CLASSFILE_CLASSLISTPARSER_HPP
+#ifndef SHARE_CDS_CLASSLISTPARSER_HPP
+#define SHARE_CDS_CLASSLISTPARSER_HPP
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -183,4 +183,4 @@ class ClassListParser : public StackObj {
static void populate_cds_indy_info(const constantPoolHandle &pool, int cp_index, CDSIndyInfo* cii, TRAPS);
};
-#endif // SHARE_CLASSFILE_CLASSLISTPARSER_HPP
+#endif // SHARE_CDS_CLASSLISTPARSER_HPP
diff --git a/src/hotspot/share/classfile/classListWriter.hpp b/src/hotspot/share/cds/classListWriter.hpp
similarity index 95%
rename from src/hotspot/share/classfile/classListWriter.hpp
rename to src/hotspot/share/cds/classListWriter.hpp
index 58ee4c26845..6ece57ef429 100644
--- a/src/hotspot/share/classfile/classListWriter.hpp
+++ b/src/hotspot/share/cds/classListWriter.hpp
@@ -22,8 +22,8 @@
*
*/
-#ifndef SHARE_CLASSFILE_CLASSLISTWRITER_HPP
-#define SHARE_CLASSFILE_CLASSLISTWRITER_HPP
+#ifndef SHARE_CDS_CLASSLISTWRITER_HPP
+#define SHARE_CDS_CLASSLISTWRITER_HPP
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.hpp"
@@ -79,4 +79,4 @@ class ClassListWriter {
}
};
-#endif // SHARE_CLASSFILE_CLASSLISTWRITER_HPP
+#endif // SHARE_CDS_CLASSLISTWRITER_HPP
diff --git a/src/hotspot/share/memory/cppVtables.cpp b/src/hotspot/share/cds/cppVtables.cpp
similarity index 98%
rename from src/hotspot/share/memory/cppVtables.cpp
rename to src/hotspot/share/cds/cppVtables.cpp
index e3b2f422ab6..3bb30b392ae 100644
--- a/src/hotspot/share/memory/cppVtables.cpp
+++ b/src/hotspot/share/cds/cppVtables.cpp
@@ -23,11 +23,11 @@
*/
#include "precompiled.hpp"
+#include "cds/archiveUtils.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/cppVtables.hpp"
+#include "cds/metaspaceShared.hpp"
#include "logging/log.hpp"
-#include "memory/archiveUtils.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/cppVtables.hpp"
-#include "memory/metaspaceShared.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/inlineKlass.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
diff --git a/src/hotspot/share/memory/cppVtables.hpp b/src/hotspot/share/cds/cppVtables.hpp
similarity index 93%
rename from src/hotspot/share/memory/cppVtables.hpp
rename to src/hotspot/share/cds/cppVtables.hpp
index dbfe639cd6d..5318a9de2ba 100644
--- a/src/hotspot/share/memory/cppVtables.hpp
+++ b/src/hotspot/share/cds/cppVtables.hpp
@@ -22,8 +22,8 @@
*
*/
-#ifndef SHARE_MEMORY_CPPVTABLES_HPP
-#define SHARE_MEMORY_CPPVTABLES_HPP
+#ifndef SHARE_CDS_CPPVTABLES_HPP
+#define SHARE_CDS_CPPVTABLES_HPP
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
@@ -45,4 +45,4 @@ class CppVtables : AllStatic {
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
};
-#endif // SHARE_MEMORY_CPPVTABLES_HPP
+#endif // SHARE_CDS_CPPVTABLES_HPP
diff --git a/src/hotspot/share/memory/dumpAllocStats.cpp b/src/hotspot/share/cds/dumpAllocStats.cpp
similarity index 99%
rename from src/hotspot/share/memory/dumpAllocStats.cpp
rename to src/hotspot/share/cds/dumpAllocStats.cpp
index ffec46cd7f5..0568bc550e6 100644
--- a/src/hotspot/share/memory/dumpAllocStats.cpp
+++ b/src/hotspot/share/cds/dumpAllocStats.cpp
@@ -23,9 +23,9 @@
*/
#include "precompiled.hpp"
+#include "cds/dumpAllocStats.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
-#include "memory/dumpAllocStats.hpp"
void DumpAllocStats::print_stats(int ro_all, int rw_all) {
// symbols
diff --git a/src/hotspot/share/memory/dumpAllocStats.hpp b/src/hotspot/share/cds/dumpAllocStats.hpp
similarity index 96%
rename from src/hotspot/share/memory/dumpAllocStats.hpp
rename to src/hotspot/share/cds/dumpAllocStats.hpp
index 5bf7b9371a9..c897c1738ec 100644
--- a/src/hotspot/share/memory/dumpAllocStats.hpp
+++ b/src/hotspot/share/cds/dumpAllocStats.hpp
@@ -22,11 +22,11 @@
*
*/
-#ifndef SHARE_MEMORY_DUMPALLOCSTATS_HPP
-#define SHARE_MEMORY_DUMPALLOCSTATS_HPP
+#ifndef SHARE_CDS_DUMPALLOCSTATS_HPP
+#define SHARE_CDS_DUMPALLOCSTATS_HPP
-#include "memory/allocation.hpp"
#include "classfile/compactHashtable.hpp"
+#include "memory/allocation.hpp"
// This is for dumping detailed statistics for the allocations
// in the shared spaces.
@@ -100,4 +100,4 @@ class DumpAllocStats : public ResourceObj {
void print_stats(int ro_all, int rw_all);
};
-#endif // SHARE_MEMORY_DUMPALLOCSTATS_HPP
+#endif // SHARE_CDS_DUMPALLOCSTATS_HPP
diff --git a/src/hotspot/share/memory/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp
similarity index 94%
rename from src/hotspot/share/memory/dynamicArchive.cpp
rename to src/hotspot/share/cds/dynamicArchive.cpp
index ea4909077b9..24b927e8b5e 100644
--- a/src/hotspot/share/memory/dynamicArchive.cpp
+++ b/src/hotspot/share/cds/dynamicArchive.cpp
@@ -24,6 +24,11 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "cds/archiveBuilder.hpp"
+#include "cds/archiveUtils.inline.hpp"
+#include "cds/dynamicArchive.hpp"
+#include "cds/lambdaFormInvokers.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
@@ -32,11 +37,7 @@
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gc_globals.hpp"
#include "logging/log.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/archiveUtils.inline.hpp"
-#include "memory/dynamicArchive.hpp"
#include "memory/metaspaceClosure.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/arguments.hpp"
@@ -344,29 +345,34 @@ void DynamicArchive::dump(const char* archive_name, TRAPS) {
} else {
// prevent multiple dumps.
set_has_been_dumped_once();
- }
- ArchiveClassesAtExit = archive_name;
- if (Arguments::init_shared_archive_paths()) {
- dump();
- } else {
- ArchiveClassesAtExit = nullptr;
- THROW_MSG(vmSymbols::java_lang_RuntimeException(),
+ ArchiveClassesAtExit = archive_name;
+ if (Arguments::init_shared_archive_paths()) {
+ dump(CHECK);
+ } else {
+ ArchiveClassesAtExit = nullptr;
+ THROW_MSG(vmSymbols::java_lang_RuntimeException(),
"Could not setup SharedDynamicArchivePath");
- }
- // prevent do dynamic dump at exit.
- ArchiveClassesAtExit = nullptr;
- if (!Arguments::init_shared_archive_paths()) {
- THROW_MSG(vmSymbols::java_lang_RuntimeException(),
+ }
+ // prevent do dynamic dump at exit.
+ ArchiveClassesAtExit = nullptr;
+ if (!Arguments::init_shared_archive_paths()) {
+ THROW_MSG(vmSymbols::java_lang_RuntimeException(),
"Could not restore SharedDynamicArchivePath");
+ }
}
}
-void DynamicArchive::dump() {
+void DynamicArchive::dump(TRAPS) {
if (Arguments::GetSharedDynamicArchivePath() == NULL) {
log_warning(cds, dynamic)("SharedDynamicArchivePath is not specified");
return;
}
+ // regenerate lambdaform holder classes
+ log_info(cds, dynamic)("Regenerate lambdaform holder classes ...");
+ LambdaFormInvokers::regenerate_holder_classes(CHECK);
+ log_info(cds, dynamic)("Regenerate lambdaform holder classes ...done");
+
VM_PopulateDynamicDumpSharedSpace op;
VMThread::execute(&op);
}
diff --git a/src/hotspot/share/memory/dynamicArchive.hpp b/src/hotspot/share/cds/dynamicArchive.hpp
similarity index 92%
rename from src/hotspot/share/memory/dynamicArchive.hpp
rename to src/hotspot/share/cds/dynamicArchive.hpp
index 3b03a477414..21a5eada01d 100644
--- a/src/hotspot/share/memory/dynamicArchive.hpp
+++ b/src/hotspot/share/cds/dynamicArchive.hpp
@@ -22,14 +22,12 @@
*
*/
-#ifndef SHARE_VM_MEMORY_DYNAMICARCHIVE_HPP
-#define SHARE_VM_MEMORY_DYNAMICARCHIVE_HPP
-
-#if INCLUDE_CDS
+#ifndef SHARE_CDS_DYNAMICARCHIVE_HPP
+#define SHARE_CDS_DYNAMICARCHIVE_HPP
+#include "cds/filemap.hpp"
#include "classfile/compactHashtable.hpp"
#include "memory/allocation.hpp"
-#include "memory/filemap.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "oops/oop.hpp"
@@ -37,6 +35,8 @@
#include "utilities/macros.hpp"
#include "utilities/resourceHash.hpp"
+#if INCLUDE_CDS
+
class DynamicArchiveHeader : public FileMapHeader {
friend class CDSOffsets;
private:
@@ -61,11 +61,11 @@ class DynamicArchive : AllStatic {
static bool _has_been_dumped_once;
public:
static void dump(const char* archive_name, TRAPS);
- static void dump();
+ static void dump(TRAPS);
static bool has_been_dumped_once() { return _has_been_dumped_once; }
static void set_has_been_dumped_once() { _has_been_dumped_once = true; }
static bool is_mapped() { return FileMapInfo::dynamic_info() != NULL; }
static bool validate(FileMapInfo* dynamic_info);
};
#endif // INCLUDE_CDS
-#endif // SHARE_VM_MEMORY_DYNAMICARCHIVE_HPP
+#endif // SHARE_CDS_DYNAMICARCHIVE_HPP
diff --git a/src/hotspot/share/memory/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
similarity index 99%
rename from src/hotspot/share/memory/filemap.cpp
rename to src/hotspot/share/cds/filemap.cpp
index df92dc09611..a259bc49918 100644
--- a/src/hotspot/share/memory/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -24,6 +24,12 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "cds/archiveBuilder.hpp"
+#include "cds/archiveUtils.inline.hpp"
+#include "cds/dynamicArchive.hpp"
+#include "cds/filemap.hpp"
+#include "cds/heapShared.inline.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.inline.hpp"
@@ -36,15 +42,9 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "logging/logMessage.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/archiveUtils.inline.hpp"
-#include "memory/dynamicArchive.hpp"
-#include "memory/filemap.hpp"
-#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
@@ -55,7 +55,7 @@
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
diff --git a/src/hotspot/share/memory/filemap.hpp b/src/hotspot/share/cds/filemap.hpp
similarity index 99%
rename from src/hotspot/share/memory/filemap.hpp
rename to src/hotspot/share/cds/filemap.hpp
index ace704b0131..9dc2b2309f2 100644
--- a/src/hotspot/share/memory/filemap.hpp
+++ b/src/hotspot/share/cds/filemap.hpp
@@ -22,11 +22,12 @@
*
*/
-#ifndef SHARE_MEMORY_FILEMAP_HPP
-#define SHARE_MEMORY_FILEMAP_HPP
+#ifndef SHARE_CDS_FILEMAP_HPP
+#define SHARE_CDS_FILEMAP_HPP
+#include "cds/metaspaceShared.hpp"
#include "include/cds.h"
-#include "memory/metaspaceShared.hpp"
+#include "oops/array.hpp"
#include "oops/compressedOops.hpp"
#include "utilities/align.hpp"
@@ -594,4 +595,4 @@ class FileMapInfo : public CHeapObj<mtInternal> {
#endif
};
-#endif // SHARE_MEMORY_FILEMAP_HPP
+#endif // SHARE_CDS_FILEMAP_HPP
diff --git a/src/hotspot/share/memory/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp
similarity index 99%
rename from src/hotspot/share/memory/heapShared.cpp
rename to src/hotspot/share/cds/heapShared.cpp
index c8c73574625..63a450126ee 100644
--- a/src/hotspot/share/memory/heapShared.cpp
+++ b/src/hotspot/share/cds/heapShared.cpp
@@ -23,6 +23,11 @@
*/
#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/archiveUtils.hpp"
+#include "cds/filemap.hpp"
+#include "cds/heapShared.inline.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/javaClasses.inline.hpp"
@@ -38,14 +43,9 @@
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/archiveUtils.hpp"
-#include "memory/filemap.hpp"
-#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
@@ -561,7 +561,15 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
_entry_field_records = NULL;
_subgraph_object_klasses = NULL;
_is_full_module_graph = info->is_full_module_graph();
- _has_non_early_klasses = info->has_non_early_klasses();
+
+ if (_is_full_module_graph) {
+ // Consider all classes referenced by the full module graph as early -- we will be
+ // allocating objects of these classes during JVMTI early phase, so they cannot
+ // be processed by (non-early) JVMTI ClassFileLoadHook
+ _has_non_early_klasses = false;
+ } else {
+ _has_non_early_klasses = info->has_non_early_klasses();
+ }
if (_has_non_early_klasses) {
ResourceMark rm;
diff --git a/src/hotspot/share/memory/heapShared.hpp b/src/hotspot/share/cds/heapShared.hpp
similarity index 99%
rename from src/hotspot/share/memory/heapShared.hpp
rename to src/hotspot/share/cds/heapShared.hpp
index 29e1c4e1195..9126136a9f7 100644
--- a/src/hotspot/share/memory/heapShared.hpp
+++ b/src/hotspot/share/cds/heapShared.hpp
@@ -22,15 +22,15 @@
*
*/
-#ifndef SHARE_MEMORY_HEAPSHARED_HPP
-#define SHARE_MEMORY_HEAPSHARED_HPP
+#ifndef SHARE_CDS_HEAPSHARED_HPP
+#define SHARE_CDS_HEAPSHARED_HPP
+#include "cds/metaspaceShared.hpp"
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/gc_globals.hpp"
#include "memory/allocation.hpp"
-#include "memory/metaspaceShared.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.hpp"
@@ -415,4 +415,4 @@ class DumpedInternedStrings :
{};
#endif
-#endif // SHARE_MEMORY_HEAPSHARED_HPP
+#endif // SHARE_CDS_HEAPSHARED_HPP
diff --git a/src/hotspot/share/memory/heapShared.inline.hpp b/src/hotspot/share/cds/heapShared.inline.hpp
similarity index 87%
rename from src/hotspot/share/memory/heapShared.inline.hpp
rename to src/hotspot/share/cds/heapShared.inline.hpp
index c6fc737c971..679dbc714c3 100644
--- a/src/hotspot/share/memory/heapShared.inline.hpp
+++ b/src/hotspot/share/cds/heapShared.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,12 +22,12 @@
*
*/
-#ifndef SHARE_MEMORY_HEAPSHARED_INLINE_HPP
-#define SHARE_MEMORY_HEAPSHARED_INLINE_HPP
+#ifndef SHARE_CDS_HEAPSHARED_INLINE_HPP
+#define SHARE_CDS_HEAPSHARED_INLINE_HPP
+#include "cds/heapShared.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "oops/compressedOops.inline.hpp"
-#include "memory/heapShared.hpp"
#include "utilities/align.hpp"
#if INCLUDE_CDS_JAVA_HEAP
@@ -45,4 +45,4 @@ inline oop HeapShared::decode_from_archive(narrowOop v) {
#endif
-#endif // SHARE_MEMORY_HEAPSHARED_INLINE_HPP
+#endif // SHARE_CDS_HEAPSHARED_INLINE_HPP
diff --git a/src/hotspot/share/cds/lambdaFormInvokers.cpp b/src/hotspot/share/cds/lambdaFormInvokers.cpp
new file mode 100644
index 00000000000..92eaa569a30
--- /dev/null
+++ b/src/hotspot/share/cds/lambdaFormInvokers.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/lambdaFormInvokers.hpp"
+#include "cds/metaspaceShared.hpp"
+#include "classfile/classLoadInfo.hpp"
+#include "classfile/classFileStream.hpp"
+#include "classfile/javaClasses.inline.hpp"
+#include "classfile/klassFactory.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#include "classfile/vmClasses.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "logging/log.hpp"
+#include "memory/oopFactory.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/klass.hpp"
+#include "oops/objArrayKlass.hpp"
+#include "oops/objArrayOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/typeArrayOop.inline.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/javaCalls.hpp"
+
+GrowableArrayCHeap<char*, mtClassShared>* LambdaFormInvokers::_lambdaform_lines = nullptr;
+Array<Array<char>*>* LambdaFormInvokers::_static_archive_invokers = nullptr;
+
+#define NUM_FILTER 4
+static const char* filter[NUM_FILTER] = {"java.lang.invoke.Invokers$Holder",
+ "java.lang.invoke.DirectMethodHandle$Holder",
+ "java.lang.invoke.DelegatingMethodHandle$Holder",
+ "java.lang.invoke.LambdaForm$Holder"};
+
+static bool should_be_archived(char* line) {
+ for (int k = 0; k < NUM_FILTER; k++) {
+ if (strstr(line, filter[k]) != nullptr) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void LambdaFormInvokers::append_filtered(char* line) {
+ if (should_be_archived(line)) {
+ append(line);
+ }
+}
+#undef NUM_FILTER
+
+void LambdaFormInvokers::append(char* line) {
+ if (_lambdaform_lines == NULL) {
+    _lambdaform_lines = new GrowableArrayCHeap<char*, mtClassShared>(150);
+ }
+ _lambdaform_lines->append(line);
+}
+
+void LambdaFormInvokers::regenerate_holder_classes(TRAPS) {
+ if (_lambdaform_lines == nullptr || _lambdaform_lines->length() == 0) {
+ log_info(cds)("Nothing to regenerate for holder classes");
+ return;
+ }
+
+ ResourceMark rm(THREAD);
+
+ Symbol* cds_name = vmSymbols::jdk_internal_misc_CDS();
+ Klass* cds_klass = SystemDictionary::resolve_or_null(cds_name, THREAD);
+ guarantee(cds_klass != NULL, "jdk/internal/misc/CDS must exist!");
+ log_debug(cds)("Total lambdaform lines %d", _lambdaform_lines->length());
+
+ HandleMark hm(THREAD);
+ int len = _lambdaform_lines->length();
+ objArrayHandle list_lines = oopFactory::new_objArray_handle(vmClasses::String_klass(), len, CHECK);
+ for (int i = 0; i < len; i++) {
+ Handle h_line = java_lang_String::create_from_str(_lambdaform_lines->at(i), CHECK);
+ list_lines->obj_at_put(i, h_line());
+ }
+
+ //
+ // Object[] CDS.generateLambdaFormHolderClasses(String[] lines)
+ // the returned Object[] layout:
+ // name, byte[], name, byte[] ....
+ Symbol* method = vmSymbols::generateLambdaFormHolderClasses();
+ Symbol* signrs = vmSymbols::generateLambdaFormHolderClasses_signature();
+
+ JavaValue result(T_OBJECT);
+ JavaCalls::call_static(&result, cds_klass, method, signrs, list_lines, THREAD);
+
+ if (HAS_PENDING_EXCEPTION) {
+ if (!PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())) {
+ log_error(cds)("%s: %s", PENDING_EXCEPTION->klass()->external_name(),
+ java_lang_String::as_utf8_string(java_lang_Throwable::message(PENDING_EXCEPTION)));
+ if (DumpSharedSpaces) {
+ log_error(cds)("Failed to generate LambdaForm holder classes. Is your classlist out of date?");
+ } else {
+ log_error(cds)("Failed to generate LambdaForm holder classes. Was the base archive generated with an outdated classlist?");
+ }
+ CLEAR_PENDING_EXCEPTION;
+ }
+ return;
+ }
+
+ objArrayHandle h_array(THREAD, (objArrayOop)result.get_oop());
+ int sz = h_array->length();
+ assert(sz % 2 == 0 && sz >= 2, "Must be even size of length");
+ for (int i = 0; i < sz; i+= 2) {
+ Handle h_name(THREAD, h_array->obj_at(i));
+ typeArrayHandle h_bytes(THREAD, (typeArrayOop)h_array->obj_at(i+1));
+ assert(h_name != NULL, "Class name is NULL");
+ assert(h_bytes != NULL, "Class bytes is NULL");
+
+ char *class_name = java_lang_String::as_utf8_string(h_name());
+ int len = h_bytes->length();
+ // make a copy of class bytes so GC will not affect us.
+ char *buf = NEW_RESOURCE_ARRAY(char, len);
+ memcpy(buf, (char*)h_bytes->byte_at_addr(0), len);
+ ClassFileStream st((u1*)buf, len, NULL, ClassFileStream::verify);
+ reload_class(class_name, st, CHECK);
+ }
+}
+
+// class_handle - the class name, bytes_handle - the class bytes
+void LambdaFormInvokers::reload_class(char* name, ClassFileStream& st, TRAPS) {
+ Symbol* class_name = SymbolTable::new_symbol((const char*)name);
+ // the class must exist
+ Klass* klass = SystemDictionary::resolve_or_null(class_name, THREAD);
+ if (klass == NULL) {
+ log_info(cds)("Class %s not present, skip", name);
+ return;
+ }
+ assert(klass->is_instance_klass(), "Should be");
+
+ ClassLoaderData* cld = ClassLoaderData::the_null_class_loader_data();
+ Handle protection_domain;
+ ClassLoadInfo cl_info(protection_domain);
+
+ InstanceKlass* result = KlassFactory::create_from_stream(&st,
+ class_name,
+ cld,
+ cl_info,
+ CHECK);
+
+ {
+ MutexLocker mu_r(THREAD, Compile_lock); // add_to_hierarchy asserts this.
+ SystemDictionary::add_to_hierarchy(result);
+ }
+ // new class not linked yet.
+ MetaspaceShared::try_link_class(THREAD, result);
+ assert(!HAS_PENDING_EXCEPTION, "Invariant");
+
+ // exclude the existing class from dump
+ SystemDictionaryShared::set_excluded(InstanceKlass::cast(klass));
+ SystemDictionaryShared::init_dumptime_info(result);
+ log_debug(cds, lambda)("Replaced class %s, old: %p new: %p", name, klass, result);
+}
+
+void LambdaFormInvokers::dump_static_archive_invokers() {
+ if (_lambdaform_lines != nullptr && _lambdaform_lines->length() > 0) {
+ int count = 0;
+ int len = _lambdaform_lines->length();
+ for (int i = 0; i < len; i++) {
+ char* str = _lambdaform_lines->at(i);
+ if (should_be_archived(str)) {
+ count++;
+ }
+ }
+ log_debug(cds)("Number of LF invoker lines stored: %d", count);
+ if (count > 0) {
+      _static_archive_invokers = ArchiveBuilder::new_ro_array<Array<char>*>(count);
+ int index = 0;
+ for (int i = 0; i < len; i++) {
+ char* str = _lambdaform_lines->at(i);
+ if (should_be_archived(str)) {
+ size_t str_len = strlen(str) + 1; // including terminating zero
+          Array<char>* line = ArchiveBuilder::new_ro_array<char>((int)str_len);
+ strncpy(line->adr_at(0), str, str_len);
+
+ _static_archive_invokers->at_put(index, line);
+ ArchivePtrMarker::mark_pointer(_static_archive_invokers->adr_at(index));
+ index++;
+ }
+ }
+ assert(index == count, "Should match");
+ }
+ }
+}
+
+void LambdaFormInvokers::read_static_archive_invokers() {
+ if (_static_archive_invokers != nullptr) {
+ for (int i = 0; i < _static_archive_invokers->length(); i++) {
+      Array<char>* line = _static_archive_invokers->at(i);
+ char* str = line->adr_at(0);
+ append_filtered(str);
+ }
+ }
+}
+
+void LambdaFormInvokers::serialize(SerializeClosure* soc) {
+ soc->do_ptr((void**)&_static_archive_invokers);
+}
diff --git a/src/hotspot/share/cds/lambdaFormInvokers.hpp b/src/hotspot/share/cds/lambdaFormInvokers.hpp
new file mode 100644
index 00000000000..d3ced685314
--- /dev/null
+++ b/src/hotspot/share/cds/lambdaFormInvokers.hpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CDS_LAMBDAFORMINVOKERS_HPP
+#define SHARE_CDS_LAMBDAFORMINVOKERS_HPP
+#include "memory/allStatic.hpp"
+#include "runtime/handles.hpp"
+#include "utilities/growableArray.hpp"
+
+class ClassFileStream;
+template <typename T> class Array;
+
+class LambdaFormInvokers : public AllStatic {
+ private:
+  static GrowableArrayCHeap<char*, mtClassShared>* _lambdaform_lines;
+ // for storing LF form lines (LF_RESOLVE only) in read only table.
+  static Array<Array<char>*>* _static_archive_invokers;
+ static void reload_class(char* name, ClassFileStream& st, TRAPS);
+ public:
+ static void append(char* line);
+ static void append_filtered(char* line);
+ static void regenerate_holder_classes(TRAPS);
+  static GrowableArrayCHeap<char*, mtClassShared>* lambdaform_lines() {
+ return _lambdaform_lines;
+ }
+ static void dump_static_archive_invokers();
+ static void read_static_archive_invokers();
+ static void serialize(SerializeClosure* soc);
+};
+#endif // SHARE_CDS_LAMBDAFORMINVOKERS_HPP
diff --git a/src/hotspot/share/memory/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp
similarity index 96%
rename from src/hotspot/share/memory/metaspaceShared.cpp
rename to src/hotspot/share/cds/metaspaceShared.cpp
index bd61bd806ac..d45d5ab0e67 100644
--- a/src/hotspot/share/memory/metaspaceShared.cpp
+++ b/src/hotspot/share/cds/metaspaceShared.cpp
@@ -24,12 +24,18 @@
#include "precompiled.hpp"
#include "jvm_io.h"
+#include "cds/archiveBuilder.hpp"
+#include "cds/classListParser.hpp"
+#include "cds/cppVtables.hpp"
+#include "cds/dumpAllocStats.hpp"
+#include "cds/filemap.hpp"
+#include "cds/heapShared.hpp"
+#include "cds/lambdaFormInvokers.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classLoaderDataShared.hpp"
-#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/javaClasses.inline.hpp"
-#include "classfile/lambdaFormInvokers.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
@@ -45,14 +51,8 @@
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/cppVtables.hpp"
-#include "memory/dumpAllocStats.hpp"
-#include "memory/filemap.hpp"
-#include "memory/heapShared.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
@@ -101,8 +101,8 @@ bool MetaspaceShared::_use_full_module_graph = true;
//
// bm - bitmap for relocating the above 7 regions.
//
-// The rw, and ro regions are linearly allocated, in the order of rw->ro.
-// These regions are aligned with MetaspaceShared::reserved_space_alignment().
+// The rw and ro regions are linearly allocated, in the order of rw->ro.
+// These regions are aligned with MetaspaceShared::core_region_alignment().
//
// These 2 regions are populated in the following steps:
// [0] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
@@ -363,6 +363,7 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
CDS_JAVA_HEAP_ONLY(ClassLoaderDataShared::serialize(soc);)
+ LambdaFormInvokers::serialize(soc);
soc->do_tag(666);
}
@@ -391,7 +392,9 @@ static void rewrite_nofast_bytecode(const methodHandle& method) {
void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik) {
for (int i = 0; i < ik->methods()->length(); i++) {
methodHandle m(thread, ik->methods()->at(i));
- rewrite_nofast_bytecode(m);
+ if (!is_old_class(ik)) {
+ rewrite_nofast_bytecode(m);
+ }
Fingerprinter fp(m);
// The side effect of this call sets method's fingerprint field.
fp.fingerprint();
@@ -460,6 +463,8 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
SystemDictionaryShared::write_to_archive();
+  // Write lambdaform lines into archive
+ LambdaFormInvokers::dump_static_archive_invokers();
// Write the other data to the output array.
DumpRegion* ro_region = ArchiveBuilder::current()->ro_region();
char* start = ro_region->top();
@@ -487,12 +492,6 @@ void VM_PopulateDumpSharedSpace::doit() {
FileMapInfo::check_nonempty_dir_in_shared_path_table();
NOT_PRODUCT(SystemDictionary::verify();)
- // The following guarantee is meant to ensure that no loader constraints
- // exist yet, since the constraints table is not shared. This becomes
- // more important now that we don't re-initialize vtables/itables for
- // shared classes at runtime, where constraints were previously created.
- guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
- "loader constraints are not saved");
// At this point, many classes have been loaded.
// Gather systemDictionary classes in a global array and do everything to
@@ -580,9 +579,31 @@ class CollectCLDClosure : public CLDClosure {
ClassLoaderData* cld_at(int index) { return _loaded_cld.at(index); }
};
+// Check if a class or its super class/interface is old.
+bool MetaspaceShared::is_old_class(InstanceKlass* ik) {
+ if (ik == NULL) {
+ return false;
+ }
+ if (ik->major_version() < 50 /*JAVA_6_VERSION*/) {
+ return true;
+ }
+ if (is_old_class(ik->java_super())) {
+ return true;
+ }
+  Array<InstanceKlass*>* interfaces = ik->local_interfaces();
+ int len = interfaces->length();
+ for (int i = 0; i < len; i++) {
+ if (is_old_class(interfaces->at(i))) {
+ return true;
+ }
+ }
+ return false;
+}
+
bool MetaspaceShared::linking_required(InstanceKlass* ik) {
+ // For static CDS dump, do not link old classes.
// For dynamic CDS dump, only link classes loaded by the builtin class loaders.
- return DumpSharedSpaces ? true : !ik->is_shared_unregistered_class();
+ return DumpSharedSpaces ? !MetaspaceShared::is_old_class(ik) : !ik->is_shared_unregistered_class();
}
bool MetaspaceShared::link_class_for_cds(InstanceKlass* ik, TRAPS) {
@@ -738,7 +759,7 @@ void MetaspaceShared::preload_and_dump_impl(TRAPS) {
// were not explicitly specified in the classlist. E.g., if an interface implemented by class K
// fails verification, all other interfaces that were not specified in the classlist but
// are implemented by K are not verified.
- link_and_cleanup_shared_classes(CATCH);
+ link_and_cleanup_shared_classes(CHECK);
log_info(cds)("Rewriting and linking classes: done");
#if INCLUDE_CDS_JAVA_HEAP
@@ -762,7 +783,7 @@ bool MetaspaceShared::try_link_class(Thread* current, InstanceKlass* ik) {
ExceptionMark em(current);
Thread* THREAD = current; // For exception macros.
Arguments::assert_is_dumping_archive();
- if (ik->is_loaded() && !ik->is_linked() &&
+ if (ik->is_loaded() && !ik->is_linked() && !MetaspaceShared::is_old_class(ik) &&
!SystemDictionaryShared::has_class_failed_verification(ik)) {
bool saved = BytecodeVerificationLocal;
if (ik->is_shared_unregistered_class() && ik->class_loader() == NULL) {
@@ -808,7 +829,9 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray<Klass*>* klasses) {
Klass* k = klasses->at(i);
if (k->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(k);
- ik->constants()->add_dumped_interned_strings();
+ if (ik->is_linked()) {
+ ik->constants()->add_dumped_interned_strings();
+ }
}
}
if (_extra_interned_strings != NULL) {
@@ -907,12 +930,12 @@ void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
char* cds_end = dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end();
set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end);
_relocation_delta = static_mapinfo->relocation_delta();
+ _requested_base_address = static_mapinfo->requested_base_address();
if (dynamic_mapped) {
FileMapInfo::set_shared_path_table(dynamic_mapinfo);
} else {
FileMapInfo::set_shared_path_table(static_mapinfo);
}
- _requested_base_address = static_mapinfo->requested_base_address();
} else {
set_shared_metaspace_range(NULL, NULL, NULL);
UseSharedSpaces = false;
@@ -1202,7 +1225,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
// Get the simple case out of the way first:
// no compressed class space, simple allocation.
archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
- false /* bool large */, (char*)base_address);
+ os::vm_page_size(), (char*)base_address);
if (archive_space_rs.is_reserved()) {
assert(base_address == NULL ||
(address)archive_space_rs.base() == base_address, "Sanity");
@@ -1251,9 +1274,9 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
// via sequential file IO.
address ccs_base = base_address + archive_space_size + gap_size;
archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
- false /* large */, (char*)base_address);
+ os::vm_page_size(), (char*)base_address);
class_space_rs = ReservedSpace(class_space_size, class_space_alignment,
- false /* large */, (char*)ccs_base);
+ os::vm_page_size(), (char*)ccs_base);
}
if (!archive_space_rs.is_reserved() || !class_space_rs.is_reserved()) {
release_reserved_spaces(total_space_rs, archive_space_rs, class_space_rs);
@@ -1262,7 +1285,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
} else {
if (use_archive_base_addr && base_address != nullptr) {
total_space_rs = ReservedSpace(total_range_size, archive_space_alignment,
- false /* bool large */, (char*) base_address);
+ os::vm_page_size(), (char*) base_address);
} else {
// Reserve at any address, but leave it up to the platform to choose a good one.
total_space_rs = Metaspace::reserve_address_space_for_compressed_classes(total_range_size);
@@ -1426,6 +1449,12 @@ void MetaspaceShared::initialize_shared_spaces() {
dynamic_mapinfo->unmap_region(MetaspaceShared::bm);
}
+ // Set up LambdaFormInvokers::_lambdaform_lines for dynamic dump
+ if (DynamicDumpSharedSpaces) {
+ // Read stored LF format lines stored in static archive
+ LambdaFormInvokers::read_static_archive_invokers();
+ }
+
if (PrintSharedArchiveAndExit) {
// Print archive names
if (dynamic_mapinfo != nullptr) {
diff --git a/src/hotspot/share/memory/metaspaceShared.hpp b/src/hotspot/share/cds/metaspaceShared.hpp
similarity index 97%
rename from src/hotspot/share/memory/metaspaceShared.hpp
rename to src/hotspot/share/cds/metaspaceShared.hpp
index cc6386a0678..46b0a4c8416 100644
--- a/src/hotspot/share/memory/metaspaceShared.hpp
+++ b/src/hotspot/share/cds/metaspaceShared.hpp
@@ -22,8 +22,8 @@
*
*/
-#ifndef SHARE_MEMORY_METASPACESHARED_HPP
-#define SHARE_MEMORY_METASPACESHARED_HPP
+#ifndef SHARE_CDS_METASPACESHARED_HPP
+#define SHARE_CDS_METASPACESHARED_HPP
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
@@ -137,9 +137,10 @@ class MetaspaceShared : AllStatic {
static void link_and_cleanup_shared_classes(TRAPS) NOT_CDS_RETURN;
static bool link_class_for_cds(InstanceKlass* ik, TRAPS) NOT_CDS_RETURN_(false);
static bool linking_required(InstanceKlass* ik) NOT_CDS_RETURN_(false);
+ static bool is_old_class(InstanceKlass* ik) NOT_CDS_RETURN_(false);
#if INCLUDE_CDS
- // Alignment for the 3 core CDS regions (MC/RW/RO) only.
+ // Alignment for the 2 core CDS regions (RW/RO) only.
// (Heap region alignments are decided by GC).
static size_t core_region_alignment();
static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik);
@@ -202,4 +203,4 @@ class MetaspaceShared : AllStatic {
static MapArchiveResult map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs);
static void unmap_archive(FileMapInfo* mapinfo);
};
-#endif // SHARE_MEMORY_METASPACESHARED_HPP
+#endif // SHARE_CDS_METASPACESHARED_HPP
diff --git a/src/hotspot/share/ci/ciMethod.cpp b/src/hotspot/share/ci/ciMethod.cpp
index 1644e63e649..c931df67f0a 100644
--- a/src/hotspot/share/ci/ciMethod.cpp
+++ b/src/hotspot/share/ci/ciMethod.cpp
@@ -155,6 +155,8 @@ ciMethod::ciMethod(const methodHandle& h_m, ciInstanceKlass* holder) :
ciReplay::initialize(this);
}
#endif
+
+ CompilerOracle::tag_blackhole_if_possible(h_m);
}
@@ -302,7 +304,9 @@ bool ciMethod::has_balanced_monitors() {
ExceptionMark em(THREAD);
ResourceMark rm(THREAD);
GeneratePairingInfo gpi(method);
- gpi.compute_map(CATCH);
+ if (!gpi.compute_map(THREAD)) {
+ fatal("Unrecoverable verification or out-of-memory error");
+ }
if (!gpi.monitor_safe()) {
return false;
}
@@ -1259,6 +1263,7 @@ bool ciMethod::is_getter () const { FETCH_FLAG_FROM_VM(is_getter);
bool ciMethod::is_setter () const { FETCH_FLAG_FROM_VM(is_setter); }
bool ciMethod::is_accessor () const { FETCH_FLAG_FROM_VM(is_accessor); }
bool ciMethod::is_object_constructor_or_class_initializer() const { FETCH_FLAG_FROM_VM(is_object_constructor_or_class_initializer); }
+bool ciMethod::is_empty () const { FETCH_FLAG_FROM_VM(is_empty_method); }
bool ciMethod::is_boxing_method() const {
if (intrinsic_id() != vmIntrinsics::_none && holder()->is_box_klass()) {
diff --git a/src/hotspot/share/ci/ciMethod.hpp b/src/hotspot/share/ci/ciMethod.hpp
index 25c6e6e21e8..7eaaa443347 100644
--- a/src/hotspot/share/ci/ciMethod.hpp
+++ b/src/hotspot/share/ci/ciMethod.hpp
@@ -205,6 +205,15 @@ class ciMethod : public ciMetadata {
bool intrinsic_candidate() const { return get_Method()->intrinsic_candidate(); }
bool is_class_initializer() const { return get_Method()->is_class_initializer(); }
+ bool check_intrinsic_candidate() const {
+ if (intrinsic_id() == vmIntrinsics::_blackhole) {
+ // This is the intrinsic without an associated method, so no intrinsic_candidate
+ // flag is set. The intrinsic is still correct.
+ return true;
+ }
+ return (CheckIntrinsics ? intrinsic_candidate() : true);
+ }
+
int highest_osr_comp_level();
Bytecodes::Code java_code_at_bci(int bci) {
@@ -350,6 +359,7 @@ class ciMethod : public ciMetadata {
bool is_getter () const;
bool is_setter () const;
bool is_accessor () const;
+ bool is_empty () const;
bool can_be_statically_bound() const { return _can_be_statically_bound; }
bool has_reserved_stack_access() const { return _has_reserved_stack_access; }
bool is_boxing_method() const;
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index 883c3d656d4..bcd89d32e7d 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "aot/aotLoader.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.hpp"
@@ -5566,6 +5565,11 @@ static void check_methods_for_intrinsics(const InstanceKlass* ik,
// is defined for it.
continue;
}
+ if (vmIntrinsics::_blackhole == id) {
+ // The _blackhole intrinsic is a special marker. No explicit method
+ // is defined for it.
+ continue;
+ }
if (vmIntrinsics::class_for(id) == klass_id) {
// Check if the current class contains a method with the same
@@ -5615,25 +5619,6 @@ InstanceKlass* ClassFileParser::create_instance_klass(bool changed_by_loadhook,
assert(_klass == ik, "invariant");
-
- if (ik->should_store_fingerprint()) {
- ik->store_fingerprint(_stream->compute_fingerprint());
- }
-
- ik->set_has_passed_fingerprint_check(false);
- if (UseAOT && ik->supers_have_passed_fingerprint_checks()) {
- uint64_t aot_fp = AOTLoader::get_saved_fingerprint(ik);
- uint64_t fp = ik->has_stored_fingerprint() ? ik->get_stored_fingerprint() : _stream->compute_fingerprint();
- if (aot_fp != 0 && aot_fp == fp) {
- // This class matches with a class saved in an AOT library
- ik->set_has_passed_fingerprint_check(true);
- } else {
- ResourceMark rm;
- log_info(class, fingerprint)("%s : expected = " PTR64_FORMAT " actual = " PTR64_FORMAT,
- ik->external_name(), aot_fp, _stream->compute_fingerprint());
- }
- }
-
if (ik->is_inline_klass()) {
InlineKlass* vk = InlineKlass::cast(ik);
oop val = ik->allocate_instance(CHECK_NULL);
@@ -6298,18 +6283,6 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
_minor_version = stream->get_u2_fast();
_major_version = stream->get_u2_fast();
- if (DumpSharedSpaces && _major_version < JAVA_6_VERSION) {
- ResourceMark rm;
- warning("Pre JDK 6 class not supported by CDS: %u.%u %s",
- _major_version, _minor_version, _class_name->as_C_string());
- Exceptions::fthrow(
- THREAD_AND_LOCATION,
- vmSymbols::java_lang_UnsupportedClassVersionError(),
- "Unsupported major.minor version for dump time %u.%u",
- _major_version,
- _minor_version);
- }
-
// Check version numbers - we check this even with verifier off
verify_class_version(_major_version, _minor_version, _class_name, CHECK);
diff --git a/src/hotspot/share/classfile/classFileStream.cpp b/src/hotspot/share/classfile/classFileStream.cpp
index 2e87763fec8..b65b827c583 100644
--- a/src/hotspot/share/classfile/classFileStream.cpp
+++ b/src/hotspot/share/classfile/classFileStream.cpp
@@ -75,12 +75,3 @@ const ClassFileStream* ClassFileStream::clone() const {
need_verify(),
from_boot_loader_modules_image());
}
-
-uint64_t ClassFileStream::compute_fingerprint() const {
- int classfile_size = length();
- int classfile_crc = ClassLoader::crc32(0, (const char*)buffer(), length());
- uint64_t fingerprint = (uint64_t(classfile_size) << 32) | uint64_t(uint32_t(classfile_crc));
- assert(fingerprint != 0, "must not be zero");
-
- return fingerprint;
-}
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index cf84da623dd..21d89439169 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "jvm.h"
#include "jimage.hpp"
+#include "cds/filemap.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.inline.hpp"
#include "classfile/classLoaderData.inline.hpp"
@@ -47,7 +48,6 @@
#include "logging/logStream.hpp"
#include "logging/logTag.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/filemap.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -65,7 +65,7 @@
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/perfData.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
@@ -105,10 +105,11 @@ int ClassLoader::_libzip_loaded = 0;
static JImageOpen_t JImageOpen = NULL;
static JImageClose_t JImageClose = NULL;
-static JImagePackageToModule_t JImagePackageToModule = NULL;
static JImageFindResource_t JImageFindResource = NULL;
static JImageGetResource_t JImageGetResource = NULL;
-static JImageResourceIterator_t JImageResourceIterator = NULL;
+
+// JimageFile pointer, or null if exploded JDK build.
+static JImageFile* JImage_file = NULL;
// Globals
@@ -343,16 +344,26 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
DEBUG_ONLY(ClassPathImageEntry* ClassPathImageEntry::_singleton = NULL;)
+JImageFile* ClassPathImageEntry::jimage() const {
+ return JImage_file;
+}
+
+JImageFile* ClassPathImageEntry::jimage_non_null() const {
+ assert(ClassLoader::has_jrt_entry(), "must be");
+ assert(jimage() != NULL, "should have been opened by ClassLoader::lookup_vm_options "
+ "and remained throughout normal JVM lifetime");
+ return jimage();
+}
+
void ClassPathImageEntry::close_jimage() {
- if (_jimage != NULL) {
- (*JImageClose)(_jimage);
- _jimage = NULL;
+ if (jimage() != NULL) {
+ (*JImageClose)(jimage());
+ JImage_file = NULL;
}
}
ClassPathImageEntry::ClassPathImageEntry(JImageFile* jimage, const char* name) :
- ClassPathEntry(),
- _jimage(jimage) {
+ ClassPathEntry() {
guarantee(jimage != NULL, "jimage file is null");
guarantee(name != NULL, "jimage file name is null");
assert(_singleton == NULL, "VM supports only one jimage");
@@ -361,18 +372,6 @@ ClassPathImageEntry::ClassPathImageEntry(JImageFile* jimage, const char* name) :
_name = copy_path(name);
}
-ClassPathImageEntry::~ClassPathImageEntry() {
- assert(_singleton == this, "must be");
- DEBUG_ONLY(_singleton = NULL);
-
- FREE_C_HEAP_ARRAY(const char, _name);
-
- if (_jimage != NULL) {
- (*JImageClose)(_jimage);
- _jimage = NULL;
- }
-}
-
ClassFileStream* ClassPathImageEntry::open_stream(Thread* current, const char* name) {
return open_stream_for_loader(current, name, ClassLoaderData::the_null_class_loader_data());
}
@@ -386,7 +385,7 @@ ClassFileStream* ClassPathImageEntry::open_stream(Thread* current, const char* n
//
ClassFileStream* ClassPathImageEntry::open_stream_for_loader(Thread* current, const char* name, ClassLoaderData* loader_data) {
jlong size;
- JImageLocationRef location = (*JImageFindResource)(_jimage, "", get_jimage_version_string(), name, &size);
+ JImageLocationRef location = (*JImageFindResource)(jimage_non_null(), "", get_jimage_version_string(), name, &size);
if (location == 0) {
TempNewSymbol class_name = SymbolTable::new_symbol(name);
@@ -394,7 +393,7 @@ ClassFileStream* ClassPathImageEntry::open_stream_for_loader(Thread* current, co
if (pkg_name != NULL) {
if (!Universe::is_module_initialized()) {
- location = (*JImageFindResource)(_jimage, JAVA_BASE_NAME, get_jimage_version_string(), name, &size);
+ location = (*JImageFindResource)(jimage_non_null(), JAVA_BASE_NAME, get_jimage_version_string(), name, &size);
} else {
PackageEntry* package_entry = ClassLoader::get_package_entry(pkg_name, loader_data);
if (package_entry != NULL) {
@@ -405,7 +404,7 @@ ClassFileStream* ClassPathImageEntry::open_stream_for_loader(Thread* current, co
assert(module->is_named(), "Boot classLoader package is in unnamed module");
const char* module_name = module->name()->as_C_string();
if (module_name != NULL) {
- location = (*JImageFindResource)(_jimage, module_name, get_jimage_version_string(), name, &size);
+ location = (*JImageFindResource)(jimage_non_null(), module_name, get_jimage_version_string(), name, &size);
}
}
}
@@ -416,7 +415,7 @@ ClassFileStream* ClassPathImageEntry::open_stream_for_loader(Thread* current, co
ClassLoader::perf_sys_classfile_bytes_read()->inc(size);
}
char* data = NEW_RESOURCE_ARRAY(char, size);
- (*JImageGetResource)(_jimage, location, data, size);
+ (*JImageGetResource)(jimage_non_null(), location, data, size);
// Resource allocated
assert(this == (ClassPathImageEntry*)ClassLoader::get_jrt_entry(), "must be");
return new ClassFileStream((u1*)data,
@@ -649,14 +648,17 @@ void ClassLoader::setup_bootstrap_search_path_impl(Thread* current, const char *
struct stat st;
if (os::stat(path, &st) == 0) {
// Directory found
- ClassPathEntry* new_entry = create_class_path_entry(current, path, &st, false, false);
+ if (JImage_file != NULL) {
+ assert(Arguments::has_jimage(), "sanity check");
+ const char* canonical_path = get_canonical_path(path, current);
+ assert(canonical_path != NULL, "canonical_path issue");
- // Check for a jimage
- if (Arguments::has_jimage()) {
- assert(_jrt_entry == NULL, "should not setup bootstrap class search path twice");
- _jrt_entry = new_entry;
- assert(new_entry != NULL && new_entry->is_modules_image(), "No java runtime image present");
+ _jrt_entry = new ClassPathImageEntry(JImage_file, canonical_path);
+ assert(_jrt_entry != NULL && _jrt_entry->is_modules_image(), "No java runtime image present");
assert(_jrt_entry->jimage() != NULL, "No java runtime image");
+ } else {
+ // It's an exploded build.
+ ClassPathEntry* new_entry = create_class_path_entry(current, path, &st, false, false);
}
} else {
// If path does not exist, exit
@@ -724,24 +726,18 @@ ClassPathEntry* ClassLoader::create_class_path_entry(Thread* current,
ClassPathEntry* new_entry = NULL;
if ((st->st_mode & S_IFMT) == S_IFREG) {
ResourceMark rm(thread);
- // Regular file, should be a zip or jimage file
+ // Regular file, should be a zip file
// Canonicalized filename
const char* canonical_path = get_canonical_path(path, thread);
if (canonical_path == NULL) {
return NULL;
}
- jint error;
- JImageFile* jimage =(*JImageOpen)(canonical_path, &error);
- if (jimage != NULL) {
- new_entry = new ClassPathImageEntry(jimage, canonical_path);
+ char* error_msg = NULL;
+ jzfile* zip = open_zip_file(canonical_path, &error_msg, thread);
+ if (zip != NULL && error_msg == NULL) {
+ new_entry = new ClassPathZipEntry(zip, path, is_boot_append, from_class_path_attr);
} else {
- char* error_msg = NULL;
- jzfile* zip = open_zip_file(canonical_path, &error_msg, thread);
- if (zip != NULL && error_msg == NULL) {
- new_entry = new ClassPathZipEntry(zip, path, is_boot_append, from_class_path_attr);
- } else {
- return NULL;
- }
+ return NULL;
}
log_info(class, path)("opened: %s", path);
log_info(class, load)("opened: %s", path);
@@ -968,10 +964,8 @@ void ClassLoader::load_jimage_library() {
JImageOpen = CAST_TO_FN_PTR(JImageOpen_t, dll_lookup(handle, "JIMAGE_Open", path));
JImageClose = CAST_TO_FN_PTR(JImageClose_t, dll_lookup(handle, "JIMAGE_Close", path));
- JImagePackageToModule = CAST_TO_FN_PTR(JImagePackageToModule_t, dll_lookup(handle, "JIMAGE_PackageToModule", path));
JImageFindResource = CAST_TO_FN_PTR(JImageFindResource_t, dll_lookup(handle, "JIMAGE_FindResource", path));
JImageGetResource = CAST_TO_FN_PTR(JImageGetResource_t, dll_lookup(handle, "JIMAGE_GetResource", path));
- JImageResourceIterator = CAST_TO_FN_PTR(JImageResourceIterator_t, dll_lookup(handle, "JIMAGE_ResourceIterator", path));
}
int ClassLoader::crc32(int crc, const char* buf, int len) {
@@ -1429,15 +1423,13 @@ char* ClassLoader::lookup_vm_options() {
load_jimage_library();
jio_snprintf(modules_path, JVM_MAXPATHLEN, "%s%slib%smodules", Arguments::get_java_home(), fileSep, fileSep);
- JImageFile* jimage =(*JImageOpen)(modules_path, &error);
- if (jimage == NULL) {
+ JImage_file =(*JImageOpen)(modules_path, &error);
+ if (JImage_file == NULL) {
return NULL;
}
const char *jimage_version = get_jimage_version_string();
- char *options = lookup_vm_resource(jimage, jimage_version, "jdk/internal/vm/options");
-
- (*JImageClose)(jimage);
+ char *options = lookup_vm_resource(JImage_file, jimage_version, "jdk/internal/vm/options");
return options;
}
diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp
index b729aeb8048..9872e9c5973 100644
--- a/src/hotspot/share/classfile/classLoader.hpp
+++ b/src/hotspot/share/classfile/classLoader.hpp
@@ -117,17 +117,16 @@ class ClassPathZipEntry: public ClassPathEntry {
// For java image files
class ClassPathImageEntry: public ClassPathEntry {
private:
- JImageFile* _jimage;
const char* _name;
DEBUG_ONLY(static ClassPathImageEntry* _singleton;)
public:
bool is_modules_image() const;
- bool is_open() const { return _jimage != NULL; }
const char* name() const { return _name == NULL ? "" : _name; }
- JImageFile* jimage() const { return _jimage; }
+ JImageFile* jimage() const;
+ JImageFile* jimage_non_null() const;
void close_jimage();
ClassPathImageEntry(JImageFile* jimage, const char* name);
- virtual ~ClassPathImageEntry();
+ virtual ~ClassPathImageEntry() { ShouldNotReachHere(); }
ClassFileStream* open_stream(Thread* current, const char* name);
ClassFileStream* open_stream_for_loader(Thread* current, const char* name, ClassLoaderData* loader_data);
};
diff --git a/src/hotspot/share/classfile/classLoaderDataShared.cpp b/src/hotspot/share/classfile/classLoaderDataShared.cpp
index f9840e566dd..237caba2ee3 100644
--- a/src/hotspot/share/classfile/classLoaderDataShared.cpp
+++ b/src/hotspot/share/classfile/classLoaderDataShared.cpp
@@ -23,13 +23,13 @@
*/
#include "precompiled.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionary.hpp"
#include "logging/log.hpp"
-#include "memory/metaspaceShared.hpp"
#include "runtime/handles.inline.hpp"
#if INCLUDE_CDS_JAVA_HEAP
diff --git a/src/hotspot/share/classfile/classLoaderExt.cpp b/src/hotspot/share/classfile/classLoaderExt.cpp
index c281e254de6..b1c8e193f42 100644
--- a/src/hotspot/share/classfile/classLoaderExt.cpp
+++ b/src/hotspot/share/classfile/classLoaderExt.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "cds/filemap.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.inline.hpp"
@@ -37,7 +38,6 @@
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/filemap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
diff --git a/src/hotspot/share/classfile/compactHashtable.cpp b/src/hotspot/share/classfile/compactHashtable.cpp
index 808de7c7fb2..dc21ee4e7f3 100644
--- a/src/hotspot/share/classfile/compactHashtable.cpp
+++ b/src/hotspot/share/classfile/compactHashtable.cpp
@@ -24,11 +24,11 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "cds/archiveBuilder.hpp"
+#include "cds/heapShared.inline.hpp"
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "logging/logMessage.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/heapShared.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
diff --git a/src/hotspot/share/classfile/dictionary.cpp b/src/hotspot/share/classfile/dictionary.cpp
index 3a516ad2096..9905eb4fc70 100644
--- a/src/hotspot/share/classfile/dictionary.cpp
+++ b/src/hotspot/share/classfile/dictionary.cpp
@@ -207,6 +207,7 @@ void DictionaryEntry::add_protection_domain(ClassLoaderData* loader_data, Handle
protection_domain->print_value_on(&ls);
ls.print(" ");
print_count(&ls);
+ ls.cr();
}
}
@@ -580,7 +581,7 @@ void DictionaryEntry::print_count(outputStream *st) {
current = current->next_acquire()) {
count++;
}
- st->print_cr("pd set count = #%d", count);
+ st->print("pd set count = #%d", count);
}
// ----------------------------------------------------------------------------
@@ -631,9 +632,8 @@ void Dictionary::verify() {
ClassLoaderData* cld = loader_data();
// class loader must be present; a null class loader is the
// boostrap loader
- guarantee(cld != NULL ||
- cld->class_loader() == NULL ||
- cld->class_loader()->is_instance(),
+ guarantee(cld != NULL &&
+ (cld->the_null_class_loader_data() || cld->class_loader()->is_instance()),
"checking type of class_loader");
ResourceMark rm;
diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp
index d20490fdc56..f7d7c6a7b5b 100644
--- a/src/hotspot/share/classfile/javaClasses.cpp
+++ b/src/hotspot/share/classfile/javaClasses.cpp
@@ -24,6 +24,9 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "cds/archiveBuilder.hpp"
+#include "cds/heapShared.inline.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
@@ -41,9 +44,6 @@
#include "interpreter/linkResolver.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/heapShared.inline.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
diff --git a/src/hotspot/share/classfile/javaClasses.hpp b/src/hotspot/share/classfile/javaClasses.hpp
index e75dc88baa4..c7e1d9c8ee2 100644
--- a/src/hotspot/share/classfile/javaClasses.hpp
+++ b/src/hotspot/share/classfile/javaClasses.hpp
@@ -382,8 +382,6 @@ class java_lang_Thread : AllStatic {
public:
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
- // Instance creation
- static oop create();
// Returns the JavaThread associated with the thread obj
static JavaThread* thread(oop java_thread);
// Set JavaThread for instance
@@ -520,7 +518,6 @@ class java_lang_Throwable: AllStatic {
static void set_message(oop throwable, oop value);
static Symbol* detail_message(oop throwable);
static void print_stack_element(outputStream *st, Method* method, int bci);
- static void print_stack_usage(Handle stream);
static void compute_offsets();
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
@@ -705,8 +702,6 @@ class java_lang_reflect_Field : public java_lang_reflect_AccessibleObject {
static void set_signature(oop constructor, oop value);
static void set_annotations(oop constructor, oop value);
- static void set_parameter_annotations(oop method, oop value);
- static void set_annotation_default(oop method, oop value);
// Debugging
friend class JavaClasses;
@@ -887,8 +882,6 @@ class java_lang_ref_Reference: AllStatic {
static inline void set_discovered(oop ref, oop value);
static inline void set_discovered_raw(oop ref, oop value);
static inline HeapWord* discovered_addr_raw(oop ref);
- static inline oop queue(oop ref);
- static inline void set_queue(oop ref, oop value);
static bool is_referent_field(oop obj, ptrdiff_t offset);
static inline bool is_final(oop ref);
static inline bool is_phantom(oop ref);
@@ -923,8 +916,6 @@ class java_lang_ref_SoftReference: public java_lang_ref_Reference {
// Interface to java.lang.invoke.MethodHandle objects
-class MethodHandleEntry;
-
class java_lang_invoke_MethodHandle: AllStatic {
friend class JavaClasses;
@@ -997,7 +988,6 @@ class java_lang_invoke_LambdaForm: AllStatic {
// Accessors
static oop vmentry(oop lform);
- static void set_vmentry(oop lform, oop invoker);
// Testers
static bool is_subclass(Klass* klass) {
diff --git a/src/hotspot/share/classfile/klassFactory.cpp b/src/hotspot/share/classfile/klassFactory.cpp
index 6b053549eb4..d64ecc5bf7e 100644
--- a/src/hotspot/share/classfile/klassFactory.cpp
+++ b/src/hotspot/share/classfile/klassFactory.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "cds/filemap.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.hpp"
@@ -30,7 +31,6 @@
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoadInfo.hpp"
#include "classfile/klassFactory.hpp"
-#include "memory/filemap.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiEnvBase.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
diff --git a/src/hotspot/share/classfile/lambdaFormInvokers.cpp b/src/hotspot/share/classfile/lambdaFormInvokers.cpp
deleted file mode 100644
index 1bfca98b5ef..00000000000
--- a/src/hotspot/share/classfile/lambdaFormInvokers.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoadInfo.hpp"
-#include "classfile/classFileStream.hpp"
-#include "classfile/javaClasses.inline.hpp"
-#include "classfile/klassFactory.hpp"
-#include "classfile/lambdaFormInvokers.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "classfile/systemDictionaryShared.hpp"
-#include "classfile/vmClasses.hpp"
-#include "classfile/vmSymbols.hpp"
-#include "logging/log.hpp"
-#include "memory/oopFactory.hpp"
-#include "memory/metaspaceShared.hpp"
-#include "memory/resourceArea.hpp"
-#include "oops/instanceKlass.hpp"
-#include "oops/klass.hpp"
-#include "oops/objArrayKlass.hpp"
-#include "oops/objArrayOop.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/typeArrayOop.inline.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/javaCalls.hpp"
-
-GrowableArray* LambdaFormInvokers::_lambdaform_lines = NULL;
-
-void LambdaFormInvokers::append(char* line) {
- if (_lambdaform_lines == NULL) {
- _lambdaform_lines = new GrowableArray(100);
- }
- _lambdaform_lines->append(line);
-}
-
-void LambdaFormInvokers::regenerate_holder_classes(TRAPS) {
- assert(_lambdaform_lines != NULL, "Bad List");
- ResourceMark rm(THREAD);
-
- Symbol* cds_name = vmSymbols::jdk_internal_misc_CDS();
- Klass* cds_klass = SystemDictionary::resolve_or_null(cds_name, THREAD);
- guarantee(cds_klass != NULL, "jdk/internal/misc/CDS must exist!");
-
- int len = _lambdaform_lines->length();
- objArrayHandle list_lines = oopFactory::new_objArray_handle(vmClasses::String_klass(), len, CHECK);
- for (int i = 0; i < len; i++) {
- Handle h_line = java_lang_String::create_from_str(_lambdaform_lines->at(i), CHECK);
- list_lines->obj_at_put(i, h_line());
- }
-
- //
- // Object[] CDS.generateLambdaFormHolderClasses(String[] lines)
- // the returned Object[] layout:
- // name, byte[], name, byte[] ....
- Symbol* method = vmSymbols::generateLambdaFormHolderClasses();
- Symbol* signrs = vmSymbols::generateLambdaFormHolderClasses_signature();
-
- JavaValue result(T_OBJECT);
- JavaCalls::call_static(&result, cds_klass, method, signrs, list_lines, THREAD);
-
- if (HAS_PENDING_EXCEPTION) {
- log_info(cds)("%s: %s", THREAD->pending_exception()->klass()->external_name(),
- java_lang_String::as_utf8_string(java_lang_Throwable::message(THREAD->pending_exception())));
- CLEAR_PENDING_EXCEPTION;
- return;
- }
-
- objArrayHandle h_array(THREAD, (objArrayOop)result.get_oop());
- int sz = h_array->length();
- assert(sz % 2 == 0 && sz >= 2, "Must be even size of length");
- for (int i = 0; i < sz; i+= 2) {
- Handle h_name(THREAD, h_array->obj_at(i));
- typeArrayHandle h_bytes(THREAD, (typeArrayOop)h_array->obj_at(i+1));
- assert(h_name != NULL, "Class name is NULL");
- assert(h_bytes != NULL, "Class bytes is NULL");
-
- char *class_name = java_lang_String::as_utf8_string(h_name());
- int len = h_bytes->length();
- // make a copy of class bytes so GC will not affect us.
- char *buf = resource_allocate_bytes(THREAD, len);
- memcpy(buf, (char*)h_bytes->byte_at_addr(0), len);
- ClassFileStream st((u1*)buf, len, NULL, ClassFileStream::verify);
-
- reload_class(class_name, st, THREAD);
- // free buf
- resource_free_bytes(buf, len);
-
- if (HAS_PENDING_EXCEPTION) {
- log_info(cds)("Exception happened: %s", PENDING_EXCEPTION->klass()->name()->as_C_string());
- log_info(cds)("Could not create InstanceKlass for class %s", class_name);
- CLEAR_PENDING_EXCEPTION;
- return;
- }
- }
-}
-
-// class_handle - the class name, bytes_handle - the class bytes
-void LambdaFormInvokers::reload_class(char* name, ClassFileStream& st, TRAPS) {
- Symbol* class_name = SymbolTable::new_symbol((const char*)name);
- // the class must exist
- Klass* klass = SystemDictionary::resolve_or_null(class_name, THREAD);
- if (klass == NULL) {
- log_info(cds)("Class %s not present, skip", name);
- return;
- }
- assert(klass->is_instance_klass(), "Should be");
-
- ClassLoaderData* cld = ClassLoaderData::the_null_class_loader_data();
- Handle protection_domain;
- ClassLoadInfo cl_info(protection_domain);
-
- InstanceKlass* result = KlassFactory::create_from_stream(&st,
- class_name,
- cld,
- cl_info,
- CHECK);
-
- {
- MutexLocker mu_r(THREAD, Compile_lock); // add_to_hierarchy asserts this.
- SystemDictionary::add_to_hierarchy(result);
- }
- // new class not linked yet.
- MetaspaceShared::try_link_class(THREAD, result);
- assert(!HAS_PENDING_EXCEPTION, "Invariant");
-
- // exclude the existing class from dump
- SystemDictionaryShared::set_excluded(InstanceKlass::cast(klass));
- log_info(cds, lambda)("Replaced class %s, old: %p new: %p", name, klass, result);
-}
diff --git a/src/hotspot/share/classfile/lambdaFormInvokers.hpp b/src/hotspot/share/classfile/lambdaFormInvokers.hpp
deleted file mode 100644
index d21fb8e64ca..00000000000
--- a/src/hotspot/share/classfile/lambdaFormInvokers.hpp
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_MEMORY_LAMBDAFORMINVOKERS_HPP
-#define SHARE_MEMORY_LAMBDAFORMINVOKERS_HPP
-#include "memory/allStatic.hpp"
-#include "runtime/handles.hpp"
-
-template
-class GrowableArray;
-class ClassFileStream;
-
-class LambdaFormInvokers : public AllStatic {
- private:
- static GrowableArray* _lambdaform_lines;
- static void reload_class(char* name, ClassFileStream& st, TRAPS);
- public:
-
- static void append(char* line);
- static void regenerate_holder_classes(TRAPS);
- static GrowableArray* lambdaform_lines() {
- return _lambdaform_lines;
- }
-};
-#endif // SHARE_MEMORY_LAMBDAFORMINVOKERS_HPP
diff --git a/src/hotspot/share/classfile/loaderConstraints.cpp b/src/hotspot/share/classfile/loaderConstraints.cpp
index 9f981d00bc6..14951b2dc91 100644
--- a/src/hotspot/share/classfile/loaderConstraints.cpp
+++ b/src/hotspot/share/classfile/loaderConstraints.cpp
@@ -191,6 +191,7 @@ void log_ldr_constraint_msg(Symbol* class_name, const char* reason,
bool LoaderConstraintTable::add_entry(Symbol* class_name,
InstanceKlass* klass1, Handle class_loader1,
InstanceKlass* klass2, Handle class_loader2) {
+
LogTarget(Info, class, loader, constraints) lt;
if (klass1 != NULL && klass2 != NULL) {
if (klass1 == klass2) {
@@ -244,9 +245,8 @@ bool LoaderConstraintTable::add_entry(Symbol* class_name,
p->set_loaders(NEW_C_HEAP_ARRAY(ClassLoaderData*, 2, mtClass));
p->set_loader(0, class_loader1());
p->set_loader(1, class_loader2());
- p->set_klass(klass);
- p->set_next(bucket(index));
- set_entry(index, p);
+ Hashtable::add_entry(index, p);
+
if (lt.is_enabled()) {
ResourceMark rm;
lt.print("adding new constraint for name: %s, loader[0]: %s,"
@@ -476,13 +476,15 @@ void LoaderConstraintTable::print_on(outputStream* st) const {
probe != NULL;
probe = probe->next()) {
st->print("%4d: ", cindex);
- probe->name()->print_on(st);
- st->print(" , loaders:");
+ st->print("Symbol: %s loaders:", probe->name()->as_C_string());
for (int n = 0; n < probe->num_loaders(); n++) {
+ st->cr();
+ st->print(" ");
probe->loader_data(n)->print_value_on(st);
- st->print(", ");
}
st->cr();
}
}
}
+
+void LoaderConstraintTable::print() const { print_on(tty); }
diff --git a/src/hotspot/share/classfile/loaderConstraints.hpp b/src/hotspot/share/classfile/loaderConstraints.hpp
index 95c660ac192..5e211635fdc 100644
--- a/src/hotspot/share/classfile/loaderConstraints.hpp
+++ b/src/hotspot/share/classfile/loaderConstraints.hpp
@@ -80,6 +80,7 @@ class LoaderConstraintTable : public Hashtable {
void purge_loader_constraints();
void verify(PlaceholderTable* placeholders);
+ void print() const;
void print_on(outputStream* st) const;
};
diff --git a/src/hotspot/share/classfile/moduleEntry.cpp b/src/hotspot/share/classfile/moduleEntry.cpp
index 48be90e5002..a409d1096b3 100644
--- a/src/hotspot/share/classfile/moduleEntry.cpp
+++ b/src/hotspot/share/classfile/moduleEntry.cpp
@@ -24,15 +24,15 @@
#include "precompiled.hpp"
#include "jni.h"
+#include "cds/archiveBuilder.hpp"
+#include "cds/archiveUtils.hpp"
+#include "cds/filemap.hpp"
+#include "cds/heapShared.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "logging/log.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/archiveUtils.hpp"
-#include "memory/filemap.hpp"
-#include "memory/heapShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oopHandle.inline.hpp"
diff --git a/src/hotspot/share/classfile/modules.cpp b/src/hotspot/share/classfile/modules.cpp
index 27e84b1a462..ed251e10f24 100644
--- a/src/hotspot/share/classfile/modules.cpp
+++ b/src/hotspot/share/classfile/modules.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp"
@@ -41,7 +42,6 @@
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/globals_extension.hpp"
diff --git a/src/hotspot/share/classfile/packageEntry.cpp b/src/hotspot/share/classfile/packageEntry.cpp
index 948f4f54dff..c6e6e55bfeb 100644
--- a/src/hotspot/share/classfile/packageEntry.cpp
+++ b/src/hotspot/share/classfile/packageEntry.cpp
@@ -23,13 +23,13 @@
*/
#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/archiveUtils.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/archiveUtils.hpp"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/symbol.hpp"
diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp
index e8883f25f4b..605e3962c17 100644
--- a/src/hotspot/share/classfile/stringTable.cpp
+++ b/src/hotspot/share/classfile/stringTable.cpp
@@ -23,6 +23,8 @@
*/
#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/heapShared.inline.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.inline.hpp"
@@ -34,8 +36,6 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/heapShared.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp
index a285076514c..12c3b0a9910 100644
--- a/src/hotspot/share/classfile/symbolTable.cpp
+++ b/src/hotspot/share/classfile/symbolTable.cpp
@@ -23,14 +23,14 @@
*/
#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/dynamicArchive.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/dynamicArchive.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp
index 4ed968cf81a..431836c0178 100644
--- a/src/hotspot/share/classfile/systemDictionary.cpp
+++ b/src/hotspot/share/classfile/systemDictionary.cpp
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "aot/aotLoader.hpp"
+#include "cds/heapShared.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.hpp"
@@ -51,7 +51,6 @@
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
-#include "memory/heapShared.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@@ -91,7 +90,6 @@
#include "jfr/jfr.hpp"
#endif
-LoaderConstraintTable* SystemDictionary::_loader_constraints = NULL;
ResolutionErrorTable* SystemDictionary::_resolution_errors = NULL;
SymbolPropertyTable* SystemDictionary::_invoke_method_table = NULL;
ProtectionDomainCacheTable* SystemDictionary::_pd_cache_table = NULL;
@@ -102,15 +100,19 @@ OopHandle SystemDictionary::_java_platform_loader;
// Default ProtectionDomainCacheSize value
const int defaultProtectionDomainCacheSize = 1009;
-const int _loader_constraint_size = 107; // number of entries in constraint table
const int _resolution_error_size = 107; // number of entries in resolution error table
const int _invoke_method_size = 139; // number of entries in invoke method table
// Hashtable holding placeholders for classes being loaded.
const int _placeholder_table_size = 1009;
-PlaceholderTable* _placeholders = NULL;
+static PlaceholderTable* _placeholders = NULL;
static PlaceholderTable* placeholders() { return _placeholders; }
+// Constraints on class loaders
+const int _loader_constraint_size = 107; // number of entries in constraint table
+static LoaderConstraintTable* _loader_constraints;
+static LoaderConstraintTable* constraints() { return _loader_constraints; }
+
// ----------------------------------------------------------------------------
// Java-level SystemLoader and PlatformLoader
oop SystemDictionary::java_system_loader() {
@@ -1306,21 +1308,6 @@ void SystemDictionary::load_shared_class_misc(InstanceKlass* ik, ClassLoaderData
// notify a class loaded from shared object
ClassLoadingService::notify_class_loaded(ik, true /* shared class */);
-
- ik->set_has_passed_fingerprint_check(false);
- if (UseAOT && ik->supers_have_passed_fingerprint_checks()) {
- uint64_t aot_fp = AOTLoader::get_saved_fingerprint(ik);
- uint64_t cds_fp = ik->get_stored_fingerprint();
- if (aot_fp != 0 && aot_fp == cds_fp) {
- // This class matches with a class saved in an AOT library
- ik->set_has_passed_fingerprint_check(true);
- } else {
- if (log_is_enabled(Info, class, fingerprint)) {
- ResourceMark rm;
- log_info(class, fingerprint)("%s : expected = " PTR64_FORMAT " actual = " PTR64_FORMAT, ik->external_name(), aot_fp, cds_fp);
- }
- }
- }
}
#endif // INCLUDE_CDS
diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp
index 81cf181f5f7..9718690f35e 100644
--- a/src/hotspot/share/classfile/systemDictionary.hpp
+++ b/src/hotspot/share/classfile/systemDictionary.hpp
@@ -71,7 +71,6 @@ class ClassFileStream;
class ClassLoadInfo;
class Dictionary;
class AllFieldStream;
-class LoaderConstraintTable;
template class HashtableBucket;
class ResolutionErrorTable;
class SymbolPropertyTable;
@@ -303,9 +302,6 @@ class SystemDictionary : AllStatic {
private:
// Static tables owned by the SystemDictionary
- // Constraints on class loaders
- static LoaderConstraintTable* _loader_constraints;
-
// Resolution errors
static ResolutionErrorTable* _resolution_errors;
@@ -325,8 +321,6 @@ class SystemDictionary : AllStatic {
static OopHandle _java_system_loader;
static OopHandle _java_platform_loader;
- friend class VM_PopulateDumpSharedSpace;
- static LoaderConstraintTable* constraints() { return _loader_constraints; }
static ResolutionErrorTable* resolution_errors() { return _resolution_errors; }
static SymbolPropertyTable* invoke_method_table() { return _invoke_method_table; }
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp
index 4ab5b6425a4..b489a5db4fc 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp
@@ -23,8 +23,14 @@
*/
#include "precompiled.hpp"
+#include "cds/archiveUtils.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/classListParser.hpp"
+#include "cds/dynamicArchive.hpp"
+#include "cds/filemap.hpp"
+#include "cds/heapShared.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classFileStream.hpp"
-#include "classfile/classListParser.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.hpp"
@@ -43,14 +49,8 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
-#include "memory/archiveUtils.hpp"
-#include "memory/archiveBuilder.hpp"
-#include "memory/dynamicArchive.hpp"
-#include "memory/filemap.hpp"
-#include "memory/heapShared.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -1380,18 +1380,28 @@ bool SystemDictionaryShared::should_be_excluded(InstanceKlass* k) {
// class loader doesn't expect it.
if (has_class_failed_verification(k)) {
warn_excluded(k, "Failed verification");
+ return true;
} else {
- warn_excluded(k, "Not linked");
+ if (!MetaspaceShared::is_old_class(k)) {
+ warn_excluded(k, "Not linked");
+ return true;
+ }
}
- return true;
}
- if (k->major_version() < 50 /*JAVA_6_VERSION*/) {
+ if (DynamicDumpSharedSpaces && k->major_version() < 50 /*JAVA_6_VERSION*/) {
+ // In order to support old classes during dynamic dump, class rewriting needs to
+ // be reverted. This would result in more complex code and testing but not much gain.
ResourceMark rm;
log_warning(cds)("Pre JDK 6 class not supported by CDS: %u.%u %s",
k->major_version(), k->minor_version(), k->name()->as_C_string());
return true;
}
+ if (MetaspaceShared::is_old_class(k) && k->is_linked()) {
+ warn_excluded(k, "Old class has been linked");
+ return true;
+ }
+
InstanceKlass* super = k->java_super();
if (super != NULL && should_be_excluded(super)) {
ResourceMark rm;
@@ -2195,6 +2205,19 @@ SystemDictionaryShared::find_record(RunTimeSharedDictionary* static_dict, RunTim
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(name);
const RunTimeSharedClassInfo* record = NULL;
+ if (DynamicArchive::is_mapped()) {
+ // Those regenerated holder classes are in dynamic archive
+ if (name == vmSymbols::java_lang_invoke_Invokers_Holder() ||
+ name == vmSymbols::java_lang_invoke_DirectMethodHandle_Holder() ||
+ name == vmSymbols::java_lang_invoke_LambdaForm_Holder() ||
+ name == vmSymbols::java_lang_invoke_DelegatingMethodHandle_Holder()) {
+ record = dynamic_dict->lookup(name, hash, 0);
+ if (record != nullptr) {
+ return record;
+ }
+ }
+ }
+
if (!MetaspaceShared::is_shared_dynamic(name)) {
// The names of all shared classes in the static dict must also be in the
// static archive
@@ -2251,7 +2274,7 @@ class SharedDictionaryPrinter : StackObj {
void do_value(const RunTimeSharedClassInfo* record) {
ResourceMark rm;
- _st->print_cr("%4d: %s %s", (_index++), record->_klass->external_name(),
+ _st->print_cr("%4d: %s %s", _index++, record->_klass->external_name(),
class_loader_name_for_shared(record->_klass));
}
int index() const { return _index; }
@@ -2268,7 +2291,7 @@ class SharedLambdaDictionaryPrinter : StackObj {
ResourceMark rm;
Klass* k = record->proxy_klass_head();
while (k != nullptr) {
- _st->print_cr("%4d: %s %s", (++_index), k->external_name(),
+ _st->print_cr("%4d: %s %s", _index++, k->external_name(),
class_loader_name_for_shared(k));
k = k->next_link();
}
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.hpp b/src/hotspot/share/classfile/systemDictionaryShared.hpp
index 2c37bf2cfb3..bc734048356 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp
@@ -25,10 +25,10 @@
#ifndef SHARE_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
#define SHARE_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
+#include "cds/filemap.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionary.hpp"
-#include "memory/filemap.hpp"
#include "oops/klass.hpp"
#include "oops/oopHandle.hpp"
diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp
index 67d3c92ed01..4be8660f360 100644
--- a/src/hotspot/share/classfile/verifier.cpp
+++ b/src/hotspot/share/classfile/verifier.cpp
@@ -286,7 +286,9 @@ bool Verifier::is_eligible_for_verification(InstanceKlass* klass, bool should_ve
// already been rewritten to contain constant pool cache indices,
// which the verifier can't understand.
// Shared classes shouldn't have stackmaps either.
- !klass->is_shared() &&
+ // However, bytecodes for shared old classes can be verified because
+ // they have not been rewritten.
+ !(klass->is_shared() && klass->is_rewritten()) &&
// As of the fix for 4486457 we disable verification for all of the
// dynamically-generated bytecodes associated with the 1.4
diff --git a/src/hotspot/share/classfile/vmClasses.cpp b/src/hotspot/share/classfile/vmClasses.cpp
index b76aea1cf5d..2bd1ca65cc0 100644
--- a/src/hotspot/share/classfile/vmClasses.cpp
+++ b/src/hotspot/share/classfile/vmClasses.cpp
@@ -23,13 +23,13 @@
*/
#include "precompiled.hpp"
+#include "cds/heapShared.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
-#include "memory/heapShared.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
diff --git a/src/hotspot/share/classfile/vmIntrinsics.cpp b/src/hotspot/share/classfile/vmIntrinsics.cpp
index 53d3d1b697f..4582ebea6ad 100644
--- a/src/hotspot/share/classfile/vmIntrinsics.cpp
+++ b/src/hotspot/share/classfile/vmIntrinsics.cpp
@@ -153,6 +153,7 @@ bool vmIntrinsics::should_be_pinned(vmIntrinsics::ID id) {
#endif
case vmIntrinsics::_currentTimeMillis:
case vmIntrinsics::_nanoTime:
+ case vmIntrinsics::_blackhole:
return true;
default:
return false;
diff --git a/src/hotspot/share/classfile/vmIntrinsics.hpp b/src/hotspot/share/classfile/vmIntrinsics.hpp
index 9d963c96f9b..61a49ec2eb3 100644
--- a/src/hotspot/share/classfile/vmIntrinsics.hpp
+++ b/src/hotspot/share/classfile/vmIntrinsics.hpp
@@ -534,6 +534,9 @@ class methodHandle;
do_name( getObjectSize_name, "getObjectSize0") \
do_alias( getObjectSize_signature, long_object_long_signature) \
\
+ /* special marker for blackholed methods: */ \
+ do_intrinsic(_blackhole, java_lang_Object, blackhole_name, star_name, F_S) \
+ \
/* unsafe memory references (there are a lot of them...) */ \
do_signature(getReference_signature, "(Ljava/lang/Object;J)Ljava/lang/Object;") \
do_signature(putReference_signature, "(Ljava/lang/Object;JLjava/lang/Object;)V") \
diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp
index 862d1fe6aa5..a4033f4ef65 100644
--- a/src/hotspot/share/classfile/vmSymbols.hpp
+++ b/src/hotspot/share/classfile/vmSymbols.hpp
@@ -286,6 +286,7 @@
template(signature_name, "signature") \
template(slot_name, "slot") \
template(trusted_final_name, "trustedFinal") \
+ template(blackhole_name, "") /*fake name*/ \
\
/* Support for annotations (JDK 1.5 and above) */ \
\
@@ -299,12 +300,6 @@
template(base_name, "base") \
/* Type Annotations (JDK 8 and above) */ \
template(type_annotations_name, "typeAnnotations") \
- /* used by CDS */ \
- template(jdk_internal_misc_CDS, "jdk/internal/misc/CDS") \
- template(generateLambdaFormHolderClasses, "generateLambdaFormHolderClasses") \
- template(generateLambdaFormHolderClasses_signature, "([Ljava/lang/String;)[Ljava/lang/Object;") \
- template(dumpSharedArchive, "dumpSharedArchive") \
- template(dumpSharedArchive_signature, "(ZLjava/lang/String;)V") \
\
/* Intrinsic Annotation (JDK 9 and above) */ \
template(jdk_internal_vm_annotation_DontInline_signature, "Ljdk/internal/vm/annotation/DontInline;") \
@@ -699,13 +694,22 @@
/* jfr signatures */ \
JFR_TEMPLATES(template) \
\
- /* cds */ \
- template(jdk_internal_loader_ClassLoaders, "jdk/internal/loader/ClassLoaders") \
- template(java_util_concurrent_ConcurrentHashMap, "java/util/concurrent/ConcurrentHashMap") \
- template(java_util_ArrayList, "java/util/ArrayList") \
- template(toFileURL_name, "toFileURL") \
- template(toFileURL_signature, "(Ljava/lang/String;)Ljava/net/URL;") \
- template(url_void_signature, "(Ljava/net/URL;)V") \
+ /* CDS */ \
+ template(dumpSharedArchive, "dumpSharedArchive") \
+ template(dumpSharedArchive_signature, "(ZLjava/lang/String;)V") \
+ template(generateLambdaFormHolderClasses, "generateLambdaFormHolderClasses") \
+ template(generateLambdaFormHolderClasses_signature, "([Ljava/lang/String;)[Ljava/lang/Object;") \
+ template(java_lang_invoke_Invokers_Holder, "java/lang/invoke/Invokers$Holder") \
+ template(java_lang_invoke_DirectMethodHandle_Holder, "java/lang/invoke/DirectMethodHandle$Holder") \
+ template(java_lang_invoke_LambdaForm_Holder, "java/lang/invoke/LambdaForm$Holder") \
+ template(java_lang_invoke_DelegatingMethodHandle_Holder, "java/lang/invoke/DelegatingMethodHandle$Holder") \
+ template(jdk_internal_loader_ClassLoaders, "jdk/internal/loader/ClassLoaders") \
+ template(jdk_internal_misc_CDS, "jdk/internal/misc/CDS") \
+ template(java_util_concurrent_ConcurrentHashMap, "java/util/concurrent/ConcurrentHashMap") \
+ template(java_util_ArrayList, "java/util/ArrayList") \
+ template(toFileURL_name, "toFileURL") \
+ template(toFileURL_signature, "(Ljava/lang/String;)Ljava/net/URL;") \
+ template(url_void_signature, "(Ljava/net/URL;)V") \
\
template(java_lang_invoke_ValueBootstrapMethods, "java/lang/invoke/ValueBootstrapMethods") \
template(isSubstitutable_name, "isSubstitutable") \
diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp
index b93cff3d215..ef33319fa45 100644
--- a/src/hotspot/share/code/codeBlob.hpp
+++ b/src/hotspot/share/code/codeBlob.hpp
@@ -44,8 +44,7 @@ struct CodeBlobType {
MethodProfiled = 1, // Execution level 2 and 3 (profiled) nmethods
NonNMethod = 2, // Non-nmethods like Buffers, Adapters and Runtime Stubs
All = 3, // All types (No code cache segmentation)
- AOT = 4, // AOT methods
- NumTypes = 5 // Number of CodeBlobTypes
+ NumTypes = 4 // Number of CodeBlobTypes
};
};
@@ -54,10 +53,6 @@ struct CodeBlobType {
// Subtypes are:
// CompiledMethod : Compiled Java methods (include method that calls to native code)
// nmethod : JIT Compiled Java methods
-// AOTCompiledMethod : AOT Compiled Java methods - Not in the CodeCache!
-// AOTCompiledMethod objects are allocated in the C-Heap, the code they
-// point to is allocated in the AOTCodeHeap which is in the C-Heap as
-// well (i.e. it's the memory where the shared library was loaded to)
// RuntimeBlob : Non-compiled method code; generated glue code
// BufferBlob : Used for non-relocatable code such as interpreter, stubroutines, etc.
// AdapterBlob : Used to hold C2I/I2C adapters
@@ -71,17 +66,12 @@ struct CodeBlobType {
// UncommonTrapBlob : Used to handle uncommon traps
//
//
-// Layout (all except AOTCompiledMethod) : continuous in the CodeCache
+// Layout : continuous in the CodeCache
// - header
// - relocation
// - content space
// - instruction space
// - data space
-//
-// Layout (AOTCompiledMethod) : in the C-Heap
-// - header -\
-// ... |
-// - code <-/
class CodeBlobLayout;
@@ -145,7 +135,6 @@ class CodeBlob {
virtual bool is_adapter_blob() const { return false; }
virtual bool is_vtable_blob() const { return false; }
virtual bool is_method_handles_adapter_blob() const { return false; }
- virtual bool is_aot() const { return false; }
virtual bool is_compiled() const { return false; }
virtual bool is_buffered_inline_type_blob() const { return false; }
@@ -247,7 +236,6 @@ class CodeBlob {
#ifndef PRODUCT
void set_strings(CodeStrings& strings) {
- assert(!is_aot(), "invalid on aot");
_strings.copy(strings);
}
#endif
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index 818e0aec061..eb2fad4409c 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "jvm_io.h"
-#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
@@ -337,7 +336,7 @@ ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
const size_t rs_ps = page_size();
const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
const size_t rs_size = align_up(size, rs_align);
- ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
+ ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
if (!rs.is_reserved()) {
vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
rs_size/K));
@@ -683,7 +682,6 @@ void CodeCache::metadata_do(MetadataClosure* f) {
while(iter.next()) {
iter.method()->metadata_do(f);
}
- AOTLoader::metadata_do(f);
}
int CodeCache::alignment_unit() {
@@ -972,11 +970,6 @@ void codeCache_init() {
CodeCache::initialize();
}
-void AOTLoader_init() {
- // Load AOT libraries and add AOT code heaps.
- AOTLoader::initialize();
-}
-
//------------------------------------------------------------------------------------------------
int CodeCache::number_of_nmethods_with_dependencies() {
@@ -1038,15 +1031,6 @@ CompiledMethod* CodeCache::find_compiled(void* start) {
return (CompiledMethod*)cb;
}
-bool CodeCache::is_far_target(address target) {
-#if INCLUDE_AOT
- return NativeCall::is_far_call(_low_bound, target) ||
- NativeCall::is_far_call(_high_bound, target);
-#else
- return false;
-#endif
-}
-
#if INCLUDE_JVMTI
// RedefineClasses support for unloading nmethods that are dependent on "old" methods.
// We don't really expect this table to grow very large. If it does, it can become a hashtable.
@@ -1096,11 +1080,6 @@ void CodeCache::old_nmethods_do(MetadataClosure* f) {
// Just marks the methods in this class as needing deoptimization
void CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
-
- // Mark dependent AOT nmethods, which are only found via the class redefined.
- // TODO: add dependencies to aotCompiledMethod's metadata section so this isn't
- // needed.
- AOTLoader::mark_evol_dependent_methods(dependee);
}
diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp
index fdbd2e14911..d889d006d57 100644
--- a/src/hotspot/share/code/codeCache.hpp
+++ b/src/hotspot/share/code/codeCache.hpp
@@ -205,9 +205,6 @@ class CodeCache : AllStatic {
static address high_bound() { return _high_bound; }
static address high_bound(int code_blob_type);
- // Have to use far call instructions to call this pc.
- static bool is_far_target(address pc);
-
// Profiling
static size_t capacity();
static size_t unallocated_capacity(int code_blob_type);
@@ -229,7 +226,6 @@ class CodeCache : AllStatic {
static bool code_blob_type_accepts_compiled(int type) {
bool result = type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled;
- AOT_ONLY( result = result || type == CodeBlobType::AOT; )
return result;
}
diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp
index cb1e432d51e..3cf6bb4f381 100644
--- a/src/hotspot/share/code/compiledIC.cpp
+++ b/src/hotspot/share/code/compiledIC.cpp
@@ -425,7 +425,7 @@ bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
// transitions are mt_safe
Thread *thread = Thread::current();
- if (info.to_interpreter() || info.to_aot()) {
+ if (info.to_interpreter()) {
// Call to interpreter
if (info.is_optimized() && is_optimized()) {
assert(is_clean(), "unsafe IC path");
@@ -439,9 +439,8 @@ bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
if (TraceICs) {
ResourceMark rm(thread);
- tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
+ tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
p2i(instruction_address()),
- (info.to_aot() ? "aot" : "interpreter"),
method->print_value_string());
}
} else {
@@ -542,20 +541,14 @@ void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
entry = caller_is_c1 ? method_code->inline_entry_point() : method_code->entry_point();
}
}
- bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
- if (entry != NULL && !far_c2a) {
- // Call to near compiled code (nmethod or aot).
+ if (entry != NULL) {
+ // Call to near compiled code.
info.set_compiled_entry(entry, is_optimized ? NULL : receiver_klass, is_optimized);
} else {
if (is_optimized) {
- if (far_c2a) {
- // Call to aot code from nmethod.
- info.set_aot_entry(entry, method());
- } else {
- // Use stub entry
- address entry = caller_is_c1 ? method()->get_c2i_inline_entry() : method()->get_c2i_entry();
- info.set_interpreter_entry(entry, method());
- }
+ // Use stub entry
+ address entry = caller_is_c1 ? method()->get_c2i_inline_entry() : method()->get_c2i_entry();
+ info.set_interpreter_entry(entry, method());
} else {
// Use icholder entry
assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
@@ -617,13 +610,6 @@ bool CompiledDirectStaticCall::is_call_to_interpreted() const {
return cm->stub_contains(destination());
}
-bool CompiledDirectStaticCall::is_call_to_far() const {
- // It is a call to aot method, if it calls to a stub. Hence, the destination
- // must be in the stub part of the nmethod that contains the call
- CodeBlob* desc = CodeCache::find_blob(instruction_address());
- return desc->as_compiled_method()->stub_contains(destination());
-}
-
void CompiledStaticCall::set_to_compiled(address entry) {
if (TraceICs) {
ResourceMark rm;
@@ -648,11 +634,6 @@ void CompiledStaticCall::set(const StaticCallInfo& info) {
if (info._to_interpreter) {
// Call to interpreted code
set_to_interpreted(info.callee(), info.entry());
-#if INCLUDE_AOT
- } else if (info._to_aot) {
- // Call to far code
- set_to_far(info.callee(), info.entry());
-#endif
} else {
set_to_compiled(info.entry());
}
@@ -665,12 +646,6 @@ void CompiledStaticCall::compute_entry(const methodHandle& m, CompiledMethod* ca
CompiledMethod* m_code = m->code();
info._callee = m;
if (m_code != NULL && m_code->is_in_use()) {
- if (caller_is_nmethod && m_code->is_far_code()) {
- // Call to far aot code from nmethod.
- info._to_aot = true;
- } else {
- info._to_aot = false;
- }
info._to_interpreter = false;
if (caller_nm->is_compiled_by_c1()) {
info._entry = m_code->verified_inline_entry_point();
@@ -693,18 +668,18 @@ void CompiledStaticCall::compute_entry(const methodHandle& m, CompiledMethod* ca
}
}
-address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot) {
+address CompiledDirectStaticCall::find_stub_for(address instruction) {
// Find reloc. information containing this call-site
RelocIterator iter((nmethod*)NULL, instruction);
while (iter.next()) {
if (iter.addr() == instruction) {
switch(iter.type()) {
case relocInfo::static_call_type:
- return iter.static_call_reloc()->static_stub(is_aot);
+ return iter.static_call_reloc()->static_stub();
// We check here for opt_virtual_call_type, since we reuse the code
// from the CompiledIC implementation
case relocInfo::opt_virtual_call_type:
- return iter.opt_virtual_call_reloc()->static_stub(is_aot);
+ return iter.opt_virtual_call_reloc()->static_stub();
case relocInfo::poll_type:
case relocInfo::poll_return_type: // A safepoint can't overlap a call.
default:
@@ -715,8 +690,8 @@ address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot
return NULL;
}
-address CompiledDirectStaticCall::find_stub(bool is_aot) {
- return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot);
+address CompiledDirectStaticCall::find_stub() {
+ return CompiledDirectStaticCall::find_stub_for(instruction_address());
}
address CompiledDirectStaticCall::resolve_call_stub() const {
@@ -749,8 +724,6 @@ void CompiledDirectStaticCall::print() {
tty->print("clean");
} else if (is_call_to_compiled()) {
tty->print("compiled");
- } else if (is_call_to_far()) {
- tty->print("far");
} else if (is_call_to_interpreted()) {
tty->print("interpreted");
}
diff --git a/src/hotspot/share/code/compiledIC.hpp b/src/hotspot/share/code/compiledIC.hpp
index 62f6b92aea8..7e999828d0c 100644
--- a/src/hotspot/share/code/compiledIC.hpp
+++ b/src/hotspot/share/code/compiledIC.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -84,7 +84,6 @@ class CompiledICInfo : public StackObj {
bool _is_icholder; // Is the cached value a CompiledICHolder*
bool _is_optimized; // it is an optimized virtual call (i.e., can be statically bound)
bool _to_interpreter; // Call it to interpreter
- bool _to_aot; // Call it to aot code
bool _release_icholder;
public:
address entry() const { return _entry; }
@@ -99,13 +98,11 @@ class CompiledICInfo : public StackObj {
}
bool is_optimized() const { return _is_optimized; }
bool to_interpreter() const { return _to_interpreter; }
- bool to_aot() const { return _to_aot; }
void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
_entry = entry;
_cached_value = (void*)klass;
_to_interpreter = false;
- _to_aot = false;
_is_icholder = false;
_is_optimized = is_optimized;
_release_icholder = false;
@@ -115,17 +112,6 @@ class CompiledICInfo : public StackObj {
_entry = entry;
_cached_value = (void*)method;
_to_interpreter = true;
- _to_aot = false;
- _is_icholder = false;
- _is_optimized = true;
- _release_icholder = false;
- }
-
- void set_aot_entry(address entry, Method* method) {
- _entry = entry;
- _cached_value = (void*)method;
- _to_interpreter = false;
- _to_aot = true;
_is_icholder = false;
_is_optimized = true;
_release_icholder = false;
@@ -135,14 +121,13 @@ class CompiledICInfo : public StackObj {
_entry = entry;
_cached_value = (void*)icholder;
_to_interpreter = true;
- _to_aot = false;
_is_icholder = true;
_is_optimized = false;
_release_icholder = true;
}
CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
- _is_optimized(false), _to_interpreter(false), _to_aot(false), _release_icholder(false) {
+ _is_optimized(false), _to_interpreter(false), _release_icholder(false) {
}
~CompiledICInfo() {
// In rare cases the info is computed but not used, so release any
@@ -341,7 +326,6 @@ class StaticCallInfo {
address _entry; // Entrypoint
methodHandle _callee; // Callee (used when calling interpreter)
bool _to_interpreter; // call to interpreted method (otherwise compiled)
- bool _to_aot; // call to aot method (otherwise compiled)
friend class CompiledStaticCall;
friend class CompiledDirectStaticCall;
@@ -358,9 +342,6 @@ class CompiledStaticCall : public ResourceObj {
static int to_interp_stub_size();
static int to_trampoline_stub_size();
static int reloc_to_interp_stub();
- static void emit_to_aot_stub(CodeBuffer &cbuf, address mark = NULL);
- static int to_aot_stub_size();
- static int reloc_to_aot_stub();
// Compute entry point given a method
static void compute_entry(const methodHandle& m, CompiledMethod* caller_nm, StaticCallInfo& info);
@@ -386,9 +367,6 @@ class CompiledStaticCall : public ResourceObj {
protected:
virtual address resolve_call_stub() const = 0;
virtual void set_destination_mt_safe(address dest) = 0;
-#if INCLUDE_AOT
- virtual void set_to_far(const methodHandle& callee, address entry) = 0;
-#endif
virtual void set_to_interpreted(const methodHandle& callee, address entry) = 0;
virtual const char* name() const = 0;
@@ -405,9 +383,6 @@ class CompiledDirectStaticCall : public CompiledStaticCall {
void verify_mt_safe(const methodHandle& callee, address entry,
NativeMovConstReg* method_holder,
NativeJump* jump) PRODUCT_RETURN;
-#if INCLUDE_AOT
- void set_to_far(const methodHandle& callee, address entry);
-#endif
address instruction_address() const { return _call->instruction_address(); }
void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); }
@@ -437,11 +412,10 @@ class CompiledDirectStaticCall : public CompiledStaticCall {
// State
virtual bool is_call_to_interpreted() const;
- bool is_call_to_far() const;
// Stub support
- static address find_stub_for(address instruction, bool is_aot);
- address find_stub(bool is_aot);
+ static address find_stub_for(address instruction);
+ address find_stub();
static void set_stub_to_clean(static_stub_Relocation* static_stub);
// Misc.
diff --git a/src/hotspot/share/code/compiledMethod.cpp b/src/hotspot/share/code/compiledMethod.cpp
index 734711b2259..6bf108e8cd9 100644
--- a/src/hotspot/share/code/compiledMethod.cpp
+++ b/src/hotspot/share/code/compiledMethod.cpp
@@ -72,7 +72,6 @@ CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType ty
void CompiledMethod::init_defaults() {
{ // avoid uninitialized fields, even for short time periods
- _is_far_code = false;
_scopes_data_begin = NULL;
_deopt_handler_begin = NULL;
_deopt_mh_handler_begin = NULL;
diff --git a/src/hotspot/share/code/compiledMethod.hpp b/src/hotspot/share/code/compiledMethod.hpp
index 6bca0e5a2f2..0605b8bfb2e 100644
--- a/src/hotspot/share/code/compiledMethod.hpp
+++ b/src/hotspot/share/code/compiledMethod.hpp
@@ -152,9 +152,6 @@ class CompiledMethod : public CodeBlob {
MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
- bool _is_far_code; // Code is far from CodeCache.
- // Have to use far call instructions to call it from code in CodeCache.
-
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
@@ -347,8 +344,6 @@ class CompiledMethod : public CodeBlob {
const char* state() const;
- bool is_far_code() const { return _is_far_code; }
-
bool inlinecache_check_contains(address addr) const {
return (addr >= code_begin() && addr < verified_entry_point());
}
diff --git a/src/hotspot/share/code/dependencies.cpp b/src/hotspot/share/code/dependencies.cpp
index f787d6f183e..2d2cccc00eb 100644
--- a/src/hotspot/share/code/dependencies.cpp
+++ b/src/hotspot/share/code/dependencies.cpp
@@ -954,107 +954,99 @@ bool DependencySignature::equals(DependencySignature const& s1, DependencySignat
/// Checking dependencies
-// This hierarchy walker inspects subtypes of a given type,
-// trying to find a "bad" class which breaks a dependency.
+// This hierarchy walker inspects subtypes of a given type, trying to find a "bad" class which breaks a dependency.
// Such a class is called a "witness" to the broken dependency.
-// While searching around, we ignore "participants", which
-// are already known to the dependency.
-class ClassHierarchyWalker {
+// While searching around, we ignore "participants", which are already known to the dependency.
+class AbstractClassHierarchyWalker {
public:
enum { PARTICIPANT_LIMIT = 3 };
private:
- // optional method descriptor to check for:
- Symbol* _name;
- Symbol* _signature;
+ // if non-zero, tells how many witnesses to convert to participants
+ uint _record_witnesses;
// special classes which are not allowed to be witnesses:
- Klass* _participants[PARTICIPANT_LIMIT+1];
- uint _num_participants;
+ Klass* _participants[PARTICIPANT_LIMIT+1];
+ uint _num_participants;
- // cache of method lookups
- Method* _found_methods[PARTICIPANT_LIMIT+1];
+#ifdef ASSERT
+ uint _nof_requests; // one-shot walker
+#endif // ASSERT
- // if non-zero, tells how many witnesses to convert to participants
- uint _record_witnesses;
+ static PerfCounter* _perf_find_witness_anywhere_calls_count;
+ static PerfCounter* _perf_find_witness_anywhere_steps_count;
+ static PerfCounter* _perf_find_witness_in_calls_count;
- void initialize(Klass* participant) {
- _record_witnesses = 0;
- _participants[0] = participant;
- _found_methods[0] = NULL;
- _num_participants = 0;
+ protected:
+ virtual Klass* find_witness_in(KlassDepChange* changes) = 0;
+ virtual Klass* find_witness_anywhere(InstanceKlass* context_type) = 0;
+
+ AbstractClassHierarchyWalker(Klass* participant) : _record_witnesses(0), _num_participants(0)
+#ifdef ASSERT
+ , _nof_requests(0)
+#endif // ASSERT
+ {
+ for (uint i = 0; i < PARTICIPANT_LIMIT+1; i++) {
+ _participants[i] = NULL;
+ }
if (participant != NULL) {
- // Terminating NULL.
- _participants[1] = NULL;
- _found_methods[1] = NULL;
- _num_participants = 1;
+ add_participant(participant);
}
}
- void initialize_from_method(Method* m) {
- assert(m != NULL && m->is_method(), "sanity");
- _name = m->name();
- _signature = m->signature();
+ bool is_participant(Klass* k) {
+ for (uint i = 0; i < _num_participants; i++) {
+ if (_participants[i] == k) {
+ return true;
+ }
+ }
+ return false;
}
- public:
- // The walker is initialized to recognize certain methods and/or types
- // as friendly participants.
- ClassHierarchyWalker(Klass* participant, Method* m) {
- initialize_from_method(m);
- initialize(participant);
- }
- ClassHierarchyWalker(Method* m) {
- initialize_from_method(m);
- initialize(NULL);
- }
- ClassHierarchyWalker(Klass* participant = NULL) {
- _name = NULL;
- _signature = NULL;
- initialize(participant);
- }
- ClassHierarchyWalker(Klass* participants[], uint num_participants) {
- _name = NULL;
- _signature = NULL;
- initialize(NULL);
- for (uint i = 0; i < num_participants; ++i) {
- add_participant(participants[i]);
+ bool record_witness(Klass* witness) {
+ if (_record_witnesses > 0) {
+ --_record_witnesses;
+ add_participant(witness);
+ return false; // not a witness
+ } else {
+ return true; // is a witness
}
}
- // This is common code for two searches: One for concrete subtypes,
- // the other for concrete method implementations and overrides.
- bool doing_subtype_search() {
- return _name == NULL;
- }
+ class CountingClassHierarchyIterator : public ClassHierarchyIterator {
+ private:
+ jlong _nof_steps;
+ public:
+ CountingClassHierarchyIterator(InstanceKlass* root) : ClassHierarchyIterator(root), _nof_steps(0) {}
- int num_participants() { return _num_participants; }
- Klass* participant(uint n) {
- assert(n <= _num_participants, "oob");
- return _participants[n];
- }
+ void next() {
+ _nof_steps++;
+ ClassHierarchyIterator::next();
+ }
- // Note: If n==num_participants, returns NULL.
- Method* found_method(uint n) {
+ ~CountingClassHierarchyIterator() {
+ if (UsePerfData) {
+ _perf_find_witness_anywhere_steps_count->inc(_nof_steps);
+ }
+ }
+ };
+
+ public:
+ uint num_participants() { return _num_participants; }
+ Klass* participant(uint n) {
assert(n <= _num_participants, "oob");
- Method* fm = _found_methods[n];
- assert(n == _num_participants || fm != NULL, "proper usage");
- if (fm != NULL && fm->method_holder() != _participants[n]) {
- // Default methods from interfaces can be added to classes. In
- // that case the holder of the method is not the class but the
- // interface where it's defined.
- assert(fm->is_default_method(), "sanity");
+ if (n < _num_participants) {
+ return _participants[n];
+ } else {
return NULL;
}
- return fm;
}
void add_participant(Klass* participant) {
+ assert(!is_participant(participant), "sanity");
assert(_num_participants + _record_witnesses < PARTICIPANT_LIMIT, "oob");
uint np = _num_participants++;
_participants[np] = participant;
- _participants[np+1] = NULL;
- _found_methods[np+1] = NULL;
}
void record_witnesses(uint add) {
@@ -1063,141 +1055,177 @@ class ClassHierarchyWalker {
_record_witnesses = add;
}
- bool is_witness(Klass* k) {
- if (doing_subtype_search()) {
- if (Dependencies::is_concrete_klass(k)) {
- return record_witness(k); // concrete subtype
- } else {
- return false; // not a concrete class
- }
- } else if (!k->is_instance_klass()) {
- return false; // no methods to find in an array type
- } else {
- InstanceKlass* ik = InstanceKlass::cast(k);
- // Search class hierarchy first, skipping private implementations
- // as they never override any inherited methods
- Method* m = ik->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
- if (Dependencies::is_concrete_method(m, ik)) {
- return record_witness(k, m); // concrete method found
- } else {
- // Check for re-abstraction of method
- if (!ik->is_interface() && m != NULL && m->is_abstract()) {
- // Found a matching abstract method 'm' in the class hierarchy.
- // This is fine iff 'k' is an abstract class and all concrete subtypes
- // of 'k' override 'm' and are participates of the current search.
- ClassHierarchyWalker wf(_participants, _num_participants);
- Klass* w = wf.find_witness_subtype(ik);
- if (w != NULL) {
- Method* wm = InstanceKlass::cast(w)->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
- if (!Dependencies::is_concrete_method(wm, w)) {
- // Found a concrete subtype 'w' which does not override abstract method 'm'.
- // Bail out because 'm' could be called with 'w' as receiver (leading to an
- // AbstractMethodError) and thus the method we are looking for is not unique.
- return record_witness(k, m);
- }
- }
- }
- // Check interface defaults also, if any exist.
-      Array<Method*>* default_methods = ik->default_methods();
- if (default_methods != NULL) {
- Method* dm = ik->find_method(default_methods, _name, _signature);
- if (Dependencies::is_concrete_method(dm, NULL)) {
- return record_witness(k, dm); // default method found
- }
- }
- return false; // no concrete method found
- }
+ Klass* find_witness(InstanceKlass* context_type, KlassDepChange* changes = NULL);
+
+ static void init();
+ static void print_statistics();
+};
+
+PerfCounter* AbstractClassHierarchyWalker::_perf_find_witness_anywhere_calls_count = NULL;
+PerfCounter* AbstractClassHierarchyWalker::_perf_find_witness_anywhere_steps_count = NULL;
+PerfCounter* AbstractClassHierarchyWalker::_perf_find_witness_in_calls_count = NULL;
+
+void AbstractClassHierarchyWalker::init() {
+ if (UsePerfData) {
+ EXCEPTION_MARK;
+ _perf_find_witness_anywhere_calls_count =
+ PerfDataManager::create_counter(SUN_CI, "findWitnessAnywhere", PerfData::U_Events, CHECK);
+ _perf_find_witness_anywhere_steps_count =
+ PerfDataManager::create_counter(SUN_CI, "findWitnessAnywhereSteps", PerfData::U_Events, CHECK);
+ _perf_find_witness_in_calls_count =
+ PerfDataManager::create_counter(SUN_CI, "findWitnessIn", PerfData::U_Events, CHECK);
+ }
+}
+
+Klass* AbstractClassHierarchyWalker::find_witness(InstanceKlass* context_type, KlassDepChange* changes) {
+ // Current thread must be in VM (not native mode, as in CI):
+ assert(must_be_in_vm(), "raw oops here");
+ // Must not move the class hierarchy during this check:
+ assert_locked_or_safepoint(Compile_lock);
+ assert(_nof_requests++ == 0, "repeated requests are not supported");
+
+ assert(changes == NULL || changes->involves_context(context_type), "irrelevant dependency");
+
+ // (Note: Interfaces do not have subclasses.)
+ // If it is an interface, search its direct implementors.
+ // (Their subclasses are additional indirect implementors. See InstanceKlass::add_implementor().)
+ if (context_type->is_interface()) {
+ int nof_impls = context_type->nof_implementors();
+ if (nof_impls == 0) {
+ return NULL; // no implementors
+ } else if (nof_impls == 1) { // unique implementor
+ assert(context_type != context_type->implementor(), "not unique");
+ context_type = InstanceKlass::cast(context_type->implementor());
+ } else { // nof_impls >= 2
+ // Avoid this case: *I.m > { A.m, C }; B.m > C
+ // Here, I.m has 2 concrete implementations, but m appears unique
+ // as A.m, because the search misses B.m when checking C.
+ // The inherited method B.m was getting missed by the walker
+ // when interface 'I' was the starting point.
+ // %%% Until this is fixed more systematically, bail out.
+ return context_type;
}
}
+ assert(!context_type->is_interface(), "no interfaces allowed");
- bool is_participant(Klass* k) {
- if (k == _participants[0]) {
- return true;
- } else if (_num_participants <= 1) {
- return false;
- } else {
- return in_list(k, &_participants[1]);
+ if (changes != NULL) {
+ if (UsePerfData) {
+ _perf_find_witness_in_calls_count->inc();
}
+ return find_witness_in(changes);
+ } else {
+ if (UsePerfData) {
+ _perf_find_witness_anywhere_calls_count->inc();
+ }
+ return find_witness_anywhere(context_type);
}
+}
- bool record_witness(Klass* witness, Method* m) {
- _found_methods[_num_participants] = m;
- return record_witness(witness);
+class ConcreteSubtypeFinder : public AbstractClassHierarchyWalker {
+ private:
+ bool is_witness(Klass* k);
+
+ protected:
+ virtual Klass* find_witness_in(KlassDepChange* changes);
+ virtual Klass* find_witness_anywhere(InstanceKlass* context_type);
+
+ public:
+ ConcreteSubtypeFinder(Klass* participant = NULL) : AbstractClassHierarchyWalker(participant) {}
+};
+
+bool ConcreteSubtypeFinder::is_witness(Klass* k) {
+ if (Dependencies::is_concrete_klass(k)) {
+ return record_witness(k); // concrete subtype
+ } else {
+ return false; // not a concrete class
}
+}
- // It is used by is_witness() to fill up participant list (of predefined size)
- // and to report the first witness candidate which doesn't fit into the list.
- // Returns true when no more witnesses can be recorded.
- bool record_witness(Klass* witness) {
- if (_record_witnesses == 0) {
- return true; // report the witness
- } else {
- --_record_witnesses;
- add_participant(witness);
- return false; // record the witness
+Klass* ConcreteSubtypeFinder::find_witness_in(KlassDepChange* changes) {
+ // When looking for unexpected concrete types, do not look beneath expected ones:
+ // * CX > CC > C' is OK, even if C' is new.
+ // * CX > { CC, C' } is not OK if C' is new, and C' is the witness.
+ Klass* new_type = changes->new_type();
+ assert(!is_participant(new_type), "only old classes are participants");
+ // If the new type is a subtype of a participant, we are done.
+ for (uint i = 0; i < num_participants(); i++) {
+ if (changes->involves_context(participant(i))) {
+ // new guy is protected from this check by previous participant
+ return NULL;
}
}
- static bool in_list(Klass* x, Klass** list) {
- for (int i = 0; ; i++) {
- Klass* y = list[i];
- if (y == NULL) break;
- if (y == x) return true;
+ if (is_witness(new_type)) {
+ return new_type;
+ }
+ // No witness found. The dependency remains unbroken.
+ return NULL;
+}
+
+Klass* ConcreteSubtypeFinder::find_witness_anywhere(InstanceKlass* context_type) {
+ for (CountingClassHierarchyIterator iter(context_type); !iter.done(); iter.next()) {
+ Klass* sub = iter.klass();
+ // Do not report participant types.
+ if (is_participant(sub)) {
+ // Don't walk beneath a participant since it hides witnesses.
+ iter.skip_subclasses();
+ } else if (is_witness(sub)) {
+ return sub; // found a witness
}
- return false; // not in list
}
+ // No witness found. The dependency remains unbroken.
+ return NULL;
+}
- class CountingClassHierarchyIterator : public ClassHierarchyIterator {
- private:
- jlong _nof_steps;
- public:
- CountingClassHierarchyIterator(InstanceKlass* root) : ClassHierarchyIterator(root), _nof_steps(0) {}
+class ConcreteMethodFinder : public AbstractClassHierarchyWalker {
+ private:
+ Symbol* _name;
+ Symbol* _signature;
- void next() {
- _nof_steps++;
- ClassHierarchyIterator::next();
- }
+ // cache of method lookups
+ Method* _found_methods[PARTICIPANT_LIMIT+1];
- ~CountingClassHierarchyIterator() {
- if (UsePerfData) {
- _perf_find_witness_anywhere_steps_count->inc(_nof_steps);
- }
- }
- };
+ bool is_witness(Klass* k);
+
+ protected:
+ virtual Klass* find_witness_in(KlassDepChange* changes);
+ virtual Klass* find_witness_anywhere(InstanceKlass* context_type);
- private:
- // the actual search method:
- Klass* find_witness_anywhere(InstanceKlass* context_type,
- bool participants_hide_witnesses);
- // the spot-checking version:
- Klass* find_witness_in(KlassDepChange& changes,
- InstanceKlass* context_type,
- bool participants_hide_witnesses);
bool witnessed_reabstraction_in_supers(Klass* k);
+
public:
- Klass* find_witness_subtype(InstanceKlass* context_type, KlassDepChange* changes = NULL) {
- assert(doing_subtype_search(), "must set up a subtype search");
- // When looking for unexpected concrete types,
- // do not look beneath expected ones.
- const bool participants_hide_witnesses = true;
- // CX > CC > C' is OK, even if C' is new.
- // CX > { CC, C' } is not OK if C' is new, and C' is the witness.
- if (changes != NULL) {
- return find_witness_in(*changes, context_type, participants_hide_witnesses);
- } else {
- return find_witness_anywhere(context_type, participants_hide_witnesses);
+ ConcreteMethodFinder(Method* m, Klass* participant = NULL) : AbstractClassHierarchyWalker(participant) {
+ assert(m != NULL && m->is_method(), "sanity");
+ _name = m->name();
+ _signature = m->signature();
+
+ for (int i = 0; i < PARTICIPANT_LIMIT+1; i++) {
+ _found_methods[i] = NULL;
}
}
- Klass* find_witness_definer(InstanceKlass* context_type, KlassDepChange* changes = NULL) {
- assert(!doing_subtype_search(), "must set up a method definer search");
- // When looking for unexpected concrete methods,
- // look beneath expected ones, to see if there are overrides.
- const bool participants_hide_witnesses = true;
- // CX.m > CC.m > C'.m is not OK, if C'.m is new, and C' is the witness.
- if (changes != NULL) {
- return find_witness_in(*changes, context_type, !participants_hide_witnesses);
- } else {
- return find_witness_anywhere(context_type, !participants_hide_witnesses);
+
+ // Note: If n==num_participants, returns NULL.
+ Method* found_method(uint n) {
+ assert(n <= num_participants(), "oob");
+ Method* fm = _found_methods[n];
+ assert(n == num_participants() || fm != NULL, "proper usage");
+ if (fm != NULL && fm->method_holder() != participant(n)) {
+ // Default methods from interfaces can be added to classes. In
+ // that case the holder of the method is not the class but the
+ // interface where it's defined.
+ assert(fm->is_default_method(), "sanity");
+ return NULL;
}
+ return fm;
+ }
+
+ void add_participant(Klass* participant) {
+ AbstractClassHierarchyWalker::add_participant(participant);
+ _found_methods[num_participants()] = NULL;
+ }
+
+ bool record_witness(Klass* witness, Method* m) {
+ _found_methods[num_participants()] = m;
+ return AbstractClassHierarchyWalker::record_witness(witness);
}
private:
@@ -1210,20 +1238,108 @@ class ClassHierarchyWalker {
static void print_statistics();
};
-PerfCounter* ClassHierarchyWalker::_perf_find_witness_anywhere_calls_count = NULL;
-PerfCounter* ClassHierarchyWalker::_perf_find_witness_anywhere_steps_count = NULL;
-PerfCounter* ClassHierarchyWalker::_perf_find_witness_in_calls_count = NULL;
+bool ConcreteMethodFinder::is_witness(Klass* k) {
+ if (is_participant(k)) {
+ return false; // do not report participant types
+ }
+ if (k->is_instance_klass()) {
+ InstanceKlass* ik = InstanceKlass::cast(k);
+ // Search class hierarchy first, skipping private implementations
+ // as they never override any inherited methods
+ Method* m = ik->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
+ if (Dependencies::is_concrete_method(m, ik)) {
+ return record_witness(k, m); // concrete method found
+ } else {
+ // Check for re-abstraction of method
+ if (!ik->is_interface() && m != NULL && m->is_abstract()) {
+ // Found a matching abstract method 'm' in the class hierarchy.
+ // This is fine iff 'k' is an abstract class and all concrete subtypes
+      // of 'k' override 'm' and are participants of the current search.
+ ConcreteSubtypeFinder wf;
+ for (uint i = 0; i < num_participants(); i++) {
+ Klass* p = participant(i);
+ wf.add_participant(p);
+ }
+ Klass* w = wf.find_witness(ik);
+ if (w != NULL) {
+ Method* wm = InstanceKlass::cast(w)->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
+ if (!Dependencies::is_concrete_method(wm, w)) {
+ // Found a concrete subtype 'w' which does not override abstract method 'm'.
+ // Bail out because 'm' could be called with 'w' as receiver (leading to an
+ // AbstractMethodError) and thus the method we are looking for is not unique.
+ return record_witness(k, m);
+ }
+ }
+ }
+ // Check interface defaults also, if any exist.
+    Array<Method*>* default_methods = ik->default_methods();
+ if (default_methods != NULL) {
+ Method* dm = ik->find_method(default_methods, _name, _signature);
+ if (Dependencies::is_concrete_method(dm, NULL)) {
+ return record_witness(k, dm); // default method found
+ }
+ }
+ return false; // no concrete method found
+ }
+ } else {
+ return false; // no methods to find in an array type
+ }
+}
-void ClassHierarchyWalker::init() {
- if (UsePerfData) {
- EXCEPTION_MARK;
- _perf_find_witness_anywhere_calls_count =
- PerfDataManager::create_counter(SUN_CI, "findWitnessAnywhere", PerfData::U_Events, CHECK);
- _perf_find_witness_anywhere_steps_count =
- PerfDataManager::create_counter(SUN_CI, "findWitnessAnywhereSteps", PerfData::U_Events, CHECK);
- _perf_find_witness_in_calls_count =
- PerfDataManager::create_counter(SUN_CI, "findWitnessIn", PerfData::U_Events, CHECK);
+Klass* ConcreteMethodFinder::find_witness_in(KlassDepChange* changes) {
+ // When looking for unexpected concrete methods, look beneath expected ones, to see if there are overrides.
+ // * CX.m > CC.m > C'.m is not OK, if C'.m is new, and C' is the witness.
+ Klass* new_type = changes->new_type();
+ assert(!is_participant(new_type), "only old classes are participants");
+ if (is_witness(new_type)) {
+ return new_type;
+ } else {
+ // No witness found, but is_witness() doesn't detect method re-abstraction in case of spot-checking.
+ if (witnessed_reabstraction_in_supers(new_type)) {
+ return new_type;
+ }
}
+ // No witness found. The dependency remains unbroken.
+ return NULL;
+}
+
+bool ConcreteMethodFinder::witnessed_reabstraction_in_supers(Klass* k) {
+ if (!k->is_instance_klass()) {
+ return false; // no methods to find in an array type
+ } else {
+ // Looking for a case when an abstract method is inherited into a concrete class.
+ if (Dependencies::is_concrete_klass(k) && !k->is_interface()) {
+ Method* m = InstanceKlass::cast(k)->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
+ if (m != NULL) {
+ return false; // no reabstraction possible: local method found
+ }
+ for (InstanceKlass* super = k->java_super(); super != NULL; super = super->java_super()) {
+ m = super->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
+ if (m != NULL) { // inherited method found
+ if (m->is_abstract() || m->is_overpass()) {
+ return record_witness(super, m); // abstract method found
+ }
+ return false;
+ }
+ }
+ // Miranda.
+ return true;
+ }
+ return false;
+ }
+}
+
+
+Klass* ConcreteMethodFinder::find_witness_anywhere(InstanceKlass* context_type) {
+ // Walk hierarchy under a context type, looking for unexpected types.
+ for (CountingClassHierarchyIterator iter(context_type); !iter.done(); iter.next()) {
+ Klass* sub = iter.klass();
+ if (is_witness(sub)) {
+ return sub; // found a witness
+ }
+ }
+ // No witness found. The dependency remains unbroken.
+ return NULL;
}
#ifdef ASSERT
@@ -1284,132 +1400,6 @@ bool Dependencies::verify_method_context(InstanceKlass* ctxk, Method* m) {
}
#endif // ASSERT
-Klass* ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
- InstanceKlass* context_type,
- bool participants_hide_witnesses) {
- assert(changes.involves_context(context_type), "irrelevant dependency");
- Klass* new_type = changes.new_type();
-
- if (UsePerfData) {
- _perf_find_witness_in_calls_count->inc();
- }
-
- // Current thread must be in VM (not native mode, as in CI):
- assert(must_be_in_vm(), "raw oops here");
- // Must not move the class hierarchy during this check:
- assert_locked_or_safepoint(Compile_lock);
-
- int nof_impls = context_type->nof_implementors();
- if (nof_impls > 1) {
- // Avoid this case: *I.m > { A.m, C }; B.m > C
- // %%% Until this is fixed more systematically, bail out.
- // See corresponding comment in find_witness_anywhere.
- return context_type;
- }
-
- assert(!is_participant(new_type), "only old classes are participants");
- if (participants_hide_witnesses) {
- // If the new type is a subtype of a participant, we are done.
- for (int i = 0; i < num_participants(); i++) {
- if (changes.involves_context(participant(i))) {
- // new guy is protected from this check by previous participant
- return NULL;
- }
- }
- }
-
- if (is_witness(new_type)) {
- return new_type;
- } else if (!doing_subtype_search()) {
- // No witness found, but is_witness() doesn't detect method re-abstraction in case of spot-checking.
- if (witnessed_reabstraction_in_supers(new_type)) {
- return new_type;
- }
- }
-
- return NULL;
-}
-
-// Walk hierarchy under a context type, looking for unexpected types.
-Klass* ClassHierarchyWalker::find_witness_anywhere(InstanceKlass* context_type, bool participants_hide_witnesses) {
- // Current thread must be in VM (not native mode, as in CI):
- assert(must_be_in_vm(), "raw oops here");
- // Must not move the class hierarchy during this check:
- assert_locked_or_safepoint(Compile_lock);
-
- if (UsePerfData) {
- _perf_find_witness_anywhere_calls_count->inc();
- }
-
- // Check the root of the sub-hierarchy first.
-
- // (Note: Interfaces do not have subclasses.)
- // If it is an interface, search its direct implementors.
- // (Their subclasses are additional indirect implementors. See InstanceKlass::add_implementor().)
- if (context_type->is_interface()) {
- int nof_impls = context_type->nof_implementors();
- if (nof_impls == 0) {
- return NULL; // no implementors
- } else if (nof_impls == 1) { // unique implementor
- assert(context_type != context_type->implementor(), "not unique");
- context_type = context_type->implementor();
- } else { // nof_impls >= 2
- // Avoid this case: *I.m > { A.m, C }; B.m > C
- // Here, I.m has 2 concrete implementations, but m appears unique
- // as A.m, because the search misses B.m when checking C.
- // The inherited method B.m was getting missed by the walker
- // when interface 'I' was the starting point.
- // %%% Until this is fixed more systematically, bail out.
- return context_type;
- }
- }
-
- assert(!context_type->is_interface(), "not allowed");
-
- for (CountingClassHierarchyIterator iter(context_type); !iter.done(); iter.next()) {
- Klass* sub = iter.klass();
-
- // Do not report participant types.
- if (is_participant(sub)) {
- // Walk beneath a participant only when it doesn't hide witnesses.
- if (participants_hide_witnesses) {
- iter.skip_subclasses();
- }
- } else if (is_witness(sub)) {
- return sub; // found a witness
- }
- }
- // No witness found. The dependency remains unbroken.
- return NULL;
-}
-
-bool ClassHierarchyWalker::witnessed_reabstraction_in_supers(Klass* k) {
- if (!k->is_instance_klass()) {
- return false; // no methods to find in an array type
- } else {
- // Looking for a case when an abstract method is inherited into a concrete class.
- if (Dependencies::is_concrete_klass(k) && !k->is_interface()) {
- Method* m = InstanceKlass::cast(k)->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
- if (m != NULL) {
- return false; // no reabstraction possible: local method found
- }
- for (InstanceKlass* super = k->java_super(); super != NULL; super = super->java_super()) {
- m = super->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
- if (m != NULL) { // inherited method found
- if (m->is_abstract() || m->is_overpass()) {
- _found_methods[_num_participants] = m;
- return true; // abstract method found
- }
- return false;
- }
- }
- assert(false, "root method not found");
- return true;
- }
- return false;
- }
-}
-
bool Dependencies::is_concrete_klass(Klass* k) {
if (k->is_abstract()) return false;
// %%% We could treat classes which are concrete but
@@ -1512,8 +1502,9 @@ Klass* Dependencies::check_leaf_type(InstanceKlass* ctxk) {
Klass* Dependencies::check_abstract_with_unique_concrete_subtype(InstanceKlass* ctxk,
Klass* conck,
KlassDepChange* changes) {
- ClassHierarchyWalker wf(conck);
- return wf.find_witness_subtype(ctxk, changes);
+ ConcreteSubtypeFinder wf(conck);
+ Klass* k = wf.find_witness(ctxk, changes);
+ return k;
}
@@ -1523,9 +1514,9 @@ Klass* Dependencies::check_abstract_with_unique_concrete_subtype(InstanceKlass*
// The returned subtype is allowed to have have further concrete subtypes.
// That is, return CC1 for CX > CC1 > CC2, but NULL for CX > { CC1, CC2 }.
Klass* Dependencies::find_unique_concrete_subtype(InstanceKlass* ctxk) {
- ClassHierarchyWalker wf(ctxk); // Ignore ctxk when walking.
+ ConcreteSubtypeFinder wf(ctxk); // Ignore ctxk when walking.
wf.record_witnesses(1); // Record one other witness when walking.
- Klass* wit = wf.find_witness_subtype(ctxk);
+ Klass* wit = wf.find_witness(ctxk);
if (wit != NULL) return NULL; // Too many witnesses.
Klass* conck = wf.participant(0);
if (conck == NULL) {
@@ -1557,8 +1548,9 @@ Klass* Dependencies::check_unique_concrete_method(InstanceKlass* ctxk,
// we don't really need to search beneath it for overrides.
// This is probably not important, since we don't use dependencies
// to track final methods. (They can't be "definalized".)
- ClassHierarchyWalker wf(uniqm->method_holder(), uniqm);
- return wf.find_witness_definer(ctxk, changes);
+ ConcreteMethodFinder wf(uniqm, uniqm->method_holder());
+ Klass* k = wf.find_witness(ctxk, changes);
+ return k;
}
// Find the set of all non-abstract methods under ctxk that match m.
@@ -1570,10 +1562,10 @@ Method* Dependencies::find_unique_concrete_method(InstanceKlass* ctxk, Method* m
if (m->is_old()) {
return NULL;
}
- ClassHierarchyWalker wf(m);
assert(verify_method_context(ctxk, m), "proper context");
+ ConcreteMethodFinder wf(m);
wf.record_witnesses(1);
- Klass* wit = wf.find_witness_definer(ctxk);
+ Klass* wit = wf.find_witness(ctxk);
if (wit != NULL) return NULL; // Too many witnesses.
Method* fm = wf.found_method(0); // Will be NULL if num_parts == 0.
if (Dependencies::is_concrete_method(m, ctxk)) {
@@ -1805,10 +1797,10 @@ bool KlassDepChange::involves_context(Klass* k) {
}
void Dependencies::print_statistics() {
- ClassHierarchyWalker::print_statistics();
+ AbstractClassHierarchyWalker::print_statistics();
}
-void ClassHierarchyWalker::print_statistics() {
+void AbstractClassHierarchyWalker::print_statistics() {
if (UsePerfData) {
jlong deps_find_witness_calls = _perf_find_witness_anywhere_calls_count->get_value();
jlong deps_find_witness_steps = _perf_find_witness_anywhere_steps_count->get_value();
@@ -1838,5 +1830,5 @@ CallSiteDepChange::CallSiteDepChange(Handle call_site, Handle method_handle) :
}
void dependencies_init() {
- ClassHierarchyWalker::init();
+ AbstractClassHierarchyWalker::init();
}
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index 4ff054b474a..62bd2d340e4 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -439,7 +439,6 @@ void nmethod::init_defaults() {
_stack_traversal_mark = 0;
_load_reported = false; // jvmti state
_unload_reported = false;
- _is_far_code = false; // nmethods are located in CodeCache
#ifdef ASSERT
_oops_are_stale = false;
@@ -2333,7 +2332,6 @@ nmethodLocker::nmethodLocker(address pc) {
// should pass zombie_ok == true.
void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) {
if (cm == NULL) return;
- if (cm->is_aot()) return; // FIXME: Revisit once _lock_count is added to aot_method
nmethod* nm = cm->as_nmethod();
Atomic::inc(&nm->_lock_count);
assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method: %p", nm);
@@ -2341,7 +2339,6 @@ void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) {
void nmethodLocker::unlock_nmethod(CompiledMethod* cm) {
if (cm == NULL) return;
- if (cm->is_aot()) return; // FIXME: Revisit once _lock_count is added to aot_method
nmethod* nm = cm->as_nmethod();
Atomic::dec(&nm->_lock_count);
assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
@@ -2489,11 +2486,11 @@ void nmethod::verify_scopes() {
verify_interrupt_point(iter.addr());
break;
case relocInfo::opt_virtual_call_type:
- stub = iter.opt_virtual_call_reloc()->static_stub(false);
+ stub = iter.opt_virtual_call_reloc()->static_stub();
verify_interrupt_point(iter.addr());
break;
case relocInfo::static_call_type:
- stub = iter.static_call_reloc()->static_stub(false);
+ stub = iter.static_call_reloc()->static_stub();
//verify_interrupt_point(iter.addr());
break;
case relocInfo::runtime_call_type:
@@ -3464,28 +3461,11 @@ class DirectNativeCallWrapper: public NativeCallWrapper {
}
virtual void set_destination_mt_safe(address dest) {
-#if INCLUDE_AOT
- if (UseAOT) {
- CodeBlob* callee = CodeCache::find_blob(dest);
- CompiledMethod* cm = callee->as_compiled_method_or_null();
- if (cm != NULL && cm->is_far_code()) {
- // Temporary fix, see JDK-8143106
- CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address());
- csc->set_to_far(methodHandle(Thread::current(), cm->method()), dest);
- return;
- }
- }
-#endif
_call->set_destination_mt_safe(dest);
}
virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info) {
CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address());
-#if INCLUDE_AOT
- if (info.to_aot()) {
- csc->set_to_far(method, info.entry());
- } else
-#endif
{
csc->set_to_interpreted(method, info.entry());
}
diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp
index 78b507958eb..47769c53a5b 100644
--- a/src/hotspot/share/code/relocInfo.cpp
+++ b/src/hotspot/share/code/relocInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -415,18 +415,14 @@ void static_stub_Relocation::pack_data_to(CodeSection* dest) {
short* p = (short*) dest->locs_end();
CodeSection* insts = dest->outer()->insts();
normalize_address(_static_call, insts);
- jint is_aot = _is_aot ? 1 : 0;
- p = pack_2_ints_to(p, scaled_offset(_static_call, insts->start()), is_aot);
+ p = pack_1_int_to(p, scaled_offset(_static_call, insts->start()));
dest->set_locs_end((relocInfo*) p);
}
void static_stub_Relocation::unpack_data() {
address base = binding()->section_start(CodeBuffer::SECT_INSTS);
- jint offset;
- jint is_aot;
- unpack_2_ints(offset, is_aot);
+ jint offset = unpack_1_int();
_static_call = address_from_scaled_offset(offset, base);
- _is_aot = (is_aot == 1);
}
void trampoline_stub_Relocation::pack_data_to(CodeSection* dest ) {
@@ -648,14 +644,14 @@ bool opt_virtual_call_Relocation::clear_inline_cache() {
return set_to_clean_no_ic_refill(icache);
}
-address opt_virtual_call_Relocation::static_stub(bool is_aot) {
+address opt_virtual_call_Relocation::static_stub() {
// search for the static stub who points back to this static call
address static_call_addr = addr();
RelocIterator iter(code());
while (iter.next()) {
if (iter.type() == relocInfo::static_stub_type) {
static_stub_Relocation* stub_reloc = iter.static_stub_reloc();
- if (stub_reloc->static_call() == static_call_addr && stub_reloc->is_aot() == is_aot) {
+ if (stub_reloc->static_call() == static_call_addr) {
return iter.addr();
}
}
@@ -689,14 +685,14 @@ bool static_call_Relocation::clear_inline_cache() {
}
-address static_call_Relocation::static_stub(bool is_aot) {
+address static_call_Relocation::static_stub() {
// search for the static stub who points back to this static call
address static_call_addr = addr();
RelocIterator iter(code());
while (iter.next()) {
if (iter.type() == relocInfo::static_stub_type) {
static_stub_Relocation* stub_reloc = iter.static_stub_reloc();
- if (stub_reloc->static_call() == static_call_addr && stub_reloc->is_aot() == is_aot) {
+ if (stub_reloc->static_call() == static_call_addr) {
return iter.addr();
}
}
diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp
index 2d4029ade6d..55d4ac7c62d 100644
--- a/src/hotspot/share/code/relocInfo.hpp
+++ b/src/hotspot/share/code/relocInfo.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1066,7 +1066,7 @@ class opt_virtual_call_Relocation : public CallRelocation {
bool clear_inline_cache();
// find the matching static_stub
- address static_stub(bool is_aot);
+ address static_stub();
};
@@ -1098,24 +1098,23 @@ class static_call_Relocation : public CallRelocation {
bool clear_inline_cache();
// find the matching static_stub
- address static_stub(bool is_aot);
+ address static_stub();
};
class static_stub_Relocation : public Relocation {
public:
- static RelocationHolder spec(address static_call, bool is_aot = false) {
+ static RelocationHolder spec(address static_call) {
RelocationHolder rh = newHolder();
- new(rh) static_stub_Relocation(static_call, is_aot);
+ new(rh) static_stub_Relocation(static_call);
return rh;
}
private:
address _static_call; // location of corresponding static_call
- bool _is_aot; // trampoline to aot code
- static_stub_Relocation(address static_call, bool is_aot)
+ static_stub_Relocation(address static_call)
: Relocation(relocInfo::static_stub_type),
- _static_call(static_call), _is_aot(is_aot) { }
+ _static_call(static_call) { }
friend class RelocIterator;
static_stub_Relocation() : Relocation(relocInfo::static_stub_type) { }
@@ -1124,7 +1123,6 @@ class static_stub_Relocation : public Relocation {
bool clear_inline_cache();
address static_call() { return _static_call; }
- bool is_aot() { return _is_aot; }
// data is packed as a scaled offset in "1_int" format: [c] or [Cc]
void pack_data_to(CodeSection* dest);
diff --git a/src/hotspot/share/compiler/compilationPolicy.cpp b/src/hotspot/share/compiler/compilationPolicy.cpp
index 1acb2d60f19..c50b117a683 100644
--- a/src/hotspot/share/compiler/compilationPolicy.cpp
+++ b/src/hotspot/share/compiler/compilationPolicy.cpp
@@ -235,8 +235,6 @@ class LoopPredicate : AllStatic {
scale *= threshold_scaling;
}
switch(cur_level) {
- case CompLevel_aot:
- return b >= Tier3AOTBackEdgeThreshold * scale;
case CompLevel_none:
case CompLevel_limited_profile:
return b >= Tier3BackEdgeThreshold * scale;
@@ -250,10 +248,6 @@ class LoopPredicate : AllStatic {
static bool apply(int i, int b, CompLevel cur_level, const methodHandle& method) {
double k = 1;
switch(cur_level) {
- case CompLevel_aot: {
- k = CompilationModeFlag::disable_intermediate() ? 1 : CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
- break;
- }
case CompLevel_none:
// Fall through
case CompLevel_limited_profile: {
@@ -279,9 +273,6 @@ class CallPredicate : AllStatic {
scale *= threshold_scaling;
}
switch(cur_level) {
- case CompLevel_aot:
- return (i >= Tier3AOTInvocationThreshold * scale) ||
- (i >= Tier3AOTMinInvocationThreshold * scale && i + b >= Tier3AOTCompileThreshold * scale);
case CompLevel_none:
case CompLevel_limited_profile:
return (i >= Tier3InvocationThreshold * scale) ||
@@ -297,10 +288,6 @@ class CallPredicate : AllStatic {
static bool apply(int i, int b, CompLevel cur_level, const methodHandle& method) {
double k = 1;
switch(cur_level) {
- case CompLevel_aot: {
- k = CompilationModeFlag::disable_intermediate() ? 1 : CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
- break;
- }
case CompLevel_none:
case CompLevel_limited_profile: {
k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
@@ -520,8 +507,8 @@ bool CompilationPolicy::verify_level(CompLevel level) {
return false;
}
- // AOT and interpreter levels are always valid.
- if (level == CompLevel_aot || level == CompLevel_none) {
+ // Interpreter level is always valid.
+ if (level == CompLevel_none) {
return true;
}
if (CompilationModeFlag::normal()) {
@@ -759,7 +746,7 @@ void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level
if (level == CompLevel_none) {
if (mh->has_compiled_code()) {
- // Happens when we switch from AOT to interpreter to profile.
+ // Happens when we switch to interpreter to profile.
MutexLocker ml(Compile_lock);
NoSafepointVerifier nsv;
if (mh->has_compiled_code()) {
@@ -773,24 +760,6 @@ void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level
}
return;
}
- if (level == CompLevel_aot) {
- if (mh->has_aot_code()) {
- if (PrintTieredEvents) {
- print_event(COMPILE, mh(), mh(), bci, level);
- }
- MutexLocker ml(Compile_lock);
- NoSafepointVerifier nsv;
- if (mh->has_aot_code() && mh->code() != mh->aot_code()) {
- mh->aot_code()->make_entrant();
- if (mh->has_compiled_code()) {
- mh->code()->make_not_entrant();
- }
- MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
- Method::set_code(mh, mh->aot_code());
- }
- }
- return;
- }
if (!CompilationModeFlag::disable_intermediate()) {
// Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
@@ -1038,16 +1007,6 @@ CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_le
} else {
switch(cur_level) {
default: break;
- case CompLevel_aot:
- // If we were at full profile level, would we switch to full opt?
- if (common(method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
- next_level = CompLevel_full_optimization;
- } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
- Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
- Predicate::apply(i, b, cur_level, method))) {
- next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_none : CompLevel_full_profile;
- }
- break;
case CompLevel_none:
// If we were at full profile level, would we switch to full opt?
if (common(method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
@@ -1152,26 +1111,6 @@ CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cu
return next_level;
}
-bool CompilationPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, Thread* thread) {
- if (UseAOT) {
- if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) {
- // If the current level is full profile or interpreter and we're switching to any other level,
- // activate the AOT code back first so that we won't waste time overprofiling.
- compile(mh, InvocationEntryBci, CompLevel_aot, thread);
- // Fall through for JIT compilation.
- }
- if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) {
- // If the next level is limited profile, use the aot code (if there is any),
- // since it's essentially the same thing.
- compile(mh, InvocationEntryBci, CompLevel_aot, thread);
- // Not need to JIT, we're done.
- return true;
- }
- }
- return false;
-}
-
-
// Handle the invocation event.
void CompilationPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
CompLevel level, CompiledMethod* nm, TRAPS) {
@@ -1180,10 +1119,6 @@ void CompilationPolicy::method_invocation_event(const methodHandle& mh, const me
}
CompLevel next_level = call_event(mh, level, THREAD);
if (next_level != level) {
- if (maybe_switch_to_aot(mh, level, next_level, THREAD)) {
- // No JITting necessary
- return;
- }
if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
compile(mh, InvocationEntryBci, next_level, THREAD);
}
@@ -1214,14 +1149,7 @@ void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const m
// enough calls.
CompLevel cur_level, next_level;
if (mh() != imh()) { // If there is an enclosing method
- if (level == CompLevel_aot) {
- // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling.
- if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) {
- CompLevel enclosing_level = limit_level(CompLevel_full_profile);
- compile(mh, InvocationEntryBci, enclosing_level, THREAD);
- }
- } else {
- // Current loop event level is not AOT
+ {
guarantee(nm != NULL, "Should have nmethod here");
cur_level = comp_level(mh());
next_level = call_event(mh, cur_level, THREAD);
@@ -1253,7 +1181,7 @@ void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const m
next_level = CompLevel_full_profile;
}
if (cur_level != next_level) {
- if (!maybe_switch_to_aot(mh, cur_level, next_level, THREAD) && !CompileBroker::compilation_is_in_queue(mh)) {
+ if (!CompileBroker::compilation_is_in_queue(mh)) {
compile(mh, InvocationEntryBci, next_level, THREAD);
}
}
@@ -1262,7 +1190,7 @@ void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const m
cur_level = comp_level(mh());
next_level = call_event(mh, cur_level, THREAD);
if (next_level != cur_level) {
- if (!maybe_switch_to_aot(mh, cur_level, next_level, THREAD) && !CompileBroker::compilation_is_in_queue(mh)) {
+ if (!CompileBroker::compilation_is_in_queue(mh)) {
compile(mh, InvocationEntryBci, next_level, THREAD);
}
}
diff --git a/src/hotspot/share/compiler/compilationPolicy.hpp b/src/hotspot/share/compiler/compilationPolicy.hpp
index f4897f444fd..15df67a6ee5 100644
--- a/src/hotspot/share/compiler/compilationPolicy.hpp
+++ b/src/hotspot/share/compiler/compilationPolicy.hpp
@@ -210,8 +210,6 @@ class CompilationPolicy : AllStatic {
// Is method profiled enough?
static bool is_method_profiled(const methodHandle& method);
- static bool maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, Thread* thread);
-
static void set_c1_count(int x) { _c1_count = x; }
static void set_c2_count(int x) { _c2_count = x; }
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index c2bd8584ed9..c7ac4cc696f 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -653,7 +653,9 @@ void CompileBroker::compilation_init_phase1(Thread* THREAD) {
}
if (FLAG_IS_DEFAULT(JVMCIHostThreads)) {
} else {
+#ifdef COMPILER1
_c1_count = JVMCIHostThreads;
+#endif // COMPILER1
}
}
}
diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp
index f8eff0a6917..a6445c161b2 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp
@@ -159,16 +159,14 @@ intx CompilerConfig::scaled_freq_log(intx freq_log, double scale) {
}
}
-void set_client_emulation_mode_flags() {
+void CompilerConfig::set_client_emulation_mode_flags() {
+ assert(has_c1(), "Must have C1 compiler present");
CompilationModeFlag::set_quick_only();
FLAG_SET_ERGO(ProfileInterpreter, false);
#if INCLUDE_JVMCI
FLAG_SET_ERGO(EnableJVMCI, false);
FLAG_SET_ERGO(UseJVMCICompiler, false);
-#endif
-#if INCLUDE_AOT
- FLAG_SET_ERGO(UseAOT, false);
#endif
if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
@@ -204,7 +202,6 @@ void set_client_emulation_mode_flags() {
bool CompilerConfig::is_compilation_mode_selected() {
return !FLAG_IS_DEFAULT(TieredCompilation) ||
!FLAG_IS_DEFAULT(TieredStopAtLevel) ||
- !FLAG_IS_DEFAULT(UseAOT) ||
!FLAG_IS_DEFAULT(CompilationMode)
JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI)
|| !FLAG_IS_DEFAULT(UseJVMCICompiler));
@@ -275,14 +272,6 @@ void CompilerConfig::set_legacy_emulation_flags() {
FLAG_SET_ERGO(Tier4BackEdgeThreshold, osr_threshold);
FLAG_SET_ERGO(Tier0ProfilingStartPercentage, InterpreterProfilePercentage);
}
-#if INCLUDE_AOT
- if (UseAOT) {
- FLAG_SET_ERGO(Tier3AOTInvocationThreshold, threshold);
- FLAG_SET_ERGO(Tier3AOTMinInvocationThreshold, threshold);
- FLAG_SET_ERGO(Tier3AOTCompileThreshold, threshold);
- FLAG_SET_ERGO(Tier3AOTBackEdgeThreshold, CompilerConfig::is_c1_only() ? osr_threshold : osr_profile_threshold);
- }
-#endif
} else {
// Normal tiered mode, ignore legacy flags
}
@@ -331,23 +320,6 @@ void CompilerConfig::set_compilation_policy_flags() {
FLAG_SET_DEFAULT(Tier0ProfilingStartPercentage, 33);
}
-#if INCLUDE_AOT
- if (UseAOT) {
- if (FLAG_IS_DEFAULT(Tier3AOTInvocationThreshold)) {
- FLAG_SET_DEFAULT(Tier3AOTInvocationThreshold, 200);
- }
- if (FLAG_IS_DEFAULT(Tier3AOTMinInvocationThreshold)) {
- FLAG_SET_DEFAULT(Tier3AOTMinInvocationThreshold, 100);
- }
- if (FLAG_IS_DEFAULT(Tier3AOTCompileThreshold)) {
- FLAG_SET_DEFAULT(Tier3AOTCompileThreshold, 2000);
- }
- if (FLAG_IS_DEFAULT(Tier3AOTBackEdgeThreshold)) {
- FLAG_SET_DEFAULT(Tier3AOTBackEdgeThreshold, 2000);
- }
- }
-#endif
-
if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
FLAG_SET_DEFAULT(Tier4InvocationThreshold, 5000);
}
@@ -560,17 +532,19 @@ void CompilerConfig::ergo_initialize() {
return;
#endif
- if (!is_compilation_mode_selected()) {
+ if (has_c1()) {
+ if (!is_compilation_mode_selected()) {
#if defined(_WINDOWS) && !defined(_LP64)
- if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
- FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
- }
+ if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
+ FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
+ }
#endif
- if (NeverActAsServerClassMachine) {
+ if (NeverActAsServerClassMachine) {
+ set_client_emulation_mode_flags();
+ }
+ } else if (!has_c2() && !is_jvmci_compiler()) {
set_client_emulation_mode_flags();
}
- } else if (!has_c2() && !is_jvmci_compiler()) {
- set_client_emulation_mode_flags();
}
set_legacy_emulation_flags();
diff --git a/src/hotspot/share/compiler/compilerDefinitions.hpp b/src/hotspot/share/compiler/compilerDefinitions.hpp
index d87c892f091..1c8096918a6 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.hpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.hpp
@@ -55,9 +55,8 @@ enum MethodCompilation {
// Enumeration to distinguish tiers of compilation
enum CompLevel {
- CompLevel_any = -2, // Used for querying the state
- CompLevel_all = -2, // Used for changing the state
- CompLevel_aot = -1,
+ CompLevel_any = -1, // Used for querying the state
+ CompLevel_all = -1, // Used for changing the state
CompLevel_none = 0, // Interpreter
CompLevel_simple = 1, // C1
CompLevel_limited_profile = 2, // C1, invocation & backedge counters
@@ -138,18 +137,16 @@ class CompilerConfig : public AllStatic {
constexpr static bool has_c2() { return COMPILER2_PRESENT(true) NOT_COMPILER2(false); }
constexpr static bool has_jvmci() { return JVMCI_ONLY(true) NOT_JVMCI(false); }
constexpr static bool has_tiered() { return has_c1() && (has_c2() || has_jvmci()); }
- constexpr static bool has_aot() { return AOT_ONLY(true) NOT_AOT(false); }
- static bool is_aot() { return AOT_ONLY(has_aot() && UseAOT) NOT_AOT(false); }
static bool is_jvmci_compiler() { return JVMCI_ONLY(has_jvmci() && UseJVMCICompiler) NOT_JVMCI(false); }
static bool is_jvmci() { return JVMCI_ONLY(has_jvmci() && EnableJVMCI) NOT_JVMCI(false); }
static bool is_interpreter_only();
// is_*_only() functions describe situations in which the JVM is in one way or another
// forced to use a particular compiler or their combination. The constraint functions
- // deliberately ignore the fact that there may also be AOT methods and methods installed
+ // deliberately ignore the fact that there may also be methods installed
// through JVMCI (where the JVMCI compiler was invoked not through the broker). Be sure
- // to check for those (using is_jvmci() and is_aot()) in situations where it matters.
+ // to check for those (using is_jvmci()) in situations where it matters.
//
// Is the JVM in a configuration that permits only c1-compiled methods (level 1,2,3)?
@@ -163,13 +160,13 @@ class CompilerConfig : public AllStatic {
return false;
}
- static bool is_c1_or_interpreter_only_no_aot_or_jvmci() {
+ static bool is_c1_or_interpreter_only_no_jvmci() {
assert(is_jvmci_compiler() && is_jvmci() || !is_jvmci_compiler(), "JVMCI compiler implies enabled JVMCI");
- return !is_aot() && !is_jvmci() && (is_interpreter_only() || is_c1_only());
+ return !is_jvmci() && (is_interpreter_only() || is_c1_only());
}
- static bool is_c1_only_no_aot_or_jvmci() {
- return is_c1_only() && !is_aot() && !is_jvmci();
+ static bool is_c1_only_no_jvmci() {
+ return is_c1_only() && !is_jvmci();
}
// Is the JVM in a configuration that permits only c1-compiled methods at level 1?
@@ -246,6 +243,7 @@ class CompilerConfig : public AllStatic {
static void set_compilation_policy_flags();
static void set_jvmci_specific_flags();
static void set_legacy_emulation_flags();
+ static void set_client_emulation_mode_flags();
};
#endif // SHARE_COMPILER_COMPILERDEFINITIONS_HPP
diff --git a/src/hotspot/share/compiler/compilerOracle.cpp b/src/hotspot/share/compiler/compilerOracle.cpp
index d5cba686e4e..5b6d1c8f921 100644
--- a/src/hotspot/share/compiler/compilerOracle.cpp
+++ b/src/hotspot/share/compiler/compilerOracle.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -306,6 +306,11 @@ static void register_command(TypedMethodOptionMatcher* matcher,
}
assert(CompilerOracle::option_matches_type(option, value), "Value must match option type");
+ if (option == CompileCommand::Blackhole && !UnlockExperimentalVMOptions) {
+ warning("Blackhole compile option is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions");
+ return;
+ }
+
matcher->init(option, option_list);
matcher->set_value(value);
option_list = matcher;
@@ -417,6 +422,37 @@ bool CompilerOracle::should_break_at(const methodHandle& method) {
return check_predicate(CompileCommand::Break, method);
}
+void CompilerOracle::tag_blackhole_if_possible(const methodHandle& method) {
+ if (!check_predicate(CompileCommand::Blackhole, method)) {
+ return;
+ }
+ guarantee(UnlockExperimentalVMOptions, "Checked during initial parsing");
+ if (method->result_type() != T_VOID) {
+ warning("Blackhole compile option only works for methods with void type: %s",
+ method->name_and_sig_as_C_string());
+ return;
+ }
+ if (!method->is_empty_method()) {
+ warning("Blackhole compile option only works for empty methods: %s",
+ method->name_and_sig_as_C_string());
+ return;
+ }
+ if (!method->is_static()) {
+ warning("Blackhole compile option only works for static methods: %s",
+ method->name_and_sig_as_C_string());
+ return;
+ }
+ if (method->intrinsic_id() == vmIntrinsics::_blackhole) {
+ return;
+ }
+ if (method->intrinsic_id() != vmIntrinsics::_none) {
+ warning("Blackhole compile option only works for methods that do not have intrinsic set: %s, %s",
+ method->name_and_sig_as_C_string(), vmIntrinsics::name_at(method->intrinsic_id()));
+ return;
+ }
+ method->set_intrinsic_id(vmIntrinsics::_blackhole);
+}
+
static enum CompileCommand match_option_name(const char* line, int* bytes_read, char* errorbuf, int bufsize) {
assert(ARRAY_SIZE(option_names) == static_cast(CompileCommand::Count), "option_names size mismatch");
diff --git a/src/hotspot/share/compiler/compilerOracle.hpp b/src/hotspot/share/compiler/compilerOracle.hpp
index 7a58885884d..858e5c79406 100644
--- a/src/hotspot/share/compiler/compilerOracle.hpp
+++ b/src/hotspot/share/compiler/compilerOracle.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,7 @@ class methodHandle;
option(Print, "print", Bool) \
option(Inline, "inline", Bool) \
option(DontInline, "dontinline", Bool) \
+ option(Blackhole, "blackhole", Bool) \
option(CompileOnly, "compileonly", Bool)\
option(Exclude, "exclude", Bool) \
option(Break, "break", Bool) \
@@ -144,6 +145,9 @@ class CompilerOracle : AllStatic {
// Tells whether there are any methods to print for print_method_statistics()
static bool should_print_methods();
+ // Tags the method as blackhole candidate, if possible.
+ static void tag_blackhole_if_possible(const methodHandle& method);
+
// A wrapper for checking bool options
static bool has_option(const methodHandle& method, enum CompileCommand option);
diff --git a/src/hotspot/share/compiler/compiler_globals.hpp b/src/hotspot/share/compiler/compiler_globals.hpp
index d12bb9ba7ba..050e5e1f6db 100644
--- a/src/hotspot/share/compiler/compiler_globals.hpp
+++ b/src/hotspot/share/compiler/compiler_globals.hpp
@@ -190,33 +190,6 @@
"Back edge threshold at which tier 3 OSR compilation is invoked") \
range(0, max_jint) \
\
- product(intx, Tier3AOTInvocationThreshold, 10000, \
- "Compile if number of method invocations crosses this " \
- "threshold if coming from AOT;" \
- "with CompilationMode=high-only|high-only-quick-internal)" \
- "determines when to transition from AOT to interpreter") \
- range(0, max_jint) \
- \
- product(intx, Tier3AOTMinInvocationThreshold, 1000, \
- "Minimum invocation to compile at tier 3 if coming from AOT;" \
- "with CompilationMode=high-only|high-only-quick-internal)" \
- "determines when to transition from AOT to interpreter") \
- range(0, max_jint) \
- \
- product(intx, Tier3AOTCompileThreshold, 15000, \
- "Threshold at which tier 3 compilation is invoked (invocation " \
- "minimum must be satisfied) if coming from AOT;" \
- "with CompilationMode=high-only|high-only-quick-internal)" \
- "determines when to transition from AOT to interpreter") \
- range(0, max_jint) \
- \
- product(intx, Tier3AOTBackEdgeThreshold, 120000, \
- "Back edge threshold at which tier 3 OSR compilation is invoked " \
- "if coming from AOT;" \
- "with CompilationMode=high-only|high-only-quick-internal)" \
- "determines when to transition from AOT to interpreter") \
- range(0, max_jint) \
- \
product(intx, Tier4InvocationThreshold, 5000, \
"Compile if number of method invocations crosses this " \
"threshold") \
diff --git a/src/hotspot/share/compiler/directivesParser.cpp b/src/hotspot/share/compiler/directivesParser.cpp
index af17030648f..e88dda3768b 100644
--- a/src/hotspot/share/compiler/directivesParser.cpp
+++ b/src/hotspot/share/compiler/directivesParser.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "compiler/directivesParser.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include
void DirectivesParser::push_tmp(CompilerDirectives* dir) {
diff --git a/src/hotspot/share/compiler/disassembler.cpp b/src/hotspot/share/compiler/disassembler.cpp
index 0798acf24e8..25f5fe73618 100644
--- a/src/hotspot/share/compiler/disassembler.cpp
+++ b/src/hotspot/share/compiler/disassembler.cpp
@@ -36,7 +36,7 @@
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/resourceHash.hpp"
@@ -881,23 +881,9 @@ void Disassembler::decode(CodeBlob* cb, outputStream* st) {
decode_env env(cb, st);
env.output()->print_cr("--------------------------------------------------------------------------------");
- if (cb->is_aot()) {
- env.output()->print("A ");
- if (cb->is_compiled()) {
- CompiledMethod* cm = (CompiledMethod*)cb;
- env.output()->print("%d ",cm->compile_id());
- cm->method()->method_holder()->name()->print_symbol_on(env.output());
- env.output()->print(".");
- cm->method()->name()->print_symbol_on(env.output());
- cm->method()->signature()->print_symbol_on(env.output());
- } else {
- env.output()->print_cr("%s", cb->name());
- }
- } else {
- env.output()->print("Decoding CodeBlob");
- if (cb->name() != NULL) {
- env.output()->print(", name: %s,", cb->name());
- }
+ env.output()->print("Decoding CodeBlob");
+ if (cb->name() != NULL) {
+ env.output()->print(", name: %s,", cb->name());
}
env.output()->print_cr(" at [" PTR_FORMAT ", " PTR_FORMAT "] " JLONG_FORMAT " bytes", p2i(cb->code_begin()), p2i(cb->code_end()), ((jlong)(cb->code_end() - cb->code_begin())));
diff --git a/src/hotspot/share/gc/g1/g1BatchedGangTask.cpp b/src/hotspot/share/gc/g1/g1BatchedGangTask.cpp
new file mode 100644
index 00000000000..0c108d8d86f
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1BatchedGangTask.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/g1/g1BatchedGangTask.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/growableArray.hpp"
+
+void G1AbstractSubTask::record_work_item(uint worker_id, uint index, size_t count) {
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ g1h->phase_times()->record_thread_work_item(_tag, worker_id, count, index);
+}
+
+const char* G1AbstractSubTask::name() const {
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ return g1h->phase_times()->phase_name(_tag);
+}
+
+bool G1BatchedGangTask::try_claim_serial_task(int& task) {
+ task = Atomic::fetch_and_add(&_num_serial_tasks_done, 1);
+ return task < _serial_tasks.length();
+}
+
+void G1BatchedGangTask::add_serial_task(G1AbstractSubTask* task) {
+ assert(task != nullptr, "must be");
+ _serial_tasks.push(task);
+}
+
+void G1BatchedGangTask::add_parallel_task(G1AbstractSubTask* task) {
+ assert(task != nullptr, "must be");
+ _parallel_tasks.push(task);
+}
+
+G1BatchedGangTask::G1BatchedGangTask(const char* name, G1GCPhaseTimes* phase_times) :
+ AbstractGangTask(name),
+ _num_serial_tasks_done(0),
+ _phase_times(phase_times),
+ _serial_tasks(),
+ _parallel_tasks() {
+}
+
+uint G1BatchedGangTask::num_workers_estimate() const {
+ double sum = 0.0;
+ for (G1AbstractSubTask* task : _serial_tasks) {
+ sum += task->worker_cost();
+ }
+ for (G1AbstractSubTask* task : _parallel_tasks) {
+ sum += task->worker_cost();
+ }
+ return ceil(sum);
+}
+
+void G1BatchedGangTask::set_max_workers(uint max_workers) {
+ for (G1AbstractSubTask* task : _serial_tasks) {
+ task->set_max_workers(max_workers);
+ }
+ for (G1AbstractSubTask* task : _parallel_tasks) {
+ task->set_max_workers(max_workers);
+ }
+}
+
+void G1BatchedGangTask::work(uint worker_id) {
+ int t = 0;
+ while (try_claim_serial_task(t)) {
+ G1AbstractSubTask* task = _serial_tasks.at(t);
+ G1GCParPhaseTimesTracker x(_phase_times, task->tag(), worker_id);
+ task->do_work(worker_id);
+ }
+ for (G1AbstractSubTask* task : _parallel_tasks) {
+ G1GCParPhaseTimesTracker x(_phase_times, task->tag(), worker_id);
+ task->do_work(worker_id);
+ }
+}
+
+G1BatchedGangTask::~G1BatchedGangTask() {
+ assert(Atomic::load(&_num_serial_tasks_done) >= _serial_tasks.length(),
+ "Only %d tasks of %d claimed", Atomic::load(&_num_serial_tasks_done), _serial_tasks.length());
+
+ for (G1AbstractSubTask* task : _parallel_tasks) {
+ delete task;
+ }
+ for (G1AbstractSubTask* task : _serial_tasks) {
+ delete task;
+ }
+}
diff --git a/src/hotspot/share/gc/g1/g1BatchedGangTask.hpp b/src/hotspot/share/gc/g1/g1BatchedGangTask.hpp
new file mode 100644
index 00000000000..7b423e45629
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1BatchedGangTask.hpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1BATCHEDGANGTASK_HPP
+#define SHARE_GC_G1_G1BATCHEDGANGTASK_HPP
+
+#include "gc/g1/g1GCPhaseTimes.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "memory/allocation.hpp"
+
+template <typename E, MEMFLAGS F>
+class GrowableArrayCHeap;
+
+// G1AbstractSubTask represents a task to be performed either within a
+// G1BatchedGangTask running on a single worker ("serially") or multiple workers
+// ("in parallel"). A G1AbstractSubTask is always associated with a phase tag
+// that is used to automatically store timing information.
+//
+// A "serial" task is some piece of work that either can not be parallelized
+// easily, or is typically so short that parallelization is not worth the effort.
+// Current examples would be summarizing per worker thread information gathered
+// during garbage collection (e.g. Merge PSS work).
+//
+// A "parallel" task could be some large amount of work that typically naturally
+// splits across the heap in some way. Current examples would be clearing the
+// card table.
+//
+// See G1BatchedGangTask for information on execution.
+class G1AbstractSubTask : public CHeapObj<mtGC> {
+ G1GCPhaseTimes::GCParPhases _tag;
+
+ NONCOPYABLE(G1AbstractSubTask);
+
+protected:
+ // Record work item for this tag in G1GCPhaseTimes.
+ void record_work_item(uint worker_id, uint index, size_t count);
+
+public:
+ G1AbstractSubTask(G1GCPhaseTimes::GCParPhases tag) : _tag(tag) { }
+ virtual ~G1AbstractSubTask() { }
+
+ // How many workers (threads) would this task be able to keep busy for at least
+ // as long as to amortize worker startup costs.
+ // Called by G1BatchedGangTask to determine total number of workers.
+ virtual double worker_cost() const = 0;
+
+ // Called by G1BatchedGangTask to provide information about the maximum
+ // number of workers for all subtasks after it has been determined.
+ virtual void set_max_workers(uint max_workers) { }
+
+ // Perform the actual work. Gets the worker id it is run on passed in.
+ virtual void do_work(uint worker_id) = 0;
+
+ // Tag for this G1AbstractSubTask.
+ G1GCPhaseTimes::GCParPhases tag() const { return _tag; }
+ // Human readable name derived from the tag.
+ const char* name() const;
+};
+
+// G1BatchedGangTask runs a set of G1AbstractSubTask using a work gang.
+//
+// Subclasses of this class add their G1AbstractSubTasks into either the list
+// of "serial" or the list of "parallel" tasks. They are supposed to be the owners
+// of the G1AbstractSubTasks.
+//
+// Eg. the constructor contains code like the following:
+//
+// add_serial_task(new SomeSubTask());
+// [...]
+// add_parallel_task(new SomeOtherSubTask());
+// [...]
+//
+// During execution in the work gang, this class will make sure that the "serial"
+// tasks are executed by a single worker exactly once, but different "serial"
+// tasks may be executed in parallel using different workers. "Parallel" tasks'
+// do_work() method may be called by different workers passing a different
+// worker_id at the same time, but at most once per given worker_id.
+//
+// There is also no guarantee that G1AbstractSubTasks::do_work() of different tasks
+// are actually run in parallel.
+//
+// The current implementation assumes that constructors and destructors of the
+// G1AbstractSubTasks can be executed in the constructor/destructor of an instance
+// of this class.
+//
+// The constructor, destructor and the do_work() methods from different
+// G1AbstractSubTasks may run in any order so they must not have any
+// dependencies at all.
+//
+// For a given G1AbstractSubTask T call order of its methods are as follows:
+//
+// 1) T()
+// 2) T::worker_cost()
+// 3) T::set_max_workers()
+// 4) T::do_work() // potentially in parallel with any other registered G1AbstractSubTask
+// 5) ~T()
+//
+class G1BatchedGangTask : public AbstractGangTask {
+ volatile int _num_serial_tasks_done;
+ G1GCPhaseTimes* _phase_times;
+
+ bool try_claim_serial_task(int& task);
+
+ NONCOPYABLE(G1BatchedGangTask);
+
+ GrowableArrayCHeap<G1AbstractSubTask*, mtGC> _serial_tasks;
+ GrowableArrayCHeap<G1AbstractSubTask*, mtGC> _parallel_tasks;
+
+protected:
+ void add_serial_task(G1AbstractSubTask* task);
+ void add_parallel_task(G1AbstractSubTask* task);
+
+ G1BatchedGangTask(const char* name, G1GCPhaseTimes* phase_times);
+
+public:
+ void work(uint worker_id) override;
+
+ // How many workers can this gang task keep busy and should be started for
+ // "optimal" performance.
+ uint num_workers_estimate() const;
+ // Informs the G1AbstractSubTasks about that we will start execution with the
+ // given number of workers.
+ void set_max_workers(uint max_workers);
+
+ ~G1BatchedGangTask();
+};
+
+#endif // SHARE_GC_G1_G1BATCHEDGANGTASK_HPP
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 3a31722e9ed..f17a4b257f5 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -1444,7 +1444,7 @@ G1CollectedHeap::G1CollectedHeap() :
_cm_thread(NULL),
_cr(NULL),
_task_queues(NULL),
- _evacuation_failed(false),
+ _num_regions_failed_evacuation(0),
_evacuation_failed_info_array(NULL),
_preserved_marks_set(true /* in_c_heap */),
#ifndef PRODUCT
@@ -1497,7 +1497,7 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
// Allocate a new reserved space, preferring to use large pages.
ReservedSpace rs(size, preferred_page_size);
- size_t page_size = ReservedSpace::actual_reserved_page_size(rs);
+ size_t page_size = rs.page_size();
G1RegionToSpaceMapper* result =
G1RegionToSpaceMapper::create_mapper(rs,
size,
@@ -1589,7 +1589,7 @@ jint G1CollectedHeap::initialize() {
_hot_card_cache = new G1HotCardCache(this);
// Create space mappers.
- size_t page_size = ReservedSpace::actual_reserved_page_size(heap_rs);
+ size_t page_size = heap_rs.page_size();
G1RegionToSpaceMapper* heap_storage =
G1RegionToSpaceMapper::create_mapper(heap_rs,
heap_rs.size(),
@@ -3087,8 +3087,16 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus
}
void G1CollectedHeap::remove_self_forwarding_pointers(G1RedirtyCardsQueueSet* rdcqs) {
- G1ParRemoveSelfForwardPtrsTask rsfp_task(rdcqs);
- workers()->run_task(&rsfp_task);
+ uint num_workers = MIN2(workers()->active_workers(), num_regions_failed_evacuation());
+
+ G1ParRemoveSelfForwardPtrsTask cl(rdcqs);
+ log_debug(gc, ergo)("Running %s using %u workers for %u failed regions",
+ cl.name(), num_workers, num_regions_failed_evacuation());
+ workers()->run_task(&cl, num_workers);
+
+ assert(cl.num_failed_regions() == num_regions_failed_evacuation(),
+ "Removed regions %u inconsistent with expected %u",
+ cl.num_failed_regions(), num_regions_failed_evacuation());
}
void G1CollectedHeap::restore_after_evac_failure(G1RedirtyCardsQueueSet* rdcqs) {
@@ -3101,10 +3109,6 @@ void G1CollectedHeap::restore_after_evac_failure(G1RedirtyCardsQueueSet* rdcqs)
}
void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m) {
- if (!_evacuation_failed) {
- _evacuation_failed = true;
- }
-
_evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
_preserved_marks_set.get(worker_id)->push_if_necessary(obj, m);
}
@@ -3658,7 +3662,7 @@ void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_i
_bytes_used_during_gc = 0;
_expand_heap_after_alloc_failure = true;
- _evacuation_failed = false;
+ Atomic::store(&_num_regions_failed_evacuation, 0u);
// Disable the hot card cache.
_hot_card_cache->reset_hot_cache_claimed_index();
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index c4c95ba3406..4acae40a6b4 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -851,8 +851,8 @@ class G1CollectedHeap : public CollectedHeap {
// The parallel task queues
G1ScannerTasksQueueSet *_task_queues;
- // True iff a evacuation has failed in the current collection.
- bool _evacuation_failed;
+ // Number of regions evacuation failed in the current collection.
+ volatile uint _num_regions_failed_evacuation;
EvacuationFailedInfo* _evacuation_failed_info_array;
@@ -1137,7 +1137,11 @@ class G1CollectedHeap : public CollectedHeap {
bool try_collect(GCCause::Cause cause);
// True iff an evacuation has failed in the most-recent collection.
- bool evacuation_failed() { return _evacuation_failed; }
+ inline bool evacuation_failed() const;
+ inline uint num_regions_failed_evacuation() const;
+ // Notify that the garbage collection encountered an evacuation failure in a
+ // region. Should only be called once per region.
+ inline void notify_region_failed_evacuation();
void remove_from_old_gen_sets(const uint old_regions_removed,
const uint archive_regions_removed,
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
index 6066e2f1b6e..85edc3c5168 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
@@ -35,6 +35,7 @@
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
+#include "runtime/atomic.hpp"
G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
return _policy->phase_times();
@@ -188,6 +189,18 @@ void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
_region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}
+bool G1CollectedHeap::evacuation_failed() const {
+ return num_regions_failed_evacuation() > 0;
+}
+
+uint G1CollectedHeap::num_regions_failed_evacuation() const {
+ return Atomic::load(&_num_regions_failed_evacuation);
+}
+
+void G1CollectedHeap::notify_region_failed_evacuation() {
+ Atomic::inc(&_num_regions_failed_evacuation, memory_order_relaxed);
+}
+
#ifndef PRODUCT
// Support for G1EvacuationFailureALot
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
index bed9343299d..752f8185679 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp
index b58d8c66d8b..f03bbb42a7b 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -139,8 +139,8 @@ void G1ConcurrentMarkThread::run_service() {
assert(in_progress(), "must be");
GCIdMark gc_id_mark;
- GCTraceConcTime(Info, gc) tt(FormatBuffer<128>("Concurrent %s Cycle",
- _state == FullMark ? "Mark" : "Undo"));
+ FormatBuffer<128> title("Concurrent %s Cycle", _state == FullMark ? "Mark" : "Undo");
+ GCTraceConcTime(Info, gc) tt(title);
concurrent_cycle_start();
diff --git a/src/hotspot/share/gc/g1/g1EvacFailure.cpp b/src/hotspot/share/gc/g1/g1EvacFailure.cpp
index 9b8fd794456..f821a144b25 100644
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp
@@ -205,12 +205,15 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
G1RedirtyCardsLocalQueueSet _rdc_local_qset;
UpdateLogBuffersDeferred _log_buffer_cl;
+ uint volatile* _num_failed_regions;
+
public:
- RemoveSelfForwardPtrHRClosure(G1RedirtyCardsQueueSet* rdcqs, uint worker_id) :
+ RemoveSelfForwardPtrHRClosure(G1RedirtyCardsQueueSet* rdcqs, uint worker_id, uint volatile* num_failed_regions) :
_g1h(G1CollectedHeap::heap()),
_worker_id(worker_id),
_rdc_local_qset(rdcqs),
- _log_buffer_cl(&_rdc_local_qset) {
+ _log_buffer_cl(&_rdc_local_qset),
+ _num_failed_regions(num_failed_regions) {
}
~RemoveSelfForwardPtrHRClosure() {
@@ -252,6 +255,8 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
hr->rem_set()->clear_locked(true);
hr->note_self_forwarding_removal_end(live_bytes);
+
+ Atomic::inc(_num_failed_regions, memory_order_relaxed);
}
return false;
}
@@ -261,10 +266,11 @@ G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQue
AbstractGangTask("G1 Remove Self-forwarding Pointers"),
_g1h(G1CollectedHeap::heap()),
_rdcqs(rdcqs),
- _hrclaimer(_g1h->workers()->active_workers()) { }
+ _hrclaimer(_g1h->workers()->active_workers()),
+ _num_failed_regions(0) { }
void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
- RemoveSelfForwardPtrHRClosure rsfp_cl(_rdcqs, worker_id);
+ RemoveSelfForwardPtrHRClosure rsfp_cl(_rdcqs, worker_id, &_num_failed_regions);
// We need to check all collection set regions whether they need self forward
// removals, not only the last collection set increment. The reason is that
@@ -273,3 +279,7 @@ void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
// might cause an evacuation failure in any region in the collection set.
_g1h->collection_set_par_iterate_all(&rsfp_cl, &_hrclaimer, worker_id);
}
+
+uint G1ParRemoveSelfForwardPtrsTask::num_failed_regions() const {
+ return Atomic::load(&_num_failed_regions);
+}
diff --git a/src/hotspot/share/gc/g1/g1EvacFailure.hpp b/src/hotspot/share/gc/g1/g1EvacFailure.hpp
index 67b0fb077f1..c73c2f40c43 100644
--- a/src/hotspot/share/gc/g1/g1EvacFailure.hpp
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.hpp
@@ -41,10 +41,14 @@ class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
G1RedirtyCardsQueueSet* _rdcqs;
HeapRegionClaimer _hrclaimer;
+ uint volatile _num_failed_regions;
+
public:
G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs);
void work(uint worker_id);
+
+ uint num_failed_regions() const;
};
#endif // SHARE_GC_G1_G1EVACFAILURE_HPP
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp
index dec709bf8ae..4dbf914c5a4 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp
@@ -160,7 +160,7 @@ class PrepareRegionsClosure : public HeapRegionClosure {
bool do_heap_region(HeapRegion* hr) {
G1CollectedHeap::heap()->prepare_region_for_full_compaction(hr);
- _collector->update_attribute_table(hr);
+ _collector->before_marking_update_attribute_table(hr);
return false;
}
};
@@ -229,16 +229,17 @@ void G1FullCollector::complete_collection() {
_heap->print_heap_after_full_collection(scope()->heap_transition());
}
-void G1FullCollector::update_attribute_table(HeapRegion* hr, bool force_not_compacted) {
+void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) {
if (hr->is_free()) {
- _region_attr_table.set_invalid(hr->hrm_index());
+ // Set as Invalid by default.
+ _region_attr_table.verify_is_invalid(hr->hrm_index());
} else if (hr->is_closed_archive()) {
_region_attr_table.set_skip_marking(hr->hrm_index());
- } else if (hr->is_pinned() || force_not_compacted) {
- _region_attr_table.set_not_compacted(hr->hrm_index());
+ } else if (hr->is_pinned()) {
+ _region_attr_table.set_skip_compacting(hr->hrm_index());
} else {
- // Everything else is processed normally.
- _region_attr_table.set_compacted(hr->hrm_index());
+ // Everything else should be compacted.
+ _region_attr_table.set_compacting(hr->hrm_index());
}
}
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.hpp b/src/hotspot/share/gc/g1/g1FullCollector.hpp
index 3707826014c..176d7b9f5ce 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.hpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,12 +103,15 @@ class G1FullCollector : StackObj {
return _live_stats[region_index]._live_words;
}
- void update_attribute_table(HeapRegion* hr, bool force_not_compacted = false);
+ void before_marking_update_attribute_table(HeapRegion* hr);
- inline bool is_compacted(oop obj) const;
- inline bool is_compacted_or_skip_marking(uint region_index) const;
+ inline bool is_compacting(oop obj) const;
+ inline bool is_skip_compacting(uint region_index) const;
inline bool is_skip_marking(oop obj) const;
+ inline void set_invalid(uint region_idx);
+ inline void update_from_compacting_to_skip_compacting(uint region_idx);
+
private:
void phase1_mark_live_objects();
void phase2_prepare_compaction();
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp b/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp
index b5ae4a84fa9..b08f7f6db7d 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,17 +30,26 @@
#include "oops/oopsHierarchy.hpp"
-bool G1FullCollector::is_compacted(oop obj) const {
- return _region_attr_table.is_compacted(cast_from_oop<HeapWord*>(obj));
+bool G1FullCollector::is_compacting(oop obj) const {
+ return _region_attr_table.is_compacting(cast_from_oop<HeapWord*>(obj));
}
-bool G1FullCollector::is_compacted_or_skip_marking(uint region_index) const {
- return _region_attr_table.is_compacted_or_skip_marking(region_index);
+bool G1FullCollector::is_skip_compacting(uint region_index) const {
+ return _region_attr_table.is_skip_compacting(region_index);
}
bool G1FullCollector::is_skip_marking(oop obj) const {
return _region_attr_table.is_skip_marking(cast_from_oop<HeapWord*>(obj));
}
+void G1FullCollector::set_invalid(uint region_idx) {
+ _region_attr_table.set_invalid(region_idx);
+}
+
+void G1FullCollector::update_from_compacting_to_skip_compacting(uint region_idx) {
+ _region_attr_table.verify_is_compacting(region_idx);
+ _region_attr_table.set_skip_compacting(region_idx);
+}
+
#endif // SHARE_GC_G1_G1FULLCOLLECTOR_INLINE_HPP
diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
index 47f68a0044b..a7aec46d2ee 100644
--- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,24 +35,25 @@
#include "oops/oop.inline.hpp"
#include "utilities/ticks.hpp"
-// Do work for all not-compacted regions.
-class G1ResetNotCompactedClosure : public HeapRegionClosure {
+// Do work for all skip-compacting regions.
+class G1ResetSkipCompactingClosure : public HeapRegionClosure {
G1FullCollector* _collector;
public:
- G1ResetNotCompactedClosure(G1FullCollector* collector) : _collector(collector) { }
+ G1ResetSkipCompactingClosure(G1FullCollector* collector) : _collector(collector) { }
bool do_heap_region(HeapRegion* r) {
uint region_index = r->hrm_index();
- // There is nothing to do for compacted or skip marking regions.
- if (_collector->is_compacted_or_skip_marking(region_index)) {
+ // Only for skip-compacting regions; early return otherwise.
+ if (!_collector->is_skip_compacting(region_index)) {
return false;
}
assert(_collector->live_words(region_index) > _collector->scope()->region_compaction_threshold() ||
- !r->is_starts_humongous() ||
- _collector->mark_bitmap()->is_marked(cast_to_oop(r->bottom())),
- "must be, otherwise reclaimed earlier");
- r->reset_not_compacted_after_full_gc();
+ !r->is_starts_humongous() ||
+ _collector->mark_bitmap()->is_marked(cast_to_oop(r->bottom())),
+ "must be, otherwise reclaimed earlier");
+ r->reset_skip_compacting_after_full_gc();
return false;
}
};
@@ -97,7 +98,7 @@ void G1FullGCCompactTask::work(uint worker_id) {
compact_region(*it);
}
- G1ResetNotCompactedClosure hc(collector());
+ G1ResetSkipCompactingClosure hc(collector());
G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&hc, &_claimer, worker_id);
log_task("Compaction task", worker_id, start);
}
diff --git a/src/hotspot/share/gc/g1/g1FullGCHeapRegionAttr.hpp b/src/hotspot/share/gc/g1/g1FullGCHeapRegionAttr.hpp
index 3fd8ff758b8..65fb0624280 100644
--- a/src/hotspot/share/gc/g1/g1FullGCHeapRegionAttr.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCHeapRegionAttr.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,18 +28,18 @@
#include "gc/g1/g1BiasedArray.hpp"
// This table is used to store attribute values of all HeapRegions that need
-// fast access during the full collection. In particular some parts of the region
-// type information is encoded in these per-region bytes.
-// Value encoding has been specifically chosen to make required accesses fast.
-// In particular, the table collects whether a region should be compacted, not
-// compacted, or marking (liveness analysis) completely skipped.
+// fast access during the full collection. In particular some parts of the
+// region type information is encoded in these per-region bytes. Value encoding
+// has been specifically chosen to make required accesses fast. In particular,
+// the table specifies whether a Full GC cycle should be compacting, skip
+// compacting, or skip marking (liveness analysis) a region.
// Reasons for not compacting a region:
// (1) the HeapRegion itself has been pinned at the start of Full GC.
// (2) the occupancy of the region is too high to be considered eligible for compaction.
// The only examples for skipping marking for regions are Closed Archive regions.
class G1FullGCHeapRegionAttr : public G1BiasedMappedArray<uint8_t> {
- static const uint8_t Compacted = 0; // Region will be compacted.
- static const uint8_t NotCompacted = 1; // Region should not be compacted, but otherwise handled as usual.
+ static const uint8_t Compacting = 0; // Region will be compacted.
+ static const uint8_t SkipCompacting = 1; // Region should not be compacted, but otherwise handled as usual.
static const uint8_t SkipMarking = 2; // Region contents are not even marked through, but contain live objects.
static const uint8_t Invalid = 255;
@@ -53,23 +53,28 @@ class G1FullGCHeapRegionAttr : public G1BiasedMappedArray<uint8_t> {
public:
void set_invalid(uint idx) { set_by_index(idx, Invalid); }
- void set_compacted(uint idx) { set_by_index(idx, Compacted); }
+
+ void set_compacting(uint idx) { set_by_index(idx, Compacting); }
void set_skip_marking(uint idx) { set_by_index(idx, SkipMarking); }
- void set_not_compacted(uint idx) { set_by_index(idx, NotCompacted); }
+ void set_skip_compacting(uint idx) { set_by_index(idx, SkipCompacting); }
bool is_skip_marking(HeapWord* obj) const {
assert(!is_invalid(obj), "not initialized yet");
return get_by_address(obj) == SkipMarking;
}
- bool is_compacted(HeapWord* obj) const {
+ bool is_compacting(HeapWord* obj) const {
assert(!is_invalid(obj), "not initialized yet");
- return get_by_address(obj) == Compacted;
+ return get_by_address(obj) == Compacting;
}
- bool is_compacted_or_skip_marking(uint idx) const {
- return get_by_index(idx) != NotCompacted;
+ bool is_skip_compacting(uint idx) const {
+ return get_by_index(idx) == SkipCompacting;
}
+
+ void verify_is_compacting(uint idx) { assert(get_by_index(idx) == Compacting, "invariant"); }
+
+ void verify_is_invalid(uint idx) { assert(get_by_index(idx) == Invalid, "invariant"); }
};
#endif // SHARE_GC_G1_G1FULLGCHEAPREGIONATTR_HPP
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp b/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp
index 810a492e062..8a537c5c9be 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp
index 8b2a17e6645..f945cd5139f 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.hpp b/src/hotspot/share/gc/g1/g1FullGCMarker.hpp
index 53c475de502..95c9a411b41 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
index 5bb060c3430..77239a4884c 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
@@ -57,7 +57,7 @@ inline bool G1FullGCMarker::mark_object(oop obj) {
if (obj->mark_must_be_preserved(mark) &&
// It is not necessary to preserve marks for objects in regions we do not
// compact because we do not change their headers (i.e. forward them).
- _collector->is_compacted(obj)) {
+ _collector->is_compacting(obj)) {
preserved_stack()->push(obj, mark);
}
diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
index 8f572a1bdaa..9da68966c27 100644
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
@@ -70,8 +70,8 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
oop obj = CompressedOops::decode_not_null(heap_oop);
assert(Universe::heap()->is_in(obj), "should be in heap");
- if (!_collector->is_compacted(obj)) {
- // We never forward objects in non-compacted regions so there is no need to
+ if (!_collector->is_compacting(obj)) {
+ // We never forward objects in non-compacting regions so there is no need to
// process them further.
return;
}
diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
index 1d62ba00043..80dea7651cb 100644
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
-#include "gc/g1/g1FullCollector.hpp"
+#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCCompactionPoint.hpp"
#include "gc/g1/g1FullGCMarker.hpp"
#include "gc/g1/g1FullGCOopClosures.inline.hpp"
@@ -48,6 +48,7 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::free_pinned_region(HeapReg
_g1h->free_region(hr, nullptr);
}
prepare_for_compaction(hr);
+ _collector->set_invalid(hr->hrm_index());
}
bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
@@ -76,9 +77,8 @@ bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion*
assert(MarkSweepDeadRatio > 0,
"only skip compaction for other regions when MarkSweepDeadRatio > 0");
- // Force the high live ratio region as not-compacting to skip these regions in the
- // later compaction step.
- force_not_compacted = true;
+ // Too many live objects; skip compacting it.
+ _collector->update_from_compacting_to_skip_compacting(hr->hrm_index());
if (hr->is_young()) {
// G1 updates the BOT for old region contents incrementally, but young regions
// lack BOT information for performance reasons.
@@ -93,7 +93,6 @@ bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion*
// Reset data structures not valid after Full GC.
reset_region_metadata(hr);
- _collector->update_attribute_table(hr, force_not_compacted);
return false;
}
diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
index 97fdd9e98d2..7f601dec2cd 100644
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/g1/g1FullGCScope.cpp b/src/hotspot/share/gc/g1/g1FullGCScope.cpp
index d0755b80247..95d14930ce2 100644
--- a/src/hotspot/share/gc/g1/g1FullGCScope.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCScope.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/g1/g1FullGCScope.hpp b/src/hotspot/share/gc/g1/g1FullGCScope.hpp
index 09f26f5615f..e12f72b950f 100644
--- a/src/hotspot/share/gc/g1/g1FullGCScope.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCScope.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
index 023e0dca96b..4339856bda6 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
@@ -63,7 +63,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
// Root scanning phases
_gc_par_phases[ThreadRoots] = new WorkerDataArray("ThreadRoots", "Thread Roots (ms):", max_gc_threads);
_gc_par_phases[CLDGRoots] = new WorkerDataArray("CLDGRoots", "CLDG Roots (ms):", max_gc_threads);
- AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray("AOTCodeRoots", "AOT Root Scan (ms):", max_gc_threads);)
_gc_par_phases[CMRefRoots] = new WorkerDataArray("CMRefRoots", "CM RefProcessor Roots (ms):", max_gc_threads);
for (auto id : EnumRange()) {
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
index 6523ce18671..bde8c224119 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
@@ -50,7 +50,6 @@ class G1GCPhaseTimes : public CHeapObj {
ExtRootScan,
ThreadRoots,
CLDGRoots,
- AOT_ONLY(AOTCodeRoots COMMA)
CMRefRoots,
// For every strong OopStorage there will be one element in this enum,
// starting with StrongOopStorageSetRoots.
diff --git a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
index 515bf656c3d..1afa9ebe5d0 100644
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
index 84d4c3bb0b0..97867684374 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
@@ -618,9 +618,9 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
// Forward-to-self succeeded. We are the "owner" of the object.
HeapRegion* r = _g1h->heap_region_containing(old);
- if (!r->evacuation_failed()) {
- r->set_evacuation_failed(true);
- _g1h->hr_printer()->evac_failure(r);
+ if (r->set_evacuation_failed()) {
+ _g1h->notify_region_failed_evacuation();
+ _g1h->hr_printer()->evac_failure(r);
}
_g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
diff --git a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.cpp b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.cpp
index 230060ab1c7..db9f91641ba 100644
--- a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.cpp
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp
index d103ad50d22..ab80a4f740a 100644
--- a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.cpp b/src/hotspot/share/gc/g1/g1RootProcessor.cpp
index 4f11d5cc3f7..c8609718c25 100644
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
@@ -199,15 +198,6 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
uint worker_id) {
OopClosure* strong_roots = closures->strong_oops();
-#if INCLUDE_AOT
- if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
- if (UseAOT) {
- G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_id);
- AOTLoader::oops_do(strong_roots);
- }
- }
-#endif
-
for (auto id : EnumRange()) {
G1GCPhaseTimes::GCParPhases phase = G1GCPhaseTimes::strong_oopstorage_phase(id);
G1GCParPhaseTimesTracker x(phase_times, phase, worker_id);
diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.hpp b/src/hotspot/share/gc/g1/g1RootProcessor.hpp
index ac33a2c96c7..cdb843e57fc 100644
--- a/src/hotspot/share/gc/g1/g1RootProcessor.hpp
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,6 @@ class G1RootProcessor : public StackObj {
enum G1H_process_roots_tasks {
G1RP_PS_ClassLoaderDataGraph_oops_do,
G1RP_PS_CodeCache_oops_do,
- AOT_ONLY(G1RP_PS_aot_oops_do COMMA)
G1RP_PS_refProcessor_oops_do,
// Leave this one last.
G1RP_PS_NumElements
diff --git a/src/hotspot/share/gc/g1/g1VMOperations.cpp b/src/hotspot/share/gc/g1/g1VMOperations.cpp
index 0ead8b02651..08a059adb6c 100644
--- a/src/hotspot/share/gc/g1/g1VMOperations.cpp
+++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/g1/heapRegion.cpp b/src/hotspot/share/gc/g1/heapRegion.cpp
index 791e1bfd61f..38952f183d5 100644
--- a/src/hotspot/share/gc/g1/heapRegion.cpp
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp
@@ -48,7 +48,6 @@
#include "utilities/powerOfTwo.hpp"
int HeapRegion::LogOfHRGrainBytes = 0;
-int HeapRegion::LogOfHRGrainWords = 0;
int HeapRegion::LogCardsPerRegion = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
@@ -84,9 +83,6 @@ void HeapRegion::setup_heap_region_size(size_t max_heap_size) {
guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
LogOfHRGrainBytes = region_size_log;
- guarantee(LogOfHRGrainWords == 0, "we should only set it once");
- LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
-
guarantee(GrainBytes == 0, "we should only set it once");
// The cast to int is safe, given that we've bounded region_size by
// MIN_REGION_SIZE and MAX_REGION_SIZE.
@@ -94,7 +90,6 @@ void HeapRegion::setup_heap_region_size(size_t max_heap_size) {
guarantee(GrainWords == 0, "we should only set it once");
GrainWords = GrainBytes >> LogHeapWordSize;
- guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
guarantee(CardsPerRegion == 0, "we should only set it once");
CardsPerRegion = GrainBytes >> G1CardTable::card_shift;
@@ -109,8 +104,9 @@ void HeapRegion::setup_heap_region_size(size_t max_heap_size) {
void HeapRegion::handle_evacuation_failure() {
uninstall_surv_rate_group();
clear_young_index_in_cset();
- set_evacuation_failed(false);
+ reset_evacuation_failed();
set_old();
+ _next_marked_bytes = 0;
}
void HeapRegion::unlink_from_list() {
@@ -138,7 +134,7 @@ void HeapRegion::hr_clear(bool clear_space) {
init_top_at_mark_start();
if (clear_space) clear(SpaceDecorator::Mangle);
- _evacuation_failed = false;
+ Atomic::store(&_evacuation_failed, false);
_gc_efficiency = -1.0;
}
diff --git a/src/hotspot/share/gc/g1/heapRegion.hpp b/src/hotspot/share/gc/g1/heapRegion.hpp
index df2c2a182d4..6d15d11724d 100644
--- a/src/hotspot/share/gc/g1/heapRegion.hpp
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -170,8 +170,8 @@ class HeapRegion : public CHeapObj {
// Update heap region that has been compacted to be consistent after Full GC.
void reset_compacted_after_full_gc();
- // Update pinned heap region (not compacted) to be consistent after Full GC.
- void reset_not_compacted_after_full_gc();
+ // Update skip-compacting heap region to be consistent after Full GC.
+ void reset_skip_compacting_after_full_gc();
// All allocated blocks are occupied by objects in a HeapRegion
bool block_is_obj(const HeapWord* p) const;
@@ -207,7 +207,7 @@ class HeapRegion : public CHeapObj {
HeapRegion* _humongous_start_region;
// True iff an attempt to evacuate an object in the region failed.
- bool _evacuation_failed;
+ volatile bool _evacuation_failed;
static const uint InvalidCSetIndex = UINT_MAX;
@@ -293,7 +293,6 @@ class HeapRegion : public CHeapObj {
void initialize(bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
static int LogOfHRGrainBytes;
- static int LogOfHRGrainWords;
static int LogCardsPerRegion;
static size_t GrainBytes;
@@ -316,11 +315,10 @@ class HeapRegion : public CHeapObj {
static size_t max_region_size();
static size_t min_region_size_in_words();
- // It sets up the heap region size (GrainBytes / GrainWords), as
- // well as other related fields that are based on the heap region
- // size (LogOfHRGrainBytes / LogOfHRGrainWords /
- // CardsPerRegion). All those fields are considered constant
- // throughout the JVM's execution, therefore they should only be set
+ // It sets up the heap region size (GrainBytes / GrainWords), as well as
+ // other related fields that are based on the heap region size
+ // (LogOfHRGrainBytes / CardsPerRegion). All those fields are considered
+ // constant throughout the JVM's execution, therefore they should only be set
// up once during initialization time.
static void setup_heap_region_size(size_t max_heap_size);
@@ -497,16 +495,13 @@ class HeapRegion : public CHeapObj {
void clear_cardtable();
// Returns the "evacuation_failed" property of the region.
- bool evacuation_failed() { return _evacuation_failed; }
+ inline bool evacuation_failed() const;
- // Sets the "evacuation_failed" property of the region.
- void set_evacuation_failed(bool b) {
- _evacuation_failed = b;
+ // Sets the "evacuation_failed" property of the region, returning true if this
+ // has been the first call, false otherwise.
+ inline bool set_evacuation_failed();
- if (b) {
- _next_marked_bytes = 0;
- }
- }
+ inline void reset_evacuation_failed();
// Notify the region that we are about to start processing
// self-forwarded objects during evac failure handling.
diff --git a/src/hotspot/share/gc/g1/heapRegion.inline.hpp b/src/hotspot/share/gc/g1/heapRegion.inline.hpp
index 584984cd534..b6e9b4466a8 100644
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp
@@ -205,8 +205,8 @@ inline void HeapRegion::reset_compacted_after_full_gc() {
reset_after_full_gc_common();
}
-inline void HeapRegion::reset_not_compacted_after_full_gc() {
- assert(!is_free(), "should not have compacted free region");
+inline void HeapRegion::reset_skip_compacting_after_full_gc() {
+ assert(!is_free(), "must be");
assert(compaction_top() == bottom(),
"region %u compaction_top " PTR_FORMAT " must not be different from bottom " PTR_FORMAT,
@@ -451,4 +451,16 @@ inline void HeapRegion::record_surv_words_in_group(size_t words_survived) {
_surv_rate_group->record_surviving_words(age_in_group, words_survived);
}
+inline bool HeapRegion::evacuation_failed() const {
+ return Atomic::load(&_evacuation_failed);
+}
+
+inline bool HeapRegion::set_evacuation_failed() {
+ return !Atomic::load(&_evacuation_failed) && !Atomic::cmpxchg(&_evacuation_failed, false, true, memory_order_relaxed);
+}
+
+inline void HeapRegion::reset_evacuation_failed() {
+ Atomic::store(&_evacuation_failed, false);
+}
+
#endif // SHARE_GC_G1_HEAPREGION_INLINE_HPP
diff --git a/src/hotspot/share/gc/g1/heapRegionSet.cpp b/src/hotspot/share/gc/g1/heapRegionSet.cpp
index 0cb0e4c66b5..0e48f17dbe5 100644
--- a/src/hotspot/share/gc/g1/heapRegionSet.cpp
+++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/g1/heapRegionSet.hpp b/src/hotspot/share/gc/g1/heapRegionSet.hpp
index f2fa882125a..323c54614dc 100644
--- a/src/hotspot/share/gc/g1/heapRegionSet.hpp
+++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
index 96e82a92c12..c00890d38e1 100644
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
@@ -33,6 +33,7 @@
#include "oops/typeArrayOop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
+#include "runtime/os.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"
diff --git a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
index 97d1e23b75e..b9415ebe8b2 100644
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
@@ -49,8 +49,8 @@ ParMarkBitMap::initialize(MemRegion covered_region)
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
- ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
- const size_t used_page_sz = ReservedSpace::actual_reserved_page_size(rs);
+ ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
+ const size_t used_page_sz = rs.page_size();
os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes, used_page_sz,
rs.base(), rs.size());
@@ -77,11 +77,6 @@ ParMarkBitMap::initialize(MemRegion covered_region)
return false;
}
-#ifdef ASSERT
-extern size_t mark_bitmap_count;
-extern size_t mark_bitmap_size;
-#endif // #ifdef ASSERT
-
bool
ParMarkBitMap::mark_obj(HeapWord* addr, size_t size)
{
@@ -90,8 +85,6 @@ ParMarkBitMap::mark_obj(HeapWord* addr, size_t size)
const idx_t end_bit = addr_to_bit(addr + size - 1);
bool end_bit_ok = _end_bits.par_set_bit(end_bit);
assert(end_bit_ok, "concurrency problem");
- DEBUG_ONLY(Atomic::inc(&mark_bitmap_count));
- DEBUG_ONLY(Atomic::add(&mark_bitmap_size, size));
return true;
}
return false;
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index 550002367c8..e7117d1d318 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -748,7 +748,7 @@ void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
// Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
if(log_is_enabled(Info, pagesize)) {
- const size_t page_size = ReservedSpace::actual_reserved_page_size(rs);
+ const size_t page_size = rs.page_size();
os::trace_page_sizes("Heap",
MinHeapSize,
reserved_heap_size,
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
index b926777269f..b7241e7f1bc 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
@@ -412,13 +411,6 @@ print_initial_summary_data(ParallelCompactData& summary_data,
}
#endif // #ifndef PRODUCT
-#ifdef ASSERT
-size_t add_obj_count;
-size_t add_obj_size;
-size_t mark_bitmap_count;
-size_t mark_bitmap_size;
-#endif // #ifdef ASSERT
-
ParallelCompactData::ParallelCompactData() :
_region_start(NULL),
DEBUG_ONLY(_region_end(NULL) COMMA)
@@ -455,7 +447,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
- ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
+ ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, page_sz, rs.base(),
rs.size());
@@ -539,9 +531,6 @@ void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
// end_region is inclusive
const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
- DEBUG_ONLY(Atomic::inc(&add_obj_count);)
- DEBUG_ONLY(Atomic::add(&add_obj_size, len);)
-
if (beg_region == end_region) {
// All in one region.
_region_data[beg_region].add_live_obj(len);
@@ -993,9 +982,6 @@ void PSParallelCompact::pre_compact()
_space_info[from_space_id].set_space(heap->young_gen()->from_space());
_space_info[to_space_id].set_space(heap->young_gen()->to_space());
- DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
- DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
-
// Increment the invocation count
heap->increment_total_collections(true);
@@ -1612,19 +1598,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
{
GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
-#ifdef ASSERT
- log_develop_debug(gc, marking)(
- "add_obj_count=" SIZE_FORMAT " "
- "add_obj_bytes=" SIZE_FORMAT,
- add_obj_count,
- add_obj_size * HeapWordSize);
- log_develop_debug(gc, marking)(
- "mark_bitmap_count=" SIZE_FORMAT " "
- "mark_bitmap_bytes=" SIZE_FORMAT,
- mark_bitmap_count,
- mark_bitmap_size * HeapWordSize);
-#endif // ASSERT
-
// Quick summarization of each space into itself, to see how much is live.
summarize_spaces_quick();
@@ -2012,7 +1985,6 @@ static void mark_from_roots_work(ParallelRootType::Value root_type, uint worker_
case ParallelRootType::code_cache:
// Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
//ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
- AOTLoader::oops_do(&mark_and_push_closure);
break;
case ParallelRootType::sentinel:
@@ -2219,7 +2191,6 @@ class PSAdjustTask final : public AbstractGangTask {
enum PSAdjustSubTask {
PSAdjustSubTask_code_cache,
- PSAdjustSubTask_aot,
PSAdjustSubTask_old_ref_process,
PSAdjustSubTask_young_ref_process,
@@ -2263,9 +2234,6 @@ class PSAdjustTask final : public AbstractGangTask {
CodeBlobToOopClosure adjust_code(&adjust, CodeBlobToOopClosure::FixRelocations);
CodeCache::blobs_do(&adjust_code);
}
- if (_sub_tasks.try_claim_task(PSAdjustSubTask_aot)) {
- AOT_ONLY(AOTLoader::oops_do(&adjust);)
- }
if (_sub_tasks.try_claim_task(PSAdjustSubTask_old_ref_process)) {
PSParallelCompact::ref_processor()->weak_oops_do(&adjust);
}
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index ff96abe51d3..60760de716b 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
@@ -104,7 +103,6 @@ static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_i
{
MarkingCodeBlobClosure code_closure(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
ScavengableNMethods::nmethods_do(&code_closure);
- AOTLoader::oops_do(&roots_closure);
}
break;
diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp
index 271d0d764b5..876b2785f6b 100644
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp
@@ -650,9 +650,6 @@ void DefNewGeneration::collect(bool full,
}
// We should have processed and cleared all the preserved marks.
_preserved_marks_set.reclaim();
- // set new iteration safe limit for the survivor spaces
- from()->set_concurrent_iteration_safe_limit(from()->top());
- to()->set_concurrent_iteration_safe_limit(to()->top());
heap->trace_heap_after_gc(&gc_tracer);
diff --git a/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp
index 2bd82d7deb4..663ff91372b 100644
--- a/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp
+++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp
@@ -343,7 +343,3 @@ void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) {
}
}
}
-
-LIR_Opr BarrierSetC1::resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj) {
- return obj;
-}
diff --git a/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp b/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp
index d6d17abf3cf..e1fc0f410c1 100644
--- a/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp
+++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp
@@ -135,10 +135,6 @@ class BarrierSetC1: public CHeapObj {
virtual LIR_Opr atomic_xchg_at(LIRAccess& access, LIRItem& value);
virtual LIR_Opr atomic_add_at(LIRAccess& access, LIRItem& value);
- virtual LIR_Opr resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj);
-
- virtual const char* rtcall_name_for_address(address entry) { return NULL; }
-
virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob) {}
};
diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
index 743fbab58ce..60d4c7a91ed 100644
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
@@ -709,7 +709,7 @@ void BarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* co
}
}
-Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
+Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,
Node*& i_o, Node*& needgc_ctrl,
Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
intx prefetch_lines) const {
@@ -729,7 +729,7 @@ Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem,
// this will require extensive changes to the loop optimization in order to
// prevent a degradation of the optimization.
// See comment in memnode.hpp, around line 227 in class LoadPNode.
- Node *eden_end = macro->make_load(ctrl, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
+ Node *eden_end = macro->make_load(toobig_false, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
// We need a Region for the loop-back contended case.
enum { fall_in_path = 1, contended_loopback_path = 2 };
@@ -752,7 +752,7 @@ Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem,
// Load(-locked) the heap top.
// See note above concerning the control input when using a TLAB
Node *old_eden_top = UseTLAB
- ? new LoadPNode (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered)
+ ? new LoadPNode (toobig_false, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered)
: new LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr, MemNode::acquire);
macro->transform_later(old_eden_top);
diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp
index f16e5a48e60..0902c0bb72f 100644
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp
@@ -244,7 +244,7 @@ class BarrierSetC2: public CHeapObj {
virtual void clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* countx, bool is_array) const;
- virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
+ virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobig_false, Node* size_in_bytes,
Node*& i_o, Node*& needgc_ctrl,
Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
intx prefetch_lines) const;
diff --git a/src/hotspot/share/gc/shared/cardTable.cpp b/src/hotspot/share/gc/shared/cardTable.cpp
index 84f624b3001..2728e278286 100644
--- a/src/hotspot/share/gc/shared/cardTable.cpp
+++ b/src/hotspot/share/gc/shared/cardTable.cpp
@@ -78,7 +78,7 @@ void CardTable::initialize() {
const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
MAX2(_page_size, (size_t) os::vm_allocation_granularity());
- ReservedSpace heap_rs(_byte_map_size, rs_align, false);
+ ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size);
MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
diff --git a/src/hotspot/share/gc/shared/gcInitLogger.cpp b/src/hotspot/share/gc/shared/gcInitLogger.cpp
index ec9bd60a83f..1dfc27c5333 100644
--- a/src/hotspot/share/gc/shared/gcInitLogger.cpp
+++ b/src/hotspot/share/gc/shared/gcInitLogger.cpp
@@ -29,6 +29,7 @@
#include "logging/log.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/globalDefinitions.hpp"
diff --git a/src/hotspot/share/gc/shared/gcTraceTime.inline.hpp b/src/hotspot/share/gc/shared/gcTraceTime.inline.hpp
index 92aee65bd15..2d64844adb9 100644
--- a/src/hotspot/share/gc/shared/gcTraceTime.inline.hpp
+++ b/src/hotspot/share/gc/shared/gcTraceTime.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "logging/log.hpp"
+#include "runtime/os.hpp"
#include "utilities/ticks.hpp"
inline GCTraceTimeDriver::GCTraceTimeDriver(
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
index e5beda39f1b..99a654c8baf 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
@@ -57,7 +56,6 @@
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workgroup.hpp"
-#include "memory/filemap.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
#include "memory/metaspaceCounters.hpp"
@@ -174,7 +172,7 @@ ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
SIZE_FORMAT, total_reserved, alignment);
ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
- size_t used_page_size = ReservedSpace::actual_reserved_page_size(heap_rs);
+ size_t used_page_size = heap_rs.page_size();
os::trace_page_sizes("Heap",
MinHeapSize,
@@ -256,28 +254,9 @@ size_t GenCollectedHeap::max_capacity() const {
// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
- MonitorLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
assert(_full_collections_completed <= _total_full_collections,
"Can't complete more collections than were started");
_full_collections_completed = _total_full_collections;
- ml.notify_all();
- return _full_collections_completed;
-}
-
-// Update the _full_collections_completed counter, as appropriate,
-// at the end of a concurrent GC cycle. Note the conditional update
-// below to allow this method to be called by a concurrent collector
-// without synchronizing in any manner with the VM thread (which
-// may already have initiated a STW full collection "concurrently").
-unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
- MonitorLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
- assert((_full_collections_completed <= _total_full_collections) &&
- (count <= _total_full_collections),
- "Can't complete more collections than were started");
- if (count > _full_collections_completed) {
- _full_collections_completed = count;
- ml.notify_all();
- }
return _full_collections_completed;
}
@@ -810,11 +789,6 @@ void GenCollectedHeap::process_roots(ScanningOption so,
Threads::oops_do(strong_roots, roots_from_code_p);
-#if INCLUDE_AOT
- if (UseAOT) {
- AOTLoader::oops_do(strong_roots);
- }
-#endif
OopStorageSet::strong_oops_do(strong_roots);
if (so & SO_ScavengeCodeCache) {
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.hpp b/src/hotspot/share/gc/shared/genCollectedHeap.hpp
index f879b49c6dc..becc15e799c 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp
@@ -281,8 +281,6 @@ class GenCollectedHeap : public CollectedHeap {
// Update above counter, as appropriate, at the end of a stop-world GC cycle
unsigned int update_full_collections_completed();
- // Update above counter, as appropriate, at the end of a concurrent GC cycle
- unsigned int update_full_collections_completed(unsigned int count);
// Update the gc statistics for each generation.
void update_gc_stats(Generation* current_generation, bool full) {
diff --git a/src/hotspot/share/gc/shared/generationSpec.cpp b/src/hotspot/share/gc/shared/generationSpec.cpp
index 67c1305e730..b6ca7e24f09 100644
--- a/src/hotspot/share/gc/shared/generationSpec.cpp
+++ b/src/hotspot/share/gc/shared/generationSpec.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/generationSpec.hpp"
-#include "memory/filemap.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_SERIALGC
diff --git a/src/hotspot/share/gc/shared/memAllocator.cpp b/src/hotspot/share/gc/shared/memAllocator.cpp
index 362bcf9e1f4..44cae18e4cd 100644
--- a/src/hotspot/share/gc/shared/memAllocator.cpp
+++ b/src/hotspot/share/gc/shared/memAllocator.cpp
@@ -174,7 +174,7 @@ void MemAllocator::Allocation::check_for_valid_allocation_state() const {
"shouldn't be allocating with pending exception");
// Allocation of an oop can always invoke a safepoint.
assert(_thread->is_Java_thread(), "non Java threads shouldn't allocate on the Heap");
- _thread->check_for_valid_safepoint_state();
+ _thread->as_Java_thread()->check_for_valid_safepoint_state();
}
#endif
diff --git a/src/hotspot/share/gc/shared/preservedMarks.cpp b/src/hotspot/share/gc/shared/preservedMarks.cpp
index 8af2756f3bb..12fc7f2a989 100644
--- a/src/hotspot/share/gc/shared/preservedMarks.cpp
+++ b/src/hotspot/share/gc/shared/preservedMarks.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -77,7 +77,7 @@ void RemoveForwardedPointerClosure::do_object(oop obj) {
}
void PreservedMarksSet::init(uint num) {
- assert(_stacks == NULL && _num == 0, "do not re-initialize");
+ assert(_stacks == nullptr && _num == 0, "do not re-initialize");
assert(num > 0, "pre-condition");
if (_in_c_heap) {
_stacks = NEW_C_HEAP_ARRAY(Padded, num, mtGC);
@@ -92,57 +92,54 @@ void PreservedMarksSet::init(uint num) {
assert_empty();
}
-class ParRestoreTask : public AbstractGangTask {
-private:
+class RestorePreservedMarksTask : public AbstractGangTask {
PreservedMarksSet* const _preserved_marks_set;
SequentialSubTasksDone _sub_tasks;
- volatile size_t* const _total_size_addr;
+ volatile size_t _total_size;
+#ifdef ASSERT
+ size_t _total_size_before;
+#endif // ASSERT
public:
- virtual void work(uint worker_id) {
+ void work(uint worker_id) override {
uint task_id = 0;
- while (_sub_tasks.try_claim_task(/* reference */ task_id)) {
- _preserved_marks_set->get(task_id)->restore_and_increment(_total_size_addr);
+ while (_sub_tasks.try_claim_task(task_id)) {
+ _preserved_marks_set->get(task_id)->restore_and_increment(&_total_size);
}
}
- ParRestoreTask(PreservedMarksSet* preserved_marks_set,
- volatile size_t* total_size_addr)
- : AbstractGangTask("Parallel Preserved Mark Restoration"),
- _preserved_marks_set(preserved_marks_set),
- _sub_tasks(preserved_marks_set->num()),
- _total_size_addr(total_size_addr) {
+ RestorePreservedMarksTask(PreservedMarksSet* preserved_marks_set)
+ : AbstractGangTask("Restore Preserved Marks"),
+ _preserved_marks_set(preserved_marks_set),
+ _sub_tasks(preserved_marks_set->num()),
+ _total_size(0)
+ DEBUG_ONLY(COMMA _total_size_before(0)) {
+#ifdef ASSERT
+ // This is to make sure the total_size we'll calculate below is correct.
+ for (uint i = 0; i < _preserved_marks_set->num(); ++i) {
+ _total_size_before += _preserved_marks_set->get(i)->size();
}
-};
+#endif // ASSERT
+ }
-void PreservedMarksSet::restore(WorkGang* workers) {
- volatile size_t total_size = 0;
+ ~RestorePreservedMarksTask() {
+ assert(_total_size == _total_size_before, "total_size = %zu before = %zu", _total_size, _total_size_before);
-#ifdef ASSERT
- // This is to make sure the total_size we'll calculate below is correct.
- size_t total_size_before = 0;
- for (uint i = 0; i < _num; i += 1) {
- total_size_before += get(i)->size();
+ log_trace(gc)("Restored %zu marks", _total_size);
}
-#endif // ASSERT
+};
- if (workers == NULL) {
- for (uint i = 0; i < num(); i += 1) {
- total_size += get(i)->size();
- get(i)->restore();
+void PreservedMarksSet::restore(WorkGang* workers) {
+ {
+ RestorePreservedMarksTask cl(this);
+ if (workers == nullptr) {
+ cl.work(0);
+ } else {
+ workers->run_task(&cl);
}
- } else {
- ParRestoreTask task(this, &total_size);
- workers->run_task(&task);
}
assert_empty();
-
- assert(total_size == total_size_before,
- "total_size = " SIZE_FORMAT " before = " SIZE_FORMAT,
- total_size, total_size_before);
-
- log_trace(gc)("Restored " SIZE_FORMAT " marks", total_size);
}
void PreservedMarksSet::reclaim() {
@@ -157,13 +154,13 @@ void PreservedMarksSet::reclaim() {
} else {
// the array was resource-allocated, so nothing to do
}
- _stacks = NULL;
+ _stacks = nullptr;
_num = 0;
}
#ifndef PRODUCT
void PreservedMarksSet::assert_empty() {
- assert(_stacks != NULL && _num > 0, "should have been initialized");
+ assert(_stacks != nullptr && _num > 0, "should have been initialized");
for (uint i = 0; i < _num; i += 1) {
get(i)->assert_empty();
}
diff --git a/src/hotspot/share/gc/shared/space.cpp b/src/hotspot/share/gc/shared/space.cpp
index 3b36398625f..8e95115c2e7 100644
--- a/src/hotspot/share/gc/shared/space.cpp
+++ b/src/hotspot/share/gc/shared/space.cpp
@@ -264,8 +264,7 @@ void Space::clear(bool mangle_space) {
}
}
-ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
- _concurrent_iteration_safe_limit(NULL) {
+ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL) {
_mangler = new GenSpaceMangler(this);
}
@@ -278,7 +277,6 @@ void ContiguousSpace::initialize(MemRegion mr,
bool mangle_space)
{
CompactibleSpace::initialize(mr, clear_space, mangle_space);
- set_concurrent_iteration_safe_limit(top());
}
void ContiguousSpace::clear(bool mangle_space) {
diff --git a/src/hotspot/share/gc/shared/space.hpp b/src/hotspot/share/gc/shared/space.hpp
index 4de1658bd2e..f0e4e80bd2e 100644
--- a/src/hotspot/share/gc/shared/space.hpp
+++ b/src/hotspot/share/gc/shared/space.hpp
@@ -499,7 +499,6 @@ class ContiguousSpace: public CompactibleSpace {
protected:
HeapWord* _top;
- HeapWord* _concurrent_iteration_safe_limit;
// A helper for mangling the unused area of the space in debug builds.
GenSpaceMangler* _mangler;
@@ -564,24 +563,10 @@ class ContiguousSpace: public CompactibleSpace {
void oop_iterate(OopIterateClosure* cl);
void object_iterate(ObjectClosure* blk);
- HeapWord* concurrent_iteration_safe_limit() {
- assert(_concurrent_iteration_safe_limit <= top(),
- "_concurrent_iteration_safe_limit update missed");
- return _concurrent_iteration_safe_limit;
- }
- // changes the safe limit, all objects from bottom() to the new
- // limit should be properly initialized
- void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
- assert(new_limit <= top(), "uninitialized objects in the safe range");
- _concurrent_iteration_safe_limit = new_limit;
- }
-
// Compaction support
virtual void reset_after_compaction() {
assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
set_top(compaction_top());
- // set new iteration safe limit
- set_concurrent_iteration_safe_limit(compaction_top());
}
// Override.
diff --git a/src/hotspot/share/gc/shared/vmStructs_gc.hpp b/src/hotspot/share/gc/shared/vmStructs_gc.hpp
index 3fda88c4b4e..2de1cc8c57c 100644
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp
@@ -130,7 +130,6 @@
nonstatic_field(CompactibleSpace, _end_of_live, HeapWord*) \
\
nonstatic_field(ContiguousSpace, _top, HeapWord*) \
- nonstatic_field(ContiguousSpace, _concurrent_iteration_safe_limit, HeapWord*) \
nonstatic_field(ContiguousSpace, _saved_mark_word, HeapWord*) \
\
nonstatic_field(Generation, _reserved, MemRegion) \
diff --git a/src/hotspot/share/gc/shared/workerManager.hpp b/src/hotspot/share/gc/shared/workerManager.hpp
index 4ae3920fd25..d9698b0d823 100644
--- a/src/hotspot/share/gc/shared/workerManager.hpp
+++ b/src/hotspot/share/gc/shared/workerManager.hpp
@@ -50,8 +50,7 @@ class WorkerManager : public AllStatic {
// create all the worker at start should considered a problem so exit.
// If initializing = false, there are already some number of worker
// threads and a failure would not be optimal but should not be fatal.
- template <class WorkerType>
- static uint add_workers (WorkerType* holder,
+ static uint add_workers (WorkGang* workers,
uint active_workers,
uint total_workers,
uint created_workers,
@@ -59,16 +58,14 @@ class WorkerManager : public AllStatic {
bool initializing);
// Log (at trace level) a change in the number of created workers.
- template <class WorkerType>
- static void log_worker_creation(WorkerType* holder,
+ static void log_worker_creation(WorkGang* workers,
uint previous_created_workers,
uint active_workers,
uint created_workers,
bool initializing);
};
-template <class WorkerType>
-uint WorkerManager::add_workers(WorkerType* holder,
+uint WorkerManager::add_workers(WorkGang* workers,
uint active_workers,
uint total_workers,
uint created_workers,
@@ -79,15 +76,13 @@ uint WorkerManager::add_workers(WorkerType* holder,
for (uint worker_id = start; worker_id < end; worker_id += 1) {
WorkerThread* new_worker = NULL;
if (initializing || !InjectGCWorkerCreationFailure) {
- new_worker = holder->install_worker(worker_id);
+ new_worker = workers->install_worker(worker_id);
}
if (new_worker == NULL || !os::create_thread(new_worker, worker_type)) {
log_trace(gc, task)("WorkerManager::add_workers() : "
"creation failed due to failed allocation of native %s",
new_worker == NULL ? "memory" : "thread");
- if (new_worker != NULL) {
- delete new_worker;
- }
+ delete new_worker;
if (initializing) {
vm_exit_out_of_memory(0, OOM_MALLOC_ERROR, "Cannot create worker GC thread. Out of system resources.");
}
@@ -103,8 +98,7 @@ uint WorkerManager::add_workers(WorkerType* holder,
return created_workers;
}
-template <class WorkerType>
-void WorkerManager::log_worker_creation(WorkerType* holder,
+void WorkerManager::log_worker_creation(WorkGang* workers,
uint previous_created_workers,
uint active_workers,
uint created_workers,
@@ -112,7 +106,7 @@ void WorkerManager::log_worker_creation(WorkerType* holder,
if (previous_created_workers < created_workers) {
const char* initializing_msg = initializing ? "Adding initial" : "Creating additional";
log_trace(gc, task)("%s %s(s) previously created workers %u active workers %u total created workers %u",
- initializing_msg, holder->group_name(), previous_created_workers, active_workers, created_workers);
+ initializing_msg, workers->group_name(), previous_created_workers, active_workers, created_workers);
}
}
diff --git a/src/hotspot/share/gc/shared/workerPolicy.cpp b/src/hotspot/share/gc/shared/workerPolicy.cpp
index 8116265bd01..6db711a959e 100644
--- a/src/hotspot/share/gc/shared/workerPolicy.cpp
+++ b/src/hotspot/share/gc/shared/workerPolicy.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "runtime/globals_extension.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
uint WorkerPolicy::_parallel_worker_threads = 0;
diff --git a/src/hotspot/share/gc/shared/workgroup.cpp b/src/hotspot/share/gc/shared/workgroup.cpp
index c1b9e48e4f2..d197efef09d 100644
--- a/src/hotspot/share/gc/shared/workgroup.cpp
+++ b/src/hotspot/share/gc/shared/workgroup.cpp
@@ -34,77 +34,6 @@
#include "runtime/semaphore.hpp"
#include "runtime/thread.inline.hpp"
-// Definitions of WorkGang methods.
-
-AbstractWorkGang::AbstractWorkGang(const char* name, uint workers, bool are_GC_task_threads, bool are_ConcurrentGC_threads) :
- _workers(NULL),
- _total_workers(workers),
- _active_workers(UseDynamicNumberOfGCThreads ? 1U : workers),
- _created_workers(0),
- _name(name),
- _are_GC_task_threads(are_GC_task_threads),
- _are_ConcurrentGC_threads(are_ConcurrentGC_threads)
- { }
-
-
-// The current implementation will exit if the allocation
-// of any worker fails.
-void AbstractWorkGang::initialize_workers() {
- log_develop_trace(gc, workgang)("Constructing work gang %s with %u threads", name(), total_workers());
- _workers = NEW_C_HEAP_ARRAY(AbstractGangWorker*, total_workers(), mtInternal);
- add_workers(true);
-}
-
-
-AbstractGangWorker* AbstractWorkGang::install_worker(uint worker_id) {
- AbstractGangWorker* new_worker = allocate_worker(worker_id);
- set_thread(worker_id, new_worker);
- return new_worker;
-}
-
-void AbstractWorkGang::add_workers(bool initializing) {
- add_workers(_active_workers, initializing);
-}
-
-void AbstractWorkGang::add_workers(uint active_workers, bool initializing) {
-
- os::ThreadType worker_type;
- if (are_ConcurrentGC_threads()) {
- worker_type = os::cgc_thread;
- } else {
- worker_type = os::pgc_thread;
- }
- uint previous_created_workers = _created_workers;
-
- _created_workers = WorkerManager::add_workers(this,
- active_workers,
- _total_workers,
- _created_workers,
- worker_type,
- initializing);
- _active_workers = MIN2(_created_workers, _active_workers);
-
- WorkerManager::log_worker_creation(this, previous_created_workers, _active_workers, _created_workers, initializing);
-}
-
-AbstractGangWorker* AbstractWorkGang::worker(uint i) const {
- // Array index bounds checking.
- AbstractGangWorker* result = NULL;
- assert(_workers != NULL, "No workers for indexing");
- assert(i < total_workers(), "Worker index out of bounds");
- result = _workers[i];
- assert(result != NULL, "Indexing to null worker");
- return result;
-}
-
-void AbstractWorkGang::threads_do(ThreadClosure* tc) const {
- assert(tc != NULL, "Null ThreadClosure");
- uint workers = created_workers();
- for (uint i = 0; i < workers; i++) {
- tc->do_thread(worker(i));
- }
-}
-
static void run_foreground_task_if_needed(AbstractGangTask* task, uint num_workers,
bool add_foreground_work) {
if (add_foreground_work) {
@@ -198,20 +127,77 @@ class GangTaskDispatcher : public CHeapObj<mtGC> {
}
}
};
+// Definitions of WorkGang methods.
-WorkGang::WorkGang(const char* name,
- uint workers,
- bool are_GC_task_threads,
- bool are_ConcurrentGC_threads) :
- AbstractWorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads),
+WorkGang::WorkGang(const char* name, uint workers, bool are_GC_task_threads, bool are_ConcurrentGC_threads) :
+ _workers(NULL),
+ _total_workers(workers),
+ _active_workers(UseDynamicNumberOfGCThreads ? 1U : workers),
+ _created_workers(0),
+ _name(name),
+ _are_GC_task_threads(are_GC_task_threads),
+ _are_ConcurrentGC_threads(are_ConcurrentGC_threads),
_dispatcher(new GangTaskDispatcher())
-{ }
+ { }
WorkGang::~WorkGang() {
delete _dispatcher;
}
-AbstractGangWorker* WorkGang::allocate_worker(uint worker_id) {
+// The current implementation will exit if the allocation
+// of any worker fails.
+void WorkGang::initialize_workers() {
+ log_develop_trace(gc, workgang)("Constructing work gang %s with %u threads", name(), total_workers());
+ _workers = NEW_C_HEAP_ARRAY(GangWorker*, total_workers(), mtInternal);
+ add_workers(true);
+}
+
+
+GangWorker* WorkGang::install_worker(uint worker_id) {
+ GangWorker* new_worker = allocate_worker(worker_id);
+ set_thread(worker_id, new_worker);
+ return new_worker;
+}
+
+void WorkGang::add_workers(bool initializing) {
+ os::ThreadType worker_type;
+ if (are_ConcurrentGC_threads()) {
+ worker_type = os::cgc_thread;
+ } else {
+ worker_type = os::pgc_thread;
+ }
+ uint previous_created_workers = _created_workers;
+
+ _created_workers = WorkerManager::add_workers(this,
+ _active_workers,
+ _total_workers,
+ _created_workers,
+ worker_type,
+ initializing);
+ _active_workers = MIN2(_created_workers, _active_workers);
+
+ WorkerManager::log_worker_creation(this, previous_created_workers, _active_workers, _created_workers, initializing);
+}
+
+GangWorker* WorkGang::worker(uint i) const {
+ // Array index bounds checking.
+ GangWorker* result = NULL;
+ assert(_workers != NULL, "No workers for indexing");
+ assert(i < total_workers(), "Worker index out of bounds");
+ result = _workers[i];
+ assert(result != NULL, "Indexing to null worker");
+ return result;
+}
+
+void WorkGang::threads_do(ThreadClosure* tc) const {
+ assert(tc != NULL, "Null ThreadClosure");
+ uint workers = created_workers();
+ for (uint i = 0; i < workers; i++) {
+ tc->do_thread(worker(i));
+ }
+}
+
+GangWorker* WorkGang::allocate_worker(uint worker_id) {
return new GangWorker(this, worker_id);
}
@@ -230,18 +216,18 @@ void WorkGang::run_task(AbstractGangTask* task, uint num_workers, bool add_foreg
update_active_workers(old_num_workers);
}
-AbstractGangWorker::AbstractGangWorker(AbstractWorkGang* gang, uint id) {
+GangWorker::GangWorker(WorkGang* gang, uint id) {
_gang = gang;
set_id(id);
set_name("%s#%d", gang->name(), id);
}
-void AbstractGangWorker::run() {
+void GangWorker::run() {
initialize();
loop();
}
-void AbstractGangWorker::initialize() {
+void GangWorker::initialize() {
assert(_gang != NULL, "No gang to run in");
os::set_priority(this, NearMaxPriority);
log_develop_trace(gc, workgang)("Running gang worker for gang %s id %u", gang()->name(), id());
@@ -249,22 +235,6 @@ void AbstractGangWorker::initialize() {
" of a work gang");
}
-bool AbstractGangWorker::is_GC_task_thread() const {
- return gang()->are_GC_task_threads();
-}
-
-bool AbstractGangWorker::is_ConcurrentGC_thread() const {
- return gang()->are_ConcurrentGC_threads();
-}
-
-void AbstractGangWorker::print_on(outputStream* st) const {
- st->print("\"%s\" ", name());
- Thread::print_on(st);
- st->cr();
-}
-
-void AbstractGangWorker::print() const { print_on(tty); }
-
WorkData GangWorker::wait_for_task() {
return gang()->dispatcher()->worker_wait_for_task();
}
diff --git a/src/hotspot/share/gc/shared/workgroup.hpp b/src/hotspot/share/gc/shared/workgroup.hpp
index 5867f36a289..0a86d43bf3f 100644
--- a/src/hotspot/share/gc/shared/workgroup.hpp
+++ b/src/hotspot/share/gc/shared/workgroup.hpp
@@ -40,21 +40,16 @@
// AbstractGangTask
//
// Gang/Group class hierarchy:
-// AbstractWorkGang
-// WorkGang
-// YieldingFlexibleWorkGang (defined in another file)
+// WorkGang
//
// Worker class hierarchy:
-// AbstractGangWorker (subclass of WorkerThread)
-// GangWorker
-// YieldingFlexibleGangWorker (defined in another file)
+// GangWorker (subclass of WorkerThread)
// Forward declarations of classes defined here
-class AbstractGangWorker;
+class GangWorker;
class Semaphore;
class ThreadClosure;
-class WorkGang;
class GangTaskDispatcher;
// An abstract task to be worked on by a gang.
@@ -86,11 +81,10 @@ struct WorkData {
// The work gang is the collection of workers to execute tasks.
// The number of workers run for a task is "_active_workers"
-// while "_total_workers" is the number of available of workers.
-class AbstractWorkGang : public CHeapObj<mtInternal> {
- protected:
+// while "_total_workers" is the number of available workers.
+class WorkGang : public CHeapObj<mtInternal> {
// The array of worker threads for this gang.
- AbstractGangWorker** _workers;
+ GangWorker** _workers;
// The count of the number of workers in the gang.
uint _total_workers;
// The currently active workers in this gang.
@@ -100,19 +94,32 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
// Printing support.
const char* _name;
- ~AbstractWorkGang() {}
-
- private:
// Initialize only instance data.
const bool _are_GC_task_threads;
const bool _are_ConcurrentGC_threads;
- void set_thread(uint worker_id, AbstractGangWorker* worker) {
+ // To get access to the GangTaskDispatcher instance.
+ friend class GangWorker;
+ GangTaskDispatcher* const _dispatcher;
+
+ GangTaskDispatcher* dispatcher() const { return _dispatcher; }
+
+ void set_thread(uint worker_id, GangWorker* worker) {
_workers[worker_id] = worker;
}
+ // Add GC workers when _created_workers < _active_workers; otherwise, no-op.
+ // If there's no memory/thread allocation failure, _created_worker is
+ // adjusted to match _active_workers (_created_worker == _active_workers).
+ void add_workers(bool initializing);
+
+ GangWorker* allocate_worker(uint which);
+
public:
- AbstractWorkGang(const char* name, uint workers, bool are_GC_task_threads, bool are_ConcurrentGC_threads);
+ WorkGang(const char* name, uint workers, bool are_GC_task_threads, bool are_ConcurrentGC_threads);
+
+ ~WorkGang();
+
// Initialize workers in the gang. Return true if initialization succeeded.
void initialize_workers();
@@ -125,7 +132,8 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
return _created_workers;
}
- virtual uint active_workers() const {
+ uint active_workers() const {
+ assert(_active_workers != 0, "zero active workers");
assert(_active_workers <= _total_workers,
"_active_workers: %u > _total_workers: %u", _active_workers, _total_workers);
return _active_workers;
@@ -134,21 +142,15 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
uint update_active_workers(uint v) {
assert(v <= _total_workers,
"Trying to set more workers active than there are");
- _active_workers = MIN2(v, _total_workers);
- add_workers(false /* exit_on_failure */);
assert(v != 0, "Trying to set active workers to 0");
+ _active_workers = v;
+ add_workers(false /* initializing */);
log_trace(gc, task)("%s: using %d out of %d workers", name(), _active_workers, _total_workers);
return _active_workers;
}
- // Add GC workers as needed.
- void add_workers(bool initializing);
-
- // Add GC workers as needed to reach the specified number of workers.
- void add_workers(uint active_workers, bool initializing);
-
// Return the Ith worker.
- AbstractGangWorker* worker(uint i) const;
+ GangWorker* worker(uint i) const;
// Base name (without worker id #) of threads.
const char* group_name() { return name(); }
@@ -156,44 +158,20 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
void threads_do(ThreadClosure* tc) const;
// Create a GC worker and install it into the work gang.
- virtual AbstractGangWorker* install_worker(uint which);
+ virtual GangWorker* install_worker(uint which);
// Debugging.
const char* name() const { return _name; }
- protected:
- virtual AbstractGangWorker* allocate_worker(uint which) = 0;
-};
-
-// An class representing a gang of workers.
-class WorkGang: public AbstractWorkGang {
- // To get access to the GangTaskDispatcher instance.
- friend class GangWorker;
-
- GangTaskDispatcher* const _dispatcher;
- GangTaskDispatcher* dispatcher() const {
- return _dispatcher;
- }
-
-public:
- WorkGang(const char* name,
- uint workers,
- bool are_GC_task_threads,
- bool are_ConcurrentGC_threads);
-
- ~WorkGang();
-
// Run a task using the current active number of workers, returns when the task is done.
- virtual void run_task(AbstractGangTask* task);
+ void run_task(AbstractGangTask* task);
+
// Run a task with the given number of workers, returns
// when the task is done. The number of workers must be at most the number of
// active workers. Additional workers may be created if an insufficient
// number currently exists. If the add_foreground_work flag is true, the current thread
// is used to run the task too.
void run_task(AbstractGangTask* task, uint num_workers, bool add_foreground_work = false);
-
-protected:
- virtual AbstractGangWorker* allocate_worker(uint which);
};
// Temporarily try to set the number of active workers.
@@ -201,11 +179,11 @@ class WorkGang: public AbstractWorkGang {
// query the number of active workers.
class WithUpdatedActiveWorkers : public StackObj {
private:
- AbstractWorkGang* const _gang;
+ WorkGang* const _gang;
const uint _old_active_workers;
public:
- WithUpdatedActiveWorkers(AbstractWorkGang* gang, uint requested_num_workers) :
+ WithUpdatedActiveWorkers(WorkGang* gang, uint requested_num_workers) :
_gang(gang),
_old_active_workers(gang->active_workers()) {
uint capped_num_workers = MIN2(requested_num_workers, gang->total_workers());
@@ -218,41 +196,29 @@ class WithUpdatedActiveWorkers : public StackObj {
};
// Several instances of this class run in parallel as workers for a gang.
-class AbstractGangWorker: public WorkerThread {
-public:
- AbstractGangWorker(AbstractWorkGang* gang, uint id);
-
- // The only real method: run a task for the gang.
- virtual void run();
- // Predicate for Thread
- virtual bool is_GC_task_thread() const;
- virtual bool is_ConcurrentGC_thread() const;
- // Printing
- void print_on(outputStream* st) const;
- virtual void print() const;
-
-protected:
- AbstractWorkGang* _gang;
-
- virtual void initialize();
- virtual void loop() = 0;
-
- AbstractWorkGang* gang() const { return _gang; }
-};
+class GangWorker: public WorkerThread {
+private:
+ WorkGang* _gang;
-class GangWorker: public AbstractGangWorker {
-public:
- GangWorker(WorkGang* gang, uint id) : AbstractGangWorker(gang, id) {}
+ void initialize();
+ void loop();
-protected:
- virtual void loop();
+ WorkGang* gang() const { return _gang; }
-private:
WorkData wait_for_task();
void run_task(WorkData work);
void signal_task_done();
- WorkGang* gang() const { return (WorkGang*)_gang; }
+protected:
+ // The only real method: run a task for the gang.
+ void run() override;
+
+public:
+ GangWorker(WorkGang* gang, uint id);
+
+ // Predicate for Thread
+ bool is_GC_task_thread() const override { return gang()->are_GC_task_threads(); }
+ bool is_ConcurrentGC_thread() const override { return gang()->are_ConcurrentGC_threads(); }
};
// A class that acts as a synchronisation barrier. Workers enter
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
index ca9ee95133d..c31bba9c3d4 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
@@ -247,7 +247,7 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
// if (!marking)
__ if_then(marking, BoolTest::ne, zero, unlikely); {
BasicType index_bt = TypeX_X->basic_type();
- assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
+ assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
if (do_load) {
@@ -360,7 +360,7 @@ void ShenandoahBarrierSetC2::shenandoah_write_barrier_pre(GraphKit* kit,
// Helper that guards and inserts a pre-barrier.
void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
Node* pre_val, bool need_mem_bar) const {
- // We could be accessing the referent field of a reference object. If so, when G1
+ // We could be accessing the referent field of a reference object. If so, when Shenandoah
// is enabled, we need to log the value in the referent field in an SATB buffer.
// This routine performs some compile time filters and generates suitable
// runtime filters that guard the pre-barrier code.
@@ -978,7 +978,7 @@ void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase p
if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeMacroExpand) {
ShenandoahBarrierC2Support::verify(Compile::current()->root());
} else if (phase == BarrierSetC2::BeforeCodeGen) {
- // Verify G1 pre-barriers
+ // Verify Shenandoah pre-barriers
const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
Unique_Node_List visited;
@@ -1174,7 +1174,7 @@ bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph
case Op_StoreP: {
Node* adr = n->in(MemNode::Address);
const Type* adr_type = gvn->type(adr);
- // Pointer stores in G1 barriers looks like unsafe access.
+ // Pointer stores in Shenandoah barriers looks like unsafe access.
// Ignore such stores to be able scalar replace non-escaping
// allocations.
if (adr_type->isa_rawptr() && adr->is_AddP()) {
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
index 63fa241495d..e06ce4ec1f4 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
@@ -1528,7 +1528,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
phase->register_new_node(index_adr, ctrl);
BasicType index_bt = TypeX_X->basic_type();
- assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
+ assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
const TypePtr* adr_type = TypeRawPtr::BOTTOM;
Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
phase->register_new_node(index, ctrl);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
index 2c322f8ea0a..d6a5a9269c5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
@@ -157,14 +157,6 @@ void ShenandoahArguments::initialize() {
FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
}
- // AOT is not supported yet
- if (UseAOT) {
- if (!FLAG_IS_DEFAULT(UseAOT)) {
- warning("Shenandoah does not support AOT at this moment, disabling UseAOT");
- }
- FLAG_SET_DEFAULT(UseAOT, false);
- }
-
// TLAB sizing policy makes resizing decisions before each GC cycle. It averages
// historical data, assigning more recent data the weight according to TLABAllocationWeight.
// Current default is good for generational collectors that run frequent young GCs.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp
index c346c9263ae..ebc288efda9 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetNMethod.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2019, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/threadWXSetters.inline.hpp"
bool ShenandoahBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
ShenandoahReentrantLock* lock = ShenandoahNMethod::lock_for_nmethod(nm);
@@ -45,6 +46,8 @@ bool ShenandoahBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
return true;
}
+ MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current());)
+
if (nm->is_unloading()) {
// We don't need to take the lock when unlinking nmethods from
// the Method, because it is only concurrently unlinked by
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
index 38c4adfad7d..d525856e574 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
@@ -144,7 +144,7 @@ class ShenandoahFinalMarkingTask : public AbstractGangTask {
while (satb_mq_set.apply_closure_to_completed_buffer(&cl)) {}
assert(!heap->has_forwarded_objects(), "Not expected");
- ShenandoahMarkRefsClosure mark_cl(q, rp);
+ ShenandoahMarkRefsClosure<NO_DEDUP> mark_cl(q, rp);
ShenandoahSATBAndRemarkThreadsClosure tc(satb_mq_set,
ShenandoahIUBarrier ? &mark_cl : NULL);
Threads::threads_do(&tc);
@@ -191,7 +191,9 @@ ShenandoahMarkConcurrentRootsTask::ShenandoahMarkConcurrentRootsTask(ShenandoahO
void ShenandoahMarkConcurrentRootsTask::work(uint worker_id) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahObjToScanQueue* q = _queue_set->queue(worker_id);
- ShenandoahMarkRefsClosure cl(q, _rp);
+ // Cannot enable string deduplication during root scanning. Otherwise,
+ // may result lock inversion between stack watermark and string dedup queue lock.
+ ShenandoahMarkRefsClosure<NO_DEDUP> cl(q, _rp);
_root_scanner.roots_do(&cl, worker_id);
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 35c30d55d61..49c1c4903a5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -298,7 +298,7 @@ jint ShenandoahHeap::initialize() {
for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
char* req_addr = (char*)addr;
assert(is_aligned(req_addr, cset_align), "Should be aligned");
- ReservedSpace cset_rs(cset_size, cset_align, false, req_addr);
+ ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
if (cset_rs.is_reserved()) {
assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
@@ -307,7 +307,7 @@ jint ShenandoahHeap::initialize() {
}
if (_collection_set == NULL) {
- ReservedSpace cset_rs(cset_size, cset_align, false);
+ ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
}
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
index 85499b145bd..23eb5ecea34 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
@@ -41,11 +41,6 @@ ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToSc
_weak(false)
{ }
-ShenandoahInitMarkRootsClosure::ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
- _queue(q),
- _mark_context(ShenandoahHeap::heap()->marking_context()) {
-}
-
ShenandoahMark::ShenandoahMark() :
_task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) {
}
@@ -72,37 +67,45 @@ void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahRefe
if (heap->unload_classes()) {
if (heap->has_forwarded_objects()) {
if (strdedup) {
- ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
- mark_loop_work(&cl, ld, w, t);
+ using Closure = ShenandoahMarkUpdateRefsMetadataClosure<ENQUEUE_DEDUP>;
+ Closure cl(q, rp);
+ mark_loop_work(&cl, ld, w, t);
} else {
- ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
- mark_loop_work(&cl, ld, w, t);
+ using Closure = ShenandoahMarkUpdateRefsMetadataClosure<NO_DEDUP>;
+ Closure cl(q, rp);
+ mark_loop_work(&cl, ld, w, t);
}
} else {
if (strdedup) {
- ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
- mark_loop_work(&cl, ld, w, t);
+ using Closure = ShenandoahMarkRefsMetadataClosure<ENQUEUE_DEDUP>;
+ Closure cl(q, rp);
+ mark_loop_work(&cl, ld, w, t);
} else {
- ShenandoahMarkRefsMetadataClosure cl(q, rp);
- mark_loop_work(&cl, ld, w, t);
+ using Closure = ShenandoahMarkRefsMetadataClosure<NO_DEDUP>;
+ Closure cl(q, rp);
+ mark_loop_work(&cl, ld, w, t);
}
}
} else {
if (heap->has_forwarded_objects()) {
if (strdedup) {
- ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
- mark_loop_work(&cl, ld, w, t);
+ using Closure = ShenandoahMarkUpdateRefsClosure<ENQUEUE_DEDUP>;
+ Closure cl(q, rp);
+ mark_loop_work(&cl, ld, w, t);
} else {
- ShenandoahMarkUpdateRefsClosure cl(q, rp);
- mark_loop_work(&cl, ld, w, t);
+ using Closure = ShenandoahMarkUpdateRefsClosure<NO_DEDUP>;
+ Closure cl(q, rp);
+ mark_loop_work(&cl, ld, w, t);
}
} else {
if (strdedup) {
- ShenandoahMarkRefsDedupClosure cl(q, rp);
- mark_loop_work(&cl, ld, w, t);
+ using Closure = ShenandoahMarkRefsClosure<ENQUEUE_DEDUP>;
+ Closure cl(q, rp);
+ mark_loop_work(&cl, ld, w, t);
} else {
- ShenandoahMarkRefsClosure cl(q, rp);
- mark_loop_work(&cl, ld, w, t);
+ using Closure = ShenandoahMarkRefsClosure<NO_DEDUP>;
+ Closure cl(q, rp);
+ mark_loop_work(&cl, ld, w, t);
}
}
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp
index 9fea8f8d862..4ee242fdc78 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp
@@ -31,21 +31,6 @@
class ShenandoahCMDrainMarkingStackClosure;
-class ShenandoahInitMarkRootsClosure : public OopClosure {
-private:
- ShenandoahObjToScanQueue* const _queue;
- ShenandoahMarkingContext* const _mark_context;
-
- template <class T>
- inline void do_oop_work(T* p);
-
-public:
- ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q);
-
- void do_oop(narrowOop* p) { do_oop_work(p); }
- void do_oop(oop* p) { do_oop_work(p); }
-};
-
// Base class for mark
// Mark class does not maintain states. Instead, mark states are
// maintained by task queues, mark bitmap and SATB buffers (concurrent mark)
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp
index 4d10a42874e..40d740abaf2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp
@@ -39,11 +39,6 @@
#include "runtime/prefetch.inline.hpp"
#include "utilities/powerOfTwo.hpp"
-template <class T>
-void ShenandoahInitMarkRootsClosure::do_oop_work(T* p) {
- ShenandoahMark::mark_through_ref(p, _queue, _mark_context, false);
-}
-
template <class T>
void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, ShenandoahMarkTask* task) {
oop obj = task->obj();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp
index c0a55fbd856..71471b661b2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,10 +75,11 @@ class ShenandoahMarkUpdateRefsSuperClosure : public ShenandoahMarkRefsSuperClosu
};
};
+template <StringDedupMode STRING_DEDUP>
class ShenandoahMarkUpdateRefsClosure : public ShenandoahMarkUpdateRefsSuperClosure {
private:
template <class T>
- inline void do_oop_work(T* p) { work(p); }
+ inline void do_oop_work(T* p) { work<T, STRING_DEDUP>(p); }
public:
ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
@@ -89,24 +90,11 @@ class ShenandoahMarkUpdateRefsClosure : public ShenandoahMarkUpdateRefsSuperClos
virtual bool do_metadata() { return false; }
};
-class ShenandoahMarkUpdateRefsDedupClosure : public ShenandoahMarkUpdateRefsSuperClosure {
-private:
- template <class T>
- inline void do_oop_work(T* p) { work(p); }
-
-public:
- ShenandoahMarkUpdateRefsDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
- ShenandoahMarkUpdateRefsSuperClosure(q, rp) {}
-
- virtual void do_oop(narrowOop* p) { do_oop_work(p); }
- virtual void do_oop(oop* p) { do_oop_work(p); }
- virtual bool do_metadata() { return false; }
-};
-
+template <StringDedupMode STRING_DEDUP>
class ShenandoahMarkUpdateRefsMetadataClosure : public ShenandoahMarkUpdateRefsSuperClosure {
private:
template <class T>
- inline void do_oop_work(T* p) { work(p); }
+ inline void do_oop_work(T* p) { work<T, STRING_DEDUP>(p); }
public:
ShenandoahMarkUpdateRefsMetadataClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
@@ -117,24 +105,12 @@ class ShenandoahMarkUpdateRefsMetadataClosure : public ShenandoahMarkUpdateRefsS
virtual bool do_metadata() { return true; }
};
-class ShenandoahMarkUpdateRefsMetadataDedupClosure : public ShenandoahMarkUpdateRefsSuperClosure {
-private:
- template <class T>
- inline void do_oop_work(T* p) { work(p); }
-
-public:
- ShenandoahMarkUpdateRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
- ShenandoahMarkUpdateRefsSuperClosure(q, rp) {}
-
- virtual void do_oop(narrowOop* p) { do_oop_work(p); }
- virtual void do_oop(oop* p) { do_oop_work(p); }
- virtual bool do_metadata() { return true; }
-};
+template <StringDedupMode STRING_DEDUP>
class ShenandoahMarkRefsClosure : public ShenandoahMarkRefsSuperClosure {
private:
template <class T>
- inline void do_oop_work(T* p) { work(p); }
+ inline void do_oop_work(T* p) { work<T, STRING_DEDUP>(p); }
public:
ShenandoahMarkRefsClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
@@ -145,24 +121,12 @@ class ShenandoahMarkRefsClosure : public ShenandoahMarkRefsSuperClosure {
virtual bool do_metadata() { return false; }
};
-class ShenandoahMarkRefsDedupClosure : public ShenandoahMarkRefsSuperClosure {
-private:
- template <class T>
- inline void do_oop_work(T* p) { work(p); }
-
-public:
- ShenandoahMarkRefsDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
- ShenandoahMarkRefsSuperClosure(q, rp) {};
-
- virtual void do_oop(narrowOop* p) { do_oop_work(p); }
- virtual void do_oop(oop* p) { do_oop_work(p); }
- virtual bool do_metadata() { return false; }
-};
+template <StringDedupMode STRING_DEDUP>
class ShenandoahMarkRefsMetadataClosure : public ShenandoahMarkRefsSuperClosure {
private:
template <class T>
- inline void do_oop_work(T* p) { work(p); }
+ inline void do_oop_work(T* p) { work<T, STRING_DEDUP>(p); }
public:
ShenandoahMarkRefsMetadataClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
@@ -173,20 +137,6 @@ class ShenandoahMarkRefsMetadataClosure : public ShenandoahMarkRefsSuperClosure
virtual bool do_metadata() { return true; }
};
-class ShenandoahMarkRefsMetadataDedupClosure : public ShenandoahMarkRefsSuperClosure {
-private:
- template <class T>
- inline void do_oop_work(T* p) { work(p); }
-
-public:
- ShenandoahMarkRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
- ShenandoahMarkRefsSuperClosure(q, rp) {};
-
- virtual void do_oop(narrowOop* p) { do_oop_work(p); }
- virtual void do_oop(oop* p) { do_oop_work(p); }
- virtual bool do_metadata() { return true; }
-};
-
class ShenandoahUpdateRefsSuperClosure : public BasicOopIterateClosure {
protected:
ShenandoahHeap* _heap;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
index 787774824ec..c9065f33414 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
@@ -386,8 +386,11 @@ template <typename T>
oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));
- assert(reference_referent(reference) == NULL ||
- ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent(reference)), "only drop references with alive referents");
+#ifdef ASSERT
+ oop referent = reference_referent(reference);
+ assert(referent == NULL || ShenandoahHeap::heap()->marking_context()->is_marked(referent),
+ "only drop references with alive referents");
+#endif
// Unlink and return next in list
oop next = reference_discovered(reference);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
index dbdffd1417b..ddaa66ccc14 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
@@ -36,6 +36,30 @@
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
+class ShenandoahInitMarkRootsClosure : public OopClosure {
+private:
+ ShenandoahObjToScanQueue* const _queue;
+ ShenandoahMarkingContext* const _mark_context;
+
+ template <class T>
+ inline void do_oop_work(T* p);
+public:
+ ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q);
+
+ void do_oop(narrowOop* p) { do_oop_work(p); }
+ void do_oop(oop* p) { do_oop_work(p); }
+};
+
+ShenandoahInitMarkRootsClosure::ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
+ _queue(q),
+ _mark_context(ShenandoahHeap::heap()->marking_context()) {
+}
+
+template <class T>
+void ShenandoahInitMarkRootsClosure::do_oop_work(T* p) {
+ ShenandoahMark::mark_through_ref(p, _queue, _mark_context, false);
+}
+
class ShenandoahSTWMarkTask : public AbstractGangTask {
private:
ShenandoahSTWMark* const _mark;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp
index 538d739bf81..f41331d1d82 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp
@@ -71,8 +71,8 @@ ShenandoahPushWorkerScope::~ShenandoahPushWorkerScope() {
assert(nworkers == _old_workers, "Must be able to restore");
}
-AbstractGangWorker* ShenandoahWorkGang::install_worker(uint which) {
- AbstractGangWorker* worker = WorkGang::install_worker(which);
+GangWorker* ShenandoahWorkGang::install_worker(uint which) {
+ GangWorker* worker = WorkGang::install_worker(which);
ShenandoahThreadLocalData::create(worker);
if (_initialize_gclab) {
ShenandoahThreadLocalData::initialize_gclab(worker);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp
index ab1ea8f5da2..be3bac5cee5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp
@@ -64,17 +64,7 @@ class ShenandoahWorkGang : public WorkGang {
// Create a GC worker and install it into the work gang.
// We need to initialize gclab for dynamic allocated workers
- AbstractGangWorker* install_worker(uint which);
-
- // We allow _active_workers < _total_workers when UseDynamicNumberOfGCThreads is off.
- // We use the same WorkGang for concurrent and parallel processing, and honor
- // ConcGCThreads and ParallelGCThreads settings
- virtual uint active_workers() const {
- assert(_active_workers > 0, "no active worker");
- assert(_active_workers <= _total_workers,
- "_active_workers: %u > _total_workers: %u", _active_workers, _total_workers);
- return _active_workers;
- }
+ GangWorker* install_worker(uint which);
void set_initialize_gclab() { assert(!_initialize_gclab, "Can only enable once"); _initialize_gclab = true; }
};
diff --git a/src/hotspot/share/gc/z/zAbort.cpp b/src/hotspot/share/gc/z/zAbort.cpp
new file mode 100644
index 00000000000..1ac18ce9970
--- /dev/null
+++ b/src/hotspot/share/gc/z/zAbort.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAbort.hpp"
+#include "runtime/atomic.hpp"
+
+volatile bool ZAbort::_should_abort = false;
+
+void ZAbort::abort() {
+ Atomic::release_store_fence(&_should_abort, true);
+}
diff --git a/src/hotspot/share/gc/z/zAbort.hpp b/src/hotspot/share/gc/z/zAbort.hpp
new file mode 100644
index 00000000000..1a5bcc15f19
--- /dev/null
+++ b/src/hotspot/share/gc/z/zAbort.hpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZABORT_HPP
+#define SHARE_GC_Z_ZABORT_HPP
+
+#include "memory/allocation.hpp"
+
+class ZAbort : public AllStatic {
+private:
+ static volatile bool _should_abort;
+
+public:
+ static bool should_abort();
+ static void abort();
+};
+
+#endif // SHARE_GC_Z_ZABORT_HPP
diff --git a/src/hotspot/share/gc/z/zAbort.inline.hpp b/src/hotspot/share/gc/z/zAbort.inline.hpp
new file mode 100644
index 00000000000..b0106cc7b3f
--- /dev/null
+++ b/src/hotspot/share/gc/z/zAbort.inline.hpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZABORT_INLINE_HPP
+#define SHARE_GC_Z_ZABORT_INLINE_HPP
+
+#include "gc/z/zAbort.hpp"
+#include "runtime/atomic.hpp"
+
+inline bool ZAbort::should_abort() {
+ return Atomic::load_acquire(&_should_abort);
+}
+
+#endif // SHARE_GC_Z_ZABORT_INLINE_HPP
diff --git a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
index 7047830305e..9916178cc2c 100644
--- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
+++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "gc/z/zNMethod.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "logging/log.hpp"
+#include "runtime/threadWXSetters.inline.hpp"
bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
@@ -40,6 +41,8 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
return true;
}
+ MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));
+
if (nm->is_unloading()) {
// We don't need to take the lock when unlinking nmethods from
// the Method, because it is only concurrently unlinked by
diff --git a/src/hotspot/share/gc/z/zDriver.cpp b/src/hotspot/share/gc/z/zDriver.cpp
index 0dc974524a2..5f9c8f53c9a 100644
--- a/src/hotspot/share/gc/z/zDriver.cpp
+++ b/src/hotspot/share/gc/z/zDriver.cpp
@@ -26,6 +26,7 @@
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
+#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zBreakpoint.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zDriver.hpp"
@@ -397,6 +398,19 @@ class ZDriverGCScope : public StackObj {
}
};
+// Macro to execute a termination check after a concurrent phase. Note
+// that it's important that the termination check comes after the call
+// to the function f, since we can't abort between pause_relocate_start()
+// and concurrent_relocate(). We need to let concurrent_relocate() call
+// abort_page() on the remaining entries in the relocation set.
+#define concurrent(f) \
+ do { \
+ concurrent_##f(); \
+ if (should_terminate()) { \
+ return; \
+ } \
+ } while (false)
+
void ZDriver::gc(GCCause::Cause cause) {
ZDriverGCScope scope(cause);
@@ -404,31 +418,31 @@ void ZDriver::gc(GCCause::Cause cause) {
pause_mark_start();
// Phase 2: Concurrent Mark
- concurrent_mark();
+ concurrent(mark);
// Phase 3: Pause Mark End
while (!pause_mark_end()) {
// Phase 3.5: Concurrent Mark Continue
- concurrent_mark_continue();
+ concurrent(mark_continue);
}
// Phase 4: Concurrent Process Non-Strong References
- concurrent_process_non_strong_references();
+ concurrent(process_non_strong_references);
// Phase 5: Concurrent Reset Relocation Set
- concurrent_reset_relocation_set();
+ concurrent(reset_relocation_set);
// Phase 6: Pause Verify
pause_verify();
// Phase 7: Concurrent Select Relocation Set
- concurrent_select_relocation_set();
+ concurrent(select_relocation_set);
// Phase 8: Pause Relocate Start
pause_relocate_start();
// Phase 9: Concurrent Relocate
- concurrent_relocate();
+ concurrent(relocate);
}
void ZDriver::run_service() {
@@ -456,5 +470,6 @@ void ZDriver::run_service() {
}
void ZDriver::stop_service() {
+ ZAbort::abort();
_gc_cycle_port.send_async(GCCause::_no_gc);
}
diff --git a/src/hotspot/share/gc/z/zErrno.cpp b/src/hotspot/share/gc/z/zErrno.cpp
index c6c3ae1b747..2b239ca573c 100644
--- a/src/hotspot/share/gc/z/zErrno.cpp
+++ b/src/hotspot/share/gc/z/zErrno.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
#include "precompiled.hpp"
#include "gc/z/zErrno.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include <errno.h>
#include <string.h>
diff --git a/src/hotspot/share/gc/z/zForwarding.cpp b/src/hotspot/share/gc/z/zForwarding.cpp
index 7cbaf88c9b4..cabd381cbc1 100644
--- a/src/hotspot/share/gc/z/zForwarding.cpp
+++ b/src/hotspot/share/gc/z/zForwarding.cpp
@@ -58,7 +58,8 @@ bool ZForwarding::retain_page() {
if (ref_count < 0) {
// Claimed
- wait_page_released();
+ const bool success = wait_page_released();
+ assert(success, "Should always succeed");
return false;
}
@@ -129,14 +130,20 @@ void ZForwarding::release_page() {
}
}
-void ZForwarding::wait_page_released() const {
+bool ZForwarding::wait_page_released() const {
if (Atomic::load_acquire(&_ref_count) != 0) {
ZStatTimer timer(ZCriticalPhaseRelocationStall);
ZLocker<ZConditionLock> locker(&_ref_lock);
while (Atomic::load_acquire(&_ref_count) != 0) {
+ if (_ref_abort) {
+ return false;
+ }
+
_ref_lock.wait();
}
}
+
+ return true;
}
ZPage* ZForwarding::detach_page() {
@@ -154,6 +161,14 @@ ZPage* ZForwarding::detach_page() {
return page;
}
+void ZForwarding::abort_page() {
+ ZLocker<ZConditionLock> locker(&_ref_lock);
+ assert(Atomic::load(&_ref_count) > 0, "Invalid state");
+ assert(!_ref_abort, "Invalid state");
+ _ref_abort = true;
+ _ref_lock.notify_all();
+}
+
void ZForwarding::verify() const {
guarantee(_ref_count != 0, "Invalid reference count");
guarantee(_page != NULL, "Invalid page");
diff --git a/src/hotspot/share/gc/z/zForwarding.hpp b/src/hotspot/share/gc/z/zForwarding.hpp
index b01f9c190ec..8212cb2f81b 100644
--- a/src/hotspot/share/gc/z/zForwarding.hpp
+++ b/src/hotspot/share/gc/z/zForwarding.hpp
@@ -48,6 +48,7 @@ class ZForwarding {
ZPage* _page;
mutable ZConditionLock _ref_lock;
volatile int32_t _ref_count;
+ bool _ref_abort;
bool _in_place;
ZForwardingEntry* entries() const;
@@ -70,8 +71,9 @@ class ZForwarding {
bool retain_page();
ZPage* claim_page();
void release_page();
- void wait_page_released() const;
+ bool wait_page_released() const;
ZPage* detach_page();
+ void abort_page();
void set_in_place();
bool in_place() const;
diff --git a/src/hotspot/share/gc/z/zForwarding.inline.hpp b/src/hotspot/share/gc/z/zForwarding.inline.hpp
index dc9998e6252..393684f92d6 100644
--- a/src/hotspot/share/gc/z/zForwarding.inline.hpp
+++ b/src/hotspot/share/gc/z/zForwarding.inline.hpp
@@ -59,6 +59,7 @@ inline ZForwarding::ZForwarding(ZPage* page, size_t nentries) :
_page(page),
_ref_lock(),
_ref_count(1),
+ _ref_abort(false),
_in_place(false) {}
inline uint8_t ZForwarding::type() const {
diff --git a/src/hotspot/share/gc/z/zHeap.cpp b/src/hotspot/share/gc/z/zHeap.cpp
index 7626b93081e..962744c56ee 100644
--- a/src/hotspot/share/gc/z/zHeap.cpp
+++ b/src/hotspot/share/gc/z/zHeap.cpp
@@ -429,7 +429,7 @@ void ZHeap::relocate() {
_relocate.relocate(&_relocation_set);
// Update statistics
- ZStatHeap::set_at_relocate_end(_page_allocator.stats());
+ ZStatHeap::set_at_relocate_end(_page_allocator.stats(), _object_allocator.relocated());
}
void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
@@ -494,7 +494,7 @@ void ZHeap::print_extended_on(outputStream* st) const {
}
// Allow pages to be deleted
- _page_allocator.enable_deferred_delete();
+ _page_allocator.disable_deferred_delete();
}
bool ZHeap::print_location(outputStream* st, uintptr_t addr) const {
diff --git a/src/hotspot/share/gc/z/zHeap.hpp b/src/hotspot/share/gc/z/zHeap.hpp
index 52e0a404e14..95adbf4a68f 100644
--- a/src/hotspot/share/gc/z/zHeap.hpp
+++ b/src/hotspot/share/gc/z/zHeap.hpp
@@ -114,8 +114,8 @@ class ZHeap {
// Object allocation
uintptr_t alloc_tlab(size_t size);
uintptr_t alloc_object(size_t size);
- uintptr_t alloc_object_non_blocking(size_t size);
- void undo_alloc_object(uintptr_t addr, size_t size);
+ uintptr_t alloc_object_for_relocation(size_t size);
+ void undo_alloc_object_for_relocation(uintptr_t addr, size_t size);
bool is_alloc_stalled() const;
void check_out_of_memory();
diff --git a/src/hotspot/share/gc/z/zHeap.inline.hpp b/src/hotspot/share/gc/z/zHeap.inline.hpp
index f308bce4d9e..27e17f65937 100644
--- a/src/hotspot/share/gc/z/zHeap.inline.hpp
+++ b/src/hotspot/share/gc/z/zHeap.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -79,15 +79,15 @@ inline uintptr_t ZHeap::alloc_object(size_t size) {
return addr;
}
-inline uintptr_t ZHeap::alloc_object_non_blocking(size_t size) {
- uintptr_t addr = _object_allocator.alloc_object_non_blocking(size);
+inline uintptr_t ZHeap::alloc_object_for_relocation(size_t size) {
+ const uintptr_t addr = _object_allocator.alloc_object_for_relocation(&_page_table, size);
assert(ZAddress::is_good_or_null(addr), "Bad address");
return addr;
}
-inline void ZHeap::undo_alloc_object(uintptr_t addr, size_t size) {
+inline void ZHeap::undo_alloc_object_for_relocation(uintptr_t addr, size_t size) {
ZPage* const page = _page_table.get(addr);
- _object_allocator.undo_alloc_object(page, addr, size);
+ _object_allocator.undo_alloc_object_for_relocation(page, addr, size);
}
inline uintptr_t ZHeap::relocate_object(uintptr_t addr) {
diff --git a/src/hotspot/share/gc/z/zMark.cpp b/src/hotspot/share/gc/z/zMark.cpp
index 2761b951394..db1cee24389 100644
--- a/src/hotspot/share/gc/z/zMark.cpp
+++ b/src/hotspot/share/gc/z/zMark.cpp
@@ -26,6 +26,7 @@
#include "classfile/classLoaderDataGraph.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zLock.inline.hpp"
@@ -346,20 +347,27 @@ bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCach
}
// Success
- return true;
+ return !timeout->has_expired();
}
-template <typename T>
-bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
- const bool success = drain(stripe, stacks, cache, timeout);
-
- // Flush and publish worker stacks
- stacks->flush(&_allocator, &_stripes);
+bool ZMark::try_steal_local(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
+ // Try to steal a local stack from another stripe
+ for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
+ victim_stripe != stripe;
+ victim_stripe = _stripes.stripe_next(victim_stripe)) {
+ ZMarkStack* const stack = stacks->steal(&_stripes, victim_stripe);
+ if (stack != NULL) {
+ // Success, install the stolen stack
+ stacks->install(&_stripes, stripe, stack);
+ return true;
+ }
+ }
- return success;
+ // Nothing to steal
+ return false;
}
-bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
+bool ZMark::try_steal_global(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
// Try to steal a stack from another stripe
for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
victim_stripe != stripe;
@@ -376,6 +384,10 @@ bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
return false;
}
+bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
+ return try_steal_local(stripe, stacks) || try_steal_global(stripe, stacks);
+}
+
void ZMark::idle() const {
os::naked_short_sleep(1);
}
@@ -486,7 +498,8 @@ bool ZMark::try_terminate() {
class ZMarkNoTimeout : public StackObj {
public:
bool has_expired() {
- return false;
+ // No timeout, but check for signal to abort
+ return ZAbort::should_abort();
}
};
@@ -495,7 +508,10 @@ void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkTh
ZMarkNoTimeout no_timeout;
for (;;) {
- drain_and_flush(stripe, stacks, cache, &no_timeout);
+ if (!drain(stripe, stacks, cache, &no_timeout)) {
+ // Abort
+ break;
+ }
if (try_steal(stripe, stacks)) {
// Stole work
@@ -557,7 +573,7 @@ void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThrea
ZMarkTimeout timeout(timeout_in_micros);
for (;;) {
- if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
+ if (!drain(stripe, stacks, cache, &timeout)) {
// Timed out
break;
}
@@ -583,8 +599,8 @@ void ZMark::work(uint64_t timeout_in_micros) {
work_with_timeout(&cache, stripe, stacks, timeout_in_micros);
}
- // Make sure stacks have been flushed
- assert(stacks->is_empty(&_stripes), "Should be empty");
+ // Flush and publish stacks
+ stacks->flush(&_allocator, &_stripes);
// Free remaining stacks
stacks->free(&_allocator);
diff --git a/src/hotspot/share/gc/z/zMark.hpp b/src/hotspot/share/gc/z/zMark.hpp
index 9711899e89b..1a8c8a3b2d6 100644
--- a/src/hotspot/share/gc/z/zMark.hpp
+++ b/src/hotspot/share/gc/z/zMark.hpp
@@ -71,10 +71,8 @@ class ZMark {
ZMarkThreadLocalStacks* stacks,
ZMarkCache* cache,
T* timeout);
- template <typename T> bool drain_and_flush(ZMarkStripe* stripe,
- ZMarkThreadLocalStacks* stacks,
- ZMarkCache* cache,
- T* timeout);
+ bool try_steal_local(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks);
+ bool try_steal_global(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks);
bool try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks);
void idle() const;
bool flush(bool at_safepoint);
diff --git a/src/hotspot/share/gc/z/zMarkStack.hpp b/src/hotspot/share/gc/z/zMarkStack.hpp
index d61ae5bfdae..823a664090e 100644
--- a/src/hotspot/share/gc/z/zMarkStack.hpp
+++ b/src/hotspot/share/gc/z/zMarkStack.hpp
@@ -139,6 +139,9 @@ class ZMarkThreadLocalStacks {
ZMarkStripe* stripe,
ZMarkStack* stack);
+ ZMarkStack* steal(ZMarkStripeSet* stripes,
+ ZMarkStripe* stripe);
+
bool push(ZMarkStackAllocator* allocator,
ZMarkStripeSet* stripes,
ZMarkStripe* stripe,
diff --git a/src/hotspot/share/gc/z/zMarkStack.inline.hpp b/src/hotspot/share/gc/z/zMarkStack.inline.hpp
index 806cff24b3b..63cee64c41f 100644
--- a/src/hotspot/share/gc/z/zMarkStack.inline.hpp
+++ b/src/hotspot/share/gc/z/zMarkStack.inline.hpp
@@ -219,6 +219,17 @@ inline void ZMarkThreadLocalStacks::install(ZMarkStripeSet* stripes,
*stackp = stack;
}
+inline ZMarkStack* ZMarkThreadLocalStacks::steal(ZMarkStripeSet* stripes,
+ ZMarkStripe* stripe) {
+ ZMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)];
+ ZMarkStack* const stack = *stackp;
+ if (stack != NULL) {
+ *stackp = NULL;
+ }
+
+ return stack;
+}
+
inline bool ZMarkThreadLocalStacks::push(ZMarkStackAllocator* allocator,
ZMarkStripeSet* stripes,
ZMarkStripe* stripe,
diff --git a/src/hotspot/share/gc/z/zNUMA.cpp b/src/hotspot/share/gc/z/zNUMA.cpp
index 2ee790ede30..05e97750936 100644
--- a/src/hotspot/share/gc/z/zNUMA.cpp
+++ b/src/hotspot/share/gc/z/zNUMA.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,18 +24,28 @@
#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zNUMA.hpp"
+#include "gc/z/zNUMA.inline.hpp"
-bool ZNUMA::_enabled;
+ZNUMA::State ZNUMA::_state;
void ZNUMA::initialize() {
pd_initialize();
log_info_p(gc, init)("NUMA Support: %s", to_string());
- if (_enabled) {
+ if (is_enabled()) {
log_info_p(gc, init)("NUMA Nodes: %u", count());
}
}
const char* ZNUMA::to_string() {
- return _enabled ? "Enabled" : "Disabled";
+ switch (_state) {
+ case Enabled:
+ return "Enabled";
+
+ case Unsupported:
+ return "Unsupported";
+
+ default:
+ return "Disabled";
+ }
}
diff --git a/src/hotspot/share/gc/z/zNUMA.hpp b/src/hotspot/share/gc/z/zNUMA.hpp
index 0dc4390e8b4..5caa5ce6e54 100644
--- a/src/hotspot/share/gc/z/zNUMA.hpp
+++ b/src/hotspot/share/gc/z/zNUMA.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,13 @@
class ZNUMA : public AllStatic {
private:
- static bool _enabled;
+ enum State {
+ Disabled,
+ Enabled,
+ Unsupported
+ };
+
+ static State _state;
static void pd_initialize();
diff --git a/src/hotspot/share/gc/z/zNUMA.inline.hpp b/src/hotspot/share/gc/z/zNUMA.inline.hpp
index 65fbd1040dc..d16eada27d3 100644
--- a/src/hotspot/share/gc/z/zNUMA.inline.hpp
+++ b/src/hotspot/share/gc/z/zNUMA.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "gc/z/zNUMA.hpp"
inline bool ZNUMA::is_enabled() {
- return _enabled;
+ return _state == Enabled;
}
#endif // SHARE_GC_Z_ZNUMA_INLINE_HPP
diff --git a/src/hotspot/share/gc/z/zObjectAllocator.cpp b/src/hotspot/share/gc/z/zObjectAllocator.cpp
index 8cd17272f05..bbed59c1301 100644
--- a/src/hotspot/share/gc/z/zObjectAllocator.cpp
+++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "gc/z/zHeuristics.hpp"
#include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zValue.inline.hpp"
@@ -43,6 +44,8 @@ ZObjectAllocator::ZObjectAllocator() :
_use_per_cpu_shared_small_pages(ZHeuristics::use_per_cpu_shared_small_pages()),
_used(0),
_undone(0),
+ _alloc_for_relocation(0),
+ _undo_alloc_for_relocation(0),
_shared_medium_page(NULL),
_shared_small_page(NULL) {}
@@ -54,6 +57,17 @@ ZPage* const* ZObjectAllocator::shared_small_page_addr() const {
return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0);
}
+void ZObjectAllocator::register_alloc_for_relocation(const ZPageTable* page_table, uintptr_t addr, size_t size) {
+ const ZPage* const page = page_table->get(addr);
+ const size_t aligned_size = align_up(size, page->object_alignment());
+ Atomic::add(_alloc_for_relocation.addr(), aligned_size);
+}
+
+void ZObjectAllocator::register_undo_alloc_for_relocation(const ZPage* page, size_t size) {
+ const size_t aligned_size = align_up(size, page->object_alignment());
+ Atomic::add(_undo_alloc_for_relocation.addr(), aligned_size);
+}
+
ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
if (page != NULL) {
@@ -160,20 +174,28 @@ uintptr_t ZObjectAllocator::alloc_object(size_t size) {
return alloc_object(size, flags);
}
-uintptr_t ZObjectAllocator::alloc_object_non_blocking(size_t size) {
+uintptr_t ZObjectAllocator::alloc_object_for_relocation(const ZPageTable* page_table, size_t size) {
ZAllocationFlags flags;
flags.set_non_blocking();
- return alloc_object(size, flags);
+
+ const uintptr_t addr = alloc_object(size, flags);
+ if (addr != 0) {
+ register_alloc_for_relocation(page_table, addr, size);
+ }
+
+ return addr;
}
-void ZObjectAllocator::undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) {
+void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size) {
const uint8_t type = page->type();
if (type == ZPageTypeLarge) {
+ register_undo_alloc_for_relocation(page, size);
undo_alloc_page(page);
ZStatInc(ZCounterUndoObjectAllocationSucceeded);
} else {
if (page->undo_alloc_object_atomic(addr, size)) {
+ register_undo_alloc_for_relocation(page, size);
ZStatInc(ZCounterUndoObjectAllocationSucceeded);
} else {
ZStatInc(ZCounterUndoObjectAllocationFailed);
@@ -209,6 +231,25 @@ size_t ZObjectAllocator::remaining() const {
return 0;
}
+size_t ZObjectAllocator::relocated() const {
+ size_t total_alloc = 0;
+ size_t total_undo_alloc = 0;
+
+ ZPerCPUConstIterator iter_alloc(&_alloc_for_relocation);
+ for (const size_t* alloc; iter_alloc.next(&alloc);) {
+ total_alloc += Atomic::load(alloc);
+ }
+
+ ZPerCPUConstIterator iter_undo_alloc(&_undo_alloc_for_relocation);
+ for (const size_t* undo_alloc; iter_undo_alloc.next(&undo_alloc);) {
+ total_undo_alloc += Atomic::load(undo_alloc);
+ }
+
+ assert(total_alloc >= total_undo_alloc, "Mismatch");
+
+ return total_alloc - total_undo_alloc;
+}
+
void ZObjectAllocator::retire_pages() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
@@ -216,6 +257,10 @@ void ZObjectAllocator::retire_pages() {
_used.set_all(0);
_undone.set_all(0);
+ // Reset relocated bytes
+ _alloc_for_relocation.set_all(0);
+ _undo_alloc_for_relocation.set_all(0);
+
// Reset allocation pages
_shared_medium_page.set(NULL);
_shared_small_page.set_all(NULL);
diff --git a/src/hotspot/share/gc/z/zObjectAllocator.hpp b/src/hotspot/share/gc/z/zObjectAllocator.hpp
index c102608605d..406782486df 100644
--- a/src/hotspot/share/gc/z/zObjectAllocator.hpp
+++ b/src/hotspot/share/gc/z/zObjectAllocator.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,18 +28,24 @@
#include "gc/z/zValue.hpp"
class ZPage;
+class ZPageTable;
class ZObjectAllocator {
private:
const bool _use_per_cpu_shared_small_pages;
ZPerCPU _used;
ZPerCPU _undone;
+ ZPerCPU _alloc_for_relocation;
+ ZPerCPU _undo_alloc_for_relocation;
ZContended _shared_medium_page;
ZPerCPU _shared_small_page;
ZPage** shared_small_page_addr();
ZPage* const* shared_small_page_addr() const;
+ void register_alloc_for_relocation(const ZPageTable* page_table, uintptr_t addr, size_t size);
+ void register_undo_alloc_for_relocation(const ZPage* page, size_t size);
+
ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
void undo_alloc_page(ZPage* page);
@@ -60,11 +66,12 @@ class ZObjectAllocator {
ZObjectAllocator();
uintptr_t alloc_object(size_t size);
- uintptr_t alloc_object_non_blocking(size_t size);
- void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size);
+ uintptr_t alloc_object_for_relocation(const ZPageTable* page_table, size_t size);
+ void undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size);
size_t used() const;
size_t remaining() const;
+ size_t relocated() const;
void retire_pages();
};
diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp
index d3158e52a94..44873fe85a5 100644
--- a/src/hotspot/share/gc/z/zRelocate.cpp
+++ b/src/hotspot/share/gc/z/zRelocate.cpp
@@ -23,6 +23,7 @@
#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
+#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zForwarding.inline.hpp"
@@ -64,7 +65,7 @@ static uintptr_t relocate_object_inner(ZForwarding* forwarding, uintptr_t from_a
// Allocate object
const size_t size = ZUtils::object_size(from_addr);
- const uintptr_t to_addr = ZHeap::heap()->alloc_object_non_blocking(size);
+ const uintptr_t to_addr = ZHeap::heap()->alloc_object_for_relocation(size);
if (to_addr == 0) {
// Allocation failed
return 0;
@@ -77,7 +78,7 @@ static uintptr_t relocate_object_inner(ZForwarding* forwarding, uintptr_t from_a
const uintptr_t to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor);
if (to_addr_final != to_addr) {
// Already relocated, try undo allocation
- ZHeap::heap()->undo_alloc_object(to_addr, size);
+ ZHeap::heap()->undo_alloc_object_for_relocation(to_addr, size);
}
return to_addr_final;
@@ -103,9 +104,14 @@ uintptr_t ZRelocate::relocate_object(ZForwarding* forwarding, uintptr_t from_add
return to_addr;
}
- // Failed to relocate object. Wait for a worker thread to
- // complete relocation of this page, and then forward object.
- forwarding->wait_page_released();
+ // Failed to relocate object. Wait for a worker thread to complete
+ // relocation of this page, and then forward the object. If the GC
+ // aborts the relocation phase before the page has been relocated,
+ // then wait return false and we just forward the object in-place.
+ if (!forwarding->wait_page_released()) {
+ // Forward object in-place
+ return forwarding_insert(forwarding, from_addr, from_addr, &cursor);
+ }
}
// Forward object
@@ -339,8 +345,15 @@ class ZRelocateClosure : public ObjectClosure {
}
void do_forwarding(ZForwarding* forwarding) {
- // Relocate objects
_forwarding = forwarding;
+
+ // Check if we should abort
+ if (ZAbort::should_abort()) {
+ _forwarding->abort_page();
+ return;
+ }
+
+ // Relocate objects
_forwarding->object_iterate(this);
// Verify
diff --git a/src/hotspot/share/gc/z/zRelocate.hpp b/src/hotspot/share/gc/z/zRelocate.hpp
index 0400837d452..647a640970a 100644
--- a/src/hotspot/share/gc/z/zRelocate.hpp
+++ b/src/hotspot/share/gc/z/zRelocate.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,6 @@ class ZRelocate {
uintptr_t relocate_object(ZForwarding* forwarding, uintptr_t from_addr) const;
uintptr_t forward_object(ZForwarding* forwarding, uintptr_t from_addr) const;
- void start();
void relocate(ZRelocationSet* relocation_set);
};
diff --git a/src/hotspot/share/gc/z/zStat.cpp b/src/hotspot/share/gc/z/zStat.cpp
index 7b1faab5000..a93f9fece35 100644
--- a/src/hotspot/share/gc/z/zStat.cpp
+++ b/src/hotspot/share/gc/z/zStat.cpp
@@ -23,6 +23,7 @@
#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
+#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zCPU.inline.hpp"
#include "gc/z/zGlobals.hpp"
@@ -642,6 +643,12 @@ void ZStatPhaseCycle::register_start(const Ticks& start) const {
}
void ZStatPhaseCycle::register_end(const Ticks& start, const Ticks& end) const {
+ if (ZAbort::should_abort()) {
+ log_info(gc)("Garbage Collection (%s) Aborted",
+ GCCause::to_string(ZCollectedHeap::heap()->gc_cause()));
+ return;
+ }
+
timer()->register_gc_end(end);
ZCollectedHeap::heap()->print_heap_after_gc();
@@ -712,6 +719,10 @@ void ZStatPhaseConcurrent::register_start(const Ticks& start) const {
}
void ZStatPhaseConcurrent::register_end(const Ticks& start, const Ticks& end) const {
+ if (ZAbort::should_abort()) {
+ return;
+ }
+
timer()->register_gc_concurrent_end(end);
const Tickspan duration = end - start;
@@ -730,6 +741,10 @@ void ZStatSubPhase::register_start(const Ticks& start) const {
}
void ZStatSubPhase::register_end(const Ticks& start, const Ticks& end) const {
+ if (ZAbort::should_abort()) {
+ return;
+ }
+
ZTracer::tracer()->report_thread_phase(name(), start, end);
const Tickspan duration = end - start;
@@ -1326,7 +1341,9 @@ void ZStatHeap::set_at_relocate_start(const ZPageAllocatorStats& stats) {
_at_relocate_start.reclaimed = stats.reclaimed();
}
-void ZStatHeap::set_at_relocate_end(const ZPageAllocatorStats& stats) {
+void ZStatHeap::set_at_relocate_end(const ZPageAllocatorStats& stats, size_t non_worker_relocated) {
+ const size_t reclaimed = stats.reclaimed() - MIN2(non_worker_relocated, stats.reclaimed());
+
_at_relocate_end.capacity = stats.capacity();
_at_relocate_end.capacity_high = capacity_high();
_at_relocate_end.capacity_low = capacity_low();
@@ -1336,9 +1353,9 @@ void ZStatHeap::set_at_relocate_end(const ZPageAllocatorStats& stats) {
_at_relocate_end.used = stats.used();
_at_relocate_end.used_high = stats.used_high();
_at_relocate_end.used_low = stats.used_low();
- _at_relocate_end.allocated = allocated(stats.used(), stats.reclaimed());
- _at_relocate_end.garbage = garbage(stats.reclaimed());
- _at_relocate_end.reclaimed = stats.reclaimed();
+ _at_relocate_end.allocated = allocated(stats.used(), reclaimed);
+ _at_relocate_end.garbage = garbage(reclaimed);
+ _at_relocate_end.reclaimed = reclaimed;
}
size_t ZStatHeap::max_capacity() {
diff --git a/src/hotspot/share/gc/z/zStat.hpp b/src/hotspot/share/gc/z/zStat.hpp
index 453ad87375f..c3b28ff2c42 100644
--- a/src/hotspot/share/gc/z/zStat.hpp
+++ b/src/hotspot/share/gc/z/zStat.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -541,7 +541,7 @@ class ZStatHeap : public AllStatic {
static void set_at_mark_end(const ZPageAllocatorStats& stats);
static void set_at_select_relocation_set(const ZRelocationSetSelectorStats& stats);
static void set_at_relocate_start(const ZPageAllocatorStats& stats);
- static void set_at_relocate_end(const ZPageAllocatorStats& stats);
+ static void set_at_relocate_end(const ZPageAllocatorStats& stats, size_t non_worker_relocated);
static size_t max_capacity();
static size_t used_at_mark_start();
diff --git a/src/hotspot/share/interpreter/abstractInterpreter.cpp b/src/hotspot/share/interpreter/abstractInterpreter.cpp
index 9db41dc10ed..eadcaf55333 100644
--- a/src/hotspot/share/interpreter/abstractInterpreter.cpp
+++ b/src/hotspot/share/interpreter/abstractInterpreter.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "cds/metaspaceShared.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeStream.hpp"
@@ -33,7 +34,6 @@
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/arrayOop.hpp"
#include "oops/constantPool.hpp"
diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp
index 85052fb1c6a..10adbadd76d 100644
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp
@@ -1635,7 +1635,7 @@ void SignatureHandlerLibrary::add(const methodHandle& method) {
method->set_signature_handler(_handlers->at(handler_index));
}
} else {
- DEBUG_ONLY(Thread::current()->check_possible_safepoint());
+ DEBUG_ONLY(JavaThread::current()->check_possible_safepoint());
// use generic signature handler
method->set_signature_handler(Interpreter::slow_signature_handler());
}
diff --git a/src/hotspot/share/interpreter/linkResolver.cpp b/src/hotspot/share/interpreter/linkResolver.cpp
index 19bbd5ce151..fc0e42730d5 100644
--- a/src/hotspot/share/interpreter/linkResolver.cpp
+++ b/src/hotspot/share/interpreter/linkResolver.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "cds/archiveUtils.hpp"
#include "classfile/defaultMethods.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/resolutionErrors.hpp"
@@ -40,7 +41,6 @@
#include "interpreter/linkResolver.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
-#include "memory/archiveUtils.hpp"
#include "memory/resourceArea.hpp"
#include "oops/constantPool.hpp"
#include "oops/cpCache.inline.hpp"
diff --git a/src/hotspot/share/interpreter/oopMapCache.cpp b/src/hotspot/share/interpreter/oopMapCache.cpp
index 509ac00e87d..d0581f66ead 100644
--- a/src/hotspot/share/interpreter/oopMapCache.cpp
+++ b/src/hotspot/share/interpreter/oopMapCache.cpp
@@ -90,7 +90,7 @@ class OopMapForCacheEntry: public GenerateOopMap {
OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry *entry);
// Computes stack map for (method,bci) and initialize entry
- void compute_map(TRAPS);
+ bool compute_map(Thread* current);
int size();
};
@@ -102,16 +102,20 @@ OopMapForCacheEntry::OopMapForCacheEntry(const methodHandle& method, int bci, Oo
}
-void OopMapForCacheEntry::compute_map(TRAPS) {
+bool OopMapForCacheEntry::compute_map(Thread* current) {
assert(!method()->is_native(), "cannot compute oop map for native methods");
// First check if it is a method where the stackmap is always empty
if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
_entry->set_mask_size(0);
} else {
ResourceMark rm;
- GenerateOopMap::compute_map(CATCH);
+ if (!GenerateOopMap::compute_map(current)) {
+ fatal("Unrecoverable verification or out-of-memory error");
+ return false;
+ }
result_for_basicblock(_bci);
}
+ return true;
}
@@ -333,9 +337,10 @@ void OopMapCacheEntry::fill(const methodHandle& method, int bci) {
// extra oop following the parameters (the mirror for static native methods).
fill_for_native(method);
} else {
- EXCEPTION_MARK;
OopMapForCacheEntry gen(method, bci, this);
- gen.compute_map(CATCH);
+ if (!gen.compute_map(Thread::current())) {
+ fatal("Unrecoverable verification or out-of-memory error");
+ }
}
}
diff --git a/src/hotspot/share/interpreter/rewriter.cpp b/src/hotspot/share/interpreter/rewriter.cpp
index 0ef88bfd6e9..79a38a55e8b 100644
--- a/src/hotspot/share/interpreter/rewriter.cpp
+++ b/src/hotspot/share/interpreter/rewriter.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/bytecodes.hpp"
#include "interpreter/interpreter.hpp"
@@ -567,9 +568,12 @@ void Rewriter::rewrite_bytecodes(TRAPS) {
}
void Rewriter::rewrite(InstanceKlass* klass, TRAPS) {
- if (!DumpSharedSpaces) {
- assert(!klass->is_shared(), "archive methods must not be rewritten at run time");
+#if INCLUDE_CDS
+ if (klass->is_shared()) {
+ assert(!klass->is_rewritten(), "rewritten shared classes cannot be rewritten again");
+ assert(MetaspaceShared::is_old_class(klass), "only shared old classes aren't rewritten");
}
+#endif // INCLUDE_CDS
ResourceMark rm(THREAD);
constantPoolHandle cpool(THREAD, klass->constants());
Rewriter rw(klass, cpool, klass->methods(), CHECK);
diff --git a/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp b/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp
index 331b140a4b9..2dd5553b8fd 100644
--- a/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp
+++ b/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp
@@ -125,7 +125,6 @@ void TemplateInterpreterGenerator::generate_all() {
}
{ CodeletMark cm(_masm, "earlyret entry points");
- address earlyret_entry_itos = generate_earlyret_entry_for(itos);
Interpreter::_earlyret_entry =
EntryPoint(
generate_earlyret_entry_for(atos),
@@ -236,7 +235,6 @@ void TemplateInterpreterGenerator::generate_all() {
Interpreter::_deopt_entry[0] = EntryPoint();
Interpreter::_deopt_entry[0].set_entry(vtos, generate_deopt_entry_for(vtos, 0));
for (int i = 1; i < Interpreter::number_of_deopt_entries; i++) {
- address deopt_itos = generate_deopt_entry_for(itos, i);
Interpreter::_deopt_entry[i] =
EntryPoint(
generate_deopt_entry_for(atos, i),
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp
index a150739456b..9a76d3b40d5 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/rootSetClosure.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "gc/shared/oopStorage.inline.hpp"
@@ -72,7 +71,6 @@ void RootSetClosure::process() {
// We don't follow code blob oops, because they have misaligned oops.
Threads::oops_do(this, NULL);
OopStorageSet::strong_oops_do(this);
- AOTLoader::oops_do(this);
}
template class RootSetClosure;
diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp
index b0e1ef3480c..d66e9236d56 100644
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "gc/shared/oopStorage.inline.hpp"
@@ -101,7 +100,6 @@ class ReferenceToRootClosure : public StackObj {
bool do_cldg_roots();
bool do_oop_storage_roots();
bool do_string_table_roots();
- bool do_aot_loader_roots();
bool do_roots();
@@ -150,13 +148,6 @@ bool ReferenceToRootClosure::do_oop_storage_roots() {
return false;
}
-bool ReferenceToRootClosure::do_aot_loader_roots() {
- assert(!complete(), "invariant");
- ReferenceLocateClosure rcl(_callback, OldObjectRoot::_aot, OldObjectRoot::_type_undetermined, NULL);
- AOTLoader::oops_do(&rcl);
- return rcl.complete();
-}
-
bool ReferenceToRootClosure::do_roots() {
assert(!complete(), "invariant");
assert(OldObjectRoot::_system_undetermined == _info._system, "invariant");
@@ -172,11 +163,6 @@ bool ReferenceToRootClosure::do_roots() {
return true;
}
- if (do_aot_loader_roots()) {
- _complete = true;
- return true;
- }
-
return false;
}
diff --git a/src/hotspot/share/jfr/leakprofiler/utilities/rootType.cpp b/src/hotspot/share/jfr/leakprofiler/utilities/rootType.cpp
index f09e4f04aa6..aefbc59a572 100644
--- a/src/hotspot/share/jfr/leakprofiler/utilities/rootType.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/utilities/rootType.cpp
@@ -57,8 +57,6 @@ const char* OldObjectRoot::system_description(System system) {
return "Class Loader Data";
case _code_cache:
return "Code Cache";
- case _aot:
- return "AOT";
#if INCLUDE_JVMCI
case _jvmci:
return "JVMCI";
diff --git a/src/hotspot/share/jfr/leakprofiler/utilities/rootType.hpp b/src/hotspot/share/jfr/leakprofiler/utilities/rootType.hpp
index 6ea7814f8e2..ffc47c7b833 100644
--- a/src/hotspot/share/jfr/leakprofiler/utilities/rootType.hpp
+++ b/src/hotspot/share/jfr/leakprofiler/utilities/rootType.hpp
@@ -39,7 +39,6 @@ class OldObjectRoot : public AllStatic {
_strong_oop_storage_set_last = _strong_oop_storage_set_first + EnumRange().size() - 1,
_class_loader_data,
_code_cache,
- _aot,
JVMCI_ONLY(_jvmci COMMA)
_number_of_systems
};
diff --git a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp
index 486bee203d1..e03b57bb18d 100644
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp
@@ -53,7 +53,6 @@
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutex.hpp"
-#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
typedef JfrCheckpointManager::BufferPtr BufferPtr;
diff --git a/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp b/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp
index 63cd0483857..c844b7db861 100644
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTimeConverter.hpp"
#include "jfr/utilities/jfrTypes.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
static const char* const MAGIC = "FLR";
static const u2 JFR_VERSION_MAJOR = 2;
diff --git a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp
index 95ff610bdf6..c8eb400c9e3 100644
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
static const int64_t MAGIC_OFFSET = 0;
static const int64_t MAGIC_LEN = 4;
diff --git a/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.cpp b/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.cpp
index b7735cc26c3..9551b84e970 100644
--- a/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.cpp
+++ b/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
const julong MAX_ADJUSTED_GLOBAL_BUFFER_SIZE = 1 * M;
const julong MIN_ADJUSTED_GLOBAL_BUFFER_SIZE_CUTOFF = 512 * K;
const julong MIN_GLOBAL_BUFFER_SIZE = 64 * K;
+const julong MAX_GLOBAL_BUFFER_SIZE = 2 * G;
// implies at least 2 * MIN_GLOBAL_BUFFER SIZE
const julong MIN_BUFFER_COUNT = 2;
// MAX global buffer count open ended
@@ -37,6 +38,7 @@ const julong DEFAULT_BUFFER_COUNT = 20;
// MAX thread local buffer size == size of a single global buffer (runtime determined)
// DEFAULT thread local buffer size = 2 * os page size (runtime determined)
const julong MIN_THREAD_BUFFER_SIZE = 4 * K;
+const julong MAX_THREAD_BUFFER_SIZE = 2 * G;
const julong MIN_MEMORY_SIZE = 1 * M;
const julong DEFAULT_MEMORY_SIZE = 10 * M;
@@ -305,6 +307,11 @@ static void thread_buffer_size(JfrMemoryOptions* options) {
options->global_buffer_size = div_total_by_units(options->memory_size, options->buffer_count);
if (options->thread_buffer_size > options->global_buffer_size) {
options->global_buffer_size = options->thread_buffer_size;
+ if (options->memory_size_configured) {
+ options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size);
+ } else {
+ options->memory_size = multiply(options->global_buffer_size, options->buffer_count);
+ }
options->buffer_count = div_total_by_per_unit(options->memory_size, options->global_buffer_size);
}
assert(options->global_buffer_size >= options->thread_buffer_size, "invariant");
@@ -324,7 +331,8 @@ static void assert_post_condition(const JfrMemoryOptions* options) {
assert(options->memory_size % os::vm_page_size() == 0, "invariant");
assert(options->global_buffer_size % os::vm_page_size() == 0, "invariant");
assert(options->thread_buffer_size % os::vm_page_size() == 0, "invariant");
- assert(options->buffer_count > 0, "invariant");
+ assert(options->buffer_count >= MIN_BUFFER_COUNT, "invariant");
+ assert(options->global_buffer_size >= options->thread_buffer_size, "invariant");
}
#endif
@@ -429,6 +437,10 @@ bool JfrMemorySizer::adjust_options(JfrMemoryOptions* options) {
default:
default_size(options);
}
+ if (options->buffer_count < MIN_BUFFER_COUNT ||
+ options->global_buffer_size < options->thread_buffer_size) {
+ return false;
+ }
DEBUG_ONLY(assert_post_condition(options);)
return true;
}
diff --git a/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.hpp b/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.hpp
index 2fcebe3fca8..8f056e95398 100644
--- a/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.hpp
+++ b/src/hotspot/share/jfr/recorder/service/jfrMemorySizer.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,8 @@ extern const julong MIN_BUFFER_COUNT;
extern const julong MIN_GLOBAL_BUFFER_SIZE;
extern const julong MIN_MEMORY_SIZE;
extern const julong MIN_THREAD_BUFFER_SIZE;
+extern const julong MAX_GLOBAL_BUFFER_SIZE;
+extern const julong MAX_THREAD_BUFFER_SIZE;
struct JfrMemoryOptions {
julong memory_size;
diff --git a/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp b/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp
index 9d84ae135df..2e65a7e6867 100644
--- a/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp
+++ b/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -393,34 +393,41 @@ static julong divide_with_user_unit(Argument& memory_argument, julong value) {
return value;
}
-template
-static void log_lower_than_min_value(Argument& memory_argument, julong min_value) {
+static const char higher_than_msg[] = "This value is higher than the maximum size limited ";
+static const char lower_than_msg[] = "This value is lower than the minimum size required ";
+template
+static void log_out_of_range_value(Argument& memory_argument, julong min_value) {
+ const char* msg = lower ? lower_than_msg : higher_than_msg;
if (memory_argument.value()._size != memory_argument.value()._val) {
// has multiplier
log_error(arguments) (
- "This value is lower than the minimum size required " JULONG_FORMAT "%c",
+ "%s" JULONG_FORMAT "%c", msg,
divide_with_user_unit(memory_argument, min_value),
memory_argument.value()._multiplier);
return;
}
log_error(arguments) (
- "This value is lower than the minimum size required " JULONG_FORMAT,
+ "%s" JULONG_FORMAT, msg,
divide_with_user_unit(memory_argument, min_value));
}
+static const char default_val_msg[] = "Value default for option ";
+static const char specified_val_msg[] = "Value specified for option ";
template
static void log_set_value(Argument& memory_argument) {
if (memory_argument.value()._size != memory_argument.value()._val) {
// has multiplier
log_error(arguments) (
- "Value specified for option \"%s\" is " JULONG_FORMAT "%c",
+ "%s\"%s\" is " JULONG_FORMAT "%c",
+ memory_argument.is_set() ? specified_val_msg: default_val_msg,
memory_argument.name(),
memory_argument.value()._val,
memory_argument.value()._multiplier);
return;
}
log_error(arguments) (
- "Value specified for option \"%s\" is " JULONG_FORMAT,
+ "%s\"%s\" is " JULONG_FORMAT,
+ memory_argument.is_set() ? specified_val_msg: default_val_msg,
memory_argument.name(), memory_argument.value()._val);
}
@@ -541,6 +548,10 @@ static bool valid_memory_relations(const JfrMemoryOptions& options) {
return false;
}
}
+ } else if (options.thread_buffer_size_configured && options.memory_size_configured) {
+ if (!ensure_first_gteq_second(_dcmd_memorysize, _dcmd_threadbuffersize)) {
+ return false;
+ }
}
return true;
}
@@ -609,7 +620,7 @@ template
static bool ensure_gteq(Argument& memory_argument, const jlong value) {
if ((jlong)memory_argument.value()._size < value) {
log_set_value(memory_argument);
- log_lower_than_min_value(memory_argument, value);
+ log_out_of_range_value(memory_argument, value);
return false;
}
return true;
@@ -640,6 +651,30 @@ static bool ensure_valid_minimum_sizes() {
return true;
}
+template
+static bool ensure_lteq(Argument& memory_argument, const jlong value) {
+ if ((jlong)memory_argument.value()._size > value) {
+ log_set_value(memory_argument);
+ log_out_of_range_value(memory_argument, value);
+ return false;
+ }
+ return true;
+}
+
+static bool ensure_valid_maximum_sizes() {
+ if (_dcmd_globalbuffersize.is_set()) {
+ if (!ensure_lteq(_dcmd_globalbuffersize, MAX_GLOBAL_BUFFER_SIZE)) {
+ return false;
+ }
+ }
+ if (_dcmd_threadbuffersize.is_set()) {
+ if (!ensure_lteq(_dcmd_threadbuffersize, MAX_THREAD_BUFFER_SIZE)) {
+ return false;
+ }
+ }
+ return true;
+}
+
/**
* Starting with the initial set of memory values from the user,
* sanitize, enforce min/max rules and adjust to a set of consistent options.
@@ -647,7 +682,7 @@ static bool ensure_valid_minimum_sizes() {
* Adjusted memory sizes will be page aligned.
*/
bool JfrOptionSet::adjust_memory_options() {
- if (!ensure_valid_minimum_sizes()) {
+ if (!ensure_valid_minimum_sizes() || !ensure_valid_maximum_sizes()) {
return false;
}
JfrMemoryOptions options;
@@ -656,6 +691,24 @@ bool JfrOptionSet::adjust_memory_options() {
return false;
}
if (!JfrMemorySizer::adjust_options(&options)) {
+ if (options.buffer_count < MIN_BUFFER_COUNT || options.global_buffer_size < options.thread_buffer_size) {
+ log_set_value(_dcmd_memorysize);
+ log_set_value(_dcmd_globalbuffersize);
+ log_error(arguments) ("%s \"%s\" is " JLONG_FORMAT,
+ _dcmd_numglobalbuffers.is_set() ? specified_val_msg: default_val_msg,
+ _dcmd_numglobalbuffers.name(), _dcmd_numglobalbuffers.value());
+ log_set_value(_dcmd_threadbuffersize);
+ if (options.buffer_count < MIN_BUFFER_COUNT) {
+ log_error(arguments) ("numglobalbuffers " JULONG_FORMAT " is less than minimal value " JULONG_FORMAT,
+ options.buffer_count, MIN_BUFFER_COUNT);
+ log_error(arguments) ("Decrease globalbuffersize/threadbuffersize or increase memorysize");
+ } else {
+ log_error(arguments) ("globalbuffersize " JULONG_FORMAT " is less than threadbuffersize" JULONG_FORMAT,
+ options.global_buffer_size, options.thread_buffer_size);
+ log_error(arguments) ("Decrease globalbuffersize or increase memorysize or adjust global/threadbuffersize");
+ }
+ return false;
+ }
if (!check_for_ambiguity(_dcmd_memorysize, _dcmd_globalbuffersize, _dcmd_numglobalbuffers)) {
return false;
}
diff --git a/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp b/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp
index 0723acdc723..f83132807dd 100644
--- a/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp
+++ b/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,6 @@
#include "jfr/writers/jfrNativeEventWriter.hpp"
#include "logging/log.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
diff --git a/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp b/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp
index 04e35440e40..144b9c8b2eb 100644
--- a/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp
+++ b/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp
@@ -104,7 +104,7 @@ bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes)
assert(is_aligned(reservation_size_request_bytes, os::vm_allocation_granularity()), "invariant");
_rs = ReservedSpace(reservation_size_request_bytes,
os::vm_allocation_granularity(),
- UseLargePages && os::can_commit_large_page_memory());
+ os::vm_page_size());
if (!_rs.is_reserved()) {
return false;
}
diff --git a/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp b/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp
index f6bbbb65d4d..e8cda186f1b 100644
--- a/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp
+++ b/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp
@@ -29,7 +29,6 @@
#include "jfr/utilities/jfrRelation.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/os.inline.hpp"
#include "utilities/globalDefinitions.hpp"
/*
diff --git a/src/hotspot/share/jfr/utilities/jfrVersionSystem.inline.hpp b/src/hotspot/share/jfr/utilities/jfrVersionSystem.inline.hpp
index 97f3077e9d4..f9283982095 100644
--- a/src/hotspot/share/jfr/utilities/jfrVersionSystem.inline.hpp
+++ b/src/hotspot/share/jfr/utilities/jfrVersionSystem.inline.hpp
@@ -27,7 +27,7 @@
#include "jfr/utilities/jfrVersionSystem.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
inline JfrVersionSystem::JfrVersionSystem() : _tip(), _head(NULL) {
_tip._value = 1;
diff --git a/src/hotspot/share/jvmci/compilerRuntime.cpp b/src/hotspot/share/jvmci/compilerRuntime.cpp
deleted file mode 100644
index a7ef060b97e..00000000000
--- a/src/hotspot/share/jvmci/compilerRuntime.cpp
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
-#include "classfile/stringTable.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "classfile/vmSymbols.hpp"
-#include "compiler/compilationPolicy.hpp"
-#include "interpreter/linkResolver.hpp"
-#include "jvmci/compilerRuntime.hpp"
-#include "oops/cpCache.inline.hpp"
-#include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/vframe.inline.hpp"
-#include "utilities/sizes.hpp"
-
-// Resolve and allocate String
-JRT_BLOCK_ENTRY(void, CompilerRuntime::resolve_string_by_symbol(JavaThread* current, void* string_result, const char* name))
- JRT_BLOCK
- oop str = *(oop*)string_result; // Is it resolved already?
- if (str == NULL) { // Do resolution
- // First 2 bytes of name contains length (number of bytes).
- int len = Bytes::get_Java_u2((address)name);
- name += 2;
- TempNewSymbol sym = SymbolTable::new_symbol(name, len);
- str = StringTable::intern(sym, CHECK);
- assert(java_lang_String::is_instance(str), "must be string");
- *(oop*)string_result = str; // Store result
- }
- assert(str != NULL, "Should be allocated!");
- current->set_vm_result(str);
- JRT_BLOCK_END
-JRT_END
-
-
-
-Klass* CompilerRuntime::resolve_klass_helper(const char* name, int len, TRAPS) {
- JavaThread* current = THREAD->as_Java_thread();
- ResourceMark rm(current);
- // last java frame on stack (which includes native call frames)
- RegisterMap cbl_map(current, false);
- // Skip stub
- frame caller_frame = current->last_frame().sender(&cbl_map);
- CodeBlob* caller_cb = caller_frame.cb();
- guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
- CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
- methodHandle caller(current, caller_nm->method());
-
- // Use class loader of aot method.
- Handle loader(current, caller->method_holder()->class_loader());
- Handle protection_domain(current, caller->method_holder()->protection_domain());
-
- TempNewSymbol sym = SymbolTable::new_symbol(name, len);
- if (sym != NULL && Signature::has_envelope(sym)) {
- // Ignore wrapping L and ;
- sym = Signature::strip_envelope(sym);
- }
- if (sym == NULL) {
- return NULL;
- }
- Klass* k = SystemDictionary::resolve_or_fail(sym, loader, protection_domain, true, CHECK_NULL);
-
- return k;
-}
-
-// Resolve Klass
-JRT_BLOCK_ENTRY(Klass*, CompilerRuntime::resolve_klass_by_symbol(JavaThread* current, Klass** klass_result, const char* name))
- Klass* k = NULL;
- JRT_BLOCK
- k = *klass_result; // Is it resolved already?
- if (k == NULL) { // Do resolution
- // First 2 bytes of name contains length (number of bytes).
- int len = Bytes::get_Java_u2((address)name);
- name += 2;
- k = CompilerRuntime::resolve_klass_helper(name, len, CHECK_NULL);
- *klass_result = k; // Store result
- }
- JRT_BLOCK_END
- assert(k != NULL, " Should be loaded!");
- return k;
-JRT_END
-
-
-Method* CompilerRuntime::resolve_method_helper(Klass* klass, const char* method_name, int method_name_len,
- const char* signature_name, int signature_name_len) {
- Method* m = NULL;
- TempNewSymbol name_symbol = SymbolTable::probe(method_name, method_name_len);
- TempNewSymbol signature_symbol = SymbolTable::probe(signature_name, signature_name_len);
- if (name_symbol != NULL && signature_symbol != NULL) {
- if (name_symbol == vmSymbols::object_initializer_name() ||
- name_symbol == vmSymbols::class_initializer_name()) {
- // Never search superclasses for constructors
- if (klass->is_instance_klass()) {
- m = InstanceKlass::cast(klass)->find_method(name_symbol, signature_symbol);
- }
- } else {
- m = klass->lookup_method(name_symbol, signature_symbol);
- if (m == NULL && klass->is_instance_klass()) {
- m = InstanceKlass::cast(klass)->lookup_method_in_ordered_interfaces(name_symbol, signature_symbol);
- }
- }
- }
- return m;
-}
-
-JRT_BLOCK_ENTRY(void, CompilerRuntime::resolve_dynamic_invoke(JavaThread* current, oop* appendix_result))
- JRT_BLOCK
- {
- ResourceMark rm(current);
- vframeStream vfst(current, true); // Do not skip and javaCalls
- assert(!vfst.at_end(), "Java frame must exist");
- methodHandle caller(current, vfst.method());
- InstanceKlass* holder = caller->method_holder();
- int bci = vfst.bci();
- Bytecode_invoke bytecode(caller, bci);
- int index = bytecode.index();
-
- // Make sure it's resolved first
- CallInfo callInfo;
- constantPoolHandle cp(current, holder->constants());
- ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index, true));
- Bytecodes::Code invoke_code = bytecode.invoke_code();
- if (!cp_cache_entry->is_resolved(invoke_code)) {
- LinkResolver::resolve_invoke(callInfo, Handle(), cp, index, invoke_code, CHECK);
- if (bytecode.is_invokedynamic()) {
- cp_cache_entry->set_dynamic_call(cp, callInfo);
- } else {
- cp_cache_entry->set_method_handle(cp, callInfo);
- }
- vmassert(cp_cache_entry->is_resolved(invoke_code), "sanity");
- }
-
- Handle appendix(current, cp_cache_entry->appendix_if_resolved(cp));
- Klass *appendix_klass = appendix.is_null() ? NULL : appendix->klass();
-
- methodHandle adapter_method(current, cp_cache_entry->f1_as_method());
- InstanceKlass *adapter_klass = adapter_method->method_holder();
-
- if (appendix_klass != NULL && appendix_klass->is_instance_klass()) {
- vmassert(InstanceKlass::cast(appendix_klass)->is_initialized(), "sanity");
- }
- if (!adapter_klass->is_initialized()) {
- // Force initialization of adapter class
- adapter_klass->initialize(CHECK);
- // Double-check that it was really initialized,
- // because we could be doing a recursive call
- // from inside .
- }
-
- int cpi = cp_cache_entry->constant_pool_index();
- if (!AOTLoader::reconcile_dynamic_invoke(holder, cpi, adapter_method(),
- appendix_klass)) {
- return;
- }
-
- *appendix_result = appendix();
- current->set_vm_result(appendix());
- }
- JRT_BLOCK_END
-JRT_END
-
-JRT_BLOCK_ENTRY(MethodCounters*, CompilerRuntime::resolve_method_by_symbol_and_load_counters(JavaThread* current, MethodCounters** counters_result, Klass* klass, const char* data))
- MethodCounters* c = *counters_result; // Is it resolved already?
- JRT_BLOCK
- if (c == NULL) { // Do resolution
- // Get method name and its length
- int method_name_len = Bytes::get_Java_u2((address)data);
- data += sizeof(u2);
- const char* method_name = data;
- data += method_name_len;
-
- // Get signature and its length
- int signature_name_len = Bytes::get_Java_u2((address)data);
- data += sizeof(u2);
- const char* signature_name = data;
-
- assert(klass != NULL, "Klass parameter must not be null");
- Method* m = resolve_method_helper(klass, method_name, method_name_len, signature_name, signature_name_len);
- assert(m != NULL, "Method must resolve successfully");
-
- // Create method counters immediately to avoid check at runtime.
- c = m->get_method_counters(current);
- if (c == NULL) {
- THROW_MSG_NULL(vmSymbols::java_lang_OutOfMemoryError(), "Cannot allocate method counters");
- }
-
- *counters_result = c;
- }
- JRT_BLOCK_END
- return c;
-JRT_END
-
-// Resolve and initialize Klass
-JRT_BLOCK_ENTRY(Klass*, CompilerRuntime::initialize_klass_by_symbol(JavaThread* current, Klass** klass_result, const char* name))
- Klass* k = NULL;
- JRT_BLOCK
- k = klass_result[0]; // Is it initialized already?
- if (k == NULL) { // Do initialized
- k = klass_result[1]; // Is it resolved already?
- if (k == NULL) { // Do resolution
- // First 2 bytes of name contains length (number of bytes).
- int len = Bytes::get_Java_u2((address)name);
- const char *cname = name + 2;
- k = CompilerRuntime::resolve_klass_helper(cname, len, CHECK_NULL);
- klass_result[1] = k; // Store resolved result
- }
- Klass* k0 = klass_result[0]; // Is it initialized already?
- if (k0 == NULL && k != NULL && k->is_instance_klass()) {
- // Force initialization of instance class
- InstanceKlass::cast(k)->initialize(CHECK_NULL);
- // Double-check that it was really initialized,
- // because we could be doing a recursive call
- // from inside .
- if (InstanceKlass::cast(k)->is_initialized()) {
- klass_result[0] = k; // Store initialized result
- }
- }
- }
- JRT_BLOCK_END
- assert(k != NULL, " Should be loaded!");
- return k;
-JRT_END
-
-
-JRT_BLOCK_ENTRY(void, CompilerRuntime::invocation_event(JavaThread* current, MethodCounters* counters))
- JRT_BLOCK
- methodHandle mh(current, counters->method());
- RegisterMap map(current, false);
- // Compute the enclosing method
- frame fr = current->last_frame().sender(&map);
- CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
- assert(cm != NULL && cm->is_compiled(), "Sanity check");
- methodHandle emh(current, cm->method());
- CompilationPolicy::event(emh, mh, InvocationEntryBci, InvocationEntryBci, CompLevel_aot, cm, CHECK);
- JRT_BLOCK_END
-JRT_END
-
-JRT_BLOCK_ENTRY(void, CompilerRuntime::backedge_event(JavaThread* current, MethodCounters* counters, int branch_bci, int target_bci))
- assert(branch_bci != InvocationEntryBci && target_bci != InvocationEntryBci, "Wrong bci");
- assert(target_bci <= branch_bci, "Expected a back edge");
- JRT_BLOCK
- methodHandle mh(current, counters->method());
- RegisterMap map(current, false);
-
- // Compute the enclosing method
- frame fr = current->last_frame().sender(&map);
- CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
- assert(cm != NULL && cm->is_compiled(), "Sanity check");
- methodHandle emh(current, cm->method());
- nmethod* osr_nm = CompilationPolicy::event(emh, mh, branch_bci, target_bci, CompLevel_aot, cm, CHECK);
- if (osr_nm != NULL) {
- Deoptimization::deoptimize_frame(current, fr.id());
- }
- JRT_BLOCK_END
-JRT_END
diff --git a/src/hotspot/share/jvmci/compilerRuntime.hpp b/src/hotspot/share/jvmci/compilerRuntime.hpp
deleted file mode 100644
index 17d40b59faf..00000000000
--- a/src/hotspot/share/jvmci/compilerRuntime.hpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_JVMCI_COMPILERRUNTIME_HPP
-#define SHARE_JVMCI_COMPILERRUNTIME_HPP
-
-#include "memory/allocation.hpp"
-#include "memory/resourceArea.hpp"
-#include "oops/klass.hpp"
-#include "oops/method.hpp"
-#include "utilities/exceptions.hpp"
-
-class CompilerRuntime : AllStatic {
- public:
- // Resolves klass for aot compiled method.
- static Klass* resolve_klass_helper(const char* name, int len, TRAPS);
- // Resolves method for aot compiled method.
- static Method* resolve_method_helper(Klass* klass, const char* method_name, int method_name_len,
- const char* signature_name, int signature_name_len);
- // Resolution methods for aot compiled code.
- static void resolve_string_by_symbol(JavaThread* current, void* string_result, const char* name);
- static void resolve_dynamic_invoke(JavaThread* current, oop* appendix_result);
-
- static Klass* resolve_klass_by_symbol(JavaThread* current, Klass** klass_result, const char* name);
- static Klass* initialize_klass_by_symbol(JavaThread* current, Klass** klass_result, const char* name);
- static MethodCounters* resolve_method_by_symbol_and_load_counters(JavaThread* current, MethodCounters** counters_result, Klass* klass_hint, const char* data);
- static void invocation_event(JavaThread* current, MethodCounters* counters);
- static void backedge_event(JavaThread* current, MethodCounters* counters, int branch_bci, int target_bci);
-};
-
-#endif // SHARE_JVMCI_COMPILERRUNTIME_HPP
diff --git a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp
index e98720ef2c4..1b72c212d09 100644
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp
@@ -172,70 +172,6 @@ OopMap* CodeInstaller::create_oop_map(JVMCIObject debug_info, JVMCI_TRAPS) {
return map;
}
-#if INCLUDE_AOT
-AOTOopRecorder::AOTOopRecorder(CodeInstaller* code_inst, Arena* arena, bool deduplicate) : OopRecorder(arena, deduplicate) {
- _code_inst = code_inst;
- _meta_refs = new GrowableArray();
-}
-
-int AOTOopRecorder::nr_meta_refs() const {
- return _meta_refs->length();
-}
-
-jobject AOTOopRecorder::meta_element(int pos) const {
- return _meta_refs->at(pos);
-}
-
-int AOTOopRecorder::find_index(Metadata* h) {
- JavaThread* THREAD = JavaThread::current();
- JVMCIEnv* JVMCIENV = _code_inst->jvmci_env();
- int oldCount = metadata_count();
- int index = this->OopRecorder::find_index(h);
- int newCount = metadata_count();
-
- if (oldCount == newCount) {
- // found a match
- return index;
- }
-
- vmassert(index + 1 == newCount, "must be last");
-
- JVMCIKlassHandle klass(THREAD);
- JVMCIObject result;
- guarantee(h != NULL,
- "If DebugInformationRecorder::describe_scope passes NULL oldCount == newCount must hold.");
- if (h->is_klass()) {
- klass = (Klass*) h;
- result = JVMCIENV->get_jvmci_type(klass, JVMCI_CATCH);
- } else if (h->is_method()) {
- Method* method = (Method*) h;
- methodHandle mh(THREAD, method);
- result = JVMCIENV->get_jvmci_method(mh, JVMCI_CATCH);
- }
- jobject ref = JVMCIENV->get_jobject(result);
- record_meta_ref(ref, index);
-
- return index;
-}
-
-int AOTOopRecorder::find_index(jobject h) {
- if (h == NULL) {
- return 0;
- }
- oop javaMirror = JNIHandles::resolve(h);
- Klass* klass = java_lang_Class::as_Klass(javaMirror);
- return find_index(klass);
-}
-
-void AOTOopRecorder::record_meta_ref(jobject o, int index) {
- assert(index > 0, "must be 1..n");
- index -= 1; // reduce by one to convert to array index
-
- assert(index == _meta_refs->length(), "must be last");
- _meta_refs->append(o);
-}
-#endif // INCLUDE_AOT
-
void* CodeInstaller::record_metadata_reference(CodeSection* section, address dest, JVMCIObject constant, JVMCI_TRAPS) {
/*
* This method needs to return a raw (untyped) pointer, since the value of a pointer to the base
@@ -538,69 +474,6 @@ void CodeInstaller::initialize_dependencies(JVMCIObject compiled_code, OopRecord
}
}
-#if INCLUDE_AOT
-RelocBuffer::~RelocBuffer() {
- FREE_C_HEAP_ARRAY(char, _buffer);
-}
-
-address RelocBuffer::begin() const {
- if (_buffer != NULL) {
- return (address) _buffer;
- }
- return (address) _static_buffer;
-}
-
-void RelocBuffer::set_size(size_t bytes) {
- assert(bytes <= _size, "can't grow in size!");
- _size = bytes;
-}
-
-void RelocBuffer::ensure_size(size_t bytes) {
- assert(_buffer == NULL, "can only be used once");
- assert(_size == 0, "can only be used once");
- if (bytes >= RelocBuffer::stack_size) {
- _buffer = NEW_C_HEAP_ARRAY(char, bytes, mtJVMCI);
- }
- _size = bytes;
-}
-
-JVMCI::CodeInstallResult CodeInstaller::gather_metadata(JVMCIObject target, JVMCIObject compiled_code, CodeMetadata& metadata, JVMCI_TRAPS) {
- assert(JVMCIENV->is_hotspot(), "AOT code is executed only in HotSpot mode");
- CodeBuffer buffer("JVMCI Compiler CodeBuffer for Metadata");
- AOTOopRecorder* recorder = new AOTOopRecorder(this, &_arena, true);
- initialize_dependencies(compiled_code, recorder, JVMCI_CHECK_OK);
-
- metadata.set_oop_recorder(recorder);
-
- // Get instructions and constants CodeSections early because we need it.
- _instructions = buffer.insts();
- _constants = buffer.consts();
- buffer.set_immutable_PIC(_immutable_pic_compilation);
-
- initialize_fields(target, compiled_code, JVMCI_CHECK_OK);
- JVMCI::CodeInstallResult result = initialize_buffer(buffer, false, JVMCI_CHECK_OK);
- if (result != JVMCI::ok) {
- return result;
- }
-
- _debug_recorder->pcs_size(); // create the sentinel record
-
- assert(_debug_recorder->pcs_length() >= 2, "must be at least 2");
-
- metadata.set_pc_desc(_debug_recorder->pcs(), _debug_recorder->pcs_length());
- metadata.set_scopes(_debug_recorder->stream()->buffer(), _debug_recorder->data_size());
- metadata.set_exception_table(&_exception_handler_table);
- metadata.set_implicit_exception_table(&_implicit_exception_table);
-
- RelocBuffer* reloc_buffer = metadata.get_reloc_buffer();
-
- reloc_buffer->ensure_size(buffer.total_relocation_size());
- size_t size = (size_t) buffer.copy_relocations_to(reloc_buffer->begin(), (CodeBuffer::csize_t) reloc_buffer->size(), true);
- reloc_buffer->set_size(size);
- return JVMCI::ok;
-}
-#endif // INCLUDE_AOT
-
// constructor used to create a method
JVMCI::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler,
JVMCIObject target,
@@ -619,9 +492,6 @@ JVMCI::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler,
// Get instructions and constants CodeSections early because we need it.
_instructions = buffer.insts();
_constants = buffer.consts();
-#if INCLUDE_AOT
- buffer.set_immutable_PIC(_immutable_pic_compilation);
-#endif
initialize_fields(target, compiled_code, JVMCI_CHECK_OK);
JVMCI::CodeInstallResult result = initialize_buffer(buffer, true, JVMCI_CHECK_OK);
@@ -741,9 +611,8 @@ void CodeInstaller::initialize_fields(JVMCIObject target, JVMCIObject compiled_c
}
int CodeInstaller::estimate_stubs_size(JVMCI_TRAPS) {
- // Estimate the number of static and aot call stubs that might be emitted.
+ // Estimate the number of static call stubs that might be emitted.
int static_call_stubs = 0;
- int aot_call_stubs = 0;
int trampoline_stubs = 0;
JVMCIObjectArray sites = this->sites();
for (int i = 0; i < JVMCIENV->get_length(sites); i++) {
@@ -771,22 +640,10 @@ int CodeInstaller::estimate_stubs_size(JVMCI_TRAPS) {
}
}
}
-#if INCLUDE_AOT
- if (UseAOT && jvmci_env()->isa_site_Call(site)) {
- JVMCIObject target = jvmci_env()-> get_site_Call_target(site);
- if (!jvmci_env()->isa_HotSpotForeignCallTarget(target)) {
- // Add far aot trampolines.
- aot_call_stubs++;
- }
- }
-#endif
}
}
int size = static_call_stubs * CompiledStaticCall::to_interp_stub_size();
size += trampoline_stubs * CompiledStaticCall::to_trampoline_stub_size();
-#if INCLUDE_AOT
- size += aot_call_stubs * CompiledStaticCall::to_aot_stub_size();
-#endif
return size;
}
@@ -1276,10 +1133,6 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, JVMCIObject si
if (foreign_call.is_non_null()) {
jlong foreign_call_destination = jvmci_env()->get_HotSpotForeignCallTarget_address(foreign_call);
- if (_immutable_pic_compilation) {
- // Use fake short distance during PIC compilation.
- foreign_call_destination = (jlong)(_instructions->start() + pc_offset);
- }
CodeInstaller::pd_relocate_ForeignCall(inst, foreign_call_destination, JVMCI_CHECK);
} else { // method != NULL
if (debug_info.is_null()) {
@@ -1292,10 +1145,6 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, JVMCIObject si
// Need a static call stub for transitions from compiled to interpreted.
CompiledStaticCall::emit_to_interp_stub(buffer, _instructions->start() + pc_offset);
}
-#if INCLUDE_AOT
- // Trampoline to far aot code.
- CompiledStaticCall::emit_to_aot_stub(buffer, _instructions->start() + pc_offset);
-#endif
}
_next_call_type = INVOKE_INVALID;
@@ -1319,25 +1168,11 @@ void CodeInstaller::site_DataPatch(CodeBuffer& buffer, jint pc_offset, JVMCIObje
const char* to_string = JVMCIENV->as_utf8_string(string);
JVMCI_THROW_MSG(IllegalArgumentException, err_msg("Direct object constant reached the backend: %s", to_string));
}
- if (!_immutable_pic_compilation) {
- // Do not patch during PIC compilation.
- pd_patch_OopConstant(pc_offset, constant, JVMCI_CHECK);
- }
+ pd_patch_OopConstant(pc_offset, constant, JVMCI_CHECK);
} else if (jvmci_env()->isa_IndirectHotSpotObjectConstantImpl(constant)) {
- if (!_immutable_pic_compilation) {
- // Do not patch during PIC compilation.
- pd_patch_OopConstant(pc_offset, constant, JVMCI_CHECK);
- }
+ pd_patch_OopConstant(pc_offset, constant, JVMCI_CHECK);
} else if (jvmci_env()->isa_HotSpotMetaspaceConstantImpl(constant)) {
- if (!_immutable_pic_compilation) {
- pd_patch_MetaspaceConstant(pc_offset, constant, JVMCI_CHECK);
- }
-#if INCLUDE_AOT
- } else if (jvmci_env()->isa_HotSpotSentinelConstant(constant)) {
- if (!_immutable_pic_compilation) {
- JVMCI_ERROR("sentinel constant not supported for normal compiles: %s", jvmci_env()->klass_name(constant));
- }
-#endif
+ pd_patch_MetaspaceConstant(pc_offset, constant, JVMCI_CHECK);
} else {
JVMCI_ERROR("unknown constant type in data patch: %s", jvmci_env()->klass_name(constant));
}
diff --git a/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp b/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp
index 347dccd209c..6bb47b23594 100644
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,42 +30,6 @@
#include "jvmci/jvmci.hpp"
#include "jvmci/jvmciEnv.hpp"
-#if INCLUDE_AOT
-class RelocBuffer : public StackObj {
- enum { stack_size = 1024 };
-public:
- RelocBuffer() : _size(0), _buffer(0) {}
- ~RelocBuffer();
- void ensure_size(size_t bytes);
- void set_size(size_t bytes);
- address begin() const;
- size_t size() const { return _size; }
-private:
- size_t _size;
- char _static_buffer[stack_size];
- char *_buffer;
-};
-
-class CodeInstaller;
-
-class AOTOopRecorder : public OopRecorder {
-public:
- AOTOopRecorder(CodeInstaller* code_inst, Arena* arena = NULL, bool deduplicate = false);
-
- virtual int find_index(Metadata* h);
- virtual int find_index(jobject h);
- int nr_meta_refs() const;
- jobject meta_element(int pos) const;
-
-private:
- void record_meta_ref(jobject ref, int index);
-
- GrowableArray* _meta_refs;
-
- CodeInstaller* _code_inst;
-};
-#endif // INCLUDE_AOT
-
class CodeMetadata {
public:
CodeMetadata() {}
@@ -78,11 +42,6 @@ class CodeMetadata {
u_char* get_scopes_desc() const { return _scopes_desc; }
int get_scopes_size() const { return _nr_scopes_desc; }
-#if INCLUDE_AOT
- RelocBuffer* get_reloc_buffer() { return &_reloc_buffer; }
- AOTOopRecorder* get_oop_recorder() { return _oop_recorder; }
-#endif
-
ExceptionHandlerTable* get_exception_table() { return _exception_table; }
ImplicitExceptionTable* get_implicit_exception_table() { return _implicit_exception_table; }
@@ -97,12 +56,6 @@ class CodeMetadata {
_nr_scopes_desc = size;
}
-#if INCLUDE_AOT
- void set_oop_recorder(AOTOopRecorder* recorder) {
- _oop_recorder = recorder;
- }
-#endif
-
void set_exception_table(ExceptionHandlerTable* table) {
_exception_table = table;
}
@@ -119,10 +72,6 @@ class CodeMetadata {
u_char* _scopes_desc;
int _nr_scopes_desc;
-#if INCLUDE_AOT
- RelocBuffer _reloc_buffer;
- AOTOopRecorder* _oop_recorder;
-#endif
ExceptionHandlerTable* _exception_table;
ImplicitExceptionTable* _implicit_exception_table;
};
@@ -202,8 +151,6 @@ class CodeInstaller : public StackObj {
ImplicitExceptionTable _implicit_exception_table;
bool _has_auto_box;
- bool _immutable_pic_compilation; // Installer is called for Immutable PIC compilation.
-
static ConstantOopWriteValue* _oop_null_scope_value;
static ConstantIntValue* _int_m1_scope_value;
static ConstantIntValue* _int_0_scope_value;
@@ -231,15 +178,11 @@ class CodeInstaller : public StackObj {
public:
- CodeInstaller(JVMCIEnv* jvmci_env, bool immutable_pic_compilation) :
+ CodeInstaller(JVMCIEnv* jvmci_env) :
_arena(mtJVMCI),
_jvmci_env(jvmci_env),
- _has_auto_box(false),
- _immutable_pic_compilation(immutable_pic_compilation) {}
+ _has_auto_box(false) {}
-#if INCLUDE_AOT
- JVMCI::CodeInstallResult gather_metadata(JVMCIObject target, JVMCIObject compiled_code, CodeMetadata& metadata, JVMCI_TRAPS);
-#endif
JVMCI::CodeInstallResult install(JVMCICompiler* compiler,
JVMCIObject target,
JVMCIObject compiled_code,
diff --git a/src/hotspot/share/jvmci/jvmciCompiler.cpp b/src/hotspot/share/jvmci/jvmciCompiler.cpp
index bffbacf63d0..f25494b033f 100644
--- a/src/hotspot/share/jvmci/jvmciCompiler.cpp
+++ b/src/hotspot/share/jvmci/jvmciCompiler.cpp
@@ -57,7 +57,7 @@ JVMCICompiler* JVMCICompiler::instance(bool require_non_null, TRAPS) {
// Initialization
void JVMCICompiler::initialize() {
- assert(!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci(), "JVMCI is launched, it's not c1/interpreter only mode");
+ assert(!CompilerConfig::is_c1_or_interpreter_only_no_jvmci(), "JVMCI is launched, it's not c1/interpreter only mode");
if (!UseCompiler || !EnableJVMCI || !UseJVMCICompiler || !should_perform_init()) {
return;
}
diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
index d58f0572b2a..98f4bdd0016 100644
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
@@ -44,6 +44,7 @@
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/constantPool.inline.hpp"
+#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
@@ -249,18 +250,6 @@ C2V_VMENTRY_NULL(jobject, getFlagValue, (JNIEnv* env, jobject c2vm, jobject name
#undef RETURN_BOXED_DOUBLE
C2V_END
-C2V_VMENTRY_NULL(jobject, getObjectAtAddress, (JNIEnv* env, jobject c2vm, jlong oop_address))
- requireInHotSpot("getObjectAtAddress", JVMCI_CHECK_NULL);
- if (oop_address == 0) {
- JVMCI_THROW_MSG_NULL(InternalError, "Handle must be non-zero");
- }
- oop obj = *((oopDesc**) oop_address);
- if (obj != NULL) {
- oopDesc::verify(obj);
- }
- return JNIHandles::make_local(THREAD, obj);
-C2V_END
-
C2V_VMENTRY_NULL(jbyteArray, getBytecode, (JNIEnv* env, jobject, jobject jvmci_method))
methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
@@ -505,10 +494,11 @@ C2V_END
C2V_VMENTRY_0(jboolean, isCompilable,(JNIEnv* env, jobject, jobject jvmci_method))
Method* method = JVMCIENV->asMethod(jvmci_method);
- ConstantPool* cp = method->constMethod()->constants();
- assert(cp != NULL, "npe");
- // don't inline method when constant pool contains a CONSTANT_Dynamic
- return !method->is_not_compilable(CompLevel_full_optimization) && !cp->has_dynamic_constant();
+ // Skip redefined methods
+ if (method->is_old()) {
+ return false;
+ }
+ return !method->is_not_compilable(CompLevel_full_optimization);
C2V_END
C2V_VMENTRY_0(jboolean, hasNeverInlineDirective,(JNIEnv* env, jobject, jobject jvmci_method))
@@ -624,8 +614,41 @@ C2V_VMENTRY_NULL(jobject, lookupClass, (JNIEnv* env, jobject, jclass mirror))
C2V_VMENTRY_NULL(jobject, resolvePossiblyCachedConstantInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index))
constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
- oop result = cp->resolve_possibly_cached_constant_at(index, CHECK_NULL);
- return JVMCIENV->get_jobject(JVMCIENV->get_object_constant(result));
+ oop obj = cp->resolve_possibly_cached_constant_at(index, CHECK_NULL);
+ constantTag tag = cp->tag_at(index);
+ if (tag.is_dynamic_constant() || tag.is_dynamic_constant_in_error()) {
+ if (obj == Universe::the_null_sentinel()) {
+ return JVMCIENV->get_jobject(JVMCIENV->get_JavaConstant_NULL_POINTER());
+ }
+ BasicType bt = Signature::basic_type(cp->uncached_signature_ref_at(index));
+ if (!is_reference_type(bt)) {
+ if (!is_java_primitive(bt)) {
+ return JVMCIENV->get_jobject(JVMCIENV->get_JavaConstant_ILLEGAL());
+ }
+
+ // Convert standard box (e.g. java.lang.Integer) to JVMCI box (e.g. jdk.vm.ci.meta.PrimitiveConstant)
+ jvalue value;
+ jlong raw_value;
+ JVMCIObject kind;
+ BasicType bt2 = java_lang_boxing_object::get_value(obj, &value);
+ assert(bt2 == bt, "");
+ switch (bt2) {
+ case T_LONG: kind = JVMCIENV->get_JavaKind_Long(); raw_value = value.j; break;
+ case T_DOUBLE: kind = JVMCIENV->get_JavaKind_Double(); raw_value = value.j; break;
+ case T_FLOAT: kind = JVMCIENV->get_JavaKind_Float(); raw_value = value.i; break;
+ case T_INT: kind = JVMCIENV->get_JavaKind_Int(); raw_value = value.i; break;
+ case T_SHORT: kind = JVMCIENV->get_JavaKind_Short(); raw_value = value.s; break;
+ case T_BYTE: kind = JVMCIENV->get_JavaKind_Byte(); raw_value = value.b; break;
+ case T_CHAR: kind = JVMCIENV->get_JavaKind_Char(); raw_value = value.c; break;
+ case T_BOOLEAN: kind = JVMCIENV->get_JavaKind_Boolean(); raw_value = value.z; break;
+ default: return JVMCIENV->get_jobject(JVMCIENV->get_JavaConstant_ILLEGAL());
+ }
+
+ JVMCIObject result = JVMCIENV->call_JavaConstant_forPrimitive(kind, raw_value, JVMCI_CHECK_NULL);
+ return JVMCIENV->get_jobject(result);
+ }
+ }
+ return JVMCIENV->get_jobject(JVMCIENV->get_object_constant(obj));
C2V_END
C2V_VMENTRY_0(jint, lookupNameAndTypeRefIndexInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index))
@@ -853,9 +876,8 @@ C2V_VMENTRY_0(jint, installCode, (JNIEnv *env, jobject, jobject target, jobject
JVMCICompiler* compiler = JVMCICompiler::instance(true, CHECK_JNI_ERR);
TraceTime install_time("installCode", JVMCICompiler::codeInstallTimer(!thread->is_Compiler_thread()));
- bool is_immutable_PIC = JVMCIENV->get_HotSpotCompiledCode_isImmutablePIC(compiled_code_handle) > 0;
- CodeInstaller installer(JVMCIENV, is_immutable_PIC);
+ CodeInstaller installer(JVMCIENV);
JVMCI::CodeInstallResult result = installer.install(compiler,
target_handle,
compiled_code_handle,
@@ -897,85 +919,7 @@ C2V_VMENTRY_0(jint, installCode, (JNIEnv *env, jobject, jobject target, jobject
C2V_END
C2V_VMENTRY_0(jint, getMetadata, (JNIEnv *env, jobject, jobject target, jobject compiled_code, jobject metadata))
-#if INCLUDE_AOT
- HandleMark hm(THREAD);
- assert(JVMCIENV->is_hotspot(), "AOT code is executed only in HotSpot mode");
-
- JVMCIObject target_handle = JVMCIENV->wrap(target);
- JVMCIObject compiled_code_handle = JVMCIENV->wrap(compiled_code);
- JVMCIObject metadata_handle = JVMCIENV->wrap(metadata);
-
- CodeMetadata code_metadata;
-
- CodeInstaller installer(JVMCIENV, true /* immutable PIC compilation */);
- JVMCI::CodeInstallResult result = installer.gather_metadata(target_handle, compiled_code_handle, code_metadata, JVMCI_CHECK_0);
- if (result != JVMCI::ok) {
- return result;
- }
-
- if (code_metadata.get_nr_pc_desc() > 0) {
- int size = sizeof(PcDesc) * code_metadata.get_nr_pc_desc();
- JVMCIPrimitiveArray array = JVMCIENV->new_byteArray(size, JVMCI_CHECK_(JVMCI::cache_full));
- JVMCIENV->copy_bytes_from((jbyte*) code_metadata.get_pc_desc(), array, 0, size);
- HotSpotJVMCI::HotSpotMetaData::set_pcDescBytes(JVMCIENV, metadata_handle, array);
- }
-
- if (code_metadata.get_scopes_size() > 0) {
- int size = code_metadata.get_scopes_size();
- JVMCIPrimitiveArray array = JVMCIENV->new_byteArray(size, JVMCI_CHECK_(JVMCI::cache_full));
- JVMCIENV->copy_bytes_from((jbyte*) code_metadata.get_scopes_desc(), array, 0, size);
- HotSpotJVMCI::HotSpotMetaData::set_scopesDescBytes(JVMCIENV, metadata_handle, array);
- }
-
- RelocBuffer* reloc_buffer = code_metadata.get_reloc_buffer();
- int size = (int) reloc_buffer->size();
- JVMCIPrimitiveArray array = JVMCIENV->new_byteArray(size, JVMCI_CHECK_(JVMCI::cache_full));
- JVMCIENV->copy_bytes_from((jbyte*) reloc_buffer->begin(), array, 0, size);
- HotSpotJVMCI::HotSpotMetaData::set_relocBytes(JVMCIENV, metadata_handle, array);
-
- const OopMapSet* oopMapSet = installer.oopMapSet();
- {
- ResourceMark mark;
- ImmutableOopMapBuilder builder(oopMapSet);
- int size = builder.heap_size();
- JVMCIPrimitiveArray array = JVMCIENV->new_byteArray(size, JVMCI_CHECK_(JVMCI::cache_full));
- builder.generate_into((address) HotSpotJVMCI::resolve(array)->byte_at_addr(0));
- HotSpotJVMCI::HotSpotMetaData::set_oopMaps(JVMCIENV, metadata_handle, array);
- }
-
- AOTOopRecorder* recorder = code_metadata.get_oop_recorder();
-
- int nr_meta_refs = recorder->nr_meta_refs();
- JVMCIObjectArray metadataArray = JVMCIENV->new_Object_array(nr_meta_refs, JVMCI_CHECK_(JVMCI::cache_full));
- for (int i = 0; i < nr_meta_refs; ++i) {
- jobject element = recorder->meta_element(i);
- if (element == NULL) {
- return JVMCI::cache_full;
- }
- JVMCIENV->put_object_at(metadataArray, i, JVMCIENV->wrap(element));
- }
- HotSpotJVMCI::HotSpotMetaData::set_metadata(JVMCIENV, metadata_handle, metadataArray);
-
- ExceptionHandlerTable* handler = code_metadata.get_exception_table();
- int table_size = handler->size_in_bytes();
- JVMCIPrimitiveArray exceptionArray = JVMCIENV->new_byteArray(table_size, JVMCI_CHECK_(JVMCI::cache_full));
- if (table_size > 0) {
- handler->copy_bytes_to((address) HotSpotJVMCI::resolve(exceptionArray)->byte_at_addr(0));
- }
- HotSpotJVMCI::HotSpotMetaData::set_exceptionBytes(JVMCIENV, metadata_handle, exceptionArray);
-
- ImplicitExceptionTable* implicit = code_metadata.get_implicit_exception_table();
- int implicit_table_size = implicit->size_in_bytes();
- JVMCIPrimitiveArray implicitExceptionArray = JVMCIENV->new_byteArray(implicit_table_size, JVMCI_CHECK_(JVMCI::cache_full));
- if (implicit_table_size > 0) {
- implicit->copy_bytes_to((address) HotSpotJVMCI::resolve(implicitExceptionArray)->byte_at_addr(0), implicit_table_size);
- }
- HotSpotJVMCI::HotSpotMetaData::set_implicitExceptionBytes(JVMCIENV, metadata_handle, implicitExceptionArray);
-
- return result;
-#else
JVMCI_THROW_MSG_0(InternalError, "unimplemented");
-#endif
C2V_END
C2V_VMENTRY(void, resetCompilationStatistics, (JNIEnv* env, jobject))
@@ -1150,11 +1094,6 @@ C2V_VMENTRY(void, invalidateHotSpotNmethod, (JNIEnv* env, jobject, jobject hs_nm
JVMCIENV->invalidate_nmethod_mirror(nmethod_mirror, JVMCI_CHECK);
C2V_END
-C2V_VMENTRY_NULL(jobject, readUncompressedOop, (JNIEnv* env, jobject, jlong addr))
- oop ret = RawAccess<>::oop_load((oop*)(address)addr);
- return JVMCIENV->get_jobject(JVMCIENV->get_object_constant(ret));
- C2V_END
-
C2V_VMENTRY_NULL(jlongArray, collectCounters, (JNIEnv* env, jobject))
// Returns a zero length array if counters aren't enabled
JVMCIPrimitiveArray array = JVMCIENV->new_longArray(JVMCICounterSize, JVMCI_CHECK_NULL);
@@ -1627,16 +1566,7 @@ C2V_VMENTRY_0(jint, methodDataProfileDataSize, (JNIEnv* env, jobject, jlong meta
C2V_END
C2V_VMENTRY_0(jlong, getFingerprint, (JNIEnv* env, jobject, jlong metaspace_klass))
-#if INCLUDE_AOT
- Klass *k = (Klass*) (address) metaspace_klass;
- if (k->is_instance_klass()) {
- return InstanceKlass::cast(k)->get_stored_fingerprint();
- } else {
- return 0;
- }
-#else
JVMCI_THROW_MSG_0(InternalError, "unimplemented");
-#endif
C2V_END
C2V_VMENTRY_NULL(jobject, getHostClass, (JNIEnv* env, jobject, jobject jvmci_type))
@@ -1908,79 +1838,130 @@ C2V_VMENTRY_NULL(jobjectArray, getDeclaredMethods, (JNIEnv* env, jobject, jobjec
return JVMCIENV->get_jobjectArray(methods);
C2V_END
-C2V_VMENTRY_NULL(jobject, readFieldValue, (JNIEnv* env, jobject, jobject object, jobject field, jboolean is_volatile))
- if (object == NULL || field == NULL) {
+C2V_VMENTRY_NULL(jobject, readFieldValue, (JNIEnv* env, jobject, jobject object, jobject expected_type, long displacement, jboolean is_volatile, jobject kind_object))
+ if (object == NULL || kind_object == NULL) {
JVMCI_THROW_0(NullPointerException);
}
- JVMCIObject field_object = JVMCIENV->wrap(field);
- JVMCIObject java_type = JVMCIENV->get_HotSpotResolvedJavaFieldImpl_type(field_object);
- int modifiers = JVMCIENV->get_HotSpotResolvedJavaFieldImpl_modifiers(field_object);
- Klass* holder = JVMCIENV->asKlass(JVMCIENV->get_HotSpotResolvedJavaFieldImpl_holder(field_object));
- if (!holder->is_instance_klass()) {
- JVMCI_THROW_MSG_0(InternalError, err_msg("Holder %s must be instance klass", holder->external_name()));
- }
- InstanceKlass* ik = InstanceKlass::cast(holder);
- BasicType constant_type;
- if (JVMCIENV->isa_HotSpotResolvedPrimitiveType(java_type)) {
- constant_type = JVMCIENV->kindToBasicType(JVMCIENV->get_HotSpotResolvedPrimitiveType_kind(java_type), JVMCI_CHECK_NULL);
- } else {
- constant_type = T_OBJECT;
- }
- int displacement = JVMCIENV->get_HotSpotResolvedJavaFieldImpl_offset(field_object);
- fieldDescriptor fd;
- if (!ik->find_local_field_from_offset(displacement, (modifiers & JVM_ACC_STATIC) != 0, &fd)) {
- JVMCI_THROW_MSG_0(InternalError, err_msg("Can't find field with displacement %d", displacement));
+
+ JVMCIObject kind = JVMCIENV->wrap(kind_object);
+ BasicType basic_type = JVMCIENV->kindToBasicType(kind, JVMCI_CHECK_NULL);
+
+ InstanceKlass* holder = NULL;
+ if (expected_type != NULL) {
+ holder = InstanceKlass::cast(JVMCIENV->asKlass(JVMCIENV->wrap(expected_type)));
}
- JVMCIObject base = JVMCIENV->wrap(object);
+
+ bool is_static = false;
Handle obj;
+ JVMCIObject base = JVMCIENV->wrap(object);
if (JVMCIENV->isa_HotSpotObjectConstantImpl(base)) {
obj = JVMCIENV->asConstant(base, JVMCI_CHECK_NULL);
+ // asConstant will throw an NPE if a constant contains NULL
+
+ if (holder != NULL && !obj->is_a(holder)) {
+ // Not a subtype of field holder
+ return NULL;
+ }
+ is_static = false;
+ if (holder == NULL && java_lang_Class::is_instance(obj()) && displacement >= InstanceMirrorKlass::offset_of_static_fields()) {
+ is_static = true;
+ }
} else if (JVMCIENV->isa_HotSpotResolvedObjectTypeImpl(base)) {
+ is_static = true;
Klass* klass = JVMCIENV->asKlass(base);
+ if (holder != NULL && holder != klass) {
+ return NULL;
+ }
obj = Handle(THREAD, klass->java_mirror());
} else {
- JVMCI_THROW_MSG_NULL(IllegalArgumentException,
- err_msg("Unexpected type: %s", JVMCIENV->klass_name(base)));
+ // The Java code is expected to guard against this path
+ ShouldNotReachHere();
}
- if (displacement == java_lang_Class::component_mirror_offset() && java_lang_Class::is_instance(obj()) &&
- !java_lang_Class::as_Klass(obj())->is_array_klass()) {
- // Class.componentType for non-array classes can transiently contain an int[] that's
- // used for locking so always return null to mimic Class.getComponentType()
- return JVMCIENV->get_jobject(JVMCIENV->get_JavaConstant_NULL_POINTER());
+ if (displacement < 0 || ((long) displacement + type2aelembytes(basic_type) > HeapWordSize * obj->size())) {
+ // Reading outside of the object bounds
+ JVMCI_THROW_MSG_NULL(IllegalArgumentException, "reading outside object bounds");
+ }
+
+ // Perform basic sanity checks on the read. Primitive reads are permitted to read outside the
+ // bounds of their fields but object reads must map exactly onto the underlying oop slot.
+ if (basic_type == T_OBJECT) {
+ if (obj->is_objArray()) {
+ if (displacement < arrayOopDesc::base_offset_in_bytes(T_OBJECT)) {
+ JVMCI_THROW_MSG_NULL(IllegalArgumentException, "reading from array header");
+ }
+ if (displacement + heapOopSize > arrayOopDesc::base_offset_in_bytes(T_OBJECT) + arrayOop(obj())->length() * heapOopSize) {
+ JVMCI_THROW_MSG_NULL(IllegalArgumentException, "reading after last array element");
+ }
+ if (((displacement - arrayOopDesc::base_offset_in_bytes(T_OBJECT)) % heapOopSize) != 0) {
+ JVMCI_THROW_MSG_NULL(IllegalArgumentException, "misaligned object read from array");
+ }
+ } else if (obj->is_instance()) {
+ InstanceKlass* klass = InstanceKlass::cast(is_static ? java_lang_Class::as_Klass(obj()) : obj->klass());
+ fieldDescriptor fd;
+ if (!klass->find_field_from_offset(displacement, is_static, &fd)) {
+ JVMCI_THROW_MSG_NULL(IllegalArgumentException, err_msg("Can't find field at displacement %d in object of type %s", (int) displacement, klass->external_name()));
+ }
+ if (fd.field_type() != T_OBJECT && fd.field_type() != T_ARRAY) {
+ JVMCI_THROW_MSG_NULL(IllegalArgumentException, err_msg("Field at displacement %d in object of type %s is %s but expected %s", (int) displacement,
+ klass->external_name(), type2name(fd.field_type()), type2name(basic_type)));
+ }
+ } else if (obj->is_typeArray()) {
+ JVMCI_THROW_MSG_NULL(IllegalArgumentException, "Can't read objects from primitive array");
+ } else {
+ ShouldNotReachHere();
+ }
+ } else {
+ if (obj->is_objArray()) {
+ JVMCI_THROW_MSG_NULL(IllegalArgumentException, "Reading primitive from object array");
+ } else if (obj->is_typeArray()) {
+ if (displacement < arrayOopDesc::base_offset_in_bytes(ArrayKlass::cast(obj->klass())->element_type())) {
+ JVMCI_THROW_MSG_NULL(IllegalArgumentException, "reading from array header");
+ }
+ }
}
jlong value = 0;
- JVMCIObject kind;
- switch (constant_type) {
+ switch (basic_type) {
+ case T_BOOLEAN: value = is_volatile ? obj->bool_field_acquire(displacement) : obj->bool_field(displacement); break;
+ case T_BYTE: value = is_volatile ? obj->byte_field_acquire(displacement) : obj->byte_field(displacement); break;
+ case T_SHORT: value = is_volatile ? obj->short_field_acquire(displacement) : obj->short_field(displacement); break;
+ case T_CHAR: value = is_volatile ? obj->char_field_acquire(displacement) : obj->char_field(displacement); break;
+ case T_FLOAT:
+ case T_INT: value = is_volatile ? obj->int_field_acquire(displacement) : obj->int_field(displacement); break;
+ case T_DOUBLE:
+ case T_LONG: value = is_volatile ? obj->long_field_acquire(displacement) : obj->long_field(displacement); break;
+
case T_OBJECT: {
- oop object = is_volatile ? obj->obj_field_acquire(displacement) : obj->obj_field(displacement);
- JVMCIObject result = JVMCIENV->get_object_constant(object);
- if (result.is_null()) {
+ if (displacement == java_lang_Class::component_mirror_offset() && java_lang_Class::is_instance(obj()) &&
+ (java_lang_Class::as_Klass(obj()) == NULL || !java_lang_Class::as_Klass(obj())->is_array_klass())) {
+ // Class.componentType for non-array classes can transiently contain an int[] that's
+ // used for locking so always return null to mimic Class.getComponentType()
return JVMCIENV->get_jobject(JVMCIENV->get_JavaConstant_NULL_POINTER());
}
- return JVMCIENV->get_jobject(result);
- }
- case T_FLOAT: {
- float f = is_volatile ? obj->float_field_acquire(displacement) : obj->float_field(displacement);
- JVMCIObject result = JVMCIENV->call_JavaConstant_forFloat(f, JVMCI_CHECK_NULL);
- return JVMCIENV->get_jobject(result);
- }
- case T_DOUBLE: {
- double f = is_volatile ? obj->double_field_acquire(displacement) : obj->double_field(displacement);
- JVMCIObject result = JVMCIENV->call_JavaConstant_forDouble(f, JVMCI_CHECK_NULL);
- return JVMCIENV->get_jobject(result);
+
+ oop value = is_volatile ? obj->obj_field_acquire(displacement) : obj->obj_field(displacement);
+ if (value == NULL) {
+ return JVMCIENV->get_jobject(JVMCIENV->get_JavaConstant_NULL_POINTER());
+ } else {
+ if (value != NULL && !oopDesc::is_oop(value)) {
+ // Throw an exception to improve debuggability. This check isn't totally reliable because
+ // is_oop doesn't try to be completely safe but for most invalid values it provides a good
+ // enough answer. It is possible to crash in the is_oop call but that just means the crash happens
+ // closer to where things went wrong.
+ JVMCI_THROW_MSG_NULL(InternalError, err_msg("Read bad oop " INTPTR_FORMAT " at offset " JLONG_FORMAT " in object " INTPTR_FORMAT " of type %s",
+ p2i(value), displacement, p2i(obj()), obj->klass()->external_name()));
+ }
+
+ JVMCIObject result = JVMCIENV->get_object_constant(value);
+ return JVMCIENV->get_jobject(result);
+ }
}
- case T_BOOLEAN: value = is_volatile ? obj->bool_field_acquire(displacement) : obj->bool_field(displacement); break;
- case T_BYTE: value = is_volatile ? obj->byte_field_acquire(displacement) : obj->byte_field(displacement); break;
- case T_SHORT: value = is_volatile ? obj->short_field_acquire(displacement) : obj->short_field(displacement); break;
- case T_CHAR: value = is_volatile ? obj->char_field_acquire(displacement) : obj->char_field(displacement); break;
- case T_INT: value = is_volatile ? obj->int_field_acquire(displacement) : obj->int_field(displacement); break;
- case T_LONG: value = is_volatile ? obj->long_field_acquire(displacement) : obj->long_field(displacement); break;
+
default:
ShouldNotReachHere();
}
- JVMCIObject result = JVMCIENV->call_PrimitiveConstant_forTypeChar(type2char(constant_type), value, JVMCI_CHECK_NULL);
+ JVMCIObject result = JVMCIENV->call_JavaConstant_forPrimitive(kind, value, JVMCI_CHECK_NULL);
return JVMCIENV->get_jobject(result);
C2V_END
@@ -2138,55 +2119,6 @@ C2V_VMENTRY_0(jint, arrayIndexScale, (JNIEnv* env, jobject, jobject kind))
return type2aelembytes(type);
C2V_END
-C2V_VMENTRY_0(jbyte, getByte, (JNIEnv* env, jobject, jobject x, long displacement))
- if (x == NULL) {
- JVMCI_THROW_0(NullPointerException);
- }
- Handle xobj = JVMCIENV->asConstant(JVMCIENV->wrap(x), JVMCI_CHECK_0);
- return xobj->byte_field(displacement);
-}
-
-C2V_VMENTRY_0(jshort, getShort, (JNIEnv* env, jobject, jobject x, long displacement))
- if (x == NULL) {
- JVMCI_THROW_0(NullPointerException);
- }
- Handle xobj = JVMCIENV->asConstant(JVMCIENV->wrap(x), JVMCI_CHECK_0);
- return xobj->short_field(displacement);
-}
-
-C2V_VMENTRY_0(jint, getInt, (JNIEnv* env, jobject, jobject x, long displacement))
- if (x == NULL) {
- JVMCI_THROW_0(NullPointerException);
- }
- Handle xobj = JVMCIENV->asConstant(JVMCIENV->wrap(x), JVMCI_CHECK_0);
- return xobj->int_field(displacement);
-}
-
-C2V_VMENTRY_0(jlong, getLong, (JNIEnv* env, jobject, jobject x, long displacement))
- if (x == NULL) {
- JVMCI_THROW_0(NullPointerException);
- }
- Handle xobj = JVMCIENV->asConstant(JVMCIENV->wrap(x), JVMCI_CHECK_0);
- return xobj->long_field(displacement);
-}
-
-C2V_VMENTRY_NULL(jobject, getObject, (JNIEnv* env, jobject, jobject x, long displacement))
- if (x == NULL) {
- JVMCI_THROW_0(NullPointerException);
- }
- Handle xobj = JVMCIENV->asConstant(JVMCIENV->wrap(x), JVMCI_CHECK_0);
- if (displacement == java_lang_Class::component_mirror_offset() && java_lang_Class::is_instance(xobj()) &&
- !java_lang_Class::as_Klass(xobj())->is_array_klass()) {
- // Class.componentType for non-array classes can transiently contain an int[] that's
- // used for locking so always return null to mimic Class.getComponentType()
- return JVMCIENV->get_jobject(JVMCIENV->get_JavaConstant_NULL_POINTER());
- }
-
- oop res = xobj->obj_field(displacement);
- JVMCIObject result = JVMCIENV->get_object_constant(res);
- return JVMCIENV->get_jobject(result);
-}
-
C2V_VMENTRY(void, deleteGlobalHandle, (JNIEnv* env, jobject, jlong h))
jobject handle = (jobject)(address)h;
if (handle != NULL) {
@@ -2700,7 +2632,7 @@ JNINativeMethod CompilerToVM::methods[] = {
{CC "lookupAppendixInPool", CC "(" HS_CONSTANT_POOL "I)" OBJECTCONSTANT, FN_PTR(lookupAppendixInPool)},
{CC "lookupMethodInPool", CC "(" HS_CONSTANT_POOL "IB)" HS_RESOLVED_METHOD, FN_PTR(lookupMethodInPool)},
{CC "constantPoolRemapInstructionOperandFromCache", CC "(" HS_CONSTANT_POOL "I)I", FN_PTR(constantPoolRemapInstructionOperandFromCache)},
- {CC "resolvePossiblyCachedConstantInPool", CC "(" HS_CONSTANT_POOL "I)" OBJECTCONSTANT, FN_PTR(resolvePossiblyCachedConstantInPool)},
+ {CC "resolvePossiblyCachedConstantInPool", CC "(" HS_CONSTANT_POOL "I)" JAVACONSTANT, FN_PTR(resolvePossiblyCachedConstantInPool)},
{CC "resolveTypeInPool", CC "(" HS_CONSTANT_POOL "I)" HS_RESOLVED_KLASS, FN_PTR(resolveTypeInPool)},
{CC "resolveFieldInPool", CC "(" HS_CONSTANT_POOL "I" HS_RESOLVED_METHOD "B[I)" HS_RESOLVED_KLASS, FN_PTR(resolveFieldInPool)},
{CC "resolveInvokeDynamicInPool", CC "(" HS_CONSTANT_POOL "I)V", FN_PTR(resolveInvokeDynamicInPool)},
@@ -2727,7 +2659,6 @@ JNINativeMethod CompilerToVM::methods[] = {
{CC "getLocalVariableTableLength", CC "(" HS_RESOLVED_METHOD ")I", FN_PTR(getLocalVariableTableLength)},
{CC "reprofile", CC "(" HS_RESOLVED_METHOD ")V", FN_PTR(reprofile)},
{CC "invalidateHotSpotNmethod", CC "(" HS_NMETHOD ")V", FN_PTR(invalidateHotSpotNmethod)},
- {CC "readUncompressedOop", CC "(J)" OBJECTCONSTANT, FN_PTR(readUncompressedOop)},
{CC "collectCounters", CC "()[J", FN_PTR(collectCounters)},
{CC "getCountersSize", CC "()I", FN_PTR(getCountersSize)},
{CC "setCountersSize", CC "(I)Z", FN_PTR(setCountersSize)},
@@ -2746,7 +2677,6 @@ JNINativeMethod CompilerToVM::methods[] = {
{CC "interpreterFrameSize", CC "(" BYTECODE_FRAME ")I", FN_PTR(interpreterFrameSize)},
{CC "compileToBytecode", CC "(" OBJECTCONSTANT ")V", FN_PTR(compileToBytecode)},
{CC "getFlagValue", CC "(" STRING ")" OBJECT, FN_PTR(getFlagValue)},
- {CC "getObjectAtAddress", CC "(J)" OBJECT, FN_PTR(getObjectAtAddress)},
{CC "getInterfaces", CC "(" HS_RESOLVED_KLASS ")[" HS_RESOLVED_KLASS, FN_PTR(getInterfaces)},
{CC "getComponentType", CC "(" HS_RESOLVED_KLASS ")" HS_RESOLVED_TYPE, FN_PTR(getComponentType)},
{CC "ensureInitialized", CC "(" HS_RESOLVED_KLASS ")V", FN_PTR(ensureInitialized)},
@@ -2757,8 +2687,8 @@ JNINativeMethod CompilerToVM::methods[] = {
{CC "boxPrimitive", CC "(" OBJECT ")" OBJECTCONSTANT, FN_PTR(boxPrimitive)},
{CC "getDeclaredConstructors", CC "(" HS_RESOLVED_KLASS ")[" RESOLVED_METHOD, FN_PTR(getDeclaredConstructors)},
{CC "getDeclaredMethods", CC "(" HS_RESOLVED_KLASS ")[" RESOLVED_METHOD, FN_PTR(getDeclaredMethods)},
- {CC "readFieldValue", CC "(" HS_RESOLVED_KLASS HS_RESOLVED_FIELD "Z)" JAVACONSTANT, FN_PTR(readFieldValue)},
- {CC "readFieldValue", CC "(" OBJECTCONSTANT HS_RESOLVED_FIELD "Z)" JAVACONSTANT, FN_PTR(readFieldValue)},
+ {CC "readFieldValue", CC "(" HS_RESOLVED_KLASS HS_RESOLVED_KLASS "JZLjdk/vm/ci/meta/JavaKind;)" JAVACONSTANT, FN_PTR(readFieldValue)},
+ {CC "readFieldValue", CC "(" OBJECTCONSTANT HS_RESOLVED_KLASS "JZLjdk/vm/ci/meta/JavaKind;)" JAVACONSTANT, FN_PTR(readFieldValue)},
{CC "isInstance", CC "(" HS_RESOLVED_KLASS OBJECTCONSTANT ")Z", FN_PTR(isInstance)},
{CC "isAssignableFrom", CC "(" HS_RESOLVED_KLASS HS_RESOLVED_KLASS ")Z", FN_PTR(isAssignableFrom)},
{CC "isTrustedForIntrinsics", CC "(" HS_RESOLVED_KLASS ")Z", FN_PTR(isTrustedForIntrinsics)},
@@ -2770,11 +2700,6 @@ JNINativeMethod CompilerToVM::methods[] = {
{CC "readArrayElement", CC "(" OBJECTCONSTANT "I)Ljava/lang/Object;", FN_PTR(readArrayElement)},
{CC "arrayBaseOffset", CC "(Ljdk/vm/ci/meta/JavaKind;)I", FN_PTR(arrayBaseOffset)},
{CC "arrayIndexScale", CC "(Ljdk/vm/ci/meta/JavaKind;)I", FN_PTR(arrayIndexScale)},
- {CC "getByte", CC "(" OBJECTCONSTANT "J)B", FN_PTR(getByte)},
- {CC "getShort", CC "(" OBJECTCONSTANT "J)S", FN_PTR(getShort)},
- {CC "getInt", CC "(" OBJECTCONSTANT "J)I", FN_PTR(getInt)},
- {CC "getLong", CC "(" OBJECTCONSTANT "J)J", FN_PTR(getLong)},
- {CC "getObject", CC "(" OBJECTCONSTANT "J)" OBJECTCONSTANT, FN_PTR(getObject)},
{CC "deleteGlobalHandle", CC "(J)V", FN_PTR(deleteGlobalHandle)},
{CC "registerNativeMethods", CC "(" CLASS ")[J", FN_PTR(registerNativeMethods)},
{CC "isCurrentThreadAttached", CC "()Z", FN_PTR(isCurrentThreadAttached)},
diff --git a/src/hotspot/share/jvmci/jvmciEnv.cpp b/src/hotspot/share/jvmci/jvmciEnv.cpp
index b5a873d840b..cf16ccabb60 100644
--- a/src/hotspot/share/jvmci/jvmciEnv.cpp
+++ b/src/hotspot/share/jvmci/jvmciEnv.cpp
@@ -28,6 +28,7 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
+#include "compiler/compilerOracle.hpp"
#include "compiler/compileTask.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@@ -801,69 +802,23 @@ JVMCIObject JVMCIEnv::call_HotSpotJVMCIRuntime_callToString(JVMCIObject object,
}
-JVMCIObject JVMCIEnv::call_PrimitiveConstant_forTypeChar(jchar kind, jlong value, JVMCI_TRAPS) {
+JVMCIObject JVMCIEnv::call_JavaConstant_forPrimitive(JVMCIObject kind, jlong value, JVMCI_TRAPS) {
JavaThread* THREAD = JVMCI::compilation_tick(JavaThread::current());
if (is_hotspot()) {
JavaCallArguments jargs;
- jargs.push_int(kind);
+ jargs.push_oop(Handle(THREAD, HotSpotJVMCI::resolve(kind)));
jargs.push_long(value);
JavaValue result(T_OBJECT);
- JavaCalls::call_static(&result,
- HotSpotJVMCI::PrimitiveConstant::klass(),
- vmSymbols::forTypeChar_name(),
- vmSymbols::forTypeChar_signature(), &jargs, CHECK_(JVMCIObject()));
- return wrap(result.get_oop());
- } else {
- JNIAccessMark jni(this, THREAD);
- jobject result = (jstring) jni()->CallStaticObjectMethod(JNIJVMCI::PrimitiveConstant::clazz(),
- JNIJVMCI::PrimitiveConstant::forTypeChar_method(),
- kind, value);
- if (jni()->ExceptionCheck()) {
- return JVMCIObject();
- }
- return wrap(result);
- }
-}
-
-JVMCIObject JVMCIEnv::call_JavaConstant_forFloat(float value, JVMCI_TRAPS) {
- JavaThread* THREAD = JVMCI::compilation_tick(JavaThread::current());
- if (is_hotspot()) {
- JavaCallArguments jargs;
- jargs.push_float(value);
- JavaValue result(T_OBJECT);
- JavaCalls::call_static(&result,
- HotSpotJVMCI::JavaConstant::klass(),
- vmSymbols::forFloat_name(),
- vmSymbols::forFloat_signature(), &jargs, CHECK_(JVMCIObject()));
- return wrap(result.get_oop());
- } else {
- JNIAccessMark jni(this, THREAD);
- jobject result = (jstring) jni()->CallStaticObjectMethod(JNIJVMCI::JavaConstant::clazz(),
- JNIJVMCI::JavaConstant::forFloat_method(),
- value);
- if (jni()->ExceptionCheck()) {
- return JVMCIObject();
- }
- return wrap(result);
- }
-}
-
-JVMCIObject JVMCIEnv::call_JavaConstant_forDouble(double value, JVMCI_TRAPS) {
- JavaThread* THREAD = JVMCI::compilation_tick(JavaThread::current());
- if (is_hotspot()) {
- JavaCallArguments jargs;
- jargs.push_double(value);
- JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
HotSpotJVMCI::JavaConstant::klass(),
- vmSymbols::forDouble_name(),
- vmSymbols::forDouble_signature(), &jargs, CHECK_(JVMCIObject()));
+ vmSymbols::forPrimitive_name(),
+ vmSymbols::forPrimitive_signature(), &jargs, CHECK_(JVMCIObject()));
return wrap(result.get_oop());
} else {
JNIAccessMark jni(this, THREAD);
jobject result = (jstring) jni()->CallStaticObjectMethod(JNIJVMCI::JavaConstant::clazz(),
- JNIJVMCI::JavaConstant::forDouble_method(),
- value);
+ JNIJVMCI::JavaConstant::forPrimitive_method(),
+ kind.as_jobject(), value);
if (jni()->ExceptionCheck()) {
return JVMCIObject();
}
@@ -1037,6 +992,7 @@ JVMCIObject JVMCIEnv::get_jvmci_method(const methodHandle& method, JVMCI_TRAPS)
jmetadata handle = _runtime->allocate_handle(method);
jboolean exception = false;
if (is_hotspot()) {
+ CompilerOracle::tag_blackhole_if_possible(method);
JavaValue result(T_OBJECT);
JavaCallArguments args;
args.push_long((jlong) handle);
diff --git a/src/hotspot/share/jvmci/jvmciEnv.hpp b/src/hotspot/share/jvmci/jvmciEnv.hpp
index 9a509f5f94a..5b83a233826 100644
--- a/src/hotspot/share/jvmci/jvmciEnv.hpp
+++ b/src/hotspot/share/jvmci/jvmciEnv.hpp
@@ -307,9 +307,7 @@ class JVMCIEnv : public ResourceObj {
JVMCIObject call_HotSpotJVMCIRuntime_callToString(JVMCIObject object, JVMCI_TRAPS);
- JVMCIObject call_PrimitiveConstant_forTypeChar(jchar kind, jlong value, JVMCI_TRAPS);
- JVMCIObject call_JavaConstant_forFloat(float value, JVMCI_TRAPS);
- JVMCIObject call_JavaConstant_forDouble(double value, JVMCI_TRAPS);
+ JVMCIObject call_JavaConstant_forPrimitive(JVMCIObject kind, jlong value, JVMCI_TRAPS);
jboolean call_HotSpotJVMCIRuntime_isGCSupported(JVMCIObject runtime, jint gcIdentifier);
diff --git a/src/hotspot/share/jvmci/jvmciJavaClasses.hpp b/src/hotspot/share/jvmci/jvmciJavaClasses.hpp
index 4db609d18d1..bb951be4b19 100644
--- a/src/hotspot/share/jvmci/jvmciJavaClasses.hpp
+++ b/src/hotspot/share/jvmci/jvmciJavaClasses.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -245,16 +245,15 @@
int_field(BytecodePosition, bci) \
end_class \
start_class(JavaConstant, jdk_vm_ci_meta_JavaConstant) \
+ static_object_field(JavaConstant, ILLEGAL, "Ljdk/vm/ci/meta/PrimitiveConstant;") \
static_object_field(JavaConstant, NULL_POINTER, "Ljdk/vm/ci/meta/JavaConstant;") \
- jvmci_method(CallStaticObjectMethod, GetStaticMethodID, call_static, JVMCIObject, JavaConstant, forFloat, forFloat_signature, (JVMCIObject kind, jlong value, JVMCI_TRAPS)) \
- jvmci_method(CallStaticObjectMethod, GetStaticMethodID, call_static, JVMCIObject, JavaConstant, forDouble, forDouble_signature, (JVMCIObject kind, jlong value, JVMCI_TRAPS)) \
+ jvmci_method(CallStaticObjectMethod, GetStaticMethodID, call_static, JVMCIObject, JavaConstant, forPrimitive, forPrimitive_signature, (JVMCIObject kind, jlong value, JVMCI_TRAPS)) \
end_class \
start_class(ResolvedJavaMethod, jdk_vm_ci_meta_ResolvedJavaMethod) \
end_class \
start_class(PrimitiveConstant, jdk_vm_ci_meta_PrimitiveConstant) \
object_field(PrimitiveConstant, kind, "Ljdk/vm/ci/meta/JavaKind;") \
long_field(PrimitiveConstant, primitive) \
- jvmci_method(CallStaticObjectMethod, GetStaticMethodID, call_static, JVMCIObject, PrimitiveConstant, forTypeChar, forTypeChar_signature, (JVMCIObject kind, jlong value, JVMCI_TRAPS)) \
end_class \
start_class(RawConstant, jdk_vm_ci_meta_RawConstant) \
end_class \
@@ -286,7 +285,9 @@
static_object_field(JavaKind, Char, "Ljdk/vm/ci/meta/JavaKind;") \
static_object_field(JavaKind, Short, "Ljdk/vm/ci/meta/JavaKind;") \
static_object_field(JavaKind, Int, "Ljdk/vm/ci/meta/JavaKind;") \
+ static_object_field(JavaKind, Float, "Ljdk/vm/ci/meta/JavaKind;") \
static_object_field(JavaKind, Long, "Ljdk/vm/ci/meta/JavaKind;") \
+ static_object_field(JavaKind, Double, "Ljdk/vm/ci/meta/JavaKind;") \
end_class \
start_class(ValueKind, jdk_vm_ci_meta_ValueKind) \
object_field(ValueKind, platformKind, "Ljdk/vm/ci/meta/PlatformKind;") \
diff --git a/src/hotspot/share/jvmci/vmStructs_compiler_runtime.hpp b/src/hotspot/share/jvmci/vmStructs_compiler_runtime.hpp
deleted file mode 100644
index 78cebb2ac48..00000000000
--- a/src/hotspot/share/jvmci/vmStructs_compiler_runtime.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_JVMCI_VMSTRUCTS_COMPILER_RUNTIME_HPP
-#define SHARE_JVMCI_VMSTRUCTS_COMPILER_RUNTIME_HPP
-
-#if INCLUDE_AOT
-#include "jvmci/compilerRuntime.hpp"
-
-#define VM_ADDRESSES_COMPILER_RUNTIME(declare_address, declare_preprocessor_address, declare_function) \
- declare_function(CompilerRuntime::resolve_dynamic_invoke) \
- declare_function(CompilerRuntime::resolve_string_by_symbol) \
- declare_function(CompilerRuntime::resolve_klass_by_symbol) \
- declare_function(CompilerRuntime::resolve_method_by_symbol_and_load_counters) \
- declare_function(CompilerRuntime::initialize_klass_by_symbol) \
- declare_function(CompilerRuntime::invocation_event) \
- declare_function(CompilerRuntime::backedge_event)
-
-#else // INCLUDE_AOT
-
-#define VM_ADDRESSES_COMPILER_RUNTIME(declare_address, declare_preprocessor_address, declare_function)
-
-#endif // INCLUDE_AOT
-
-#endif // SHARE_JVMCI_VMSTRUCTS_COMPILER_RUNTIME_HPP
diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
index e5dd73c48c3..33dcc87c277 100644
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
@@ -29,7 +29,6 @@
#include "jvmci/jvmciCodeInstaller.hpp"
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciRuntime.hpp"
-#include "jvmci/vmStructs_compiler_runtime.hpp"
#include "jvmci/vmStructs_jvmci.hpp"
#include "oops/klassVtable.hpp"
#include "oops/objArrayKlass.hpp"
@@ -188,6 +187,7 @@
nonstatic_field(JavaThread, _jvmci_reserved_oop0, oop) \
nonstatic_field(JavaThread, _should_post_on_exceptions_flag, int) \
nonstatic_field(JavaThread, _jni_environment, JNIEnv) \
+ nonstatic_field(JavaThread, _poll_data, SafepointMechanism::ThreadData) \
nonstatic_field(JavaThread, _stack_overflow_state._reserved_stack_activation, address) \
\
static_field(java_lang_Class, _klass_offset, int) \
@@ -233,7 +233,6 @@
JVMTI_ONLY(nonstatic_field(MethodCounters, _number_of_breakpoints, u2)) \
nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \
nonstatic_field(MethodCounters, _backedge_counter, InvocationCounter) \
- AOT_ONLY(nonstatic_field(MethodCounters, _method, Method*)) \
\
nonstatic_field(MethodData, _size, int) \
nonstatic_field(MethodData, _method, Method*) \
@@ -334,7 +333,6 @@
\
nonstatic_field(Thread, _tlab, ThreadLocalAllocBuffer) \
nonstatic_field(Thread, _allocated_bytes, jlong) \
- nonstatic_field(Thread, _poll_data, SafepointMechanism::ThreadData) \
\
nonstatic_field(ThreadLocalAllocBuffer, _start, HeapWord*) \
nonstatic_field(ThreadLocalAllocBuffer, _top, HeapWord*) \
@@ -430,6 +428,7 @@
declare_constant(JVM_CONSTANT_MethodHandle) \
declare_constant(JVM_CONSTANT_MethodType) \
declare_constant(JVM_CONSTANT_InvokeDynamic) \
+ declare_constant(JVM_CONSTANT_Dynamic) \
declare_constant(JVM_CONSTANT_Module) \
declare_constant(JVM_CONSTANT_Package) \
declare_constant(JVM_CONSTANT_ExternalMax) \
@@ -751,22 +750,11 @@
static_field(VM_Version, _zva_length, int) \
volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*)
-#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
- declare_constant(VM_Version::CPU_FP) \
- declare_constant(VM_Version::CPU_ASIMD) \
- declare_constant(VM_Version::CPU_EVTSTRM) \
- declare_constant(VM_Version::CPU_AES) \
- declare_constant(VM_Version::CPU_PMULL) \
- declare_constant(VM_Version::CPU_SHA1) \
- declare_constant(VM_Version::CPU_SHA2) \
- declare_constant(VM_Version::CPU_CRC32) \
- declare_constant(VM_Version::CPU_LSE) \
- declare_constant(VM_Version::CPU_STXR_PREFETCH) \
- declare_constant(VM_Version::CPU_A53MAC)
+#define DECLARE_INT_CPU_FEATURE_CONSTANT(id, name, bit) GENERATE_VM_INT_CONSTANT_ENTRY(VM_Version::CPU_##id)
+#define VM_INT_CPU_FEATURE_CONSTANTS CPU_FEATURE_FLAGS(DECLARE_INT_CPU_FEATURE_CONSTANT)
#endif
-
#ifdef X86
#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
@@ -778,53 +766,8 @@
declare_constant(frame::interpreter_frame_sender_sp_offset) \
declare_constant(frame::interpreter_frame_last_sp_offset)
-#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
- declare_constant(VM_Version::CPU_CX8) \
- declare_constant(VM_Version::CPU_CMOV) \
- declare_constant(VM_Version::CPU_FXSR) \
- declare_constant(VM_Version::CPU_HT) \
- declare_constant(VM_Version::CPU_MMX) \
- declare_constant(VM_Version::CPU_3DNOW_PREFETCH) \
- declare_constant(VM_Version::CPU_SSE) \
- declare_constant(VM_Version::CPU_SSE2) \
- declare_constant(VM_Version::CPU_SSE3) \
- declare_constant(VM_Version::CPU_SSSE3) \
- declare_constant(VM_Version::CPU_SSE4A) \
- declare_constant(VM_Version::CPU_SSE4_1) \
- declare_constant(VM_Version::CPU_SSE4_2) \
- declare_constant(VM_Version::CPU_POPCNT) \
- declare_constant(VM_Version::CPU_LZCNT) \
- declare_constant(VM_Version::CPU_TSC) \
- declare_constant(VM_Version::CPU_TSCINV) \
- declare_constant(VM_Version::CPU_AVX) \
- declare_constant(VM_Version::CPU_AVX2) \
- declare_constant(VM_Version::CPU_AES) \
- declare_constant(VM_Version::CPU_ERMS) \
- declare_constant(VM_Version::CPU_CLMUL) \
- declare_constant(VM_Version::CPU_BMI1) \
- declare_constant(VM_Version::CPU_BMI2) \
- declare_constant(VM_Version::CPU_RTM) \
- declare_constant(VM_Version::CPU_ADX) \
- declare_constant(VM_Version::CPU_AVX512F) \
- declare_constant(VM_Version::CPU_AVX512DQ) \
- declare_constant(VM_Version::CPU_AVX512PF) \
- declare_constant(VM_Version::CPU_AVX512ER) \
- declare_constant(VM_Version::CPU_AVX512CD) \
- declare_constant(VM_Version::CPU_AVX512BW) \
- declare_constant(VM_Version::CPU_AVX512VL) \
- declare_constant(VM_Version::CPU_SHA) \
- declare_constant(VM_Version::CPU_FMA) \
- declare_constant(VM_Version::CPU_VZEROUPPER) \
- declare_constant(VM_Version::CPU_AVX512_VPOPCNTDQ) \
- declare_constant(VM_Version::CPU_AVX512_VPCLMULQDQ) \
- declare_constant(VM_Version::CPU_AVX512_VAES) \
- declare_constant(VM_Version::CPU_AVX512_VNNI) \
- declare_constant(VM_Version::CPU_FLUSH) \
- declare_constant(VM_Version::CPU_FLUSHOPT) \
- declare_constant(VM_Version::CPU_CLWB) \
- declare_constant(VM_Version::CPU_AVX512_VBMI2) \
- declare_constant(VM_Version::CPU_AVX512_VBMI) \
- declare_constant(VM_Version::CPU_HV)
+#define DECLARE_LONG_CPU_FEATURE_CONSTANT(id, name, bit) GENERATE_VM_LONG_CONSTANT_ENTRY(VM_Version::CPU_##id)
+#define VM_LONG_CPU_FEATURE_CONSTANTS CPU_FEATURE_FLAGS(DECLARE_LONG_CPU_FEATURE_CONSTANT)
#endif
@@ -901,7 +844,9 @@ VMIntConstantEntry JVMCIVMStructs::localHotSpotVMIntConstants[] = {
GENERATE_VM_INT_CONSTANT_WITH_VALUE_ENTRY,
GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY)
#endif
-
+#ifdef VM_INT_CPU_FEATURE_CONSTANTS
+ VM_INT_CPU_FEATURE_CONSTANTS
+#endif
GENERATE_VM_INT_CONSTANT_LAST_ENTRY()
};
@@ -914,17 +859,17 @@ VMLongConstantEntry JVMCIVMStructs::localHotSpotVMLongConstants[] = {
GENERATE_C1_VM_LONG_CONSTANT_ENTRY,
GENERATE_C2_VM_LONG_CONSTANT_ENTRY,
GENERATE_C2_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY)
-
+#ifdef VM_LONG_CPU_FEATURE_CONSTANTS
+ VM_LONG_CPU_FEATURE_CONSTANTS
+#endif
GENERATE_VM_LONG_CONSTANT_LAST_ENTRY()
};
+#undef DECLARE_CPU_FEATURE_FLAG
VMAddressEntry JVMCIVMStructs::localHotSpotVMAddresses[] = {
VM_ADDRESSES(GENERATE_VM_ADDRESS_ENTRY,
GENERATE_PREPROCESSOR_VM_ADDRESS_ENTRY,
GENERATE_VM_FUNCTION_ENTRY)
- VM_ADDRESSES_COMPILER_RUNTIME(GENERATE_VM_ADDRESS_ENTRY,
- GENERATE_PREPROCESSOR_VM_ADDRESS_ENTRY,
- GENERATE_VM_FUNCTION_ENTRY)
VM_ADDRESSES_OS(GENERATE_VM_ADDRESS_ENTRY,
GENERATE_PREPROCESSOR_VM_ADDRESS_ENTRY,
GENERATE_VM_FUNCTION_ENTRY)
diff --git a/src/hotspot/share/jvmci/vmSymbols_jvmci.hpp b/src/hotspot/share/jvmci/vmSymbols_jvmci.hpp
index 801c1cdd2b1..5db71140976 100644
--- a/src/hotspot/share/jvmci/vmSymbols_jvmci.hpp
+++ b/src/hotspot/share/jvmci/vmSymbols_jvmci.hpp
@@ -125,12 +125,8 @@
template(callToString_signature, "(Ljava/lang/Object;)Ljava/lang/String;") \
template(getName_name, "getName") \
template(bootstrapFinished_name, "bootstrapFinished") \
- template(forTypeChar_name, "forTypeChar") \
- template(forTypeChar_signature, "(CJ)Ljdk/vm/ci/meta/PrimitiveConstant;") \
- template(forFloat_name, "forFloat") \
- template(forFloat_signature, "(F)Ljdk/vm/ci/meta/PrimitiveConstant;") \
- template(forDouble_name, "forDouble") \
- template(forDouble_signature, "(D)Ljdk/vm/ci/meta/PrimitiveConstant;") \
+ template(forPrimitive_name, "forPrimitive") \
+ template(forPrimitive_signature, "(Ljdk/vm/ci/meta/JavaKind;J)Ljdk/vm/ci/meta/PrimitiveConstant;") \
template(method_string_bool_long_signature, "(Ljdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl;Ljava/lang/String;ZJ)V") \
template(initializeSavedProperties_name, "initializeSavedProperties") \
diff --git a/src/hotspot/share/logging/log.hpp b/src/hotspot/share/logging/log.hpp
index b0d6d83434e..068b640fb39 100644
--- a/src/hotspot/share/logging/log.hpp
+++ b/src/hotspot/share/logging/log.hpp
@@ -28,7 +28,6 @@
#include "logging/logPrefix.hpp"
#include "logging/logTagSet.hpp"
#include "logging/logTag.hpp"
-#include "runtime/os.hpp"
#include "utilities/debug.hpp"
class LogMessageBuffer;
diff --git a/src/hotspot/share/logging/logConfiguration.cpp b/src/hotspot/share/logging/logConfiguration.cpp
index 556a34f3672..54589926759 100644
--- a/src/hotspot/share/logging/logConfiguration.cpp
+++ b/src/hotspot/share/logging/logConfiguration.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
#include "logging/logTagSet.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/semaphore.hpp"
#include "utilities/globalDefinitions.hpp"
diff --git a/src/hotspot/share/logging/logDecorations.cpp b/src/hotspot/share/logging/logDecorations.cpp
index 413e16dad18..eb4ae0b10f6 100644
--- a/src/hotspot/share/logging/logDecorations.cpp
+++ b/src/hotspot/share/logging/logDecorations.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#include "logging/logConfiguration.hpp"
#include "logging/logDecorations.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "services/management.hpp"
diff --git a/src/hotspot/share/logging/logDecorators.cpp b/src/hotspot/share/logging/logDecorators.cpp
index 335699714c2..389b6a01e7b 100644
--- a/src/hotspot/share/logging/logDecorators.cpp
+++ b/src/hotspot/share/logging/logDecorators.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "logging/logDecorators.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
const LogDecorators LogDecorators::None = LogDecorators(0);
diff --git a/src/hotspot/share/logging/logFileOutput.cpp b/src/hotspot/share/logging/logFileOutput.cpp
index 40a499c57b9..22f01186b88 100644
--- a/src/hotspot/share/logging/logFileOutput.cpp
+++ b/src/hotspot/share/logging/logFileOutput.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "logging/logFileOutput.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/arguments.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/defaultStream.hpp"
diff --git a/src/hotspot/share/logging/logOutput.cpp b/src/hotspot/share/logging/logOutput.cpp
index a857f425477..c21ed51f322 100644
--- a/src/hotspot/share/logging/logOutput.cpp
+++ b/src/hotspot/share/logging/logOutput.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
#include "logging/logTagSet.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
LogOutput::~LogOutput() {
os::free(_config_string);
diff --git a/src/hotspot/share/logging/logSelection.cpp b/src/hotspot/share/logging/logSelection.cpp
index 719b28a9803..84ecb413b9b 100644
--- a/src/hotspot/share/logging/logSelection.cpp
+++ b/src/hotspot/share/logging/logSelection.cpp
@@ -27,7 +27,7 @@
#include "logging/log.hpp"
#include "logging/logSelection.hpp"
#include "logging/logTagSet.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
diff --git a/src/hotspot/share/logging/logSelectionList.cpp b/src/hotspot/share/logging/logSelectionList.cpp
index ce8c9b6966f..328267b312a 100644
--- a/src/hotspot/share/logging/logSelectionList.cpp
+++ b/src/hotspot/share/logging/logSelectionList.cpp
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "logging/logSelectionList.hpp"
#include "logging/logTagSet.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
static const char* DefaultExpressionString = "all";
diff --git a/src/hotspot/share/logging/logStream.cpp b/src/hotspot/share/logging/logStream.cpp
index c31637e8442..f4e5a89584b 100644
--- a/src/hotspot/share/logging/logStream.cpp
+++ b/src/hotspot/share/logging/logStream.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "runtime/os.hpp"
#include "utilities/align.hpp"
LogStream::LineBuffer::LineBuffer()
diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp
index 76fec871911..63a5fecdb00 100644
--- a/src/hotspot/share/logging/logTag.hpp
+++ b/src/hotspot/share/logging/logTag.hpp
@@ -36,7 +36,6 @@
LOG_TAG(age) \
LOG_TAG(alloc) \
LOG_TAG(annotation) \
- LOG_TAG(aot) \
LOG_TAG(arguments) \
LOG_TAG(attach) \
LOG_TAG(barrier) \
@@ -174,6 +173,7 @@
LOG_TAG(stringtable) \
LOG_TAG(subclass) \
LOG_TAG(survivor) \
+ LOG_TAG(suspend) \
LOG_TAG(sweep) \
LOG_TAG(symboltable) \
LOG_TAG(system) \
diff --git a/src/hotspot/share/memory/heap.cpp b/src/hotspot/share/memory/heap.cpp
index f520f8eb131..60e2ad451c0 100644
--- a/src/hotspot/share/memory/heap.cpp
+++ b/src/hotspot/share/memory/heap.cpp
@@ -207,7 +207,7 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
_log2_segment_size = exact_log2(segment_size);
// Reserve and initialize space for _memory.
- const size_t page_size = ReservedSpace::actual_reserved_page_size(rs);
+ const size_t page_size = rs.page_size();
const size_t granularity = os::vm_allocation_granularity();
const size_t c_size = align_up(committed_size, page_size);
assert(c_size <= rs.size(), "alignment made committed size to large");
diff --git a/src/hotspot/share/memory/heap.hpp b/src/hotspot/share/memory/heap.hpp
index a3746b03909..7405cfe5943 100644
--- a/src/hotspot/share/memory/heap.hpp
+++ b/src/hotspot/share/memory/heap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -171,16 +171,7 @@ class CodeHeap : public CHeapObj {
// Containment means "contained in committed space".
bool contains(const void* p) const { return low() <= p && p < high(); }
bool contains_blob(const CodeBlob* blob) const {
- // AOT CodeBlobs (i.e. AOTCompiledMethod) objects aren't allocated in the AOTCodeHeap but on the C-Heap.
- // Only the code they are pointing to is located in the AOTCodeHeap. All other CodeBlobs are allocated
- // directly in their corresponding CodeHeap with their code appended to the actual C++ object.
- // So all CodeBlobs except AOTCompiledMethod are continuous in memory with their data and code while
- // AOTCompiledMethod and their code/data is distributed in the C-Heap. This means we can use the
- // address of a CodeBlob object in order to locate it in its heap while we have to use the address
- // of the actual code an AOTCompiledMethod object is pointing to in order to locate it.
- // Notice that for an ordinary CodeBlob with code size zero, code_begin() may point beyond the object!
- const void* start = AOT_ONLY( (code_blob_type() == CodeBlobType::AOT) ? blob->code_begin() : ) (void*)blob;
- return contains(start);
+ return contains((void*)blob);
}
virtual void* find_start(void* p) const; // returns the block containing p or NULL
diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp
index 13bd426d35c..af8e6793da8 100644
--- a/src/hotspot/share/memory/metaspace.cpp
+++ b/src/hotspot/share/memory/metaspace.cpp
@@ -23,12 +23,11 @@
*/
#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
-#include "memory/filemap.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
@@ -41,7 +40,6 @@
#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
@@ -565,7 +563,7 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
assert(CompressedKlassPointers::is_valid_base(a), "Sanity");
while (a < search_ranges[i].to) {
ReservedSpace rs(size, Metaspace::reserve_alignment(),
- false /*large_pages*/, (char*)a);
+ os::vm_page_size(), (char*)a);
if (rs.is_reserved()) {
assert(a == (address)rs.base(), "Sanity");
return rs;
@@ -581,7 +579,7 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
return ReservedSpace();
#else
// Default implementation: Just reserve anywhere.
- return ReservedSpace(size, Metaspace::reserve_alignment(), false, (char*)NULL);
+ return ReservedSpace(size, Metaspace::reserve_alignment(), os::vm_page_size(), (char*)NULL);
#endif // AARCH64
}
@@ -719,7 +717,7 @@ void Metaspace::global_initialize() {
if (base != NULL) {
if (CompressedKlassPointers::is_valid_base(base)) {
rs = ReservedSpace(size, Metaspace::reserve_alignment(),
- false /* large */, (char*)base);
+ os::vm_page_size(), (char*)base);
}
}
diff --git a/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp b/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp
index e11c2777e1d..ffc8630e9be 100644
--- a/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp
+++ b/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -29,6 +29,7 @@
#include "memory/metaspace/metaspaceSettings.hpp"
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
+#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
diff --git a/src/hotspot/share/memory/metaspace/testHelpers.cpp b/src/hotspot/share/memory/metaspace/testHelpers.cpp
index 5259c48609f..901f48f9aec 100644
--- a/src/hotspot/share/memory/metaspace/testHelpers.cpp
+++ b/src/hotspot/share/memory/metaspace/testHelpers.cpp
@@ -71,7 +71,7 @@ MetaspaceTestContext::MetaspaceTestContext(const char* name, size_t commit_limit
reserve_limit, Metaspace::reserve_alignment_words());
if (reserve_limit > 0) {
// have reserve limit -> non-expandable context
- _rs = ReservedSpace(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), false);
+ _rs = ReservedSpace(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), os::vm_page_size());
_context = MetaspaceContext::create_nonexpandable_context(name, _rs, &_commit_limiter);
} else {
// no reserve limit -> expandable vslist
diff --git a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp
index a4aa1e722c2..a0e280b55a9 100644
--- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp
@@ -244,8 +244,7 @@ VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
DEBUG_ONLY(assert_is_aligned(word_size, chunklevel::MAX_CHUNK_WORD_SIZE);)
ReservedSpace rs(word_size * BytesPerWord,
Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
- false // large
- );
+ os::vm_page_size());
if (!rs.is_reserved()) {
vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
}
diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
index 1b2f722b029..999c4ad3641 100644
--- a/src/hotspot/share/memory/universe.cpp
+++ b/src/hotspot/share/memory/universe.cpp
@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
+#include "cds/heapShared.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.hpp"
@@ -44,11 +45,9 @@
#include "gc/shared/tlab_globals.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
-#include "memory/heapShared.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceCounters.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@@ -790,8 +789,6 @@ jint universe_init() {
MetaspaceCounters::initialize_performance_counters();
CompressedClassSpaceCounters::initialize_performance_counters();
- AOTLoader::universe_init();
-
// Checks 'AfterMemoryInit' constraints.
if (!JVMFlagLimit::check_all_constraints(JVMFlagConstraintPhase::AfterMemoryInit)) {
return JNI_EINVAL;
@@ -867,13 +864,17 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");
- bool use_large_pages = UseLargePages && is_aligned(alignment, os::large_page_size());
- assert(!UseLargePages
- || UseParallelGC
- || use_large_pages, "Wrong alignment to use large pages");
+ size_t page_size = os::vm_page_size();
+ if (UseLargePages && is_aligned(alignment, os::large_page_size())) {
+ page_size = os::large_page_size();
+ } else {
+ // Parallel is the only collector that might opt out of using large pages
+ // for the heap.
+ assert(!UseLargePages || UseParallelGC , "Wrong alignment to use large pages");
+ }
// Now create the space.
- ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, AllocateHeapAt);
+ ReservedHeapSpace total_rs(total_reserved, alignment, page_size, AllocateHeapAt);
if (total_rs.is_reserved()) {
assert((total_reserved == total_rs.size()) && ((uintptr_t)total_rs.base() % alignment == 0),
@@ -899,7 +900,7 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
// satisfy compiler
ShouldNotReachHere();
- return ReservedHeapSpace(0, 0, false);
+ return ReservedHeapSpace(0, 0, os::vm_page_size());
}
OopStorage* Universe::vm_weak() {
diff --git a/src/hotspot/share/memory/virtualspace.cpp b/src/hotspot/share/memory/virtualspace.cpp
index 615a75c1b8f..acfb9d3f3c3 100644
--- a/src/hotspot/share/memory/virtualspace.cpp
+++ b/src/hotspot/share/memory/virtualspace.cpp
@@ -31,7 +31,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
@@ -50,8 +50,7 @@ ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) {
// large and normal pages.
size_t page_size = os::page_size_for_region_unaligned(size, 1);
size_t alignment = os::vm_allocation_granularity();
- bool large_pages = page_size != (size_t)os::vm_page_size();
- initialize(size, alignment, large_pages, NULL, false);
+ initialize(size, alignment, page_size, NULL, false);
}
ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
@@ -59,25 +58,25 @@ ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_
// and normal pages. If the size is not a multiple of the
// page size it will be aligned up to achieve this.
size_t alignment = os::vm_allocation_granularity();;
- bool large_pages = preferred_page_size != (size_t)os::vm_page_size();
- if (large_pages) {
+ if (preferred_page_size != (size_t)os::vm_page_size()) {
alignment = MAX2(preferred_page_size, alignment);
size = align_up(size, alignment);
}
- initialize(size, alignment, large_pages, NULL, false);
+ initialize(size, alignment, preferred_page_size, NULL, false);
}
-ReservedSpace::ReservedSpace(size_t size, size_t alignment,
- bool large,
+ReservedSpace::ReservedSpace(size_t size,
+ size_t alignment,
+ size_t page_size,
char* requested_address) : _fd_for_heap(-1) {
- initialize(size, alignment, large, requested_address, false);
+ initialize(size, alignment, page_size, requested_address, false);
}
-ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
+ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t page_size,
bool special, bool executable) : _fd_for_heap(-1) {
assert((size % os::vm_allocation_granularity()) == 0,
"size not allocation aligned");
- initialize_members(base, size, alignment, special, executable);
+ initialize_members(base, size, alignment, page_size, special, executable);
}
// Helper method
@@ -115,46 +114,149 @@ static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped
}
}
-// Helper method.
-static bool failed_to_reserve_as_requested(char* base, char* requested_address,
- const size_t size, bool special, bool is_file_mapped = false)
-{
- if (base == requested_address || requested_address == NULL)
+// Helper method
+static bool failed_to_reserve_as_requested(char* base, char* requested_address) {
+ if (base == requested_address || requested_address == NULL) {
return false; // did not fail
+ }
if (base != NULL) {
// Different reserve address may be acceptable in other cases
// but for compressed oops heap should be at requested address.
assert(UseCompressedOops, "currently requested address used only for compressed oops");
log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
- // OS ignored requested address. Try different address.
- if (special) {
- if (!os::release_memory_special(base, size)) {
- fatal("os::release_memory_special failed");
- }
- } else {
- unmap_or_release_memory(base, size, is_file_mapped);
- }
}
return true;
}
+static bool use_explicit_large_pages(size_t page_size) {
+ return !os::can_commit_large_page_memory() &&
+ page_size != (size_t) os::vm_page_size();
+}
+
+static bool large_pages_requested() {
+ return UseLargePages &&
+ (!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes));
+}
+
+static char* reserve_memory(char* requested_address, const size_t size,
+ const size_t alignment, int fd, bool exec) {
+ char* base;
+ // If the memory was requested at a particular address, use
+ // os::attempt_reserve_memory_at() to avoid mapping over something
+ // important. If the reservation fails, return NULL.
+ if (requested_address != 0) {
+ assert(is_aligned(requested_address, alignment),
+ "Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
+ p2i(requested_address), alignment);
+ base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec);
+ } else {
+ // Optimistically assume that the OS returns an aligned base pointer.
+ // When reserving a large address range, most OSes seem to align to at
+ // least 64K.
+ base = map_or_reserve_memory(size, fd, exec);
+ // Check alignment constraints. This is only needed when there is
+ // no requested address.
+ if (!is_aligned(base, alignment)) {
+ // Base not aligned, retry.
+ unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/);
+ // Map using the requested alignment.
+ base = map_or_reserve_memory_aligned(size, alignment, fd, exec);
+ }
+ }
+
+ return base;
+}
+
+static char* reserve_memory_special(char* requested_address, const size_t size,
+ const size_t alignment, bool exec) {
+
+ log_trace(pagesize)("Attempt special mapping: size: " SIZE_FORMAT "%s, "
+ "alignment: " SIZE_FORMAT "%s",
+ byte_size_in_exact_unit(size), exact_unit_for_byte_size(size),
+ byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));
+
+ char* base = os::reserve_memory_special(size, alignment, requested_address, exec);
+ if (base != NULL) {
+ // Check alignment constraints.
+ assert(is_aligned(base, alignment),
+ "reserve_memory_special() returned an unaligned address, base: " PTR_FORMAT
+ " alignment: " SIZE_FORMAT_HEX,
+ p2i(base), alignment);
+ } else {
+ if (large_pages_requested()) {
+ log_debug(gc, heap, coops)("Reserve regular memory without large pages");
+ }
+ }
+ return base;
+}
+
void ReservedSpace::clear_members() {
- initialize_members(NULL, 0, 0, false, false);
+ initialize_members(NULL, 0, 0, 0, false, false);
}
void ReservedSpace::initialize_members(char* base, size_t size, size_t alignment,
- bool special, bool executable) {
+ size_t page_size, bool special, bool executable) {
_base = base;
_size = size;
_alignment = alignment;
+ _page_size = page_size;
_special = special;
_executable = executable;
_noaccess_prefix = 0;
}
+void ReservedSpace::reserve(size_t size,
+ size_t alignment,
+ size_t page_size,
+ char* requested_address,
+ bool executable) {
+ assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment");
+
+ // There are basically three different cases that we need to handle below:
+ // - Mapping backed by a file
+ // - Mapping backed by explicit large pages
+ // - Mapping backed by normal pages or transparent huge pages
+ // The first two have restrictions that requires the whole mapping to be
+ // committed up front. To record this the ReservedSpace is marked 'special'.
+
+ if (_fd_for_heap != -1) {
+ // When there is a backing file directory for this space then whether
+ // large pages are allocated is up to the filesystem of the backing file.
+ // So UseLargePages is not taken into account for this reservation.
+ char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable);
+ if (base != NULL) {
+ initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
+ }
+ // Always return, not possible to fall back to reservation not using a file.
+ return;
+ } else if (use_explicit_large_pages(page_size)) {
+ // System can't commit large pages i.e. use transparent huge pages and
+ // the caller requested large pages. To satisfy this request we use
+ // explicit large pages and these have to be committed up front to ensure
+ // no reservations are lost.
+
+ char* base = reserve_memory_special(requested_address, size, alignment, executable);
+ if (base != NULL) {
+ // Successful reservation using large pages.
+ initialize_members(base, size, alignment, page_size, true, executable);
+ return;
+ }
+ // Failed to reserve explicit large pages, fall back to normal reservation.
+ page_size = os::vm_page_size();
+ }
+
+ // Not a 'special' reservation.
+ char* base = reserve_memory(requested_address, size, alignment, -1, executable);
+ if (base != NULL) {
+ // Successful mapping.
+ initialize_members(base, size, alignment, page_size, false, executable);
+ }
+}
-void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
+void ReservedSpace::initialize(size_t size,
+ size_t alignment,
+ size_t page_size,
char* requested_address,
bool executable) {
const size_t granularity = os::vm_allocation_granularity();
@@ -164,6 +266,8 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
"alignment not aligned to os::vm_allocation_granularity()");
assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
"not a power of 2");
+ assert(page_size >= (size_t) os::vm_page_size(), "Invalid page size");
+ assert(is_power_of_2(page_size), "Invalid page size");
clear_members();
@@ -171,101 +275,23 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
return;
}
+ // Adjust alignment to not be 0.
alignment = MAX2(alignment, (size_t)os::vm_page_size());
- // If OS doesn't support demand paging for large page memory, we need
- // to use reserve_memory_special() to reserve and pin the entire region.
- // If there is a backing file directory for this space then whether
- // large pages are allocated is up to the filesystem of the backing file.
- // So we ignore the UseLargePages flag in this case.
- bool special = large && !os::can_commit_large_page_memory();
- if (special && _fd_for_heap != -1) {
- special = false;
- if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
- !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
- log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
- }
- }
-
- char* base = NULL;
-
- if (special) {
+ // Reserve the memory.
+ reserve(size, alignment, page_size, requested_address, executable);
- base = os::reserve_memory_special(size, alignment, requested_address, executable);
-
- if (base != NULL) {
- if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
- // OS ignored requested address. Try different address.
- return;
- }
- // Check alignment constraints.
- assert((uintptr_t) base % alignment == 0,
- "Large pages returned a non-aligned address, base: "
- PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
- p2i(base), alignment);
- } else {
- // failed; try to reserve regular memory below. Reservation
- // should not be marked as special.
- special = false;
- if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
- !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
- log_debug(gc, heap, coops)("Reserve regular memory without large pages");
- }
- }
- }
-
- if (base == NULL) {
- // Optimistically assume that the OS returns an aligned base pointer.
- // When reserving a large address range, most OSes seem to align to at
- // least 64K.
-
- // If the memory was requested at a particular address, use
- // os::attempt_reserve_memory_at() to avoid over mapping something
- // important. If available space is not detected, return NULL.
-
- if (requested_address != 0) {
- base = attempt_map_or_reserve_memory_at(requested_address, size, _fd_for_heap, executable);
- if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
- // OS ignored requested address. Try different address.
- base = NULL;
- }
- } else {
- base = map_or_reserve_memory(size, _fd_for_heap, executable);
- }
-
- if (base == NULL) return;
-
- // Check alignment constraints
- if ((((size_t)base) & (alignment - 1)) != 0) {
- // Base not aligned, retry
- unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
-
- // Make sure that size is aligned
- size = align_up(size, alignment);
- base = map_or_reserve_memory_aligned(size, alignment, _fd_for_heap, executable);
-
- if (requested_address != 0 &&
- failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
- // As a result of the alignment constraints, the allocated base differs
- // from the requested address. Return back to the caller who can
- // take remedial action (like try again without a requested address).
- assert(_base == NULL, "should be");
- return;
- }
- }
- }
- // If heap is reserved with a backing file, the entire space has been committed. So set the special flag to true
- if (_fd_for_heap != -1) {
- special = true;
+ // Check that the requested address is used if given.
+ if (failed_to_reserve_as_requested(_base, requested_address)) {
+ // OS ignored the requested address, release the reservation.
+ release();
+ return;
}
-
- // Done
- initialize_members(base, size, alignment, special, executable);
}
ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment) {
assert(partition_size <= size(), "partition failed");
- ReservedSpace result(base(), partition_size, alignment, special(), executable());
+ ReservedSpace result(base(), partition_size, alignment, page_size(), special(), executable());
return result;
}
@@ -274,7 +300,7 @@ ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
assert(partition_size <= size(), "partition failed");
ReservedSpace result(base() + partition_size, size() - partition_size,
- alignment, special(), executable());
+ alignment, page_size(), special(), executable());
return result;
}
@@ -293,25 +319,6 @@ size_t ReservedSpace::allocation_align_size_up(size_t size) {
return align_up(size, os::vm_allocation_granularity());
}
-size_t ReservedSpace::actual_reserved_page_size(const ReservedSpace& rs) {
- size_t page_size = os::vm_page_size();
- if (UseLargePages) {
- // There are two ways to manage large page memory.
- // 1. OS supports committing large page memory.
- // 2. OS doesn't support committing large page memory so ReservedSpace manages it.
- // And ReservedSpace calls it 'special'. If we failed to set 'special',
- // we reserved memory without large page.
- if (os::can_commit_large_page_memory() || rs.special()) {
- // An alignment at ReservedSpace comes from preferred page size or
- // heap alignment, and if the alignment came from heap alignment, it could be
- // larger than large pages size. So need to cap with the large page size.
- page_size = MIN2(rs.alignment(), os::large_page_size());
- }
- }
-
- return page_size;
-}
-
void ReservedSpace::release() {
if (is_reserved()) {
char *real_base = _base - _noaccess_prefix;
@@ -368,76 +375,23 @@ void ReservedHeapSpace::establish_noaccess_prefix() {
// NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
size_t alignment,
- bool large,
+ size_t page_size,
char* requested_address) {
if (_base != NULL) {
// We tried before, but we didn't like the address delivered.
release();
}
- // If OS doesn't support demand paging for large page memory, we need
- // to use reserve_memory_special() to reserve and pin the entire region.
- // If there is a backing file directory for this space then whether
- // large pages are allocated is up to the filesystem of the backing file.
- // So we ignore the UseLargePages flag in this case.
- bool special = large && !os::can_commit_large_page_memory();
- if (special && _fd_for_heap != -1) {
- special = false;
- if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
- !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
- log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
- }
- }
- char* base = NULL;
-
+ // Try to reserve the memory for the heap.
log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
" heap of size " SIZE_FORMAT_HEX,
p2i(requested_address),
size);
- if (special) {
- base = os::reserve_memory_special(size, alignment, requested_address, false);
-
- if (base != NULL) {
- // Check alignment constraints.
- assert((uintptr_t) base % alignment == 0,
- "Large pages returned a non-aligned address, base: "
- PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
- p2i(base), alignment);
- }
- }
-
- if (base == NULL) {
- // Failed; try to reserve regular memory below. Reservation
- // should not be marked as special.
- special = false;
- if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
- !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
- log_debug(gc, heap, coops)("Reserve regular memory without large pages");
- }
-
- if (requested_address != 0) {
- base = attempt_map_or_reserve_memory_at(requested_address, size, _fd_for_heap, executable());
- } else {
- // Optimistically assume that the OSes returns an aligned base pointer.
- // When reserving a large address range, most OSes seem to align to at
- // least 64K.
- // If the returned memory is not aligned we will release and retry.
- base = map_or_reserve_memory(size, _fd_for_heap, executable());
- }
- }
- if (base == NULL) { return; }
-
- // If heap is reserved with a backing file, the entire space has been committed. So set the special flag to true
- if (_fd_for_heap != -1) {
- special = true;
- }
-
- // Done
- initialize_members(base, size, alignment, special, false);
+ reserve(size, alignment, page_size, requested_address, false);
- // Check alignment constraints
- if (!is_aligned(base, alignment)) {
+ // Check alignment constraints.
+ if (is_reserved() && !is_aligned(_base, _alignment)) {
// Base not aligned, retry.
release();
}
@@ -450,7 +404,7 @@ void ReservedHeapSpace::try_reserve_range(char *highest_start,
char *upper_bound,
size_t size,
size_t alignment,
- bool large) {
+ size_t page_size) {
const size_t attach_range = highest_start - lowest_start;
// Cap num_attempts at possible number.
// At least one is possible even for 0 sized attach range.
@@ -466,7 +420,7 @@ void ReservedHeapSpace::try_reserve_range(char *highest_start,
attach_point <= highest_start && // Avoid wrap around.
((_base == NULL) ||
(_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
- try_reserve_heap(size, alignment, large, attach_point);
+ try_reserve_heap(size, alignment, page_size, attach_point);
attach_point -= stepsize;
}
}
@@ -517,7 +471,7 @@ static char** get_attach_addresses_for_disjoint_mode() {
return (char**) &addresses[start];
}
-void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
+void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size) {
guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
"can not allocate compressed oop heap for this size");
guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
@@ -543,7 +497,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
// Attempt to alloc at user-given address.
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
- try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
+ try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
release();
}
@@ -569,7 +523,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
try_reserve_range(highest_start, lowest_start, attach_point_alignment,
- aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
+ aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
}
// zerobased: Attempt to allocate in the lower 32G.
@@ -601,7 +555,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
}
lowest_start = align_up(lowest_start, attach_point_alignment);
try_reserve_range(highest_start, lowest_start, attach_point_alignment,
- aligned_heap_base_min_address, zerobased_max, size, alignment, large);
+ aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
}
// Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
@@ -617,19 +571,19 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
!CompressedOops::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address.
char* const attach_point = addresses[i];
assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
- try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
+ try_reserve_heap(size + noaccess_prefix, alignment, page_size, attach_point);
i++;
}
// Last, desperate try without any placement.
if (_base == NULL) {
log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
- initialize(size + noaccess_prefix, alignment, large, NULL, false);
+ initialize(size + noaccess_prefix, alignment, page_size, NULL, false);
}
}
}
-ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
+ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) : ReservedSpace() {
if (size == 0) {
return;
@@ -641,13 +595,19 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large,
vm_exit_during_initialization(
err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
}
+ // When there is a backing file directory for this space then whether
+ // large pages are allocated is up to the filesystem of the backing file.
+ // If requested, let the user know that explicit large pages can't be used.
+ if (use_explicit_large_pages(page_size) && large_pages_requested()) {
+ log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
+ }
}
// Heap size should be aligned to alignment, too.
guarantee(is_aligned(size, alignment), "set by caller");
if (UseCompressedOops) {
- initialize_compressed_heap(size, alignment, large);
+ initialize_compressed_heap(size, alignment, page_size);
if (_size > size) {
// We allocated heap with noaccess prefix.
// It can happen we get a zerobased/unscaled heap with noaccess prefix,
@@ -655,7 +615,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large,
establish_noaccess_prefix();
}
} else {
- initialize(size, alignment, large, NULL, false);
+ initialize(size, alignment, page_size, NULL, false);
}
assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
@@ -680,8 +640,8 @@ MemRegion ReservedHeapSpace::region() const {
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
size_t rs_align,
- bool large) : ReservedSpace() {
- initialize(r_size, rs_align, large, /*requested address*/ NULL, /*executable*/ true);
+ size_t rs_page_size) : ReservedSpace() {
+ initialize(r_size, rs_align, rs_page_size, /*requested address*/ NULL, /*executable*/ true);
MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
diff --git a/src/hotspot/share/memory/virtualspace.hpp b/src/hotspot/share/memory/virtualspace.hpp
index 48b00518d0d..a3b8683d354 100644
--- a/src/hotspot/share/memory/virtualspace.hpp
+++ b/src/hotspot/share/memory/virtualspace.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,14 +39,15 @@ class ReservedSpace {
size_t _size;
size_t _noaccess_prefix;
size_t _alignment;
+ size_t _page_size;
bool _special;
int _fd_for_heap;
private:
bool _executable;
// ReservedSpace
- ReservedSpace(char* base, size_t size, size_t alignment, bool special,
- bool executable);
+ ReservedSpace(char* base, size_t size, size_t alignment,
+ size_t page_size, bool special, bool executable);
protected:
// Helpers to clear and set members during initialization. Two members
// require special treatment:
@@ -57,12 +58,13 @@ class ReservedSpace {
// 0 during initialization.
void clear_members();
void initialize_members(char* base, size_t size, size_t alignment,
- bool special, bool executable);
+ size_t page_size, bool special, bool executable);
- void initialize(size_t size, size_t alignment, bool large,
- char* requested_address,
- bool executable);
+ void initialize(size_t size, size_t alignment, size_t page_size,
+ char* requested_address, bool executable);
+ void reserve(size_t size, size_t alignment, size_t page_size,
+ char* requested_address, bool executable);
public:
// Constructor
ReservedSpace();
@@ -74,7 +76,7 @@ class ReservedSpace {
// the given size is not aligned to that value, as the reservation will be
// aligned up to the final alignment in this case.
ReservedSpace(size_t size, size_t preferred_page_size);
- ReservedSpace(size_t size, size_t alignment, bool large,
+ ReservedSpace(size_t size, size_t alignment, size_t page_size,
char* requested_address = NULL);
// Accessors
@@ -82,6 +84,7 @@ class ReservedSpace {
size_t size() const { return _size; }
char* end() const { return _base + _size; }
size_t alignment() const { return _alignment; }
+ size_t page_size() const { return _page_size; }
bool special() const { return _special; }
bool executable() const { return _executable; }
size_t noaccess_prefix() const { return _noaccess_prefix; }
@@ -104,8 +107,6 @@ class ReservedSpace {
bool contains(const void* p) const {
return (base() <= ((char*)p)) && (((char*)p) < (base() + size()));
}
-
- static size_t actual_reserved_page_size(const ReservedSpace& rs);
};
ReservedSpace
@@ -122,19 +123,19 @@ ReservedSpace ReservedSpace::last_part(size_t partition_size)
// Class encapsulating behavior specific of memory space reserved for Java heap.
class ReservedHeapSpace : public ReservedSpace {
private:
- void try_reserve_heap(size_t size, size_t alignment, bool large,
+ void try_reserve_heap(size_t size, size_t alignment, size_t page_size,
char *requested_address);
void try_reserve_range(char *highest_start, char *lowest_start,
size_t attach_point_alignment, char *aligned_HBMA,
- char *upper_bound, size_t size, size_t alignment, bool large);
- void initialize_compressed_heap(const size_t size, size_t alignment, bool large);
+ char *upper_bound, size_t size, size_t alignment, size_t page_size);
+ void initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size);
// Create protection page at the beginning of the space.
void establish_noaccess_prefix();
public:
// Constructor. Tries to find a heap that is good for compressed oops.
// heap_allocation_directory is the path to the backing memory for Java heap. When set, Java heap will be allocated
// on the device which is managed by the file system where the directory resides.
- ReservedHeapSpace(size_t size, size_t forced_base_alignment, bool large, const char* heap_allocation_directory = NULL);
+ ReservedHeapSpace(size_t size, size_t forced_base_alignment, size_t page_size, const char* heap_allocation_directory = NULL);
// Returns the base to be used for compression, i.e. so that null can be
// encoded safely and implicit null checks can work.
char *compressed_oop_base() const { return _base - _noaccess_prefix; }
@@ -145,7 +146,7 @@ class ReservedHeapSpace : public ReservedSpace {
class ReservedCodeSpace : public ReservedSpace {
public:
// Constructor
- ReservedCodeSpace(size_t r_size, size_t rs_align, bool large);
+ ReservedCodeSpace(size_t r_size, size_t rs_align, size_t page_size);
};
// VirtualSpace is data structure for committing a previously reserved address range in smaller chunks.
@@ -208,14 +209,6 @@ class VirtualSpace {
char* low_boundary() const { return _low_boundary; }
char* high_boundary() const { return _high_boundary; }
-#if INCLUDE_AOT
- // Set boundaries for code section in AOT library.
- void set_low_boundary(char *p) { _low_boundary = p; }
- void set_high_boundary(char *p) { _high_boundary = p; }
- void set_low(char *p) { _low = p; }
- void set_high(char *p) { _high = p; }
-#endif
-
bool special() const { return _special; }
public:
diff --git a/src/hotspot/share/oops/compressedOops.cpp b/src/hotspot/share/oops/compressedOops.cpp
index 1f094ed49f5..3039f623d1e 100644
--- a/src/hotspot/share/oops/compressedOops.cpp
+++ b/src/hotspot/share/oops/compressedOops.cpp
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/memRegion.hpp"
@@ -65,8 +64,6 @@ void CompressedOops::initialize(const ReservedHeapSpace& heap_space) {
set_base((address)heap_space.compressed_oop_base());
}
- AOTLoader::set_narrow_oop_shift();
-
_heap_address_range = heap_space.region();
LogTarget(Debug, gc, heap, coops) lt;
@@ -254,9 +251,6 @@ void CompressedKlassPointers::initialize(address addr, size_t len) {
set_base(base);
set_shift(shift);
set_range(range);
-
- // Note: this may modify our shift.
- AOTLoader::set_narrow_klass_shift();
#else
fatal("64bit only.");
#endif
diff --git a/src/hotspot/share/oops/constMethod.cpp b/src/hotspot/share/oops/constMethod.cpp
index ebf7a1e6fab..8e3ceb80815 100644
--- a/src/hotspot/share/oops/constMethod.cpp
+++ b/src/hotspot/share/oops/constMethod.cpp
@@ -405,7 +405,11 @@ void ConstMethod::copy_annotations_from(ClassLoaderData* loader_data, ConstMetho
void ConstMethod::metaspace_pointers_do(MetaspaceClosure* it) {
log_trace(cds)("Iter(ConstMethod): %p", this);
- it->push(&_constants);
+ if (!method()->method_holder()->is_rewritten()) {
+ it->push(&_constants, MetaspaceClosure::_writable);
+ } else {
+ it->push(&_constants);
+ }
it->push(&_stackmap_data);
if (has_method_annotations()) {
it->push(method_annotations_addr());
@@ -419,7 +423,6 @@ void ConstMethod::metaspace_pointers_do(MetaspaceClosure* it) {
if (has_default_annotations()) {
it->push(default_annotations_addr());
}
- ConstMethod* this_ptr = this;
}
// Printing
diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp
index a4a60aaebd8..c1ddd4e89f2 100644
--- a/src/hotspot/share/oops/constantPool.cpp
+++ b/src/hotspot/share/oops/constantPool.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "cds/heapShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/metadataOnStackMark.hpp"
@@ -36,7 +37,6 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/heapShared.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/oopFactory.hpp"
@@ -356,6 +356,9 @@ void ConstantPool::add_dumped_interned_strings() {
// CDS support. Create a new resolved_references array.
void ConstantPool::restore_unshareable_info(TRAPS) {
+ if (!_pool_holder->is_linked() && !_pool_holder->is_rewritten()) {
+ return;
+ }
assert(is_constantPool(), "ensure C++ vtable is restored");
assert(on_stack(), "should always be set for shared constant pools");
assert(is_shared(), "should always be set for shared constant pools");
@@ -393,6 +396,9 @@ void ConstantPool::restore_unshareable_info(TRAPS) {
}
void ConstantPool::remove_unshareable_info() {
+ if (!_pool_holder->is_linked() && _pool_holder->is_shared_old_klass()) {
+ return;
+ }
// Resolved references are not in the shared archive.
// Save the length for restoration. It is not necessarily the same length
// as reference_map.length() if invokedynamic is saved. It is needed when
diff --git a/src/hotspot/share/oops/cpCache.cpp b/src/hotspot/share/oops/cpCache.cpp
index 1d21f6f18b7..1170d4cf500 100644
--- a/src/hotspot/share/oops/cpCache.cpp
+++ b/src/hotspot/share/oops/cpCache.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "cds/heapShared.hpp"
#include "classfile/resolutionErrors.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
@@ -33,7 +34,6 @@
#include "interpreter/rewriter.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
-#include "memory/heapShared.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
diff --git a/src/hotspot/share/oops/flatArrayKlass.cpp b/src/hotspot/share/oops/flatArrayKlass.cpp
index 850301911ae..8748cfd079a 100644
--- a/src/hotspot/share/oops/flatArrayKlass.cpp
+++ b/src/hotspot/share/oops/flatArrayKlass.cpp
@@ -331,15 +331,15 @@ void FlatArrayKlass::copy_array(arrayOop s, int src_pos,
}
-Klass* FlatArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) {
+Klass* FlatArrayKlass::array_klass(int n, TRAPS) {
assert(dimension() <= n, "check order of chain");
int dim = dimension();
if (dim == n) return this;
+ // lock-free read needs acquire semantics
if (higher_dimension_acquire() == NULL) {
- if (or_null) return NULL;
- ResourceMark rm;
+ ResourceMark rm(THREAD);
{
// Ensure atomic creation of higher dimensions
MutexLocker mu(THREAD, MultiArray_lock);
@@ -348,30 +348,45 @@ Klass* FlatArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) {
if (higher_dimension() == NULL) {
// Create multi-dim klass object and link them together
- Klass* k =
- ObjArrayKlass::allocate_objArray_klass(class_loader_data(), dim + 1, this, CHECK_NULL);
+ Klass* k = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), dim + 1, this, CHECK_NULL);
ObjArrayKlass* ak = ObjArrayKlass::cast(k);
ak->set_lower_dimension(this);
- OrderAccess::storestore();
+ // use 'release' to pair with lock-free load
release_set_higher_dimension(ak);
assert(ak->is_objArray_klass(), "incorrect initialization of ObjArrayKlass");
}
}
- } else {
- CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
}
ObjArrayKlass *ak = ObjArrayKlass::cast(higher_dimension());
- if (or_null) {
- return ak->array_klass_or_null(n);
- }
+ THREAD->as_Java_thread()->check_possible_safepoint();
return ak->array_klass(n, THREAD);
}
-Klass* FlatArrayKlass::array_klass_impl(bool or_null, TRAPS) {
- return array_klass_impl(or_null, dimension() + 1, THREAD);
+Klass* FlatArrayKlass::array_klass_or_null(int n) {
+
+ assert(dimension() <= n, "check order of chain");
+ int dim = dimension();
+ if (dim == n) return this;
+
+ // lock-free read needs acquire semantics
+ if (higher_dimension_acquire() == NULL) {
+ return NULL;
+ }
+
+ ObjArrayKlass *ak = ObjArrayKlass::cast(higher_dimension());
+ return ak->array_klass_or_null(n);
+}
+
+Klass* FlatArrayKlass::array_klass(TRAPS) {
+ return array_klass(dimension() + 1, THREAD);
+}
+
+Klass* FlatArrayKlass::array_klass_or_null() {
+ return array_klass_or_null(dimension() + 1);
}
+
ModuleEntry* FlatArrayKlass::module() const {
assert(element_klass() != NULL, "FlatArrayKlass returned unexpected NULL bottom_klass");
// The array is defined in the module of its bottom class
diff --git a/src/hotspot/share/oops/flatArrayKlass.hpp b/src/hotspot/share/oops/flatArrayKlass.hpp
index d265bacfe5e..7b61c614bbd 100644
--- a/src/hotspot/share/oops/flatArrayKlass.hpp
+++ b/src/hotspot/share/oops/flatArrayKlass.hpp
@@ -43,17 +43,18 @@ class FlatArrayKlass : public ArrayKlass {
// Constructor
FlatArrayKlass(Klass* element_klass, Symbol* name);
- protected:
- // Returns the ArrayKlass for n'th dimension.
- Klass* array_klass_impl(bool or_null, int n, TRAPS);
-
- // Returns the array class with this class as element type.
- Klass* array_klass_impl(bool or_null, TRAPS);
-
public:
FlatArrayKlass() {}
+ // Returns the ObjArrayKlass for n'th dimension.
+ virtual Klass* array_klass(int n, TRAPS);
+ virtual Klass* array_klass_or_null(int n);
+
+ // Returns the array class with this class as element type.
+ virtual Klass* array_klass(TRAPS);
+ virtual Klass* array_klass_or_null();
+
virtual InlineKlass* element_klass() const;
virtual void set_element_klass(Klass* k);
diff --git a/src/hotspot/share/oops/generateOopMap.cpp b/src/hotspot/share/oops/generateOopMap.cpp
index 8d2bf2e9b38..912decccb0b 100644
--- a/src/hotspot/share/oops/generateOopMap.cpp
+++ b/src/hotspot/share/oops/generateOopMap.cpp
@@ -2112,7 +2112,7 @@ GenerateOopMap::GenerateOopMap(const methodHandle& method) {
#endif
}
-void GenerateOopMap::compute_map(TRAPS) {
+bool GenerateOopMap::compute_map(Thread* current) {
#ifndef PRODUCT
if (TimeOopMap2) {
method()->print_short_name(tty);
@@ -2158,7 +2158,7 @@ void GenerateOopMap::compute_map(TRAPS) {
if (method()->code_size() == 0 || _max_locals + method()->max_stack() == 0) {
fill_stackmap_prolog(0);
fill_stackmap_epilog();
- return;
+ return true;
}
// Step 1: Compute all jump targets and their return value
if (!_got_error)
@@ -2170,25 +2170,20 @@ void GenerateOopMap::compute_map(TRAPS) {
// Step 3: Calculate stack maps
if (!_got_error)
- do_interpretation(THREAD);
+ do_interpretation(current);
// Step 4:Return results
if (!_got_error && report_results())
report_result();
- if (_got_error) {
- THROW_HANDLE(_exception);
- }
+ return !_got_error;
}
// Error handling methods
-// These methods create an exception for the current thread which is thrown
-// at the bottom of the call stack, when it returns to compute_map(). The
-// _got_error flag controls execution. NOT TODO: The VM exception propagation
-// mechanism using TRAPS/CHECKs could be used here instead but it would need
-// to be added as a parameter to every function and checked for every call.
-// The tons of extra code it would generate didn't seem worth the change.
//
+// If we compute from a suitable JavaThread then we create an exception for the GenerateOopMap
+// calling code to retrieve (via exception()) and throw if desired (in most cases errors are ignored).
+// Otherwise it is considered a fatal error to hit malformed bytecode.
void GenerateOopMap::error_work(const char *format, va_list ap) {
_got_error = true;
char msg_buffer[512];
@@ -2196,12 +2191,10 @@ void GenerateOopMap::error_work(const char *format, va_list ap) {
// Append method name
char msg_buffer2[512];
os::snprintf(msg_buffer2, sizeof(msg_buffer2), "%s in method %s", msg_buffer, method()->name()->as_C_string());
- if (Thread::current()->can_call_java()) {
- _exception = Exceptions::new_exception(Thread::current(),
- vmSymbols::java_lang_LinkageError(), msg_buffer2);
+ Thread* current = Thread::current();
+ if (current->can_call_java()) {
+ _exception = Exceptions::new_exception(current, vmSymbols::java_lang_LinkageError(), msg_buffer2);
} else {
- // We cannot instantiate an exception object from a compiler thread.
- // Exit the VM with a useful error message.
fatal("%s", msg_buffer2);
}
}
@@ -2587,7 +2580,9 @@ int ResolveOopMapConflicts::_nof_relocations = 0;
#endif
methodHandle ResolveOopMapConflicts::do_potential_rewrite(TRAPS) {
- compute_map(CHECK_(methodHandle()));
+ if (!compute_map(THREAD)) {
+ THROW_HANDLE_(exception(), methodHandle());
+ }
#ifndef PRODUCT
// Tracking and statistics
diff --git a/src/hotspot/share/oops/generateOopMap.hpp b/src/hotspot/share/oops/generateOopMap.hpp
index 2978c7e84d3..1f38ecc8acf 100644
--- a/src/hotspot/share/oops/generateOopMap.hpp
+++ b/src/hotspot/share/oops/generateOopMap.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -465,8 +465,11 @@ class GenerateOopMap {
public:
GenerateOopMap(const methodHandle& method);
- // Compute the map.
- void compute_map(TRAPS);
+ // Compute the map - returns true on success and false on error.
+ bool compute_map(Thread* current);
+ // Returns the exception related to any error, if the map was computed by a suitable JavaThread.
+ Handle exception() { return _exception; }
+
void result_for_basicblock(int bci); // Do a callback on fill_stackmap_for_opcodes for basicblock containing bci
// Query
@@ -565,7 +568,7 @@ class GeneratePairingInfo: public GenerateOopMap {
public:
GeneratePairingInfo(const methodHandle& method) : GenerateOopMap(method) {};
- // Call compute_map(CHECK) to generate info.
+ // Call compute_map() to generate info.
};
#endif // SHARE_OOPS_GENERATEOOPMAP_HPP
diff --git a/src/hotspot/share/oops/inlineKlass.cpp b/src/hotspot/share/oops/inlineKlass.cpp
index 043047385ac..ec8b8a9626f 100644
--- a/src/hotspot/share/oops/inlineKlass.cpp
+++ b/src/hotspot/share/oops/inlineKlass.cpp
@@ -179,20 +179,18 @@ void InlineKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle
InstanceKlass::restore_unshareable_info(loader_data, protection_domain, pkg_entry, CHECK);
}
-Klass* InlineKlass::array_klass_impl(bool or_null, int n, TRAPS) {
+Klass* InlineKlass::array_klass(int n, TRAPS) {
// Need load-acquire for lock-free read
if (array_klasses_acquire() == NULL) {
- if (or_null) return NULL;
-
ResourceMark rm(THREAD);
- JavaThread *jt = (JavaThread *)THREAD;
+ JavaThread *jt = THREAD->as_Java_thread();
{
// Atomic creation of array_klasses
MutexLocker ma(THREAD, MultiArray_lock);
- ArrayKlass* k = NULL;
// Check if update has already taken place
if (array_klasses() == NULL) {
+ ArrayKlass* k;
if (flatten_array()) {
k = FlatArrayKlass::allocate_klass(this, CHECK_NULL);
} else {
@@ -203,18 +201,30 @@ Klass* InlineKlass::array_klass_impl(bool or_null, int n, TRAPS) {
}
}
}
- // _this will always be set at this point
+ // array_klasses() will always be set at this point
ArrayKlass* ak = array_klasses();
- if (or_null) {
+ return ak->array_klass(n, THREAD);
+}
+
+Klass* InlineKlass::array_klass_or_null(int n) {
+ // Need load-acquire for lock-free read
+ ArrayKlass* ak = array_klasses_acquire();
+ if (ak == NULL) {
+ return NULL;
+ } else {
return ak->array_klass_or_null(n);
}
- return ak->array_klass(n, THREAD);
}
-Klass* InlineKlass::array_klass_impl(bool or_null, TRAPS) {
- return array_klass_impl(or_null, 1, THREAD);
+Klass* InlineKlass::array_klass(TRAPS) {
+ return array_klass(1, THREAD);
}
+Klass* InlineKlass::array_klass_or_null() {
+ return array_klass_or_null(1);
+}
+
+
// Inline type arguments are not passed by reference, instead each
// field of the inline type is passed as an argument. This helper
// function collects the inlined field (recursively)
diff --git a/src/hotspot/share/oops/inlineKlass.hpp b/src/hotspot/share/oops/inlineKlass.hpp
index e30626b0a0b..adc1c3e2a67 100644
--- a/src/hotspot/share/oops/inlineKlass.hpp
+++ b/src/hotspot/share/oops/inlineKlass.hpp
@@ -128,15 +128,16 @@ class InlineKlass: public InstanceKlass {
void cleanup_blobs();
-
- protected:
+ public:
// Returns the array class for the n'th dimension
- Klass* array_klass_impl(bool or_null, int n, TRAPS);
+ virtual Klass* array_klass(int n, TRAPS);
+ virtual Klass* array_klass_or_null(int n);
// Returns the array class with this class as element type
- Klass* array_klass_impl(bool or_null, TRAPS);
+ virtual Klass* array_klass(TRAPS);
+ virtual Klass* array_klass_or_null();
+
- public:
// Type testing
bool is_inline_klass_slow() const { return true; }
diff --git a/src/hotspot/share/oops/inlineKlass.inline.hpp b/src/hotspot/share/oops/inlineKlass.inline.hpp
index ecc33d9f580..01ff0662435 100644
--- a/src/hotspot/share/oops/inlineKlass.inline.hpp
+++ b/src/hotspot/share/oops/inlineKlass.inline.hpp
@@ -37,11 +37,6 @@ inline InlineKlassFixedBlock* InlineKlass::inlineklass_static_block() const {
return (InlineKlassFixedBlock*)(adr_jf + this->java_fields_count() * sizeof(Klass*));
}
- address adr_fing = adr_fingerprint();
- if (adr_fing != NULL) {
- return (InlineKlassFixedBlock*)(adr_fingerprint() + sizeof(u8));
- }
-
InstanceKlass** adr_host = adr_unsafe_anonymous_host();
if (adr_host != NULL) {
return (InlineKlassFixedBlock*)(adr_host + 1);
diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp
index a31c98a55b8..e59049185ff 100644
--- a/src/hotspot/share/oops/instanceKlass.cpp
+++ b/src/hotspot/share/oops/instanceKlass.cpp
@@ -24,10 +24,11 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "aot/aotLoader.hpp"
+#include "cds/archiveUtils.hpp"
+#include "cds/classListWriter.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
-#include "classfile/classListWriter.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.hpp"
@@ -50,11 +51,9 @@
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/archiveUtils.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -471,7 +470,6 @@ InstanceKlass* InstanceKlass::allocate_instance_klass(const ClassFileParser& par
nonstatic_oop_map_size(parser.total_oop_map_count()),
parser.is_interface(),
parser.is_unsafe_anonymous(),
- should_store_fingerprint(is_hidden_or_anonymous),
parser.has_inline_fields() ? parser.java_fields_count() : 0,
parser.is_inline_type());
@@ -1283,9 +1281,6 @@ void InstanceKlass::initialize_impl(TRAPS) {
}
- // Look for aot compiled methods for this klass, including class initializer.
- AOTLoader::load_for_klass(this, THREAD);
-
// Step 9
{
DTRACE_CLASSINIT_PROBE_WAIT(clinit, -1, wait);
@@ -1568,11 +1563,9 @@ void InstanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
}
}
-Klass* InstanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
+Klass* InstanceKlass::array_klass(int n, TRAPS) {
// Need load-acquire for lock-free read
if (array_klasses_acquire() == NULL) {
- if (or_null) return NULL;
-
ResourceMark rm(THREAD);
JavaThread *jt = THREAD->as_Java_thread();
{
@@ -1587,16 +1580,27 @@ Klass* InstanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
}
}
}
- // _this will always be set at this point
- ArrayKlass* oak = array_klasses();
- if (or_null) {
- return oak->array_klass_or_null(n);
+ // array_klasses() will always be set at this point
+ ArrayKlass* ak = array_klasses();
+ return ak->array_klass(n, THREAD);
+}
+
+Klass* InstanceKlass::array_klass_or_null(int n) {
+ // Need load-acquire for lock-free read
+ ArrayKlass* ak = array_klasses_acquire();
+ if (ak == NULL) {
+ return NULL;
+ } else {
+ return ak->array_klass_or_null(n);
}
- return oak->array_klass(n, THREAD);
}
-Klass* InstanceKlass::array_klass_impl(bool or_null, TRAPS) {
- return array_klass_impl(or_null, 1, THREAD);
+Klass* InstanceKlass::array_klass(TRAPS) {
+ return array_klass(1, THREAD);
+}
+
+Klass* InstanceKlass::array_klass_or_null() {
+ return array_klass_or_null(1);
}
static int call_class_initializer_counter = 0; // for debugging
@@ -2516,77 +2520,6 @@ void InstanceKlass::clean_method_data() {
}
}
-bool InstanceKlass::supers_have_passed_fingerprint_checks() {
- if (java_super() != NULL && !java_super()->has_passed_fingerprint_check()) {
- ResourceMark rm;
- log_trace(class, fingerprint)("%s : super %s not fingerprinted", external_name(), java_super()->external_name());
- return false;
- }
-
- Array* local_interfaces = this->local_interfaces();
- if (local_interfaces != NULL) {
- int length = local_interfaces->length();
- for (int i = 0; i < length; i++) {
- InstanceKlass* intf = local_interfaces->at(i);
- if (!intf->has_passed_fingerprint_check()) {
- ResourceMark rm;
- log_trace(class, fingerprint)("%s : interface %s not fingerprinted", external_name(), intf->external_name());
- return false;
- }
- }
- }
-
- return true;
-}
-
-bool InstanceKlass::should_store_fingerprint(bool is_hidden_or_anonymous) {
-#if INCLUDE_AOT
- // We store the fingerprint into the InstanceKlass only in the following 2 cases:
- if (CalculateClassFingerprint) {
- // (1) We are running AOT to generate a shared library.
- return true;
- }
- if (Arguments::is_dumping_archive()) {
- // (2) We are running -Xshare:dump or -XX:ArchiveClassesAtExit to create a shared archive
- return true;
- }
- if (UseAOT && is_hidden_or_anonymous) {
- // (3) We are using AOT code from a shared library and see a hidden or unsafe anonymous class
- return true;
- }
-#endif
-
- // In all other cases we might set the _misc_has_passed_fingerprint_check bit,
- // but do not store the 64-bit fingerprint to save space.
- return false;
-}
-
-bool InstanceKlass::has_stored_fingerprint() const {
-#if INCLUDE_AOT
- return should_store_fingerprint() || is_shared();
-#else
- return false;
-#endif
-}
-
-uint64_t InstanceKlass::get_stored_fingerprint() const {
- address adr = adr_fingerprint();
- if (adr != NULL) {
- return (uint64_t)Bytes::get_native_u8(adr); // adr may not be 64-bit aligned
- }
- return 0;
-}
-
-void InstanceKlass::store_fingerprint(uint64_t fingerprint) {
- address adr = adr_fingerprint();
- if (adr != NULL) {
- Bytes::put_native_u8(adr, (u8)fingerprint); // adr may not be 64-bit aligned
-
- ResourceMark rm;
- log_trace(class, fingerprint)("stored as " PTR64_FORMAT " for class %s", fingerprint, external_name());
- }
-}
-
void InstanceKlass::metaspace_pointers_do(MetaspaceClosure* it) {
Klass::metaspace_pointers_do(it);
@@ -2597,7 +2530,11 @@ void InstanceKlass::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_annotations);
it->push((Klass**)&_array_klasses);
- it->push(&_constants);
+ if (!is_rewritten()) {
+ it->push(&_constants, MetaspaceClosure::_writable);
+ } else {
+ it->push(&_constants);
+ }
it->push(&_inner_classes);
#if INCLUDE_JVMTI
it->push(&_previous_versions);
@@ -2640,6 +2577,12 @@ void InstanceKlass::metaspace_pointers_do(MetaspaceClosure* it) {
}
void InstanceKlass::remove_unshareable_info() {
+
+ if (MetaspaceShared::is_old_class(this)) {
+ // Set the old class bit.
+ set_is_shared_old_klass();
+ }
+
Klass::remove_unshareable_info();
if (SystemDictionaryShared::has_class_failed_verification(this)) {
@@ -4231,30 +4174,6 @@ void InstanceKlass::purge_previous_version_list() {
_has_previous_versions = true;
}
- // At least one method is live in this previous version.
- // Reset dead EMCP methods not to get breakpoints.
- // All methods are deallocated when all of the methods for this class are no
- // longer running.
- Array* method_refs = pv_node->methods();
- if (method_refs != NULL) {
- log_trace(redefine, class, iklass, purge)("previous methods length=%d", method_refs->length());
- for (int j = 0; j < method_refs->length(); j++) {
- Method* method = method_refs->at(j);
-
- if (!method->on_stack()) {
- // no breakpoints for non-running methods
- if (method->is_running_emcp()) {
- method->set_running_emcp(false);
- }
- } else {
- assert (method->is_obsolete() || method->is_running_emcp(),
- "emcp method cannot run after emcp bit is cleared");
- log_trace(redefine, class, iklass, purge)
- ("purge: %s(%s): prev method @%d in version @%d is alive",
- method->name()->as_C_string(), method->signature()->as_C_string(), j, version);
- }
- }
- }
// next previous version
last = pv_node;
pv_node = pv_node->previous_versions();
@@ -4354,29 +4273,6 @@ void InstanceKlass::add_previous_version(InstanceKlass* scratch_class,
return;
}
- if (emcp_method_count != 0) {
- // At least one method is still running, check for EMCP methods
- for (int i = 0; i < old_methods->length(); i++) {
- Method* old_method = old_methods->at(i);
- if (!old_method->is_obsolete() && old_method->on_stack()) {
- // if EMCP method (not obsolete) is on the stack, mark as EMCP so that
- // we can add breakpoints for it.
-
- // We set the method->on_stack bit during safepoints for class redefinition
- // and use this bit to set the is_running_emcp bit.
- // After the safepoint, the on_stack bit is cleared and the running emcp
- // method may exit. If so, we would set a breakpoint in a method that
- // is never reached, but this won't be noticeable to the programmer.
- old_method->set_running_emcp(true);
- log_trace(redefine, class, iklass, add)
- ("EMCP method %s is on_stack " INTPTR_FORMAT, old_method->name_and_sig_as_C_string(), p2i(old_method));
- } else if (!old_method->is_obsolete()) {
- log_trace(redefine, class, iklass, add)
- ("EMCP method %s is NOT on_stack " INTPTR_FORMAT, old_method->name_and_sig_as_C_string(), p2i(old_method));
- }
- }
- }
-
// Add previous version if any methods are still running.
// Set has_previous_version flag for processing during class unloading.
_has_previous_versions = true;
diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp
index bd3dde7d798..ee9be15139a 100644
--- a/src/hotspot/share/oops/instanceKlass.hpp
+++ b/src/hotspot/share/oops/instanceKlass.hpp
@@ -32,7 +32,6 @@
#include "oops/fieldInfo.hpp"
#include "oops/instanceOop.hpp"
#include "runtime/handles.hpp"
-#include "runtime/os.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
@@ -53,7 +52,6 @@ class RecordComponent;
// indicating where oops are located in instances of this klass.
// [EMBEDDED implementor of the interface] only exist for interface
// [EMBEDDED unsafe_anonymous_host klass] only exist for an unsafe anonymous class (JSR 292 enabled)
-// [EMBEDDED fingerprint ] only if should_store_fingerprint()==true
// [EMBEDDED inline_type_field_klasses] only if has_inline_fields() == true
// [EMBEDDED InlineKlassFixedBlock] only if is an InlineKlass instance
@@ -277,8 +275,7 @@ class InstanceKlass: public Klass {
_misc_has_nonstatic_concrete_methods = 1 << 5, // class/superclass/implemented interfaces has non-static, concrete methods
_misc_declares_nonstatic_concrete_methods = 1 << 6, // directly declares non-static, concrete methods
_misc_has_been_redefined = 1 << 7, // class has been redefined
- _misc_has_passed_fingerprint_check = 1 << 8, // when this class was loaded, the fingerprint computed from its
- // code source was found to be matching the value recorded by AOT.
+ _unused = 1 << 8, //
_misc_is_scratch_class = 1 << 9, // class is the redefined scratch class
_misc_is_shared_boot_class = 1 << 10, // defining class loader is boot class loader
_misc_is_shared_platform_class = 1 << 11, // defining class loader is platform class loader
@@ -909,24 +906,6 @@ class InstanceKlass: public Klass {
_misc_flags |= _misc_has_been_redefined;
}
- bool has_passed_fingerprint_check() const {
- return (_misc_flags & _misc_has_passed_fingerprint_check) != 0;
- }
- void set_has_passed_fingerprint_check(bool b) {
- if (b) {
- _misc_flags |= _misc_has_passed_fingerprint_check;
- } else {
- _misc_flags &= ~_misc_has_passed_fingerprint_check;
- }
- }
- bool supers_have_passed_fingerprint_checks();
-
- static bool should_store_fingerprint(bool is_hidden_or_anonymous);
- bool should_store_fingerprint() const { return should_store_fingerprint(is_hidden() || is_unsafe_anonymous()); }
- bool has_stored_fingerprint() const;
- uint64_t get_stored_fingerprint() const;
- void store_fingerprint(uint64_t fingerprint);
-
bool is_scratch_class() const {
return (_misc_flags & _misc_is_scratch_class) != 0;
}
@@ -1188,7 +1167,7 @@ class InstanceKlass: public Klass {
static int size(int vtable_length, int itable_length,
int nonstatic_oop_map_size,
- bool is_interface, bool is_unsafe_anonymous, bool has_stored_fingerprint,
+ bool is_interface, bool is_unsafe_anonymous,
int java_fields, bool is_inline_type) {
return align_metadata_size(header_size() +
vtable_length +
@@ -1196,7 +1175,6 @@ class InstanceKlass: public Klass {
nonstatic_oop_map_size +
(is_interface ? (int)sizeof(Klass*)/wordSize : 0) +
(is_unsafe_anonymous ? (int)sizeof(Klass*)/wordSize : 0) +
- (has_stored_fingerprint ? (int)sizeof(uint64_t*)/wordSize : 0) +
(java_fields * (int)sizeof(Klass*)/wordSize) +
(is_inline_type ? (int)sizeof(InlineKlassFixedBlock) : 0));
}
@@ -1205,7 +1183,6 @@ class InstanceKlass: public Klass {
nonstatic_oop_map_size(),
is_interface(),
is_unsafe_anonymous(),
- has_stored_fingerprint(),
has_inline_type_fields() ? java_fields_count() : 0,
is_inline_klass());
}
@@ -1221,7 +1198,6 @@ class InstanceKlass: public Klass {
inline InstanceKlass* volatile* adr_implementor() const;
inline InstanceKlass** adr_unsafe_anonymous_host() const;
- inline address adr_fingerprint() const;
inline address adr_inline_type_field_klasses() const;
inline Klass* get_inline_type_field_klass(int idx) const;
@@ -1359,6 +1335,15 @@ class InstanceKlass: public Klass {
// cannot lock it (like the mirror).
// It has to be an object not a Mutex because it's held through java calls.
oop init_lock() const;
+
+ // Returns the array class for the n'th dimension
+ virtual Klass* array_klass(int n, TRAPS);
+ virtual Klass* array_klass_or_null(int n);
+
+ // Returns the array class with this class as element type
+ virtual Klass* array_klass(TRAPS);
+ virtual Klass* array_klass_or_null();
+
private:
void fence_and_clear_init_lock();
@@ -1369,14 +1354,6 @@ class InstanceKlass: public Klass {
void eager_initialize_impl ();
/* jni_id_for_impl for jfieldID only */
JNIid* jni_id_for_impl (int offset);
-protected:
- // Returns the array class for the n'th dimension
- virtual Klass* array_klass_impl(bool or_null, int n, TRAPS);
-
- // Returns the array class with this class as element type
- virtual Klass* array_klass_impl(bool or_null, TRAPS);
-
-private:
// find a local method (returns NULL if not found)
Method* find_method_impl(const Symbol* name,
diff --git a/src/hotspot/share/oops/instanceKlass.inline.hpp b/src/hotspot/share/oops/instanceKlass.inline.hpp
index 1331757a72c..6374ec84f90 100644
--- a/src/hotspot/share/oops/instanceKlass.inline.hpp
+++ b/src/hotspot/share/oops/instanceKlass.inline.hpp
@@ -95,31 +95,8 @@ inline InstanceKlass** InstanceKlass::adr_unsafe_anonymous_host() const {
}
}
-inline address InstanceKlass::adr_fingerprint() const {
- if (has_stored_fingerprint()) {
- InstanceKlass** adr_host = adr_unsafe_anonymous_host();
- if (adr_host != NULL) {
- return (address)(adr_host + 1);
- }
-
- InstanceKlass* volatile* adr_impl = adr_implementor();
- if (adr_impl != NULL) {
- return (address)(adr_impl + 1);
- }
-
- return (address)end_of_nonstatic_oop_maps();
- } else {
- return NULL;
- }
-}
-
inline address InstanceKlass::adr_inline_type_field_klasses() const {
if (has_inline_type_fields()) {
- address adr_fing = adr_fingerprint();
- if (adr_fing != NULL) {
- return adr_fingerprint() + sizeof(u8);
- }
-
InstanceKlass** adr_host = adr_unsafe_anonymous_host();
if (adr_host != NULL) {
return (address)(adr_host + 1);
diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp
index 9397b7c8136..bccf9cdc9ea 100644
--- a/src/hotspot/share/oops/klass.cpp
+++ b/src/hotspot/share/oops/klass.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "jvm_io.h"
+#include "cds/heapShared.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.inline.hpp"
#include "classfile/javaClasses.hpp"
@@ -34,7 +35,6 @@
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "logging/log.hpp"
-#include "memory/heapShared.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/oopFactory.hpp"
@@ -644,33 +644,6 @@ void Klass::set_archived_java_mirror(oop m) {
}
#endif // INCLUDE_CDS_JAVA_HEAP
-Klass* Klass::array_klass_or_null(int rank) {
- EXCEPTION_MARK;
- // No exception can be thrown by array_klass_impl when called with or_null == true.
- // (In anycase, the execption mark will fail if it do so)
- return array_klass_impl(true, rank, THREAD);
-}
-
-
-Klass* Klass::array_klass_or_null() {
- EXCEPTION_MARK;
- // No exception can be thrown by array_klass_impl when called with or_null == true.
- // (In anycase, the execption mark will fail if it do so)
- return array_klass_impl(true, THREAD);
-}
-
-
-Klass* Klass::array_klass_impl(bool or_null, int rank, TRAPS) {
- fatal("array_klass should be dispatched to InstanceKlass, ObjArrayKlass or TypeArrayKlass");
- return NULL;
-}
-
-
-Klass* Klass::array_klass_impl(bool or_null, TRAPS) {
- fatal("array_klass should be dispatched to InstanceKlass, ObjArrayKlass or TypeArrayKlass");
- return NULL;
-}
-
void Klass::check_array_allocation_length(int length, int max_length, TRAPS) {
if (length > max_length) {
if (!THREAD->in_retryable_allocation()) {
diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp
index 0ed330c227c..7c789aa0ce2 100644
--- a/src/hotspot/share/oops/klass.hpp
+++ b/src/hotspot/share/oops/klass.hpp
@@ -178,7 +178,8 @@ class Klass : public Metadata {
u2 _shared_class_flags;
enum {
_archived_lambda_proxy_is_available = 2,
- _has_value_based_class_annotation = 4
+ _has_value_based_class_annotation = 4,
+ _is_shared_old_klass = 8
};
#endif
@@ -334,6 +335,14 @@ class Klass : public Metadata {
NOT_CDS(return false;)
}
+ void set_is_shared_old_klass() {
+ CDS_ONLY(_shared_class_flags |= _is_shared_old_klass;)
+ }
+ bool is_shared_old_klass() const {
+ CDS_ONLY(return (_shared_class_flags & _is_shared_old_klass) != 0;)
+ NOT_CDS(return false;)
+ }
+
// Obtain the module or package for this class
virtual ModuleEntry* module() const = 0;
@@ -514,15 +523,14 @@ class Klass : public Metadata {
}
// array class with specific rank
- Klass* array_klass(int rank, TRAPS) { return array_klass_impl(false, rank, THREAD); }
+ virtual Klass* array_klass(int rank, TRAPS) = 0;
// array class with this klass as element type
- Klass* array_klass(TRAPS) { return array_klass_impl(false, THREAD); }
+ virtual Klass* array_klass(TRAPS) = 0;
// These will return NULL instead of allocating on the heap:
- // NB: these can block for a mutex, like other functions with TRAPS arg.
- Klass* array_klass_or_null(int rank);
- Klass* array_klass_or_null();
+ virtual Klass* array_klass_or_null(int rank) = 0;
+ virtual Klass* array_klass_or_null() = 0;
virtual oop protection_domain() const = 0;
@@ -531,8 +539,6 @@ class Klass : public Metadata {
inline oop klass_holder() const;
protected:
- virtual Klass* array_klass_impl(bool or_null, int rank, TRAPS);
- virtual Klass* array_klass_impl(bool or_null, TRAPS);
// Error handling when length > max_length or length < 0
static void check_array_allocation_length(int length, int max_length, TRAPS);
diff --git a/src/hotspot/share/oops/klassVtable.cpp b/src/hotspot/share/oops/klassVtable.cpp
index 0f5d35fa8e8..75c079fde2a 100644
--- a/src/hotspot/share/oops/klassVtable.cpp
+++ b/src/hotspot/share/oops/klassVtable.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
@@ -31,7 +32,6 @@
#include "interpreter/linkResolver.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceKlass.inline.hpp"
@@ -51,7 +51,7 @@ inline InstanceKlass* klassVtable::ik() const {
}
bool klassVtable::is_preinitialized_vtable() {
- return _klass->is_shared() && !MetaspaceShared::remapped_readwrite();
+ return _klass->is_shared() && !MetaspaceShared::remapped_readwrite() && !_klass->is_shared_old_klass();
}
@@ -1093,7 +1093,8 @@ void itableMethodEntry::initialize(Method* m) {
#ifdef ASSERT
if (MetaspaceShared::is_in_shared_metaspace((void*)&_method) &&
- !MetaspaceShared::remapped_readwrite()) {
+ !MetaspaceShared::remapped_readwrite() &&
+ !MetaspaceShared::is_old_class(m->method_holder())) {
// At runtime initialize_itable is rerun as part of link_class_impl()
// for a shared class loaded by the non-boot loader.
// The dumptime itable method entry should be the same as the runtime entry.
diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp
index 59e55dc05c9..1f5ed497efc 100644
--- a/src/hotspot/share/oops/method.cpp
+++ b/src/hotspot/share/oops/method.cpp
@@ -23,6 +23,8 @@
*/
#include "precompiled.hpp"
+#include "cds/cppVtables.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
@@ -41,10 +43,8 @@
#include "logging/logTag.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/cppVtables.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -354,11 +354,13 @@ Symbol* Method::klass_name() const {
void Method::metaspace_pointers_do(MetaspaceClosure* it) {
log_trace(cds)("Iter(Method): %p", this);
- it->push(&_constMethod);
+ if (!method_holder()->is_rewritten()) {
+ it->push(&_constMethod, MetaspaceClosure::_writable);
+ } else {
+ it->push(&_constMethod);
+ }
it->push(&_method_data);
it->push(&_method_counters);
-
- Method* this_ptr = this;
}
// Attempt to return method to original state. Clear any pointers
@@ -372,7 +374,7 @@ void Method::remove_unshareable_info() {
}
void Method::set_vtable_index(int index) {
- if (is_shared() && !MetaspaceShared::remapped_readwrite()) {
+ if (is_shared() && !MetaspaceShared::remapped_readwrite() && !method_holder()->is_shared_old_klass()) {
// At runtime initialize_vtable is rerun as part of link_class_impl()
// for a shared class loaded by the non-boot loader to obtain the loader
// constraints based on the runtime classloaders' context.
@@ -383,7 +385,7 @@ void Method::set_vtable_index(int index) {
}
void Method::set_itable_index(int index) {
- if (is_shared() && !MetaspaceShared::remapped_readwrite()) {
+ if (is_shared() && !MetaspaceShared::remapped_readwrite() && !method_holder()->is_shared_old_klass()) {
// At runtime initialize_itable is rerun as part of link_class_impl()
// for a shared class loaded by the non-boot loader to obtain the loader
// constraints based on the runtime classloaders' context. The dumptime
@@ -2293,8 +2295,6 @@ void Method::set_on_stack(const bool value) {
if (value && !already_set) {
MetadataOnStackMark::record(this);
}
- assert(!value || !is_old() || is_obsolete() || is_running_emcp(),
- "emcp methods cannot run after emcp bit is cleared");
}
// Called when the class loader is unloaded to make all methods weak.
diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp
index cf01dca2d45..77b8b509759 100644
--- a/src/hotspot/share/oops/method.hpp
+++ b/src/hotspot/share/oops/method.hpp
@@ -88,13 +88,12 @@ class Method : public Metadata {
_dont_inline = 1 << 2,
_hidden = 1 << 3,
_has_injected_profile = 1 << 4,
- _running_emcp = 1 << 5,
- _intrinsic_candidate = 1 << 6,
- _reserved_stack_access = 1 << 7,
- _scalarized_args = 1 << 8,
- _c1_needs_stack_repair = 1 << 9,
- _c2_needs_stack_repair = 1 << 10,
- _scoped = 1 << 11
+ _intrinsic_candidate = 1 << 5,
+ _reserved_stack_access = 1 << 6,
+ _scalarized_args = 1 << 7,
+ _c1_needs_stack_repair = 1 << 8,
+ _c2_needs_stack_repair = 1 << 9,
+ _scoped = 1 << 10
};
mutable u2 _flags;
@@ -118,10 +117,6 @@ class Method : public Metadata {
CompiledMethod* volatile _code; // Points to the corresponding piece of native code
volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
-#if INCLUDE_AOT
- CompiledMethod* _aot_code;
-#endif
-
// Constructor
Method(ConstMethod* xconst, AccessFlags access_flags);
public:
@@ -410,18 +405,6 @@ class Method : public Metadata {
}
}
-#if INCLUDE_AOT
- void set_aot_code(CompiledMethod* aot_code) {
- _aot_code = aot_code;
- }
-
- CompiledMethod* aot_code() const {
- return _aot_code;
- }
-#else
- CompiledMethod* aot_code() const { return NULL; }
-#endif // INCLUDE_AOT
-
int nmethod_age() const {
if (method_counters() == NULL) {
return INT_MAX;
@@ -692,8 +675,6 @@ class Method : public Metadata {
// simultaneously. Use with caution.
bool has_compiled_code() const;
- bool has_aot_code() const { return aot_code() != NULL; }
-
bool needs_clinit_barrier() const;
// sizing
@@ -782,20 +763,6 @@ class Method : public Metadata {
bool is_deleted() const { return access_flags().is_deleted(); }
void set_is_deleted() { _access_flags.set_is_deleted(); }
- bool is_running_emcp() const {
- // EMCP methods are old but not obsolete or deleted. Equivalent
- // Modulo Constant Pool means the method is equivalent except
- // the constant pool and instructions that access the constant
- // pool might be different.
- // If a breakpoint is set in a redefined method, its EMCP methods that are
- // still running must have a breakpoint also.
- return (_flags & _running_emcp) != 0;
- }
-
- void set_running_emcp(bool x) {
- _flags = x ? (_flags | _running_emcp) : (_flags & ~_running_emcp);
- }
-
bool on_stack() const { return access_flags().on_stack(); }
void set_on_stack(const bool value);
diff --git a/src/hotspot/share/oops/methodCounters.cpp b/src/hotspot/share/oops/methodCounters.cpp
index ecb30191acc..67b2ef96060 100644
--- a/src/hotspot/share/oops/methodCounters.cpp
+++ b/src/hotspot/share/oops/methodCounters.cpp
@@ -23,15 +23,11 @@
*/
#include "precompiled.hpp"
#include "compiler/compiler_globals.hpp"
-#include "memory/metaspaceClosure.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "runtime/handles.inline.hpp"
MethodCounters::MethodCounters(const methodHandle& mh) :
-#if INCLUDE_AOT
- _method(mh()),
-#endif
_prev_time(0),
_rate(0),
_nmethod_age(INT_MAX),
@@ -77,13 +73,6 @@ void MethodCounters::clear_counters() {
set_highest_osr_comp_level(0);
}
-void MethodCounters::metaspace_pointers_do(MetaspaceClosure* it) {
- log_trace(cds)("Iter(MethodCounters): %p", this);
-#if INCLUDE_AOT
- it->push(&_method);
-#endif
-}
-
void MethodCounters::print_value_on(outputStream* st) const {
assert(is_methodCounters(), "must be methodCounters");
st->print("method counters");
diff --git a/src/hotspot/share/oops/methodCounters.hpp b/src/hotspot/share/oops/methodCounters.hpp
index ecc70928917..2698ce3d3ce 100644
--- a/src/hotspot/share/oops/methodCounters.hpp
+++ b/src/hotspot/share/oops/methodCounters.hpp
@@ -37,11 +37,6 @@ class MethodCounters : public Metadata {
private:
InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations
- // If you add a new field that points to any metaspace object, you
- // must add this field to MethodCounters::metaspace_pointers_do().
-#if INCLUDE_AOT
- Method* _method; // Back link to Method
-#endif
jlong _prev_time; // Previous time the rate was acquired
float _rate; // Events (invocation and backedge counter increments) per millisecond
int _nmethod_age;
@@ -74,15 +69,12 @@ class MethodCounters : public Metadata {
void deallocate_contents(ClassLoaderData* loader_data) {}
- AOT_ONLY(Method* method() const { return _method; })
-
static int method_counters_size() {
return align_up((int)sizeof(MethodCounters), wordSize) / wordSize;
}
virtual int size() const {
return method_counters_size();
}
- void metaspace_pointers_do(MetaspaceClosure* it);
MetaspaceObj::Type type() const { return MethodCountersType; }
void clear_counters();
diff --git a/src/hotspot/share/oops/objArrayKlass.cpp b/src/hotspot/share/oops/objArrayKlass.cpp
index 96ad8d30b3b..035d89e682e 100644
--- a/src/hotspot/share/oops/objArrayKlass.cpp
+++ b/src/hotspot/share/oops/objArrayKlass.cpp
@@ -321,14 +321,13 @@ void ObjArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
}
-Klass* ObjArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) {
+Klass* ObjArrayKlass::array_klass(int n, TRAPS) {
assert(dimension() <= n, "check order of chain");
int dim = dimension();
if (dim == n) return this;
// lock-free read needs acquire semantics
if (higher_dimension_acquire() == NULL) {
- if (or_null) return NULL;
ResourceMark rm(THREAD);
{
@@ -350,15 +349,31 @@ Klass* ObjArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) {
}
ObjArrayKlass *ak = ObjArrayKlass::cast(higher_dimension());
- if (or_null) {
- return ak->array_klass_or_null(n);
- }
- THREAD->check_possible_safepoint();
+ THREAD->as_Java_thread()->check_possible_safepoint();
return ak->array_klass(n, THREAD);
}
-Klass* ObjArrayKlass::array_klass_impl(bool or_null, TRAPS) {
- return array_klass_impl(or_null, dimension() + 1, THREAD);
+Klass* ObjArrayKlass::array_klass_or_null(int n) {
+
+ assert(dimension() <= n, "check order of chain");
+ int dim = dimension();
+ if (dim == n) return this;
+
+ // lock-free read needs acquire semantics
+ if (higher_dimension_acquire() == NULL) {
+ return NULL;
+ }
+
+ ObjArrayKlass *ak = ObjArrayKlass::cast(higher_dimension());
+ return ak->array_klass_or_null(n);
+}
+
+Klass* ObjArrayKlass::array_klass(TRAPS) {
+ return array_klass(dimension() + 1, THREAD);
+}
+
+Klass* ObjArrayKlass::array_klass_or_null() {
+ return array_klass_or_null(dimension() + 1);
}
bool ObjArrayKlass::can_be_primary_super_slow() const {
diff --git a/src/hotspot/share/oops/objArrayKlass.hpp b/src/hotspot/share/oops/objArrayKlass.hpp
index 7a9057bcf7d..529ca4b866a 100644
--- a/src/hotspot/share/oops/objArrayKlass.hpp
+++ b/src/hotspot/share/oops/objArrayKlass.hpp
@@ -86,14 +86,14 @@ class ObjArrayKlass : public ArrayKlass {
void do_copy(arrayOop s, size_t src_offset,
arrayOop d, size_t dst_offset,
int length, TRAPS);
- protected:
+ public:
// Returns the ObjArrayKlass for n'th dimension.
- virtual Klass* array_klass_impl(bool or_null, int n, TRAPS);
+ virtual Klass* array_klass(int n, TRAPS);
+ virtual Klass* array_klass_or_null(int n);
// Returns the array class with this class as element type.
- virtual Klass* array_klass_impl(bool or_null, TRAPS);
-
- public:
+ virtual Klass* array_klass(TRAPS);
+ virtual Klass* array_klass_or_null();
static ObjArrayKlass* cast(Klass* k) {
return const_cast(cast(const_cast(k)));
diff --git a/src/hotspot/share/oops/oop.cpp b/src/hotspot/share/oops/oop.cpp
index 4412bc2461a..3337b04fe49 100644
--- a/src/hotspot/share/oops/oop.cpp
+++ b/src/hotspot/share/oops/oop.cpp
@@ -23,9 +23,9 @@
*/
#include "precompiled.hpp"
+#include "cds/heapShared.inline.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/javaClasses.inline.hpp"
-#include "memory/heapShared.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp
index 0a951955764..84f36f28105 100644
--- a/src/hotspot/share/oops/oop.inline.hpp
+++ b/src/hotspot/share/oops/oop.inline.hpp
@@ -34,7 +34,6 @@
#include "oops/oop.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
#include "runtime/globals.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
diff --git a/src/hotspot/share/oops/symbol.cpp b/src/hotspot/share/oops/symbol.cpp
index ce3ba0e4fc5..a8a312bda5e 100644
--- a/src/hotspot/share/oops/symbol.cpp
+++ b/src/hotspot/share/oops/symbol.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/vmSymbols.hpp"
@@ -31,7 +32,6 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/symbol.hpp"
diff --git a/src/hotspot/share/oops/typeArrayKlass.cpp b/src/hotspot/share/oops/typeArrayKlass.cpp
index 7fa3ae9a093..80209b5ed8e 100644
--- a/src/hotspot/share/oops/typeArrayKlass.cpp
+++ b/src/hotspot/share/oops/typeArrayKlass.cpp
@@ -170,7 +170,7 @@ void TypeArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos
}
// create a klass of array holding typeArrays
-Klass* TypeArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) {
+Klass* TypeArrayKlass::array_klass(int n, TRAPS) {
int dim = dimension();
assert(dim <= n, "check order of chain");
if (dim == n)
@@ -178,7 +178,6 @@ Klass* TypeArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) {
// lock-free read needs acquire semantics
if (higher_dimension_acquire() == NULL) {
- if (or_null) return NULL;
ResourceMark rm;
JavaThread *jt = THREAD->as_Java_thread();
@@ -199,15 +198,32 @@ Klass* TypeArrayKlass::array_klass_impl(bool or_null, int n, TRAPS) {
}
ObjArrayKlass* h_ak = ObjArrayKlass::cast(higher_dimension());
- if (or_null) {
- return h_ak->array_klass_or_null(n);
- }
- THREAD->check_possible_safepoint();
+ THREAD->as_Java_thread()->check_possible_safepoint();
return h_ak->array_klass(n, THREAD);
}
-Klass* TypeArrayKlass::array_klass_impl(bool or_null, TRAPS) {
- return array_klass_impl(or_null, dimension() + 1, THREAD);
+// return existing klass of array holding typeArrays
+Klass* TypeArrayKlass::array_klass_or_null(int n) {
+ int dim = dimension();
+ assert(dim <= n, "check order of chain");
+ if (dim == n)
+ return this;
+
+ // lock-free read needs acquire semantics
+ if (higher_dimension_acquire() == NULL) {
+ return NULL;
+ }
+
+ ObjArrayKlass* h_ak = ObjArrayKlass::cast(higher_dimension());
+ return h_ak->array_klass_or_null(n);
+}
+
+Klass* TypeArrayKlass::array_klass(TRAPS) {
+ return array_klass(dimension() + 1, THREAD);
+}
+
+Klass* TypeArrayKlass::array_klass_or_null() {
+ return array_klass_or_null(dimension() + 1);
}
int TypeArrayKlass::oop_size(oop obj) const {
diff --git a/src/hotspot/share/oops/typeArrayKlass.hpp b/src/hotspot/share/oops/typeArrayKlass.hpp
index 0c57c8a375f..1d6ed457c41 100644
--- a/src/hotspot/share/oops/typeArrayKlass.hpp
+++ b/src/hotspot/share/oops/typeArrayKlass.hpp
@@ -93,14 +93,15 @@ class TypeArrayKlass : public ArrayKlass {
template
inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
- protected:
+ public:
// Find n'th dimensional array
- virtual Klass* array_klass_impl(bool or_null, int n, TRAPS);
+ virtual Klass* array_klass(int n, TRAPS);
+ virtual Klass* array_klass_or_null(int n);
// Returns the array class with this class as element type
- virtual Klass* array_klass_impl(bool or_null, TRAPS);
+ virtual Klass* array_klass(TRAPS);
+ virtual Klass* array_klass_or_null();
- public:
static TypeArrayKlass* cast(Klass* k) {
return const_cast(cast(const_cast(k)));
}
diff --git a/src/hotspot/share/opto/bytecodeInfo.cpp b/src/hotspot/share/opto/bytecodeInfo.cpp
index df0ed0ae5d7..a6553923dc0 100644
--- a/src/hotspot/share/opto/bytecodeInfo.cpp
+++ b/src/hotspot/share/opto/bytecodeInfo.cpp
@@ -112,24 +112,18 @@ static bool is_unboxing_method(ciMethod* callee_method, Compile* C) {
// positive filter: should callee be inlined?
bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
- int caller_bci, ciCallProfile& profile,
- WarmCallInfo* wci_result) {
+ int caller_bci, ciCallProfile& profile) {
// Allows targeted inlining
if (C->directive()->should_inline(callee_method)) {
- *wci_result = *(WarmCallInfo::always_hot());
- if (C->print_inlining() && Verbose) {
- CompileTask::print_inline_indent(inline_level());
- tty->print_cr("Inlined method is hot: ");
- }
set_msg("force inline by CompileCommand");
_forced_inline = true;
return true;
}
if (callee_method->force_inline()) {
- set_msg("force inline by annotation");
- _forced_inline = true;
- return true;
+ set_msg("force inline by annotation");
+ _forced_inline = true;
+ return true;
}
#ifndef PRODUCT
@@ -146,7 +140,6 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
// Check for too many throws (and not too huge)
if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
size < InlineThrowMaxSize ) {
- wci_result->set_profit(wci_result->profit() * 100);
if (C->print_inlining() && Verbose) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
@@ -202,8 +195,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
// negative filter: should callee NOT be inlined?
bool InlineTree::should_not_inline(ciMethod *callee_method,
ciMethod* caller_method,
- JVMState* jvms,
- WarmCallInfo* wci_result) {
+ JVMState* jvms) {
const char* fail_msg = NULL;
@@ -361,7 +353,7 @@ bool InlineTree::is_not_reached(ciMethod* callee_method, ciMethod* caller_method
// Relocated from "InliningClosure::try_to_inline"
bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
int caller_bci, JVMState* jvms, ciCallProfile& profile,
- WarmCallInfo* wci_result, bool& should_delay) {
+ bool& should_delay) {
if (ClipInlining && (int)count_inline_bcs() >= DesiredMethodLimit) {
if (!callee_method->force_inline() || !IncrementalInline) {
@@ -373,11 +365,10 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
}
_forced_inline = false; // Reset
- if (!should_inline(callee_method, caller_method, caller_bci, profile,
- wci_result)) {
+ if (!should_inline(callee_method, caller_method, caller_bci, profile)) {
return false;
}
- if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
+ if (should_not_inline(callee_method, caller_method, jvms)) {
return false;
}
@@ -560,7 +551,8 @@ void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
}
//------------------------------ok_to_inline-----------------------------------
-WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile, WarmCallInfo* initial_wci, bool& should_delay) {
+bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile,
+ bool& should_delay) {
assert(callee_method != NULL, "caller checks for optimized virtual!");
assert(!should_delay, "should be initialized to false");
#ifdef ASSERT
@@ -580,68 +572,35 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
set_msg("failed initial checks");
print_inlining(callee_method, caller_bci, caller_method, false /* !success */);
- return NULL;
+ return false;
}
// Do some parse checks.
set_msg(check_can_parse(callee_method));
if (msg() != NULL) {
print_inlining(callee_method, caller_bci, caller_method, false /* !success */);
- return NULL;
+ return false;
}
// Check if inlining policy says no.
- WarmCallInfo wci = *(initial_wci);
- bool success = try_to_inline(callee_method, caller_method, caller_bci,
- jvms, profile, &wci, should_delay);
-
-#ifndef PRODUCT
- if (InlineWarmCalls && (PrintOpto || C->print_inlining())) {
- bool cold = wci.is_cold();
- bool hot = !cold && wci.is_hot();
- bool old_cold = !success;
- if (old_cold != cold || (Verbose || WizardMode)) {
- if (msg() == NULL) {
- set_msg("OK");
- }
- tty->print(" OldInlining= %4s : %s\n WCI=",
- old_cold ? "cold" : "hot", msg());
- wci.print();
- }
- }
-#endif
+ bool success = try_to_inline(callee_method, caller_method, caller_bci, jvms, profile,
+ should_delay); // out
if (success) {
- wci = *(WarmCallInfo::always_hot());
- } else {
- wci = *(WarmCallInfo::always_cold());
- }
-
- if (!InlineWarmCalls) {
- if (!wci.is_cold() && !wci.is_hot()) {
- // Do not inline the warm calls.
- wci = *(WarmCallInfo::always_cold());
- }
- }
-
- if (!wci.is_cold()) {
// Inline!
if (msg() == NULL) {
set_msg("inline (hot)");
}
print_inlining(callee_method, caller_bci, caller_method, true /* success */);
build_inline_tree_for_callee(callee_method, jvms, caller_bci);
- if (InlineWarmCalls && !wci.is_hot()) {
- return new (C) WarmCallInfo(wci); // copy to heap
+ return true;
+ } else {
+ // Do not inline
+ if (msg() == NULL) {
+ set_msg("too cold to inline");
}
- return WarmCallInfo::always_hot();
- }
-
- // Do not inline
- if (msg() == NULL) {
- set_msg("too cold to inline");
+ print_inlining(callee_method, caller_bci, caller_method, false /* !success */ );
+ return false;
}
- print_inlining(callee_method, caller_bci, caller_method, false /* !success */ );
- return NULL;
}
//------------------------------build_inline_tree_for_callee-------------------
diff --git a/src/hotspot/share/opto/c2_globals.hpp b/src/hotspot/share/opto/c2_globals.hpp
index 7e65dc64fd2..20370cbc61a 100644
--- a/src/hotspot/share/opto/c2_globals.hpp
+++ b/src/hotspot/share/opto/c2_globals.hpp
@@ -419,46 +419,6 @@
"If parser node generation exceeds limit stop inlining") \
range(0, max_jint) \
\
- develop(intx, NodeCountInliningStep, 1000, \
- "Target size of warm calls inlined between optimization passes") \
- range(0, max_jint) \
- \
- develop(bool, InlineWarmCalls, false, \
- "Use a heat-based priority queue to govern inlining") \
- \
- /* Max values must not exceed WarmCallInfo::MAX_VALUE(). */ \
- develop(intx, HotCallCountThreshold, 999999, \
- "large numbers of calls (per method invocation) force hotness") \
- range(0, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10)))) \
- \
- develop(intx, HotCallProfitThreshold, 999999, \
- "highly profitable inlining opportunities force hotness") \
- range(0, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10)))) \
- \
- develop(intx, HotCallTrivialWork, -1, \
- "trivial execution time (no larger than this) forces hotness") \
- range(-1, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10)))) \
- \
- develop(intx, HotCallTrivialSize, -1, \
- "trivial methods (no larger than this) force calls to be hot") \
- range(-1, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10)))) \
- \
- develop(intx, WarmCallMinCount, -1, \
- "number of calls (per method invocation) to enable inlining") \
- range(-1, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10)))) \
- \
- develop(intx, WarmCallMinProfit, -1, \
- "number of calls (per method invocation) to enable inlining") \
- range(-1, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10)))) \
- \
- develop(intx, WarmCallMaxWork, 999999, \
- "execution time of the largest inlinable method") \
- range(0, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10)))) \
- \
- develop(intx, WarmCallMaxSize, 999999, \
- "size of the largest inlinable method") \
- range(0, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10)))) \
- \
product(intx, MaxNodeLimit, 80000, \
"Maximum number of nodes") \
range(1000, max_jint / 3) \
diff --git a/src/hotspot/share/opto/c2compiler.cpp b/src/hotspot/share/opto/c2compiler.cpp
index a98c82a3eb2..6d1a50f2a86 100644
--- a/src/hotspot/share/opto/c2compiler.cpp
+++ b/src/hotspot/share/opto/c2compiler.cpp
@@ -76,7 +76,7 @@ bool C2Compiler::init_c2_runtime() {
}
void C2Compiler::initialize() {
- assert(!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci(), "C2 compiler is launched, it's not c1/interpreter only mode");
+ assert(!CompilerConfig::is_c1_or_interpreter_only_no_jvmci(), "C2 compiler is launched, it's not c1/interpreter only mode");
// The first compiler thread that gets here will initialize the
// small amount of global state (and runtime stubs) that C2 needs.
@@ -681,6 +681,8 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
case vmIntrinsics::_VectorInsert:
case vmIntrinsics::_VectorExtract:
return EnableVectorSupport;
+ case vmIntrinsics::_blackhole:
+ break;
default:
return false;
diff --git a/src/hotspot/share/opto/callGenerator.cpp b/src/hotspot/share/opto/callGenerator.cpp
index 1227e2a0ac6..951ca19612b 100644
--- a/src/hotspot/share/opto/callGenerator.cpp
+++ b/src/hotspot/share/opto/callGenerator.cpp
@@ -947,81 +947,6 @@ class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}
-//---------------------------WarmCallGenerator--------------------------------
-// Internal class which handles initial deferral of inlining decisions.
-class WarmCallGenerator : public CallGenerator {
- WarmCallInfo* _call_info;
- CallGenerator* _if_cold;
- CallGenerator* _if_hot;
- bool _is_virtual; // caches virtuality of if_cold
- bool _is_inline; // caches inline-ness of if_hot
-
-public:
- WarmCallGenerator(WarmCallInfo* ci,
- CallGenerator* if_cold,
- CallGenerator* if_hot)
- : CallGenerator(if_cold->method())
- {
- assert(method() == if_hot->method(), "consistent choices");
- _call_info = ci;
- _if_cold = if_cold;
- _if_hot = if_hot;
- _is_virtual = if_cold->is_virtual();
- _is_inline = if_hot->is_inline();
- }
-
- virtual bool is_inline() const { return _is_inline; }
- virtual bool is_virtual() const { return _is_virtual; }
- virtual bool is_deferred() const { return true; }
-
- virtual JVMState* generate(JVMState* jvms);
-};
-
-
-CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
- CallGenerator* if_cold,
- CallGenerator* if_hot) {
- return new WarmCallGenerator(ci, if_cold, if_hot);
-}
-
-JVMState* WarmCallGenerator::generate(JVMState* jvms) {
- Compile* C = Compile::current();
- C->print_inlining_update(this);
-
- if (C->log() != NULL) {
- C->log()->elem("warm_call bci='%d'", jvms->bci());
- }
- jvms = _if_cold->generate(jvms);
- if (jvms != NULL) {
- Node* m = jvms->map()->control();
- if (m->is_CatchProj()) m = m->in(0); else m = C->top();
- if (m->is_Catch()) m = m->in(0); else m = C->top();
- if (m->is_Proj()) m = m->in(0); else m = C->top();
- if (m->is_CallJava()) {
- _call_info->set_call(m->as_Call());
- _call_info->set_hot_cg(_if_hot);
-#ifndef PRODUCT
- if (PrintOpto || PrintOptoInlining) {
- tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
- tty->print("WCI: ");
- _call_info->print();
- }
-#endif
- _call_info->set_heat(_call_info->compute_heat());
- C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
- }
- }
- return jvms;
-}
-
-void WarmCallInfo::make_hot() {
- Unimplemented();
-}
-
-void WarmCallInfo::make_cold() {
- // No action: Just dequeue.
-}
-
//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
@@ -1686,158 +1611,3 @@ JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
// (Note: Moved hook_up_call to GraphKit::set_edges_for_java_call.)
// (Node: Merged hook_up_exits into ParseGenerator::generate.)
-
-#define NODES_OVERHEAD_PER_METHOD (30.0)
-#define NODES_PER_BYTECODE (9.5)
-
-void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
- int call_count = profile.count();
- int code_size = call_method->code_size();
-
- // Expected execution count is based on the historical count:
- _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);
-
- // Expected profit from inlining, in units of simple call-overheads.
- _profit = 1.0;
-
- // Expected work performed by the call in units of call-overheads.
- // %%% need an empirical curve fit for "work" (time in call)
- float bytecodes_per_call = 3;
- _work = 1.0 + code_size / bytecodes_per_call;
-
- // Expected size of compilation graph:
- // -XX:+PrintParseStatistics once reported:
- // Methods seen: 9184 Methods parsed: 9184 Nodes created: 1582391
- // Histogram of 144298 parsed bytecodes:
- // %%% Need an better predictor for graph size.
- _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
-}
-
-// is_cold: Return true if the node should never be inlined.
-// This is true if any of the key metrics are extreme.
-bool WarmCallInfo::is_cold() const {
- if (count() < WarmCallMinCount) return true;
- if (profit() < WarmCallMinProfit) return true;
- if (work() > WarmCallMaxWork) return true;
- if (size() > WarmCallMaxSize) return true;
- return false;
-}
-
-// is_hot: Return true if the node should be inlined immediately.
-// This is true if any of the key metrics are extreme.
-bool WarmCallInfo::is_hot() const {
- assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
- if (count() >= HotCallCountThreshold) return true;
- if (profit() >= HotCallProfitThreshold) return true;
- if (work() <= HotCallTrivialWork) return true;
- if (size() <= HotCallTrivialSize) return true;
- return false;
-}
-
-// compute_heat:
-float WarmCallInfo::compute_heat() const {
- assert(!is_cold(), "compute heat only on warm nodes");
- assert(!is_hot(), "compute heat only on warm nodes");
- int min_size = MAX2(0, (int)HotCallTrivialSize);
- int max_size = MIN2(500, (int)WarmCallMaxSize);
- float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
- float size_factor;
- if (method_size < 0.05) size_factor = 4; // 2 sigmas better than avg.
- else if (method_size < 0.15) size_factor = 2; // 1 sigma better than avg.
- else if (method_size < 0.5) size_factor = 1; // better than avg.
- else size_factor = 0.5; // worse than avg.
- return (count() * profit() * size_factor);
-}
-
-bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
- assert(this != that, "compare only different WCIs");
- assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
- if (this->heat() > that->heat()) return true;
- if (this->heat() < that->heat()) return false;
- assert(this->heat() == that->heat(), "no NaN heat allowed");
- // Equal heat. Break the tie some other way.
- if (!this->call() || !that->call()) return (address)this > (address)that;
- return this->call()->_idx > that->call()->_idx;
-}
-
-//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
-#define UNINIT_NEXT ((WarmCallInfo*)NULL)
-
-WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
- assert(next() == UNINIT_NEXT, "not yet on any list");
- WarmCallInfo* prev_p = NULL;
- WarmCallInfo* next_p = head;
- while (next_p != NULL && next_p->warmer_than(this)) {
- prev_p = next_p;
- next_p = prev_p->next();
- }
- // Install this between prev_p and next_p.
- this->set_next(next_p);
- if (prev_p == NULL)
- head = this;
- else
- prev_p->set_next(this);
- return head;
-}
-
-WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
- WarmCallInfo* prev_p = NULL;
- WarmCallInfo* next_p = head;
- while (next_p != this) {
- assert(next_p != NULL, "this must be in the list somewhere");
- prev_p = next_p;
- next_p = prev_p->next();
- }
- next_p = this->next();
- debug_only(this->set_next(UNINIT_NEXT));
- // Remove this from between prev_p and next_p.
- if (prev_p == NULL)
- head = next_p;
- else
- prev_p->set_next(next_p);
- return head;
-}
-
-WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
- WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
-WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
- WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());
-
-WarmCallInfo* WarmCallInfo::always_hot() {
- assert(_always_hot.is_hot(), "must always be hot");
- return &_always_hot;
-}
-
-WarmCallInfo* WarmCallInfo::always_cold() {
- assert(_always_cold.is_cold(), "must always be cold");
- return &_always_cold;
-}
-
-
-#ifndef PRODUCT
-
-void WarmCallInfo::print() const {
- tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
- is_cold() ? "cold" : is_hot() ? "hot " : "warm",
- count(), profit(), work(), size(), compute_heat(), next());
- tty->cr();
- if (call() != NULL) call()->dump();
-}
-
-void print_wci(WarmCallInfo* ci) {
- ci->print();
-}
-
-void WarmCallInfo::print_all() const {
- for (const WarmCallInfo* p = this; p != NULL; p = p->next())
- p->print();
-}
-
-int WarmCallInfo::count_all() const {
- int cnt = 0;
- for (const WarmCallInfo* p = this; p != NULL; p = p->next())
- cnt++;
- return cnt;
-}
-
-#endif //PRODUCT
diff --git a/src/hotspot/share/opto/callGenerator.hpp b/src/hotspot/share/opto/callGenerator.hpp
index 84485fb0bd7..2b14e709edb 100644
--- a/src/hotspot/share/opto/callGenerator.hpp
+++ b/src/hotspot/share/opto/callGenerator.hpp
@@ -143,12 +143,6 @@ class CallGenerator : public ResourceObj {
static CallGenerator* for_string_late_inline(ciMethod* m, CallGenerator* inline_cg);
static CallGenerator* for_boxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
-
- // How to make a call but defer the decision whether to inline or not.
- static CallGenerator* for_warm_call(WarmCallInfo* ci,
- CallGenerator* if_cold,
- CallGenerator* if_hot);
-
static CallGenerator* for_late_inline_virtual(ciMethod* m, int vtable_index, float expected_uses);
// How to make a call that optimistically assumes a receiver type:
@@ -207,158 +201,4 @@ class InlineCallGenerator : public CallGenerator {
virtual bool is_inline() const { return true; }
};
-
-//---------------------------WarmCallInfo--------------------------------------
-// A struct to collect information about a given call site.
-// Helps sort call sites into "hot", "medium", and "cold".
-// Participates in the queueing of "medium" call sites for possible inlining.
-class WarmCallInfo : public ResourceObj {
- private:
-
- CallNode* _call; // The CallNode which may be inlined.
- CallGenerator* _hot_cg;// CG for expanding the call node
-
- // These are the metrics we use to evaluate call sites:
-
- float _count; // How often do we expect to reach this site?
- float _profit; // How much time do we expect to save by inlining?
- float _work; // How long do we expect the average call to take?
- float _size; // How big do we expect the inlined code to be?
-
- float _heat; // Combined score inducing total order on call sites.
- WarmCallInfo* _next; // Next cooler call info in pending queue.
-
- // Count is the number of times this call site is expected to be executed.
- // Large count is favorable for inlining, because the extra compilation
- // work will be amortized more completely.
-
- // Profit is a rough measure of the amount of time we expect to save
- // per execution of this site if we inline it. (1.0 == call overhead)
- // Large profit favors inlining. Negative profit disables inlining.
-
- // Work is a rough measure of the amount of time a typical out-of-line
- // call from this site is expected to take. (1.0 == call, no-op, return)
- // Small work is somewhat favorable for inlining, since methods with
- // short "hot" traces are more likely to inline smoothly.
-
- // Size is the number of graph nodes we expect this method to produce,
- // not counting the inlining of any further warm calls it may include.
- // Small size favors inlining, since small methods are more likely to
- // inline smoothly. The size is estimated by examining the native code
- // if available. The method bytecodes are also examined, assuming
- // empirically observed node counts for each kind of bytecode.
-
- // Heat is the combined "goodness" of a site's inlining. If we were
- // omniscient, it would be the difference of two sums of future execution
- // times of code emitted for this site (amortized across multiple sites if
- // sharing applies). The two sums are for versions of this call site with
- // and without inlining.
-
- // We approximate this mythical quantity by playing with averages,
- // rough estimates, and assumptions that history repeats itself.
- // The basic formula count * profit is heuristically adjusted
- // by looking at the expected compilation and execution times of
- // of the inlined call.
-
- // Note: Some of these metrics may not be present in the final product,
- // but exist in development builds to experiment with inline policy tuning.
-
- // This heuristic framework does not model well the very significant
- // effects of multiple-level inlining. It is possible to see no immediate
- // profit from inlining X->Y, but to get great profit from a subsequent
- // inlining X->Y->Z.
-
- // This framework does not take well into account the problem of N**2 code
- // size in a clique of mutually inlinable methods.
-
- WarmCallInfo* next() const { return _next; }
- void set_next(WarmCallInfo* n) { _next = n; }
-
- static WarmCallInfo _always_hot;
- static WarmCallInfo _always_cold;
-
- // Constructor intitialization of always_hot and always_cold
- WarmCallInfo(float c, float p, float w, float s) {
- _call = NULL;
- _hot_cg = NULL;
- _next = NULL;
- _count = c;
- _profit = p;
- _work = w;
- _size = s;
- _heat = 0;
- }
-
- public:
- // Because WarmInfo objects live over the entire lifetime of the
- // Compile object, they are allocated into the comp_arena, which
- // does not get resource marked or reset during the compile process
- void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
- void operator delete( void * ) { } // fast deallocation
-
- static WarmCallInfo* always_hot();
- static WarmCallInfo* always_cold();
-
- WarmCallInfo() {
- _call = NULL;
- _hot_cg = NULL;
- _next = NULL;
- _count = _profit = _work = _size = _heat = 0;
- }
-
- CallNode* call() const { return _call; }
- float count() const { return _count; }
- float size() const { return _size; }
- float work() const { return _work; }
- float profit() const { return _profit; }
- float heat() const { return _heat; }
-
- void set_count(float x) { _count = x; }
- void set_size(float x) { _size = x; }
- void set_work(float x) { _work = x; }
- void set_profit(float x) { _profit = x; }
- void set_heat(float x) { _heat = x; }
-
- // Load initial heuristics from profiles, etc.
- // The heuristics can be tweaked further by the caller.
- void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);
-
- static float MAX_VALUE() { return +1.0e10; }
- static float MIN_VALUE() { return -1.0e10; }
-
- float compute_heat() const;
-
- void set_call(CallNode* call) { _call = call; }
- void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }
-
- // Do not queue very hot or very cold calls.
- // Make very cold ones out of line immediately.
- // Inline very hot ones immediately.
- // These queries apply various tunable limits
- // to the above metrics in a systematic way.
- // Test for coldness before testing for hotness.
- bool is_cold() const;
- bool is_hot() const;
-
- // Force a warm call to be hot. This worklists the call node for inlining.
- void make_hot();
-
- // Force a warm call to be cold. This worklists the call node for out-of-lining.
- void make_cold();
-
- // A reproducible total ordering, in which heat is the major key.
- bool warmer_than(WarmCallInfo* that);
-
- // List management. These methods are called with the list head,
- // and return the new list head, inserting or removing the receiver.
- WarmCallInfo* insert_into(WarmCallInfo* head);
- WarmCallInfo* remove_from(WarmCallInfo* head);
-
-#ifndef PRODUCT
- void print() const;
- void print_all() const;
- int count_all() const;
-#endif
-};
-
#endif // SHARE_OPTO_CALLGENERATOR_HPP
diff --git a/src/hotspot/share/opto/classes.hpp b/src/hotspot/share/opto/classes.hpp
index aa1664e8e41..1e1a9ab77d1 100644
--- a/src/hotspot/share/opto/classes.hpp
+++ b/src/hotspot/share/opto/classes.hpp
@@ -44,6 +44,7 @@ macro(ArrayCopy)
macro(AryEq)
macro(AtanD)
macro(Binary)
+macro(Blackhole)
macro(Bool)
macro(BoxLock)
macro(ReverseBytesI)
diff --git a/src/hotspot/share/opto/coalesce.cpp b/src/hotspot/share/opto/coalesce.cpp
index 183e653a9e9..3fa627d42cb 100644
--- a/src/hotspot/share/opto/coalesce.cpp
+++ b/src/hotspot/share/opto/coalesce.cpp
@@ -535,7 +535,7 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui
// Factored code from copy_copy that computes extra interferences from
// lengthening a live range by double-coalescing.
-uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, Node *src_copy, Block *b, uint bindex, RegMask &rm, uint reg_degree, uint rm_size, uint lr1, uint lr2 ) {
+uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, Node *src_copy, Block *b, uint bindex, RegMask &rm, uint rm_size, uint reg_degree, uint lr1, uint lr2 ) {
assert(!lrgs(lr1)._fat_proj, "cannot coalesce fat_proj");
assert(!lrgs(lr2)._fat_proj, "cannot coalesce fat_proj");
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index afc113859ab..f72fcf9005c 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -288,13 +288,6 @@ void Compile::gvn_replace_by(Node* n, Node* nn) {
}
-static inline bool not_a_node(const Node* n) {
- if (n == NULL) return true;
- if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc.
- if (*(address*)n == badAddress) return true; // kill by Node::destruct
- return false;
-}
-
// Identify all nodes that are reachable from below, useful.
// Use breadth-first pass that records state in a Unique_Node_List,
// recursive traversal is slower.
@@ -438,7 +431,9 @@ void Compile::remove_useless_nodes(Unique_Node_List &useful) {
}
}
- remove_useless_nodes(_macro_nodes, useful); // remove useless macro and predicate opaq nodes
+ remove_useless_nodes(_macro_nodes, useful); // remove useless macro nodes
+ remove_useless_nodes(_predicate_opaqs, useful); // remove useless predicate opaque nodes
+ remove_useless_nodes(_skeleton_predicate_opaqs, useful);
remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes
remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
remove_useless_nodes(_inline_type_nodes, useful); // remove useless inline type nodes
@@ -558,6 +553,7 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
_do_cleanup(false),
_has_reserved_stack_access(target->has_reserved_stack_access()),
#ifndef PRODUCT
+ _igv_idx(0),
_trace_opto_output(directive->TraceOptoOutputOption),
_print_ideal(directive->PrintIdealOption),
#endif
@@ -587,7 +583,6 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
_Compile_types(mtCompiler),
_initial_gvn(NULL),
_for_igvn(NULL),
- _warm_calls(NULL),
_late_inlines(comp_arena(), 2, 0, NULL),
_string_late_inlines(comp_arena(), 2, 0, NULL),
_boxing_late_inlines(comp_arena(), 2, 0, NULL),
@@ -758,14 +753,6 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
// clone(), or the like.
set_default_node_notes(NULL);
- for (;;) {
- int successes = Inline_Warm();
- if (failing()) return;
- if (successes == 0) break;
- }
-
- // Drain the list.
- Finish_Warm();
#ifndef PRODUCT
if (should_print(1)) {
_printer->print_inlining();
@@ -867,6 +854,7 @@ Compile::Compile( ciEnv* ci_env,
_inlining_incrementally(false),
_has_reserved_stack_access(false),
#ifndef PRODUCT
+ _igv_idx(0),
_trace_opto_output(directive->TraceOptoOutputOption),
_print_ideal(directive->PrintIdealOption),
#endif
@@ -889,7 +877,6 @@ Compile::Compile( ciEnv* ci_env,
_Compile_types(mtCompiler),
_initial_gvn(NULL),
_for_igvn(NULL),
- _warm_calls(NULL),
_number_of_mh_late_inlines(0),
_native_invokers(),
_print_inlining_stream(NULL),
@@ -1812,57 +1799,6 @@ bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
return adr_idx == alias_idx;
}
-
-
-//---------------------------pop_warm_call-------------------------------------
-WarmCallInfo* Compile::pop_warm_call() {
- WarmCallInfo* wci = _warm_calls;
- if (wci != NULL) _warm_calls = wci->remove_from(wci);
- return wci;
-}
-
-//----------------------------Inline_Warm--------------------------------------
-int Compile::Inline_Warm() {
- // If there is room, try to inline some more warm call sites.
- // %%% Do a graph index compaction pass when we think we're out of space?
- if (!InlineWarmCalls) return 0;
-
- int calls_made_hot = 0;
- int room_to_grow = NodeCountInliningCutoff - unique();
- int amount_to_grow = MIN2(room_to_grow, (int)NodeCountInliningStep);
- int amount_grown = 0;
- WarmCallInfo* call;
- while (amount_to_grow > 0 && (call = pop_warm_call()) != NULL) {
- int est_size = (int)call->size();
- if (est_size > (room_to_grow - amount_grown)) {
- // This one won't fit anyway. Get rid of it.
- call->make_cold();
- continue;
- }
- call->make_hot();
- calls_made_hot++;
- amount_grown += est_size;
- amount_to_grow -= est_size;
- }
-
- if (calls_made_hot > 0) set_major_progress();
- return calls_made_hot;
-}
-
-
-//----------------------------Finish_Warm--------------------------------------
-void Compile::Finish_Warm() {
- if (!InlineWarmCalls) return;
- if (failing()) return;
- if (warm_calls() == NULL) return;
-
- // Clean up loose ends, if we are out of space for inlining.
- WarmCallInfo* call;
- while ((call = pop_warm_call()) != NULL) {
- call->make_cold();
- }
-}
-
//---------------------cleanup_loop_predicates-----------------------
// Remove the opaque nodes that protect the predicates so that all unused
// checks and uncommon_traps will be eliminated from the ideal graph
@@ -4043,6 +3979,8 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
}
break;
}
+ case Op_Blackhole:
+ break;
case Op_RangeCheck: {
RangeCheckNode* rc = n->as_RangeCheck();
Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
diff --git a/src/hotspot/share/opto/compile.hpp b/src/hotspot/share/opto/compile.hpp
index eb156c86450..7cd3080ef41 100644
--- a/src/hotspot/share/opto/compile.hpp
+++ b/src/hotspot/share/opto/compile.hpp
@@ -90,7 +90,6 @@ class TypeVect;
class Unique_Node_List;
class InlineTypeBaseNode;
class nmethod;
-class WarmCallInfo;
class Node_Stack;
struct Final_Reshape_Counts;
@@ -295,6 +294,7 @@ class Compile : public Phase {
bool _print_inlining; // True if we should print inlining for this compilation
bool _print_intrinsics; // True if we should print intrinsics for this compilation
#ifndef PRODUCT
+ uint _igv_idx; // Counter for IGV node identifiers
bool _trace_opto_output;
bool _print_ideal;
bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
@@ -383,7 +383,6 @@ class Compile : public Phase {
// Parsing, optimization
PhaseGVN* _initial_gvn; // Results of parse-time PhaseGVN
Unique_Node_List* _for_igvn; // Initial work-list for next round of Iterative GVN
- WarmCallInfo* _warm_calls; // Sorted work-list for heat-based inlining.
GrowableArray _late_inlines; // List of CallGenerators to be revisited after main parsing has finished.
GrowableArray _string_late_inlines; // same but for string operations
@@ -616,6 +615,7 @@ class Compile : public Phase {
}
#ifndef PRODUCT
+ uint next_igv_idx() { return _igv_idx++; }
bool trace_opto_output() const { return _trace_opto_output; }
bool print_ideal() const { return _print_ideal; }
bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
@@ -947,10 +947,6 @@ class Compile : public Phase {
void remove_useless_node(Node* dead);
- WarmCallInfo* warm_calls() const { return _warm_calls; }
- void set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
- WarmCallInfo* pop_warm_call();
-
// Record this CallGenerator for inlining at the end of parsing.
void add_late_inline(CallGenerator* cg) {
_late_inlines.insert_before(_late_inlines_pos, cg);
diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp
index a0989850b46..95ee08c926f 100644
--- a/src/hotspot/share/opto/doCall.cpp
+++ b/src/hotspot/share/opto/doCall.cpp
@@ -170,17 +170,10 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// Try inlining a bytecoded method:
if (!call_does_dispatch) {
InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
- WarmCallInfo scratch_ci;
bool should_delay = false;
- WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
- assert(ci != &scratch_ci, "do not let this pointer escape");
- bool allow_inline = (ci != NULL && !ci->is_cold());
- bool require_inline = (allow_inline && ci->is_hot());
-
- if (allow_inline) {
+ if (ilt->ok_to_inline(callee, jvms, profile, should_delay)) {
CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
-
- if (require_inline && cg != NULL) {
+ if (cg != NULL) {
// Delay the inlining of this method to give us the
// opportunity to perform some high level optimizations
// first.
@@ -192,16 +185,10 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
return CallGenerator::for_vector_reboxing_late_inline(callee, cg);
} else if ((should_delay || AlwaysIncrementalInline)) {
return CallGenerator::for_late_inline(callee, cg);
+ } else {
+ return cg;
}
}
- if (cg == NULL || should_delay) {
- // Fall through.
- } else if (require_inline || !InlineWarmCalls) {
- return cg;
- } else {
- CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
- return CallGenerator::for_warm_call(ci, cold_cg, cg);
- }
}
}
diff --git a/src/hotspot/share/opto/idealGraphPrinter.cpp b/src/hotspot/share/opto/idealGraphPrinter.cpp
index 11dfc43a79f..05e3e18bdcd 100644
--- a/src/hotspot/share/opto/idealGraphPrinter.cpp
+++ b/src/hotspot/share/opto/idealGraphPrinter.cpp
@@ -340,14 +340,12 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
if (edges) {
- // Output edge
- node_idx_t dest_id = n->_idx;
- for ( uint i = 0; i < n->len(); i++ ) {
- if ( n->in(i) ) {
+ for (uint i = 0; i < n->len(); i++) {
+ if (n->in(i)) {
Node *source = n->in(i);
begin_elem(EDGE_ELEMENT);
- print_attr(FROM_PROPERTY, source->_idx);
- print_attr(TO_PROPERTY, dest_id);
+ print_attr(FROM_PROPERTY, source->_igv_idx);
+ print_attr(TO_PROPERTY, n->_igv_idx);
print_attr(INDEX_PROPERTY, i);
end_elem();
}
@@ -357,7 +355,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
// Output node
begin_head(NODE_ELEMENT);
- print_attr(NODE_ID_PROPERTY, n->_idx);
+ print_attr(NODE_ID_PROPERTY, n->_igv_idx);
end_head();
head(PROPERTIES_ELEMENT);
@@ -452,13 +450,10 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
} else {
print_prop("is_dontcare", "false");
}
-
-#ifdef ASSERT
Node* old = C->matcher()->find_old_node(node);
if (old != NULL) {
print_prop("old_node_idx", old->_idx);
}
-#endif
}
if (node->is_Proj()) {
@@ -715,7 +710,7 @@ void IdealGraphPrinter::print(const char *name, Node *node) {
head(NODES_ELEMENT);
for (uint s = 0; s < block->number_of_nodes(); s++) {
begin_elem(NODE_ELEMENT);
- print_attr(NODE_ID_PROPERTY, block->get_node(s)->_idx);
+ print_attr(NODE_ID_PROPERTY, block->get_node(s)->_igv_idx);
end_elem();
}
tail(NODES_ELEMENT);
diff --git a/src/hotspot/share/opto/lcm.cpp b/src/hotspot/share/opto/lcm.cpp
index 28e6ad24f74..710b48edb05 100644
--- a/src/hotspot/share/opto/lcm.cpp
+++ b/src/hotspot/share/opto/lcm.cpp
@@ -378,7 +378,21 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
// Check if we need to hoist decodeHeapOop_not_null first.
Block *valb = get_block_for_node(val);
if( block != valb && block->_dom_depth < valb->_dom_depth ) {
- // Hoist it up to the end of the test block.
+ // Hoist it up to the end of the test block together with its inputs if they exist.
+ for (uint i = 2; i < val->req(); i++) {
+ // DecodeN has 2 regular inputs + optional MachTemp or load Base inputs.
+ Node *temp = val->in(i);
+ Block *tempb = get_block_for_node(temp);
+ if (!tempb->dominates(block)) {
+ assert(block->dominates(tempb), "sanity check: temp node placement");
+ // We only expect nodes without further inputs, like MachTemp or load Base.
+ assert(temp->req() == 0 || (temp->req() == 1 && temp->in(0) == (Node*)C->root()),
+ "need for recursive hoisting not expected");
+ tempb->find_remove(temp);
+ block->add_inst(temp);
+ map_node_to_block(temp, block);
+ }
+ }
valb->find_remove(val);
block->add_inst(val);
map_node_to_block(val, block);
diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp
index 053c24bb5d2..23a922cc86c 100644
--- a/src/hotspot/share/opto/library_call.cpp
+++ b/src/hotspot/share/opto/library_call.cpp
@@ -112,7 +112,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
Node* ctrl = kit.control();
#endif
// Try to inline the intrinsic.
- if ((CheckIntrinsics ? callee->intrinsic_candidate() : true) &&
+ if (callee->check_intrinsic_candidate() &&
kit.try_to_inline(_last_predicate)) {
const char *inline_msg = is_virtual() ? "(intrinsic, virtual)"
: "(intrinsic)";
@@ -674,6 +674,9 @@ bool LibraryCallKit::try_to_inline(int predicate) {
case vmIntrinsics::_getObjectSize:
return inline_getObjectSize();
+ case vmIntrinsics::_blackhole:
+ return inline_blackhole();
+
default:
// If you get here, it may be that someone has added a new intrinsic
// to the list in vmIntrinsics.hpp without implementing it here.
@@ -1638,6 +1641,65 @@ bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, c
return true;
}
+//------------------------------inline_math_pow-----------------------------
+bool LibraryCallKit::inline_math_pow() {
+ Node* exp = round_double_node(argument(2));
+ const TypeD* d = _gvn.type(exp)->isa_double_constant();
+ if (d != NULL) {
+ if (d->getd() == 2.0) {
+ // Special case: pow(x, 2.0) => x * x
+ Node* base = round_double_node(argument(0));
+ set_result(_gvn.transform(new MulDNode(base, base)));
+ return true;
+ } else if (d->getd() == 0.5 && Matcher::match_rule_supported(Op_SqrtD)) {
+ // Special case: pow(x, 0.5) => sqrt(x)
+ Node* base = round_double_node(argument(0));
+ Node* zero = _gvn.zerocon(T_DOUBLE);
+
+ RegionNode* region = new RegionNode(3);
+ Node* phi = new PhiNode(region, Type::DOUBLE);
+
+ Node* cmp = _gvn.transform(new CmpDNode(base, zero));
+ // According to the API specs, pow(-0.0, 0.5) = 0.0 and sqrt(-0.0) = -0.0.
+ // So pow(-0.0, 0.5) shouldn't be replaced with sqrt(-0.0).
+ // -0.0/+0.0 are both excluded since floating-point comparison doesn't distinguish -0.0 from +0.0.
+ Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::le));
+
+ Node* if_pow = generate_slow_guard(test, NULL);
+ Node* value_sqrt = _gvn.transform(new SqrtDNode(C, control(), base));
+ phi->init_req(1, value_sqrt);
+ region->init_req(1, control());
+
+ if (if_pow != NULL) {
+ set_control(if_pow);
+ address target = StubRoutines::dpow() != NULL ? StubRoutines::dpow() :
+ CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
+ const TypePtr* no_memory_effects = NULL;
+ Node* trig = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(), target, "POW",
+ no_memory_effects, base, top(), exp, top());
+ Node* value_pow = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0));
+#ifdef ASSERT
+ Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1));
+ assert(value_top == top(), "second value must be top");
+#endif
+ phi->init_req(2, value_pow);
+ region->init_req(2, _gvn.transform(new ProjNode(trig, TypeFunc::Control)));
+ }
+
+ C->set_has_split_ifs(true); // Has chance for split-if optimization
+ set_control(_gvn.transform(region));
+ record_for_igvn(region);
+ set_result(_gvn.transform(phi));
+
+ return true;
+ }
+ }
+
+ return StubRoutines::dpow() != NULL ?
+ runtime_math(OptoRuntime::Math_DD_D_Type(), StubRoutines::dpow(), "dpow") :
+ runtime_math(OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
+}
+
//------------------------------inline_math_native-----------------------------
bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
#define FN_PTR(f) CAST_FROM_FN_PTR(address, f)
@@ -1678,25 +1740,13 @@ bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
return StubRoutines::dexp() != NULL ?
runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dexp(), "dexp") :
runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dexp), "EXP");
- case vmIntrinsics::_dpow: {
- Node* exp = round_double_node(argument(2));
- const TypeD* d = _gvn.type(exp)->isa_double_constant();
- if (d != NULL && d->getd() == 2.0) {
- // Special case: pow(x, 2.0) => x * x
- Node* base = round_double_node(argument(0));
- set_result(_gvn.transform(new MulDNode(base, base)));
- return true;
- }
- return StubRoutines::dpow() != NULL ?
- runtime_math(OptoRuntime::Math_DD_D_Type(), StubRoutines::dpow(), "dpow") :
- runtime_math(OptoRuntime::Math_DD_D_Type(), FN_PTR(SharedRuntime::dpow), "POW");
- }
#undef FN_PTR
+ case vmIntrinsics::_dpow: return inline_math_pow();
case vmIntrinsics::_dcopySign: return inline_double_math(id);
case vmIntrinsics::_fcopySign: return inline_math(id);
- case vmIntrinsics::_dsignum: return inline_double_math(id);
- case vmIntrinsics::_fsignum: return inline_math(id);
+ case vmIntrinsics::_dsignum: return Matcher::match_rule_supported(Op_SignumD) ? inline_double_math(id) : false;
+ case vmIntrinsics::_fsignum: return Matcher::match_rule_supported(Op_SignumF) ? inline_math(id) : false;
// These intrinsics are not yet correctly implemented
case vmIntrinsics::_datan2:
@@ -2023,7 +2073,7 @@ LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, BasicType type)
}
}
-Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type, bool can_cast) {
+Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType type, bool can_cast) {
Node* uncasted_base = base;
int kind = classify_unsafe_addr(uncasted_base, offset, type);
if (kind == Type::RawPtr) {
@@ -2268,7 +2318,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
uint old_sp = sp();
SafePointNode* old_map = clone_map();
- Node* adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
+ Node* adr = make_unsafe_address(base, offset, type, kind == Relaxed);
if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
if (type != T_OBJECT && (inline_klass == NULL || !inline_klass->has_object_fields())) {
@@ -2692,7 +2742,7 @@ bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadSt
// Save state and restore on bailout
uint old_sp = sp();
SafePointNode* old_map = clone_map();
- Node* adr = make_unsafe_address(base, offset, ACCESS_WRITE | ACCESS_READ, type, false);
+ Node* adr = make_unsafe_address(base, offset,type, false);
const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
Compile::AliasType* alias_type = C->alias_type(adr_type);
@@ -4273,8 +4323,8 @@ bool LibraryCallKit::inline_unsafe_copyMemory() {
assert(Unsafe_field_offset_to_byte_offset(11) == 11,
"fieldOffset must be byte-scaled");
- Node* src = make_unsafe_address(src_ptr, src_off, ACCESS_READ);
- Node* dst = make_unsafe_address(dst_ptr, dst_off, ACCESS_WRITE);
+ Node* src = make_unsafe_address(src_ptr, src_off);
+ Node* dst = make_unsafe_address(dst_ptr, dst_off);
// Conservatively insert a memory barrier on all memory slices.
// Do not let writes of the copy source or destination float below the copy.
@@ -5503,8 +5553,8 @@ bool LibraryCallKit::inline_vectorizedMismatch() {
Node* call;
jvms()->set_should_reexecute(true);
- Node* obja_adr = make_unsafe_address(obja, aoffset, ACCESS_READ);
- Node* objb_adr = make_unsafe_address(objb, boffset, ACCESS_READ);
+ Node* obja_adr = make_unsafe_address(obja, aoffset);
+ Node* objb_adr = make_unsafe_address(objb, boffset);
call = make_runtime_call(RC_LEAF,
OptoRuntime::vectorizedMismatch_Type(),
@@ -7203,3 +7253,23 @@ bool LibraryCallKit::inline_getObjectSize() {
return true;
}
+
+//------------------------------- inline_blackhole --------------------------------------
+//
+// Make sure all arguments to this node are alive.
+// This matches methods that were requested to be blackholed through compile commands.
+//
+bool LibraryCallKit::inline_blackhole() {
+ assert(callee()->is_static(), "Should have been checked before: only static methods here");
+ assert(callee()->is_empty(), "Should have been checked before: only empty methods here");
+ assert(callee()->holder()->is_loaded(), "Should have been checked before: only methods for loaded classes here");
+
+ // Bind call arguments as blackhole arguments to keep them alive
+ Node* bh = insert_mem_bar(Op_Blackhole);
+ uint nargs = callee()->arg_size();
+ for (uint i = 0; i < nargs; i++) {
+ bh->add_req(argument(i));
+ }
+
+ return true;
+}
diff --git a/src/hotspot/share/opto/library_call.hpp b/src/hotspot/share/opto/library_call.hpp
index 32f52b1ce81..03f591999f7 100644
--- a/src/hotspot/share/opto/library_call.hpp
+++ b/src/hotspot/share/opto/library_call.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -232,6 +232,7 @@ class LibraryCallKit : public GraphKit {
bool inline_math_native(vmIntrinsics::ID id);
bool inline_math(vmIntrinsics::ID id);
bool inline_double_math(vmIntrinsics::ID id);
+ bool inline_math_pow();
template
bool inline_math_overflow(Node* arg1, Node* arg2);
void inline_math_mathExact(Node* math, Node* test);
@@ -249,7 +250,7 @@ class LibraryCallKit : public GraphKit {
Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
// This returns Type::AnyPtr, RawPtr, or OopPtr.
int classify_unsafe_addr(Node* &base, Node* &offset, BasicType type);
- Node* make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type = T_ILLEGAL, bool can_cast = false);
+ Node* make_unsafe_address(Node*& base, Node* offset, BasicType type = T_ILLEGAL, bool can_cast = false);
typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
DecoratorSet mo_decorator_for_access_kind(AccessKind kind);
@@ -376,5 +377,7 @@ class LibraryCallKit : public GraphKit {
}
bool inline_getObjectSize();
+
+ bool inline_blackhole();
};
diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp
index ade1f07c179..010d1885e07 100644
--- a/src/hotspot/share/opto/loopopts.cpp
+++ b/src/hotspot/share/opto/loopopts.cpp
@@ -725,7 +725,9 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
break;
}
}
- if (phi == NULL) break;
+ if (phi == NULL || _igvn.type(phi) == Type::TOP) {
+ break;
+ }
if (PrintOpto && VerifyLoopOptimizations) { tty->print_cr("CMOV"); }
// Move speculative ops
wq.push(phi);
diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp
index 2382fc3c5dd..d2343f57c9a 100644
--- a/src/hotspot/share/opto/macro.cpp
+++ b/src/hotspot/share/opto/macro.cpp
@@ -1469,7 +1469,7 @@ void PhaseMacroExpand::expand_allocate_common(
intx prefetch_lines = length != NULL ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
- Node* fast_oop = bs->obj_allocate(this, ctrl, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
+ Node* fast_oop = bs->obj_allocate(this, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
fast_oop_ctrl, fast_oop_rawmem,
prefetch_lines);
@@ -2704,7 +2704,7 @@ void PhaseMacroExpand::expand_mh_intrinsic_return(CallStaticJavaNode* call) {
Node* layout_val = make_load(NULL, mem, klass_node, in_bytes(Klass::layout_helper_offset()), TypeInt::INT, T_INT);
Node* size_in_bytes = ConvI2X(layout_val);
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
- Node* fast_oop = bs->obj_allocate(this, allocation_ctl, mem, allocation_ctl, size_in_bytes, io, needgc_ctrl,
+ Node* fast_oop = bs->obj_allocate(this, mem, allocation_ctl, size_in_bytes, io, needgc_ctrl,
fast_oop_ctrl, fast_oop_rawmem,
AllocateInstancePrefetchLines);
// Allocation succeed, initialize buffered inline instance header firstly,
diff --git a/src/hotspot/share/opto/matcher.cpp b/src/hotspot/share/opto/matcher.cpp
index f94aab0c051..e478af4c6a0 100644
--- a/src/hotspot/share/opto/matcher.cpp
+++ b/src/hotspot/share/opto/matcher.cpp
@@ -71,10 +71,11 @@ Matcher::Matcher()
_end_inst_chain_rule(_END_INST_CHAIN_RULE),
_must_clone(must_clone),
_shared_nodes(C->comp_arena()),
-#ifdef ASSERT
+#ifndef PRODUCT
_old2new_map(C->comp_arena()),
_new2old_map(C->comp_arena()),
-#endif
+ _reused(C->comp_arena()),
+#endif // !PRODUCT
_allocation_started(false),
_ruleName(ruleName),
_register_save_policy(register_save_policy),
@@ -1137,16 +1138,12 @@ Node *Matcher::xform( Node *n, int max_stack ) {
mask = return_values_mask(n->in(0)->as_Call()->tf()->range_cc());
}
m = n->in(0)->as_Multi()->match(n->as_Proj(), this, mask);
-#ifdef ASSERT
- _new2old_map.map(m->_idx, n);
-#endif
+ NOT_PRODUCT(record_new2old(m, n);)
if (m->in(0) != NULL) // m might be top
collect_null_checks(m, n);
} else { // Else just a regular 'ol guy
m = n->clone(); // So just clone into new-space
-#ifdef ASSERT
- _new2old_map.map(m->_idx, n);
-#endif
+ NOT_PRODUCT(record_new2old(m, n);)
// Def-Use edges will be added incrementally as Uses
// of this node are matched.
assert(m->outcnt() == 0, "no Uses of this clone yet");
@@ -1204,9 +1201,7 @@ Node *Matcher::xform( Node *n, int max_stack ) {
// || op == Op_BoxLock // %%%% enable this and remove (+++) in chaitin.cpp
) {
m = m->clone();
-#ifdef ASSERT
- _new2old_map.map(m->_idx, n);
-#endif
+ NOT_PRODUCT(record_new2old(m, n));
mstack.push(m, Post_Visit, n, i); // Don't need to visit
mstack.push(m->in(0), Visit, m, 0);
} else {
@@ -1543,10 +1538,8 @@ MachNode *Matcher::match_tree( const Node *n ) {
}
// Reduce input tree based upon the state labels to machine Nodes
MachNode *m = ReduceInst(s, s->rule(mincost), mem);
-#ifdef ASSERT
- _old2new_map.map(n->_idx, m);
- _new2old_map.map(m->_idx, (Node*)n);
-#endif
+ // New-to-old mapping is done in ReduceInst, to cover complex instructions.
+ NOT_PRODUCT(_old2new_map.map(n->_idx, m);)
// Add any Matcher-ignored edges
uint cnt = n->req();
@@ -1803,6 +1796,7 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
assert( mach->_opnds[0] != NULL, "Missing result operand" );
Node *leaf = s->_leaf;
+ NOT_PRODUCT(record_new2old(mach, leaf);)
// Check for instruction or instruction chain rule
if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
@@ -1871,9 +1865,7 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
for( uint i=0; ireq(); i++ ) {
mach->set_req(i,NULL);
}
-#ifdef ASSERT
- _new2old_map.map(ex->_idx, s->_leaf);
-#endif
+ NOT_PRODUCT(record_new2old(ex, s->_leaf);)
}
// PhaseChaitin::fixup_spills will sometimes generate spill code
@@ -2077,7 +2069,7 @@ OptoReg::Name Matcher::find_receiver() {
return OptoReg::as_OptoReg(regs.first());
}
-bool Matcher::is_vshift_con_pattern(Node *n, Node *m) {
+bool Matcher::is_vshift_con_pattern(Node* n, Node* m) {
if (n != NULL && m != NULL) {
return VectorNode::is_vector_shift(n) &&
VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
@@ -2478,12 +2470,22 @@ void Matcher::find_shared_post_visit(Node* n, uint opcode) {
}
}
-#ifdef ASSERT
+#ifndef PRODUCT
+void Matcher::record_new2old(Node* newn, Node* old) {
+ _new2old_map.map(newn->_idx, old);
+ if (!_reused.test_set(old->_igv_idx)) {
+ // Reuse the Ideal-level IGV identifier so that the node can be tracked
+ // across matching. If there are multiple machine nodes expanded from the
+ // same Ideal node, only one will reuse its IGV identifier.
+ newn->_igv_idx = old->_igv_idx;
+ }
+}
+
// machine-independent root to machine-dependent root
void Matcher::dump_old2new_map() {
_old2new_map.dump();
}
-#endif
+#endif // !PRODUCT
//---------------------------collect_null_checks-------------------------------
// Find null checks in the ideal graph; write a machine-specific node for
diff --git a/src/hotspot/share/opto/matcher.hpp b/src/hotspot/share/opto/matcher.hpp
index ea4a4ff5085..203cb33f3e5 100644
--- a/src/hotspot/share/opto/matcher.hpp
+++ b/src/hotspot/share/opto/matcher.hpp
@@ -121,7 +121,7 @@ class Matcher : public PhaseTransform {
bool find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx);
void find_shared_post_visit(Node* n, uint opcode);
- bool is_vshift_con_pattern(Node *n, Node *m);
+ bool is_vshift_con_pattern(Node* n, Node* m);
// Debug and profile information for nodes in old space:
GrowableArray* _old_node_note_array;
@@ -135,8 +135,11 @@ class Matcher : public PhaseTransform {
Node_Array _shared_nodes;
- debug_only(Node_Array _old2new_map;) // Map roots of ideal-trees to machine-roots
- debug_only(Node_Array _new2old_map;) // Maps machine nodes back to ideal
+#ifndef PRODUCT
+ Node_Array _old2new_map; // Map roots of ideal-trees to machine-roots
+ Node_Array _new2old_map; // Maps machine nodes back to ideal
+ VectorSet _reused; // Ideal IGV identifiers reused by machine nodes
+#endif // !PRODUCT
// Accessors for the inherited field PhaseTransform::_nodes:
void grow_new_node_array(uint idx_limit) {
@@ -558,13 +561,16 @@ class Matcher : public PhaseTransform {
// Does n lead to an uncommon trap that can cause deoptimization?
static bool branches_to_uncommon_trap(const Node *n);
-#ifdef ASSERT
+#ifndef PRODUCT
+ // Record mach-to-Ideal mapping, reusing the Ideal IGV identifier if possible.
+ void record_new2old(Node* newn, Node* old);
+
void dump_old2new_map(); // machine-independent to machine-dependent
Node* find_old_node(Node* new_node) {
return _new2old_map[new_node->_idx];
}
-#endif
+#endif // !PRODUCT
};
#endif // SHARE_OPTO_MATCHER_HPP
diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp
index 4fcd9b7234d..453f3b088b8 100644
--- a/src/hotspot/share/opto/memnode.cpp
+++ b/src/hotspot/share/opto/memnode.cpp
@@ -36,6 +36,7 @@
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/cfgnode.hpp"
+#include "opto/regalloc.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
@@ -49,6 +50,7 @@
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
+#include "opto/vectornode.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"
@@ -1136,8 +1138,17 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
// Thus, we are able to replace L by V.
}
// Now prove that we have a LoadQ matched to a StoreQ, for some Q.
- if (store_Opcode() != st->Opcode())
+ if (store_Opcode() != st->Opcode()) {
return NULL;
+ }
+ // LoadVector/StoreVector needs additional check to ensure the types match.
+ if (store_Opcode() == Op_StoreVector) {
+ const TypeVect* in_vt = st->as_StoreVector()->vect_type();
+ const TypeVect* out_vt = as_LoadVector()->vect_type();
+ if (in_vt != out_vt) {
+ return NULL;
+ }
+ }
return st->in(MemNode::ValueIn);
}
@@ -1398,10 +1409,10 @@ Node* StoreNode::convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Ty
// merging a newly allocated object and a load from the cache.
// We want to replace this load with the original incoming
// argument to the valueOf call.
-Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
- assert(phase->C->eliminate_boxing(), "sanity");
+Node* LoadNode::eliminate_autobox(PhaseIterGVN* igvn) {
+ assert(igvn->C->eliminate_boxing(), "sanity");
intptr_t ignore = 0;
- Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore);
+ Node* base = AddPNode::Ideal_base_and_offset(in(Address), igvn, ignore);
if ((base == NULL) || base->is_Phi()) {
// Push the loads from the phi that comes from valueOf up
// through it to allow elimination of the loads and the recovery
@@ -1435,7 +1446,7 @@ Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
if (count > 0 && elements[0]->is_Con() &&
(count == 1 ||
(count == 2 && elements[1]->Opcode() == Op_LShiftX &&
- elements[1]->in(2) == phase->intcon(shift)))) {
+ elements[1]->in(2) == igvn->intcon(shift)))) {
ciObjArray* array = base_type->const_oop()->as_obj_array();
// Fetch the box object cache[0] at the base of the array and get its value
ciInstance* box = array->obj_at(0)->as_instance();
@@ -1462,43 +1473,45 @@ Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
// Add up all the offsets making of the address of the load
Node* result = elements[0];
for (int i = 1; i < count; i++) {
- result = phase->transform(new AddXNode(result, elements[i]));
+ result = igvn->transform(new AddXNode(result, elements[i]));
}
// Remove the constant offset from the address and then
- result = phase->transform(new AddXNode(result, phase->MakeConX(-(int)offset)));
+ result = igvn->transform(new AddXNode(result, igvn->MakeConX(-(int)offset)));
// remove the scaling of the offset to recover the original index.
- if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
+ if (result->Opcode() == Op_LShiftX && result->in(2) == igvn->intcon(shift)) {
// Peel the shift off directly but wrap it in a dummy node
// since Ideal can't return existing nodes
- result = new RShiftXNode(result->in(1), phase->intcon(0));
+ igvn->_worklist.push(result); // remove dead node later
+ result = new RShiftXNode(result->in(1), igvn->intcon(0));
} else if (result->is_Add() && result->in(2)->is_Con() &&
result->in(1)->Opcode() == Op_LShiftX &&
- result->in(1)->in(2) == phase->intcon(shift)) {
+ result->in(1)->in(2) == igvn->intcon(shift)) {
// We can't do general optimization: ((X<> Z ==> X + (Y>>Z)
// but for boxing cache access we know that X<in(2), phase->intcon(shift));
- result = new AddXNode(result->in(1)->in(1), phase->transform(add_con));
+ igvn->_worklist.push(result); // remove dead node later
+ Node* add_con = new RShiftXNode(result->in(2), igvn->intcon(shift));
+ result = new AddXNode(result->in(1)->in(1), igvn->transform(add_con));
} else {
- result = new RShiftXNode(result, phase->intcon(shift));
+ result = new RShiftXNode(result, igvn->intcon(shift));
}
#ifdef _LP64
if (bt != T_LONG) {
- result = new ConvL2INode(phase->transform(result));
+ result = new ConvL2INode(igvn->transform(result));
}
#else
if (bt == T_LONG) {
- result = new ConvI2LNode(phase->transform(result));
+ result = new ConvI2LNode(igvn->transform(result));
}
#endif
// Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair).
// Need to preserve unboxing load type if it is unsigned.
switch(this->Opcode()) {
case Op_LoadUB:
- result = new AndINode(phase->transform(result), phase->intcon(0xFF));
+ result = new AndINode(igvn->transform(result), igvn->intcon(0xFF));
break;
case Op_LoadUS:
- result = new AndINode(phase->transform(result), phase->intcon(0xFFFF));
+ result = new AndINode(igvn->transform(result), igvn->intcon(0xFFFF));
break;
}
return result;
@@ -1791,7 +1804,8 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
(t_oop->is_known_instance_field() ||
t_oop->is_ptr_to_boxed_value())) {
PhaseIterGVN *igvn = phase->is_IterGVN();
- if (igvn != NULL && igvn->_worklist.member(opt_mem)) {
+ assert(igvn != NULL, "must be PhaseIterGVN when can_reshape is true");
+ if (igvn->_worklist.member(opt_mem)) {
// Delay this transformation until memory Phi is processed.
igvn->_worklist.push(this);
return NULL;
@@ -1801,7 +1815,7 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (result != NULL) return result;
if (t_oop->is_ptr_to_boxed_value()) {
- Node* result = eliminate_autobox(phase);
+ Node* result = eliminate_autobox(igvn);
if (result != NULL) return result;
}
}
@@ -3416,6 +3430,7 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
case Op_OnSpinWait: return new OnSpinWaitNode(C, atp, pn);
case Op_Initialize: return new InitializeNode(C, atp, pn);
case Op_MemBarStoreStore: return new MemBarStoreStoreNode(C, atp, pn);
+ case Op_Blackhole: return new BlackholeNode(C, atp, pn);
default: ShouldNotReachHere(); return NULL;
}
}
@@ -3650,6 +3665,27 @@ MemBarNode* MemBarNode::leading_membar() const {
return mb;
}
+#ifndef PRODUCT
+void BlackholeNode::format(PhaseRegAlloc* ra, outputStream* st) const {
+ st->print("blackhole ");
+ bool first = true;
+ for (uint i = 0; i < req(); i++) {
+ Node* n = in(i);
+ if (n != NULL && OptoReg::is_valid(ra->get_reg_first(n))) {
+ if (first) {
+ first = false;
+ } else {
+ st->print(", ");
+ }
+ char buf[128];
+ ra->dump_register(n, buf);
+ st->print("%s", buf);
+ }
+ }
+ st->cr();
+}
+#endif
+
//===========================InitializeNode====================================
// SUMMARY:
// This node acts as a memory barrier on raw memory, after some raw stores.
diff --git a/src/hotspot/share/opto/memnode.hpp b/src/hotspot/share/opto/memnode.hpp
index 822cf133421..ae274cdefa8 100644
--- a/src/hotspot/share/opto/memnode.hpp
+++ b/src/hotspot/share/opto/memnode.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -252,7 +252,7 @@ class LoadNode : public MemNode {
Node* split_through_phi(PhaseGVN *phase);
// Recover original value from boxed values
- Node *eliminate_autobox(PhaseGVN *phase);
+ Node *eliminate_autobox(PhaseIterGVN *igvn);
// Compute a new Type for this node. Basically we just do the pre-check,
// then call the virtual add() to set the type.
@@ -1348,6 +1348,26 @@ class OnSpinWaitNode: public MemBarNode {
virtual int Opcode() const;
};
+//------------------------------BlackholeNode----------------------------
+// Blackhole all arguments. This node would survive through the compiler
+// the effects on its arguments, and would be finally matched to nothing.
+class BlackholeNode : public MemBarNode {
+public:
+ BlackholeNode(Compile* C, int alias_idx, Node* precedent)
+ : MemBarNode(C, alias_idx, precedent) {}
+ virtual int Opcode() const;
+ virtual uint ideal_reg() const { return 0; } // not matched in the AD file
+ const RegMask &in_RegMask(uint idx) const {
+ // Fake the incoming arguments mask for blackholes: accept all registers
+ // and all stack slots. This would avoid any redundant register moves
+ // for blackhole inputs.
+ return RegMask::All;
+ }
+#ifndef PRODUCT
+ virtual void format(PhaseRegAlloc* ra, outputStream* st) const;
+#endif
+};
+
// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
diff --git a/src/hotspot/share/opto/multnode.cpp b/src/hotspot/share/opto/multnode.cpp
index 1af091afb13..c27378cddc8 100644
--- a/src/hotspot/share/opto/multnode.cpp
+++ b/src/hotspot/share/opto/multnode.cpp
@@ -140,7 +140,7 @@ void ProjNode::dump_spec(outputStream *st) const { st->print("#%d",_con); if(_is
void ProjNode::dump_compact_spec(outputStream *st) const {
for (DUIterator i = this->outs(); this->has_out(i); i++) {
Node* o = this->out(i);
- if (NotANode(o)) {
+ if (not_a_node(o)) {
st->print("[?]");
} else if (o == NULL) {
st->print("[_]");
diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp
index 0b54666ad3e..cb6edb3e586 100644
--- a/src/hotspot/share/opto/node.cpp
+++ b/src/hotspot/share/opto/node.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -303,6 +303,7 @@ static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
inline int Node::Init(int req) {
Compile* C = Compile::current();
int idx = C->next_unique();
+ NOT_PRODUCT(_igv_idx = C->next_igv_idx());
// Allocate memory for the necessary number of edges.
if (req > 0) {
@@ -531,6 +532,7 @@ Node *Node::clone() const {
bs->register_potential_barrier_node(n);
n->set_idx(C->next_unique()); // Get new unique index as well
+ NOT_PRODUCT(n->_igv_idx = C->next_igv_idx());
debug_only( n->verify_construction() );
NOT_PRODUCT(nodes_created++);
// Do not patch over the debug_idx of a clone, because it makes it
@@ -563,6 +565,10 @@ Node *Node::clone() const {
if (cg != NULL) {
CallGenerator* cloned_cg = cg->with_call_node(n->as_Call());
n->as_Call()->set_generator(cloned_cg);
+
+ C->print_inlining_assert_ready();
+ C->print_inlining_move_to(cg);
+ C->print_inlining_update(cloned_cg);
}
}
if (n->is_SafePoint()) {
@@ -1659,7 +1665,7 @@ Node* Node::find(const int idx, bool only_ctrl) {
}
bool Node::add_to_worklist(Node* n, Node_List* worklist, Arena* old_arena, VectorSet* old_space, VectorSet* new_space) {
- if (NotANode(n)) {
+ if (not_a_node(n)) {
return false; // Gracefully handle NULL, -1, 0xabababab, etc.
}
@@ -1687,14 +1693,14 @@ static bool is_disconnected(const Node* n) {
void Node::dump_orig(outputStream *st, bool print_key) const {
Compile* C = Compile::current();
Node* orig = _debug_orig;
- if (NotANode(orig)) orig = NULL;
+ if (not_a_node(orig)) orig = NULL;
if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
if (orig == NULL) return;
if (print_key) {
st->print(" !orig=");
}
Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
- if (NotANode(fast)) fast = NULL;
+ if (not_a_node(fast)) fast = NULL;
while (orig != NULL) {
bool discon = is_disconnected(orig); // if discon, print [123] else 123
if (discon) st->print("[");
@@ -1703,16 +1709,16 @@ void Node::dump_orig(outputStream *st, bool print_key) const {
st->print("%d", orig->_idx);
if (discon) st->print("]");
orig = orig->debug_orig();
- if (NotANode(orig)) orig = NULL;
+ if (not_a_node(orig)) orig = NULL;
if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
if (orig != NULL) st->print(",");
if (fast != NULL) {
// Step fast twice for each single step of orig:
fast = fast->debug_orig();
- if (NotANode(fast)) fast = NULL;
+ if (not_a_node(fast)) fast = NULL;
if (fast != NULL && fast != orig) {
fast = fast->debug_orig();
- if (NotANode(fast)) fast = NULL;
+ if (not_a_node(fast)) fast = NULL;
}
if (fast == orig) {
st->print("...");
@@ -1725,7 +1731,7 @@ void Node::dump_orig(outputStream *st, bool print_key) const {
void Node::set_debug_orig(Node* orig) {
_debug_orig = orig;
if (BreakAtNode == 0) return;
- if (NotANode(orig)) orig = NULL;
+ if (not_a_node(orig)) orig = NULL;
int trip = 10;
while (orig != NULL) {
if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
@@ -1734,7 +1740,7 @@ void Node::set_debug_orig(Node* orig) {
BREAKPOINT;
}
orig = orig->debug_orig();
- if (NotANode(orig)) orig = NULL;
+ if (not_a_node(orig)) orig = NULL;
if (trip-- <= 0) break;
}
}
@@ -1830,8 +1836,8 @@ void Node::dump_req(outputStream *st) const {
Node* d = in(i);
if (d == NULL) {
st->print("_ ");
- } else if (NotANode(d)) {
- st->print("NotANode "); // uninitialized, sentinel, garbage, etc.
+ } else if (not_a_node(d)) {
+ st->print("not_a_node "); // uninitialized, sentinel, garbage, etc.
} else {
st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
}
@@ -1847,7 +1853,7 @@ void Node::dump_prec(outputStream *st) const {
Node* p = in(i);
if (p != NULL) {
if (!any_prec++) st->print(" |");
- if (NotANode(p)) { st->print("NotANode "); continue; }
+ if (not_a_node(p)) { st->print("not_a_node "); continue; }
st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
}
}
@@ -1862,8 +1868,8 @@ void Node::dump_out(outputStream *st) const {
Node* u = _out[i];
if (u == NULL) {
st->print("_ ");
- } else if (NotANode(u)) {
- st->print("NotANode ");
+ } else if (not_a_node(u)) {
+ st->print("not_a_node ");
} else {
st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
}
@@ -1900,7 +1906,7 @@ static void collect_nodes_i(GrowableArray* queue, const Node* start, int
for(uint k = 0; k < limit; k++) {
Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);
- if (NotANode(n)) continue;
+ if (not_a_node(n)) continue;
// do not recurse through top or the root (would reach unrelated stuff)
if (n->is_Root() || n->is_top()) continue;
if (only_ctrl && !n->is_CFG()) continue;
@@ -1921,7 +1927,7 @@ static void collect_nodes_i(GrowableArray* queue, const Node* start, int
//------------------------------dump_nodes-------------------------------------
static void dump_nodes(const Node* start, int d, bool only_ctrl) {
- if (NotANode(start)) return;
+ if (not_a_node(start)) return;
GrowableArray queue(Compile::current()->live_nodes());
collect_nodes_i(&queue, start, d, (uint) ABS(d), true, only_ctrl, false);
@@ -2089,7 +2095,7 @@ static void collect_nodes_in(Node* start, GrowableArray *ns, bool primary
Node* current = nodes.at(n_idx++);
for (uint i = 0; i < current->len(); i++) {
Node* n = current->in(i);
- if (NotANode(n)) {
+ if (not_a_node(n)) {
continue;
}
if ((primary_is_data && n->is_CFG()) || (!primary_is_data && !n->is_CFG())) {
@@ -2151,7 +2157,7 @@ void Node::collect_nodes_out_all_ctrl_boundary(GrowableArray *ns) const {
nodes.push((Node*) this);
while (nodes.length() > 0) {
Node* current = nodes.pop();
- if (NotANode(current)) {
+ if (not_a_node(current)) {
continue;
}
ns->append_if_missing(current);
diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp
index 6c36f5e0806..b2cd17c1523 100644
--- a/src/hotspot/share/opto/node.hpp
+++ b/src/hotspot/share/opto/node.hpp
@@ -44,6 +44,7 @@ class AllocateNode;
class ArrayCopyNode;
class BaseCountedLoopNode;
class BaseCountedLoopEndNode;
+class BlackholeNode;
class Block;
class BoolNode;
class BoxLockNode;
@@ -136,7 +137,6 @@ class Node;
class Node_Array;
class Node_List;
class Node_Stack;
-class NullCheckNode;
class OopMap;
class ParmNode;
class PCTableNode;
@@ -329,6 +329,12 @@ class Node {
// preserved in _parse_idx.
const node_idx_t _idx;
DEBUG_ONLY(const node_idx_t _parse_idx;)
+ // IGV node identifier. Two nodes, possibly in different compilation phases,
+ // have the same IGV identifier if (and only if) they are the very same node
+ // (same memory address) or one is "derived" from the other (by e.g.
+ // renumbering or matching). This identifier makes it possible to follow the
+ // entire lifetime of a node in IGV even if its C2 identifier (_idx) changes.
+ NOT_PRODUCT(node_idx_t _igv_idx;)
// Get the (read-only) number of input edges
uint req() const { return _cnt; }
@@ -1265,20 +1271,13 @@ class Node {
}
};
-
-#ifndef PRODUCT
-
-// Used in debugging code to avoid walking across dead or uninitialized edges.
-inline bool NotANode(const Node* n) {
+inline bool not_a_node(const Node* n) {
if (n == NULL) return true;
if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc.
if (*(address*)n == badAddress) return true; // kill by Node::destruct
return false;
}
-#endif
-
-
//-----------------------------------------------------------------------------
// Iterators over DU info, and associated Node functions.
diff --git a/src/hotspot/share/opto/output.cpp b/src/hotspot/share/opto/output.cpp
index 459604fbfb1..7615b8b0808 100644
--- a/src/hotspot/share/opto/output.cpp
+++ b/src/hotspot/share/opto/output.cpp
@@ -616,10 +616,6 @@ void PhaseOutput::shorten_branches(uint* blk_starts) {
if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
stub_size += CompiledStaticCall::to_interp_stub_size();
reloc_size += CompiledStaticCall::reloc_to_interp_stub();
-#if INCLUDE_AOT
- stub_size += CompiledStaticCall::to_aot_stub_size();
- reloc_size += CompiledStaticCall::reloc_to_aot_stub();
-#endif
}
} else if (mach->is_MachSafePoint()) {
// If call/safepoint are adjacent, account for possible
diff --git a/src/hotspot/share/opto/parse.hpp b/src/hotspot/share/opto/parse.hpp
index 39529f40903..2afe6fdef80 100644
--- a/src/hotspot/share/opto/parse.hpp
+++ b/src/hotspot/share/opto/parse.hpp
@@ -71,17 +71,14 @@ class InlineTree : public ResourceObj {
int caller_bci,
JVMState* jvms,
ciCallProfile& profile,
- WarmCallInfo* wci_result,
bool& should_delay);
bool should_inline(ciMethod* callee_method,
ciMethod* caller_method,
int caller_bci,
- ciCallProfile& profile,
- WarmCallInfo* wci_result);
+ ciCallProfile& profile);
bool should_not_inline(ciMethod* callee_method,
ciMethod* caller_method,
- JVMState* jvms,
- WarmCallInfo* wci_result);
+ JVMState* jvms);
bool is_not_reached(ciMethod* callee_method,
ciMethod* caller_method,
int caller_bci,
@@ -112,7 +109,7 @@ class InlineTree : public ResourceObj {
// and may be accessed by find_subtree_from_root.
// The call_method is the dest_method for a special or static invocation.
// The call_method is an optimized virtual method candidate otherwise.
- WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay);
+ bool ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, bool& should_delay);
// Information about inlined method
JVMState* caller_jvms() const { return _caller_jvms; }
diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp
index 53a92ff7b79..3b18afd2ca5 100644
--- a/src/hotspot/share/opto/parse1.cpp
+++ b/src/hotspot/share/opto/parse1.cpp
@@ -2442,7 +2442,7 @@ void Parse::add_safepoint() {
// Create a node for the polling address
Node *polladr;
Node *thread = _gvn.transform(new ThreadLocalNode());
- Node *polling_page_load_addr = _gvn.transform(basic_plus_adr(top(), thread, in_bytes(Thread::polling_page_offset())));
+ Node *polling_page_load_addr = _gvn.transform(basic_plus_adr(top(), thread, in_bytes(JavaThread::polling_page_offset())));
polladr = make_load(control(), polling_page_load_addr, TypeRawPtr::BOTTOM, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));
diff --git a/src/hotspot/share/opto/regmask.cpp b/src/hotspot/share/opto/regmask.cpp
index 86b7d092098..df7f8f4db42 100644
--- a/src/hotspot/share/opto/regmask.cpp
+++ b/src/hotspot/share/opto/regmask.cpp
@@ -51,6 +51,13 @@ void OptoReg::dump(int r, outputStream *st) {
//=============================================================================
const RegMask RegMask::Empty;
+const RegMask RegMask::All(
+# define BODY(I) -1,
+ FORALL_BODY
+# undef BODY
+ 0
+);
+
//=============================================================================
bool RegMask::is_vector(uint ireg) {
return (ireg == Op_VecA || ireg == Op_VecS || ireg == Op_VecD ||
diff --git a/src/hotspot/share/opto/regmask.hpp b/src/hotspot/share/opto/regmask.hpp
index 1a7eb7e934e..1694367554d 100644
--- a/src/hotspot/share/opto/regmask.hpp
+++ b/src/hotspot/share/opto/regmask.hpp
@@ -357,6 +357,7 @@ class RegMask {
#endif
static const RegMask Empty; // Common empty mask
+ static const RegMask All; // Common all mask
static bool can_represent(OptoReg::Name reg) {
// NOTE: -1 in computation reflects the usage of the last
diff --git a/src/hotspot/share/opto/vectorIntrinsics.cpp b/src/hotspot/share/opto/vectorIntrinsics.cpp
index 4f8bb7f08ad..7f88b761572 100644
--- a/src/hotspot/share/opto/vectorIntrinsics.cpp
+++ b/src/hotspot/share/opto/vectorIntrinsics.cpp
@@ -608,13 +608,12 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) {
Node* base = argument(3);
Node* offset = ConvL2X(argument(4));
- DecoratorSet decorators = C2_UNSAFE_ACCESS;
// Save state and restore on bailout
uint old_sp = sp();
SafePointNode* old_map = clone_map();
- Node* addr = make_unsafe_address(base, offset, decorators, (is_mask ? T_BOOLEAN : elem_bt), true);
+ Node* addr = make_unsafe_address(base, offset, (is_mask ? T_BOOLEAN : elem_bt), true);
// Can base be NULL? Otherwise, always on-heap access.
bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(gvn().type(base));
@@ -802,7 +801,7 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) {
uint old_sp = sp();
SafePointNode* old_map = clone_map();
- Node* addr = make_unsafe_address(base, offset, C2_UNSAFE_ACCESS, elem_bt, true);
+ Node* addr = make_unsafe_address(base, offset, elem_bt, true);
const TypePtr *addr_type = gvn().type(addr)->isa_ptr();
const TypeAryPtr* arr_type = addr_type->isa_aryptr();
diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp
index c3f72e3a089..00bee2cb9b8 100644
--- a/src/hotspot/share/prims/jvm.cpp
+++ b/src/hotspot/share/prims/jvm.cpp
@@ -24,9 +24,12 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "cds/classListParser.hpp"
+#include "cds/classListWriter.hpp"
+#include "cds/dynamicArchive.hpp"
+#include "cds/heapShared.hpp"
+#include "cds/lambdaFormInvokers.hpp"
#include "classfile/classFileStream.hpp"
-#include "classfile/classListParser.hpp"
-#include "classfile/classListWriter.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderData.inline.hpp"
@@ -46,8 +49,6 @@
#include "interpreter/bytecodeUtils.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
-#include "memory/dynamicArchive.hpp"
-#include "memory/heapShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/referenceType.hpp"
#include "memory/resourceArea.hpp"
@@ -3049,32 +3050,9 @@ JVM_ENTRY(void, JVM_SuspendThread(JNIEnv* env, jobject jthread))
JavaThread* receiver = NULL;
bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &receiver, NULL);
if (is_alive) {
- // jthread refers to a live JavaThread.
- {
- MutexLocker ml(receiver->SR_lock(), Mutex::_no_safepoint_check_flag);
- if (receiver->is_external_suspend()) {
- // Don't allow nested external suspend requests. We can't return
- // an error from this interface so just ignore the problem.
- return;
- }
- if (receiver->is_exiting()) { // thread is in the process of exiting
- return;
- }
- receiver->set_external_suspend();
- }
-
- // java_suspend() will catch threads in the process of exiting
- // and will ignore them.
+ // jthread refers to a live JavaThread, but java_suspend() will
+ // detect a thread that has started to exit and will ignore it.
receiver->java_suspend();
-
- // It would be nice to have the following assertion in all the
- // time, but it is possible for a racing resume request to have
- // resumed this thread right after we suspended it. Temporarily
- // enable this assertion if you are chasing a different kind of
- // bug.
- //
- // assert(java_lang_Thread::thread(receiver->threadObj()) == NULL ||
- // receiver->is_being_ext_suspended(), "thread is not suspended");
}
JVM_END
@@ -3085,22 +3063,6 @@ JVM_ENTRY(void, JVM_ResumeThread(JNIEnv* env, jobject jthread))
bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &receiver, NULL);
if (is_alive) {
// jthread refers to a live JavaThread.
-
- // This is the original comment for this Threads_lock grab:
- // We need to *always* get the threads lock here, since this operation cannot be allowed during
- // a safepoint. The safepoint code relies on suspending a thread to examine its state. If other
- // threads randomly resumes threads, then a thread might not be suspended when the safepoint code
- // looks at it.
- //
- // The above comment dates back to when we had both internal and
- // external suspend APIs that shared a common underlying mechanism.
- // External suspend is now entirely cooperative and doesn't share
- // anything with internal suspend. That said, there are some
- // assumptions in the VM that an external resume grabs the
- // Threads_lock. We can't drop the Threads_lock grab here until we
- // resolve the assumptions that exist elsewhere.
- //
- MutexLocker ml(Threads_lock);
receiver->java_resume();
}
JVM_END
@@ -3753,7 +3715,7 @@ JVM_END
JVM_ENTRY(jboolean, JVM_IsDumpingClassList(JNIEnv *env))
#if INCLUDE_CDS
- return ClassListWriter::is_enabled();
+ return ClassListWriter::is_enabled() || DynamicDumpSharedSpaces;
#else
return false;
#endif // INCLUDE_CDS
@@ -3761,13 +3723,20 @@ JVM_END
JVM_ENTRY(void, JVM_LogLambdaFormInvoker(JNIEnv *env, jstring line))
#if INCLUDE_CDS
- assert(ClassListWriter::is_enabled(), "Should be set and open");
+ assert(ClassListWriter::is_enabled() || DynamicDumpSharedSpaces, "Should be set and open or do dynamic dump");
if (line != NULL) {
ResourceMark rm(THREAD);
Handle h_line (THREAD, JNIHandles::resolve_non_null(line));
char* c_line = java_lang_String::as_utf8_string(h_line());
- ClassListWriter w;
- w.stream()->print_cr("%s %s", LAMBDA_FORM_TAG, c_line);
+ if (DynamicDumpSharedSpaces) {
+ // Note: LambdaFormInvokers::append_filtered and LambdaFormInvokers::append take same format which is not
+ // same as below the print format. The line does not include LAMBDA_FORM_TAG.
+ LambdaFormInvokers::append_filtered(os::strdup((const char*)c_line, mtInternal));
+ }
+ if (ClassListWriter::is_enabled()) {
+ ClassListWriter w;
+ w.stream()->print_cr("%s %s", LAMBDA_FORM_TAG, c_line);
+ }
}
#endif // INCLUDE_CDS
JVM_END
diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp
index 9e004c2c9a2..d98ad163a7a 100644
--- a/src/hotspot/share/prims/jvmtiEnv.cpp
+++ b/src/hotspot/share/prims/jvmtiEnv.cpp
@@ -876,7 +876,7 @@ JvmtiEnv::GetThreadState(jthread thread, jint* thread_state_ptr) {
// We have a JavaThread* so add more state bits.
JavaThreadState jts = java_thread->thread_state();
- if (java_thread->is_being_ext_suspended()) {
+ if (java_thread->is_suspended()) {
state |= JVMTI_THREAD_STATE_SUSPENDED;
}
if (jts == _thread_in_native) {
@@ -942,24 +942,18 @@ jvmtiError
JvmtiEnv::SuspendThread(JavaThread* java_thread) {
// don't allow hidden thread suspend request.
if (java_thread->is_hidden_from_external_view()) {
- return (JVMTI_ERROR_NONE);
+ return JVMTI_ERROR_NONE;
}
-
- {
- MutexLocker ml(java_thread->SR_lock(), Mutex::_no_safepoint_check_flag);
- if (java_thread->is_external_suspend()) {
- // don't allow nested external suspend requests.
- return (JVMTI_ERROR_THREAD_SUSPENDED);
- }
- if (java_thread->is_exiting()) { // thread is in the process of exiting
- return (JVMTI_ERROR_THREAD_NOT_ALIVE);
- }
- java_thread->set_external_suspend();
+ if (java_thread->is_suspended()) {
+ return JVMTI_ERROR_THREAD_SUSPENDED;
}
-
if (!JvmtiSuspendControl::suspend(java_thread)) {
- // the thread was in the process of exiting
- return (JVMTI_ERROR_THREAD_NOT_ALIVE);
+ // Either the thread is already suspended or
+ // it was in the process of exiting.
+ if (java_thread->is_exiting()) {
+ return JVMTI_ERROR_THREAD_NOT_ALIVE;
+ }
+ return JVMTI_ERROR_THREAD_SUSPENDED;
}
return JVMTI_ERROR_NONE;
} /* end SuspendThread */
@@ -970,8 +964,10 @@ JvmtiEnv::SuspendThread(JavaThread* java_thread) {
// results - pre-checked for NULL
jvmtiError
JvmtiEnv::SuspendThreadList(jint request_count, const jthread* request_list, jvmtiError* results) {
+ int self_index = -1;
int needSafepoint = 0; // > 0 if we need a safepoint
- ThreadsListHandle tlh;
+ JavaThread* current = JavaThread::current();
+ ThreadsListHandle tlh(current);
for (int i = 0; i < request_count; i++) {
JavaThread *java_thread = NULL;
jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), request_list[i], &java_thread, NULL);
@@ -984,38 +980,38 @@ JvmtiEnv::SuspendThreadList(jint request_count, const jthread* request_list, jvm
results[i] = JVMTI_ERROR_NONE; // indicate successful suspend
continue;
}
-
- {
- MutexLocker ml(java_thread->SR_lock(), Mutex::_no_safepoint_check_flag);
- if (java_thread->is_external_suspend()) {
- // don't allow nested external suspend requests.
- results[i] = JVMTI_ERROR_THREAD_SUSPENDED;
- continue;
- }
- if (java_thread->is_exiting()) { // thread is in the process of exiting
- results[i] = JVMTI_ERROR_THREAD_NOT_ALIVE;
- continue;
- }
- java_thread->set_external_suspend();
- }
- if (java_thread->thread_state() == _thread_in_native) {
- // We need to try and suspend native threads here. Threads in
- // other states will self-suspend on their next transition.
- if (!JvmtiSuspendControl::suspend(java_thread)) {
- // The thread was in the process of exiting. Force another
- // safepoint to make sure that this thread transitions.
- needSafepoint++;
+ if (java_thread->is_suspended()) {
+ results[i] = JVMTI_ERROR_THREAD_SUSPENDED;
+ continue;
+ }
+ if (java_thread == current) {
+ self_index = i;
+ continue;
+ }
+ if (!JvmtiSuspendControl::suspend(java_thread)) {
+ // Either the thread is already suspended or
+ // it was in the process of exiting.
+ if (java_thread->is_exiting()) {
results[i] = JVMTI_ERROR_THREAD_NOT_ALIVE;
continue;
}
- } else {
- needSafepoint++;
+ results[i] = JVMTI_ERROR_THREAD_SUSPENDED;
+ continue;
}
results[i] = JVMTI_ERROR_NONE; // indicate successful suspend
}
- if (needSafepoint > 0) {
- VM_ThreadsSuspendJVMTI tsj;
- VMThread::execute(&tsj);
+ if (self_index >= 0) {
+ if (!JvmtiSuspendControl::suspend(current)) {
+ // Either the thread is already suspended or
+ // it was in the process of exiting.
+ if (current->is_exiting()) {
+ results[self_index] = JVMTI_ERROR_THREAD_NOT_ALIVE;
+ } else {
+ results[self_index] = JVMTI_ERROR_THREAD_SUSPENDED;
+ }
+ } else {
+ results[self_index] = JVMTI_ERROR_NONE; // indicate successful suspend
+ }
}
// per-thread suspend results returned via results parameter
return JVMTI_ERROR_NONE;
@@ -1030,11 +1026,9 @@ JvmtiEnv::ResumeThread(JavaThread* java_thread) {
if (java_thread->is_hidden_from_external_view()) {
return JVMTI_ERROR_NONE;
}
-
- if (!java_thread->is_being_ext_suspended()) {
+ if (!java_thread->is_suspended()) {
return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
}
-
if (!JvmtiSuspendControl::resume(java_thread)) {
return JVMTI_ERROR_INTERNAL;
}
@@ -1060,7 +1054,7 @@ JvmtiEnv::ResumeThreadList(jint request_count, const jthread* request_list, jvmt
results[i] = JVMTI_ERROR_NONE; // indicate successful resume
continue;
}
- if (!java_thread->is_being_ext_suspended()) {
+ if (!java_thread->is_suspended()) {
results[i] = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
continue;
}
diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp
index 5e303157c9a..f21c8a42034 100644
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp
@@ -1183,8 +1183,7 @@ MultipleStackTracesCollector::fill_frames(jthread jt, JavaThread *thr, oop threa
}
if (thr != NULL) { // add more state bits if there is a JavaThead to query
- // same as is_being_ext_suspended() but without locking
- if (thr->is_ext_suspended() || thr->is_external_suspend()) {
+ if (thr->is_suspended()) {
state |= JVMTI_THREAD_STATE_SUSPENDED;
}
JavaThreadState jts = thr->thread_state();
@@ -1402,7 +1401,7 @@ SetForceEarlyReturn::doit(Thread *target, bool self) {
HandleMark hm(current_thread);
if (!self) {
- if (!java_thread->is_external_suspend()) {
+ if (!java_thread->is_suspended()) {
_result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
return;
}
@@ -1535,7 +1534,7 @@ UpdateForPopTopFrameClosure::doit(Thread *target, bool self) {
JavaThread* java_thread = target->as_Java_thread();
assert(java_thread == _state->get_thread(), "Must be");
- if (!self && !java_thread->is_external_suspend()) {
+ if (!self && !java_thread->is_suspended()) {
_result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
return;
}
@@ -1626,7 +1625,7 @@ SetFramePopClosure::doit(Thread *target, bool self) {
assert(_state->get_thread() == java_thread, "Must be");
- if (!self && !java_thread->is_external_suspend()) {
+ if (!self && !java_thread->is_suspended()) {
_result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
return;
}
diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp
index 58335f9d17e..658e9952d81 100644
--- a/src/hotspot/share/prims/jvmtiExport.cpp
+++ b/src/hotspot/share/prims/jvmtiExport.cpp
@@ -61,7 +61,7 @@
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/serviceThread.hpp"
diff --git a/src/hotspot/share/prims/jvmtiImpl.cpp b/src/hotspot/share/prims/jvmtiImpl.cpp
index 7f6192f1517..4fdc4a4bec3 100644
--- a/src/hotspot/share/prims/jvmtiImpl.cpp
+++ b/src/hotspot/share/prims/jvmtiImpl.cpp
@@ -243,8 +243,17 @@ void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
for (int i = methods->length() - 1; i >= 0; i--) {
Method* method = methods->at(i);
- // Only set breakpoints in running EMCP methods.
- if (method->is_running_emcp() &&
+ // Only set breakpoints in EMCP methods.
+ // EMCP methods are old but not obsolete. Equivalent
+ // Modulo Constant Pool means the method is equivalent except
+ // the constant pool and instructions that access the constant
+ // pool might be different.
+ // If a breakpoint is set in a redefined method, its EMCP methods
+ // must have a breakpoint also.
+ // None of the methods are deleted until none are running.
+ // This code could set a breakpoint in a method that
+ // is never reached, but this won't be noticeable to the programmer.
+ if (!method->is_obsolete() &&
method->name() == m_name &&
method->signature() == m_signature) {
ResourceMark rm;
@@ -769,47 +778,13 @@ VM_GetReceiver::VM_GetReceiver(
//
bool JvmtiSuspendControl::suspend(JavaThread *java_thread) {
- // external suspend should have caught suspending a thread twice
-
- // Immediate suspension required for JPDA back-end so JVMTI agent threads do
- // not deadlock due to later suspension on transitions while holding
- // raw monitors. Passing true causes the immediate suspension.
- // java_suspend() will catch threads in the process of exiting
- // and will ignore them.
- java_thread->java_suspend();
-
- // It would be nice to have the following assertion in all the time,
- // but it is possible for a racing resume request to have resumed
- // this thread right after we suspended it. Temporarily enable this
- // assertion if you are chasing a different kind of bug.
- //
- // assert(java_lang_Thread::thread(java_thread->threadObj()) == NULL ||
- // java_thread->is_being_ext_suspended(), "thread is not suspended");
-
- if (java_lang_Thread::thread(java_thread->threadObj()) == NULL) {
- // check again because we can get delayed in java_suspend():
- // the thread is in process of exiting.
- return false;
- }
-
- return true;
+ return java_thread->java_suspend();
}
bool JvmtiSuspendControl::resume(JavaThread *java_thread) {
- // external suspend should have caught resuming a thread twice
- assert(java_thread->is_being_ext_suspended(), "thread should be suspended");
-
- // resume thread
- {
- // must always grab Threads_lock, see JVM_SuspendThread
- MutexLocker ml(Threads_lock);
- java_thread->java_resume();
- }
-
- return true;
+ return java_thread->java_resume();
}
-
void JvmtiSuspendControl::print() {
#ifndef PRODUCT
ResourceMark rm;
@@ -821,7 +796,7 @@ void JvmtiSuspendControl::print() {
#else
const char *name = "";
#endif /*JVMTI_TRACE */
- log_stream.print("%s(%c ", name, thread->is_being_ext_suspended() ? 'S' : '_');
+ log_stream.print("%s(%c ", name, thread->is_suspended() ? 'S' : '_');
if (!thread->has_last_Java_frame()) {
log_stream.print("no stack");
}
diff --git a/src/hotspot/share/prims/jvmtiRawMonitor.cpp b/src/hotspot/share/prims/jvmtiRawMonitor.cpp
index 06efea26c5f..b423a79360c 100644
--- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -241,7 +241,6 @@ int JvmtiRawMonitor::simple_wait(Thread* self, jlong millis) {
ret = M_INTERRUPTED;
} else {
ThreadBlockInVM tbivm(jt);
- jt->set_suspend_equivalent();
if (millis <= 0) {
self->_ParkEvent->park();
} else {
@@ -307,7 +306,8 @@ void JvmtiRawMonitor::simple_notify(Thread* self, bool all) {
return;
}
-// Any JavaThread will enter here with state _thread_blocked
+// Any JavaThread will enter here with state _thread_blocked unless we
+// are in single-threaded mode during startup.
void JvmtiRawMonitor::raw_enter(Thread* self) {
void* contended;
JavaThread* jt = NULL;
@@ -315,15 +315,28 @@ void JvmtiRawMonitor::raw_enter(Thread* self) {
// surprise the suspender if a "suspended" thread can still enter monitor
if (self->is_Java_thread()) {
jt = self->as_Java_thread();
- jt->SR_lock()->lock_without_safepoint_check();
- while (jt->is_external_suspend()) {
- jt->SR_lock()->unlock();
- jt->java_suspend_self();
- jt->SR_lock()->lock_without_safepoint_check();
+ while (true) {
+ // To pause suspend requests while in blocked we must block handshakes.
+ jt->handshake_state()->lock();
+ // Suspend request flag can only be set in handshakes.
+ // By blocking handshakes, suspend request flag cannot change its value.
+ if (!jt->handshake_state()->is_suspended()) {
+ contended = Atomic::cmpxchg(&_owner, (Thread*)NULL, jt);
+ jt->handshake_state()->unlock();
+ break;
+ }
+ jt->handshake_state()->unlock();
+
+ // We may only be in states other than _thread_blocked when we are
+ // in single-threaded mode during startup.
+ guarantee(jt->thread_state() == _thread_blocked, "invariant");
+
+ jt->set_thread_state_fence(_thread_blocked_trans);
+ SafepointMechanism::process_if_requested(jt);
+ // We should transition to thread_in_vm and then to thread_in_vm_trans,
+ // but those are always treated the same as _thread_blocked_trans.
+ jt->set_thread_state(_thread_blocked);
}
- // guarded by SR_lock to avoid racing with new external suspend requests.
- contended = Atomic::cmpxchg(&_owner, (Thread*)NULL, jt);
- jt->SR_lock()->unlock();
} else {
contended = Atomic::cmpxchg(&_owner, (Thread*)NULL, self);
}
@@ -344,28 +357,24 @@ void JvmtiRawMonitor::raw_enter(Thread* self) {
if (!self->is_Java_thread()) {
simple_enter(self);
} else {
+ // In multi-threaded mode, we must enter this method blocked.
guarantee(jt->thread_state() == _thread_blocked, "invariant");
for (;;) {
- jt->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self()
simple_enter(jt);
-
- // were we externally suspended while we were waiting?
- if (!jt->handle_special_suspend_equivalent_condition()) {
+ if (!SafepointMechanism::should_process(jt)) {
+ // Not suspended so we're done here.
+ break;
+ }
+ if (!jt->is_suspended()) {
+ // Not suspended so we're done here.
break;
}
-
- // This thread was externally suspended
- // We have reentered the contended monitor, but while we were
- // waiting another thread suspended us. We don't want to reenter
- // the monitor while suspended because that would surprise the
- // thread that suspended us.
- //
- // Drop the lock
simple_exit(jt);
-
- jt->java_suspend_self();
+ jt->set_thread_state_fence(_thread_blocked_trans);
+ SafepointMechanism::process_if_requested(jt);
+ // We should transition to thread_in_vm and then to thread_in_vm_trans,
+ // but those are always treated the same as _thread_blocked_trans.
+ jt->set_thread_state(_thread_blocked);
}
}
@@ -411,29 +420,22 @@ int JvmtiRawMonitor::raw_wait(jlong millis, Thread* self) {
if (self->is_Java_thread()) {
JavaThread* jt = self->as_Java_thread();
+ guarantee(jt->thread_state() == _thread_in_native, "invariant");
for (;;) {
- jt->set_suspend_equivalent();
- if (!jt->handle_special_suspend_equivalent_condition()) {
+ if (!SafepointMechanism::should_process(jt)) {
+ // Not suspended so we're done here:
break;
- } else {
- // We've been suspended whilst waiting and so we have to
- // relinquish the raw monitor until we are resumed. Of course
- // after reacquiring we have to re-check for suspension again.
- // Suspension requires we are _thread_blocked, and we also have to
- // recheck for being interrupted.
- simple_exit(jt);
- {
- ThreadInVMfromNative tivm(jt);
- {
- ThreadBlockInVM tbivm(jt);
- jt->java_suspend_self();
- }
- if (jt->is_interrupted(true)) {
- ret = M_INTERRUPTED;
- }
- }
- simple_enter(jt);
}
+ simple_exit(jt);
+ jt->set_thread_state_fence(_thread_in_native_trans);
+ SafepointMechanism::process_if_requested(jt);
+ if (jt->is_interrupted(true)) {
+ ret = M_INTERRUPTED;
+ }
+ // We should transition to thread_in_vm and then to thread_in_vm_trans,
+ // but those are always treated the same as _thread_in_native_trans.
+ jt->set_thread_state(_thread_in_native);
+ simple_enter(jt);
}
guarantee(jt == _owner, "invariant");
} else {
diff --git a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp
index 108c601d739..353ca3aa239 100644
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classLoadInfo.hpp"
@@ -41,7 +41,6 @@
#include "jfr/jfrEvents.hpp"
#include "logging/logStream.hpp"
#include "memory/metadataFactory.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/annotations.hpp"
@@ -4415,18 +4414,8 @@ void VM_RedefineClasses::redefine_single_class(Thread* current, jclass the_jclas
scratch_class->enclosing_method_method_index());
scratch_class->set_enclosing_method_indices(old_class_idx, old_method_idx);
- // Replace fingerprint data
- the_class->set_has_passed_fingerprint_check(scratch_class->has_passed_fingerprint_check());
- the_class->store_fingerprint(scratch_class->get_stored_fingerprint());
-
the_class->set_has_been_redefined();
- if (!the_class->should_be_initialized()) {
- // Class was already initialized, so AOT has only seen the original version.
- // We need to let AOT look at it again.
- AOTLoader::load_for_klass(the_class, current);
- }
-
// keep track of previous versions of this class
the_class->add_previous_version(scratch_class, emcp_method_count);
diff --git a/src/hotspot/share/prims/jvmtiTagMap.cpp b/src/hotspot/share/prims/jvmtiTagMap.cpp
index 2f6afc02d5c..e54e526a44c 100644
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp
@@ -118,7 +118,7 @@ JvmtiTagMap* JvmtiTagMap::tag_map_for(JvmtiEnv* env) {
tag_map = new JvmtiTagMap(env);
}
} else {
- DEBUG_ONLY(Thread::current()->check_possible_safepoint());
+ DEBUG_ONLY(JavaThread::current()->check_possible_safepoint());
}
return tag_map;
}
@@ -2599,7 +2599,7 @@ inline bool VM_HeapWalkOperation::iterate_over_class(oop java_class) {
} else if (tag.is_klass()) {
entry = pool->resolved_klass_at(i)->java_mirror();
} else {
- // Code generated by JIT and AOT compilers might not resolve constant
+ // Code generated by JIT compilers might not resolve constant
// pool entries. Treat them as resolved if they are loaded.
assert(tag.is_unresolved_klass(), "must be");
constantPoolHandle cp(Thread::current(), pool);
diff --git a/src/hotspot/share/prims/jvmtiThreadState.cpp b/src/hotspot/share/prims/jvmtiThreadState.cpp
index b405fcae9f5..e61241fdd63 100644
--- a/src/hotspot/share/prims/jvmtiThreadState.cpp
+++ b/src/hotspot/share/prims/jvmtiThreadState.cpp
@@ -294,11 +294,6 @@ int JvmtiThreadState::cur_stack_depth() {
return _cur_stack_depth;
}
-bool JvmtiThreadState::may_be_walked() {
- return (get_thread()->is_being_ext_suspended() || (JavaThread::current() == get_thread()));
-}
-
-
void JvmtiThreadState::process_pending_step_for_popframe() {
// We are single stepping as the last part of the PopFrame() dance
// so we have some house keeping to do.
diff --git a/src/hotspot/share/prims/jvmtiThreadState.hpp b/src/hotspot/share/prims/jvmtiThreadState.hpp
index 32d19fa6d20..9ddd8308c47 100644
--- a/src/hotspot/share/prims/jvmtiThreadState.hpp
+++ b/src/hotspot/share/prims/jvmtiThreadState.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -312,8 +312,6 @@ class JvmtiThreadState : public CHeapObj {
public:
- bool may_be_walked();
-
// Thread local event collector setter and getter methods.
JvmtiDynamicCodeEventCollector* get_dynamic_code_event_collector() {
return _dynamic_code_event_collector;
diff --git a/src/hotspot/share/prims/nativeLookup.cpp b/src/hotspot/share/prims/nativeLookup.cpp
index a10ea5cb563..3ae101ec602 100644
--- a/src/hotspot/share/prims/nativeLookup.cpp
+++ b/src/hotspot/share/prims/nativeLookup.cpp
@@ -46,7 +46,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "utilities/macros.hpp"
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index 71440b993fe..e7fae9053aa 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -24,6 +24,10 @@
#include "precompiled.hpp"
#include
+#include "cds/cdsoffsets.hpp"
+#include "cds/filemap.hpp"
+#include "cds/heapShared.inline.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/modules.hpp"
@@ -43,13 +47,10 @@
#include "gc/shared/genCollectedHeap.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "logging/log.hpp"
-#include "memory/filemap.hpp"
-#include "memory/heapShared.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metaspace/testHelpers.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@@ -95,9 +96,6 @@
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
-#if INCLUDE_CDS
-#include "prims/cdsoffsets.hpp"
-#endif // INCLUDE_CDS
#if INCLUDE_G1GC
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
@@ -118,9 +116,6 @@
#include "jvmci/jvmciEnv.hpp"
#include "jvmci/jvmciRuntime.hpp"
#endif
-#if INCLUDE_AOT
-#include "aot/aotLoader.hpp"
-#endif // INCLUDE_AOT
#ifdef LINUX
#include "osContainer_linux.hpp"
@@ -259,7 +254,7 @@ WB_END
WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
size_t granularity = os::vm_allocation_granularity();
- ReservedHeapSpace rhs(100 * granularity, granularity, false);
+ ReservedHeapSpace rhs(100 * granularity, granularity, os::vm_page_size());
VirtualSpace vs;
vs.initialize(rhs, 50 * granularity);
@@ -286,7 +281,7 @@ WB_END
static jint wb_stress_virtual_space_resize(size_t reserved_space_size,
size_t magnitude, size_t iterations) {
size_t granularity = os::vm_allocation_granularity();
- ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, false);
+ ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, os::vm_page_size());
VirtualSpace vs;
if (!vs.initialize(rhs, 0)) {
tty->print_cr("Failed to initialize VirtualSpace. Can't proceed.");
@@ -1047,7 +1042,7 @@ WB_END
WB_ENTRY(jboolean, WB_EnqueueInitializerForCompilation(JNIEnv* env, jobject o, jclass klass, jint comp_level))
InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
Method* clinit = ik->class_initializer();
- if (clinit == NULL) {
+ if (clinit == NULL || clinit->method_holder()->is_not_initialized()) {
return false;
}
return WhiteBox::compile_method(clinit, comp_level, InvocationEntryBci, THREAD);
@@ -1431,9 +1426,6 @@ WB_END
int WhiteBox::get_blob_type(const CodeBlob* code) {
guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to be enabled");
- if (code->is_aot()) {
- return -1;
- }
return CodeCache::get_code_heap(code)->code_blob_type();
}
@@ -1491,7 +1483,7 @@ WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jbo
return result;
}
int comp_level = code->comp_level();
- int insts_size = comp_level == CompLevel_aot ? code->code_end() - code->code_begin() : code->insts_size();
+ int insts_size = code->insts_size();
ThreadToNativeFromVM ttn(thread);
jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
@@ -2320,14 +2312,6 @@ WB_ENTRY(jint, WB_ProtectionDomainRemovedCount(JNIEnv* env, jobject o))
return (jint) SystemDictionary::pd_cache_table()->removed_entries_count();
WB_END
-WB_ENTRY(jint, WB_AotLibrariesCount(JNIEnv* env, jobject o))
- jint result = 0;
-#if INCLUDE_AOT
- result = (jint) AOTLoader::heaps_count();
-#endif
- return result;
-WB_END
-
WB_ENTRY(jint, WB_GetKlassMetadataSize(JNIEnv* env, jobject wb, jclass mirror))
Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(mirror));
// Return size in bytes.
@@ -2680,7 +2664,6 @@ static JNINativeMethod methods[] = {
{CC"disableElfSectionCache", CC"()V", (void*)&WB_DisableElfSectionCache },
{CC"resolvedMethodItemsCount", CC"()J", (void*)&WB_ResolvedMethodItemsCount },
{CC"protectionDomainRemovedCount", CC"()I", (void*)&WB_ProtectionDomainRemovedCount },
- {CC"aotLibrariesCount", CC"()I", (void*)&WB_AotLibrariesCount },
{CC"getKlassMetadataSize", CC"(Ljava/lang/Class;)I",(void*)&WB_GetKlassMetadataSize},
{CC"createMetaspaceTestContext", CC"(JJ)J", (void*)&WB_CreateMetaspaceTestContext},
diff --git a/src/hotspot/share/runtime/abstract_vm_version.cpp b/src/hotspot/share/runtime/abstract_vm_version.cpp
index 8c6604b42b0..67aa1f8559f 100644
--- a/src/hotspot/share/runtime/abstract_vm_version.cpp
+++ b/src/hotspot/share/runtime/abstract_vm_version.cpp
@@ -128,17 +128,13 @@ const char* Abstract_VM_Version::vm_info_string() {
return UseSharedSpaces ? "interpreted mode, sharing" : "interpreted mode";
case Arguments::_mixed:
if (UseSharedSpaces) {
- if (UseAOT) {
- return "mixed mode, aot, sharing";
- } else if (CompilationModeFlag::quick_only()) {
+ if (CompilationModeFlag::quick_only()) {
return "mixed mode, emulated-client, sharing";
} else {
return "mixed mode, sharing";
}
} else {
- if (UseAOT) {
- return "mixed mode, aot";
- } else if (CompilationModeFlag::quick_only()) {
+ if (CompilationModeFlag::quick_only()) {
return "mixed mode, emulated-client";
} else {
return "mixed mode";
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index 60ac260d39c..0620d32df0f 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "cds/filemap.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaAssertions.hpp"
#include "classfile/moduleEntry.hpp"
@@ -38,7 +39,6 @@
#include "logging/logStream.hpp"
#include "logging/logTag.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/filemap.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
@@ -47,7 +47,7 @@
#include "runtime/flags/jvmFlagLimit.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vm_version.hpp"
@@ -521,6 +521,8 @@ static SpecialFlag const special_jvm_flags[] = {
{ "InitialRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "AllowRedefinitionToAddDeleteMethods", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() },
{ "FlightRecorder", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() },
+ { "SuspendRetryCount", JDK_Version::undefined(), JDK_Version::jdk(17), JDK_Version::jdk(18) },
+ { "SuspendRetryDelay", JDK_Version::undefined(), JDK_Version::jdk(17), JDK_Version::jdk(18) },
{ "CriticalJNINatives", JDK_Version::jdk(16), JDK_Version::jdk(17), JDK_Version::jdk(18) },
{ "AlwaysLockClassLoader", JDK_Version::jdk(17), JDK_Version::jdk(18), JDK_Version::jdk(19) },
{ "UseBiasedLocking", JDK_Version::jdk(15), JDK_Version::jdk(18), JDK_Version::jdk(19) },
@@ -3170,21 +3172,6 @@ jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) {
}
#endif
-#if !INCLUDE_AOT
- UNSUPPORTED_OPTION(UseAOT);
- UNSUPPORTED_OPTION(PrintAOT);
- UNSUPPORTED_OPTION(UseAOTStrictLoading);
- UNSUPPORTED_OPTION_NULL(AOTLibrary);
-
- UNSUPPORTED_OPTION_INIT(Tier3AOTInvocationThreshold, 0);
- UNSUPPORTED_OPTION_INIT(Tier3AOTMinInvocationThreshold, 0);
- UNSUPPORTED_OPTION_INIT(Tier3AOTCompileThreshold, 0);
- UNSUPPORTED_OPTION_INIT(Tier3AOTBackEdgeThreshold, 0);
-#ifndef PRODUCT
- UNSUPPORTED_OPTION(PrintAOTStatistics);
-#endif
-#endif
-
#ifndef CAN_SHOW_REGISTERS_ON_ASSERT
UNSUPPORTED_OPTION(ShowRegistersOnAssert);
#endif // CAN_SHOW_REGISTERS_ON_ASSERT
diff --git a/src/hotspot/share/runtime/atomic.hpp b/src/hotspot/share/runtime/atomic.hpp
index 0c6906173e6..82e8222e327 100644
--- a/src/hotspot/share/runtime/atomic.hpp
+++ b/src/hotspot/share/runtime/atomic.hpp
@@ -818,7 +818,8 @@ inline uint32_t Atomic::CmpxchgByteUsingInt::set_byte_in_int(uint32_t n,
uint8_t b,
uint32_t idx) {
int bitsIdx = BitsPerByte * idx;
- return (n & ~(0xff << bitsIdx)) | (b << bitsIdx);
+ return (n & ~(static_cast(0xff) << bitsIdx))
+ | (static_cast(b) << bitsIdx);
}
inline uint8_t Atomic::CmpxchgByteUsingInt::get_byte_in_int(uint32_t n,
diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
index 28b1bb4adb8..ffaeab3fe95 100644
--- a/src/hotspot/share/runtime/deoptimization.cpp
+++ b/src/hotspot/share/runtime/deoptimization.cpp
@@ -925,9 +925,7 @@ void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) {
Deoptimization::DeoptAction Deoptimization::_unloaded_action
= Deoptimization::Action_reinterpret;
-
-
-#if COMPILER2_OR_JVMCI || INCLUDE_AOT
+#if COMPILER2_OR_JVMCI
template
class BoxCacheBase : public CHeapObj {
protected:
@@ -1055,9 +1053,7 @@ oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMa
}
return NULL;
}
-#endif // COMPILER2_OR_JVMCI || INCLUDE_AOT
-#if COMPILER2_OR_JVMCI
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray* objects, TRAPS) {
Handle pending_exception(THREAD, thread->pending_exception());
const char* exception_file = thread->exception_file();
@@ -1074,7 +1070,6 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap*
oop obj = NULL;
if (k->is_instance_klass()) {
-#if COMPILER2_OR_JVMCI || INCLUDE_AOT
if (sv->is_auto_box()) {
AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
obj = get_cached_box(abv, fr, reg_map, THREAD);
@@ -1083,7 +1078,7 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap*
abv->set_cached(true);
}
}
-#endif // COMPILER2_OR_JVMCI || INCLUDE_AOT
+
InstanceKlass* ik = InstanceKlass::cast(k);
if (obj == NULL) {
#ifdef COMPILER2
@@ -1478,12 +1473,11 @@ void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableAr
if (obj.is_null()) {
continue;
}
-#if COMPILER2_OR_JVMCI || INCLUDE_AOT
+
// Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
continue;
}
-#endif // COMPILER2_OR_JVMCI || INCLUDE_AOT
#ifdef COMPILER2
if (EnableVectorSupport && VectorSupport::is_vector(k)) {
assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
@@ -2060,7 +2054,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
int trap_bci = trap_scope->bci();
#if INCLUDE_JVMCI
jlong speculation = current->pending_failed_speculation();
- if (nm->is_compiled_by_jvmci() && nm->is_nmethod()) { // Exclude AOTed methods
+ if (nm->is_compiled_by_jvmci()) {
nm->as_nmethod()->update_speculation(current);
} else {
assert(speculation == 0, "There should not be a speculation for methods compiled by non-JVMCI compilers");
diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp
index b3517dbe961..5b59e87d7ee 100644
--- a/src/hotspot/share/runtime/frame.cpp
+++ b/src/hotspot/share/runtime/frame.cpp
@@ -591,7 +591,6 @@ void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
//
// First letter indicates type of the frame:
// J: Java frame (compiled)
-// A: Java frame (aot compiled)
// j: Java frame (interpreted)
// V: VM frame (C/C++)
// v: Other frames running VM generated code (e.g. stubs, adapters, etc.)
@@ -633,9 +632,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
CompiledMethod* cm = (CompiledMethod*)_cb;
Method* m = cm->method();
if (m != NULL) {
- if (cm->is_aot()) {
- st->print("A %d ", cm->compile_id());
- } else if (cm->is_nmethod()) {
+ if (cm->is_nmethod()) {
nmethod* nm = cm->as_nmethod();
st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : ""));
st->print(" %s", nm->compiler_name());
@@ -1250,9 +1247,8 @@ void frame::describe(FrameValues& values, int frame_no) {
// For now just label the frame
CompiledMethod* cm = (CompiledMethod*)cb();
values.describe(-1, info_address,
- FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s%s", frame_no,
+ FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method J %s%s", frame_no,
p2i(cm),
- (cm->is_aot() ? "A ": "J "),
cm->method()->name_and_sig_as_C_string(),
(_deopt_state == is_deoptimized) ?
" (deoptimized)" :
diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp
index 35d2d9cca05..b6fde3e6a41 100644
--- a/src/hotspot/share/runtime/globals.hpp
+++ b/src/hotspot/share/runtime/globals.hpp
@@ -426,16 +426,6 @@ const intx ObjectAlignmentInBytes = 8;
"Delay in milliseconds for option AbortVMOnVMOperationTimeout") \
range(0, max_intx) \
\
- /* 50 retries * (5 * current_retry_count) millis = ~6.375 seconds */ \
- /* typically, at most a few retries are needed */ \
- product(intx, SuspendRetryCount, 50, \
- "Maximum retry count for an external suspend request") \
- range(0, max_intx) \
- \
- product(intx, SuspendRetryDelay, 5, \
- "Milliseconds to delay per retry (* current_retry_count)") \
- range(0, max_intx) \
- \
product(bool, MaxFDLimit, true, \
"Bump the number of file descriptors to maximum (Unix only)") \
\
@@ -1662,25 +1652,6 @@ const intx ObjectAlignmentInBytes = 8;
"Non-segmented code cache: X[%] of the total code cache") \
range(0, 100) \
\
- /* AOT parameters */ \
- product(bool, UseAOT, false, EXPERIMENTAL, \
- "Use AOT compiled files") \
- \
- product(ccstrlist, AOTLibrary, NULL, EXPERIMENTAL, \
- "AOT library") \
- \
- product(bool, PrintAOT, false, EXPERIMENTAL, \
- "Print used AOT klasses and methods") \
- \
- notproduct(bool, PrintAOTStatistics, false, \
- "Print AOT statistics") \
- \
- product(bool, UseAOTStrictLoading, false, DIAGNOSTIC, \
- "Exit the VM if any of the AOT libraries has invalid config") \
- \
- product(bool, CalculateClassFingerprint, false, \
- "Calculate class fingerprint") \
- \
/* interpreter debugging */ \
develop(intx, BinarySwitchThreshold, 5, \
"Minimal number of lookupswitch entries for rewriting to binary " \
diff --git a/src/hotspot/share/runtime/handshake.cpp b/src/hotspot/share/runtime/handshake.cpp
index 1cd0ba85090..fdc05c7d13c 100644
--- a/src/hotspot/share/runtime/handshake.cpp
+++ b/src/hotspot/share/runtime/handshake.cpp
@@ -405,14 +405,14 @@ HandshakeState::HandshakeState(JavaThread* target) :
_handshakee(target),
_queue(),
_lock(Monitor::leaf, "HandshakeState", Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never),
- _active_handshaker()
+ _active_handshaker(),
+ _suspended(false),
+ _async_suspend_handshake(false)
{
}
void HandshakeState::add_operation(HandshakeOperation* op) {
// Adds are done lock free and so is arming.
- // Calling this method with lock held is considered an error.
- assert(!_lock.owned_by_self(), "Lock should not be held");
_queue.push(op);
SafepointMechanism::arm_local_poll_release(_handshakee);
}
@@ -453,22 +453,23 @@ HandshakeOperation* HandshakeState::pop() {
return _queue.pop(non_self_queue_filter);
};
-void HandshakeState::process_by_self() {
+bool HandshakeState::process_by_self() {
assert(Thread::current() == _handshakee, "should call from _handshakee");
assert(!_handshakee->is_terminated(), "should not be a terminated thread");
assert(_handshakee->thread_state() != _thread_blocked, "should not be in a blocked state");
assert(_handshakee->thread_state() != _thread_in_native, "should not be in native");
ThreadInVMForHandshake tivm(_handshakee);
{
+ // Handshakes cannot safely safepoint.
+ // The exception to this rule is the asynchronous suspension handshake.
+ // It by-passes the NSV by manually doing the transition.
NoSafepointVerifier nsv;
- process_self_inner();
+ return process_self_inner();
}
}
-void HandshakeState::process_self_inner() {
+bool HandshakeState::process_self_inner() {
while (should_process()) {
- HandleMark hm(_handshakee);
- PreserveExceptionMark pem(_handshakee);
MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
HandshakeOperation* op = pop_for_self();
if (op != NULL) {
@@ -477,13 +478,24 @@ void HandshakeState::process_self_inner() {
log_trace(handshake)("Proc handshake %s " INTPTR_FORMAT " on " INTPTR_FORMAT " by self",
async ? "asynchronous" : "synchronous", p2i(op), p2i(_handshakee));
op->prepare(_handshakee, _handshakee);
- op->do_handshake(_handshakee);
- if (async) {
+ if (!async) {
+ HandleMark hm(_handshakee);
+ PreserveExceptionMark pem(_handshakee);
+ op->do_handshake(_handshakee);
+ } else {
+ // An asynchronous handshake may put the JavaThread in blocked state (safepoint safe).
+ // The destructor ~PreserveExceptionMark touches the exception oop so it must not be executed,
+ // since a safepoint may be in-progress when returning from the async handshake.
+ op->do_handshake(_handshakee);
log_handshake_info(((AsyncHandshakeOperation*)op)->start_time(), op->name(), 1, 0, "asynchronous");
delete op;
+ return true; // Must check for safepoints
}
+ } else {
+ return false;
}
}
+ return false;
}
bool HandshakeState::can_process_handshake() {
@@ -587,3 +599,102 @@ HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* ma
pr_ret == HandshakeState::_succeeded ? "including" : "excluding", p2i(match_op));
return pr_ret;
}
+
+void HandshakeState::lock() {
+ _lock.lock_without_safepoint_check();
+}
+
+void HandshakeState::unlock() {
+ _lock.unlock();
+}
+
+void HandshakeState::do_self_suspend() {
+ assert(Thread::current() == _handshakee, "should call from _handshakee");
+ assert(_lock.owned_by_self(), "Lock must be held");
+ assert(!_handshakee->has_last_Java_frame() || _handshakee->frame_anchor()->walkable(), "should have walkable stack");
+ JavaThreadState jts = _handshakee->thread_state();
+ while (is_suspended()) {
+ _handshakee->set_thread_state(_thread_blocked);
+ log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended", p2i(_handshakee));
+ _lock.wait_without_safepoint_check();
+ }
+ _handshakee->set_thread_state(jts);
+ set_async_suspend_handshake(false);
+ log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " resumed", p2i(_handshakee));
+}
+
+// This is the closure that prevents a suspended JavaThread from
+// escaping the suspend request.
+class ThreadSelfSuspensionHandshake : public AsyncHandshakeClosure {
+ public:
+ ThreadSelfSuspensionHandshake() : AsyncHandshakeClosure("ThreadSelfSuspensionHandshake") {}
+ void do_thread(Thread* thr) {
+ JavaThread* current = thr->as_Java_thread();
+ assert(current == Thread::current(), "Must be self executed.");
+ current->handshake_state()->do_self_suspend();
+ }
+};
+
+bool HandshakeState::suspend_with_handshake() {
+ if (_handshakee->is_exiting() ||
+ _handshakee->threadObj() == NULL) {
+ log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " exiting", p2i(_handshakee));
+ return false;
+ }
+ if (has_async_suspend_handshake()) {
+ if (is_suspended()) {
+ // Target is already suspended.
+ log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " already suspended", p2i(_handshakee));
+ return false;
+ } else {
+ // Target is going to wake up and leave suspension.
+ // Let's just stop the thread from doing that.
+ log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " re-suspended", p2i(_handshakee));
+ set_suspended(true);
+ return true;
+ }
+ }
+ // no suspend request
+ assert(!is_suspended(), "cannot be suspended without a suspend request");
+ // Thread is safe, so it must execute the request, thus we can count it as suspended
+ // from this point.
+ set_suspended(true);
+ set_async_suspend_handshake(true);
+ log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended, arming ThreadSuspension", p2i(_handshakee));
+ ThreadSelfSuspensionHandshake* ts = new ThreadSelfSuspensionHandshake();
+ Handshake::execute(ts, _handshakee);
+ return true;
+}
+
+// This is the closure that synchronously honors the suspend request.
+class SuspendThreadHandshake : public HandshakeClosure {
+ bool _did_suspend;
+public:
+ SuspendThreadHandshake() : HandshakeClosure("SuspendThread"), _did_suspend(false) {}
+ void do_thread(Thread* thr) {
+ JavaThread* target = thr->as_Java_thread();
+ _did_suspend = target->handshake_state()->suspend_with_handshake();
+ }
+ bool did_suspend() { return _did_suspend; }
+};
+
+bool HandshakeState::suspend() {
+ SuspendThreadHandshake st;
+ Handshake::execute(&st, _handshakee);
+ return st.did_suspend();
+}
+
+bool HandshakeState::resume() {
+ if (!is_suspended()) {
+ return false;
+ }
+ MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
+ if (!is_suspended()) {
+ assert(!_handshakee->is_suspended(), "cannot be suspended without a suspend request");
+ return false;
+ }
+ // Resume the thread.
+ set_suspended(false);
+ _lock.notify();
+ return true;
+}
diff --git a/src/hotspot/share/runtime/handshake.hpp b/src/hotspot/share/runtime/handshake.hpp
index 4aa9ec30376..15ea79c8a60 100644
--- a/src/hotspot/share/runtime/handshake.hpp
+++ b/src/hotspot/share/runtime/handshake.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
class HandshakeOperation;
class JavaThread;
+class SuspendThreadHandshake;
+class ThreadSelfSuspensionHandshake;
// A handshake closure is a callback that is executed for a JavaThread
// while it is in a safepoint/handshake-safe state. Depending on the
@@ -43,9 +45,9 @@ class HandshakeClosure : public ThreadClosure, public CHeapObj<mtThread> {
const char* const _name;
public:
HandshakeClosure(const char* name) : _name(name) {}
- virtual ~HandshakeClosure() {}
- const char* name() const { return _name; }
- virtual bool is_async() { return false; }
+ virtual ~HandshakeClosure() {}
+ const char* name() const { return _name; }
+ virtual bool is_async() { return false; }
virtual void do_thread(Thread* thread) = 0;
};
@@ -61,33 +63,49 @@ class Handshake : public AllStatic {
// Execution of handshake operation
static void execute(HandshakeClosure* hs_cl);
static void execute(HandshakeClosure* hs_cl, JavaThread* target);
- static void execute(AsyncHandshakeClosure* hs_cl, JavaThread* target);
+ static void execute(AsyncHandshakeClosure* hs_cl, JavaThread* target);
};
+class JvmtiRawMonitor;
+
// The HandshakeState keeps track of an ongoing handshake for this JavaThread.
// VMThread/Handshaker and JavaThread are serialized with _lock making sure the
// operation is only done by either VMThread/Handshaker on behalf of the
// JavaThread or by the target JavaThread itself.
class HandshakeState {
+ friend JvmtiRawMonitor;
+ friend ThreadSelfSuspensionHandshake;
+ friend SuspendThreadHandshake;
+ friend JavaThread;
// This a back reference to the JavaThread,
// the target for all operation in the queue.
JavaThread* _handshakee;
// The queue containing handshake operations to be performed on _handshakee.
FilterQueue _queue;
- // Provides mutual exclusion to this state and queue.
- Mutex _lock;
+ // Provides mutual exclusion to this state and queue. Also used for
+ // JavaThread suspend/resume operations.
+ Monitor _lock;
// Set to the thread executing the handshake operation.
Thread* _active_handshaker;
bool claim_handshake();
bool possibly_can_process_handshake();
bool can_process_handshake();
- void process_self_inner();
+
+ // Returns false if the JavaThread finished all its handshake operations.
+ // If the method returns true there is still potential work to be done,
+ // but we need to check for a safepoint before.
+ // (This is due to a suspension handshake which put the JavaThread in blocked
+ // state so a safepoint may be in-progress.)
+ bool process_self_inner();
bool have_non_self_executable_operation();
HandshakeOperation* pop_for_self();
HandshakeOperation* pop();
+ void lock();
+ void unlock();
+
public:
HandshakeState(JavaThread* thread);
@@ -107,10 +125,21 @@ class HandshakeState {
// while handshake operations are being executed, the _handshakee
// must take slow path, process_by_self(), if _lock is held.
bool should_process() {
- return !_queue.is_empty() || _lock.is_locked();
+ // The holder of the _lock can add an asynchronous handshake to queue.
+ // To make sure it is seen by the handshakee, the handshakee must first
+ // check the _lock, and if held go to slow path.
+ // Since the handshakee is unsafe if _lock gets locked after this check
+ // we know other threads cannot process any handshakes.
+ // Now we can check the queue to see if there is anything we should process.
+ if (_lock.is_locked()) {
+ return true;
+ }
+ // Lock check must be done before queue check, force ordering.
+ OrderAccess::loadload();
+ return !_queue.is_empty();
}
- void process_by_self();
+ bool process_by_self();
enum ProcessResult {
_no_operation = 0,
@@ -123,6 +152,31 @@ class HandshakeState {
ProcessResult try_process(HandshakeOperation* match_op);
Thread* active_handshaker() const { return _active_handshaker; }
+
+ // Suspend/resume support
+ private:
+ // This flag is true when the thread owning this
+ // HandshakeState (the _handshakee) is suspended.
+ volatile bool _suspended;
+ // This flag is true while there is async handshake (trap)
+ // on queue. Since we do only need one, we can reuse it if
+ // thread gets suspended again (after a resume)
+ // and we have not yet processed it.
+ bool _async_suspend_handshake;
+
+ // Called from the suspend handshake.
+ bool suspend_with_handshake();
+ // Called from the async handshake (the trap)
+ // to stop a thread from continuing execution when suspended.
+ void do_self_suspend();
+
+ bool is_suspended() { return Atomic::load(&_suspended); }
+ void set_suspended(bool to) { return Atomic::store(&_suspended, to); }
+ bool has_async_suspend_handshake() { return _async_suspend_handshake; }
+ void set_async_suspend_handshake(bool to) { _async_suspend_handshake = to; }
+
+ bool suspend();
+ bool resume();
};
#endif // SHARE_RUNTIME_HANDSHAKE_HPP
diff --git a/src/hotspot/share/runtime/init.cpp b/src/hotspot/share/runtime/init.cpp
index 3ba4aebe13e..8c83f5c5700 100644
--- a/src/hotspot/share/runtime/init.cpp
+++ b/src/hotspot/share/runtime/init.cpp
@@ -66,7 +66,6 @@ void classLoader_init1();
void compilationPolicy_init();
void codeCache_init();
void VM_Version_init();
-void AOTLoader_init();
void stubRoutines_init1();
jint universe_init(); // depends on codeCache_init and stubRoutines_init
// depends on universe_init, must be before interpreter_init (currently only on SPARC)
@@ -119,7 +118,6 @@ jint init_globals() {
codeCache_init();
VM_Version_init(); // depends on codeCache_init for emitting code
VMRegImpl::set_regName(); // need this before generate_stubs (for printing oop maps).
- AOTLoader_init(); // depends on VM_Version_init to adjust vm options
stubRoutines_init1();
jint status = universe_init(); // dependent on codeCache_init and
// stubRoutines_init1 and metaspace_init.
diff --git a/src/hotspot/share/runtime/interfaceSupport.cpp b/src/hotspot/share/runtime/interfaceSupport.cpp
index d5ea58da362..cfdc6e2a717 100644
--- a/src/hotspot/share/runtime/interfaceSupport.cpp
+++ b/src/hotspot/share/runtime/interfaceSupport.cpp
@@ -33,7 +33,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/stackFrameStream.inline.hpp"
diff --git a/src/hotspot/share/runtime/interfaceSupport.inline.hpp b/src/hotspot/share/runtime/interfaceSupport.inline.hpp
index 8b7828e3d08..09b41227605 100644
--- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp
+++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp
@@ -111,18 +111,18 @@ class ThreadStateTransition : public StackObj {
static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
assert((to & 1) == 0, "odd numbers are transitions states");
assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
+ assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "Unwalkable stack in native->vm transition");
+
// Change to transition state and ensure it is seen by the VM thread.
thread->set_thread_state_fence(_thread_in_native_trans);
// We never install asynchronous exceptions when coming (back) in
// to the runtime from native code because the runtime is not set
// up to handle exceptions floating around at arbitrary points.
- if (SafepointMechanism::should_process(thread) || thread->is_suspend_after_native()) {
- JavaThread::check_safepoint_and_suspend_for_native_trans(thread);
- }
-
+ SafepointMechanism::process_if_requested_with_exit_check(thread, false /* check asyncs */);
thread->set_thread_state(to);
}
+
protected:
void trans(JavaThreadState from, JavaThreadState to) { transition(_thread, from, to); }
void trans_from_java(JavaThreadState to) { transition_from_java(_thread, to); }
diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp
index c64a64257a4..1004a71e622 100644
--- a/src/hotspot/share/runtime/java.cpp
+++ b/src/hotspot/share/runtime/java.cpp
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "aot/aotLoader.hpp"
+#include "cds/dynamicArchive.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/stringTable.hpp"
@@ -45,7 +45,6 @@
#include "memory/metaspaceUtils.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/dynamicArchive.hpp"
#include "memory/universe.hpp"
#include "oops/constantPool.hpp"
#include "oops/generateOopMap.hpp"
@@ -267,10 +266,6 @@ void print_statistics() {
#endif // INCLUDE_JVMCI
#endif // COMPILER2
- if (PrintAOTStatistics) {
- AOTLoader::print_statistics();
- }
-
if (PrintNMethodStatistics) {
nmethod::print_statistics();
}
@@ -508,7 +503,15 @@ void before_exit(JavaThread* thread) {
#if INCLUDE_CDS
if (DynamicDumpSharedSpaces) {
- DynamicArchive::dump();
+ ExceptionMark em(thread);
+ DynamicArchive::dump(thread);
+ if (thread->has_pending_exception()) {
+ ResourceMark rm(thread);
+ oop pending_exception = thread->pending_exception();
+ log_error(cds)("ArchiveClassesAtExit has failed %s: %s", pending_exception->klass()->external_name(),
+ java_lang_String::as_utf8_string(java_lang_Throwable::message(pending_exception)));
+ thread->clear_pending_exception();
+ }
}
#endif
diff --git a/src/hotspot/share/runtime/java.hpp b/src/hotspot/share/runtime/java.hpp
index 8fb5aff75fd..6170bc45e7a 100644
--- a/src/hotspot/share/runtime/java.hpp
+++ b/src/hotspot/share/runtime/java.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,11 @@
#define SHARE_RUNTIME_JAVA_HPP
#include "runtime/os.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class Handle;
+class JavaThread;
+class Symbol;
// Execute code before all handles are released and thread is killed; prologue to vm_exit
extern void before_exit(JavaThread * thread);
@@ -136,35 +141,35 @@ class JDK_Version {
return _java_version;
}
static void set_java_version(const char* version) {
- _java_version = version;
+ _java_version = os::strdup(version);
}
static const char* runtime_name() {
return _runtime_name;
}
static void set_runtime_name(const char* name) {
- _runtime_name = name;
+ _runtime_name = os::strdup(name);
}
static const char* runtime_version() {
return _runtime_version;
}
static void set_runtime_version(const char* version) {
- _runtime_version = version;
+ _runtime_version = os::strdup(version);
}
static const char* runtime_vendor_version() {
return _runtime_vendor_version;
}
static void set_runtime_vendor_version(const char* vendor_version) {
- _runtime_vendor_version = vendor_version;
+ _runtime_vendor_version = os::strdup(vendor_version);
}
static const char* runtime_vendor_vm_bug_url() {
return _runtime_vendor_vm_bug_url;
}
static void set_runtime_vendor_vm_bug_url(const char* vendor_vm_bug_url) {
- _runtime_vendor_vm_bug_url = vendor_vm_bug_url;
+ _runtime_vendor_vm_bug_url = os::strdup(vendor_vm_bug_url);
}
};
diff --git a/src/hotspot/share/runtime/monitorDeflationThread.cpp b/src/hotspot/share/runtime/monitorDeflationThread.cpp
index 86805c1b67f..86fda7b8462 100644
--- a/src/hotspot/share/runtime/monitorDeflationThread.cpp
+++ b/src/hotspot/share/runtime/monitorDeflationThread.cpp
@@ -81,10 +81,6 @@ void MonitorDeflationThread::monitor_deflation_thread_entry(JavaThread* jt, TRAP
// will be handled by safepoint correctly when this thread is
// notified at a safepoint.
- // This ThreadBlockInVM object is not also considered to be
- // suspend-equivalent because MonitorDeflationThread is not
- // visible to external suspension.
-
ThreadBlockInVM tbivm(jt);
MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);
diff --git a/src/hotspot/share/runtime/mutex.cpp b/src/hotspot/share/runtime/mutex.cpp
index 6a5b6a604da..ddfd8a8b98a 100644
--- a/src/hotspot/share/runtime/mutex.cpp
+++ b/src/hotspot/share/runtime/mutex.cpp
@@ -26,6 +26,7 @@
#include "logging/log.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutex.hpp"
+#include "runtime/os.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/thread.inline.hpp"
@@ -54,7 +55,7 @@ void Mutex::check_safepoint_state(Thread* thread) {
name());
// Also check NoSafepointVerifier, and thread state is _thread_in_vm
- thread->check_for_valid_safepoint_state();
+ thread->as_Java_thread()->check_for_valid_safepoint_state();
} else {
// If initialized with safepoint_check_never, a NonJavaThread should never ask to safepoint check either.
assert(_safepoint_check_required != _safepoint_check_never,
@@ -215,7 +216,7 @@ bool Monitor::wait_without_safepoint_check(int64_t timeout) {
return wait_status != 0; // return true IFF timeout
}
-bool Monitor::wait(int64_t timeout, bool as_suspend_equivalent) {
+bool Monitor::wait(int64_t timeout) {
JavaThread* const self = JavaThread::current();
// Safepoint checking logically implies an active JavaThread.
assert(self->is_active_Java_thread(), "invariant");
@@ -238,25 +239,9 @@ bool Monitor::wait(int64_t timeout, bool as_suspend_equivalent) {
{
ThreadBlockInVM tbivmdc(self, &in_flight_mutex);
OSThreadWaitState osts(self->osthread(), false /* not Object.wait() */);
- if (as_suspend_equivalent) {
- self->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self()
- }
wait_status = _lock.wait(timeout);
in_flight_mutex = this; // save for ~ThreadBlockInVM
-
- // were we externally suspended while we were waiting?
- if (as_suspend_equivalent && self->handle_special_suspend_equivalent_condition()) {
- // Our event wait has finished and we own the lock, but
- // while we were waiting another thread suspended us. We don't
- // want to hold the lock while suspended because that
- // would surprise the thread that suspended us.
- _lock.unlock();
- self->java_suspend_self();
- _lock.lock();
- }
}
if (in_flight_mutex != NULL) {
@@ -273,17 +258,19 @@ bool Monitor::wait(int64_t timeout, bool as_suspend_equivalent) {
Mutex::~Mutex() {
assert_owner(NULL);
+ os::free(const_cast<char*>(_name));
}
-// Only Threads_lock, Heap_lock and SR_lock may be safepoint_check_sometimes.
+// Only Threads_lock and Heap_lock may be safepoint_check_sometimes.
bool is_sometimes_ok(const char* name) {
- return (strcmp(name, "Threads_lock") == 0 || strcmp(name, "Heap_lock") == 0 || strcmp(name, "SR_lock") == 0);
+ return (strcmp(name, "Threads_lock") == 0 || strcmp(name, "Heap_lock") == 0);
}
Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
- SafepointCheckRequired safepoint_check_required) : _owner(NULL), _name(name) {
+ SafepointCheckRequired safepoint_check_required) : _owner(NULL) {
assert(os::mutex_init_done(), "Too early!");
assert(name != NULL, "Mutex requires a name");
+ _name = os::strdup(name, mtInternal);
#ifdef ASSERT
_allow_vm_block = allow_vm_block;
_rank = Rank;
@@ -387,10 +374,9 @@ void Mutex::check_rank(Thread* thread) {
}
}
- // Locks with rank native or suspend_resume are an exception and are not
+ // Locks with rank native are an exception and are not
// subject to the verification rules.
- bool check_can_be_skipped = this->rank() == Mutex::native || this->rank() == Mutex::suspend_resume
- || SafepointSynchronize::is_at_safepoint();
+ bool check_can_be_skipped = this->rank() == Mutex::native || SafepointSynchronize::is_at_safepoint();
if (owned_by_self()) {
// wait() case
Mutex* least = get_least_ranked_lock_besides_this(locks_owned);
@@ -433,23 +419,6 @@ bool Mutex::contains(Mutex* locks, Mutex* lock) {
return false;
}
-// NSV implied with locking allow_vm_block or !safepoint_check locks.
-void Mutex::no_safepoint_verifier(Thread* thread, bool enable) {
- // The tty_lock is special because it is released for the safepoint by
- // the safepoint mechanism.
- if (this == tty_lock) {
- return;
- }
-
- if (_allow_vm_block) {
- if (enable) {
- thread->_no_safepoint_count++;
- } else {
- thread->_no_safepoint_count--;
- }
- }
-}
-
// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread.
// Rather like an EventListener for _owner (:>).
@@ -477,7 +446,11 @@ void Mutex::set_owner_implementation(Thread *new_owner) {
new_owner->_owned_locks = this;
// NSV implied with locking allow_vm_block flag.
- no_safepoint_verifier(new_owner, true);
+ // The tty_lock is special because it is released for the safepoint by
+ // the safepoint mechanism.
+ if (new_owner->is_Java_thread() && _allow_vm_block && this != tty_lock) {
+ new_owner->as_Java_thread()->inc_no_safepoint_count();
+ }
} else {
// the thread is releasing this lock
@@ -512,7 +485,9 @@ void Mutex::set_owner_implementation(Thread *new_owner) {
_next = NULL;
// ~NSV implied with locking allow_vm_block flag.
- no_safepoint_verifier(old_owner, false);
+ if (old_owner->is_Java_thread() && _allow_vm_block && this != tty_lock) {
+ old_owner->as_Java_thread()->dec_no_safepoint_count();
+ }
}
}
#endif // ASSERT
diff --git a/src/hotspot/share/runtime/mutex.hpp b/src/hotspot/share/runtime/mutex.hpp
index 13d24b2fcb1..de29d09de8f 100644
--- a/src/hotspot/share/runtime/mutex.hpp
+++ b/src/hotspot/share/runtime/mutex.hpp
@@ -67,8 +67,7 @@ class Mutex : public CHeapObj<mtSynchronizer> {
access = event + 1,
tty = access + 2,
special = tty + 3,
- suspend_resume = special + 1,
- oopstorage = suspend_resume + 2,
+ oopstorage = special + 3,
leaf = oopstorage + 2,
safepoint = leaf + 10,
barrier = safepoint + 1,
@@ -119,11 +118,9 @@ class Mutex : public CHeapObj {
void check_no_safepoint_state(Thread* thread) NOT_DEBUG_RETURN;
void check_rank (Thread* thread) NOT_DEBUG_RETURN;
void assert_owner (Thread* expected) NOT_DEBUG_RETURN;
- void no_safepoint_verifier (Thread* thread, bool enable) NOT_DEBUG_RETURN;
public:
static const bool _allow_vm_block_flag = true;
- static const bool _as_suspend_equivalent_flag = true;
// Locks can be acquired with or without a safepoint check. NonJavaThreads do not follow
// the safepoint protocol when acquiring locks.
@@ -222,10 +219,8 @@ class Monitor : public Mutex {
// Wait until monitor is notified (or times out).
// Defaults are to make safepoint checks, wait time is forever (i.e.,
- // zero), and not a suspend-equivalent condition. Returns true if wait
- // times out; otherwise returns false.
- bool wait(int64_t timeout = 0,
- bool as_suspend_equivalent = !_as_suspend_equivalent_flag);
+ // zero). Returns true if wait times out; otherwise returns false.
+ bool wait(int64_t timeout = 0);
bool wait_without_safepoint_check(int64_t timeout = 0);
void notify();
void notify_all();
diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp
index 51766328e8a..7f0ccff58f4 100644
--- a/src/hotspot/share/runtime/mutexLocker.cpp
+++ b/src/hotspot/share/runtime/mutexLocker.cpp
@@ -72,7 +72,6 @@ Mutex* NonJavaThreadsList_lock = NULL;
Mutex* NonJavaThreadsListSync_lock = NULL;
Monitor* CGC_lock = NULL;
Monitor* STS_lock = NULL;
-Monitor* FullGCCount_lock = NULL;
Monitor* G1OldGCCount_lock = NULL;
Mutex* Shared_DirtyCardQ_lock = NULL;
Mutex* G1DetachedRefinementStats_lock = NULL;
@@ -212,7 +211,6 @@ void mutex_init() {
def(CGC_lock , PaddedMonitor, special, true, _safepoint_check_never); // coordinate between fore- and background GC
def(STS_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
- def(FullGCCount_lock , PaddedMonitor, leaf, true, _safepoint_check_never); // in support of ExplicitGCInvokesConcurrent
if (UseG1GC) {
def(G1OldGCCount_lock , PaddedMonitor, leaf, true, _safepoint_check_always);
diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp
index 656bf4e55b1..41b12839128 100644
--- a/src/hotspot/share/runtime/mutexLocker.hpp
+++ b/src/hotspot/share/runtime/mutexLocker.hpp
@@ -66,7 +66,6 @@ extern Mutex* NonJavaThreadsListSync_lock; // a lock for NonJavaThreads li
extern Monitor* CGC_lock; // used for coordination between
// fore- & background GC threads.
extern Monitor* STS_lock; // used for joining/leaving SuspendibleThreadSet.
-extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc
extern Monitor* G1OldGCCount_lock; // in support of "concurrent" full gc
extern Mutex* Shared_DirtyCardQ_lock; // Lock protecting dirty card
// queue shared by
@@ -259,10 +258,9 @@ class MonitorLocker: public MutexLocker {
assert(monitor != NULL, "NULL monitor not allowed");
}
- bool wait(int64_t timeout = 0,
- bool as_suspend_equivalent = !Mutex::_as_suspend_equivalent_flag) {
+ bool wait(int64_t timeout = 0) {
if (_flag == Mutex::_safepoint_check_flag) {
- return as_monitor()->wait(timeout, as_suspend_equivalent);
+ return as_monitor()->wait(timeout);
} else {
return as_monitor()->wait_without_safepoint_check(timeout);
}
diff --git a/src/hotspot/share/runtime/nonJavaThread.cpp b/src/hotspot/share/runtime/nonJavaThread.cpp
index 7332e59fe0f..b6291c6eabe 100644
--- a/src/hotspot/share/runtime/nonJavaThread.cpp
+++ b/src/hotspot/share/runtime/nonJavaThread.cpp
@@ -323,9 +323,8 @@ void WatcherThread::stop() {
MonitorLocker mu(Terminator_lock);
while (watcher_thread() != NULL) {
- // This wait should make safepoint checks, wait without a timeout,
- // and wait as a suspend-equivalent condition.
- mu.wait(0, Mutex::_as_suspend_equivalent_flag);
+ // This wait should make safepoint checks and wait without a timeout.
+ mu.wait(0);
}
}
diff --git a/src/hotspot/share/runtime/notificationThread.cpp b/src/hotspot/share/runtime/notificationThread.cpp
index 0df0b89df3d..a7c903a8526 100644
--- a/src/hotspot/share/runtime/notificationThread.cpp
+++ b/src/hotspot/share/runtime/notificationThread.cpp
@@ -108,8 +108,8 @@ void NotificationThread::notification_thread_entry(JavaThread* jt, TRAPS) {
(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) |
(has_gc_notification_event = GCNotifier::has_event()))
== 0) {
- // Wait as a suspend equalent until notified that there is some work to do.
- ml.wait(0, true);
+ // Wait until notified that there is some work to do.
+ ml.wait(0);
}
}
diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp
index a029da7b916..dbd24611e72 100644
--- a/src/hotspot/share/runtime/objectMonitor.cpp
+++ b/src/hotspot/share/runtime/objectMonitor.cpp
@@ -403,39 +403,49 @@ bool ObjectMonitor::enter(JavaThread* current) {
}
OSThreadContendState osts(current->osthread());
- ThreadBlockInVM tbivm(current);
- // TODO-FIXME: change the following for(;;) loop to straight-line code.
- for (;;) {
- current->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition()
- // or java_suspend_self()
+ assert(current->thread_state() == _thread_in_vm, "invariant");
+ current->frame_anchor()->make_walkable(current);
+ // Thread must be walkable before it is blocked.
+ // Read in reverse order.
+ OrderAccess::storestore();
+ for (;;) {
+ current->set_thread_state(_thread_blocked);
EnterI(current);
-
- if (!current->handle_special_suspend_equivalent_condition()) break;
-
- // We have acquired the contended monitor, but while we were
- // waiting another thread suspended us. We don't want to enter
- // the monitor while suspended because that would surprise the
- // thread that suspended us.
- //
- _recursions = 0;
- _succ = NULL;
- exit(current, false /* not_suspended */);
-
- current->java_suspend_self();
+ current->set_thread_state_fence(_thread_blocked_trans);
+ if (SafepointMechanism::should_process(current) &&
+ current->is_suspended()) {
+ // We have acquired the contended monitor, but while we were
+ // waiting another thread suspended us. We don't want to enter
+ // the monitor while suspended because that would surprise the
+ // thread that suspended us.
+ _recursions = 0;
+ _succ = NULL;
+ // Don't need a full fence after clearing successor here because of the call to exit().
+ exit(current, false /* not_suspended */);
+ SafepointMechanism::process_if_requested(current);
+ // Since we are going to _thread_blocked we skip setting _thread_in_vm here.
+ } else {
+ // Only exit path from for loop
+ break;
+ }
}
+
current->set_current_pending_monitor(NULL);
// We cleared the pending monitor info since we've just gotten past
// the enter-check-for-suspend dance and we now own the monitor free
- // and clear, i.e., it is no longer pending. The ThreadBlockInVM
- // destructor can go to a safepoint at the end of this block. If we
+ // and clear, i.e., it is no longer pending.
+ // We can go to a safepoint at the end of this block. If we
// do a thread dump during that safepoint, then this thread will show
// as having "-locked" the monitor, but the OS and java.lang.Thread
// states will still report that the thread is blocked trying to
// acquire it.
+
+ // Completed the transition.
+ SafepointMechanism::process_if_requested(current);
+ current->set_thread_state(_thread_in_vm);
}
add_to_contentions(-1);
@@ -954,25 +964,26 @@ void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) {
if (TryLock(current) > 0) break;
if (TrySpin(current) > 0) break;
- // State transition wrappers around park() ...
- // ReenterI() wisely defers state transitions until
- // it's clear we must park the thread.
{
OSThreadContendState osts(current->osthread());
- ThreadBlockInVM tbivm(current);
- // cleared by handle_special_suspend_equivalent_condition()
- // or java_suspend_self()
- current->set_suspend_equivalent();
- current->_ParkEvent->park();
+ assert(current->thread_state() == _thread_in_vm, "invariant");
- // were we externally suspended while we were waiting?
- for (;;) {
- if (!current->handle_special_suspend_equivalent_condition()) break;
- if (_succ == current) { _succ = NULL; OrderAccess::fence(); }
- current->java_suspend_self();
- current->set_suspend_equivalent();
+ current->frame_anchor()->make_walkable(current);
+ // Thread must be walkable before it is blocked.
+ // Read in reverse order.
+ OrderAccess::storestore();
+ current->set_thread_state(_thread_blocked);
+ current->_ParkEvent->park();
+ current->set_thread_state_fence(_thread_blocked_trans);
+ if (SafepointMechanism::should_process(current)) {
+ if (_succ == current) {
+ _succ = NULL;
+ OrderAccess::fence(); // always do a full fence when successor is cleared
+ }
+ SafepointMechanism::process_if_requested(current);
}
+ current->set_thread_state(_thread_in_vm);
}
// Try again, but just so we distinguish between futile wakeups and
@@ -1526,11 +1537,15 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
{ // State transition wrappers
OSThread* osthread = current->osthread();
OSThreadWaitState osts(osthread, true);
- {
- ThreadBlockInVM tbivm(current);
- // Thread is in thread_blocked state and oop access is unsafe.
- current->set_suspend_equivalent();
+ assert(current->thread_state() == _thread_in_vm, "invariant");
+
+ {
+ current->frame_anchor()->make_walkable(current);
+ // Thread must be walkable before it is blocked.
+ // Read in reverse order.
+ OrderAccess::storestore();
+ current->set_thread_state(_thread_blocked);
if (interrupted || HAS_PENDING_EXCEPTION) {
// Intentionally empty
} else if (node._notified == 0) {
@@ -1540,14 +1555,16 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
ret = current->_ParkEvent->park(millis);
}
}
-
- // were we externally suspended while we were waiting?
- if (current->handle_special_suspend_equivalent_condition()) {
- // TODO-FIXME: add -- if succ == current then succ = null.
- current->java_suspend_self();
+ current->set_thread_state_fence(_thread_blocked_trans);
+ if (SafepointMechanism::should_process(current)) {
+ if (_succ == current) {
+ _succ = NULL;
+ OrderAccess::fence(); // always do a full fence when successor is cleared
+ }
+ SafepointMechanism::process_if_requested(current);
}
-
- } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
+ current->set_thread_state(_thread_in_vm);
+ }
// Node may be on the WaitSet, the EntryList (or cxq), or in transition
// from the WaitSet to the EntryList.
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp
index 6847ffb4df5..d90780215b4 100644
--- a/src/hotspot/share/runtime/os.cpp
+++ b/src/hotspot/share/runtime/os.cpp
@@ -871,8 +871,6 @@ int os::random() {
// locking.
void os::start_thread(Thread* thread) {
- // guard suspend/resume
- MutexLocker ml(thread->SR_lock(), Mutex::_no_safepoint_check_flag);
OSThread* osthread = thread->osthread();
osthread->set_state(RUNNABLE);
pd_start_thread(thread);
@@ -1368,6 +1366,10 @@ FILE* os::fopen(const char* path, const char* mode) {
return file;
}
+ssize_t os::read(int fd, void *buf, unsigned int nBytes) {
+ return ::read(fd, buf, nBytes);
+}
+
bool os::set_boot_path(char fileSep, char pathSep) {
const char* home = Arguments::get_java_home();
int home_len = (int)strlen(home);
diff --git a/src/hotspot/share/runtime/os.inline.hpp b/src/hotspot/share/runtime/os.inline.hpp
index cf1625ef689..dc591365430 100644
--- a/src/hotspot/share/runtime/os.inline.hpp
+++ b/src/hotspot/share/runtime/os.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,10 +27,6 @@
#include "runtime/os.hpp"
-inline ssize_t os::read(int fd, void *buf, unsigned int nBytes) {
- return ::read(fd, buf, nBytes);
-}
-
#include OS_HEADER_INLINE(os)
#endif // SHARE_RUNTIME_OS_INLINE_HPP
diff --git a/src/hotspot/share/runtime/safepointMechanism.cpp b/src/hotspot/share/runtime/safepointMechanism.cpp
index 2357c510d28..c3350344abe 100644
--- a/src/hotspot/share/runtime/safepointMechanism.cpp
+++ b/src/hotspot/share/runtime/safepointMechanism.cpp
@@ -77,24 +77,26 @@ void SafepointMechanism::default_initialize() {
}
void SafepointMechanism::process(JavaThread *thread) {
- if (global_poll()) {
- // Any load in ::block must not pass the global poll load.
- // Otherwise we might load an old safepoint counter (for example).
- OrderAccess::loadload();
- SafepointSynchronize::block(thread);
- }
+ bool need_rechecking;
+ do {
+ if (global_poll()) {
+ // Any load in ::block() must not pass the global poll load.
+ // Otherwise we might load an old safepoint counter (for example).
+ OrderAccess::loadload();
+ SafepointSynchronize::block(thread);
+ }
- // The call to on_safepoint fixes the thread's oops and the first few frames.
- //
- // The call has been carefully placed here to cater for a few situations:
- // 1) After we exit from block after a global poll
- // 2) After a thread races with the disarming of the global poll and transitions from native/blocked
- // 3) Before the handshake code is run
- StackWatermarkSet::on_safepoint(thread);
+ // The call to on_safepoint fixes the thread's oops and the first few frames.
+ //
+ // The call has been carefully placed here to cater to a few situations:
+ // 1) After we exit from block after a global poll
+ // 2) After a thread races with the disarming of the global poll and transitions from native/blocked
+ // 3) Before the handshake code is run
+ StackWatermarkSet::on_safepoint(thread);
- if (thread->handshake_state()->should_process()) {
- thread->handshake_state()->process_by_self();
- }
+ need_rechecking = thread->handshake_state()->should_process() && thread->handshake_state()->process_by_self();
+
+ } while (need_rechecking);
}
uintptr_t SafepointMechanism::compute_poll_word(bool armed, uintptr_t stack_watermark) {
@@ -111,6 +113,8 @@ uintptr_t SafepointMechanism::compute_poll_word(bool armed, uintptr_t stack_wate
}
void SafepointMechanism::update_poll_values(JavaThread* thread) {
+ assert(thread->thread_state() != _thread_blocked, "Must not be");
+ assert(thread->thread_state() != _thread_in_native, "Must not be");
for (;;) {
bool armed = global_poll() || thread->handshake_state()->has_operation();
uintptr_t stack_watermark = StackWatermarkSet::lowest_watermark(thread);
diff --git a/src/hotspot/share/runtime/safepointMechanism.hpp b/src/hotspot/share/runtime/safepointMechanism.hpp
index c99e0212481..309a8e789a5 100644
--- a/src/hotspot/share/runtime/safepointMechanism.hpp
+++ b/src/hotspot/share/runtime/safepointMechanism.hpp
@@ -48,7 +48,6 @@ class SafepointMechanism : public AllStatic {
static inline void disarm_local_poll(JavaThread* thread);
- static inline bool local_poll(Thread* thread);
static inline bool global_poll();
static void process(JavaThread *thread);
@@ -80,7 +79,7 @@ class SafepointMechanism : public AllStatic {
};
// Call this method to see if this thread should block for a safepoint or process handshake.
- static inline bool should_process(Thread* thread);
+ static inline bool should_process(JavaThread* thread);
// Processes a pending requested operation.
static inline void process_if_requested(JavaThread* thread);
diff --git a/src/hotspot/share/runtime/safepointMechanism.inline.hpp b/src/hotspot/share/runtime/safepointMechanism.inline.hpp
index 1279401049f..019001b59c5 100644
--- a/src/hotspot/share/runtime/safepointMechanism.inline.hpp
+++ b/src/hotspot/share/runtime/safepointMechanism.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,24 +60,14 @@ bool SafepointMechanism::global_poll() {
return (SafepointSynchronize::_state != SafepointSynchronize::_not_synchronized);
}
-bool SafepointMechanism::local_poll(Thread* thread) {
- if (thread->is_Java_thread()) {
- return local_poll_armed(thread->as_Java_thread());
- } else {
- // If the poll is on a non-java thread we can only check the global state.
- return global_poll();
- }
-}
-
-bool SafepointMechanism::should_process(Thread* thread) {
- return local_poll(thread);
+bool SafepointMechanism::should_process(JavaThread* thread) {
+ return local_poll_armed(thread);
}
-void SafepointMechanism::process_if_requested(JavaThread *thread) {
- if (!local_poll_armed(thread)) {
- return;
+void SafepointMechanism::process_if_requested(JavaThread* thread) {
+ if (local_poll_armed(thread)) {
+ process_if_requested_slow(thread);
}
- process_if_requested_slow(thread);
}
void SafepointMechanism::process_if_requested_with_exit_check(JavaThread* thread, bool check_asyncs) {
diff --git a/src/hotspot/share/runtime/safepointVerifiers.cpp b/src/hotspot/share/runtime/safepointVerifiers.cpp
index d4fc41b6006..f09aba498e3 100644
--- a/src/hotspot/share/runtime/safepointVerifiers.cpp
+++ b/src/hotspot/share/runtime/safepointVerifiers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,20 +32,28 @@
#ifdef ASSERT
NoSafepointVerifier::NoSafepointVerifier() : _thread(Thread::current()) {
- _thread->_no_safepoint_count++;
+ if (_thread->is_Java_thread()) {
+ _thread->as_Java_thread()->inc_no_safepoint_count();
+ }
}
NoSafepointVerifier::~NoSafepointVerifier() {
- _thread->_no_safepoint_count--;
+ if (_thread->is_Java_thread()) {
+ _thread->as_Java_thread()->dec_no_safepoint_count();
+ }
}
PauseNoSafepointVerifier::PauseNoSafepointVerifier(NoSafepointVerifier* nsv)
: _nsv(nsv) {
assert(_nsv->_thread == Thread::current(), "must be");
- _nsv->_thread->_no_safepoint_count--;
+ if (_nsv->_thread->is_Java_thread()) {
+ _nsv->_thread->as_Java_thread()->dec_no_safepoint_count();
+ }
}
PauseNoSafepointVerifier::~PauseNoSafepointVerifier() {
- _nsv->_thread->_no_safepoint_count++;
+ if (_nsv->_thread->is_Java_thread()) {
+ _nsv->_thread->as_Java_thread()->inc_no_safepoint_count();
+ }
}
#endif // ASSERT
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index 36830a84f85..4579a34c41a 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "jvm.h"
-#include "aot/aotLoader.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
@@ -1369,8 +1368,8 @@ bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, cons
if (VM_Version::supports_fast_class_init_checks() &&
invoke_code == Bytecodes::_invokestatic &&
callee_method->needs_clinit_barrier() &&
- callee != NULL && (callee->is_compiled_by_jvmci() || callee->is_aot())) {
- return true; // skip patching for JVMCI or AOT code
+ callee != NULL && callee->is_compiled_by_jvmci()) {
+ return true; // skip patching for JVMCI
}
CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
if (ssc->is_clean()) ssc->set(static_call_info);
@@ -3166,9 +3165,12 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
// Fill in the signature array, for the calling-convention call.
const int total_args_passed = method->size_of_parameters();
- BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
- VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
- int i=0;
+ BasicType stack_sig_bt[16];
+ VMRegPair stack_regs[16];
+ BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
+ VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
+
+ int i = 0;
if (!method->is_static()) // Pass in receiver first
sig_bt[i++] = T_OBJECT;
SignatureStream ss(method->signature());
diff --git a/src/hotspot/share/runtime/signature.cpp b/src/hotspot/share/runtime/signature.cpp
index 52fc89277ea..f95d644aa5d 100644
--- a/src/hotspot/share/runtime/signature.cpp
+++ b/src/hotspot/share/runtime/signature.cpp
@@ -204,6 +204,12 @@ SignatureStream::SignatureStream(const Symbol* signature,
}
SignatureStream::~SignatureStream() {
+ if (_previous_name == vmSymbols::java_lang_Object()) {
+ // no names were created
+ assert(_names == NULL, "_names unexpectedly created");
+ return;
+ }
+
// decrement refcount for names created during signature parsing
_previous_name->decrement_refcount();
if (_names != NULL) {
diff --git a/src/hotspot/share/runtime/sweeper.cpp b/src/hotspot/share/runtime/sweeper.cpp
index cffae92ccc2..6df1161b6f0 100644
--- a/src/hotspot/share/runtime/sweeper.cpp
+++ b/src/hotspot/share/runtime/sweeper.cpp
@@ -273,7 +273,6 @@ void NMethodSweeper::handle_safepoint_request() {
MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
ThreadBlockInVM tbivm(thread);
- thread->java_suspend_self();
}
}
diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp
index a441b90310c..3b87919db81 100644
--- a/src/hotspot/share/runtime/synchronizer.cpp
+++ b/src/hotspot/share/runtime/synchronizer.cpp
@@ -41,6 +41,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
+#include "runtime/os.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepointMechanism.inline.hpp"
diff --git a/src/hotspot/share/runtime/synchronizer.hpp b/src/hotspot/share/runtime/synchronizer.hpp
index 90994454629..89cf4f38de7 100644
--- a/src/hotspot/share/runtime/synchronizer.hpp
+++ b/src/hotspot/share/runtime/synchronizer.hpp
@@ -29,7 +29,7 @@
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/handles.hpp"
-#include "runtime/os.hpp"
+#include "utilities/growableArray.hpp"
class LogStream;
class ObjectMonitor;
diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index 16518adf333..3ef4ae62d81 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "aot/aotLoader.hpp"
+#include "cds/metaspaceShared.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/javaThreadStatus.hpp"
@@ -55,7 +55,6 @@
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -262,21 +261,11 @@ Thread::Thread() {
// plain initialization
debug_only(_owned_locks = NULL;)
- NOT_PRODUCT(_no_safepoint_count = 0;)
NOT_PRODUCT(_skip_gcalot = false;)
_jvmti_env_iteration_count = 0;
set_allocated_bytes(0);
- _current_pending_monitor = NULL;
- _current_pending_monitor_is_from_java = true;
- _current_waiting_monitor = NULL;
_current_pending_raw_monitor = NULL;
-#ifdef ASSERT
- _visited_for_critical_count = false;
-#endif
-
- _SR_lock = new Monitor(Mutex::suspend_resume, "SR_lock", true,
- Monitor::_safepoint_check_sometimes);
_suspend_flags = 0;
// thread-specific hashCode stream generator state - Marsaglia shift-xor form
@@ -285,10 +274,6 @@ Thread::Thread() {
_hashStateZ = 0x8767; // (int)(3579807591LL & 0xffff) ;
_hashStateW = 273326509;
- _OnTrap = 0;
- _Stalled = 0;
- _TypeTag = 0x2BAD;
-
// Many of the following fields are effectively final - immutable
// Note that nascent threads can't use the Native Monitor-Mutex
// construct until the _MutexEvent is initialized ...
@@ -451,18 +436,13 @@ Thread::~Thread() {
delete last_handle_mark();
assert(last_handle_mark() == NULL, "check we have reached the end");
- // It's possible we can encounter a null _ParkEvent, etc., in stillborn threads.
- // We NULL out the fields for good hygiene.
- ParkEvent::Release(_ParkEvent); _ParkEvent = NULL;
+ ParkEvent::Release(_ParkEvent);
+ // Set to NULL as a termination indicator for has_terminated().
+ Atomic::store(&_ParkEvent, (ParkEvent*)NULL);
delete handle_area();
delete metadata_handles();
- // SR_handler uses this as a termination indicator -
- // needs to happen before os::free_thread()
- delete _SR_lock;
- _SR_lock = NULL;
-
// osthread() can be NULL, if creation of thread failed.
if (osthread() != NULL) os::free_thread(osthread());
@@ -583,119 +563,6 @@ void Thread::send_async_exception(oop java_thread, oop java_throwable) {
Handshake::execute(&vm_stop, target);
}
-
-// Check if an external suspend request has completed (or has been
-// cancelled). Returns true if the thread is externally suspended and
-// false otherwise.
-bool JavaThread::is_ext_suspend_completed() {
- bool did_trans_retry = false; // only do thread_in_native_trans retry once
- bool do_trans_retry; // flag to force the retry
-
- do {
- do_trans_retry = false;
-
- if (is_exiting()) {
- // Thread is in the process of exiting. This is always checked
- // first to reduce the risk of dereferencing a freed JavaThread.
- return false;
- }
-
- if (!is_external_suspend()) {
- // Suspend request is cancelled. This is always checked before
- // is_ext_suspended() to reduce the risk of a rogue resume
- // confusing the thread that made the suspend request.
- return false;
- }
-
- if (is_ext_suspended()) {
- // thread is suspended
- return true;
- }
-
- // Now that we no longer do hard suspends of threads running
- // native code, the target thread can be changing thread state
- // while we are in this routine:
- //
- // _thread_in_native -> _thread_in_native_trans -> _thread_blocked
- //
- // We save a copy of the thread state as observed at this moment
- // and make our decision about suspend completeness based on the
- // copy. This closes the race where the thread state is seen as
- // _thread_in_native_trans in the if-thread_blocked check, but is
- // seen as _thread_blocked in if-thread_in_native_trans check.
- JavaThreadState save_state = thread_state();
-
- if (save_state == _thread_blocked && is_suspend_equivalent()) {
- // If the thread's state is _thread_blocked and this blocking
- // condition is known to be equivalent to a suspend, then we can
- // consider the thread to be externally suspended. This means that
- // the code that sets _thread_blocked has been modified to do
- // self-suspension if the blocking condition releases. We also
- // used to check for CONDVAR_WAIT here, but that is now covered by
- // the _thread_blocked with self-suspension check.
- //
- // Return true since we wouldn't be here unless there was still an
- // external suspend request.
- return true;
- } else if (save_state == _thread_in_native && frame_anchor()->walkable()) {
- // Threads running native code will self-suspend on native==>VM/Java
- // transitions. If its stack is walkable (should always be the case
- // unless this function is called before the actual java_suspend()
- // call), then the wait is done.
- return true;
- } else if (!did_trans_retry &&
- save_state == _thread_in_native_trans &&
- frame_anchor()->walkable()) {
- // The thread is transitioning from thread_in_native to another
- // thread state. check_safepoint_and_suspend_for_native_trans()
- // will force the thread to self-suspend. If it hasn't gotten
- // there yet we may have caught the thread in-between the native
- // code check above and the self-suspend.
- //
- // Since we use the saved thread state in the if-statement above,
- // there is a chance that the thread has already transitioned to
- // _thread_blocked by the time we get here. In that case, we will
- // make a single unnecessary pass through the logic below. This
- // doesn't hurt anything since we still do the trans retry.
-
- // Once the thread leaves thread_in_native_trans for another
- // thread state, we break out of this retry loop. We shouldn't
- // need this flag to prevent us from getting back here, but
- // sometimes paranoia is good.
- did_trans_retry = true;
-
- // We wait for the thread to transition to a more usable state.
- for (int i = 1; i <= SuspendRetryCount; i++) {
- // We used to do an "os::yield_all(i)" call here with the intention
- // that yielding would increase on each retry. However, the parameter
- // is ignored on Linux which means the yield didn't scale up. Waiting
- // on the SR_lock below provides a much more predictable scale up for
- // the delay. It also provides a simple/direct point to check for any
- // safepoint requests from the VMThread
-
- // temporarily drops SR_lock while doing wait with safepoint check
- // (if we're a JavaThread - the WatcherThread can also call this)
- // and increase delay with each retry
- if (Thread::current()->is_Java_thread()) {
- SR_lock()->wait(i * SuspendRetryDelay);
- } else {
- SR_lock()->wait_without_safepoint_check(i * SuspendRetryDelay);
- }
-
- // check the actual thread state instead of what we saved above
- if (thread_state() != _thread_in_native_trans) {
- // the thread has transitioned to another thread state so
- // try all the checks (except this one) one more time.
- do_trans_retry = true;
- break;
- }
- } // end retry loop
- }
- } while (do_trans_retry);
-
- return false;
-}
-
// GC Support
bool Thread::claim_par_threads_do(uintx claim_token) {
uintx token = _threads_do_token;
@@ -841,38 +708,6 @@ void Thread::print_owned_locks_on(outputStream* st) const {
}
}
}
-
-// Checks safepoint allowed and clears unhandled oops at potential safepoints.
-void Thread::check_possible_safepoint() {
- if (!is_Java_thread()) return;
-
- if (_no_safepoint_count > 0) {
- print_owned_locks();
- assert(false, "Possible safepoint reached by thread that does not allow it");
- }
-#ifdef CHECK_UNHANDLED_OOPS
- // Clear unhandled oops in JavaThreads so we get a crash right away.
- clear_unhandled_oops();
-#endif // CHECK_UNHANDLED_OOPS
-}
-
-void Thread::check_for_valid_safepoint_state() {
- if (!is_Java_thread()) return;
-
- // Check NoSafepointVerifier, which is implied by locks taken that can be
- // shared with the VM thread. This makes sure that no locks with allow_vm_block
- // are held.
- check_possible_safepoint();
-
- if (this->as_Java_thread()->thread_state() != _thread_in_vm) {
- fatal("LEAF method calling lock?");
- }
-
- if (GCALotAtAllSafepoints) {
- // We could enter a safepoint here and thus have a gc
- InterfaceSupport::check_gc_alot();
- }
-}
#endif // ASSERT
// We had to move these methods here, because vm threads get into ObjectSynchronizer::enter
@@ -947,17 +782,12 @@ static void create_initial_thread(Handle thread_group, JavaThread* thread,
JavaThreadStatus::RUNNABLE);
}
-static char java_version[64] = "";
-static char java_runtime_name[128] = "";
-static char java_runtime_version[128] = "";
-static char java_runtime_vendor_version[128] = "";
-static char java_runtime_vendor_vm_bug_url[128] = "";
-
-// Extract version and vendor specific information.
+// Extract version and vendor specific information from
+// java.lang.VersionProps fields.
+// Returned char* is allocated in the thread's resource area
+// so must be copied for permanency.
static const char* get_java_version_info(InstanceKlass* ik,
- Symbol* field_name,
- char* buffer,
- int buffer_size) {
+ Symbol* field_name) {
fieldDescriptor fd;
bool found = ik != NULL &&
ik->find_local_field(field_name,
@@ -967,9 +797,7 @@ static const char* get_java_version_info(InstanceKlass* ik,
if (name_oop == NULL) {
return NULL;
}
- const char* name = java_lang_String::as_utf8_string(name_oop,
- buffer,
- buffer_size);
+ const char* name = java_lang_String::as_utf8_string(name_oop);
return name;
} else {
return NULL;
@@ -1165,6 +993,36 @@ bool JavaThread::resize_all_jvmci_counters(int new_size) {
#endif // INCLUDE_JVMCI
+#ifdef ASSERT
+// Checks safepoint allowed and clears unhandled oops at potential safepoints.
+void JavaThread::check_possible_safepoint() {
+ if (_no_safepoint_count > 0) {
+ print_owned_locks();
+ assert(false, "Possible safepoint reached by thread that does not allow it");
+ }
+#ifdef CHECK_UNHANDLED_OOPS
+ // Clear unhandled oops in JavaThreads so we get a crash right away.
+ clear_unhandled_oops();
+#endif // CHECK_UNHANDLED_OOPS
+}
+
+void JavaThread::check_for_valid_safepoint_state() {
+ // Check NoSafepointVerifier, which is implied by locks taken that can be
+ // shared with the VM thread. This makes sure that no locks with allow_vm_block
+ // are held.
+ check_possible_safepoint();
+
+ if (thread_state() != _thread_in_vm) {
+ fatal("LEAF method calling lock?");
+ }
+
+ if (GCALotAtAllSafepoints) {
+ // We could enter a safepoint here and thus have a gc
+ InterfaceSupport::check_gc_alot();
+ }
+}
+#endif // ASSERT
+
// A JavaThread is a normal Java thread
JavaThread::JavaThread() :
@@ -1183,15 +1041,23 @@ JavaThread::JavaThread() :
_vm_result_2(nullptr),
_return_buffered_value(nullptr),
+ _current_pending_monitor(NULL),
+ _current_pending_monitor_is_from_java(true),
+ _current_waiting_monitor(NULL),
+ _Stalled(0),
+
_monitor_chunks(nullptr),
_special_runtime_exit_condition(_no_async_condition),
_pending_async_exception(nullptr),
_thread_state(_thread_new),
_saved_exception_pc(nullptr),
+#ifdef ASSERT
+ _no_safepoint_count(0),
+ _visited_for_critical_count(false),
+#endif
_terminated(_not_terminated),
- _suspend_equivalent(false),
_in_deopt_handler(0),
_doing_unsafe_access(false),
_do_not_unlock_if_synchronized(false),
@@ -1239,7 +1105,6 @@ JavaThread::JavaThread() :
_SleepEvent(ParkEvent::Allocate(this))
{
-
set_jni_functions(jni_functions());
#if INCLUDE_JVMCI
assert(_jvmci._implicit_exception_pc == nullptr, "must be");
@@ -1564,33 +1429,12 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
JvmtiExport::post_thread_end(this);
}
- // We have notified the agents that we are exiting, before we go on,
- // we must check for a pending external suspend request and honor it
- // in order to not surprise the thread that made the suspend request.
- while (true) {
- {
- MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
- if (!is_external_suspend()) {
- set_terminated(_thread_exiting);
- ThreadService::current_thread_exiting(this, is_daemon(threadObj()));
- break;
- }
- // Implied else:
- // Things get a little tricky here. We have a pending external
- // suspend request, but we are holding the SR_lock so we
- // can't just self-suspend. So we temporarily drop the lock
- // and then self-suspend.
- }
+ // The careful dance between thread suspension and exit is handled here.
+ // Since we are in thread_in_vm state and suspension is done with handshakes,
+ // we can just put in the exiting state and it will be correctly handled.
+ set_terminated(_thread_exiting);
- ThreadBlockInVM tbivm(this);
- java_suspend_self();
-
- // We're done with this suspend request, but we have to loop around
- // and check again. Eventually we will get SR_lock without a pending
- // external suspend request and will be able to mark ourselves as
- // exiting.
- }
- // no more external suspends are allowed at this point
+ ThreadService::current_thread_exiting(this, is_daemon(threadObj()));
} else {
assert(!is_terminated() && !is_exiting(), "must not be exiting");
// before_exit() has already posted JVMTI THREAD_END events
@@ -1864,12 +1708,6 @@ void JavaThread::check_and_handle_async_exceptions(bool check_unsafe_error) {
void JavaThread::handle_special_runtime_exit_condition(bool check_asyncs) {
- // Check for pending external suspend.
- if (is_external_suspend_with_lock()) {
- frame_anchor()->make_walkable(this);
- java_suspend_self_with_safepoint_check();
- }
-
if (is_obj_deopt_suspend()) {
frame_anchor()->make_walkable(this);
wait_for_object_deoptimization();
@@ -1931,164 +1769,42 @@ void JavaThread::send_thread_stop(oop java_throwable) {
this->interrupt();
}
+
// External suspension mechanism.
//
-// Tell the VM to suspend a thread when ever it knows that it does not hold on
-// to any VM_locks and it is at a transition
-// Self-suspension will happen on the transition out of the vm.
-// Catch "this" coming in from JNIEnv pointers when the thread has been freed
-//
-// Guarantees on return:
-// + Target thread will not execute any new bytecode (that's why we need to
-// force a safepoint)
-// + Target thread will not enter any new monitors
+// Guarantees on return (for a valid target thread):
+// - Target thread will not execute any new bytecode.
+// - Target thread will not enter any new monitors.
//
-void JavaThread::java_suspend() {
+bool JavaThread::java_suspend() {
ThreadsListHandle tlh;
- if (!tlh.includes(this) || threadObj() == NULL || is_exiting()) {
- return;
- }
-
- { MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
- if (!is_external_suspend()) {
- // a racing resume has cancelled us; bail out now
- return;
- }
-
- // suspend is done
-
- // Warning: is_ext_suspend_completed() may temporarily drop the
- // SR_lock to allow the thread to reach a stable thread state if
- // it is currently in a transient thread state.
- if (is_ext_suspend_completed()) {
- return;
- }
- }
-
- if (Thread::current() == this) {
- // Safely self-suspend.
- // If we don't do this explicitly it will implicitly happen
- // before we transition back to Java, and on some other thread-state
- // transition paths, but not as we exit a JVM TI SuspendThread call.
- // As SuspendThread(current) must not return (until resumed) we must
- // self-suspend here.
- ThreadBlockInVM tbivm(this);
- java_suspend_self();
- } else {
- VM_ThreadSuspend vm_suspend;
- VMThread::execute(&vm_suspend);
+ if (!tlh.includes(this)) {
+ log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " not on ThreadsList, no suspension", p2i(this));
+ return false;
}
+ return this->handshake_state()->suspend();
}
-// Part II of external suspension.
-// A JavaThread self suspends when it detects a pending external suspend
-// request. This is usually on transitions. It is also done in places
-// where continuing to the next transition would surprise the caller,
-// e.g., monitor entry.
-//
-// Returns the number of times that the thread self-suspended.
-//
-// Note: DO NOT call java_suspend_self() when you just want to block current
-// thread. java_suspend_self() is the second stage of cooperative
-// suspension for external suspend requests and should only be used
-// to complete an external suspend request.
-//
-int JavaThread::java_suspend_self() {
- assert(thread_state() == _thread_blocked, "wrong state for java_suspend_self()");
- int ret = 0;
-
- // we are in the process of exiting so don't suspend
- if (is_exiting()) {
- clear_external_suspend();
- return ret;
- }
-
- assert(_anchor.walkable() || !has_last_Java_frame(),
- "must have walkable stack");
-
- MonitorLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
-
- assert(!this->is_ext_suspended(),
- "a thread trying to self-suspend should not already be suspended");
-
- if (this->is_suspend_equivalent()) {
- // If we are self-suspending as a result of the lifting of a
- // suspend equivalent condition, then the suspend_equivalent
- // flag is not cleared until we set the ext_suspended flag.
- this->clear_suspend_equivalent();
- }
-
- // A racing resume may have cancelled us before we grabbed SR_lock
- // above. Or another external suspend request could be waiting for us
- // by the time we return from SR_lock()->wait(). The thread
- // that requested the suspension may already be trying to walk our
- // stack and if we return now, we can change the stack out from under
- // it. This would be a "bad thing (TM)" and cause the stack walker
- // to crash. We stay self-suspended until there are no more pending
- // external suspend requests.
- while (is_external_suspend()) {
- ret++;
- this->set_ext_suspended();
-
- // _ext_suspended flag is cleared by java_resume()
- while (is_ext_suspended()) {
- ml.wait();
- }
+bool JavaThread::java_resume() {
+ ThreadsListHandle tlh;
+ if (!tlh.includes(this)) {
+ log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " not on ThreadsList, nothing to resume", p2i(this));
+ return false;
}
- return ret;
-}
-
-// Helper routine to set up the correct thread state before calling java_suspend_self.
-// This is called when regular thread-state transition helpers can't be used because
-// we can be in various states, in particular _thread_in_native_trans.
-// We have to set the thread state directly to _thread_blocked so that it will
-// be seen to be safepoint/handshake safe whilst suspended. This is also
-// necessary to allow a thread in is_ext_suspend_completed, that observed the
-// _thread_in_native_trans state, to proceed.
-// The problem with setting thread state directly is that a
-// safepoint could happen just after java_suspend_self() returns after being resumed,
-// and the VM thread will see the _thread_blocked state. So we must check for a safepoint
-// after restoring the state to make sure we won't leave while a safepoint is in progress.
-// However, not all initial-states are allowed when performing a safepoint check, as we
-// should never be blocking at a safepoint whilst in those states(*). Of these 'bad' states
-// only _thread_in_native is possible when executing this code (based on our two callers).
-// A thread that is _thread_in_native is already safepoint-safe and so it doesn't matter
-// whether the VMThread sees the _thread_blocked state, or the _thread_in_native state,
-// and so we don't need the explicit safepoint check.
-// (*) See switch statement in SafepointSynchronize::block() for thread states that are
-// allowed when performing a safepoint check.
-
-void JavaThread::java_suspend_self_with_safepoint_check() {
- assert(this == Thread::current(), "invariant");
- JavaThreadState state = thread_state();
-
- do {
- set_thread_state(_thread_blocked);
- java_suspend_self();
- // The current thread could have been suspended again. We have to check for
- // suspend after restoring the saved state. Without this the current thread
- // might return to _thread_in_Java and execute bytecodes for an arbitrary
- // long time.
- set_thread_state_fence(state);
-
- if (state != _thread_in_native) {
- SafepointMechanism::process_if_requested(this);
- }
- } while (is_external_suspend());
+ return this->handshake_state()->resume();
}
// Wait for another thread to perform object reallocation and relocking on behalf of
// this thread.
-// This method is very similar to JavaThread::java_suspend_self_with_safepoint_check()
-// and has the same callers. It also performs a raw thread state transition to
-// _thread_blocked and back again to the original state before returning. The current
-// thread is required to change to _thread_blocked in order to be seen to be
-// safepoint/handshake safe whilst suspended and only after becoming handshake safe,
-// the other thread can complete the handshake used to synchronize with this thread
-// and then perform the reallocation and relocking. We cannot use the thread state
-// transition helpers because we arrive here in various states and also because the
-// helpers indirectly call this method. After leaving _thread_blocked we have to
-// check for safepoint/handshake, except if _thread_in_native. The thread is safe
+// Raw thread state transition to _thread_blocked and back again to the original
+// state before returning are performed. The current thread is required to
+// change to _thread_blocked in order to be seen to be safepoint/handshake safe
+// whilst suspended and only after becoming handshake safe, the other thread can
+// complete the handshake used to synchronize with this thread and then perform
+// the reallocation and relocking. We cannot use the thread state transition
+// helpers because we arrive here in various states and also because the helpers
+// indirectly call this method. After leaving _thread_blocked we have to check
+// for safepoint/handshake, except if _thread_in_native. The thread is safe
// without blocking then. Allowed states are enumerated in
// SafepointSynchronize::block(). See also EscapeBarrier::sync_and_suspend_*()
@@ -2100,10 +1816,6 @@ void JavaThread::wait_for_object_deoptimization() {
bool spin_wait = os::is_MP();
do {
set_thread_state(_thread_blocked);
- // Check if _external_suspend was set in the previous loop iteration.
- if (is_external_suspend()) {
- java_suspend_self();
- }
// Wait for object deoptimization if requested.
if (spin_wait) {
// A single deoptimization is typically very short. Microbenchmarks
@@ -2131,7 +1843,7 @@ void JavaThread::wait_for_object_deoptimization() {
}
// A handshake for obj. deoptimization suspend could have been processed so
// we must check after processing.
- } while (is_obj_deopt_suspend() || is_external_suspend());
+ } while (is_obj_deopt_suspend());
}
#ifdef ASSERT
@@ -2145,32 +1857,19 @@ void JavaThread::verify_not_published() {
}
#endif
-// Slow path when the native==>VM/Java barriers detect a safepoint is in
-// progress or when _suspend_flags is non-zero.
-// Current thread needs to self-suspend if there is a suspend request and/or
-// block if a safepoint is in progress.
-// Async exception ISN'T checked.
-// Note only the ThreadInVMfromNative transition can call this function
-// directly and when thread state is _thread_in_native_trans
-void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread) {
+// Slow path when the native==>Java barriers detect a safepoint/handshake is
+// pending, when _suspend_flags is non-zero or when we need to process a stack
+// watermark. Also check for pending async exceptions (except unsafe access error).
+// Note only the native==>Java barriers can call this function when thread state
+// is _thread_in_native_trans.
+void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
assert(thread->thread_state() == _thread_in_native_trans, "wrong state");
- assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "Unwalkable stack in native->vm transition");
+ assert(!thread->has_last_Java_frame() || thread->frame_anchor()->walkable(), "Unwalkable stack in native->Java transition");
- SafepointMechanism::process_if_requested_with_exit_check(thread, false /* check asyncs */);
-}
-
-// Slow path when the native==>VM/Java barriers detect a safepoint is in
-// progress or when _suspend_flags is non-zero.
-// Current thread needs to self-suspend if there is a suspend request and/or
-// block if a safepoint is in progress.
-// Also check for pending async exception (not including unsafe access error).
-// Note only the native==>VM/Java barriers can call this function and when
-// thread state is _thread_in_native_trans.
-void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
// Enable WXWrite: called directly from interpreter native wrapper.
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread));
- check_safepoint_and_suspend_for_native_trans(thread);
+ SafepointMechanism::process_if_requested_with_exit_check(thread, false /* check asyncs */);
// After returning from native, it could be that the stack frames are not
// yet safe to use. We catch such situations in the subsequent stack watermark
@@ -2184,29 +1883,6 @@ void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
}
}
-// We need to guarantee the Threads_lock here, since resumes are not
-// allowed during safepoint synchronization
-// Can only resume from an external suspension
-void JavaThread::java_resume() {
- assert_locked_or_safepoint(Threads_lock);
-
- // Sanity check: thread is gone, has started exiting or the thread
- // was not externally suspended.
- ThreadsListHandle tlh;
- if (!tlh.includes(this) || is_exiting() || !is_external_suspend()) {
- return;
- }
-
- MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
-
- clear_external_suspend();
-
- if (is_ext_suspended()) {
- clear_ext_suspended();
- SR_lock()->notify_all();
- }
-}
-
#ifndef PRODUCT
// Deoptimization
// Function for testing deoptimization
@@ -2775,15 +2451,7 @@ bool JavaThread::sleep(jlong millis) {
{
ThreadBlockInVM tbivm(this);
OSThreadWaitState osts(this->osthread(), false /* not Object.wait() */);
-
- this->set_suspend_equivalent();
- // cleared by handle_special_suspend_equivalent_condition() or
- // java_suspend_self() via check_and_wait_while_suspended()
-
slp->park(millis);
-
- // were we externally suspended while we were waiting?
- this->check_and_wait_while_suspended();
}
// Update elapsed time tracking
@@ -2990,26 +2658,23 @@ void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
// Phase 1 of the system initialization in the library, java.lang.System class initialization
call_initPhase1(CHECK);
- // get the Java runtime name, version, and vendor info after java.lang.System is initialized
+ // Get the Java runtime name, version, and vendor info after java.lang.System is initialized.
+ // Some values are actually configure-time constants but some can be set via the jlink tool and
+ // so must be read dynamically. We treat them all the same.
InstanceKlass* ik = SystemDictionary::find_instance_klass(vmSymbols::java_lang_VersionProps(),
Handle(), Handle());
+ {
+ ResourceMark rm(main_thread);
+ JDK_Version::set_java_version(get_java_version_info(ik, vmSymbols::java_version_name()));
- JDK_Version::set_java_version(get_java_version_info(ik, vmSymbols::java_version_name(),
- java_version, sizeof(java_version)));
-
- JDK_Version::set_runtime_name(get_java_version_info(ik, vmSymbols::java_runtime_name_name(),
- java_runtime_name, sizeof(java_runtime_name)));
+ JDK_Version::set_runtime_name(get_java_version_info(ik, vmSymbols::java_runtime_name_name()));
- JDK_Version::set_runtime_version(get_java_version_info(ik, vmSymbols::java_runtime_version_name(),
- java_runtime_version, sizeof(java_runtime_version)));
+ JDK_Version::set_runtime_version(get_java_version_info(ik, vmSymbols::java_runtime_version_name()));
- JDK_Version::set_runtime_vendor_version(get_java_version_info(ik, vmSymbols::java_runtime_vendor_version_name(),
- java_runtime_vendor_version,
- sizeof(java_runtime_vendor_version)));
+ JDK_Version::set_runtime_vendor_version(get_java_version_info(ik, vmSymbols::java_runtime_vendor_version_name()));
- JDK_Version::set_runtime_vendor_vm_bug_url(get_java_version_info(ik, vmSymbols::java_runtime_vendor_vm_bug_url_name(),
- java_runtime_vendor_vm_bug_url,
- sizeof(java_runtime_vendor_vm_bug_url)));
+ JDK_Version::set_runtime_vendor_vm_bug_url(get_java_version_info(ik, vmSymbols::java_runtime_vendor_vm_bug_url_name()));
+ }
// an instance of OutOfMemory exception has been allocated earlier
initialize_class(vmSymbols::java_lang_OutOfMemoryError(), CHECK);
@@ -3020,9 +2685,6 @@ void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK);
initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK);
initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK);
-
- // Eager box cache initialization only if AOT is on and any library is loaded.
- AOTLoader::initialize_box_caches(CHECK);
}
void Threads::initialize_jsr292_core_classes(TRAPS) {
@@ -3229,8 +2891,8 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
}
// We need this to update the java.vm.info property in case any flags used
- // to initially define it have been changed. This is needed for both CDS and
- // AOT, since UseSharedSpaces and UseAOT may be changed after java.vm.info
+ // to initially define it have been changed. This is needed for CDS,
+ // since UseSharedSpaces may be changed after java.vm.info
// is initially computed. See Abstract_VM_Version::vm_info_string().
// This update must happen before we initialize the java classes, but
// after any initialization logic that might modify the flags.
@@ -3707,11 +3369,11 @@ void Threads::destroy_vm() {
_vm_complete = false;
#endif
// Wait until we are the last non-daemon thread to execute
- { MonitorLocker nu(Threads_lock);
+ {
+ MonitorLocker nu(Threads_lock);
while (Threads::number_of_non_daemon_threads() > 1)
- // This wait should make safepoint checks, wait without a timeout,
- // and wait as a suspend-equivalent condition.
- nu.wait(0, Mutex::_as_suspend_equivalent_flag);
+ // This wait should make safepoint checks and wait without a timeout.
+ nu.wait(0);
}
EventShutdown e;
@@ -3880,7 +3542,7 @@ void Threads::remove(JavaThread* p, bool is_daemon) {
// the thread might mess around with locks after this point. This can cause it
// to do callbacks into the safepoint code. However, the safepoint code is not aware
// of this thread since it is removed from the queue.
- p->set_terminated_value();
+ p->set_terminated(JavaThread::_thread_terminated);
// Notify threads waiting in EscapeBarriers
EscapeBarrier::thread_removed(p);
diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp
index fab4729a3f1..84ebbe77f5a 100644
--- a/src/hotspot/share/runtime/thread.hpp
+++ b/src/hotspot/share/runtime/thread.hpp
@@ -214,80 +214,11 @@ class Thread: public ThreadShadow {
protected:
static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
- private:
-
- // ***************************************************************
- // Suspend and resume support
- // ***************************************************************
- //
- // VM suspend/resume no longer exists - it was once used for various
- // things including safepoints but was deprecated and finally removed
- // in Java 7. Because VM suspension was considered "internal" Java-level
- // suspension was considered "external", and this legacy naming scheme
- // remains.
- //
- // External suspend/resume requests come from JVM_SuspendThread,
- // JVM_ResumeThread, JVMTI SuspendThread, and finally JVMTI
- // ResumeThread. External
- // suspend requests cause _external_suspend to be set and external
- // resume requests cause _external_suspend to be cleared.
- // External suspend requests do not nest on top of other external
- // suspend requests. The higher level APIs reject suspend requests
- // for already suspended threads.
- //
- // The external_suspend
- // flag is checked by has_special_runtime_exit_condition() and java thread
- // will self-suspend when handle_special_runtime_exit_condition() is
- // called. Most uses of the _thread_blocked state in JavaThreads are
- // considered the same as being externally suspended; if the blocking
- // condition lifts, the JavaThread will self-suspend. Other places
- // where VM checks for external_suspend include:
- // + mutex granting (do not enter monitors when thread is suspended)
- // + state transitions from _thread_in_native
- //
- // In general, java_suspend() does not wait for an external suspend
- // request to complete. When it returns, the only guarantee is that
- // the _external_suspend field is true.
- //
- // wait_for_ext_suspend_completion() is used to wait for an external
- // suspend request to complete. External suspend requests are usually
- // followed by some other interface call that requires the thread to
- // be quiescent, e.g., GetCallTrace(). By moving the "wait time" into
- // the interface that requires quiescence, we give the JavaThread a
- // chance to self-suspend before we need it to be quiescent. This
- // improves overall suspend/query performance.
- //
- // _suspend_flags controls the behavior of java_ suspend/resume.
- // It must be set under the protection of SR_lock. Read from the flag is
- // OK without SR_lock as long as the value is only used as a hint.
- // (e.g., check _external_suspend first without lock and then recheck
- // inside SR_lock and finish the suspension)
- //
- // _suspend_flags is also overloaded for other "special conditions" so
- // that a single check indicates whether any special action is needed
- // eg. for async exceptions.
- // -------------------------------------------------------------------
- // Notes:
- // 1. The suspend/resume logic no longer uses ThreadState in OSThread
- // but we still update its value to keep other part of the system (mainly
- // JVMTI) happy. ThreadState is legacy code (see notes in
- // osThread.hpp).
- //
- // 2. It would be more natural if set_external_suspend() is private and
- // part of java_suspend(), but that probably would affect the suspend/query
- // performance. Need more investigation on this.
-
- // suspend/resume lock: used for self-suspend
- Monitor* _SR_lock;
- protected:
enum SuspendFlags {
// NOTE: avoid using the sign-bit as cc generates different test code
// when the sign-bit is used, and sometimes incorrectly - see CR 6398077
- _external_suspend = 0x20000000U, // thread is asked to self suspend
- _ext_suspended = 0x40000000U, // thread has self-suspended
-
_has_async_exception = 0x00000001U, // there is a pending async exception
_trace_flag = 0x00000004U, // call tracing backend
@@ -361,28 +292,11 @@ class Thread: public ThreadShadow {
}
#endif // ASSERT
- private:
-
- // Debug support for checking if code allows safepoints or not.
- // Safepoints in the VM can happen because of allocation, invoking a VM operation, or blocking on
- // mutex, or blocking on an object synchronizer (Java locking).
- // If _no_safepoint_count is non-zero, then an assertion failure will happen in any of
- // the above cases.
- //
- // The class NoSafepointVerifier is used to set this counter.
- //
- NOT_PRODUCT(int _no_safepoint_count;) // If 0, thread allow a safepoint to happen
-
private:
// Used by SkipGCALot class.
NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?
friend class GCLocker;
- friend class NoSafepointVerifier;
- friend class PauseNoSafepointVerifier;
-
- protected:
- SafepointMechanism::ThreadData _poll_data;
private:
ThreadLocalAllocBuffer _tlab; // Thread-local eden
@@ -394,40 +308,8 @@ class Thread: public ThreadShadow {
JFR_ONLY(DEFINE_THREAD_LOCAL_FIELD_JFR;) // Thread-local data for jfr
- ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread
- // is waiting to lock
- bool _current_pending_monitor_is_from_java; // locking is from Java code
JvmtiRawMonitor* _current_pending_raw_monitor; // JvmtiRawMonitor this thread
// is waiting to lock
-
-
- // ObjectMonitor on which this thread called Object.wait()
- ObjectMonitor* _current_waiting_monitor;
-
-#ifdef ASSERT
- private:
- volatile uint64_t _visited_for_critical_count;
-
- public:
- void set_visited_for_critical_count(uint64_t safepoint_id) {
- assert(_visited_for_critical_count == 0, "Must be reset before set");
- assert((safepoint_id & 0x1) == 1, "Must be odd");
- _visited_for_critical_count = safepoint_id;
- }
- void reset_visited_for_critical_count(uint64_t safepoint_id) {
- assert(_visited_for_critical_count == safepoint_id, "Was not visited");
- _visited_for_critical_count = 0;
- }
- bool was_visited_for_critical_count(uint64_t safepoint_id) const {
- return _visited_for_critical_count == safepoint_id;
- }
-#endif
-
- public:
- enum {
- is_definitely_current_thread = true
- };
-
public:
// Constructor
Thread();
@@ -514,8 +396,6 @@ class Thread: public ThreadShadow {
os::set_native_thread_name(name);
}
- Monitor* SR_lock() const { return _SR_lock; }
-
bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
inline void set_suspend_flag(SuspendFlags f);
@@ -602,28 +482,6 @@ class Thread: public ThreadShadow {
bool is_obj_deopt_suspend() { return (_suspend_flags & _obj_deopt) != 0; }
- // For tracking the heavyweight monitor the thread is pending on.
- ObjectMonitor* current_pending_monitor() {
- return _current_pending_monitor;
- }
- void set_current_pending_monitor(ObjectMonitor* monitor) {
- _current_pending_monitor = monitor;
- }
- void set_current_pending_monitor_is_from_java(bool from_java) {
- _current_pending_monitor_is_from_java = from_java;
- }
- bool current_pending_monitor_is_from_java() {
- return _current_pending_monitor_is_from_java;
- }
-
- // For tracking the ObjectMonitor on which this thread called Object.wait()
- ObjectMonitor* current_waiting_monitor() {
- return _current_waiting_monitor;
- }
- void set_current_waiting_monitor(ObjectMonitor* monitor) {
- _current_waiting_monitor = monitor;
- }
-
// For tracking the Jvmti raw monitor the thread is pending on.
JvmtiRawMonitor* current_pending_raw_monitor() {
return _current_pending_raw_monitor;
@@ -775,11 +633,6 @@ class Thread: public ThreadShadow {
void set_current_resource_mark(ResourceMark* rm) { _current_resource_mark = rm; }
#endif // ASSERT
- // These functions check conditions on a JavaThread before possibly going to a safepoint,
- // including NoSafepointVerifier.
- void check_for_valid_safepoint_state() NOT_DEBUG_RETURN;
- void check_possible_safepoint() NOT_DEBUG_RETURN;
-
private:
volatile int _jvmti_env_iteration_count;
@@ -796,9 +649,6 @@ class Thread: public ThreadShadow {
static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base); }
static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size); }
- static ByteSize polling_word_offset() { return byte_offset_of(Thread, _poll_data) + byte_offset_of(SafepointMechanism::ThreadData, _polling_word);}
- static ByteSize polling_page_offset() { return byte_offset_of(Thread, _poll_data) + byte_offset_of(SafepointMechanism::ThreadData, _polling_page);}
-
static ByteSize tlab_start_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::start_offset(); }
static ByteSize tlab_end_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::end_offset(); }
static ByteSize tlab_top_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::top_offset(); }
@@ -809,13 +659,14 @@ class Thread: public ThreadShadow {
JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;)
public:
- volatile intptr_t _Stalled;
- volatile int _TypeTag;
- ParkEvent * _ParkEvent; // for Object monitors, JVMTI raw monitors,
+ ParkEvent * volatile _ParkEvent; // for Object monitors, JVMTI raw monitors,
// and ObjectSynchronizer::read_stable_mark
- int NativeSyncRecursion; // diagnostic
- volatile int _OnTrap; // Resume-at IP delta
+ // Termination indicator used by the signal handler.
+ // _ParkEvent is just a convenient field we can NULL out after setting the JavaThread termination state
+ // (which can't itself be read from the signal handler if a signal hits during the Thread destructor).
+ bool has_terminated() { return Atomic::load(&_ParkEvent) == NULL; };
+
jint _hashStateW; // Marsaglia Shift-XOR thread-local RNG
jint _hashStateX; // thread-specific hashCode generator state
jint _hashStateY;
@@ -871,6 +722,7 @@ class JavaThread: public Thread {
friend class WhiteBox;
friend class VTBuffer;
friend class ThreadsSMRSupport; // to access _threadObj for exiting_threads_oops_do
+ friend class HandshakeState;
private:
bool _on_thread_list; // Is set when this JavaThread is added to the Threads list
OopHandle _threadObj; // The Java level thread object
@@ -889,14 +741,6 @@ class JavaThread: public Thread {
private: // restore original namespace restriction
#endif // ifdef ASSERT
-#ifndef PRODUCT
- public:
- enum {
- jump_ring_buffer_size = 16
- };
- private: // restore original namespace restriction
-#endif
-
JavaFrameAnchor _anchor; // Encapsulation of current java frame and it state
ThreadFunction _entry_point;
@@ -934,9 +778,36 @@ class JavaThread: public Thread {
// elided card-marks for performance along the fast-path.
MemRegion _deferred_card_mark;
- MonitorChunk* _monitor_chunks; // Contains the off stack monitors
- // allocated during deoptimization
- // and by JNI_MonitorEnter/Exit
+ ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread is waiting to lock
+ bool _current_pending_monitor_is_from_java; // locking is from Java code
+ ObjectMonitor* _current_waiting_monitor; // ObjectMonitor on which this thread called Object.wait()
+ public:
+ volatile intptr_t _Stalled;
+
+ // For tracking the heavyweight monitor the thread is pending on.
+ ObjectMonitor* current_pending_monitor() {
+ return _current_pending_monitor;
+ }
+ void set_current_pending_monitor(ObjectMonitor* monitor) {
+ _current_pending_monitor = monitor;
+ }
+ void set_current_pending_monitor_is_from_java(bool from_java) {
+ _current_pending_monitor_is_from_java = from_java;
+ }
+ bool current_pending_monitor_is_from_java() {
+ return _current_pending_monitor_is_from_java;
+ }
+ ObjectMonitor* current_waiting_monitor() {
+ return _current_waiting_monitor;
+ }
+ void set_current_waiting_monitor(ObjectMonitor* monitor) {
+ _current_waiting_monitor = monitor;
+ }
+
+ private:
+ MonitorChunk* _monitor_chunks; // Contains the off stack monitors
+ // allocated during deoptimization
+ // and by JNI_MonitorEnter/Exit
// Async. requests support
enum AsyncRequests {
@@ -948,14 +819,52 @@ class JavaThread: public Thread {
oop _pending_async_exception;
// Safepoint support
- public: // Expose _thread_state for SafeFetchInt()
+ public: // Expose _thread_state for SafeFetchInt()
volatile JavaThreadState _thread_state;
private:
- ThreadSafepointState* _safepoint_state; // Holds information about a thread during a safepoint
- address _saved_exception_pc; // Saved pc of instruction where last implicit exception happened
- NOT_PRODUCT(bool _requires_cross_modify_fence;) // State used by VerifyCrossModifyFence
+ SafepointMechanism::ThreadData _poll_data;
+ ThreadSafepointState* _safepoint_state; // Holds information about a thread during a safepoint
+ address _saved_exception_pc; // Saved pc of instruction where last implicit exception happened
+ NOT_PRODUCT(bool _requires_cross_modify_fence;) // State used by VerifyCrossModifyFence
+#ifdef ASSERT
+ // Debug support for checking if code allows safepoints or not.
+ // Safepoints in the VM can happen because of allocation, invoking a VM operation, or blocking on
+ // mutex, or blocking on an object synchronizer (Java locking).
+ // If _no_safepoint_count is non-zero, then an assertion failure will happen in any of
+ // the above cases. The class NoSafepointVerifier is used to set this counter.
+ int _no_safepoint_count; // If 0, thread allow a safepoint to happen
+
+ public:
+ void inc_no_safepoint_count() { _no_safepoint_count++; }
+ void dec_no_safepoint_count() { _no_safepoint_count--; }
+#endif // ASSERT
+ public:
+ // These functions check conditions before possibly going to a safepoint,
+ // including NoSafepointVerifier.
+ void check_for_valid_safepoint_state() NOT_DEBUG_RETURN;
+ void check_possible_safepoint() NOT_DEBUG_RETURN;
+
+#ifdef ASSERT
+ private:
+ volatile uint64_t _visited_for_critical_count;
+
+ public:
+ void set_visited_for_critical_count(uint64_t safepoint_id) {
+ assert(_visited_for_critical_count == 0, "Must be reset before set");
+ assert((safepoint_id & 0x1) == 1, "Must be odd");
+ _visited_for_critical_count = safepoint_id;
+ }
+ void reset_visited_for_critical_count(uint64_t safepoint_id) {
+ assert(_visited_for_critical_count == safepoint_id, "Was not visited");
+ _visited_for_critical_count = 0;
+ }
+ bool was_visited_for_critical_count(uint64_t safepoint_id) const {
+ return _visited_for_critical_count == safepoint_id;
+ }
+#endif // ASSERT
// JavaThread termination support
+ public:
enum TerminatedTypes {
_not_terminated = 0xDEAD - 2,
_thread_exiting, // JavaThread::exit() has been called for this thread
@@ -964,6 +873,7 @@ class JavaThread: public Thread {
// only VM_Exit can set _vm_exited
};
+ private:
// In general a JavaThread's _terminated field transitions as follows:
//
// _not_terminated => _thread_exiting => _thread_terminated
@@ -971,8 +881,7 @@ class JavaThread: public Thread {
// _vm_exited is a special value to cover the case of a JavaThread
// executing native code after the VM itself is terminated.
volatile TerminatedTypes _terminated;
- // suspend/resume support
- volatile bool _suspend_equivalent; // Suspend equivalent condition
+
jint _in_deopt_handler; // count of deoptimization
// handlers thread is in
volatile bool _doing_unsafe_access; // Thread may fault due to unsafe access
@@ -1183,8 +1092,7 @@ class JavaThread: public Thread {
}
bool is_terminated() const;
void set_terminated(TerminatedTypes t);
- // special for Threads::remove() which is static:
- void set_terminated_value();
+
void block_if_vm_exited();
bool doing_unsafe_access() { return _doing_unsafe_access; }
@@ -1210,98 +1118,17 @@ class JavaThread: public Thread {
}
// Suspend/resume support for JavaThread
- private:
- inline void set_ext_suspended();
- inline void clear_ext_suspended();
+ bool java_suspend(); // higher-level suspension logic called by the public APIs
+ bool java_resume(); // higher-level resume logic called by the public APIs
+ bool is_suspended() { return _handshake.is_suspended(); }
- public:
- void java_suspend(); // higher-level suspension logic called by the public APIs
- void java_resume(); // higher-level resume logic called by the public APIs
- int java_suspend_self(); // low-level self-suspension mechanics
+ // Check for async exception in addition to safepoint.
+ static void check_special_condition_for_native_trans(JavaThread *thread);
// Synchronize with another thread that is deoptimizing objects of the
// current thread, i.e. reverts optimizations based on escape analysis.
void wait_for_object_deoptimization();
- private:
- // mid-level wrapper around java_suspend_self to set up correct state and
- // check for a pending safepoint at the end
- void java_suspend_self_with_safepoint_check();
-
- public:
- void check_and_wait_while_suspended() {
- assert(JavaThread::current() == this, "sanity check");
-
- bool do_self_suspend;
- do {
- // were we externally suspended while we were waiting?
- do_self_suspend = handle_special_suspend_equivalent_condition();
- if (do_self_suspend) {
- // don't surprise the thread that suspended us by returning
- java_suspend_self();
- set_suspend_equivalent();
- }
- } while (do_self_suspend);
- }
- static void check_safepoint_and_suspend_for_native_trans(JavaThread *thread);
- // Check for async exception in addition to safepoint and suspend request.
- static void check_special_condition_for_native_trans(JavaThread *thread);
-
- bool is_ext_suspend_completed();
-
- inline void set_external_suspend();
- inline void clear_external_suspend();
-
- bool is_external_suspend() const {
- return (_suspend_flags & _external_suspend) != 0;
- }
- // Whenever a thread transitions from native to vm/java it must suspend
- // if external|deopt suspend is present.
- bool is_suspend_after_native() const {
- return (_suspend_flags & (_external_suspend | _obj_deopt JFR_ONLY(| _trace_flag))) != 0;
- }
-
- // external suspend request is completed
- bool is_ext_suspended() const {
- return (_suspend_flags & _ext_suspended) != 0;
- }
-
- bool is_external_suspend_with_lock() const {
- MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
- return is_external_suspend();
- }
-
- // Special method to handle a pending external suspend request
- // when a suspend equivalent condition lifts.
- bool handle_special_suspend_equivalent_condition() {
- assert(is_suspend_equivalent(),
- "should only be called in a suspend equivalence condition");
- MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
- bool ret = is_external_suspend();
- if (!ret) {
- // not about to self-suspend so clear suspend equivalence
- clear_suspend_equivalent();
- }
- // implied else:
- // We have a pending external suspend request so we leave the
- // suspend_equivalent flag set until java_suspend_self() sets
- // the ext_suspended flag and clears the suspend_equivalent
- // flag. This insures that wait_for_ext_suspend_completion()
- // will return consistent values.
- return ret;
- }
-
- // utility methods to see if we are doing some kind of suspension
- bool is_being_ext_suspended() const {
- MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
- return is_ext_suspended() || is_external_suspend();
- }
-
- bool is_suspend_equivalent() const { return _suspend_equivalent; }
-
- void set_suspend_equivalent() { _suspend_equivalent = true; }
- void clear_suspend_equivalent() { _suspend_equivalent = false; }
-
// Thread.stop support
void send_thread_stop(oop throwable);
AsyncRequests clear_special_runtime_exit_condition() {
@@ -1321,17 +1148,8 @@ class JavaThread: public Thread {
// Return true if JavaThread has an asynchronous condition or
// if external suspension is requested.
bool has_special_runtime_exit_condition() {
- // Because we don't use is_external_suspend_with_lock
- // it is possible that we won't see an asynchronous external suspend
- // request that has just gotten started, i.e., SR_lock grabbed but
- // _external_suspend field change either not made yet or not visible
- // yet. However, this is okay because the request is asynchronous and
- // we will see the new flag value the next time through. It's also
- // possible that the external suspend request is dropped after
- // we have checked is_external_suspend(), we will recheck its value
- // under SR_lock in java_suspend_self().
return (_special_runtime_exit_condition != _no_async_condition) ||
- is_external_suspend() || is_trace_suspend() || is_obj_deopt_suspend();
+ (_suspend_flags & (_obj_deopt JFR_ONLY(| _trace_flag))) != 0;
}
void set_pending_unsafe_access_error() { _special_runtime_exit_condition = _async_unsafe_access_error; }
@@ -1443,6 +1261,8 @@ class JavaThread: public Thread {
static ByteSize vm_result_2_offset() { return byte_offset_of(JavaThread, _vm_result_2); }
static ByteSize return_buffered_value_offset() { return byte_offset_of(JavaThread, _return_buffered_value); }
static ByteSize thread_state_offset() { return byte_offset_of(JavaThread, _thread_state); }
+ static ByteSize polling_word_offset() { return byte_offset_of(JavaThread, _poll_data) + byte_offset_of(SafepointMechanism::ThreadData, _polling_word);}
+ static ByteSize polling_page_offset() { return byte_offset_of(JavaThread, _poll_data) + byte_offset_of(SafepointMechanism::ThreadData, _polling_page);}
static ByteSize saved_exception_pc_offset() { return byte_offset_of(JavaThread, _saved_exception_pc); }
static ByteSize osthread_offset() { return byte_offset_of(JavaThread, _osthread); }
#if INCLUDE_JVMCI
diff --git a/src/hotspot/share/runtime/thread.inline.hpp b/src/hotspot/share/runtime/thread.inline.hpp
index 01df7ae32e3..f84dfa21148 100644
--- a/src/hotspot/share/runtime/thread.inline.hpp
+++ b/src/hotspot/share/runtime/thread.inline.hpp
@@ -29,10 +29,13 @@
#include "gc/shared/tlab_globals.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
-#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
+#if defined(__APPLE__) && defined(AARCH64)
+#include "runtime/os.hpp"
+#endif
+
inline void Thread::set_suspend_flag(SuspendFlags f) {
uint32_t flags;
do {
@@ -116,20 +119,6 @@ inline WXMode Thread::enable_wx(WXMode new_state) {
}
#endif // __APPLE__ && AARCH64
-inline void JavaThread::set_ext_suspended() {
- set_suspend_flag (_ext_suspended);
-}
-inline void JavaThread::clear_ext_suspended() {
- clear_suspend_flag(_ext_suspended);
-}
-
-inline void JavaThread::set_external_suspend() {
- set_suspend_flag(_external_suspend);
-}
-inline void JavaThread::clear_external_suspend() {
- clear_suspend_flag(_external_suspend);
-}
-
inline void JavaThread::set_pending_async_exception(oop e) {
_pending_async_exception = e;
_special_runtime_exit_condition = _async_exception;
@@ -191,28 +180,20 @@ inline void JavaThread::set_done_attaching_via_jni() {
inline bool JavaThread::is_exiting() const {
// Use load-acquire so that setting of _terminated by
// JavaThread::exit() is seen more quickly.
- TerminatedTypes l_terminated = (TerminatedTypes)
- Atomic::load_acquire((volatile jint *) &_terminated);
+ TerminatedTypes l_terminated = Atomic::load_acquire(&_terminated);
return l_terminated == _thread_exiting || check_is_terminated(l_terminated);
}
inline bool JavaThread::is_terminated() const {
// Use load-acquire so that setting of _terminated by
// JavaThread::exit() is seen more quickly.
- TerminatedTypes l_terminated = (TerminatedTypes)
- Atomic::load_acquire((volatile jint *) &_terminated);
+ TerminatedTypes l_terminated = Atomic::load_acquire(&_terminated);
return check_is_terminated(l_terminated);
}
inline void JavaThread::set_terminated(TerminatedTypes t) {
// use release-store so the setting of _terminated is seen more quickly
- Atomic::release_store((volatile jint *) &_terminated, (jint) t);
-}
-
-// special for Threads::remove() which is static:
-inline void JavaThread::set_terminated_value() {
- // use release-store so the setting of _terminated is seen more quickly
- Atomic::release_store((volatile jint *) &_terminated, (jint) _thread_terminated);
+ Atomic::release_store(&_terminated, t);
}
// Allow tracking of class initialization monitor use
diff --git a/src/hotspot/share/runtime/threadHeapSampler.cpp b/src/hotspot/share/runtime/threadHeapSampler.cpp
index b65f3084982..3604b8d1709 100644
--- a/src/hotspot/share/runtime/threadHeapSampler.cpp
+++ b/src/hotspot/share/runtime/threadHeapSampler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Google and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -394,7 +394,7 @@ void ThreadHeapSampler::pick_next_geometric_sample() {
double log_val = (fast_log2(q) - 26);
double result =
(0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (get_sampling_interval())) + 1;
- assert(result > 0 && result < SIZE_MAX, "Result is not in an acceptable range.");
+ assert(result > 0 && result < static_cast<double>(SIZE_MAX), "Result is not in an acceptable range.");
size_t interval = static_cast<size_t>(result);
_bytes_until_sample = interval;
}
diff --git a/src/hotspot/share/runtime/threadWXSetters.inline.hpp b/src/hotspot/share/runtime/threadWXSetters.inline.hpp
index 02e200072a6..4465100c92a 100644
--- a/src/hotspot/share/runtime/threadWXSetters.inline.hpp
+++ b/src/hotspot/share/runtime/threadWXSetters.inline.hpp
@@ -26,9 +26,10 @@
#ifndef SHARE_RUNTIME_THREADWXSETTERS_INLINE_HPP
#define SHARE_RUNTIME_THREADWXSETTERS_INLINE_HPP
+#if defined(__APPLE__) && defined(AARCH64)
+
#include "runtime/thread.inline.hpp"
-#if defined(__APPLE__) && defined(AARCH64)
class ThreadWXEnable {
Thread* _thread;
WXMode _old_mode;
diff --git a/src/hotspot/share/runtime/timer.cpp b/src/hotspot/share/runtime/timer.cpp
index 94a410df6c8..af96c14e790 100644
--- a/src/hotspot/share/runtime/timer.cpp
+++ b/src/hotspot/share/runtime/timer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/timer.hpp"
#include "utilities/ostream.hpp"
diff --git a/src/hotspot/share/runtime/timerTrace.cpp b/src/hotspot/share/runtime/timerTrace.cpp
index 03320e45638..f38d8162e15 100644
--- a/src/hotspot/share/runtime/timerTrace.cpp
+++ b/src/hotspot/share/runtime/timerTrace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "runtime/timerTrace.hpp"
+#include "utilities/ostream.hpp"
TraceTime::TraceTime(const char* title,
bool doit) {
diff --git a/src/hotspot/share/runtime/timerTrace.hpp b/src/hotspot/share/runtime/timerTrace.hpp
index 9271cb1d89b..237afc9cd52 100644
--- a/src/hotspot/share/runtime/timerTrace.hpp
+++ b/src/hotspot/share/runtime/timerTrace.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#define SHARE_RUNTIME_TIMERTRACE_HPP
#include "logging/log.hpp"
+#include "runtime/timer.hpp"
#include "utilities/globalDefinitions.hpp"
// TraceTime is used for tracing the execution time of a block
diff --git a/src/hotspot/share/runtime/vframe_hp.cpp b/src/hotspot/share/runtime/vframe_hp.cpp
index e4ccd65ed4b..7e6396c4151 100644
--- a/src/hotspot/share/runtime/vframe_hp.cpp
+++ b/src/hotspot/share/runtime/vframe_hp.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -228,7 +228,7 @@ GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const {
if (scope() == NULL) {
CompiledMethod* nm = code();
Method* method = nm->method();
- assert(method->is_native() || nm->is_aot(), "Expect a native method or precompiled method");
+ assert(method->is_native(), "Expect a native method");
if (!method->is_synchronized()) {
return new GrowableArray<MonitorInfo*>(0);
}
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index 4473ee8cca5..8f98b4619e2 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -23,12 +23,12 @@
*/
#include "precompiled.hpp"
+#include "cds/filemap.hpp"
#include "ci/ciField.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciSymbol.hpp"
-#include "classfile/compactHashtable.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/javaClasses.hpp"
@@ -58,7 +58,6 @@
#include "memory/referenceType.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
-#include "memory/filemap.hpp"
#include "oops/array.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
@@ -300,7 +299,6 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
JVMTI_ONLY(nonstatic_field(MethodCounters, _number_of_breakpoints, u2)) \
nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \
nonstatic_field(MethodCounters, _backedge_counter, InvocationCounter) \
- AOT_ONLY(nonstatic_field(MethodCounters, _method, Method*)) \
nonstatic_field(Method, _constMethod, ConstMethod*) \
nonstatic_field(Method, _method_data, MethodData*) \
nonstatic_field(Method, _method_counters, MethodCounters*) \
@@ -736,15 +734,15 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
nonstatic_field(Thread, _active_handles, JNIHandleBlock*) \
nonstatic_field(Thread, _tlab, ThreadLocalAllocBuffer) \
nonstatic_field(Thread, _allocated_bytes, jlong) \
- nonstatic_field(Thread, _current_pending_monitor, ObjectMonitor*) \
- nonstatic_field(Thread, _current_pending_monitor_is_from_java, bool) \
- nonstatic_field(Thread, _current_waiting_monitor, ObjectMonitor*) \
nonstatic_field(NamedThread, _name, char*) \
nonstatic_field(NamedThread, _processed_thread, Thread*) \
nonstatic_field(JavaThread, _threadObj, OopHandle) \
nonstatic_field(JavaThread, _anchor, JavaFrameAnchor) \
nonstatic_field(JavaThread, _vm_result, oop) \
nonstatic_field(JavaThread, _vm_result_2, Metadata*) \
+ nonstatic_field(JavaThread, _current_pending_monitor, ObjectMonitor*) \
+ nonstatic_field(JavaThread, _current_pending_monitor_is_from_java, bool) \
+ nonstatic_field(JavaThread, _current_waiting_monitor, ObjectMonitor*) \
nonstatic_field(JavaThread, _pending_async_exception, oop) \
volatile_nonstatic_field(JavaThread, _exception_oop, oop) \
volatile_nonstatic_field(JavaThread, _exception_pc, address) \
@@ -1598,6 +1596,7 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
declare_c2_type(MemBarVolatileNode, MemBarNode) \
declare_c2_type(MemBarCPUOrderNode, MemBarNode) \
declare_c2_type(OnSpinWaitNode, MemBarNode) \
+ declare_c2_type(BlackholeNode, MemBarNode) \
declare_c2_type(InitializeNode, MemBarNode) \
declare_c2_type(ThreadLocalNode, Node) \
declare_c2_type(Opaque1Node, Node) \
@@ -2146,8 +2145,6 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
/* Thread::SuspendFlags enum */ \
/*****************************/ \
\
- declare_constant(Thread::_external_suspend) \
- declare_constant(Thread::_ext_suspended) \
declare_constant(Thread::_has_async_exception) \
\
/*******************/ \
@@ -2306,7 +2303,6 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
declare_constant(InstanceKlass::_misc_has_nonstatic_concrete_methods) \
declare_constant(InstanceKlass::_misc_declares_nonstatic_concrete_methods)\
declare_constant(InstanceKlass::_misc_has_been_redefined) \
- declare_constant(InstanceKlass::_misc_has_passed_fingerprint_check) \
declare_constant(InstanceKlass::_misc_is_scratch_class) \
declare_constant(InstanceKlass::_misc_is_shared_boot_class) \
declare_constant(InstanceKlass::_misc_is_shared_platform_class) \
@@ -2499,7 +2495,6 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
declare_constant(CompLevel_limited_profile) \
declare_constant(CompLevel_full_profile) \
declare_constant(CompLevel_full_optimization) \
- declare_constant(CompLevel_aot) \
\
/***************/ \
/* OopMapValue */ \
@@ -2916,6 +2911,9 @@ VMIntConstantEntry VMStructs::localHotSpotVMIntConstants[] = {
GENERATE_C1_VM_INT_CONSTANT_ENTRY,
GENERATE_C2_VM_INT_CONSTANT_ENTRY,
GENERATE_C2_PREPROCESSOR_VM_INT_CONSTANT_ENTRY)
+#ifdef VM_INT_CPU_FEATURE_CONSTANTS
+ VM_INT_CPU_FEATURE_CONSTANTS
+#endif
GENERATE_VM_INT_CONSTANT_LAST_ENTRY()
};
@@ -2949,6 +2947,9 @@ VMLongConstantEntry VMStructs::localHotSpotVMLongConstants[] = {
GENERATE_C1_VM_LONG_CONSTANT_ENTRY,
GENERATE_C2_VM_LONG_CONSTANT_ENTRY,
GENERATE_C2_PREPROCESSOR_VM_LONG_CONSTANT_ENTRY)
+#ifdef VM_LONG_CPU_FEATURE_CONSTANTS
+ VM_LONG_CPU_FEATURE_CONSTANTS
+#endif
GENERATE_VM_LONG_CONSTANT_LAST_ENTRY()
};
diff --git a/src/hotspot/share/runtime/vmThread.cpp b/src/hotspot/share/runtime/vmThread.cpp
index d4285da0a95..6f5d8c1f8ed 100644
--- a/src/hotspot/share/runtime/vmThread.cpp
+++ b/src/hotspot/share/runtime/vmThread.cpp
@@ -521,7 +521,9 @@ void VMThread::execute(VM_Operation* op) {
SkipGCALot sgcalot(t);
// JavaThread or WatcherThread
- t->check_for_valid_safepoint_state();
+ if (t->is_Java_thread()) {
+ t->as_Java_thread()->check_for_valid_safepoint_state();
+ }
// New request from Java thread, evaluate prologue
if (!op->doit_prologue()) {
diff --git a/src/hotspot/share/services/attachListener.hpp b/src/hotspot/share/services/attachListener.hpp
index d80f2be9511..25fad127d0f 100644
--- a/src/hotspot/share/services/attachListener.hpp
+++ b/src/hotspot/share/services/attachListener.hpp
@@ -29,6 +29,7 @@
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
+#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
diff --git a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp
index 864a529e3a8..311205b3a57 100644
--- a/src/hotspot/share/services/heapDumper.cpp
+++ b/src/hotspot/share/services/heapDumper.cpp
@@ -46,7 +46,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/reflectionUtils.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
diff --git a/src/hotspot/share/services/heapDumperCompression.cpp b/src/hotspot/share/services/heapDumperCompression.cpp
index 99996f86352..eddeafa98bf 100644
--- a/src/hotspot/share/services/heapDumperCompression.cpp
+++ b/src/hotspot/share/services/heapDumperCompression.cpp
@@ -26,7 +26,7 @@
#include "jvm.h"
#include "runtime/arguments.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumperCompression.hpp"
diff --git a/src/hotspot/share/services/management.cpp b/src/hotspot/share/services/management.cpp
index 04e0619c347..cc27d9caa9f 100644
--- a/src/hotspot/share/services/management.cpp
+++ b/src/hotspot/share/services/management.cpp
@@ -309,7 +309,7 @@ static void initialize_ThreadInfo_constructor_arguments(JavaCallArguments* args,
int thread_status = static_cast<int>(snapshot->thread_status());
assert((thread_status & JMM_THREAD_STATE_FLAG_MASK) == 0, "Flags already set in thread_status in Thread object");
- if (snapshot->is_ext_suspended()) {
+ if (snapshot->is_suspended()) {
thread_status |= JMM_THREAD_STATE_FLAG_SUSPENDED;
}
if (snapshot->is_in_native()) {
diff --git a/src/hotspot/share/services/management.hpp b/src/hotspot/share/services/management.hpp
index 2e4f74a13bc..96b50eb3e61 100644
--- a/src/hotspot/share/services/management.hpp
+++ b/src/hotspot/share/services/management.hpp
@@ -28,6 +28,7 @@
#include "jmm.h"
#include "memory/allocation.hpp"
#include "runtime/handles.hpp"
+#include "runtime/os.hpp"
#include "runtime/perfData.hpp"
#include "runtime/timer.hpp"
diff --git a/src/hotspot/share/services/memBaseline.cpp b/src/hotspot/share/services/memBaseline.cpp
index b390eaca4b8..bffb4667c01 100644
--- a/src/hotspot/share/services/memBaseline.cpp
+++ b/src/hotspot/share/services/memBaseline.cpp
@@ -95,7 +95,7 @@ class MallocAllocationSiteWalker : public MallocSiteWalker {
}
bool do_malloc_site(const MallocSite* site) {
- if (site->size() >= MemBaseline::SIZE_THRESHOLD) {
+ if (site->size() > 0) {
if (_malloc_sites.add(*site) != NULL) {
_count++;
return true;
@@ -103,7 +103,7 @@ class MallocAllocationSiteWalker : public MallocSiteWalker {
return false; // OOM
}
} else {
- // malloc site does not meet threshold, ignore and continue
+ // Ignore empty sites.
return true;
}
}
@@ -125,15 +125,17 @@ class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
VirtualMemoryAllocationWalker() : _count(0) { }
bool do_allocation_site(const ReservedMemoryRegion* rgn) {
- if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
+ if (rgn->size() > 0) {
if (_virtual_memory_regions.add(*rgn) != NULL) {
_count ++;
return true;
} else {
return false;
}
+ } else {
+ // Ignore empty sites.
+ return true;
}
- return true;
}
LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
diff --git a/src/hotspot/share/services/memBaseline.hpp b/src/hotspot/share/services/memBaseline.hpp
index 5e40b5e80b0..96e1a74a587 100644
--- a/src/hotspot/share/services/memBaseline.hpp
+++ b/src/hotspot/share/services/memBaseline.hpp
@@ -43,9 +43,6 @@ typedef LinkedListIterator<ReservedMemoryRegion> VirtualMemoryAllocationIterator;
*/
class MemBaseline {
public:
- enum BaselineThreshold {
- SIZE_THRESHOLD = K // Only allocation size over this threshold will be baselined.
- };
enum BaselineType {
Not_baselined,
diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp
index 4cb216e9fbf..cf81d535522 100644
--- a/src/hotspot/share/services/threadService.cpp
+++ b/src/hotspot/share/services/threadService.cpp
@@ -879,7 +879,7 @@ void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
_sleep_count = stat->sleep_count();
_thread_status = java_lang_Thread::get_thread_status(threadObj);
- _is_ext_suspended = thread->is_being_ext_suspended();
+ _is_suspended = thread->is_suspended();
_is_in_native = (thread->thread_state() == _thread_in_native);
Handle obj = ThreadService::get_current_contended_monitor(thread);
diff --git a/src/hotspot/share/services/threadService.hpp b/src/hotspot/share/services/threadService.hpp
index 6784fbc5c03..be16a9684bf 100644
--- a/src/hotspot/share/services/threadService.hpp
+++ b/src/hotspot/share/services/threadService.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -196,7 +196,7 @@ class ThreadSnapshot : public CHeapObj<mtInternal> {
OopHandle _threadObj;
JavaThreadStatus _thread_status;
- bool _is_ext_suspended;
+ bool _is_suspended;
bool _is_in_native;
jlong _contended_enter_ticks;
@@ -229,7 +229,7 @@ class ThreadSnapshot : public CHeapObj