diff --git a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp index 5db59741a5864..0dd82162bd707 100644 --- a/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp +++ b/src/hotspot/os/linux/gc/x/xPhysicalMemoryBacking_linux.cpp @@ -623,7 +623,7 @@ bool XPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const { } static int offset_to_node(size_t offset) { - const GrowableArray* mapping = os::Linux::numa_nindex_to_node(); + const GrowableArrayCHeap* mapping = os::Linux::numa_nindex_to_node(); const size_t nindex = (offset >> XGranuleSizeShift) % mapping->length(); return mapping->at((int)nindex); } diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp index ff891509365a0..ae4750e1030a4 100644 --- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp +++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp @@ -627,7 +627,7 @@ bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const { } static int offset_to_node(zoffset offset) { - const GrowableArray* mapping = os::Linux::numa_nindex_to_node(); + const GrowableArrayCHeap* mapping = os::Linux::numa_nindex_to_node(); const size_t nindex = (untype(offset) >> ZGranuleSizeShift) % mapping->length(); return mapping->at((int)nindex); } diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp index e94e366acc741..9a16a5debc7f2 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -3145,10 +3145,10 @@ bool os::Linux::libnuma_init() { set_numa_interleave_bitmask(_numa_get_interleave_mask()); set_numa_membind_bitmask(_numa_get_membind()); // Create an index -> node mapping, since nodes are not always consecutive - _nindex_to_node = new (mtInternal) GrowableArray(0, mtInternal); + _nindex_to_node = new GrowableArrayCHeap(0); rebuild_nindex_to_node_map(); // Create 
a cpu -> node mapping - _cpu_to_node = new (mtInternal) GrowableArray(0, mtInternal); + _cpu_to_node = new GrowableArrayCHeap(0); rebuild_cpu_to_node_map(); return true; } @@ -3307,8 +3307,8 @@ int os::Linux::get_node_by_cpu(int cpu_id) { return -1; } -GrowableArray* os::Linux::_cpu_to_node; -GrowableArray* os::Linux::_nindex_to_node; +GrowableArrayCHeap* os::Linux::_cpu_to_node; +GrowableArrayCHeap* os::Linux::_nindex_to_node; os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu; os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus; os::Linux::numa_node_to_cpus_v2_func_t os::Linux::_numa_node_to_cpus_v2; diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp index 4b2ccf8e370db..865f51a6973e2 100644 --- a/src/hotspot/os/linux/os_linux.hpp +++ b/src/hotspot/os/linux/os_linux.hpp @@ -45,8 +45,8 @@ class os::Linux { static bool _supports_fast_thread_cpu_time; - static GrowableArray* _cpu_to_node; - static GrowableArray* _nindex_to_node; + static GrowableArrayCHeap* _cpu_to_node; + static GrowableArrayCHeap* _nindex_to_node; static julong available_memory_in_container(); @@ -71,8 +71,8 @@ class os::Linux { static void rebuild_cpu_to_node_map(); static void rebuild_nindex_to_node_map(); - static GrowableArray* cpu_to_node() { return _cpu_to_node; } - static GrowableArray* nindex_to_node() { return _nindex_to_node; } + static GrowableArrayCHeap* cpu_to_node() { return _cpu_to_node; } + static GrowableArrayCHeap* nindex_to_node() { return _nindex_to_node; } static void print_process_memory_info(outputStream* st); static void print_system_memory_info(outputStream* st); @@ -384,7 +384,7 @@ class os::Linux { } } - static const GrowableArray* numa_nindex_to_node() { + static const GrowableArrayCHeap* numa_nindex_to_node() { return _nindex_to_node; } diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp index d563e8f264953..d3c0b34453020 100644 --- a/src/hotspot/share/cds/archiveBuilder.cpp 
+++ b/src/hotspot/share/cds/archiveBuilder.cpp @@ -67,7 +67,7 @@ ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() { ArchiveBuilder::SourceObjList::SourceObjList() : _ptrmap(16 * K, mtClassShared) { _total_bytes = 0; - _objs = new (mtClassShared) GrowableArray(128 * K, mtClassShared); + _objs = new GrowableArrayCHeap(128 * K); } ArchiveBuilder::SourceObjList::~SourceObjList() { @@ -166,8 +166,8 @@ ArchiveBuilder::ArchiveBuilder() : _estimated_metaspaceobj_bytes(0), _estimated_hashtable_bytes(0) { - _klasses = new (mtClassShared) GrowableArray(4 * K, mtClassShared); - _symbols = new (mtClassShared) GrowableArray(256 * K, mtClassShared); + _klasses = new GrowableArrayCHeap(4 * K); + _symbols = new GrowableArrayCHeap(256 * K); assert(_current == nullptr, "must be"); _current = this; diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp index 4f811a0c51265..28233c061cdfc 100644 --- a/src/hotspot/share/cds/archiveBuilder.hpp +++ b/src/hotspot/share/cds/archiveBuilder.hpp @@ -177,14 +177,14 @@ class ArchiveBuilder : public StackObj { class SourceObjList { uintx _total_bytes; - GrowableArray* _objs; // Source objects to be archived + GrowableArrayCHeap* _objs; // Source objects to be archived CHeapBitMap _ptrmap; // Marks the addresses of the pointer fields // in the source objects public: SourceObjList(); ~SourceObjList(); - GrowableArray* objs() const { return _objs; } + GrowableArrayCHeap* objs() const { return _objs; } void append(SourceObjInfo* src_info); void remember_embedded_pointer(SourceObjInfo* pointing_obj, MetaspaceClosure::Ref* ref); @@ -210,8 +210,8 @@ class ArchiveBuilder : public StackObj { SourceObjList _ro_src_objs; // objs to put in ro region ResizeableResourceHashtable _src_obj_table; ResizeableResourceHashtable _buffered_to_src_table; - GrowableArray* _klasses; - GrowableArray* _symbols; + GrowableArrayCHeap* _klasses; + GrowableArrayCHeap* _symbols; // statistics DumpAllocStats _alloc_stats; @@ -401,8 
+401,8 @@ class ArchiveBuilder : public StackObj { } // All klasses and symbols that will be copied into the archive - GrowableArray* klasses() const { return _klasses; } - GrowableArray* symbols() const { return _symbols; } + GrowableArrayCHeap* klasses() const { return _klasses; } + GrowableArrayCHeap* symbols() const { return _symbols; } static bool is_active() { return (_current != nullptr); diff --git a/src/hotspot/share/cds/classListParser.cpp b/src/hotspot/share/cds/classListParser.cpp index e2099620a7761..cfbb56cf5c762 100644 --- a/src/hotspot/share/cds/classListParser.cpp +++ b/src/hotspot/share/cds/classListParser.cpp @@ -74,8 +74,8 @@ ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) : _id2k } _line_no = 0; _token = _line; - _interfaces = new (mtClass) GrowableArray(10, mtClass); - _indy_items = new (mtClass) GrowableArray(9, mtClass); + _interfaces = new GrowableArrayCHeap(10); + _indy_items = new GrowableArrayCHeap(9); _parse_mode = parse_mode; // _instance should only be accessed by the thread that created _instance. diff --git a/src/hotspot/share/cds/classListParser.hpp b/src/hotspot/share/cds/classListParser.hpp index 74a2ff10515f8..c1089a1027885 100644 --- a/src/hotspot/share/cds/classListParser.hpp +++ b/src/hotspot/share/cds/classListParser.hpp @@ -107,10 +107,10 @@ class ClassListParser : public StackObj { int _line_len; // Original length of the input line. 
int _line_no; // Line number for current line being parsed const char* _class_name; - GrowableArray* _indy_items; // items related to invoke dynamic for archiving lambda proxy classes + GrowableArrayCHeap* _indy_items; // items related to invoke dynamic for archiving lambda proxy classes int _id; int _super; - GrowableArray* _interfaces; + GrowableArrayCHeap* _interfaces; bool _interfaces_specified; const char* _source; bool _lambda_form_line; diff --git a/src/hotspot/share/cds/dumpTimeClassInfo.cpp b/src/hotspot/share/cds/dumpTimeClassInfo.cpp index 6ee18c17a57da..d247c911f9807 100644 --- a/src/hotspot/share/cds/dumpTimeClassInfo.cpp +++ b/src/hotspot/share/cds/dumpTimeClassInfo.cpp @@ -51,12 +51,12 @@ size_t DumpTimeClassInfo::runtime_info_bytesize() const { void DumpTimeClassInfo::add_verification_constraint(InstanceKlass* k, Symbol* name, Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) { if (_verifier_constraints == nullptr) { - _verifier_constraints = new (mtClass) GrowableArray(4, mtClass); + _verifier_constraints = new GrowableArrayCHeap(4); } if (_verifier_constraint_flags == nullptr) { - _verifier_constraint_flags = new (mtClass) GrowableArray(4, mtClass); + _verifier_constraint_flags = new GrowableArrayCHeap(4); } - GrowableArray* vc_array = _verifier_constraints; + GrowableArrayCHeap* vc_array = _verifier_constraints; for (int i = 0; i < vc_array->length(); i++) { if (vc_array->at(i).equals(name, from_name)) { return; @@ -65,7 +65,7 @@ void DumpTimeClassInfo::add_verification_constraint(InstanceKlass* k, Symbol* na DTVerifierConstraint cons(name, from_name); vc_array->append(cons); - GrowableArray* vcflags_array = _verifier_constraint_flags; + GrowableArrayCHeap* vcflags_array = _verifier_constraint_flags; char c = 0; c |= from_field_is_protected ? SystemDictionaryShared::FROM_FIELD_IS_PROTECTED : 0; c |= from_is_array ? 
SystemDictionaryShared::FROM_IS_ARRAY : 0; @@ -96,7 +96,7 @@ void DumpTimeClassInfo::record_linking_constraint(Symbol* name, Handle loader1, assert(loader1 != loader2, "sanity"); LogTarget(Info, class, loader, constraints) log; if (_loader_constraints == nullptr) { - _loader_constraints = new (mtClass) GrowableArray(4, mtClass); + _loader_constraints = new GrowableArrayCHeap(4); } char lt1 = get_loader_type_by(loader1()); char lt2 = get_loader_type_by(loader2()); @@ -128,7 +128,7 @@ void DumpTimeClassInfo::record_linking_constraint(Symbol* name, Handle loader1, void DumpTimeClassInfo::add_enum_klass_static_field(int archived_heap_root_index) { if (_enum_klass_static_fields == nullptr) { - _enum_klass_static_fields = new (mtClass) GrowableArray(20, mtClass); + _enum_klass_static_fields = new GrowableArrayCHeap(20); } _enum_klass_static_fields->append(archived_heap_root_index); } diff --git a/src/hotspot/share/cds/dumpTimeClassInfo.hpp b/src/hotspot/share/cds/dumpTimeClassInfo.hpp index 5ba79e54c79b9..82b3dedd8df5c 100644 --- a/src/hotspot/share/cds/dumpTimeClassInfo.hpp +++ b/src/hotspot/share/cds/dumpTimeClassInfo.hpp @@ -126,10 +126,10 @@ class DumpTimeClassInfo: public CHeapObj { int _id; int _clsfile_size; int _clsfile_crc32; - GrowableArray* _verifier_constraints; - GrowableArray* _verifier_constraint_flags; - GrowableArray* _loader_constraints; - GrowableArray* _enum_klass_static_fields; + GrowableArrayCHeap* _verifier_constraints; + GrowableArrayCHeap* _verifier_constraint_flags; + GrowableArrayCHeap* _loader_constraints; + GrowableArrayCHeap* _enum_klass_static_fields; DumpTimeClassInfo() { _klass = nullptr; @@ -159,7 +159,7 @@ class DumpTimeClassInfo: public CHeapObj { private: template - static int array_length_or_zero(GrowableArray* array) { + static int array_length_or_zero(GrowableArrayCHeap* array) { if (array == nullptr) { return 0; } else { diff --git a/src/hotspot/share/cds/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp index 
9f265520686a2..91b89932da841 100644 --- a/src/hotspot/share/cds/dynamicArchive.cpp +++ b/src/hotspot/share/cds/dynamicArchive.cpp @@ -399,12 +399,12 @@ class VM_PopulateDynamicDumpSharedSpace: public VM_GC_Sync_Operation { // _array_klasses and _dynamic_archive_array_klasses only hold the array klasses // which have element klass in the static archive. -GrowableArray* DynamicArchive::_array_klasses = nullptr; +GrowableArrayCHeap* DynamicArchive::_array_klasses = nullptr; Array* DynamicArchive::_dynamic_archive_array_klasses = nullptr; void DynamicArchive::append_array_klass(ObjArrayKlass* ak) { if (_array_klasses == nullptr) { - _array_klasses = new (mtClassShared) GrowableArray(50, mtClassShared); + _array_klasses = new GrowableArrayCHeap(50); } _array_klasses->append(ak); } diff --git a/src/hotspot/share/cds/dynamicArchive.hpp b/src/hotspot/share/cds/dynamicArchive.hpp index 479e7daa153db..8d69df0247b7a 100644 --- a/src/hotspot/share/cds/dynamicArchive.hpp +++ b/src/hotspot/share/cds/dynamicArchive.hpp @@ -61,7 +61,7 @@ class DynamicArchiveHeader : public FileMapHeader { class DynamicArchive : AllStatic { private: - static GrowableArray* _array_klasses; + static GrowableArrayCHeap* _array_klasses; static Array* _dynamic_archive_array_klasses; public: static void check_for_dynamic_dump(); diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp index 3b50e01479718..2d44cd64640fd 100644 --- a/src/hotspot/share/cds/filemap.cpp +++ b/src/hotspot/share/cds/filemap.cpp @@ -542,7 +542,7 @@ void FileMapInfo::record_non_existent_class_path_entry(const char* path) { assert(CDSConfig::is_dumping_archive(), "sanity"); log_info(class, path)("non-existent Class-Path entry %s", path); if (_non_existent_class_paths == nullptr) { - _non_existent_class_paths = new (mtClass) GrowableArray(10, mtClass); + _non_existent_class_paths = new GrowableArrayCHeap(10); } _non_existent_class_paths->append(os::strdup(path)); } @@ -2246,7 +2246,7 @@ bool 
FileMapInfo::_heap_pointers_need_patching = false; SharedPathTable FileMapInfo::_shared_path_table; bool FileMapInfo::_validating_shared_path_table = false; bool FileMapInfo::_memory_mapping_failed = false; -GrowableArray* FileMapInfo::_non_existent_class_paths = nullptr; +GrowableArrayCHeap* FileMapInfo::_non_existent_class_paths = nullptr; // Open the shared archive file, read and validate the header // information (version, boot classpath, etc.). If initialization diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp index 3d2062093c6b7..7def087b2ee04 100644 --- a/src/hotspot/share/cds/filemap.hpp +++ b/src/hotspot/share/cds/filemap.hpp @@ -49,6 +49,9 @@ class ClassLoaderData; class ClassPathEntry; class outputStream; +template class GrowableArray; +template class GrowableArrayCHeap; + class SharedClassPathEntry : public MetaspaceObj { enum { modules_image_entry, @@ -342,7 +345,7 @@ class FileMapInfo : public CHeapObj { static FileMapInfo* _dynamic_archive_info; static bool _heap_pointers_need_patching; static bool _memory_mapping_failed; - static GrowableArray* _non_existent_class_paths; + static GrowableArrayCHeap* _non_existent_class_paths; public: FileMapHeader *header() const { return _header; } diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp index 0225977e874e5..02e16718d3e4b 100644 --- a/src/hotspot/share/cds/heapShared.cpp +++ b/src/hotspot/share/cds/heapShared.cpp @@ -390,7 +390,7 @@ void HeapShared::archive_java_mirrors() { } } - GrowableArray* klasses = ArchiveBuilder::current()->klasses(); + GrowableArrayCHeap* klasses = ArchiveBuilder::current()->klasses(); assert(klasses != nullptr, "sanity"); for (int i = 0; i < klasses->length(); i++) { Klass* orig_k = klasses->at(i); @@ -621,8 +621,7 @@ KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) { void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) { assert(CDSConfig::is_dumping_heap(), "dump time 
only"); if (_subgraph_entry_fields == nullptr) { - _subgraph_entry_fields = - new (mtClass) GrowableArray(10, mtClass); + _subgraph_entry_fields = new GrowableArrayCHeap(10); } _subgraph_entry_fields->append(static_field_offset); _subgraph_entry_fields->append(HeapShared::append_root(v)); @@ -635,8 +634,7 @@ void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) { Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k); if (_subgraph_object_klasses == nullptr) { - _subgraph_object_klasses = - new (mtClass) GrowableArray(50, mtClass); + _subgraph_object_klasses = new GrowableArrayCHeap(50); } assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class"); @@ -751,7 +749,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) { } // populate the entry fields - GrowableArray* entry_fields = info->subgraph_entry_fields(); + GrowableArrayCHeap* entry_fields = info->subgraph_entry_fields(); if (entry_fields != nullptr) { int num_entry_fields = entry_fields->length(); assert(num_entry_fields % 2 == 0, "sanity"); @@ -763,7 +761,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) { } // the Klasses of the objects in the sub-graphs - GrowableArray* subgraph_object_klasses = info->subgraph_object_klasses(); + GrowableArrayCHeap* subgraph_object_klasses = info->subgraph_object_klasses(); if (subgraph_object_klasses != nullptr) { int num_subgraphs_klasses = subgraph_object_klasses->length(); _subgraph_object_klasses = @@ -1341,7 +1339,7 @@ void HeapShared::verify_reachable_objects_from(oop obj) { // Make sure that these are only instances of the very few specific types // that we can handle. 
void HeapShared::check_default_subgraph_classes() { - GrowableArray* klasses = _default_subgraph_info->subgraph_object_klasses(); + GrowableArrayCHeap* klasses = _default_subgraph_info->subgraph_object_klasses(); int num = klasses->length(); for (int i = 0; i < num; i++) { Klass* subgraph_k = klasses->at(i); diff --git a/src/hotspot/share/cds/heapShared.hpp b/src/hotspot/share/cds/heapShared.hpp index c41339173cdbd..eb3c643cbac00 100644 --- a/src/hotspot/share/cds/heapShared.hpp +++ b/src/hotspot/share/cds/heapShared.hpp @@ -63,10 +63,10 @@ class KlassSubGraphInfo: public CHeapObj { Klass* _k; // A list of classes need to be loaded and initialized before the archived // object sub-graphs can be accessed at runtime. - GrowableArray* _subgraph_object_klasses; + GrowableArrayCHeap* _subgraph_object_klasses; // A list of _k's static fields as the entry points of archived sub-graphs. // For each entry field, it is a tuple of field_offset, field_value - GrowableArray* _subgraph_entry_fields; + GrowableArrayCHeap* _subgraph_entry_fields; // Does this KlassSubGraphInfo belong to the archived full module graph bool _is_full_module_graph; @@ -94,10 +94,10 @@ class KlassSubGraphInfo: public CHeapObj { }; Klass* klass() { return _k; } - GrowableArray* subgraph_object_klasses() { + GrowableArrayCHeap* subgraph_object_klasses() { return _subgraph_object_klasses; } - GrowableArray* subgraph_entry_fields() { + GrowableArrayCHeap* subgraph_entry_fields() { return _subgraph_entry_fields; } void add_subgraph_entry_field(int static_field_offset, oop v); diff --git a/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp b/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp index b9766066484c6..18781289851c8 100644 --- a/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp +++ b/src/hotspot/share/cds/lambdaProxyClassDictionary.hpp @@ -112,14 +112,14 @@ class LambdaProxyClassKey { class DumpTimeLambdaProxyClassInfo { public: - GrowableArray* _proxy_klasses; + GrowableArrayCHeap* 
_proxy_klasses; DumpTimeLambdaProxyClassInfo() : _proxy_klasses(nullptr) {} DumpTimeLambdaProxyClassInfo& operator=(const DumpTimeLambdaProxyClassInfo&) = delete; ~DumpTimeLambdaProxyClassInfo(); void add_proxy_klass(InstanceKlass* proxy_klass) { if (_proxy_klasses == nullptr) { - _proxy_klasses = new (mtClassShared) GrowableArray(5, mtClassShared); + _proxy_klasses = new GrowableArrayCHeap(5); } assert(_proxy_klasses != nullptr, "sanity"); _proxy_klasses->append(proxy_klass); diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp index fedbc5841b682..b4fd718cfba8e 100644 --- a/src/hotspot/share/cds/metaspaceShared.cpp +++ b/src/hotspot/share/cds/metaspaceShared.cpp @@ -437,8 +437,8 @@ class VM_PopulateDumpSharedSpace : public VM_Operation { private: ArchiveHeapInfo _heap_info; - void dump_java_heap_objects(GrowableArray* klasses) NOT_CDS_JAVA_HEAP_RETURN; - void dump_shared_symbol_table(GrowableArray* symbols) { + void dump_java_heap_objects(GrowableArrayCHeap* klasses) NOT_CDS_JAVA_HEAP_RETURN; + void dump_shared_symbol_table(GrowableArrayView* symbols) { log_info(cds)("Dumping symbol table ..."); SymbolTable::write_to_archive(symbols); } @@ -837,7 +837,7 @@ bool MetaspaceShared::try_link_class(JavaThread* current, InstanceKlass* ik) { } #if INCLUDE_CDS_JAVA_HEAP -void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray* klasses) { +void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArrayCHeap* klasses) { if(!HeapShared::can_write()) { log_info(cds)( "Archived java heap is not supported as UseG1GC " diff --git a/src/hotspot/share/cds/metaspaceShared.hpp b/src/hotspot/share/cds/metaspaceShared.hpp index 737340788f16a..c796b94f4c5fe 100644 --- a/src/hotspot/share/cds/metaspaceShared.hpp +++ b/src/hotspot/share/cds/metaspaceShared.hpp @@ -35,8 +35,6 @@ class FileMapInfo; class outputStream; class SerializeClosure; -template class GrowableArray; - enum MapArchiveResult { MAP_ARCHIVE_SUCCESS, 
MAP_ARCHIVE_MMAP_FAILURE, diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp index 43aa82b67f85f..cb72a63176630 100644 --- a/src/hotspot/share/classfile/classLoader.cpp +++ b/src/hotspot/share/classfile/classLoader.cpp @@ -120,8 +120,8 @@ PerfCounter* ClassLoader::_perf_app_classfile_bytes_read = nullptr; PerfCounter* ClassLoader::_perf_sys_classfile_bytes_read = nullptr; PerfCounter* ClassLoader::_unsafe_defineClassCallCounter = nullptr; -GrowableArray* ClassLoader::_patch_mod_entries = nullptr; -GrowableArray* ClassLoader::_exploded_entries = nullptr; +GrowableArrayCHeap* ClassLoader::_patch_mod_entries = nullptr; +GrowableArrayCHeap* ClassLoader::_exploded_entries = nullptr; ClassPathEntry* ClassLoader::_jrt_entry = nullptr; ClassPathEntry* volatile ClassLoader::_first_append_entry_list = nullptr; @@ -561,11 +561,11 @@ void ClassLoader::close_jrt_image() { // loaded is defined to a module that has been specified to --patch-module. void ClassLoader::setup_patch_mod_entries() { JavaThread* current = JavaThread::current(); - GrowableArray* patch_mod_args = Arguments::get_patch_mod_prefix(); + GrowableArrayCHeap* patch_mod_args = Arguments::get_patch_mod_prefix(); int num_of_entries = patch_mod_args->length(); // Set up the boot loader's _patch_mod_entries list - _patch_mod_entries = new (mtModule) GrowableArray(num_of_entries, mtModule); + _patch_mod_entries = new GrowableArrayCHeap(num_of_entries); for (int i = 0; i < num_of_entries; i++) { const char* module_name = (patch_mod_args->at(i))->module_name(); @@ -854,7 +854,7 @@ bool ClassLoader::update_class_path_entry_list(JavaThread* current, } } -static void print_module_entry_table(const GrowableArray* const module_list) { +static void print_module_entry_table(const GrowableArrayCHeap* const module_list) { ResourceMark rm; int num_of_entries = module_list->length(); for (int i = 0; i < num_of_entries; i++) { @@ -1010,7 +1010,7 @@ const char* 
ClassLoader::file_name_for_class_name(const char* class_name, } ClassPathEntry* find_first_module_cpe(ModuleEntry* mod_entry, - const GrowableArray* const module_list) { + const GrowableArrayCHeap* const module_list) { int num_of_entries = module_list->length(); const Symbol* class_module_name = mod_entry->name(); @@ -1030,7 +1030,7 @@ ClassPathEntry* find_first_module_cpe(ModuleEntry* mod_entry, // Search either the patch-module or exploded build entries for class. ClassFileStream* ClassLoader::search_module_entries(JavaThread* current, - const GrowableArray* const module_list, + const GrowableArrayCHeap* const module_list, const char* const class_name, const char* const file_name) { ClassFileStream* stream = nullptr; @@ -1496,8 +1496,7 @@ void ClassLoader::classLoader_init2(JavaThread* current) { // done before loading any classes, by the same thread that will // subsequently do the first class load. So, no lock is needed for this. assert(_exploded_entries == nullptr, "Should only get initialized once"); - _exploded_entries = new (mtModule) - GrowableArray(EXPLODED_ENTRY_SIZE, mtModule); + _exploded_entries = new GrowableArrayCHeap(EXPLODED_ENTRY_SIZE); add_to_exploded_build_list(current, vmSymbols::java_base()); } } diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp index 10373dbcf9f91..5e408580500a5 100644 --- a/src/hotspot/share/classfile/classLoader.hpp +++ b/src/hotspot/share/classfile/classLoader.hpp @@ -44,6 +44,7 @@ class JImageFile; class ClassFileStream; class PackageEntry; template class GrowableArray; +template class GrowableArrayCHeap; class ClassPathEntry : public CHeapObj { private: @@ -181,14 +182,14 @@ class ClassLoader: AllStatic { // to load a class. // 1. Contains the module/path pairs specified to --patch-module - static GrowableArray* _patch_mod_entries; + static GrowableArrayCHeap* _patch_mod_entries; // 2. the base piece // Contains the ClassPathEntry of the modular java runtime image. 
// If no java runtime image is present, this indicates a // build with exploded modules is being used instead. static ClassPathEntry* _jrt_entry; - static GrowableArray* _exploded_entries; + static GrowableArrayCHeap* _exploded_entries; enum { EXPLODED_ENTRY_SIZE = 80 }; // Initial number of exploded modules // 3. the boot loader's append path @@ -294,7 +295,7 @@ class ClassLoader: AllStatic { // Attempt load of individual class from either the patched or exploded modules build lists static ClassFileStream* search_module_entries(JavaThread* current, - const GrowableArray* const module_list, + const GrowableArrayCHeap* const module_list, const char* const class_name, const char* const file_name); diff --git a/src/hotspot/share/classfile/classLoaderData.cpp b/src/hotspot/share/classfile/classLoaderData.cpp index d383fff3eb13e..87da99968798a 100644 --- a/src/hotspot/share/classfile/classLoaderData.cpp +++ b/src/hotspot/share/classfile/classLoaderData.cpp @@ -862,7 +862,7 @@ void ClassLoaderData::add_to_deallocate_list(Metadata* m) { if (!m->is_shared()) { MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag); if (_deallocate_list == nullptr) { - _deallocate_list = new (mtClass) GrowableArray(100, mtClass); + _deallocate_list = new GrowableArrayCHeap(100); } _deallocate_list->append_if_missing(m); ResourceMark rm; diff --git a/src/hotspot/share/classfile/classLoaderData.hpp b/src/hotspot/share/classfile/classLoaderData.hpp index c9d025aded141..d972c187581ed 100644 --- a/src/hotspot/share/classfile/classLoaderData.hpp +++ b/src/hotspot/share/classfile/classLoaderData.hpp @@ -150,7 +150,7 @@ class ClassLoaderData : public CHeapObj { // Metadata to be deallocated when it's safe at class unloading, when // this class loader isn't unloaded itself. 
- GrowableArray* _deallocate_list; + GrowableArrayCHeap* _deallocate_list; // Support for walking class loader data objects // diff --git a/src/hotspot/share/classfile/compactHashtable.cpp b/src/hotspot/share/classfile/compactHashtable.cpp index d4657e35a84eb..60ce1764e1749 100644 --- a/src/hotspot/share/classfile/compactHashtable.cpp +++ b/src/hotspot/share/classfile/compactHashtable.cpp @@ -51,9 +51,9 @@ CompactHashtableWriter::CompactHashtableWriter(int num_entries, assert(_num_buckets > 0, "no buckets"); _num_entries_written = 0; - _buckets = NEW_C_HEAP_ARRAY(GrowableArray*, _num_buckets, mtSymbol); + _buckets = NEW_C_HEAP_ARRAY(EntryBucket*, _num_buckets, mtSymbol); for (int i=0; i<_num_buckets; i++) { - _buckets[i] = new (mtSymbol) GrowableArray(0, mtSymbol); + _buckets[i] = new EntryBucket(0); } _stats = stats; @@ -66,11 +66,11 @@ CompactHashtableWriter::CompactHashtableWriter(int num_entries, CompactHashtableWriter::~CompactHashtableWriter() { for (int index = 0; index < _num_buckets; index++) { - GrowableArray* bucket = _buckets[index]; + EntryBucket* bucket = _buckets[index]; delete bucket; } - FREE_C_HEAP_ARRAY(GrowableArray*, _buckets); + FREE_C_HEAP_ARRAY(EntryBucket*, _buckets); } size_t CompactHashtableWriter::estimate_size(int num_entries) { @@ -96,7 +96,7 @@ void CompactHashtableWriter::add(unsigned int hash, u4 value) { void CompactHashtableWriter::allocate_table() { int entries_space = 0; for (int index = 0; index < _num_buckets; index++) { - GrowableArray* bucket = _buckets[index]; + EntryBucket* bucket = _buckets[index]; int bucket_size = bucket->length(); if (bucket_size == 1) { entries_space++; @@ -125,7 +125,7 @@ void CompactHashtableWriter::allocate_table() { void CompactHashtableWriter::dump_table(NumberSeq* summary) { u4 offset = 0; for (int index = 0; index < _num_buckets; index++) { - GrowableArray* bucket = _buckets[index]; + EntryBucket* bucket = _buckets[index]; int bucket_size = bucket->length(); if (bucket_size == 1) { // bucket 
with one entry is compacted and only has the symbol offset diff --git a/src/hotspot/share/classfile/compactHashtable.hpp b/src/hotspot/share/classfile/compactHashtable.hpp index e99369cc5c376..2557dbbc7b89c 100644 --- a/src/hotspot/share/classfile/compactHashtable.hpp +++ b/src/hotspot/share/classfile/compactHashtable.hpp @@ -111,7 +111,8 @@ class CompactHashtableWriter: public StackObj { int _num_empty_buckets; int _num_value_only_buckets; int _num_other_buckets; - GrowableArray** _buckets; + typedef GrowableArrayCHeap EntryBucket; + EntryBucket** _buckets; CompactHashtableStats* _stats; Array* _compact_buckets; Array* _compact_entries; diff --git a/src/hotspot/share/classfile/dictionary.cpp b/src/hotspot/share/classfile/dictionary.cpp index 01f324eb27ec8..445463234abcf 100644 --- a/src/hotspot/share/classfile/dictionary.cpp +++ b/src/hotspot/share/classfile/dictionary.cpp @@ -409,7 +409,7 @@ void Dictionary::validate_protection_domain(InstanceKlass* klass, // During class loading we may have cached a protection domain that has // since been unreferenced, so this entry should be cleared. 
-void Dictionary::clean_cached_protection_domains(GrowableArray* delete_list) { +void Dictionary::clean_cached_protection_domains(GrowableArrayCHeap* delete_list) { assert(Thread::current()->is_Java_thread(), "only called by JavaThread"); assert_lock_strong(SystemDictionary_lock); assert(!loader_data()->has_class_mirror_holder(), "cld should have a ClassLoader holder not a Class holder"); diff --git a/src/hotspot/share/classfile/dictionary.hpp b/src/hotspot/share/classfile/dictionary.hpp index 153356783aa2a..6aea6b5fa9145 100644 --- a/src/hotspot/share/classfile/dictionary.hpp +++ b/src/hotspot/share/classfile/dictionary.hpp @@ -73,7 +73,7 @@ class Dictionary : public CHeapObj { void all_entries_do(KlassClosure* closure); void classes_do(MetaspaceClosure* it); - void clean_cached_protection_domains(GrowableArray* delete_list); + void clean_cached_protection_domains(GrowableArrayCHeap* delete_list); // Protection domains InstanceKlass* find(Thread* current, Symbol* name, Handle protection_domain); diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp index d65c8b2b79ee6..56cab316c3846 100644 --- a/src/hotspot/share/classfile/javaClasses.cpp +++ b/src/hotspot/share/classfile/javaClasses.cpp @@ -795,8 +795,8 @@ int java_lang_Class::_classData_offset; int java_lang_Class::_classRedefinedCount_offset; bool java_lang_Class::_offsets_computed = false; -GrowableArray* java_lang_Class::_fixup_mirror_list = nullptr; -GrowableArray* java_lang_Class::_fixup_module_field_list = nullptr; +GrowableArrayCHeap* java_lang_Class::_fixup_mirror_list = nullptr; +GrowableArrayCHeap* java_lang_Class::_fixup_module_field_list = nullptr; #ifdef ASSERT inline static void assert_valid_static_string_field(fieldDescriptor* fd) { @@ -962,13 +962,8 @@ void java_lang_Class::set_mirror_module_field(JavaThread* current, Klass* k, Han // Statically allocate fixup lists because they always get created. 
void java_lang_Class::allocate_fixup_lists() { - GrowableArray* mirror_list = - new (mtClass) GrowableArray(40, mtClass); - set_fixup_mirror_list(mirror_list); - - GrowableArray* module_list = - new (mtModule) GrowableArray(500, mtModule); - set_fixup_module_field_list(module_list); + set_fixup_mirror_list(new GrowableArrayCHeap(40)); + set_fixup_module_field_list(new GrowableArrayCHeap(500)); } void java_lang_Class::allocate_mirror(Klass* k, bool is_scratch, Handle protection_domain, Handle classData, @@ -1778,8 +1773,8 @@ oop java_lang_Thread::async_get_stack_trace(oop java_thread, TRAPS) { const Handle _java_thread; int _depth; bool _retry_handshake; - GrowableArray* _methods; - GrowableArray* _bcis; + GrowableArrayCHeap* _methods; + GrowableArrayCHeap* _bcis; GetStackTraceClosure(Handle java_thread) : HandshakeClosure("GetStackTraceClosure"), _java_thread(java_thread), _depth(0), _retry_handshake(false), @@ -1826,8 +1821,8 @@ oop java_lang_Thread::async_get_stack_trace(oop java_thread, TRAPS) { // Pick minimum length that will cover most cases int init_length = 64; - _methods = new (mtInternal) GrowableArray(init_length, mtInternal); - _bcis = new (mtInternal) GrowableArray(init_length, mtInternal); + _methods = new GrowableArrayCHeap(init_length); + _bcis = new GrowableArrayCHeap(init_length); int total_count = 0; for (vframeStream vfst(thread, false, false, carrier); // we don't process frames as we don't care about oops diff --git a/src/hotspot/share/classfile/javaClasses.hpp b/src/hotspot/share/classfile/javaClasses.hpp index 23409f8247065..ca68678ec7834 100644 --- a/src/hotspot/share/classfile/javaClasses.hpp +++ b/src/hotspot/share/classfile/javaClasses.hpp @@ -237,8 +237,8 @@ class java_lang_Class : AllStatic { static bool _offsets_computed; - static GrowableArray* _fixup_mirror_list; - static GrowableArray* _fixup_module_field_list; + static GrowableArrayCHeap* _fixup_mirror_list; + static GrowableArrayCHeap* _fixup_module_field_list; static void 
set_protection_domain(oop java_class, oop protection_domain); static void set_class_loader(oop java_class, oop class_loader); @@ -314,17 +314,17 @@ class java_lang_Class : AllStatic { static int static_oop_field_count(oop java_class); static void set_static_oop_field_count(oop java_class, int size); - static GrowableArray* fixup_mirror_list() { + static GrowableArrayCHeap* fixup_mirror_list() { return _fixup_mirror_list; } - static void set_fixup_mirror_list(GrowableArray* v) { + static void set_fixup_mirror_list(GrowableArrayCHeap* v) { _fixup_mirror_list = v; } - static GrowableArray* fixup_module_field_list() { + static GrowableArrayCHeap* fixup_module_field_list() { return _fixup_module_field_list; } - static void set_fixup_module_field_list(GrowableArray* v) { + static void set_fixup_module_field_list(GrowableArrayCHeap* v) { _fixup_module_field_list = v; } diff --git a/src/hotspot/share/classfile/loaderConstraints.cpp b/src/hotspot/share/classfile/loaderConstraints.cpp index 4206fc10d4f39..46d35c9fe396a 100644 --- a/src/hotspot/share/classfile/loaderConstraints.cpp +++ b/src/hotspot/share/classfile/loaderConstraints.cpp @@ -84,11 +84,11 @@ class LoaderConstraint : public CHeapObj { // Loader constraints enforce correct linking behavior. // Thus, it really operates on ClassLoaderData which represents linking domain, // not class loaders. - GrowableArray* _loaders; // initiating loaders + GrowableArrayCHeap* _loaders; // initiating loaders public: LoaderConstraint(InstanceKlass* klass, ClassLoaderData* loader1, ClassLoaderData* loader2) : _klass(klass) { - _loaders = new (mtClass) GrowableArray(10, mtClass); + _loaders = new GrowableArrayCHeap(10); add_loader_data(loader1); add_loader_data(loader2); } @@ -115,7 +115,7 @@ class LoaderConstraint : public CHeapObj { // For this class name, these are the set of LoaderConstraints for classes loaded with this name. 
class ConstraintSet { // copied into hashtable as value private: - GrowableArray* _constraints; // loader constraints for this class name. + GrowableArrayCHeap* _constraints; // loader constraints for this class name. public: ConstraintSet() : _constraints(nullptr) {} @@ -123,7 +123,7 @@ class ConstraintSet { // copied into hashtable as ConstraintSet& operator=(const ConstraintSet&) = delete; void initialize(LoaderConstraint* constraint) { - _constraints = new (mtClass) GrowableArray(5, mtClass); + _constraints = new GrowableArrayCHeap(5); _constraints->push(constraint); } diff --git a/src/hotspot/share/classfile/moduleEntry.cpp b/src/hotspot/share/classfile/moduleEntry.cpp index 24beecdcaf72f..a1156f23a27d7 100644 --- a/src/hotspot/share/classfile/moduleEntry.cpp +++ b/src/hotspot/share/classfile/moduleEntry.cpp @@ -166,7 +166,7 @@ void ModuleEntry::add_read(ModuleEntry* m) { } else { if (_reads == nullptr) { // Lazily create a module's reads list - _reads = new (mtModule) GrowableArray(MODULE_READS_SIZE, mtModule); + _reads = new GrowableArrayCHeap(MODULE_READS_SIZE); } // Determine, based on this newly established read edge to module m, @@ -430,7 +430,7 @@ ModuleEntry* ModuleEntry::get_archived_entry(ModuleEntry* orig_entry) { // This function is used to archive ModuleEntry::_reads and PackageEntry::_qualified_exports. // GrowableArray cannot be directly archived, as it needs to be expandable at runtime. // Write it out as an Array, and convert it back to GrowableArray at runtime. -Array* ModuleEntry::write_growable_array(GrowableArray* array) { +Array* ModuleEntry::write_growable_array(GrowableArrayCHeap* array) { Array* archived_array = nullptr; int length = (array == nullptr) ? 
0 : array->length(); if (length > 0) { @@ -445,11 +445,11 @@ Array* ModuleEntry::write_growable_array(GrowableArray* ModuleEntry::restore_growable_array(Array* archived_array) { - GrowableArray* array = nullptr; +GrowableArrayCHeap* ModuleEntry::restore_growable_array(Array* archived_array) { + GrowableArrayCHeap* array = nullptr; int length = (archived_array == nullptr) ? 0 : archived_array->length(); if (length > 0) { - array = new (mtModule) GrowableArray(length, mtModule); + array = new GrowableArrayCHeap(length); for (int i = 0; i < length; i++) { ModuleEntry* archived_entry = archived_array->at(i); array->append(archived_entry); @@ -474,7 +474,8 @@ void ModuleEntry::init_as_archived_entry() { _name = ArchiveBuilder::get_buffered_symbol(_name); ArchivePtrMarker::mark_pointer((address*)&_name); } - _reads = (GrowableArray*)archived_reads; + // TODO investigate, this looks like a terrible hack! + _reads = (GrowableArrayCHeap*)archived_reads; if (_version != nullptr) { _version = ArchiveBuilder::get_buffered_symbol(_version); } @@ -687,7 +688,7 @@ void ModuleEntryTable::patch_javabase_entries(JavaThread* current, Handle module java_lang_Class::set_module(Universe::void_mirror(), module_handle()); // Do the fixups for classes that have already been created. 
- GrowableArray * list = java_lang_Class::fixup_module_field_list(); + GrowableArrayCHeap * list = java_lang_Class::fixup_module_field_list(); int list_length = list->length(); for (int i = 0; i < list_length; i++) { Klass* k = list->at(i); diff --git a/src/hotspot/share/classfile/moduleEntry.hpp b/src/hotspot/share/classfile/moduleEntry.hpp index 62a0ba2a0b739..024f894980477 100644 --- a/src/hotspot/share/classfile/moduleEntry.hpp +++ b/src/hotspot/share/classfile/moduleEntry.hpp @@ -68,7 +68,7 @@ class ModuleEntry : public CHeapObj { // for shared classes from this module Symbol* _name; // name of this module ClassLoaderData* _loader_data; - GrowableArray* _reads; // list of modules that are readable by this module + GrowableArrayCHeap* _reads; // list of modules that are readable by this module Symbol* _version; // module version number Symbol* _location; // module location CDS_ONLY(int _shared_path_index;) // >=0 if classes in this module are in CDS archive @@ -177,8 +177,8 @@ class ModuleEntry : public CHeapObj { void init_as_archived_entry(); static ModuleEntry* get_archived_entry(ModuleEntry* orig_entry); bool has_been_archived(); - static Array* write_growable_array(GrowableArray* array); - static GrowableArray* restore_growable_array(Array* archived_array); + static Array* write_growable_array(GrowableArrayCHeap* array); + static GrowableArrayCHeap* restore_growable_array(Array* archived_array); void load_from_archive(ClassLoaderData* loader_data); void restore_archived_oops(ClassLoaderData* loader_data); void clear_archived_oops(); diff --git a/src/hotspot/share/classfile/packageEntry.cpp b/src/hotspot/share/classfile/packageEntry.cpp index 1b315bc24be16..e8b55be212b5e 100644 --- a/src/hotspot/share/classfile/packageEntry.cpp +++ b/src/hotspot/share/classfile/packageEntry.cpp @@ -80,7 +80,7 @@ void PackageEntry::add_qexport(ModuleEntry* m) { if (!has_qual_exports_list()) { // Lazily create a package's qualified exports list. 
// Initial size is small, do not anticipate export lists to be large. - _qualified_exports = new (mtModule) GrowableArray(QUAL_EXP_SIZE, mtModule); + _qualified_exports = new GrowableArrayCHeap(QUAL_EXP_SIZE); } // Determine, based on this newly established export to module m, @@ -250,7 +250,8 @@ void PackageEntry::init_as_archived_entry() { _name = ArchiveBuilder::get_buffered_symbol(_name); _module = ModuleEntry::get_archived_entry(_module); - _qualified_exports = (GrowableArray*)archived_qualified_exports; + // TODO this looks like a terrible hack + _qualified_exports = (GrowableArrayCHeap*)archived_qualified_exports; _defined_by_cds_in_class_path = 0; JFR_ONLY(set_trace_id(0)); // re-init at runtime diff --git a/src/hotspot/share/classfile/packageEntry.hpp b/src/hotspot/share/classfile/packageEntry.hpp index 213f115b2d08f..4a1ae33821dd6 100644 --- a/src/hotspot/share/classfile/packageEntry.hpp +++ b/src/hotspot/share/classfile/packageEntry.hpp @@ -114,7 +114,7 @@ class PackageEntry : public CHeapObj { bool _must_walk_exports; // Contains list of modules this package is qualifiedly exported to. Access // to this list is protected by the Module_lock. - GrowableArray* _qualified_exports; + GrowableArrayCHeap* _qualified_exports; JFR_ONLY(DEFINE_TRACE_ID_FIELD;) // Initial size of a package entry's list of qualified exports. 
diff --git a/src/hotspot/share/classfile/protectionDomainCache.cpp b/src/hotspot/share/classfile/protectionDomainCache.cpp index 7d06a4e09bb5a..e6ad70dbaf32a 100644 --- a/src/hotspot/share/classfile/protectionDomainCache.cpp +++ b/src/hotspot/share/classfile/protectionDomainCache.cpp @@ -69,9 +69,9 @@ void ProtectionDomainCacheTable::trigger_cleanup() { } class CleanProtectionDomainEntries : public CLDClosure { - GrowableArray* _delete_list; + GrowableArrayCHeap* _delete_list; public: - CleanProtectionDomainEntries(GrowableArray* delete_list) : + CleanProtectionDomainEntries(GrowableArrayCHeap* delete_list) : _delete_list(delete_list) {} void do_cld(ClassLoaderData* data) { @@ -82,7 +82,7 @@ class CleanProtectionDomainEntries : public CLDClosure { } }; -static GrowableArray* _delete_list = nullptr; +static GrowableArrayCHeap* _delete_list = nullptr; class HandshakeForPD : public HandshakeClosure { public: @@ -120,8 +120,7 @@ void ProtectionDomainCacheTable::unlink() { // Create a list for holding deleted entries if (_delete_list == nullptr) { - _delete_list = new (mtClass) - GrowableArray(20, mtClass); + _delete_list = new GrowableArrayCHeap(20); } { diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp index 53c6bb06bec20..2b9d47e79a5a4 100644 --- a/src/hotspot/share/classfile/symbolTable.cpp +++ b/src/hotspot/share/classfile/symbolTable.cpp @@ -649,7 +649,7 @@ void SymbolTable::dump(outputStream* st, bool verbose) { } #if INCLUDE_CDS -void SymbolTable::copy_shared_symbol_table(GrowableArray* symbols, +void SymbolTable::copy_shared_symbol_table(const GrowableArrayView* symbols, CompactHashtableWriter* writer) { ArchiveBuilder* builder = ArchiveBuilder::current(); int len = symbols->length(); @@ -670,7 +670,7 @@ size_t SymbolTable::estimate_size_for_archive() { return CompactHashtableWriter::estimate_size(int(_items_count)); } -void SymbolTable::write_to_archive(GrowableArray* symbols) { +void 
SymbolTable::write_to_archive(const GrowableArrayView* symbols) { CompactHashtableWriter writer(int(_items_count), ArchiveBuilder::symbol_stats()); copy_shared_symbol_table(symbols, &writer); if (CDSConfig::is_dumping_static_archive()) { diff --git a/src/hotspot/share/classfile/symbolTable.hpp b/src/hotspot/share/classfile/symbolTable.hpp index 282dd574c683a..bdc1dac68f8bc 100644 --- a/src/hotspot/share/classfile/symbolTable.hpp +++ b/src/hotspot/share/classfile/symbolTable.hpp @@ -32,7 +32,7 @@ #include "utilities/tableStatistics.hpp" class JavaThread; -template class GrowableArray; +template class GrowableArrayView; // TempNewSymbol in symbolHandle.hpp is used with SymbolTable operations, // so include it here. @@ -163,11 +163,11 @@ class SymbolTable : public AllStatic { // Sharing static void shared_symbols_do(SymbolClosure *cl); // no safepoint iteration. private: - static void copy_shared_symbol_table(GrowableArray* symbols, + static void copy_shared_symbol_table(const GrowableArrayView* symbols, CompactHashtableWriter* ch_table); public: static size_t estimate_size_for_archive() NOT_CDS_RETURN_(0); - static void write_to_archive(GrowableArray* symbols) NOT_CDS_RETURN; + static void write_to_archive(const GrowableArrayView* symbols) NOT_CDS_RETURN; static void serialize_shared_table_header(SerializeClosure* soc, bool is_static_archive = true) NOT_CDS_RETURN; diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp index 9b10dab2ef77f..92bb2f8c2d2f4 100644 --- a/src/hotspot/share/code/codeCache.cpp +++ b/src/hotspot/share/code/codeCache.cpp @@ -173,10 +173,10 @@ volatile int CodeCache::_number_of_nmethods_with_dependencies = 0; ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr; // Initialize arrays of CodeHeap subsets -GrowableArray* CodeCache::_heaps = new(mtCode) GrowableArray (static_cast(CodeBlobType::All), mtCode); -GrowableArray* CodeCache::_compiled_heaps = new(mtCode) GrowableArray 
(static_cast(CodeBlobType::All), mtCode); -GrowableArray* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray (static_cast(CodeBlobType::All), mtCode); -GrowableArray* CodeCache::_allocable_heaps = new(mtCode) GrowableArray (static_cast(CodeBlobType::All), mtCode); +CodeCache::CodeHeapArray* CodeCache::_heaps = new CodeHeapArray(static_cast(CodeBlobType::All)); +CodeCache::CodeHeapArray* CodeCache::_compiled_heaps = new CodeHeapArray(static_cast(CodeBlobType::All)); +CodeCache::CodeHeapArray* CodeCache::_nmethod_heaps = new CodeHeapArray(static_cast(CodeBlobType::All)); +CodeCache::CodeHeapArray* CodeCache::_allocable_heaps = new CodeHeapArray(static_cast(CodeBlobType::All)); void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) { size_t total_size = non_nmethod_size + profiled_size + non_profiled_size; @@ -1301,11 +1301,11 @@ CompiledMethod* CodeCache::find_compiled(void* start) { #if INCLUDE_JVMTI // RedefineClasses support for saving nmethods that are dependent on "old" methods. // We don't really expect this table to grow very large. If it does, it can become a hashtable. 
-static GrowableArray* old_compiled_method_table = nullptr; +static GrowableArrayCHeap* old_compiled_method_table = nullptr; static void add_to_old_table(CompiledMethod* c) { if (old_compiled_method_table == nullptr) { - old_compiled_method_table = new (mtCode) GrowableArray(100, mtCode); + old_compiled_method_table = new GrowableArrayCHeap(100); } old_compiled_method_table->push(c); } diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp index 5418a29e58fc8..9a77f05fa3b37 100644 --- a/src/hotspot/share/code/codeCache.hpp +++ b/src/hotspot/share/code/codeCache.hpp @@ -89,10 +89,11 @@ class CodeCache : AllStatic { friend class ShenandoahParallelCodeHeapIterator; private: // CodeHeaps of the cache - static GrowableArray* _heaps; - static GrowableArray* _compiled_heaps; - static GrowableArray* _nmethod_heaps; - static GrowableArray* _allocable_heaps; + typedef GrowableArrayCHeap CodeHeapArray; + static CodeHeapArray* _heaps; + static CodeHeapArray* _compiled_heaps; + static CodeHeapArray* _nmethod_heaps; + static CodeHeapArray* _allocable_heaps; static address _low_bound; // Lower bound of CodeHeap addresses static address _high_bound; // Upper bound of CodeHeap addresses @@ -143,9 +144,9 @@ class CodeCache : AllStatic { static int code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs); static void add_heap(CodeHeap* heap); - static const GrowableArray* heaps() { return _heaps; } - static const GrowableArray* compiled_heaps() { return _compiled_heaps; } - static const GrowableArray* nmethod_heaps() { return _nmethod_heaps; } + static const GrowableArrayCHeap* heaps() { return _heaps; } + static const GrowableArrayCHeap* compiled_heaps() { return _compiled_heaps; } + static const GrowableArrayCHeap* nmethod_heaps() { return _nmethod_heaps; } // Allocation/administration static CodeBlob* allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure = true, CodeBlobType orig_code_blob_type = CodeBlobType::All); // 
allocates a new CodeBlob @@ -442,18 +443,18 @@ template class CodeBlobIterator : publi struct CompiledMethodFilter { static bool apply(CodeBlob* cb) { return cb->is_compiled(); } - static const GrowableArray* heaps() { return CodeCache::compiled_heaps(); } + static const GrowableArrayCHeap* heaps() { return CodeCache::compiled_heaps(); } }; struct NMethodFilter { static bool apply(CodeBlob* cb) { return cb->is_nmethod(); } - static const GrowableArray* heaps() { return CodeCache::nmethod_heaps(); } + static const GrowableArrayCHeap* heaps() { return CodeCache::nmethod_heaps(); } }; struct AllCodeBlobsFilter { static bool apply(CodeBlob* cb) { return true; } - static const GrowableArray* heaps() { return CodeCache::heaps(); } + static const GrowableArrayCHeap* heaps() { return CodeCache::heaps(); } }; typedef CodeBlobIterator CompiledMethodIterator; diff --git a/src/hotspot/share/compiler/compilerEvent.cpp b/src/hotspot/share/compiler/compilerEvent.cpp index 0e14f62a4680a..3e94d42a3fc24 100644 --- a/src/hotspot/share/compiler/compilerEvent.cpp +++ b/src/hotspot/share/compiler/compilerEvent.cpp @@ -57,7 +57,7 @@ class PhaseTypeGuard : public StackObj { Semaphore PhaseTypeGuard::_mutex_semaphore(1); // Table for mapping compiler phases names to int identifiers. -static GrowableArray* phase_names = nullptr; +static GrowableArrayCHeap* phase_names = nullptr; class CompilerPhaseTypeConstant : public JfrSerializer { public: @@ -90,7 +90,8 @@ int CompilerEvent::PhaseEvent::get_phase_id(const char* phase_name, bool may_exi { PhaseTypeGuard guard(sync); if (phase_names == nullptr) { - phase_names = new (mtInternal) GrowableArray(100, mtCompiler); + // TODO mtInternal or mtCompiler? 
+ phase_names = new GrowableArrayCHeap(100); register_jfr_serializer = true; } else if (may_exist) { index = lookup_phase(phase_name); diff --git a/src/hotspot/share/compiler/disassembler.cpp b/src/hotspot/share/compiler/disassembler.cpp index 091f1a2410e31..0a49d641e355e 100644 --- a/src/hotspot/share/compiler/disassembler.cpp +++ b/src/hotspot/share/compiler/disassembler.cpp @@ -196,7 +196,7 @@ class decode_env { static SourceFileInfoTable* _src_table; static const char* _cached_src; - static GrowableArray* _cached_src_lines; + static GrowableArrayCHeap* _cached_src_lines; static SourceFileInfoTable& src_table() { if (_src_table == nullptr) { @@ -230,7 +230,7 @@ bool decode_env::_optionsParsed = false; decode_env::SourceFileInfoTable* decode_env::_src_table = nullptr; const char* decode_env::_cached_src = nullptr; -GrowableArray* decode_env::_cached_src_lines = nullptr; +GrowableArrayCHeap* decode_env::_cached_src_lines = nullptr; void decode_env::hook(const char* file, int line, address pc) { // For simplication, we never free from this table. 
It's really not @@ -265,7 +265,7 @@ void decode_env::print_hook_comments(address pc, bool newline) { } _cached_src_lines->clear(); } else { - _cached_src_lines = new (mtCode) GrowableArray(0, mtCode); + _cached_src_lines = new GrowableArrayCHeap(0); } if ((fp = os::fopen(file, "r")) == nullptr) { diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp index 9f903bc924f1e..8872a7105df3a 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp @@ -29,7 +29,7 @@ #include "utilities/bitMap.inline.hpp" #include "utilities/growableArray.hpp" -G1CollectionCandidateList::G1CollectionCandidateList() : _candidates(2, mtGC) { } +G1CollectionCandidateList::G1CollectionCandidateList() : _candidates(2) { } void G1CollectionCandidateList::set(G1CollectionSetCandidateInfo* candidate_infos, uint num_infos) { assert(_candidates.is_empty(), "must be"); @@ -58,7 +58,7 @@ void G1CollectionCandidateList::remove(G1CollectionCandidateRegionList* other) { // Create a list from scratch, copying over the elements from the candidate // list not in the other list. Finally deallocate and overwrite the old list. 
int new_length = _candidates.length() - other->length(); - GrowableArray new_list(new_length, mtGC); + GrowableArrayCHeap new_list(new_length); uint other_idx = 0; @@ -118,7 +118,7 @@ int G1CollectionCandidateList::compare(G1CollectionSetCandidateInfo* ci1, G1Coll } } -G1CollectionCandidateRegionList::G1CollectionCandidateRegionList() : _regions(2, mtGC) { } +G1CollectionCandidateRegionList::G1CollectionCandidateRegionList() : _regions(2) { } void G1CollectionCandidateRegionList::append(HeapRegion* r) { assert(!_regions.contains(r), "must be"); diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp index 4eb5523d0399e..eb9832a7c83cb 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp @@ -43,7 +43,7 @@ using G1CollectionCandidateRegionListIterator = GrowableArrayIterator _regions; + GrowableArrayCHeap _regions; public: G1CollectionCandidateRegionList(); @@ -99,7 +99,7 @@ class G1CollectionCandidateListIterator : public StackObj { class G1CollectionCandidateList : public CHeapObj { friend class G1CollectionCandidateListIterator; - GrowableArray _candidates; + GrowableArrayCHeap _candidates; public: G1CollectionCandidateList(); diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp index 2ed6ccd4735fb..317da19289712 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp @@ -87,7 +87,7 @@ void G1FullGCCompactTask::compact_region(HeapRegion* hr) { void G1FullGCCompactTask::work(uint worker_id) { Ticks start = Ticks::now(); - GrowableArray* compaction_queue = collector()->compaction_point(worker_id)->regions(); + GrowableArrayCHeap* compaction_queue = collector()->compaction_point(worker_id)->regions(); for (GrowableArrayIterator it = compaction_queue->begin(); it != compaction_queue->end(); ++it) { @@ -97,7 +97,7 @@ 
void G1FullGCCompactTask::work(uint worker_id) { void G1FullGCCompactTask::serial_compaction() { GCTraceTime(Debug, gc, phases) tm("Phase 4: Serial Compaction", collector()->scope()->timer()); - GrowableArray* compaction_queue = collector()->serial_compaction_point()->regions(); + GrowableArrayCHeap* compaction_queue = collector()->serial_compaction_point()->regions(); for (GrowableArrayIterator it = compaction_queue->begin(); it != compaction_queue->end(); ++it) { diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp index 7669771eb6c3c..f814de9db31c0 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp @@ -34,7 +34,7 @@ G1FullGCCompactionPoint::G1FullGCCompactionPoint(G1FullCollector* collector) : _collector(collector), _current_region(nullptr), _compaction_top(nullptr) { - _compaction_regions = new (mtGC) GrowableArray(32, mtGC); + _compaction_regions = new GrowableArrayCHeap(32); _compaction_region_iterator = _compaction_regions->begin(); } @@ -75,7 +75,7 @@ HeapRegion* G1FullGCCompactionPoint::next_region() { return next; } -GrowableArray* G1FullGCCompactionPoint::regions() { +GrowableArrayCHeap* G1FullGCCompactionPoint::regions() { return _compaction_regions; } diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp index ca76f7e6b9408..3dca08e9e1608 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp @@ -37,7 +37,7 @@ class G1FullGCCompactionPoint : public CHeapObj { G1FullCollector* _collector; HeapRegion* _current_region; HeapWord* _compaction_top; - GrowableArray* _compaction_regions; + GrowableArrayCHeap* _compaction_regions; GrowableArrayIterator _compaction_region_iterator; bool object_will_fit(size_t size); @@ -62,7 +62,7 @@ class G1FullGCCompactionPoint : public CHeapObj { void 
remove_at_or_above(uint bottom); HeapRegion* current_region(); - GrowableArray* regions(); + GrowableArrayCHeap* regions(); }; #endif // SHARE_GC_G1_G1FULLGCCOMPACTIONPOINT_HPP diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp index 9e22acc73ca33..768c845c5e9ef 100644 --- a/src/hotspot/share/gc/g1/g1Policy.cpp +++ b/src/hotspot/share/gc/g1/g1Policy.cpp @@ -507,7 +507,7 @@ uint G1Policy::calculate_desired_eden_length_before_mixed(double base_time_ms, } double G1Policy::predict_survivor_regions_evac_time() const { - const GrowableArray* survivor_regions = _g1h->survivor()->regions(); + const GrowableArrayCHeap* survivor_regions = _g1h->survivor()->regions(); double survivor_regions_evac_time = predict_young_region_other_time_ms(_g1h->survivor()->length()); for (GrowableArrayIterator it = survivor_regions->begin(); it != survivor_regions->end(); diff --git a/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp b/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp index dc821b135148f..8927aca17e13c 100644 --- a/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp +++ b/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp @@ -29,7 +29,7 @@ #include "utilities/debug.hpp" G1SurvivorRegions::G1SurvivorRegions() : - _regions(new (mtGC) GrowableArray(8, mtGC)), + _regions(new GrowableArrayCHeap(8)), _used_bytes(0), _regions_on_node() {} diff --git a/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp b/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp index 2648e71ea0385..3e1bc1f3f116e 100644 --- a/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp +++ b/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp @@ -28,15 +28,14 @@ #include "gc/g1/g1RegionsOnNodes.hpp" #include "runtime/globals.hpp" -template -class GrowableArray; +template class GrowableArrayCHeap; class HeapRegion; class G1SurvivorRegions { private: - GrowableArray* _regions; - volatile size_t _used_bytes; - G1RegionsOnNodes _regions_on_node; + GrowableArrayCHeap* _regions; + volatile size_t _used_bytes; + 
G1RegionsOnNodes _regions_on_node; public: G1SurvivorRegions(); @@ -50,7 +49,7 @@ class G1SurvivorRegions { uint length() const; uint regions_on_node(uint node_index) const; - const GrowableArray* regions() const { + const GrowableArrayCHeap* regions() const { return _regions; } diff --git a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp index 2a39ce7b4700d..1ef5b9d3f3e25 100644 --- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp +++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp @@ -39,7 +39,7 @@ #include "utilities/align.hpp" MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment), _must_use_large_pages(false) { - _lgrp_spaces = new (mtGC) GrowableArray(0, mtGC); + _lgrp_spaces = new GrowableArrayCHeap(0); _page_size = os::vm_page_size(); _adaptation_cycles = 0; _samples_count = 0; diff --git a/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp b/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp index 77ecb4da46671..151e89a9ddb8e 100644 --- a/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp +++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp @@ -120,7 +120,7 @@ class MutableNUMASpace : public MutableSpace { void accumulate_statistics(size_t page_size); }; - GrowableArray* _lgrp_spaces; + GrowableArrayCHeap* _lgrp_spaces; size_t _page_size; unsigned _adaptation_cycles, _samples_count; @@ -157,7 +157,7 @@ class MutableNUMASpace : public MutableSpace { int lgrp_space_index(int lgrp_id) const; public: - GrowableArray* lgrp_spaces() const { return _lgrp_spaces; } + GrowableArrayCHeap* lgrp_spaces() const { return _lgrp_spaces; } MutableNUMASpace(size_t alignment); virtual ~MutableNUMASpace(); // Space initialization. 
diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.cpp b/src/hotspot/share/gc/parallel/psCompactionManager.cpp index e3b35db4bff52..f2cfb9b98c166 100644 --- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp +++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp @@ -48,7 +48,7 @@ ParCompactionManager::RegionTaskQueueSet* ParCompactionManager::_region_task_q ObjectStartArray* ParCompactionManager::_start_array = nullptr; ParMarkBitMap* ParCompactionManager::_mark_bitmap = nullptr; -GrowableArray* ParCompactionManager::_shadow_region_array = nullptr; +GrowableArrayCHeap* ParCompactionManager::_shadow_region_array = nullptr; Monitor* ParCompactionManager::_shadow_region_monitor = nullptr; ParCompactionManager::ParCompactionManager() { @@ -60,7 +60,7 @@ ParCompactionManager::ParCompactionManager() { reset_bitmap_query_cache(); - _deferred_obj_array = new (mtGC) GrowableArray(10, mtGC); + _deferred_obj_array = new GrowableArrayCHeap(10); } void ParCompactionManager::initialize(ParMarkBitMap* mbm) { @@ -89,7 +89,7 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) { assert(ParallelScavengeHeap::heap()->workers().max_workers() != 0, "Not initialized?"); - _shadow_region_array = new (mtGC) GrowableArray(10, mtGC); + _shadow_region_array = new GrowableArrayCHeap(10); _shadow_region_monitor = new Monitor(Mutex::nosafepoint, "CompactionManager_lock"); } diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.hpp index 458d33af74a87..705da37b907d4 100644 --- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp +++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp @@ -75,13 +75,13 @@ class ParCompactionManager : public CHeapObj { // type of TaskQueue. RegionTaskQueue _region_stack; - GrowableArray* _deferred_obj_array; + GrowableArrayCHeap* _deferred_obj_array; static ParMarkBitMap* _mark_bitmap; // Contains currently free shadow regions. 
We use it in // a LIFO fashion for better data locality and utilization. - static GrowableArray* _shadow_region_array; + static GrowableArrayCHeap* _shadow_region_array; // Provides mutual exclusive access of _shadow_region_array. // See pop/push_shadow_region_mt_safe() below diff --git a/src/hotspot/share/gc/shared/gcTimer.cpp b/src/hotspot/share/gc/shared/gcTimer.cpp index e293cb335a89d..f379041024437 100644 --- a/src/hotspot/share/gc/shared/gcTimer.cpp +++ b/src/hotspot/share/gc/shared/gcTimer.cpp @@ -114,7 +114,7 @@ GCPhase::PhaseType TimePartitions::current_phase_type() const { } TimePartitions::TimePartitions() { - _phases = new (mtGC) GrowableArray(INITIAL_CAPACITY, mtGC); + _phases = new GrowableArrayCHeap(INITIAL_CAPACITY); clear(); } diff --git a/src/hotspot/share/gc/shared/gcTimer.hpp b/src/hotspot/share/gc/shared/gcTimer.hpp index c6a8b7ec08202..324b3e814d62c 100644 --- a/src/hotspot/share/gc/shared/gcTimer.hpp +++ b/src/hotspot/share/gc/shared/gcTimer.hpp @@ -33,7 +33,7 @@ class ConcurrentPhase; class GCPhase; class PausePhase; -template class GrowableArray; +template class GrowableArrayCHeap; class PhaseVisitor { public: @@ -99,7 +99,7 @@ class TimePartitions { static const int INITIAL_CAPACITY = 10; - GrowableArray* _phases; + GrowableArrayCHeap* _phases; PhasesStack _active_phases; Tickspan _sum_of_pauses; diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp index 70439459d3523..b9d89d98484b4 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp @@ -1288,8 +1288,8 @@ void SignatureHandlerLibrary::initialize() { SignatureHandlerLibrary::buffer_size); _buffer = bb->code_begin(); - _fingerprints = new (mtCode) GrowableArray(32, mtCode); - _handlers = new (mtCode) GrowableArray
(32, mtCode); + _fingerprints = new GrowableArrayCHeap(32); + _handlers = new GrowableArrayCHeap(32); } address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) { @@ -1439,11 +1439,11 @@ void SignatureHandlerLibrary::add(uint64_t fingerprint, address handler) { } -BufferBlob* SignatureHandlerLibrary::_handler_blob = nullptr; -address SignatureHandlerLibrary::_handler = nullptr; -GrowableArray* SignatureHandlerLibrary::_fingerprints = nullptr; -GrowableArray
* SignatureHandlerLibrary::_handlers = nullptr; -address SignatureHandlerLibrary::_buffer = nullptr; +BufferBlob* SignatureHandlerLibrary::_handler_blob = nullptr; +address SignatureHandlerLibrary::_handler = nullptr; +GrowableArrayCHeap* SignatureHandlerLibrary::_fingerprints = nullptr; +GrowableArrayCHeap* SignatureHandlerLibrary::_handlers = nullptr; +address SignatureHandlerLibrary::_buffer = nullptr; JRT_ENTRY(void, InterpreterRuntime::prepare_native_call(JavaThread* current, Method* method)) diff --git a/src/hotspot/share/interpreter/interpreterRuntime.hpp b/src/hotspot/share/interpreter/interpreterRuntime.hpp index 297585d37e849..f580699d85d4a 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.hpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.hpp @@ -166,11 +166,11 @@ class SignatureHandlerLibrary: public AllStatic { enum { blob_size = 32*K }; // the size of a handler code blob. private: - static BufferBlob* _handler_blob; // the current buffer blob containing the generated handlers - static address _handler; // next available address within _handler_blob; - static GrowableArray* _fingerprints; // the fingerprint collection - static GrowableArray
* _handlers; // the corresponding handlers - static address _buffer; // the temporary code buffer + static BufferBlob* _handler_blob; // the current buffer blob containing the generated handlers + static address _handler; // next available address within _handler_blob; + static GrowableArrayCHeap* _fingerprints; // the fingerprint collection + static GrowableArrayCHeap* _handlers; // the corresponding handlers + static address _buffer; // the temporary code buffer static address set_handler_blob(); static void initialize(); diff --git a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp index bb91016e52223..a4dfb6026015f 100644 --- a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp +++ b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp @@ -1663,7 +1663,7 @@ static void copy_traceid(const InstanceKlass* ik, const InstanceKlass* new_ik) { static const Klass* klass_being_redefined(const InstanceKlass* ik, JvmtiThreadState* state) { assert(ik != nullptr, "invariant"); assert(state != nullptr, "invariant"); - const GrowableArray* const redef_klasses = state->get_classes_being_redefined(); + const GrowableArrayCHeap* const redef_klasses = state->get_classes_being_redefined(); if (redef_klasses == nullptr || redef_klasses->is_empty()) { return nullptr; } diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp index c08cc543f2f96..1d907d69edb2c 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp @@ -217,7 +217,7 @@ bool EdgeStore::put_edges(StoredEdge** previous, const Edge** current, size_t li return nullptr == *current; } -static GrowableArray* _leak_context_edges = nullptr; +static GrowableArrayCHeap* _leak_context_edges = nullptr; EdgeStore::EdgeStore() : _edges(new EdgeHashTable(this)) {} @@ -284,7 
+284,7 @@ static const int initial_size = 64; static int save(const StoredEdge* edge) { assert(edge != nullptr, "invariant"); if (_leak_context_edges == nullptr) { - _leak_context_edges = new (mtTracing) GrowableArray(initial_size, mtTracing); + _leak_context_edges = new GrowableArrayCHeap(initial_size); _leak_context_edges->append(nullptr); // next idx now at 1, for disambiguation in markword. } return _leak_context_edges->append(edge); diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp index 9f6679c93ebfc..7e6a2fbc55da2 100644 --- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp @@ -51,11 +51,11 @@ const int initial_array_size = 64; template -static GrowableArray* c_heap_allocate_array(int size = initial_array_size) { - return new (mtTracing) GrowableArray(size, mtTracing); +static GrowableArrayCHeap* c_heap_allocate_array(int size = initial_array_size) { + return new GrowableArrayCHeap(size); } -static GrowableArray* unloaded_thread_id_set = nullptr; +static GrowableArrayCHeap* unloaded_thread_id_set = nullptr; class ThreadIdExclusiveAccess : public StackObj { private: diff --git a/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp b/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp index 9d57cddb44808..aaa1cc0c3a57a 100644 --- a/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp +++ b/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp @@ -42,7 +42,7 @@ struct InterfaceEntry { mutable bool written; }; -static GrowableArray* _interfaces = nullptr; +static GrowableArrayCHeap* _interfaces = nullptr; void JfrNetworkUtilization::destroy() { if (_interfaces != nullptr) { @@ -54,7 +54,7 @@ void JfrNetworkUtilization::destroy() { } } -static InterfaceEntry& new_entry(const NetworkInterface* iface, GrowableArray* interfaces) { +static 
InterfaceEntry& new_entry(const NetworkInterface* iface, GrowableArrayCHeap* interfaces) { assert(iface != nullptr, "invariant"); assert(interfaces != nullptr, "invariant"); @@ -75,9 +75,9 @@ static InterfaceEntry& new_entry(const NetworkInterface* iface, GrowableArrayat(_interfaces->append(entry)); } -static GrowableArray* get_interfaces() { +static GrowableArrayCHeap* get_interfaces() { if (_interfaces == nullptr) { - _interfaces = new (mtTracing) GrowableArray(10, mtTracing); + _interfaces = new GrowableArrayCHeap(10); } return _interfaces; } @@ -87,7 +87,7 @@ static InterfaceEntry& get_entry(const NetworkInterface* iface) { // in the same order every time. static int saved_index = -1; - GrowableArray* interfaces = get_interfaces(); + GrowableArrayCHeap* interfaces = get_interfaces(); assert(interfaces != nullptr, "invariant"); for (int i = 0; i < _interfaces->length(); ++i) { saved_index = (saved_index + 1) % _interfaces->length(); diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp index f16542d13ac39..00f90915ac708 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp @@ -263,7 +263,7 @@ void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group(JfrThreadGroupPointer } JfrThreadGroup::JfrThreadGroup() : - _list(new (mtTracing) GrowableArray(initial_array_size, mtTracing)) {} + _list(new GrowableArrayCHeap(initial_array_size)) {} JfrThreadGroup::~JfrThreadGroup() { if (_list != nullptr) { diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.hpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.hpp index 8226c6ebef293..c6b612016a0cb 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.hpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.hpp @@ -40,7 +40,7 @@ class JfrThreadGroup : public 
JfrCHeapObj { private: static JfrThreadGroup* _instance; class JfrThreadGroupEntry; - GrowableArray* _list; + GrowableArrayCHeap* _list; JfrThreadGroup(); JfrThreadGroupEntry* find_entry(const JfrThreadGroupPointers& ptrs) const; diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp index f07078eaf06de..2e0483247a866 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp @@ -127,7 +127,7 @@ void JfrTraceId::assign(const Klass* klass) { if (state == nullptr) { return; } - const GrowableArray* const redef_klasses = state->get_classes_being_redefined(); + const GrowableArrayCHeap* const redef_klasses = state->get_classes_being_redefined(); if (redef_klasses == nullptr || redef_klasses->is_empty()) { return; } diff --git a/src/hotspot/share/jfr/recorder/jfrRecorder.cpp b/src/hotspot/share/jfr/recorder/jfrRecorder.cpp index 40c3d7a8c4f4d..4368fd2344f6b 100644 --- a/src/hotspot/share/jfr/recorder/jfrRecorder.cpp +++ b/src/hotspot/share/jfr/recorder/jfrRecorder.cpp @@ -108,7 +108,7 @@ bool JfrRecorder::on_create_vm_1() { return JfrTime::initialize(); } -static GrowableArray* dcmd_recordings_array = nullptr; +static GrowableArrayCHeap* dcmd_recordings_array = nullptr; static void release_recordings() { if (dcmd_recordings_array != nullptr) { @@ -141,14 +141,14 @@ static bool parse_recording_options(const char* options, JfrStartFlightRecording } static bool validate_recording_options(TRAPS) { - const GrowableArray* options = JfrOptionSet::start_flight_recording_options(); + const GrowableArrayCHeap* options = JfrOptionSet::start_flight_recording_options(); if (options == nullptr) { return true; } const int length = options->length(); assert(length >= 1, "invariant"); assert(dcmd_recordings_array == nullptr, "invariant"); - dcmd_recordings_array = new (mtTracing) 
GrowableArray(length, mtTracing); + dcmd_recordings_array = new GrowableArrayCHeap(length); assert(dcmd_recordings_array != nullptr, "invariant"); for (int i = 0; i < length; ++i) { JfrStartFlightRecordingDCmd* const dcmd_recording = new (mtTracing) JfrStartFlightRecordingDCmd(tty, true); diff --git a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp index e0bbd8a6ddc28..706b8435a8068 100644 --- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp +++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp @@ -250,7 +250,7 @@ static int64_t file_size(fio_fd fd) { class RepositoryIterator : public StackObj { private: - GrowableArray* _file_names; + GrowableArrayCHeap* _file_names; int _path_buffer_file_name_offset; mutable int _iterator; const char* fully_qualified(const char* file_name) const; @@ -328,7 +328,7 @@ RepositoryIterator::RepositoryIterator(const char* repository_path) : if (_path_buffer_file_name_offset == -1) { return; } - _file_names = new (mtTracing) GrowableArray(10, mtTracing); + _file_names = new GrowableArrayCHeap(10); if (_file_names == nullptr) { log_error(jfr, system)("Unable to malloc memory during jfr emergency dump"); return; diff --git a/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp b/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp index 595fcc9c65daa..b77c40dcd7302 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp @@ -758,7 +758,7 @@ bool JfrOptionSet::parse_flight_recorder_option(const JavaVMOption** option, cha return false; } -static GrowableArray* start_flight_recording_options_array = nullptr; +static GrowableArrayCHeap* start_flight_recording_options_array = nullptr; bool JfrOptionSet::parse_start_flight_recording_option(const JavaVMOption** option, char* delimiter) { assert(option != nullptr, "invariant"); @@ -782,7 +782,7 @@ bool 
JfrOptionSet::parse_start_flight_recording_option(const JavaVMOption** opti const size_t value_length = strlen(value); if (start_flight_recording_options_array == nullptr) { - start_flight_recording_options_array = new (mtTracing) GrowableArray(8, mtTracing); + start_flight_recording_options_array = new GrowableArrayCHeap(8); } assert(start_flight_recording_options_array != nullptr, "invariant"); char* const startup_value = NEW_C_HEAP_ARRAY(char, value_length + 1, mtTracing); @@ -792,7 +792,7 @@ bool JfrOptionSet::parse_start_flight_recording_option(const JavaVMOption** opti return false; } -const GrowableArray* JfrOptionSet::start_flight_recording_options() { +const GrowableArrayCHeap* JfrOptionSet::start_flight_recording_options() { return start_flight_recording_options_array; } diff --git a/src/hotspot/share/jfr/recorder/service/jfrOptionSet.hpp b/src/hotspot/share/jfr/recorder/service/jfrOptionSet.hpp index 9ad810bc3cd9c..a35c5d2b36b06 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrOptionSet.hpp +++ b/src/hotspot/share/jfr/recorder/service/jfrOptionSet.hpp @@ -29,8 +29,7 @@ #include "memory/allStatic.hpp" #include "utilities/exceptions.hpp" -template -class GrowableArray; +template class GrowableArrayCHeap; // // Command-line options and defaults @@ -77,7 +76,7 @@ class JfrOptionSet : public AllStatic { static bool parse_flight_recorder_option(const JavaVMOption** option, char* delimiter); static bool parse_start_flight_recording_option(const JavaVMOption** option, char* delimiter); - static const GrowableArray* start_flight_recording_options(); + static const GrowableArrayCHeap* start_flight_recording_options(); static void release_start_flight_recording_options(); }; diff --git a/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp b/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp index cd47630228931..ebcf638e9eb4e 100644 --- a/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp +++ b/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp @@ -49,10 +49,11 @@ 
static oop new_java_util_arraylist(TRAPS) { static const int initial_array_size = 64; -template -static GrowableArray* c_heap_allocate_array(int size = initial_array_size) { - return new (mtTracing) GrowableArray(size, mtTracing); -} +// TODO remove? +//template +//static GrowableArrayCHeap* c_heap_allocate_array(int size = initial_array_size) { +// return new GrowableArrayCHeap(size); +//} static bool initialize(TRAPS) { static bool initialized = false; diff --git a/src/hotspot/share/jfr/support/jfrKlassUnloading.cpp b/src/hotspot/share/jfr/support/jfrKlassUnloading.cpp index 46d9cea90e90c..bb73cc8da04c1 100644 --- a/src/hotspot/share/jfr/support/jfrKlassUnloading.cpp +++ b/src/hotspot/share/jfr/support/jfrKlassUnloading.cpp @@ -35,43 +35,43 @@ static const int initial_array_size = 64; template -static GrowableArray* c_heap_allocate_array(int size = initial_array_size) { - return new (mtTracing) GrowableArray(size, mtTracing); +static GrowableArrayCHeap* c_heap_allocate_array(int size = initial_array_size) { + return new GrowableArrayCHeap(size); } // Track the set of unloaded klasses during a chunk / epoch. -static GrowableArray* _unload_set_epoch_0 = nullptr; -static GrowableArray* _unload_set_epoch_1 = nullptr; +static GrowableArrayCHeap* _unload_set_epoch_0 = nullptr; +static GrowableArrayCHeap* _unload_set_epoch_1 = nullptr; static s8 event_klass_unloaded_count = 0; -static GrowableArray* unload_set_epoch_0() { +static GrowableArrayCHeap* unload_set_epoch_0() { if (_unload_set_epoch_0 == nullptr) { _unload_set_epoch_0 = c_heap_allocate_array(initial_array_size); } return _unload_set_epoch_0; } -static GrowableArray* unload_set_epoch_1() { +static GrowableArrayCHeap* unload_set_epoch_1() { if (_unload_set_epoch_1 == nullptr) { _unload_set_epoch_1 = c_heap_allocate_array(initial_array_size); } return _unload_set_epoch_1; } -static GrowableArray* get_unload_set(u1 epoch) { +static GrowableArrayCHeap* get_unload_set(u1 epoch) { return epoch == 0 ? 
unload_set_epoch_0() : unload_set_epoch_1(); } -static GrowableArray* get_unload_set() { +static GrowableArrayCHeap* get_unload_set() { return get_unload_set(JfrTraceIdEpoch::current()); } -static GrowableArray* get_unload_set_previous_epoch() { +static GrowableArrayCHeap* get_unload_set_previous_epoch() { return get_unload_set(JfrTraceIdEpoch::previous()); } -static void sort_set(GrowableArray* set) { +static void sort_set(GrowableArrayCHeap* set) { assert(set != nullptr, "invariant"); assert(set->is_nonempty(), "invariant"); set->sort(sort_traceid); @@ -103,7 +103,7 @@ void JfrKlassUnloading::clear() { static bool add_to_unloaded_klass_set(traceid klass_id, bool current_epoch) { assert_locked_or_safepoint(ClassLoaderDataGraph_lock); - GrowableArray* const unload_set = current_epoch ? get_unload_set() : get_unload_set_previous_epoch(); + GrowableArrayCHeap* const unload_set = current_epoch ? get_unload_set() : get_unload_set_previous_epoch(); assert(unload_set != nullptr, "invariant"); assert(unload_set->find(klass_id) == -1, "invariant"); unload_set->append(klass_id); diff --git a/src/hotspot/share/jfr/utilities/jfrPredicate.hpp b/src/hotspot/share/jfr/utilities/jfrPredicate.hpp index 88d4b7b6ba2aa..8dd914f9d641f 100644 --- a/src/hotspot/share/jfr/utilities/jfrPredicate.hpp +++ b/src/hotspot/share/jfr/utilities/jfrPredicate.hpp @@ -34,7 +34,7 @@ template class JfrPredicate : AllStatic { public: - static bool test(GrowableArray* set, T value) { + static bool test(GrowableArrayView* set, T value) { assert(set != nullptr, "invariant"); bool found = false; set->template find_sorted(value, found); @@ -48,6 +48,16 @@ class JfrPredicate : AllStatic { template class JfrMutablePredicate : AllStatic { public: + static bool test(GrowableArrayCHeap* set, T value) { + assert(set != nullptr, "invariant"); + bool found = false; + const int location = set->template find_sorted(value, found); + if (!found) { + set->insert_before(location, value); + } + return found; + } + // TODO 
can we unify this somehow? But we need insert_before, and how do I do the WithAllocator? static bool test(GrowableArray* set, T value) { assert(set != nullptr, "invariant"); bool found = false; diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp index 5e9999f2733c8..6273d23f549f4 100644 --- a/src/hotspot/share/jvmci/jvmciRuntime.cpp +++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp @@ -919,13 +919,6 @@ static bool is_referent_non_null(oop* handle) { return handle != nullptr && *handle != nullptr; } -// Swaps the elements in `array` at index `a` and index `b` -static void swap(GrowableArray* array, int a, int b) { - oop* tmp = array->at(a); - array->at_put(a, array->at(b)); - array->at_put(b, tmp); -} - int JVMCIRuntime::release_cleared_oop_handles() { // Despite this lock, it's possible for another thread // to clear a handle's referent concurrently (e.g., a thread @@ -949,7 +942,7 @@ int JVMCIRuntime::release_cleared_oop_handles() { if (is_referent_non_null(handle)) { if (i != next && !is_referent_non_null(_oop_handles.at(next))) { // Swap elements at index `next` and `i` - swap(&_oop_handles, next, i); + _oop_handles.at_swap(next, i); } next++; } @@ -967,7 +960,7 @@ int JVMCIRuntime::release_cleared_oop_handles() { if (handle != nullptr) { if (i != next && _oop_handles.at(next) == nullptr) { // Swap elements at index `next` and `i` - swap(&_oop_handles, next, i); + _oop_handles.at_swap(next, i); } next++; } @@ -1045,7 +1038,7 @@ JVMCIRuntime::JVMCIRuntime(JVMCIRuntime* next, int id, bool for_compile_broker) _id(id), _next(next), _metadata_handles(new MetadataHandles()), - _oop_handles(100, mtJVMCI), + _oop_handles(100), _num_attached_threads(0), _for_compile_broker(for_compile_broker) { diff --git a/src/hotspot/share/jvmci/jvmciRuntime.hpp b/src/hotspot/share/jvmci/jvmciRuntime.hpp index c12c18abd7836..fa1bca527d668 100644 --- a/src/hotspot/share/jvmci/jvmciRuntime.hpp +++ b/src/hotspot/share/jvmci/jvmciRuntime.hpp @@ 
-205,7 +205,7 @@ class JVMCIRuntime: public CHeapObj { // List of oop handles allocated via make_oop_handle. This is to support // destroying remaining oop handles when the JavaVM associated // with this runtime is shutdown. - GrowableArray _oop_handles; + GrowableArrayCHeap _oop_handles; // Number of threads attached or about to be attached to this runtime. // Must only be mutated under JVMCI_lock to facilitate safely moving diff --git a/src/hotspot/share/memory/arena.cpp b/src/hotspot/share/memory/arena.cpp index 44f1648921f6f..78210b201850e 100644 --- a/src/hotspot/share/memory/arena.cpp +++ b/src/hotspot/share/memory/arena.cpp @@ -357,3 +357,9 @@ bool Arena::contains( const void *ptr ) const { } return false; // Not in any Chunk, so not in Arena } + +#ifdef ASSERT +bool Arena_contains(const Arena* arena, const void* ptr) { + return arena->contains(ptr); +} +#endif // ASSERT diff --git a/src/hotspot/share/memory/arena.hpp b/src/hotspot/share/memory/arena.hpp index d5af068ffe1df..0c3518a009ab0 100644 --- a/src/hotspot/share/memory/arena.hpp +++ b/src/hotspot/share/memory/arena.hpp @@ -205,4 +205,8 @@ class Arena : public CHeapObjBase { #define NEW_ARENA_OBJ(arena, type) \ NEW_ARENA_ARRAY(arena, type, 1) +#ifdef ASSERT +bool Arena_contains(const Arena* arena, const void* ptr); +#endif // ASSERT + #endif // SHARE_MEMORY_ARENA_HPP diff --git a/src/hotspot/share/memory/heapInspection.cpp b/src/hotspot/share/memory/heapInspection.cpp index 262dc62d977de..f0529e75067f8 100644 --- a/src/hotspot/share/memory/heapInspection.cpp +++ b/src/hotspot/share/memory/heapInspection.cpp @@ -51,7 +51,7 @@ inline KlassInfoEntry::~KlassInfoEntry() { inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) { if (_subclasses == nullptr) { - _subclasses = new (mtServiceability) GrowableArray(4, mtServiceability); + _subclasses = new GrowableArrayCHeap(4); } _subclasses->append(cie); } @@ -279,7 +279,7 @@ int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) { 
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) : _cit(cit) { - _elements = new (mtServiceability) GrowableArray(_histo_initial_size, mtServiceability); + _elements = new GrowableArrayCHeap(_histo_initial_size); } KlassInfoHisto::~KlassInfoHisto() { @@ -614,10 +614,11 @@ void HeapInspection::heap_inspection(outputStream* st, WorkerThreads* workers) { class FindInstanceClosure : public ObjectClosure { private: Klass* _klass; - GrowableArray* _result; + GrowableArrayCHeap* _result; public: - FindInstanceClosure(Klass* k, GrowableArray* result) : _klass(k), _result(result) {}; + FindInstanceClosure(Klass* k, GrowableArrayCHeap* result) : + _klass(k), _result(result) {}; void do_object(oop obj) { if (obj->is_a(_klass)) { @@ -630,7 +631,7 @@ class FindInstanceClosure : public ObjectClosure { } }; -void HeapInspection::find_instances_at_safepoint(Klass* k, GrowableArray* result) { +void HeapInspection::find_instances_at_safepoint(Klass* k, GrowableArrayCHeap* result) { assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped"); assert(Heap_lock->is_locked(), "should have the Heap_lock"); diff --git a/src/hotspot/share/memory/heapInspection.hpp b/src/hotspot/share/memory/heapInspection.hpp index 4282e99a199f7..a43565175ca5b 100644 --- a/src/hotspot/share/memory/heapInspection.hpp +++ b/src/hotspot/share/memory/heapInspection.hpp @@ -58,7 +58,7 @@ class KlassInfoEntry: public CHeapObj { size_t _instance_words; int64_t _index; bool _do_print; // True if we should print this class when printing the class hierarchy. 
- GrowableArray* _subclasses; + GrowableArrayCHeap* _subclasses; public: KlassInfoEntry(Klass* k, KlassInfoEntry* next) : @@ -75,7 +75,7 @@ class KlassInfoEntry: public CHeapObj { void set_words(size_t wds) { _instance_words = wds; } void set_index(int64_t index) { _index = index; } int64_t index() const { return _index; } - GrowableArray* subclasses() const { return _subclasses; } + GrowableArrayCHeap* subclasses() const { return _subclasses; } void add_subclass(KlassInfoEntry* cie); void set_do_print(bool do_print) { _do_print = do_print; } bool do_print() const { return _do_print; } @@ -147,8 +147,8 @@ class KlassInfoHisto : public StackObj { private: static const int _histo_initial_size = 1000; KlassInfoTable *_cit; - GrowableArray* _elements; - GrowableArray* elements() const { return _elements; } + GrowableArrayCHeap* _elements; + GrowableArrayCHeap* elements() const { return _elements; } static int sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2); void print_elements(outputStream* st) const; bool is_selected(const char *col_name); @@ -202,7 +202,7 @@ class HeapInspection : public StackObj { public: void heap_inspection(outputStream* st, WorkerThreads* workers) NOT_SERVICES_RETURN; uintx populate_table(KlassInfoTable* cit, BoolObjectClosure* filter, WorkerThreads* workers) NOT_SERVICES_RETURN_(0); - static void find_instances_at_safepoint(Klass* k, GrowableArray* result) NOT_SERVICES_RETURN; + static void find_instances_at_safepoint(Klass* k, GrowableArrayCHeap* result) NOT_SERVICES_RETURN; }; // Parallel heap inspection task. 
Parallel inspection can fail due to diff --git a/src/hotspot/share/memory/resourceArea.hpp b/src/hotspot/share/memory/resourceArea.hpp index ba294e33effbb..000bce9c65255 100644 --- a/src/hotspot/share/memory/resourceArea.hpp +++ b/src/hotspot/share/memory/resourceArea.hpp @@ -27,7 +27,7 @@ #include "memory/allocation.hpp" #include "memory/arena.hpp" -#include "runtime/javaThread.hpp" +#include "runtime/thread.hpp" // The resource area holds temporary data structures in the VM. // The actual allocation areas are thread local. Typical usage: diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp index 25ec68cf8fec5..34f6b3ca67c33 100644 --- a/src/hotspot/share/memory/universe.cpp +++ b/src/hotspot/share/memory/universe.cpp @@ -503,7 +503,7 @@ void Universe::fixup_mirrors(TRAPS) { InstanceMirrorKlass::init_offset_of_static_fields(); } - GrowableArray * list = java_lang_Class::fixup_mirror_list(); + GrowableArrayCHeap* list = java_lang_Class::fixup_mirror_list(); int list_length = list->length(); for (int i = 0; i < list_length; i++) { Klass* k = list->at(i); diff --git a/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp b/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp index df7e6de6c3363..19421441059e0 100644 --- a/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp +++ b/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp @@ -61,11 +61,11 @@ class CodeBlobCollector : StackObj { private: - GrowableArray* _code_blobs; // collected blobs - int _pos; // iterator position + GrowableArrayCHeap* _code_blobs; // collected blobs + int _pos; // iterator position // used during a collection - static GrowableArray* _global_code_blobs; + static GrowableArrayCHeap* _global_code_blobs; static void do_blob(CodeBlob* cb); static void do_vtable_stub(VtableStub* vs); public: @@ -107,7 +107,7 @@ class CodeBlobCollector : StackObj { }; // used during collection -GrowableArray* CodeBlobCollector::_global_code_blobs; +GrowableArrayCHeap* 
CodeBlobCollector::_global_code_blobs; // called for each CodeBlob in the CodeCache @@ -173,7 +173,7 @@ void CodeBlobCollector::collect() { assert(_global_code_blobs == nullptr, "checking"); // create the global list - _global_code_blobs = new (mtServiceability) GrowableArray(50, mtServiceability); + _global_code_blobs = new GrowableArrayCHeap(50); // iterate over the stub code descriptors and put them in the list first. for (StubCodeDesc* desc = StubCodeDesc::first(); desc != nullptr; desc = StubCodeDesc::next(desc)) { diff --git a/src/hotspot/share/prims/jvmtiDeferredUpdates.cpp b/src/hotspot/share/prims/jvmtiDeferredUpdates.cpp index bddbdd4f9ffef..22bddd923f1bc 100644 --- a/src/hotspot/share/prims/jvmtiDeferredUpdates.cpp +++ b/src/hotspot/share/prims/jvmtiDeferredUpdates.cpp @@ -62,7 +62,7 @@ int JvmtiDeferredUpdates::get_and_reset_relock_count_after_wait(JavaThread* jt) void JvmtiDeferredUpdates::delete_updates_for_frame(JavaThread* jt, intptr_t* frame_id) { JvmtiDeferredUpdates* updates = jt->deferred_updates(); if (updates != nullptr) { - GrowableArray* list = updates->deferred_locals(); + GrowableArrayCHeap* list = updates->deferred_locals(); assert(list->length() > 0, "Updates holder not deleted"); int i = 0; do { diff --git a/src/hotspot/share/prims/jvmtiDeferredUpdates.hpp b/src/hotspot/share/prims/jvmtiDeferredUpdates.hpp index f23f942780aaf..af60f65a7f23a 100644 --- a/src/hotspot/share/prims/jvmtiDeferredUpdates.hpp +++ b/src/hotspot/share/prims/jvmtiDeferredUpdates.hpp @@ -72,7 +72,7 @@ class jvmtiDeferredLocalVariableSet : public CHeapObj { int _bci; intptr_t* _id; int _vframe_id; - GrowableArray* _locals; + GrowableArrayCHeap* _locals; bool _objects_are_deoptimized; void update_value(StackValueCollection* locals, BasicType type, int index, jvalue value); @@ -116,7 +116,7 @@ class JvmtiDeferredUpdates : public CHeapObj { int _relock_count_after_wait; // Deferred updates of locals, expressions, and monitors - GrowableArray _deferred_locals_updates; + 
GrowableArrayCHeap _deferred_locals_updates; void inc_relock_count_after_wait() { _relock_count_after_wait++; @@ -128,19 +128,19 @@ class JvmtiDeferredUpdates : public CHeapObj { return result; } - GrowableArray* deferred_locals() { return &_deferred_locals_updates; } + GrowableArrayCHeap* deferred_locals() { return &_deferred_locals_updates; } JvmtiDeferredUpdates() : _relock_count_after_wait(0), - _deferred_locals_updates((AnyObj::set_allocation_type((address) &_deferred_locals_updates, - AnyObj::C_HEAP), 1), mtCompiler) { } + // TODO ok to remove the set_allocation_type? + _deferred_locals_updates(1) { } public: ~JvmtiDeferredUpdates(); static void create_for(JavaThread* thread); - static GrowableArray* deferred_locals(JavaThread* jt) { + static GrowableArrayCHeap* deferred_locals(JavaThread* jt) { return jt->deferred_updates() == nullptr ? nullptr : jt->deferred_updates()->deferred_locals(); } diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp index e02820a2beb7d..a8ba36eaad737 100644 --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -1354,8 +1354,8 @@ JvmtiEnv::GetOwnedMonitorInfo(jthread thread, jint* owned_monitor_count_ptr, job HandleMark hm(calling_thread); // growable array of jvmti monitors info on the C-heap - GrowableArray *owned_monitors_list = - new (mtServiceability) GrowableArray(1, mtServiceability); + GrowableArrayCHeap* owned_monitors_list = + new GrowableArrayCHeap(1); JvmtiVTMSTransitionDisabler disabler(thread); ThreadsListHandle tlh(calling_thread); @@ -1427,8 +1427,8 @@ JvmtiEnv::GetOwnedMonitorStackDepthInfo(jthread thread, jint* monitor_info_count HandleMark hm(calling_thread); // growable array of jvmti monitors info on the C-heap - GrowableArray *owned_monitors_list = - new (mtServiceability) GrowableArray(1, mtServiceability); + GrowableArrayCHeap* owned_monitors_list = + new GrowableArrayCHeap(1); JvmtiVTMSTransitionDisabler disabler(thread); 
ThreadsListHandle tlh(calling_thread); diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp index 9d6296ee31603..552bdd6d584bd 100644 --- a/src/hotspot/share/prims/jvmtiEnvBase.cpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp @@ -952,7 +952,7 @@ JvmtiEnvBase::get_current_contended_monitor(JavaThread *calling_thread, JavaThre jvmtiError JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_thread, - GrowableArray *owned_monitors_list) { + GrowableArrayCHeap* owned_monitors_list) { // Note: // calling_thread is the thread that requested the list of monitors for java_thread. // java_thread is the thread owning the monitors. @@ -999,7 +999,7 @@ JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_th jvmtiError JvmtiEnvBase::get_owned_monitors(JavaThread* calling_thread, JavaThread* java_thread, javaVFrame* jvf, - GrowableArray *owned_monitors_list) { + GrowableArrayCHeap *owned_monitors_list) { jvmtiError err = JVMTI_ERROR_NONE; Thread *current_thread = Thread::current(); assert(java_thread->is_handshake_safe_for(current_thread), @@ -1027,7 +1027,8 @@ JvmtiEnvBase::get_owned_monitors(JavaThread* calling_thread, JavaThread* java_th // Save JNI local handles for any objects that this frame owns. 
jvmtiError JvmtiEnvBase::get_locked_objects_in_frame(JavaThread* calling_thread, JavaThread* java_thread, - javaVFrame *jvf, GrowableArray* owned_monitors_list, jint stack_depth) { + javaVFrame *jvf, + GrowableArrayCHeap* owned_monitors_list, jint stack_depth) { jvmtiError err = JVMTI_ERROR_NONE; Thread* current_thread = Thread::current(); ResourceMark rm(current_thread); @@ -1814,7 +1815,7 @@ JvmtiEnvBase::resume_thread(oop thread_oop, JavaThread* java_thread, bool single ResourceTracker::ResourceTracker(JvmtiEnv* env) { _env = env; - _allocations = new (mtServiceability) GrowableArray(20, mtServiceability); + _allocations = new GrowableArrayCHeap(20); _failed = false; } ResourceTracker::~ResourceTracker() { diff --git a/src/hotspot/share/prims/jvmtiEnvBase.hpp b/src/hotspot/share/prims/jvmtiEnvBase.hpp index 30f8183924fe6..3beb002c5b25f 100644 --- a/src/hotspot/share/prims/jvmtiEnvBase.hpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.hpp @@ -359,7 +359,7 @@ class JvmtiEnvBase : public CHeapObj { jvmtiError get_locked_objects_in_frame(JavaThread *calling_thread, JavaThread* java_thread, javaVFrame *jvf, - GrowableArray* owned_monitors_list, + GrowableArrayCHeap* owned_monitors_list, jint depth); public: static javaVFrame* jvf_for_thread_and_depth(JavaThread* java_thread, jint depth); @@ -420,9 +420,9 @@ class JvmtiEnvBase : public CHeapObj { jvmtiError get_current_contended_monitor(JavaThread* calling_thread, JavaThread* java_thread, jobject* monitor_ptr, bool is_virtual); jvmtiError get_owned_monitors(JavaThread* calling_thread, JavaThread* java_thread, - GrowableArray *owned_monitors_list); + GrowableArrayCHeap* owned_monitors_list); jvmtiError get_owned_monitors(JavaThread* calling_thread, JavaThread* java_thread, javaVFrame* jvf, - GrowableArray *owned_monitors_list); + GrowableArrayCHeap* owned_monitors_list); static jvmtiError check_top_frame(Thread* current_thread, JavaThread* java_thread, jvalue value, TosState tos, Handle* ret_ob_h); jvmtiError 
force_early_return(jthread thread, jvalue value, TosState tos); @@ -543,11 +543,11 @@ class GetOwnedMonitorInfoClosure : public JvmtiHandshakeClosure { private: JavaThread* _calling_thread; JvmtiEnv *_env; - GrowableArray *_owned_monitors_list; + GrowableArrayCHeap* _owned_monitors_list; public: GetOwnedMonitorInfoClosure(JavaThread* calling_thread, JvmtiEnv* env, - GrowableArray* owned_monitor_list) + GrowableArrayCHeap* owned_monitor_list) : JvmtiHandshakeClosure("GetOwnedMonitorInfo"), _calling_thread(calling_thread), _env(env), @@ -769,13 +769,13 @@ class VirtualThreadGetOwnedMonitorInfoClosure : public HandshakeClosure { private: JvmtiEnv *_env; Handle _vthread_h; - GrowableArray *_owned_monitors_list; + GrowableArrayCHeap* _owned_monitors_list; jvmtiError _result; public: VirtualThreadGetOwnedMonitorInfoClosure(JvmtiEnv* env, Handle vthread_h, - GrowableArray* owned_monitors_list) + GrowableArrayCHeap* owned_monitors_list) : HandshakeClosure("VirtualThreadGetOwnedMonitorInfo"), _env(env), _vthread_h(vthread_h), @@ -842,7 +842,7 @@ class VirtualThreadGetThreadStateClosure : public HandshakeClosure { class ResourceTracker : public StackObj { private: JvmtiEnv* _env; - GrowableArray *_allocations; + GrowableArrayCHeap *_allocations; bool _failed; public: ResourceTracker(JvmtiEnv* env); @@ -857,13 +857,13 @@ class ResourceTracker : public StackObj { class JvmtiMonitorClosure: public MonitorClosure { private: JavaThread *_calling_thread; - GrowableArray *_owned_monitors_list; + GrowableArrayCHeap* _owned_monitors_list; jvmtiError _error; JvmtiEnvBase *_env; public: JvmtiMonitorClosure(JavaThread *calling_thread, - GrowableArray *owned_monitors, + GrowableArrayCHeap* owned_monitors, JvmtiEnvBase *env) { _calling_thread = calling_thread; _owned_monitors_list = owned_monitors; diff --git a/src/hotspot/share/prims/jvmtiEnvThreadState.cpp b/src/hotspot/share/prims/jvmtiEnvThreadState.cpp index 6f2891fdc18cd..b8d33eb520cb5 100644 --- 
a/src/hotspot/share/prims/jvmtiEnvThreadState.cpp +++ b/src/hotspot/share/prims/jvmtiEnvThreadState.cpp @@ -95,7 +95,7 @@ JvmtiFramePops::clear_to(JvmtiFramePop& fp) { // JvmtiFramePops::JvmtiFramePops() { - _pops = new (mtServiceability) GrowableArray (2, mtServiceability); + _pops = new GrowableArrayCHeap (2); } JvmtiFramePops::~JvmtiFramePops() { diff --git a/src/hotspot/share/prims/jvmtiEnvThreadState.hpp b/src/hotspot/share/prims/jvmtiEnvThreadState.hpp index 544d4eff5f1eb..f581c16de2183 100644 --- a/src/hotspot/share/prims/jvmtiEnvThreadState.hpp +++ b/src/hotspot/share/prims/jvmtiEnvThreadState.hpp @@ -78,7 +78,7 @@ class JvmtiFramePop { class JvmtiFramePops : public CHeapObj { private: - GrowableArray* _pops; + GrowableArrayCHeap* _pops; // should only be used by JvmtiEventControllerPrivate // to insure they only occur at safepoints. diff --git a/src/hotspot/share/prims/jvmtiEventController.hpp b/src/hotspot/share/prims/jvmtiEventController.hpp index 84070a3098c2c..0523b1a84cafd 100644 --- a/src/hotspot/share/prims/jvmtiEventController.hpp +++ b/src/hotspot/share/prims/jvmtiEventController.hpp @@ -35,6 +35,7 @@ class JvmtiEventController; class JvmtiEnvThreadState; class JvmtiFramePop; class JvmtiEnvBase; +class JvmtiThreadState; // Extension event support diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp index eee5b2a70fa5d..f26c36e4c4e5b 100644 --- a/src/hotspot/share/prims/jvmtiExport.cpp +++ b/src/hotspot/share/prims/jvmtiExport.cpp @@ -3053,7 +3053,7 @@ JvmtiDynamicCodeEventCollector::~JvmtiDynamicCodeEventCollector() { // register a stub void JvmtiDynamicCodeEventCollector::register_stub(const char* name, address start, address end) { if (_code_blobs == nullptr) { - _code_blobs = new (mtServiceability) GrowableArray(1, mtServiceability); + _code_blobs = new GrowableArrayCHeap(1); } _code_blobs->append(new JvmtiCodeBlobDesc(name, start, end)); } @@ -3082,7 +3082,7 @@ void 
JvmtiObjectAllocEventCollector::generate_call_for_allocated() { void JvmtiObjectAllocEventCollector::record_allocation(oop obj) { assert(is_enabled(), "Object alloc event collector is not enabled"); if (_allocated == nullptr) { - _allocated = new (mtServiceability) GrowableArray(1, mtServiceability); + _allocated = new GrowableArrayCHeap(1); } _allocated->push(OopHandle(JvmtiExport::jvmti_oop_storage(), obj)); } diff --git a/src/hotspot/share/prims/jvmtiExport.hpp b/src/hotspot/share/prims/jvmtiExport.hpp index 263104f74f86a..94b3c2c84e177 100644 --- a/src/hotspot/share/prims/jvmtiExport.hpp +++ b/src/hotspot/share/prims/jvmtiExport.hpp @@ -502,7 +502,7 @@ class JvmtiEventCollector : public StackObj { class JvmtiDynamicCodeEventCollector : public JvmtiEventCollector { private: - GrowableArray* _code_blobs; // collected code blob events + GrowableArrayCHeap* _code_blobs; // collected code blob events friend class JvmtiExport; void register_stub(const char* name, address start, address end); @@ -519,7 +519,7 @@ class JvmtiDynamicCodeEventCollector : public JvmtiEventCollector { // class JvmtiObjectAllocEventCollector : public JvmtiEventCollector { protected: - GrowableArray* _allocated; // field to record collected allocated object oop. + GrowableArrayCHeap* _allocated; // field to record collected allocated object oop. bool _enable; // This flag is enabled in constructor if set up in the thread state // and disabled in destructor before posting event. 
To avoid // collection of objects allocated while running java code inside diff --git a/src/hotspot/share/prims/jvmtiExtensions.cpp b/src/hotspot/share/prims/jvmtiExtensions.cpp index b40ad7ddbd108..f2f972dffd769 100644 --- a/src/hotspot/share/prims/jvmtiExtensions.cpp +++ b/src/hotspot/share/prims/jvmtiExtensions.cpp @@ -32,10 +32,10 @@ #include "runtime/jniHandles.inline.hpp" // the list of extension functions -GrowableArray* JvmtiExtensions::_ext_functions; +GrowableArrayCHeap* JvmtiExtensions::_ext_functions; // the list of extension events -GrowableArray* JvmtiExtensions::_ext_events; +GrowableArrayCHeap* JvmtiExtensions::_ext_events; // @@ -170,8 +170,8 @@ static jvmtiError JNICALL GetCarrierThread(const jvmtiEnv* env, ...) { // event. The function and the event are registered here. // void JvmtiExtensions::register_extensions() { - _ext_functions = new (mtServiceability) GrowableArray(1, mtServiceability); - _ext_events = new (mtServiceability) GrowableArray(1, mtServiceability); + _ext_functions = new GrowableArrayCHeap(1); + _ext_events = new GrowableArrayCHeap(1); // Register our extension functions. 
static jvmtiParamInfo func_params0[] = { diff --git a/src/hotspot/share/prims/jvmtiExtensions.hpp b/src/hotspot/share/prims/jvmtiExtensions.hpp index d44c6dd4f5188..8d1f07daefbcf 100644 --- a/src/hotspot/share/prims/jvmtiExtensions.hpp +++ b/src/hotspot/share/prims/jvmtiExtensions.hpp @@ -38,8 +38,8 @@ class JvmtiExtensions : public AllStatic { private: - static GrowableArray* _ext_functions; - static GrowableArray* _ext_events; + static GrowableArrayCHeap* _ext_functions; + static GrowableArrayCHeap* _ext_events; public: // register extensions function diff --git a/src/hotspot/share/prims/jvmtiImpl.cpp b/src/hotspot/share/prims/jvmtiImpl.cpp index 21122539af847..083a0e41f4ba7 100644 --- a/src/hotspot/share/prims/jvmtiImpl.cpp +++ b/src/hotspot/share/prims/jvmtiImpl.cpp @@ -139,7 +139,7 @@ GrowableCache::~GrowableCache() { void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) { _this_obj = this_obj; _listener_fun = listener_fun; - _elements = new (mtServiceability) GrowableArray(5, mtServiceability); + _elements = new GrowableArrayCHeap(5); recache(); } diff --git a/src/hotspot/share/prims/jvmtiImpl.hpp b/src/hotspot/share/prims/jvmtiImpl.hpp index c93abe5eedf88..ad0a9abbfbd5e 100644 --- a/src/hotspot/share/prims/jvmtiImpl.hpp +++ b/src/hotspot/share/prims/jvmtiImpl.hpp @@ -78,7 +78,7 @@ class GrowableCache { void *_this_obj; // Array of elements in the collection - GrowableArray *_elements; + GrowableArrayCHeap *_elements; // Parallel array of cached values address *_cache; diff --git a/src/hotspot/share/prims/jvmtiRawMonitor.cpp b/src/hotspot/share/prims/jvmtiRawMonitor.cpp index 115ece68ca8f2..eac45b9328807 100644 --- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp +++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp @@ -36,8 +36,8 @@ JvmtiRawMonitor::QNode::QNode(Thread* thread) : _next(nullptr), _prev(nullptr), _notified(0), _t_state(TS_RUN) { } -GrowableArray* JvmtiPendingMonitors::_monitors = - new (mtServiceability) 
GrowableArray(1, mtServiceability); +GrowableArrayCHeap* JvmtiPendingMonitors::_monitors = + new GrowableArrayCHeap(1); void JvmtiPendingMonitors::transition_raw_monitors() { assert((Threads::number_of_threads()==1), diff --git a/src/hotspot/share/prims/jvmtiRawMonitor.hpp b/src/hotspot/share/prims/jvmtiRawMonitor.hpp index 2adc730ff9f39..654d16b03f792 100644 --- a/src/hotspot/share/prims/jvmtiRawMonitor.hpp +++ b/src/hotspot/share/prims/jvmtiRawMonitor.hpp @@ -133,9 +133,10 @@ class JvmtiRawMonitor : public CHeapObj { class JvmtiPendingMonitors : public AllStatic { private: - static GrowableArray* _monitors; // Cache raw monitor enter + // Cache raw monitor enter + static GrowableArrayCHeap* _monitors; - inline static GrowableArray* monitors() { return _monitors; } + inline static GrowableArrayCHeap* monitors() { return _monitors; } static void dispose() { delete monitors(); diff --git a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp index c03c4b48538b1..6b772c9637cd8 100644 --- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp +++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp @@ -100,12 +100,12 @@ static inline InstanceKlass* get_ik(jclass def) { // Parallel constant pool merging leads to indeterminate constant pools. 
void VM_RedefineClasses::lock_classes() { JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); - GrowableArray* redef_classes = state->get_classes_being_redefined(); + GrowableArrayCHeap* redef_classes = state->get_classes_being_redefined(); MonitorLocker ml(RedefineClasses_lock); if (redef_classes == nullptr) { - redef_classes = new (mtClass) GrowableArray(1, mtClass); + redef_classes = new GrowableArrayCHeap(1); state->set_classes_being_redefined(redef_classes); } @@ -141,7 +141,7 @@ void VM_RedefineClasses::lock_classes() { void VM_RedefineClasses::unlock_classes() { JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); - GrowableArray* redef_classes = state->get_classes_being_redefined(); + GrowableArrayCHeap* redef_classes = state->get_classes_being_redefined(); assert(redef_classes != nullptr, "_classes_being_redefined is not allocated"); MonitorLocker ml(RedefineClasses_lock); diff --git a/src/hotspot/share/prims/jvmtiTagMap.cpp b/src/hotspot/share/prims/jvmtiTagMap.cpp index 77bbbbfa77cc5..640911b55c13a 100644 --- a/src/hotspot/share/prims/jvmtiTagMap.cpp +++ b/src/hotspot/share/prims/jvmtiTagMap.cpp @@ -392,7 +392,7 @@ class ClassFieldMap: public CHeapObj { }; // list of field descriptors - GrowableArray* _fields; + GrowableArrayCHeap* _fields; // constructor ClassFieldMap(); @@ -413,8 +413,7 @@ class ClassFieldMap: public CHeapObj { }; ClassFieldMap::ClassFieldMap() { - _fields = new (mtServiceability) - GrowableArray(initial_field_count, mtServiceability); + _fields = new GrowableArrayCHeap(initial_field_count); } ClassFieldMap::~ClassFieldMap() { @@ -495,7 +494,7 @@ class JvmtiCachedClassFieldMap : public CHeapObj { JvmtiCachedClassFieldMap(ClassFieldMap* field_map); ~JvmtiCachedClassFieldMap(); - static GrowableArray* _class_list; + static GrowableArrayCHeap* _class_list; static void add_to_class_list(InstanceKlass* ik); public: @@ -511,7 +510,7 @@ class JvmtiCachedClassFieldMap : public CHeapObj { 
static int cached_field_map_count(); }; -GrowableArray* JvmtiCachedClassFieldMap::_class_list; +GrowableArrayCHeap* JvmtiCachedClassFieldMap::_class_list; JvmtiCachedClassFieldMap::JvmtiCachedClassFieldMap(ClassFieldMap* field_map) { _field_map = field_map; @@ -547,8 +546,7 @@ bool ClassFieldMapCacheMark::_is_active; // record that the given InstanceKlass is caching a field map void JvmtiCachedClassFieldMap::add_to_class_list(InstanceKlass* ik) { if (_class_list == nullptr) { - _class_list = new (mtServiceability) - GrowableArray(initial_class_count, mtServiceability); + _class_list = new GrowableArrayCHeap(initial_class_count); } _class_list->push(ik); } @@ -1216,8 +1214,8 @@ class TagObjectCollector : public JvmtiTagMapKeyClosure { jint _tag_count; bool _some_dead_found; - GrowableArray* _object_results; // collected objects (JNI weak refs) - GrowableArray* _tag_results; // collected tags + GrowableArrayCHeap* _object_results; // collected objects (JNI weak refs) + GrowableArrayCHeap* _tag_results; // collected tags public: TagObjectCollector(JvmtiEnv* env, const jlong* tags, jint tag_count) : @@ -1226,8 +1224,8 @@ class TagObjectCollector : public JvmtiTagMapKeyClosure { _tags((jlong*)tags), _tag_count(tag_count), _some_dead_found(false), - _object_results(new (mtServiceability) GrowableArray(1, mtServiceability)), - _tag_results(new (mtServiceability) GrowableArray(1, mtServiceability)) { } + _object_results(new GrowableArrayCHeap(1)), + _tag_results(new GrowableArrayCHeap(1)) { } ~TagObjectCollector() { delete _object_results; @@ -1449,13 +1447,13 @@ class CallbackInvoker : AllStatic { // context needed for all heap walks static JvmtiTagMap* _tag_map; static const void* _user_data; - static GrowableArray* _visit_stack; + static GrowableArrayCHeap* _visit_stack; static JVMTIBitSet* _bitset; // accessors - static JvmtiTagMap* tag_map() { return _tag_map; } - static const void* user_data() { return _user_data; } - static GrowableArray* visit_stack() { return 
_visit_stack; } + static JvmtiTagMap* tag_map() { return _tag_map; } + static const void* user_data() { return _user_data; } + static GrowableArrayCHeap* visit_stack() { return _visit_stack; } // if the object hasn't been visited then push it onto the visit stack // so that it will be visited later @@ -1489,14 +1487,14 @@ class CallbackInvoker : AllStatic { public: // initialize for basic mode static void initialize_for_basic_heap_walk(JvmtiTagMap* tag_map, - GrowableArray* visit_stack, + GrowableArrayCHeap* visit_stack, const void* user_data, BasicHeapWalkContext context, JVMTIBitSet* bitset); // initialize for advanced mode static void initialize_for_advanced_heap_walk(JvmtiTagMap* tag_map, - GrowableArray* visit_stack, + GrowableArrayCHeap* visit_stack, const void* user_data, AdvancedHeapWalkContext context, JVMTIBitSet* bitset); @@ -1531,12 +1529,12 @@ BasicHeapWalkContext CallbackInvoker::_basic_context; AdvancedHeapWalkContext CallbackInvoker::_advanced_context; JvmtiTagMap* CallbackInvoker::_tag_map; const void* CallbackInvoker::_user_data; -GrowableArray* CallbackInvoker::_visit_stack; +GrowableArrayCHeap* CallbackInvoker::_visit_stack; JVMTIBitSet* CallbackInvoker::_bitset; // initialize for basic heap walk (IterateOverReachableObjects et al) void CallbackInvoker::initialize_for_basic_heap_walk(JvmtiTagMap* tag_map, - GrowableArray* visit_stack, + GrowableArrayCHeap* visit_stack, const void* user_data, BasicHeapWalkContext context, JVMTIBitSet* bitset) { @@ -1551,7 +1549,7 @@ void CallbackInvoker::initialize_for_basic_heap_walk(JvmtiTagMap* tag_map, // initialize for advanced heap walk (FollowReferences) void CallbackInvoker::initialize_for_advanced_heap_walk(JvmtiTagMap* tag_map, - GrowableArray* visit_stack, + GrowableArrayCHeap* visit_stack, const void* user_data, AdvancedHeapWalkContext context, JVMTIBitSet* bitset) { @@ -2385,7 +2383,7 @@ class VM_HeapWalkOperation: public VM_Operation { bool _is_advanced_heap_walk; // indicates FollowReferences 
JvmtiTagMap* _tag_map; Handle _initial_object; - GrowableArray* _visit_stack; // the visit stack + GrowableArrayCHeap* _visit_stack; // the visit stack JVMTIBitSet _bitset; @@ -2398,8 +2396,8 @@ class VM_HeapWalkOperation: public VM_Operation { bool _reporting_primitive_array_values; bool _reporting_string_values; - GrowableArray* create_visit_stack() { - return new (mtServiceability) GrowableArray(initial_visit_stack_size, mtServiceability); + GrowableArrayCHeap* create_visit_stack() { + return new GrowableArrayCHeap(initial_visit_stack_size); } // accessors @@ -2413,7 +2411,7 @@ class VM_HeapWalkOperation: public VM_Operation { bool is_reporting_primitive_array_values() const { return _reporting_primitive_array_values; } bool is_reporting_string_values() const { return _reporting_string_values; } - GrowableArray* visit_stack() const { return _visit_stack; } + GrowableArrayCHeap* visit_stack() const { return _visit_stack; } // iterate over the various object types inline bool iterate_over_array(oop o); diff --git a/src/hotspot/share/prims/jvmtiThreadState.hpp b/src/hotspot/share/prims/jvmtiThreadState.hpp index 4bba0691e40d6..75c0960cd256d 100644 --- a/src/hotspot/share/prims/jvmtiThreadState.hpp +++ b/src/hotspot/share/prims/jvmtiThreadState.hpp @@ -209,7 +209,7 @@ class JvmtiThreadState : public CHeapObj { // info to the class file load hook event handler. Klass* _class_being_redefined; JvmtiClassLoadKind _class_load_kind; - GrowableArray* _classes_being_redefined; + GrowableArrayCHeap* _classes_being_redefined; // This is only valid when is_interp_only_mode() returns true int _cur_stack_depth; @@ -382,11 +382,11 @@ class JvmtiThreadState : public CHeapObj { } // Get the classes that are currently being redefined by this thread. 
- inline GrowableArray* get_classes_being_redefined() { + inline GrowableArrayCHeap* get_classes_being_redefined() { return _classes_being_redefined; } - inline void set_classes_being_redefined(GrowableArray* redef_classes) { + inline void set_classes_being_redefined(GrowableArrayCHeap* redef_classes) { _classes_being_redefined = redef_classes; } diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index ac83e1cf9d3ce..cbe355c07d9d7 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -118,7 +118,7 @@ SystemProperty *Arguments::_java_class_path = nullptr; SystemProperty *Arguments::_jdk_boot_class_path_append = nullptr; SystemProperty *Arguments::_vm_info = nullptr; -GrowableArray *Arguments::_patch_mod_prefix = nullptr; +GrowableArrayCHeap *Arguments::_patch_mod_prefix = nullptr; PathString *Arguments::_boot_class_path = nullptr; bool Arguments::_has_jimage = false; @@ -2837,7 +2837,7 @@ void Arguments::add_patch_mod_prefix(const char* module_name, const char* path, // Create GrowableArray lazily, only if --patch-module has been specified if (_patch_mod_prefix == nullptr) { - _patch_mod_prefix = new (mtArguments) GrowableArray(10, mtArguments); + _patch_mod_prefix = new GrowableArrayCHeap(10); } _patch_mod_prefix->push(new ModulePatchPath(module_name, path)); diff --git a/src/hotspot/share/runtime/arguments.hpp b/src/hotspot/share/runtime/arguments.hpp index 8251db3d0d59a..ec850b57d4685 100644 --- a/src/hotspot/share/runtime/arguments.hpp +++ b/src/hotspot/share/runtime/arguments.hpp @@ -211,7 +211,7 @@ class Arguments : AllStatic { // --patch-module=module=()* // Each element contains the associated module name, path // string pair as specified to --patch-module. 
- static GrowableArray* _patch_mod_prefix; + static GrowableArrayCHeap* _patch_mod_prefix; // The constructed value of the system class path after // argument processing and JVMTI OnLoad additions via @@ -481,7 +481,7 @@ class Arguments : AllStatic { _jdk_boot_class_path_append->append_value(value); } - static GrowableArray* get_patch_mod_prefix() { return _patch_mod_prefix; } + static GrowableArrayCHeap* get_patch_mod_prefix() { return _patch_mod_prefix; } static char* get_boot_class_path() { return _boot_class_path->value(); } static bool has_jimage() { return _has_jimage; } diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp index f4893bae5db95..df34b0a119e74 100644 --- a/src/hotspot/share/runtime/frame.cpp +++ b/src/hotspot/share/runtime/frame.cpp @@ -1248,18 +1248,18 @@ extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue); class FrameValuesOopClosure: public OopClosure, public DerivedOopClosure { private: - GrowableArray* _oops; - GrowableArray* _narrow_oops; - GrowableArray* _base; - GrowableArray* _derived; + GrowableArrayCHeap* _oops; + GrowableArrayCHeap* _narrow_oops; + GrowableArrayCHeap* _base; + GrowableArrayCHeap* _derived; NoSafepointVerifier nsv; public: FrameValuesOopClosure() { - _oops = new (mtThread) GrowableArray(100, mtThread); - _narrow_oops = new (mtThread) GrowableArray(100, mtThread); - _base = new (mtThread) GrowableArray(100, mtThread); - _derived = new (mtThread) GrowableArray(100, mtThread); + _oops = new GrowableArrayCHeap(100); + _narrow_oops = new GrowableArrayCHeap(100); + _base = new GrowableArrayCHeap(100); + _derived = new GrowableArrayCHeap(100); } ~FrameValuesOopClosure() { delete _oops; diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp index 98b2a569b7239..fcfa015e09683 100644 --- a/src/hotspot/share/runtime/javaThread.cpp +++ b/src/hotspot/share/runtime/javaThread.cpp @@ -1364,7 +1364,7 @@ void JavaThread::oops_do_no_frames(OopClosure* 
f, CodeBlobClosure* cf) { assert(vframe_array_head() == nullptr, "deopt in progress at a safepoint!"); // If we have deferred set_locals there might be oops waiting to be // written - GrowableArray* list = JvmtiDeferredUpdates::deferred_locals(this); + GrowableArrayCHeap* list = JvmtiDeferredUpdates::deferred_locals(this); if (list != nullptr) { for (int i = 0; i < list->length(); i++) { list->at(i)->oops_do(f); diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp index 094fd2509a05b..c9c53254caa5d 100644 --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -115,6 +115,7 @@ class Mutex; struct jvmtiTimerInfo; template class GrowableArray; +template class GrowableArrayCHeap; // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose diff --git a/src/hotspot/share/runtime/perfData.cpp b/src/hotspot/share/runtime/perfData.cpp index b195274dc016c..7385f88e2dd0d 100644 --- a/src/hotspot/share/runtime/perfData.cpp +++ b/src/hotspot/share/runtime/perfData.cpp @@ -492,12 +492,12 @@ PerfLongCounter* PerfDataManager::create_long_counter(CounterNS ns, PerfDataList::PerfDataList(int length) { - _set = new (mtInternal) PerfDataArray(length, mtInternal); + _set = new PerfDataArray(length); } PerfDataList::PerfDataList(PerfDataList* p) { - _set = new (mtInternal) PerfDataArray(p->length(), mtInternal); + _set = new PerfDataArray(p->length()); _set->appendAll(p->get_impl()); } diff --git a/src/hotspot/share/runtime/perfData.hpp b/src/hotspot/share/runtime/perfData.hpp index 103e2698d9b9e..239ca3260c555 100644 --- a/src/hotspot/share/runtime/perfData.hpp +++ b/src/hotspot/share/runtime/perfData.hpp @@ -30,7 +30,7 @@ #include "runtime/perfMemory.hpp" #include "runtime/timer.hpp" -template class GrowableArray; +template class GrowableArrayCHeap; /* jvmstat global and subsystem counter name space - enumeration value * serve as an index into the PerfDataManager::_name_space[] array @@ -575,7 +575,7 @@ class 
PerfDataList : public CHeapObj { private: // GrowableArray implementation - typedef GrowableArray PerfDataArray; + typedef GrowableArrayCHeap PerfDataArray; PerfDataArray* _set; diff --git a/src/hotspot/share/runtime/reflectionUtils.cpp b/src/hotspot/share/runtime/reflectionUtils.cpp index 3518e1c050b40..f104597241d67 100644 --- a/src/hotspot/share/runtime/reflectionUtils.cpp +++ b/src/hotspot/share/runtime/reflectionUtils.cpp @@ -71,8 +71,8 @@ bool KlassStream::eos() { int FieldStream::length() { return _klass->java_fields_count(); } -GrowableArray *FilteredFieldsMap::_filtered_fields = - new (mtServiceability) GrowableArray(3, mtServiceability); +GrowableArrayCHeap *FilteredFieldsMap::_filtered_fields = + new GrowableArrayCHeap(3); void FilteredFieldsMap::initialize() { diff --git a/src/hotspot/share/runtime/reflectionUtils.hpp b/src/hotspot/share/runtime/reflectionUtils.hpp index 93bb541467aa5..2921b616fc087 100644 --- a/src/hotspot/share/runtime/reflectionUtils.hpp +++ b/src/hotspot/share/runtime/reflectionUtils.hpp @@ -173,7 +173,7 @@ class FilteredField : public CHeapObj { class FilteredFieldsMap : AllStatic { private: - static GrowableArray *_filtered_fields; + static GrowableArrayCHeap *_filtered_fields; public: static void initialize(); static bool is_filtered_field(Klass* klass, int field_offset) { diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp index 851e5139f8aad..2ac96c9c5380c 100644 --- a/src/hotspot/share/runtime/thread.cpp +++ b/src/hotspot/share/runtime/thread.cpp @@ -79,7 +79,7 @@ Thread::Thread() { set_resource_area(new (mtThread)ResourceArea()); DEBUG_ONLY(_current_resource_mark = nullptr;) set_handle_area(new (mtThread) HandleArea(nullptr)); - set_metadata_handles(new (mtClass) GrowableArray(30, mtClass)); + set_metadata_handles(new GrowableArrayCHeap(30)); set_last_handle_mark(nullptr); DEBUG_ONLY(_missed_ic_stub_refill_verifier = nullptr); diff --git a/src/hotspot/share/runtime/thread.hpp 
b/src/hotspot/share/runtime/thread.hpp index cc431e8c90022..08d5cb41b2b65 100644 --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -409,8 +409,8 @@ class Thread: public ThreadShadow { HandleArea* handle_area() const { return _handle_area; } void set_handle_area(HandleArea* area) { _handle_area = area; } - GrowableArray* metadata_handles() const { return _metadata_handles; } - void set_metadata_handles(GrowableArray* handles){ _metadata_handles = handles; } + GrowableArrayCHeap* metadata_handles() const { return _metadata_handles; } + void set_metadata_handles(GrowableArrayCHeap* handles){ _metadata_handles = handles; } // Thread-Local Allocation Buffer (TLAB) support ThreadLocalAllocBuffer& tlab() { return _tlab; } @@ -529,7 +529,7 @@ class Thread: public ThreadShadow { // Thread local handle area for allocation of handles within the VM HandleArea* _handle_area; - GrowableArray* _metadata_handles; + GrowableArrayCHeap* _metadata_handles; // Support for stack overflow handling, get_thread, etc. 
address _stack_base; diff --git a/src/hotspot/share/runtime/threads.hpp b/src/hotspot/share/runtime/threads.hpp index 8d61431f0ce63..b6d608cd8b5f7 100644 --- a/src/hotspot/share/runtime/threads.hpp +++ b/src/hotspot/share/runtime/threads.hpp @@ -37,6 +37,7 @@ class Thread; class ThreadClosure; class ThreadsList; class outputStream; +class ObjectMonitor; class CodeBlobClosure; class MetadataClosure; diff --git a/src/hotspot/share/runtime/unhandledOops.cpp b/src/hotspot/share/runtime/unhandledOops.cpp index cd1bde76c775f..b788a52683814 100644 --- a/src/hotspot/share/runtime/unhandledOops.cpp +++ b/src/hotspot/share/runtime/unhandledOops.cpp @@ -35,8 +35,7 @@ const int free_list_size = 256; UnhandledOops::UnhandledOops(Thread* thread) { _thread = thread; - _oop_list = new (mtThread) - GrowableArray(free_list_size, mtThread); + _oop_list = new GrowableArrayCHeap(free_list_size); _level = 0; } diff --git a/src/hotspot/share/runtime/unhandledOops.hpp b/src/hotspot/share/runtime/unhandledOops.hpp index 09ebbe68be357..39373412a5d90 100644 --- a/src/hotspot/share/runtime/unhandledOops.hpp +++ b/src/hotspot/share/runtime/unhandledOops.hpp @@ -69,7 +69,7 @@ class UnhandledOops : public CHeapObj { private: Thread* _thread; int _level; - GrowableArray *_oop_list; + GrowableArrayCHeap *_oop_list; void allow_unhandled_oop(oop* op); void clear_unhandled_oops(); UnhandledOops(Thread* thread); diff --git a/src/hotspot/share/runtime/vframe_hp.cpp b/src/hotspot/share/runtime/vframe_hp.cpp index b508b09104776..f320845394698 100644 --- a/src/hotspot/share/runtime/vframe_hp.cpp +++ b/src/hotspot/share/runtime/vframe_hp.cpp @@ -69,7 +69,7 @@ StackValueCollection* compiledVFrame::locals() const { // Replace the original values with any stores that have been // performed through compiledVFrame::update_locals. 
if (!register_map()->in_cont()) { // LOOM TODO - GrowableArray* list = JvmtiDeferredUpdates::deferred_locals(thread()); + GrowableArrayCHeap* list = JvmtiDeferredUpdates::deferred_locals(thread()); if (list != nullptr ) { // In real life this never happens or is typically a single element search for (int i = 0; i < list->length(); i++) { @@ -110,7 +110,7 @@ void compiledVFrame::update_monitor(int index, MonitorInfo* val) { void compiledVFrame::update_deferred_value(BasicType type, int index, jvalue value) { assert(fr().is_deoptimized_frame(), "frame must be scheduled for deoptimization"); assert(!Continuation::is_frame_in_continuation(thread(), fr()), "No support for deferred values in continuations"); - GrowableArray* deferred = JvmtiDeferredUpdates::deferred_locals(thread()); + GrowableArrayCHeap* deferred = JvmtiDeferredUpdates::deferred_locals(thread()); jvmtiDeferredLocalVariableSet* locals = nullptr; if (deferred != nullptr ) { // See if this vframe has already had locals with deferred writes @@ -202,7 +202,7 @@ StackValueCollection* compiledVFrame::expressions() const { if (!register_map()->in_cont()) { // LOOM TODO // Replace the original values with any stores that have been // performed through compiledVFrame::update_stack. 
- GrowableArray* list = JvmtiDeferredUpdates::deferred_locals(thread()); + GrowableArrayCHeap* list = JvmtiDeferredUpdates::deferred_locals(thread()); if (list != nullptr ) { // In real life this never happens or is typically a single element search for (int i = 0; i < list->length(); i++) { @@ -413,7 +413,7 @@ jvmtiDeferredLocalVariableSet::jvmtiDeferredLocalVariableSet(Method* method, int _id = id; _vframe_id = vframe_id; // Always will need at least one, must be on C heap - _locals = new(mtCompiler) GrowableArray (1, mtCompiler); + _locals = new GrowableArrayCHeap(1); _objects_are_deoptimized = false; } diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index 54d591ff14125..88c2650c6fae1 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -498,7 +498,7 @@ /* CodeCache (NOTE: incomplete) */ \ /********************************/ \ \ - static_field(CodeCache, _heaps, GrowableArray*) \ + static_field(CodeCache, _heaps, CodeCache::CodeHeapArray*) \ static_field(CodeCache, _low_bound, address) \ static_field(CodeCache, _high_bound, address) \ \ @@ -1237,6 +1237,7 @@ \ declare_toplevel_type(GrowableArrayBase) \ declare_toplevel_type(GrowableArray) \ + declare_toplevel_type(CodeCache::CodeHeapArray) \ declare_toplevel_type(Arena) \ declare_type(ResourceArea, Arena) \ \ diff --git a/src/hotspot/share/services/diagnosticArgument.cpp b/src/hotspot/share/services/diagnosticArgument.cpp index 94f2d3e1eb124..7dbac2aa196a3 100644 --- a/src/hotspot/share/services/diagnosticArgument.cpp +++ b/src/hotspot/share/services/diagnosticArgument.cpp @@ -31,7 +31,7 @@ #include "utilities/globalDefinitions.hpp" StringArrayArgument::StringArrayArgument() { - _array = new (mtServiceability) GrowableArray(32, mtServiceability); + _array = new GrowableArrayCHeap(32); assert(_array != nullptr, "Sanity check"); } diff --git a/src/hotspot/share/services/diagnosticArgument.hpp 
b/src/hotspot/share/services/diagnosticArgument.hpp index c9683ce4a2196..f9a87ea2a7959 100644 --- a/src/hotspot/share/services/diagnosticArgument.hpp +++ b/src/hotspot/share/services/diagnosticArgument.hpp @@ -33,14 +33,14 @@ class StringArrayArgument : public CHeapObj { private: - GrowableArray* _array; + GrowableArrayCHeap* _array; public: StringArrayArgument(); ~StringArrayArgument(); void add(const char* str, size_t len); - GrowableArray* array() { + GrowableArrayCHeap* array() { return _array; } }; diff --git a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp index a46101f6d8d1d..924dde582ffb4 100644 --- a/src/hotspot/share/services/heapDumper.cpp +++ b/src/hotspot/share/services/heapDumper.cpp @@ -1623,7 +1623,7 @@ class ThreadDumper : public CHeapObj { JavaThread* _java_thread; oop _thread_oop; - GrowableArray* _frames; + GrowableArrayCHeap* _frames; // non-null if the thread is OOM thread Method* _oome_constructor; int _thread_serial_num; @@ -1673,7 +1673,7 @@ class ThreadDumper : public CHeapObj { // writes HPROF_TRACE and HPROF_FRAME records // returns number of dumped frames - void dump_stack_traces(AbstractDumpWriter* writer, GrowableArray* klass_map); + void dump_stack_traces(AbstractDumpWriter* writer, GrowableArrayCHeap* klass_map); // writes HPROF_GC_ROOT_THREAD_OBJ subrecord void dump_thread_obj(AbstractDumpWriter* writer); @@ -1699,7 +1699,7 @@ ThreadDumper::ThreadDumper(ThreadType thread_type, JavaThread* java_thread, oop assert(_thread_oop != nullptr, "sanity"); } - _frames = new (mtServiceability) GrowableArray(10, mtServiceability); + _frames = new GrowableArrayCHeap(10); bool stop_at_vthread_entry = _thread_type == ThreadType::MountedVirtual; // vframes are resource allocated @@ -1720,7 +1720,7 @@ ThreadDumper::ThreadDumper(ThreadType thread_type, JavaThread* java_thread, oop } } -void ThreadDumper::dump_stack_traces(AbstractDumpWriter* writer, GrowableArray* klass_map) { +void 
ThreadDumper::dump_stack_traces(AbstractDumpWriter* writer, GrowableArrayCHeap* klass_map) { assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_nums are not initialized"); // write HPROF_FRAME records for this thread's stack trace @@ -2173,7 +2173,7 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public Unmounte JavaThread* _oome_thread; Method* _oome_constructor; bool _gc_before_heap_dump; - GrowableArray* _klass_map; + GrowableArrayCHeap* _klass_map; ThreadDumper** _thread_dumpers; // platform, carrier and mounted virtual threads int _thread_dumpers_count; @@ -2238,7 +2238,7 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public Unmounte WorkerTask("dump heap") { _local_writer = writer; _gc_before_heap_dump = gc_before_heap_dump; - _klass_map = new (mtServiceability) GrowableArray(INITIAL_CLASS_COUNT, mtServiceability); + _klass_map = new GrowableArrayCHeap(INITIAL_CLASS_COUNT); _thread_dumpers = nullptr; _thread_dumpers_count = 0; diff --git a/src/hotspot/share/services/management.cpp b/src/hotspot/share/services/management.cpp index 8e9249f5a61bc..2a2aa00d1b275 100644 --- a/src/hotspot/share/services/management.cpp +++ b/src/hotspot/share/services/management.cpp @@ -1258,7 +1258,7 @@ JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboo for (int depth = 0; depth < num_frames; depth++) { StackFrameInfo* frame = stacktrace->stack_frame_at(depth); int len = frame->num_locked_monitors(); - GrowableArray* locked_monitors = frame->locked_monitors(); + GrowableArrayCHeap* locked_monitors = frame->locked_monitors(); for (j = 0; j < len; j++) { oop monitor = locked_monitors->at(j).resolve(); assert(monitor != nullptr, "must be a Java object"); @@ -1268,7 +1268,7 @@ JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboo } } - GrowableArray* jni_locked_monitors = stacktrace->jni_locked_monitors(); + GrowableArrayCHeap* jni_locked_monitors = 
stacktrace->jni_locked_monitors(); for (j = 0; j < jni_locked_monitors->length(); j++) { oop object = jni_locked_monitors->at(j).resolve(); assert(object != nullptr, "must be a Java object"); @@ -1284,7 +1284,7 @@ JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboo // Create Object[] filled with locked JSR-166 synchronizers assert(ts->threadObj() != nullptr, "Must be a valid JavaThread"); ThreadConcurrentLocks* tcl = ts->get_concurrent_locks(); - GrowableArray* locks = (tcl != nullptr ? tcl->owned_locks() : nullptr); + GrowableArrayCHeap* locks = (tcl != nullptr ? tcl->owned_locks() : nullptr); int num_locked_synchronizers = (locks != nullptr ? locks->length() : 0); objArrayOop array = oopFactory::new_objArray(vmClasses::Object_klass(), num_locked_synchronizers, CHECK_NULL); @@ -1759,7 +1759,7 @@ static Handle find_deadlocks(bool object_monitors_only, TRAPS) { int index = 0; for (cycle = deadlocks; cycle != nullptr; cycle = cycle->next()) { - GrowableArray* deadlock_threads = cycle->threads(); + GrowableArrayCHeap* deadlock_threads = cycle->threads(); int len = deadlock_threads->length(); for (int i = 0; i < len; i++) { threads_ah->obj_at_put(index, deadlock_threads->at(i)->threadObj()); diff --git a/src/hotspot/share/services/memoryService.cpp b/src/hotspot/share/services/memoryService.cpp index 21b773e204e63..01bd564c4ca83 100644 --- a/src/hotspot/share/services/memoryService.cpp +++ b/src/hotspot/share/services/memoryService.cpp @@ -43,14 +43,14 @@ #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" -GrowableArray* MemoryService::_pools_list = - new (mtServiceability) GrowableArray(init_pools_list_size, mtServiceability); -GrowableArray* MemoryService::_managers_list = - new (mtServiceability) GrowableArray(init_managers_list_size, mtServiceability); +GrowableArrayCHeap* MemoryService::_pools_list = + new GrowableArrayCHeap(init_pools_list_size); +GrowableArrayCHeap* MemoryService::_managers_list = + new 
GrowableArrayCHeap(init_managers_list_size); MemoryManager* MemoryService::_code_cache_manager = nullptr; -GrowableArray* MemoryService::_code_heap_pools = - new (mtServiceability) GrowableArray(init_code_heap_pools_size, mtServiceability); +GrowableArrayCHeap* MemoryService::_code_heap_pools = + new GrowableArrayCHeap(init_code_heap_pools_size); MemoryPool* MemoryService::_metaspace_pool = nullptr; MemoryPool* MemoryService::_compressed_class_pool = nullptr; diff --git a/src/hotspot/share/services/memoryService.hpp b/src/hotspot/share/services/memoryService.hpp index 2d28f25c69519..9cc9897bbd45d 100644 --- a/src/hotspot/share/services/memoryService.hpp +++ b/src/hotspot/share/services/memoryService.hpp @@ -49,15 +49,15 @@ class MemoryService : public AllStatic { init_code_heap_pools_size = 9 }; - static GrowableArray* _pools_list; - static GrowableArray* _managers_list; + static GrowableArrayCHeap* _pools_list; + static GrowableArrayCHeap* _managers_list; // memory manager and code heap pools for the CodeCache - static MemoryManager* _code_cache_manager; - static GrowableArray* _code_heap_pools; + static MemoryManager* _code_cache_manager; + static GrowableArrayCHeap* _code_heap_pools; - static MemoryPool* _metaspace_pool; - static MemoryPool* _compressed_class_pool; + static MemoryPool* _metaspace_pool; + static MemoryPool* _compressed_class_pool; public: static void set_universe_heap(CollectedHeap* heap); diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp index bf9979fa3b4df..0ed2763cc2e61 100644 --- a/src/hotspot/share/services/threadService.cpp +++ b/src/hotspot/share/services/threadService.cpp @@ -609,7 +609,7 @@ StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) { GrowableArray* list = jvf->locked_monitors(); int length = list->length(); if (length > 0) { - _locked_monitors = new (mtServiceability) GrowableArray(length, mtServiceability); + _locked_monitors = new 
GrowableArrayCHeap(length); for (int i = 0; i < length; i++) { MonitorInfo* monitor = list->at(i); assert(monitor->owner() != nullptr, "This monitor must have an owning object"); @@ -661,11 +661,11 @@ class InflatedMonitorsClosure: public MonitorClosure { ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) { _thread = t; - _frames = new (mtServiceability) GrowableArray(INITIAL_ARRAY_SIZE, mtServiceability); + _frames = new GrowableArrayCHeap(INITIAL_ARRAY_SIZE); _depth = 0; _with_locked_monitors = with_locked_monitors; if (_with_locked_monitors) { - _jni_locked_monitors = new (mtServiceability) GrowableArray(INITIAL_ARRAY_SIZE, mtServiceability); + _jni_locked_monitors = new GrowableArrayCHeap(INITIAL_ARRAY_SIZE); } else { _jni_locked_monitors = nullptr; } @@ -737,7 +737,7 @@ bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) { for (int depth = 0; depth < num_frames; depth++) { StackFrameInfo* frame = stack_frame_at(depth); int len = frame->num_locked_monitors(); - GrowableArray* locked_monitors = frame->locked_monitors(); + GrowableArrayCHeap* locked_monitors = frame->locked_monitors(); for (int j = 0; j < len; j++) { oop monitor = locked_monitors->at(j).resolve(); assert(monitor != nullptr, "must be a Java object"); @@ -796,7 +796,7 @@ void ConcurrentLocksDump::dump_at_safepoint() { // dump all locked concurrent locks assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped"); - GrowableArray* aos_objects = new (mtServiceability) GrowableArray(INITIAL_ARRAY_SIZE, mtServiceability); + GrowableArrayCHeap* aos_objects = new GrowableArrayCHeap(INITIAL_ARRAY_SIZE); // Find all instances of AbstractOwnableSynchronizer HeapInspection::find_instances_at_safepoint(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(), @@ -809,7 +809,7 @@ void ConcurrentLocksDump::dump_at_safepoint() { // build a map of JavaThread to all its owned AbstractOwnableSynchronizer -void 
ConcurrentLocksDump::build_map(GrowableArray* aos_objects) { +void ConcurrentLocksDump::build_map(GrowableArrayCHeap* aos_objects) { int length = aos_objects->length(); for (int i = 0; i < length; i++) { oop o = aos_objects->at(i); @@ -854,7 +854,7 @@ ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) { st->print_cr(" Locked ownable synchronizers:"); ThreadConcurrentLocks* tcl = thread_concurrent_locks(t); - GrowableArray* locks = (tcl != nullptr ? tcl->owned_locks() : nullptr); + GrowableArrayCHeap* locks = (tcl != nullptr ? tcl->owned_locks() : nullptr); if (locks == nullptr || locks->is_empty()) { st->print_cr("\t- None"); st->cr(); @@ -870,7 +870,7 @@ void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) { ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) { _thread = thread; - _owned_locks = new (mtServiceability) GrowableArray(INITIAL_ARRAY_SIZE, mtServiceability); + _owned_locks = new GrowableArrayCHeap(INITIAL_ARRAY_SIZE); _next = nullptr; } @@ -993,7 +993,7 @@ void ThreadSnapshot::metadata_do(void f(Metadata*)) { DeadlockCycle::DeadlockCycle() { - _threads = new (mtServiceability) GrowableArray(INITIAL_ARRAY_SIZE, mtServiceability); + _threads = new GrowableArrayCHeap(INITIAL_ARRAY_SIZE); _next = nullptr; } diff --git a/src/hotspot/share/services/threadService.hpp b/src/hotspot/share/services/threadService.hpp index 63ae767a9a482..9e5075ba26b22 100644 --- a/src/hotspot/share/services/threadService.hpp +++ b/src/hotspot/share/services/threadService.hpp @@ -271,11 +271,11 @@ class ThreadSnapshot : public CHeapObj { class ThreadStackTrace : public CHeapObj { private: - JavaThread* _thread; - int _depth; // number of stack frames added - bool _with_locked_monitors; - GrowableArray* _frames; - GrowableArray* _jni_locked_monitors; + JavaThread* _thread; + int _depth; // number of stack frames added + bool _with_locked_monitors; 
+ GrowableArrayCHeap* _frames; + GrowableArrayCHeap* _jni_locked_monitors; public: @@ -290,7 +290,7 @@ class ThreadStackTrace : public CHeapObj { void dump_stack_at_safepoint(int max_depth, ObjectMonitorsView* monitors, bool full); Handle allocate_fill_stack_trace_element_array(TRAPS); void metadata_do(void f(Metadata*)); - GrowableArray* jni_locked_monitors() { return _jni_locked_monitors; } + GrowableArrayCHeap* jni_locked_monitors() { return _jni_locked_monitors; } int num_jni_locked_monitors() { return (_jni_locked_monitors != nullptr ? _jni_locked_monitors->length() : 0); } bool is_owned_monitor_on_stack(oop object); @@ -304,7 +304,8 @@ class StackFrameInfo : public CHeapObj { private: Method* _method; int _bci; - GrowableArray* _locked_monitors; // list of object monitors locked by this frame + // list of object monitors locked by this frame + GrowableArrayCHeap* _locked_monitors; // We need to save the mirrors in the backtrace to keep the class // from being unloaded while we still have this stack trace. OopHandle _class_holder; @@ -318,14 +319,14 @@ class StackFrameInfo : public CHeapObj { void metadata_do(void f(Metadata*)); int num_locked_monitors() { return (_locked_monitors != nullptr ? 
_locked_monitors->length() : 0); } - GrowableArray* locked_monitors() { return _locked_monitors; } + GrowableArrayCHeap* locked_monitors() { return _locked_monitors; } void print_on(outputStream* st) const; }; class ThreadConcurrentLocks : public CHeapObj { private: - GrowableArray* _owned_locks; + GrowableArrayCHeap* _owned_locks; ThreadConcurrentLocks* _next; // This JavaThread* is protected in one of two different ways // depending on the usage of the ThreadConcurrentLocks object: @@ -342,7 +343,7 @@ class ThreadConcurrentLocks : public CHeapObj { void set_next(ThreadConcurrentLocks* n) { _next = n; } ThreadConcurrentLocks* next() { return _next; } JavaThread* java_thread() { return _thread; } - GrowableArray* owned_locks() { return _owned_locks; } + GrowableArrayCHeap* owned_locks() { return _owned_locks; } }; class ConcurrentLocksDump : public StackObj { @@ -351,7 +352,7 @@ class ConcurrentLocksDump : public StackObj { ThreadConcurrentLocks* _last; // Last ThreadConcurrentLocks in the map bool _retain_map_on_free; - void build_map(GrowableArray* aos_objects); + void build_map(GrowableArrayCHeap* aos_objects); void add_lock(JavaThread* thread, instanceOop o); public: @@ -401,8 +402,8 @@ class ThreadDumpResult : public StackObj { class DeadlockCycle : public CHeapObj { private: - GrowableArray* _threads; - DeadlockCycle* _next; + GrowableArrayCHeap* _threads; + DeadlockCycle* _next; public: DeadlockCycle(); ~DeadlockCycle(); @@ -412,7 +413,7 @@ class DeadlockCycle : public CHeapObj { void add_thread(JavaThread* t) { _threads->append(t); } void reset() { _threads->clear(); } int num_threads() { return _threads->length(); } - GrowableArray* threads() { return _threads; } + GrowableArrayCHeap* threads() { return _threads; } void print_on_with(ThreadsList * t_list, outputStream* st) const; }; diff --git a/src/hotspot/share/utilities/bitMap.hpp b/src/hotspot/share/utilities/bitMap.hpp index 4fa6fb5fda2e6..be65897e99064 100644 --- 
a/src/hotspot/share/utilities/bitMap.hpp +++ b/src/hotspot/share/utilities/bitMap.hpp @@ -188,7 +188,6 @@ class BitMap { BitMap(bm_word_t* map, idx_t size_in_bits) : _map(map), _size(size_in_bits) { verify_size(size_in_bits); } - ~BitMap() {} public: // Pretouch the entire range of memory this BitMap covers. diff --git a/src/hotspot/share/utilities/growableArray.cpp b/src/hotspot/share/utilities/growableArray.cpp index 8e1057dd9f846..85c8758755b46 100644 --- a/src/hotspot/share/utilities/growableArray.cpp +++ b/src/hotspot/share/utilities/growableArray.cpp @@ -25,16 +25,9 @@ #include "precompiled.hpp" #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" -#include "runtime/javaThread.hpp" +#include "runtime/thread.hpp" #include "utilities/growableArray.hpp" -void* GrowableArrayResourceAllocator::allocate(int max, int elementSize) { - assert(max >= 0, "integer overflow"); - size_t byte_size = elementSize * (size_t) max; - - return (void*)resource_allocate_bytes(byte_size); -} - void* GrowableArrayArenaAllocator::allocate(int max, int element_size, Arena* arena) { assert(max >= 0, "integer overflow"); size_t byte_size = element_size * (size_t) max; @@ -56,37 +49,17 @@ void GrowableArrayCHeapAllocator::deallocate(void* elements) { } #ifdef ASSERT - GrowableArrayNestingCheck::GrowableArrayNestingCheck(bool on_resource_area) : - _nesting(on_resource_area ? Thread::current()->resource_area()->nesting() : 0) { -} + _nesting(on_resource_area ? Thread::current()->resource_area()->nesting() : 0), + _on_resource_area(on_resource_area) {} -void GrowableArrayNestingCheck::on_resource_area_alloc() const { +void GrowableArrayNestingCheck::on_allocate() const { // Check for insidious allocation bug: if a GrowableArray overflows, the // grown array must be allocated under the same ResourceMark as the original. // Otherwise, the _data array will be deallocated too early. 
- if (_nesting != Thread::current()->resource_area()->nesting()) { + if (_on_resource_area && + _nesting != Thread::current()->resource_area()->nesting()) { fatal("allocation bug: GrowableArray could grow within nested ResourceMark"); } } - -void GrowableArrayMetadata::init_checks(const GrowableArrayBase* array) const { - // Stack allocated arrays support all three element allocation locations - if (array->allocated_on_stack_or_embedded()) { - return; - } - - // Otherwise there's a strict one-to-one mapping - assert(on_C_heap() == array->allocated_on_C_heap(), - "growable array must be C heap allocated if elements are"); - assert(on_resource_area() == array->allocated_on_res_area(), - "growable array must be resource allocated if elements are"); - assert(on_arena() == array->allocated_on_arena(), - "growable array must be arena allocated if elements are"); -} - -void GrowableArrayMetadata::on_resource_area_alloc_check() const { - _nesting_check.on_resource_area_alloc(); -} - #endif // ASSERT diff --git a/src/hotspot/share/utilities/growableArray.hpp b/src/hotspot/share/utilities/growableArray.hpp index e9abd9fae9a3a..85a20d67cad2f 100644 --- a/src/hotspot/share/utilities/growableArray.hpp +++ b/src/hotspot/share/utilities/growableArray.hpp @@ -27,11 +27,14 @@ #include "memory/allocation.hpp" #include "memory/iterator.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/thread.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/ostream.hpp" #include "utilities/powerOfTwo.hpp" + // A growable array. /*************************************************************************/ @@ -58,7 +61,7 @@ /* ... */ /* } */ /* */ -/* If the GrowableArrays you are creating is C_Heap allocated then it */ +/* If you are using a GrowableArrayCHeap (allocates on CHeap) then it */ /* should not hold handles since the handles could trivially try and */ /* outlive their HandleMark. 
In some situations you might need to do */ /* this and it would be legal but be very careful and see if you can do */ @@ -66,9 +69,7 @@ /* */ /*************************************************************************/ -// Non-template base class responsible for handling the length and max. - - +// Non-template base class responsible for handling the length and capacity. class GrowableArrayBase : public AnyObj { friend class VMStructs; @@ -84,8 +85,6 @@ class GrowableArrayBase : public AnyObj { assert(_len >= 0 && _len <= _capacity, "initial_len too big"); } - ~GrowableArrayBase() {} - public: int length() const { return _len; } int capacity() const { return _capacity; } @@ -93,16 +92,9 @@ class GrowableArrayBase : public AnyObj { bool is_empty() const { return _len == 0; } bool is_nonempty() const { return _len != 0; } bool is_full() const { return _len == _capacity; } - - void clear() { _len = 0; } - void trunc_to(int length) { - assert(length <= _len,"cannot increase length"); - _len = length; - } }; template class GrowableArrayIterator; -template class GrowableArrayFilterIterator; // Extends GrowableArrayBase with a typed data array. 
// @@ -121,10 +113,18 @@ class GrowableArrayView : public GrowableArrayBase { GrowableArrayView(E* data, int capacity, int initial_len) : GrowableArrayBase(capacity, initial_len), _data(data) {} - ~GrowableArrayView() {} - public: - const static GrowableArrayView EMPTY; + void clear() { trunc_to(0); } + + void trunc_to(int length) { + assert(length <= _len,"cannot increase length"); + assert(length >= 0,"positive length"); + // Destruct all elements from new length to old length + for (int i = length; i < _len; i++) { + this->_data[i].~E(); + } + _len = length; + } bool operator==(const GrowableArrayView& rhs) const { if (_len != rhs._len) @@ -180,7 +180,13 @@ class GrowableArrayView : public GrowableArrayBase { E pop() { assert(_len > 0, "empty list"); - return _data[--_len]; + int new_len = _len - 1; + // copy-construct the return value + E elem(_data[new_len]); + // destruct the old value + this->_data[new_len].~E(); + _len = new_len; + return elem; } void at_put(int i, const E& elem) { @@ -188,6 +194,12 @@ class GrowableArrayView : public GrowableArrayBase { _data[i] = elem; } + void at_swap(int i, int j) { + E tmp = this->at(i); + this->at_put(i, this->at(j)); + this->at_put(j, tmp); + } + bool contains(const E& elem) const { for (int i = 0; i < _len; i++) { if (_data[i] == elem) return true; @@ -258,9 +270,16 @@ class GrowableArrayView : public GrowableArrayBase { void remove_at(int index) { assert(0 <= index && index < _len, "illegal index %d for length %d", index, _len); + + // destruct old element + this->_data[index].~E(); + + // copy-construct, and destruct all the way up. This simulates the move. 
for (int j = index + 1; j < _len; j++) { - _data[j-1] = _data[j]; + ::new ((void*)&this->_data[j-1]) E(_data[j]); + this->_data[j].~E(); } + _len--; } @@ -274,19 +293,30 @@ class GrowableArrayView : public GrowableArrayBase { assert(0 <= start, "illegal start index %d", start); assert(start < end && end <= _len, "erase called with invalid range (%d, %d) for length %d", start, end, _len); + // Destruct old elements + for (int i = start; i < end; i++) { + this->_data[i].~E(); + } + + // Move down the high elements for (int i = start, j = end; j < length(); i++, j++) { - at_put(i, at(j)); + ::new ((void*)&this->_data[i]) E(_data[j]); + this->_data[j].~E(); } - trunc_to(length() - (end - start)); + _len -= (end - start); } // The order is changed. void delete_at(int index) { assert(0 <= index && index < _len, "illegal index %d for length %d", index, _len); - if (index < --_len) { - // Replace removed element with last one. - _data[index] = _data[_len]; - } + + _len--; + // Destruct old + this->_data[index].~E(); + // Copy-construct last element to deleted slot + ::new ((void*)&this->_data[index]) E(_data[_len]); + // Destruct last element + this->_data[_len].~E(); } void sort(int f(E*, E*)) { @@ -297,7 +327,8 @@ class GrowableArrayView : public GrowableArrayBase { qsort(_data, length() / stride, sizeof(E) * stride, (_sort_Fn)f); } - template int find_sorted(const K& key, bool& found) const { + template + int find_sorted(const K& key, bool& found) const { found = false; int min = 0; int max = length() - 1; @@ -318,28 +349,6 @@ class GrowableArrayView : public GrowableArrayBase { return min; } - template - int find_sorted(CompareClosure* cc, const K& key, bool& found) { - found = false; - int min = 0; - int max = length() - 1; - - while (max >= min) { - int mid = (int)(((uint)max + min) / 2); - E value = at(mid); - int diff = cc->do_compare(key, value); - if (diff > 0) { - min = mid + 1; - } else if (diff < 0) { - max = mid - 1; - } else { - found = true; - return mid; - 
} - } - return min; - } - void print() const { tty->print("Growable Array " PTR_FORMAT, p2i(this)); tty->print(": length %d (capacity %d) { ", _len, _capacity); @@ -350,9 +359,6 @@ class GrowableArrayView : public GrowableArrayBase { } }; -template -const GrowableArrayView GrowableArrayView::EMPTY(nullptr, 0, 0); - template class GrowableArrayFromArray : public GrowableArrayView { public: @@ -373,35 +379,40 @@ template class GrowableArrayWithAllocator : public GrowableArrayView { friend class VMStructs; +private: void expand_to(int j); void grow(int j); protected: GrowableArrayWithAllocator(E* data, int capacity) : GrowableArrayView(data, capacity, 0) { - for (int i = 0; i < capacity; i++) { - ::new ((void*)&data[i]) E(); - } + // Allocate for "capacity" many elements, but do not yet + // initialize / construct any elements. } GrowableArrayWithAllocator(E* data, int capacity, int initial_len, const E& filler) : GrowableArrayView(data, capacity, initial_len) { int i = 0; + // We initialize / construct (with placement new, copy constructor), + // for "initial_len" many elements. for (; i < initial_len; i++) { ::new ((void*)&data[i]) E(filler); } - for (; i < capacity; i++) { - ::new ((void*)&data[i]) E(); - } + // The "capacity" may be larger, but we leave the rest + // of the space uninitialized / no construction. } - ~GrowableArrayWithAllocator() {} - public: int append(const E& elem) { if (this->_len == this->_capacity) grow(this->_len); int idx = this->_len++; - this->_data[idx] = elem; + + // The destination element on _data is not yet initialized. + // Hence, we need to use placement new, with copy-construct. + // Assignment would be wrong, as it assumes the destination + // was already initialized. 
+ ::new ((void*)&this->_data[idx]) E(elem); + return idx; } @@ -418,8 +429,13 @@ class GrowableArrayWithAllocator : public GrowableArrayView { assert(0 <= i, "negative index %d", i); if (i >= this->_len) { if (i >= this->_capacity) grow(i); - for (int j = this->_len; j <= i; j++) - this->_data[j] = fill; + // The elements from old len to new len are not yet + // initialized. We use placement new with copy-construct. + // Assignment would be wrong, as it assumes the destination + // was already initialized. + for (int j = this->_len; j <= i; j++) { + ::new ((void*)&this->_data[j]) E(fill); + } this->_len = i+1; } return this->_data[i]; @@ -429,52 +445,76 @@ class GrowableArrayWithAllocator : public GrowableArrayView { assert(0 <= i, "negative index %d", i); if (i >= this->_len) { if (i >= this->_capacity) grow(i); - for (int j = this->_len; j < i; j++) - this->_data[j] = fill; + // The elements from old len to new len are not yet + // initialized. We use placement new with copy-construct. + // Assignment would be wrong, as it assumes the destination + // was already initialized. + for (int j = this->_len; j < i; j++) { + ::new ((void*)&this->_data[j]) E(fill); + } + ::new ((void*)&this->_data[i]) E(elem); this->_len = i+1; + } else { + // Destination is already initialized, so use assignment. 
+ this->_data[i] = elem; } - this->_data[i] = elem; } // inserts the given element before the element at index i void insert_before(const int idx, const E& elem) { assert(0 <= idx && idx <= this->_len, "illegal index %d for length %d", idx, this->_len); - if (this->_len == this->_capacity) grow(this->_len); + + if (this->_len == this->_capacity) { + grow(this->_len); + } + + // Move up the high elements (copy-construct / deconstruct) for (int j = this->_len - 1; j >= idx; j--) { - this->_data[j + 1] = this->_data[j]; + ::new ((void*)&this->_data[j + 1]) E(this->_data[j]); + this->_data[j].~E(); } + + // Place the new element + ::new ((void*)&this->_data[idx]) E(elem); + this->_len++; - this->_data[idx] = elem; } void insert_before(const int idx, const GrowableArrayView* array) { + assert(this != array, "cannot insert itself to itself"); assert(0 <= idx && idx <= this->_len, "illegal index %d for length %d", idx, this->_len); + int array_len = array->length(); int new_len = this->_len + array_len; - if (new_len >= this->_capacity) grow(new_len); + if (new_len >= this->_capacity) { + grow(new_len); + } + + // Move up the high elements (copy-construct / deconstruct) for (int j = this->_len - 1; j >= idx; j--) { - this->_data[j + array_len] = this->_data[j]; + ::new ((void*)&this->_data[j + array_len]) E(this->_data[j]); + this->_data[j].~E(); } + // Place all new elements for (int j = 0; j < array_len; j++) { - this->_data[idx + j] = array->at(j); + ::new ((void*)&this->_data[idx + j]) E(array->at(j)); } this->_len += array_len; } void appendAll(const GrowableArrayView* l) { - for (int i = 0; i < l->length(); i++) { - this->at_put_grow(this->_len, l->at(i), E()); - } + insert_before(this->_len, l); } // Binary search and insertion utility. Search array for element // matching key according to the static compare function. Insert // that element if not already in the list. Assumes the list is // already sorted according to compare function. 
- template E insert_sorted(const E& key) { + template + E insert_sorted(const E& key) { bool found; int location = GrowableArrayView::template find_sorted(key, found); if (!found) { @@ -483,28 +523,8 @@ class GrowableArrayWithAllocator : public GrowableArrayView { return this->at(location); } - E insert_sorted(CompareClosure* cc, const E& key) { - bool found; - int location = find_sorted(cc, key, found); - if (!found) { - insert_before(location, key); - } - return this->at(location); - } - - void swap(GrowableArrayWithAllocator* other) { - ::swap(this->_data, other->_data); - ::swap(this->_len, other->_len); - ::swap(this->_capacity, other->_capacity); - } - // Ensure capacity is at least new_capacity. void reserve(int new_capacity); - - // Reduce capacity to length. - void shrink_to_fit(); - - void clear_and_deallocate(); }; template @@ -512,16 +532,34 @@ void GrowableArrayWithAllocator::expand_to(int new_capacity) { int old_capacity = this->_capacity; assert(new_capacity > old_capacity, "expected growth but %d <= %d", new_capacity, old_capacity); + + // Allocate the new data with new capacity this->_capacity = new_capacity; - E* newData = static_cast(this)->allocate(); + E* new_data = static_cast(this)->allocate(); + + // Copy-construct old->new (using placement new) int i = 0; - for ( ; i < this->_len; i++) ::new ((void*)&newData[i]) E(this->_data[i]); - for ( ; i < this->_capacity; i++) ::new ((void*)&newData[i]) E(); - for (i = 0; i < old_capacity; i++) this->_data[i].~E(); + for ( ; i < this->_len; i++) { + ::new ((void*)&new_data[i]) E(this->_data[i]); + } + + // Leave rest of space up to "new_capacity" uninitialized, + // no construction + + // Remove the old elements, calling the destructor + // (on initialized elements only) + for (i = 0; i < this->_len; i++) { + this->_data[i].~E(); + } + + // Now that we have destructed all elements on the old + // data, we can deallocate it. 
if (this->_data != nullptr) { static_cast(this)->deallocate(this->_data); } - this->_data = newData; + + // swap in the new data + this->_data = new_data; } template @@ -537,45 +575,6 @@ void GrowableArrayWithAllocator::reserve(int new_capacity) { } } -template -void GrowableArrayWithAllocator::shrink_to_fit() { - int old_capacity = this->_capacity; - int len = this->_len; - assert(len <= old_capacity, "invariant"); - - // If already at full capacity, nothing to do. - if (len == old_capacity) { - return; - } - - // If not empty, allocate new, smaller, data, and copy old data to it. - E* old_data = this->_data; - E* new_data = nullptr; - this->_capacity = len; // Must preceed allocate(). - if (len > 0) { - new_data = static_cast(this)->allocate(); - for (int i = 0; i < len; ++i) ::new (&new_data[i]) E(old_data[i]); - } - // Destroy contents of old data, and deallocate it. - for (int i = 0; i < old_capacity; ++i) old_data[i].~E(); - if (old_data != nullptr) { - static_cast(this)->deallocate(old_data); - } - // Install new data, which might be nullptr. - this->_data = new_data; -} - -template -void GrowableArrayWithAllocator::clear_and_deallocate() { - this->clear(); - this->shrink_to_fit(); -} - -class GrowableArrayResourceAllocator { -public: - static void* allocate(int max, int element_size); -}; - // Arena allocator class GrowableArrayArenaAllocator { public: @@ -590,206 +589,159 @@ class GrowableArrayCHeapAllocator { }; #ifdef ASSERT - // Checks resource allocation nesting class GrowableArrayNestingCheck { // resource area nesting at creation int _nesting; + bool _on_resource_area; public: GrowableArrayNestingCheck(bool on_resource_area); - void on_resource_area_alloc() const; + int nesting() const { return _nesting; } + void on_allocate() const; }; - -#endif // ASSERT - -// Encodes where the backing array is allocated -// and performs necessary checks. 
-class GrowableArrayMetadata { - uintptr_t _bits; - - // resource area nesting at creation - debug_only(GrowableArrayNestingCheck _nesting_check;) - - // Resource allocation - static uintptr_t bits() { - return 0; - } - - // CHeap allocation - static uintptr_t bits(MEMFLAGS memflags) { - assert(memflags != mtNone, "Must provide a proper MEMFLAGS"); - return (uintptr_t(memflags) << 1) | 1; - } - - // Arena allocation - static uintptr_t bits(Arena* arena) { - assert((uintptr_t(arena) & 1) == 0, "Required for on_C_heap() to work"); - return uintptr_t(arena); - } - -public: - // Resource allocation - GrowableArrayMetadata() : - _bits(bits()) - debug_only(COMMA _nesting_check(true)) { - } - - // Arena allocation - GrowableArrayMetadata(Arena* arena) : - _bits(bits(arena)) - debug_only(COMMA _nesting_check(false)) { - } - - // CHeap allocation - GrowableArrayMetadata(MEMFLAGS memflags) : - _bits(bits(memflags)) - debug_only(COMMA _nesting_check(false)) { - } - -#ifdef ASSERT - GrowableArrayMetadata(const GrowableArrayMetadata& other) : - _bits(other._bits), - _nesting_check(other._nesting_check) { - assert(!on_C_heap(), "Copying of CHeap arrays not supported"); - assert(!other.on_C_heap(), "Copying of CHeap arrays not supported"); - } - - GrowableArrayMetadata& operator=(const GrowableArrayMetadata& other) { - _bits = other._bits; - _nesting_check = other._nesting_check; - assert(!on_C_heap(), "Assignment of CHeap arrays not supported"); - assert(!other.on_C_heap(), "Assignment of CHeap arrays not supported"); - return *this; - } - - void init_checks(const GrowableArrayBase* array) const; - void on_resource_area_alloc_check() const; #endif // ASSERT - bool on_C_heap() const { return (_bits & 1) == 1; } - bool on_resource_area() const { return _bits == 0; } - bool on_arena() const { return (_bits & 1) == 0 && _bits != 0; } - - Arena* arena() const { return (Arena*)_bits; } - MEMFLAGS memflags() const { return MEMFLAGS(_bits >> 1); } -}; -// THE GrowableArray. 
+// The GrowableArray internal data is allocated from either: +// - Resrouce area (default) +// - Arena // -// Supports multiple allocation strategies: -// - Resource stack allocation: if no extra argument is provided -// - CHeap allocation: if memflags is provided -// - Arena allocation: if an arena is provided -// -// There are some drawbacks of using GrowableArray, that are removed in some -// of the other implementations of GrowableArrayWithAllocator sub-classes: -// -// Memory overhead: The multiple allocation strategies uses extra metadata -// embedded in the instance. +// Itself, it can be embedded, on stack, resource_arena or arena allocated. // // Strict allocation locations: There are rules about where the GrowableArray // instance is allocated, that depends on where the data array is allocated. // See: init_checks. - +// +// For C-Heap allocation use GrowableArrayCHeap. +// +// Note, that with GrowableArray does not deallocate the allocated memory from +// the arena / resource area, but rather just abandons it until the memory is +// released by the arena or by the ResourceMark from the resource area. +// Because GrowableArrays are often just abandoned rather than properly destructed, +// we have decided to require that elements are trivially destructible, so that +// it makes no difference if the destructors are called or not. +// +// GrowableArray is copyable, but it only creates a shallow copy. Hence, one has +// to be careful not to duplicate the state and then diverge while sharing the +// underlying data. 
template class GrowableArray : public GrowableArrayWithAllocator > { - friend class GrowableArrayWithAllocator >; - friend class GrowableArrayTest; - - static E* allocate(int max) { - return (E*)GrowableArrayResourceAllocator::allocate(max, sizeof(E)); - } - - static E* allocate(int max, MEMFLAGS memflags) { - return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), memflags); - } - - static E* allocate(int max, Arena* arena) { - return (E*)GrowableArrayArenaAllocator::allocate(max, sizeof(E), arena); - } - - GrowableArrayMetadata _metadata; - - void init_checks() const { debug_only(_metadata.init_checks(this);) } - - // Where are we going to allocate memory? - bool on_C_heap() const { return _metadata.on_C_heap(); } - bool on_resource_area() const { return _metadata.on_resource_area(); } - bool on_arena() const { return _metadata.on_arena(); } - - E* allocate() { - if (on_resource_area()) { - debug_only(_metadata.on_resource_area_alloc_check()); - return allocate(this->_capacity); - } - - if (on_C_heap()) { - return allocate(this->_capacity, _metadata.memflags()); - } + // Since GrowableArray is arena / resource area allocated, it is a custom to + // simply abandon the array and hence not destruct the elements. Therefore, + // we only allow elements where the destruction does nothing anyway. That + // way there is no difference between abandoning elements or destructing them. +#ifndef ASSERT + // Sadly, we can only verify this in non-ASSERT mode, because AnyObj has a + // destructor in ASSERT mode. Hence, we now allow non-trivial destructors + // in ASSERT mode, but forbid them in product mode. 
+ static_assert(std::is_trivially_destructible::value, + "GrowableArray only allows trivially destructible elements"); +#endif // ASSERT - assert(on_arena(), "Sanity"); - return allocate(this->_capacity, _metadata.arena()); - } +private: + Arena* _arena; - void deallocate(E* mem) { - if (on_C_heap()) { - GrowableArrayCHeapAllocator::deallocate(mem); - } - } + // Check for insidious allocation bug: if a GrowableArray reallocates _data, + // this must be done under the same ResourceMark as the original. + // Otherwise, the _data array will be deallocated too early. + DEBUG_ONLY(GrowableArrayNestingCheck _nesting_check;) public: - GrowableArray() : GrowableArray(2 /* initial_capacity */) {} + GrowableArray() : + GrowableArray(Thread::current()->resource_area()) {} + + explicit GrowableArray(Arena* arena) : + GrowableArray(arena, 2 /* initial_capacity */) {} explicit GrowableArray(int initial_capacity) : - GrowableArrayWithAllocator >( - allocate(initial_capacity), - initial_capacity), - _metadata() { - init_checks(); - } + GrowableArray(Thread::current()->resource_area(), initial_capacity) {} - GrowableArray(int initial_capacity, MEMFLAGS memflags) : + GrowableArray(Arena* arena, int initial_capacity) : GrowableArrayWithAllocator >( - allocate(initial_capacity, memflags), + allocate(arena, initial_capacity), initial_capacity), - _metadata(memflags) { - init_checks(); + _arena(arena) + DEBUG_ONLY(COMMA _nesting_check(on_resource_area())) + { + DEBUG_ONLY( init_checks(); ) } GrowableArray(int initial_capacity, int initial_len, const E& filler) : + GrowableArray(Thread::current()->resource_area(), initial_capacity, initial_len, filler) {} + + GrowableArray(Arena* arena, int initial_capacity, int initial_len, const E& filler) : GrowableArrayWithAllocator >( - allocate(initial_capacity), + allocate(arena, initial_capacity), initial_capacity, initial_len, filler), - _metadata() { - init_checks(); + _arena(arena) + DEBUG_ONLY(COMMA _nesting_check(on_resource_area())) + { + 
DEBUG_ONLY( init_checks(); ) } - GrowableArray(int initial_capacity, int initial_len, const E& filler, MEMFLAGS memflags) : - GrowableArrayWithAllocator >( - allocate(initial_capacity, memflags), - initial_capacity, initial_len, filler), - _metadata(memflags) { - init_checks(); +#ifdef ASSERT + bool on_resource_area() const { + return _arena == (Arena*)Thread::current()->resource_area(); + }; +#endif + + void swap(GrowableArray* other) { + assert(_arena == other->_arena, "must have same arena"); + assert(_nesting_check.nesting() == other->_nesting_check.nesting(), + "same nesting if using resource area"); + ::swap(this->_data, other->_data); + ::swap(this->_len, other->_len); + ::swap(this->_capacity, other->_capacity); } - GrowableArray(Arena* arena, int initial_capacity, int initial_len, const E& filler) : - GrowableArrayWithAllocator >( - allocate(initial_capacity, arena), - initial_capacity, initial_len, filler), - _metadata(arena) { - init_checks(); + E* allocate() { + DEBUG_ONLY(_nesting_check.on_allocate(); ) + return allocate(_arena, this->_capacity); + } + + void deallocate(E* mem) { + // We have arena allocation, so we just abandon the memory. } - ~GrowableArray() { - if (on_C_heap()) { - this->clear_and_deallocate(); +private: + E* allocate(Arena* arena, int capacity) { + return (E*)GrowableArrayArenaAllocator::allocate(capacity, sizeof(E), arena); + } + +#ifdef ASSERT + void init_checks() const { + if (this->allocated_on_stack_or_embedded()) { + return; + } else if (this->allocated_on_res_area()) { + assert(on_resource_area(), + "The elements must be resource area allocated if the GrowableArray itself is"); + } else if (this->allocated_on_arena()) { + assert(Arena_contains(_arena, this), + "if GrowableArray is arena allocated, then the elements must be from the same arena"); + } else if (this->allocated_on_C_heap()) { + // We should not allocate GrowableArray on the C-Heap, while the internal + // memory is allocated on an Arena. 
Otherwise, the data pointer can outlive + // the arena scope. + assert(false, "GrowableArray cannot be C heap allocated"); + } else { + assert(false, "GrowableArray has unhandled allocation state"); } } +#endif // ASSERT }; -// Leaner GrowableArray for CHeap backed data arrays, with compile-time decided MEMFLAGS. +// The GrowableArrayCHeap internal data is allocated from C-Heap, +// with compile-time decided MEMFLAGS. +// +// The GrowableArrayCHeap itself can be stack allocated, embedded +// or C heap allocated. It is up to the user to ensure that the +// array is eventually destructed / deallocated. +// +// When the array is destructed, then all the remaining elements +// are first destructed. Hence, we allow elements with non-trivial +// destructors. template class GrowableArrayCHeap : public GrowableArrayWithAllocator > { friend class GrowableArrayWithAllocator >; @@ -839,14 +791,60 @@ class GrowableArrayCHeap : public GrowableArrayWithAllocatorclear(); + this->shrink_to_fit(); + } + + // The template argument F ensures the MEMFLAGS are the + // same for both arrays. + void swap(GrowableArrayCHeap* other) { + ::swap(this->_data, other->_data); + ::swap(this->_len, other->_len); + ::swap(this->_capacity, other->_capacity); + } }; +template +void GrowableArrayCHeap::shrink_to_fit() { + int old_capacity = this->_capacity; + int len = this->_len; + assert(len <= old_capacity, "invariant"); + + // If already at full capacity, nothing to do. + if (len == old_capacity) { + return; + } + + // If not empty, allocate new, smaller, data, and copy old data to it. + E* old_data = this->_data; + E* new_data = nullptr; + this->_capacity = len; // Must preceed allocate(). 
+ if (len > 0) { + new_data = this->allocate(); + // copy-construct old->new + for (int i = 0; i < len; ++i) ::new (&new_data[i]) E(old_data[i]); + } + + // destruct old + for (int i = 0; i < len; ++i) old_data[i].~E(); + + if (old_data != nullptr) { + this->deallocate(old_data); + } + // Install new data, which might be nullptr. + this->_data = new_data; +} + // Custom STL-style iterator to iterate over GrowableArrays // It is constructed by invoking GrowableArray::begin() and GrowableArray::end() template class GrowableArrayIterator : public StackObj { friend class GrowableArrayView; - template friend class GrowableArrayFilterIterator; private: const GrowableArrayView* _array; // GrowableArray we iterate over @@ -860,47 +858,7 @@ class GrowableArrayIterator : public StackObj { public: GrowableArrayIterator() : _array(nullptr), _position(0) { } GrowableArrayIterator& operator++() { ++_position; return *this; } - E operator*() { return _array->at(_position); } - - bool operator==(const GrowableArrayIterator& rhs) { - assert(_array == rhs._array, "iterator belongs to different array"); - return _position == rhs._position; - } - - bool operator!=(const GrowableArrayIterator& rhs) { - assert(_array == rhs._array, "iterator belongs to different array"); - return _position != rhs._position; - } -}; - -// Custom STL-style iterator to iterate over elements of a GrowableArray that satisfy a given predicate -template -class GrowableArrayFilterIterator : public StackObj { - friend class GrowableArrayView; - - private: - const GrowableArrayView* _array; // GrowableArray we iterate over - int _position; // Current position in the GrowableArray - UnaryPredicate _predicate; // Unary predicate the elements of the GrowableArray should satisfy - - public: - GrowableArrayFilterIterator(const GrowableArrayIterator& begin, UnaryPredicate filter_predicate) : - _array(begin._array), _position(begin._position), _predicate(filter_predicate) { - // Advance to first element satisfying the 
predicate - while(_position != _array->length() && !_predicate(_array->at(_position))) { - ++_position; - } - } - - GrowableArrayFilterIterator& operator++() { - do { - // Advance to next element satisfying the predicate - ++_position; - } while(_position != _array->length() && !_predicate(_array->at(_position))); - return *this; - } - - E operator*() { return _array->at(_position); } + const E& operator*() { return _array->at(_position); } bool operator==(const GrowableArrayIterator& rhs) { assert(_array == rhs._array, "iterator belongs to different array"); @@ -911,16 +869,6 @@ class GrowableArrayFilterIterator : public StackObj { assert(_array == rhs._array, "iterator belongs to different array"); return _position != rhs._position; } - - bool operator==(const GrowableArrayFilterIterator& rhs) { - assert(_array == rhs._array, "iterator belongs to different array"); - return _position == rhs._position; - } - - bool operator!=(const GrowableArrayFilterIterator& rhs) { - assert(_array == rhs._array, "iterator belongs to different array"); - return _position != rhs._position; - } }; // Arrays for basic types diff --git a/test/hotspot/gtest/utilities/test_growableArray.cpp b/test/hotspot/gtest/utilities/test_growableArray.cpp index cd269e092121e..720350bd5ea32 100644 --- a/test/hotspot/gtest/utilities/test_growableArray.cpp +++ b/test/hotspot/gtest/utilities/test_growableArray.cpp @@ -26,640 +26,2362 @@ #include "utilities/growableArray.hpp" #include "unittest.hpp" -struct WithEmbeddedArray { - // Array embedded in another class - GrowableArray _a; - - // Resource allocated data array - WithEmbeddedArray(int initial_max) : _a(initial_max) {} - // Arena allocated data array - WithEmbeddedArray(Arena* arena, int initial_max) : _a(arena, initial_max, 0, 0) {} - // CHeap allocated data array - WithEmbeddedArray(int initial_max, MEMFLAGS memflags) : _a(initial_max, memflags) { - assert(memflags != mtNone, "test requirement"); - } - WithEmbeddedArray(const GrowableArray& 
other) : _a(other) {} -}; +// We have a list of each: +// - ModifyClosure +// - TestClosure +// - AllocatorClosure +// - AllocatorArgs +// +// For each AllocationClosure and AllocatorArgs, we call dispatch +// with each of the ModifyClosuresi and each TestClosures. The +// allocation cosure allocates its array (with initial size and +// capacity specified by the AllocatorArgs), and then passes itself +// into the ModifyClosure which does some first modificaions and +// subsequently into the TestClosure, which runs the test. +// +// For one test and allocator we do: +// test.reset() +// allocator.dispatch(modification, test); +// -> allocate GrowableArray +// modification.do_modify(allocator) +// test.do_test(allocator) +// -> call read / write ops on allocator which forwards +// that to the allocated GrowableArray +// de-allocate GrowableAray +// test.finish() +// +// We test the arrays with different element types. Hence, the test +// is heavily templated. The idea is to ensure GrowableArray works +// for basic types, pointers, but also classes: for example with or +// without a default-constructor, or copy-assign operator. Of course +// if the copy-assign operator is deleted, then we cannot use certain +// write operations (such as at_put). With a special CtorDtor type we +// verify that constructor and destructor counts are as expected, +// for example we can verify the expected number of "live" elements. +// +// ------------ Array Elements ------------- + +// Used to get an ith element of any type E. 
+template +E value_factory(int i) { + return E(i); +} -// Test fixture to work with TEST_VM_F -class GrowableArrayTest : public ::testing::Test { -protected: - // friend -> private accessors - template - static bool elements_on_C_heap(const GrowableArray* array) { - return array->on_C_heap(); - } - template - static bool elements_on_resource_area(const GrowableArray* array) { - return array->on_resource_area(); - } - template - static bool elements_on_arena(const GrowableArray* array) { - return array->on_arena(); - } +// Used for sorting of any type E +template +int value_compare_ptr(E* e1, E* e2) { + return (*e1) - (*e2); +} - template - static void test_append(ArrayClass* a) { - // Add elements - for (int i = 0; i < 10; i++) { - a->append(i); - } +// Used for sorting of any type E +template +int value_compare_ref(const E& e1, const E& e2) { + return e1 - e2; +} - // Check size - ASSERT_EQ(a->length(), 10); +class Point { +private: + int _x; + int _y; +public: + // On purpose, we have no default constructor + // This is to test that it is not needed for + // GrowableArray. 
+ Point() = delete; + Point(int x, int y) : _x(x), _y(y) {} + + bool operator==(const Point& other) const { + return _x == other._x && _y == other._y; + } - // Check elements - for (int i = 0; i < 10; i++) { - EXPECT_EQ(a->at(i), i); - } + bool operator!=(const Point& other) const { + return !(*this == other); } - template - static void test_clear(ArrayClass* a) { - // Add elements - for (int i = 0; i < 10; i++) { - a->append(i); + int operator-(const Point& other) const { + int i = _x - other._x; + if (i == 0) { + return _y - other._y; + } else { + return i; } + } +}; - // Check size - ASSERT_EQ(a->length(), 10); - ASSERT_EQ(a->is_empty(), false); - - // Clear elements - a->clear(); - - // Check size - ASSERT_EQ(a->length(), 0); - ASSERT_EQ(a->is_empty(), true); - - // Add element - a->append(11); - - // Check size - ASSERT_EQ(a->length(), 1); - ASSERT_EQ(a->is_empty(), false); +class PointWithDefault { +private: + int _x; + int _y; +public: + PointWithDefault(int x, int y) : _x(x), _y(y + 1) {} + PointWithDefault() : PointWithDefault(0, 0) {} - // Clear elements - a->clear(); + bool operator==(const PointWithDefault& other) const { + return _x == other._x && _y == other._y; + } - // Check size - ASSERT_EQ(a->length(), 0); - ASSERT_EQ(a->is_empty(), true); + bool operator!=(const PointWithDefault& other) const { + return !(*this == other); } - template - static void test_iterator(ArrayClass* a) { - // Add elements - for (int i = 0; i < 10; i++) { - a->append(i); + int operator-(const PointWithDefault& other) const { + int i = _x - other._x; + if (i == 0) { + return _y - other._y; + } else { + return i; } + } +}; - // Iterate - int counter = 0; - for (GrowableArrayIterator i = a->begin(); i != a->end(); ++i) { - ASSERT_EQ(*i, counter++); - } +class PointNoAssign { +private: + int _x; + int _y; +public: + // No default constructor + // No copy assign + PointNoAssign(int x, int y) : _x(x), _y(y) {} + PointNoAssign& operator=(PointNoAssign& other) = delete; + // Now 
we have to explicitly define the copy constructor, so that + // some compilers do not complain about: + // "error: definition of implicit copy constructor for 'PointNoAssign' is deprecated because it has a user-declared copy assignment operator" + PointNoAssign(PointNoAssign const&) = default; + + bool operator==(const PointNoAssign& other) const { + return _x == other._x && _y == other._y; + } - // Check count - ASSERT_EQ(counter, 10); + bool operator!=(const PointNoAssign& other) const { + return !(*this == other); } - template - static void test_capacity(ArrayClass* a) { - ASSERT_EQ(a->length(), 0); - a->reserve(50); - ASSERT_EQ(a->length(), 0); - ASSERT_EQ(a->capacity(), 50); - for (int i = 0; i < 50; ++i) { - a->append(i); - } - ASSERT_EQ(a->length(), 50); - ASSERT_EQ(a->capacity(), 50); - a->append(50); - ASSERT_EQ(a->length(), 51); - int capacity = a->capacity(); - ASSERT_GE(capacity, 51); - for (int i = 0; i < 30; ++i) { - a->pop(); + int operator-(const PointNoAssign& other) const { + int i = _x - other._x; + if (i == 0) { + return _y - other._y; + } else { + return i; } - ASSERT_EQ(a->length(), 21); - ASSERT_EQ(a->capacity(), capacity); - a->shrink_to_fit(); - ASSERT_EQ(a->length(), 21); - ASSERT_EQ(a->capacity(), 21); - - a->reserve(50); - ASSERT_EQ(a->length(), 21); - ASSERT_EQ(a->capacity(), 50); - - a->clear(); - ASSERT_EQ(a->length(), 0); - ASSERT_EQ(a->capacity(), 50); - - a->shrink_to_fit(); - ASSERT_EQ(a->length(), 0); - ASSERT_EQ(a->capacity(), 0); } +}; - template - static void test_copy1(ArrayClass* a) { - ASSERT_EQ(a->length(), 1); - ASSERT_EQ(a->at(0), 1); +template<> +Point value_factory(int i) { + return Point(i, i+1); +} - // Only allowed to copy to stack and embedded ResourceObjs +template<> +PointWithDefault value_factory(int i) { + return PointWithDefault(i, i+1); +} +template<> +PointNoAssign value_factory(int i) { + return PointNoAssign(i, i+1); +} - // Copy to stack - { - GrowableArray c(*a); +template<> +int* value_factory(int i) { 
+ // cast int to int ptr, just for sake of test + // multiply i by 4, so that pointer subtract can count + // the distance in integers, and get different results + // for the comparison. + return (int*)(0x100000000L + (long)i*4); +} - ASSERT_EQ(c.length(), 1); - ASSERT_EQ(c.at(0), 1); - } +class CtorDtor { +private: + static int _constructed; + static int _destructed; + int _i; +public: + // Since this class has a non-trivial destructor, we can only use it with + // arena / resource area allocated arrays in ASSERT mode. In PRODUCT mode + // non-trivial destructors are forbidden for GrowableArray. +#ifdef ASSERT + static const bool is_enabled_for_arena = true; +#endif // ASSERT +#ifndef ASSERT + static const bool is_enabled_for_arena = false; +#endif // ASSERT + + CtorDtor() : _i(-1) { _constructed++; }; + explicit CtorDtor(int i) : _i(i) { _constructed++; } + CtorDtor(const CtorDtor& t) : _i(t._i) { _constructed++; } + CtorDtor& operator =(const CtorDtor& t) = default; + CtorDtor(CtorDtor&& t) : _i(t._i) { /* not counted, as t never destructed */ } + CtorDtor& operator =(CtorDtor&& t) = default; + ~CtorDtor() { _destructed++; } + + bool operator==(const CtorDtor& other) const { + return _i == other._i; + } - // Copy to embedded - { - WithEmbeddedArray c(*a); + bool operator!=(const CtorDtor& other) const { + return !(*this == other); + } - ASSERT_EQ(c._a.length(), 1); - ASSERT_EQ(c._a.at(0), 1); - } + int operator-(const CtorDtor& other) const { + return _i - other._i; } - template - static void test_assignment1(ArrayClass* a) { - ASSERT_EQ(a->length(), 1); - ASSERT_EQ(a->at(0), 1); + static int constructed() { return _constructed; } + static int destructed() { return _destructed; } + static void reset() { + _constructed = 0; + _destructed = 0; + } +}; +int CtorDtor::_constructed = 0; +int CtorDtor::_destructed = 0; - // Only allowed to assign to stack and embedded ResourceObjs +template +void reset_type() {} - // Copy to embedded/resource - { - ResourceMark rm; - 
GrowableArray c(1); - c = *a; +template<> +void reset_type() { + CtorDtor::reset(); +} - ASSERT_EQ(c.length(), 1); - ASSERT_EQ(c.at(0), 1); - } +template +void check_constructor_count_for_type(int i) { + // default no check because no count +} - // Copy to embedded/arena - { - Arena arena(mtTest); - GrowableArray c(&arena, 1, 0, 0); - c = *a; +template<> +void check_constructor_count_for_type(int i) { + ASSERT_EQ(CtorDtor::constructed(), 0); +} - ASSERT_EQ(c.length(), 1); - ASSERT_EQ(c.at(0), 1); - } +template +void check_alive_elements_for_type(int i) { + // default no check because no count +} - // Copy to embedded/resource - { - ResourceMark rm; - WithEmbeddedArray c(1); - c._a = *a; +template<> +void check_alive_elements_for_type(int i) { + ASSERT_EQ(CtorDtor::constructed(), CtorDtor::destructed() + i); +} - ASSERT_EQ(c._a.length(), 1); - ASSERT_EQ(c._a.at(0), 1); - } +// -------------- Basic Definitions ------------- - // Copy to embedded/arena - { - Arena arena(mtTest); - WithEmbeddedArray c(&arena, 1); - c._a = *a; +template class TestClosure; +template class ModifyClosure; - ASSERT_EQ(c._a.length(), 1); - ASSERT_EQ(c._a.at(0), 1); - } - } +enum AllocatorArgs { + CAP2, + CAP0, + CAP100, + CAP100LEN100, + CAP200LEN50, +}; - // Supported by all GrowableArrays - enum TestEnum { - Append, - Clear, - Capacity, - Iterator +template +class AllocatorClosure { +private: + GrowableArrayView* _view; +public: + void dispatch(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) { + test->reset(); + dispatch_impl(modify, test, args); + test->finish(this); }; - template - static void do_test(ArrayClass* a, TestEnum test) { - switch (test) { - case Append: - test_append(a); - break; - - case Clear: - test_clear(a); - break; + virtual void dispatch_impl(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) = 0; - case Capacity: - test_capacity(a); - break; + void dispatch_inner(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) { + 
modify->do_modify(this, args); + test->do_test(this); + } - case Iterator: - test_iterator(a); - break; + virtual bool is_C_heap() const = 0; + + // at least set the view so that we do not have to repeat + // forwarding in the subclasses of AllocatorClosure too + // much. + void set_view(GrowableArrayView* view) { _view = view; } + GrowableArrayView& view() const { return *_view; } + + // forwarding to underlying array view + int length() const { return _view->length(); }; + int capacity() const { return _view->capacity(); }; + bool is_empty() const { return _view->is_empty(); } + void clear() { _view->clear(); } + void trunc_to(int length) { _view->trunc_to(length); } + + E& at(int i) { return _view->at(i); } + E* adr_at(int i) const { return _view->adr_at(i); } + E first() const { return _view->first(); } + E top() const { return _view->top(); } + E last() const { return _view->last(); } + GrowableArrayIterator begin() const { return _view->begin(); } + GrowableArrayIterator end() const { return _view->end(); } + E pop() { return _view->pop(); } + + void at_put(int i, const E& elem) { _view->at_put(i, elem); } + void at_swap(int i, int j) { _view->at_swap(i, j); } + bool contains(const E& elem) const { return _view->contains(elem); } + int find(const E& elem) const { return _view->find(elem); } + int find_from_end(const E& elem) const { return _view->find_from_end(elem); } + + template + int find_if(Predicate predicate) const { return _view->find_if(predicate); } + + template + int find_from_end_if(Predicate predicate) const { return _view->find_from_end_if(predicate); } + + void remove(const E& elem) { _view->remove(elem); } + bool remove_if_existing(const E& elem) { return _view->remove_if_existing(elem); } + void remove_at(int i) { _view->remove_at(i); } + void remove_till(int i) { _view->remove_till(i); } + void remove_range(int start, int end) { _view->remove_range(start, end); } + void delete_at(int i) { _view->delete_at(i); } + + // forwarding to 
underlying array with allocation + virtual void append(const E& e) = 0; + virtual bool append_if_missing(const E& e) = 0; + virtual void push(const E& e) = 0; + virtual void reserve(int new_capacity) = 0; + virtual E at_grow(int i, const E& fill) = 0; + virtual void at_put_grow (int i, const E& e, const E& fill) = 0; + virtual void insert_before(int idx, const E& e) = 0; + virtual void insert_before(int idx, const GrowableArrayView* array) = 0; + virtual void appendAll(const GrowableArrayView* array) = 0; + virtual E insert_sorted(const E& e) = 0; + + // Only defined for CHeap: + virtual void clear_and_deallocate() { + ASSERT_TRUE(false); + } + virtual void shrink_to_fit() {; + ASSERT_TRUE(false); + } +}; - default: - fatal("Missing dispatch"); - break; +template +class TestClosure { +public: + virtual void reset() { + reset_type(); + } + virtual void do_test(AllocatorClosure* a) = 0; + virtual void finish(const AllocatorClosure* a) { + // After the array is destructed, all constructed elements + // should again be destructed. But this only holds for + // the CHeap version. The Arena / Resource Area allocated + // array can simply be abandoned and the destructions + // are not guaranteed for the elements. 
+ if (a->is_C_heap()) { + check_alive_elements_for_type(0); } } +}; - // Only supported by GrowableArrays without CHeap data arrays - enum TestNoCHeapEnum { - Copy1, - Assignment1, - }; +template +class ModifyClosure { +public: + virtual void do_modify(AllocatorClosure* a, AllocatorArgs args) = 0; +}; - template - static void do_test(ArrayClass* a, TestNoCHeapEnum test) { - switch (test) { - case Copy1: - test_copy1(a); - break; +// ------------ AllocationClosures ------------ - case Assignment1: - test_assignment1(a); - break; +template +class AllocatorClosureGrowableArray : public AllocatorClosure { +private: + GrowableArray* _array; - default: - fatal("Missing dispatch"); - break; - } +public: + void set_array(GrowableArray* array) { + this->set_view(array); + _array = array; } - enum ModifyEnum { - Append1, - Append1Clear, - Append1ClearAndDeallocate, - NoModify - }; - - template - static void do_modify(ArrayClass* a, ModifyEnum modify) { - switch (modify) { - case Append1: - a->append(1); - break; - - case Append1Clear: - a->append(1); - a->clear(); - break; - - case Append1ClearAndDeallocate: - a->append(1); - a->clear_and_deallocate(); - break; + virtual bool is_C_heap() const override final { return false; }; - case NoModify: - // Nothing to do - break; + virtual void append(const E& e) override final { + _array->append(e); + } - default: - fatal("Missing dispatch"); - break; - } + virtual bool append_if_missing(const E& e) override final { + return _array->append_if_missing(e); } - static const int Max0 = 0; - static const int Max1 = 1; + virtual void push(const E& e) override final { + _array->push(e); + } - template - static void modify_and_test(ArrayClass* array, ModifyEnum modify, T test) { - do_modify(array, modify); - do_test(array, test); + virtual void reserve(int new_capacity) override final { + _array->reserve(new_capacity); } - template - static void with_no_cheap_array(int max, ModifyEnum modify, T test) { - // Resource/Resource allocated - { - 
ResourceMark rm; - GrowableArray* a = new GrowableArray(max); - modify_and_test(a, modify, test); - } + virtual E at_grow(int i, const E& fill) override final { + return _array->at_grow(i, fill); + } - // Resource/Arena allocated - // Combination not supported + // The implementation of at_put_grow uses assignment. We should + // only instantiate a call to it if assignment is allowed. + // I have to do this workaround because the method is virtual. + virtual void at_put_grow (int i, const E& e, const E& fill) override final { + at_put_grow_impl(i, e, fill); + } - // CHeap/Resource allocated - // Combination not supported + template::value)> + void at_put_grow_impl(int i, const E& e, const E& fill) { + _array->at_put_grow(i, e, fill); + } - // CHeap/Arena allocated - // Combination not supported + template::value)> + void at_put_grow_impl(int i, const E& e, const E& fill) { + // do not call if copy-assign not implemented for elements + ASSERT_TRUE(false); + } - // Stack/Resource allocated - { - ResourceMark rm; - GrowableArray a(max); - modify_and_test(&a, modify, test); - } + virtual void insert_before(int idx, const E& e) override final{ + _array->insert_before(idx, e); + } - // Stack/Arena allocated - { - Arena arena(mtTest); - GrowableArray a(&arena, max, 0, 0); - modify_and_test(&a, modify, test); - } + virtual void insert_before(int idx, const GrowableArrayView* array) override final { + _array->insert_before(idx, array); + } - // Embedded/Resource allocated - { - ResourceMark rm; - WithEmbeddedArray w(max); - modify_and_test(&w._a, modify, test); - } + virtual void appendAll(const GrowableArrayView* array) override final { + _array->appendAll(array); + } - // Embedded/Arena allocated - { - Arena arena(mtTest); - WithEmbeddedArray w(&arena, max); - modify_and_test(&w._a, modify, test); - } + virtual E insert_sorted(const E& e) override final { + // virtual function cannot have template, so we just fill in the template here + return _array->template 
insert_sorted>(e); } +}; - static void with_cheap_array(int max, ModifyEnum modify, TestEnum test) { - // Resource/CHeap allocated - // Combination not supported +template +class EmbeddedGrowableArray { +private: + GrowableArray _array; +public: + explicit EmbeddedGrowableArray(int cap) : _array(cap) {} + EmbeddedGrowableArray(int cap, int len, const E& filler) : _array(cap, len, filler) {} + EmbeddedGrowableArray(Arena* a, int cap) : _array(a, cap) {} + EmbeddedGrowableArray(Arena* a, int cap, int len, const E& filler) : _array(a, cap, len, filler) {} + GrowableArray* array() { return &_array; } +}; - // CHeap/CHeap allocated - { - GrowableArray* a = new (mtTest) GrowableArray(max, mtTest); - modify_and_test(a, modify, test); - delete a; - } +#define ARGS_CASES(CASE) { \ + switch (args) { \ + CASE(CAP2, 2) \ + CASE(CAP0, 0) \ + CASE(CAP100, 100) \ + CASE(CAP100LEN100, 100 COMMA 100 COMMA value_factory(-42)) \ + CASE(CAP200LEN50, 200 COMMA 50 COMMA value_factory(-42)) \ + default: \ + ASSERT_TRUE(false); \ + } \ +} - // Stack/CHeap allocated - { - GrowableArray a(max, mtTest); - modify_and_test(&a, modify, test); - } +#define CASE(args, init) { \ + case args: \ + { \ + ResourceMark rm; \ + GrowableArray array(init); \ + dispatch_impl_helper(modify, test, &array, args); \ + break; \ + } \ +} - // Embedded/CHeap allocated - { - WithEmbeddedArray w(max, mtTest); - modify_and_test(&w._a, modify, test); - } +template +class AllocatorClosureStackResourceArea : public AllocatorClosureGrowableArray { +public: + virtual void dispatch_impl(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) override final { + ARGS_CASES(CASE) + // implicit destructor } - static void with_all_types(int max, ModifyEnum modify, TestEnum test) { - with_no_cheap_array(max, modify, test); - with_cheap_array(max, modify, test); + void dispatch_impl_helper(ModifyClosure* modify, TestClosure* test, GrowableArray* array, AllocatorArgs args) { +#ifdef ASSERT + 
ASSERT_TRUE(array->allocated_on_stack_or_embedded()); // itself: stack + ASSERT_TRUE(array->on_resource_area()); // data: resource area +#endif + this->set_array(array); + this->dispatch_inner(modify, test, args); } +}; - static void with_all_types_empty(TestEnum test) { - with_all_types(Max0, NoModify, test); - } +#undef CASE +#define CASE(args, init) { \ + case args: \ + { \ + ResourceMark rm; \ + EmbeddedGrowableArray embedded(init); \ + GrowableArray* array = embedded.array(); \ + dispatch_impl_helper(modify, test, array, args); \ + break; \ + } \ +} - static void with_all_types_max_set(TestEnum test) { - with_all_types(Max1, NoModify, test); - } +template +class AllocatorClosureEmbeddedResourceArea : public AllocatorClosureGrowableArray { +public: + virtual void dispatch_impl(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) override final { + ARGS_CASES(CASE) + // implicit destructor + }; - static void with_all_types_cleared(TestEnum test) { - with_all_types(Max1, Append1Clear, test); + void dispatch_impl_helper(ModifyClosure* modify, TestClosure* test, GrowableArray* array, AllocatorArgs args) { +#ifdef ASSERT + ASSERT_TRUE(array->allocated_on_stack_or_embedded()); // itself: embedded + ASSERT_TRUE(array->on_resource_area()); // data: resource area +#endif + this->set_array(array); + this->dispatch_inner(modify, test, args); } +}; - static void with_all_types_clear_and_deallocated(TestEnum test) { - with_all_types(Max1, Append1ClearAndDeallocate, test); - } +#undef CASE +#define CASE(args, init) { \ + case args: \ + { \ + ResourceMark rm; \ + GrowableArray* array = new GrowableArray(init); \ + dispatch_impl_helper(modify, test, array, args); \ + break; \ + } \ +} - static void with_all_types_all_0(TestEnum test) { - with_all_types_empty(test); - with_all_types_max_set(test); - with_all_types_cleared(test); - with_all_types_clear_and_deallocated(test); - } +template +class AllocatorClosureResourceAreaResourceArea : public 
AllocatorClosureGrowableArray { +public: + virtual void dispatch_impl(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) override final { + ARGS_CASES(CASE) + // no destructors called, array just abandoned + }; - static void with_no_cheap_array_append1(TestNoCHeapEnum test) { - with_no_cheap_array(Max0, Append1, test); + void dispatch_impl_helper(ModifyClosure* modify, TestClosure* test, GrowableArray* array, AllocatorArgs args) { +#ifdef ASSERT + ASSERT_TRUE(array->allocated_on_res_area()); // itself: resource arena + ASSERT_TRUE(array->on_resource_area()); // data: resource area +#endif + this->set_array(array); + this->dispatch_inner(modify, test, args); } }; -TEST_VM_F(GrowableArrayTest, append) { - with_all_types_all_0(Append); +#undef CASE +#define CASE(args, init) { \ + case args: \ + { \ + Arena arena(mtTest); \ + GrowableArray array(&arena, init); \ + dispatch_impl_helper(modify, test, &array, args); \ + break; \ + } \ } -TEST_VM_F(GrowableArrayTest, clear) { - with_all_types_all_0(Clear); -} +template +class AllocatorClosureStackArena : public AllocatorClosureGrowableArray { +public: + virtual void dispatch_impl(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) override final { + ARGS_CASES(CASE) + // implicit destructor + }; -TEST_VM_F(GrowableArrayTest, capacity) { - with_all_types_all_0(Capacity); -} + void dispatch_impl_helper(ModifyClosure* modify, TestClosure* test, GrowableArray* array, AllocatorArgs args) { +#ifdef ASSERT + ASSERT_TRUE(array->allocated_on_stack_or_embedded()); // itself: stack + ASSERT_TRUE(!array->on_resource_area()); // data: arena +#endif + this->set_array(array); + this->dispatch_inner(modify, test, args); + } +}; -TEST_VM_F(GrowableArrayTest, iterator) { - with_all_types_all_0(Iterator); +#undef CASE +#define CASE(args, init) { \ + case args: \ + { \ + Arena arena(mtTest); \ + EmbeddedGrowableArray embedded(&arena, init); \ + GrowableArray* array = embedded.array(); \ + dispatch_impl_helper(modify, 
test, array, args); \ + break; \ + } \ } -TEST_VM_F(GrowableArrayTest, copy) { - with_no_cheap_array_append1(Copy1); -} -TEST_VM_F(GrowableArrayTest, assignment) { - with_no_cheap_array_append1(Assignment1); +template +class AllocatorClosureEmbeddedArena : public AllocatorClosureGrowableArray { +public: + virtual void dispatch_impl(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) override final { + ARGS_CASES(CASE) + // implicit destructor + }; + + void dispatch_impl_helper(ModifyClosure* modify, TestClosure* test, GrowableArray* array, AllocatorArgs args) { +#ifdef ASSERT + ASSERT_TRUE(array->allocated_on_stack_or_embedded()); // itself: embedded + ASSERT_TRUE(!array->on_resource_area()); // data: arena +#endif + this->set_array(array); + this->dispatch_inner(modify, test, args); + } +}; + +#undef CASE +#define CASE(args, init) { \ + case args: \ + { \ + Arena arena(mtTest); \ + GrowableArray* array = new (&arena) GrowableArray(&arena, init); \ + dispatch_impl_helper(modify, test, array, args); \ + break; \ + } \ } + +template +class AllocatorClosureArenaArena : public AllocatorClosureGrowableArray { +public: + virtual void dispatch_impl(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) override final { + ARGS_CASES(CASE) + // no destructors called, array just abandoned + }; + + void dispatch_impl_helper(ModifyClosure* modify, TestClosure* test, GrowableArray* array, AllocatorArgs args) { #ifdef ASSERT -TEST_VM_F(GrowableArrayTest, where) { - WithEmbeddedArray s(1, mtTest); - ASSERT_FALSE(s._a.allocated_on_C_heap()); - ASSERT_TRUE(elements_on_C_heap(&s._a)); + ASSERT_TRUE(array->allocated_on_arena()); // itself: arena + ASSERT_TRUE(!array->on_resource_area()); // data: arena +#endif + this->set_array(array); + this->dispatch_inner(modify, test, args); + } +}; + +template +class AllocatorClosureGrowableArrayCHeap : public AllocatorClosure { +private: + GrowableArrayCHeap* _array; - // Resource/Resource allocated - { - ResourceMark rm; - 
GrowableArray* a = new GrowableArray(); - ASSERT_TRUE(a->allocated_on_res_area()); - ASSERT_TRUE(elements_on_resource_area(a)); +public: + void set_array(GrowableArrayCHeap* array) { + this->set_view(array); + _array = array; } - // Resource/CHeap allocated - // Combination not supported + virtual bool is_C_heap() const override final { return true; }; - // Resource/Arena allocated - // Combination not supported + virtual void append(const E& e) override final { + _array->append(e); + } - // CHeap/Resource allocated - // Combination not supported + virtual bool append_if_missing(const E& e) override final { + return _array->append_if_missing(e); + } - // CHeap/CHeap allocated - { - GrowableArray* a = new (mtTest) GrowableArray(0, mtTest); - ASSERT_TRUE(a->allocated_on_C_heap()); - ASSERT_TRUE(elements_on_C_heap(a)); - delete a; + virtual void push(const E& e) override final { + _array->push(e); } - // CHeap/Arena allocated - // Combination not supported + virtual void reserve(int new_capacity) override final { + _array->reserve(new_capacity); + } - // Stack/Resource allocated - { - ResourceMark rm; - GrowableArray a(0); - ASSERT_TRUE(a.allocated_on_stack_or_embedded()); - ASSERT_TRUE(elements_on_resource_area(&a)); + virtual void shrink_to_fit() override final { + _array->shrink_to_fit(); } - // Stack/CHeap allocated - { - GrowableArray a(0, mtTest); - ASSERT_TRUE(a.allocated_on_stack_or_embedded()); - ASSERT_TRUE(elements_on_C_heap(&a)); + virtual void clear_and_deallocate() override final { + _array->clear_and_deallocate(); } - // Stack/Arena allocated - { - Arena arena(mtTest); - GrowableArray a(&arena, 0, 0, 0); - ASSERT_TRUE(a.allocated_on_stack_or_embedded()); - ASSERT_TRUE(elements_on_arena(&a)); + virtual E at_grow(int i, const E& fill) override final { + return _array->at_grow(i, fill); } - // Embedded/Resource allocated - { - ResourceMark rm; - WithEmbeddedArray w(0); - ASSERT_TRUE(w._a.allocated_on_stack_or_embedded()); - 
ASSERT_TRUE(elements_on_resource_area(&w._a)); + // The implementation of at_put_grow uses assignment. We should + // only instantiate a call to it if assignment is allowed. + // I have to do this workaround because the method is virtual. + virtual void at_put_grow (int i, const E& e, const E& fill) override final { + at_put_grow_impl(i, e, fill); } - // Embedded/CHeap allocated - { - WithEmbeddedArray w(0, mtTest); - ASSERT_TRUE(w._a.allocated_on_stack_or_embedded()); - ASSERT_TRUE(elements_on_C_heap(&w._a)); + template::value)> + void at_put_grow_impl(int i, const E& e, const E& fill) { + _array->at_put_grow(i, e, fill); } - // Embedded/Arena allocated - { - Arena arena(mtTest); - WithEmbeddedArray w(&arena, 0); - ASSERT_TRUE(w._a.allocated_on_stack_or_embedded()); - ASSERT_TRUE(elements_on_arena(&w._a)); + template::value)> + void at_put_grow_impl(int i, const E& e, const E& fill) { + // do not call if copy-assign not implemented for elements + ASSERT_TRUE(false); } -} -TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, copy_with_embedded_cheap, - "assert.!on_C_heap... failed: Copying of CHeap arrays not supported") { - WithEmbeddedArray s(1, mtTest); - // Intentionally asserts that copy of CHeap arrays are not allowed - WithEmbeddedArray c(s); -} + virtual void insert_before(int idx, const E& e) override final{ + _array->insert_before(idx, e); + } + + virtual void insert_before(int idx, const GrowableArrayView* array) override final { + _array->insert_before(idx, array); + } -TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, assignment_with_embedded_cheap, - "assert.!on_C_heap... 
failed: Assignment of CHeap arrays not supported") { - WithEmbeddedArray s(1, mtTest); - WithEmbeddedArray c(1, mtTest); + virtual void appendAll(const GrowableArrayView* array) override final { + _array->appendAll(array); + } + + virtual E insert_sorted(const E& e) override final { + // virtual function cannot have template, so we just fill in the template here + return _array->template insert_sorted>(e); + } +}; - // Intentionally asserts that assignment of CHeap arrays are not allowed - c = s; +template +class EmbeddedGrowableArrayCHeap { +private: + GrowableArrayCHeap _array; +public: + explicit EmbeddedGrowableArrayCHeap(int cap) : _array(cap) {} + EmbeddedGrowableArrayCHeap(int cap, int len, const E& filler) : _array(cap, len, filler) {} + GrowableArrayCHeap* array() { return &_array; } +}; + +#undef CASE +#define CASE(args, init) { \ + case args: \ + { \ + GrowableArrayCHeap array(init); \ + dispatch_impl_helper(modify, test, &array, args); \ + break; \ + } \ } -#endif +template +class AllocatorClosureStackCHeap : public AllocatorClosureGrowableArrayCHeap { +public: + virtual void dispatch_impl(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) override final { + ARGS_CASES(CASE) + // destructor called implicitly, and it first destructs all elements. 
+ }; -TEST(GrowableArrayCHeap, sanity) { - // Stack/CHeap - { - GrowableArrayCHeap a(0); + void dispatch_impl_helper(ModifyClosure* modify, TestClosure* test, GrowableArrayCHeap* array, AllocatorArgs args) { #ifdef ASSERT - ASSERT_TRUE(a.allocated_on_stack_or_embedded()); + ASSERT_TRUE(array->allocated_on_stack_or_embedded()); // itself: stack #endif - ASSERT_TRUE(a.is_empty()); - - a.append(1); - ASSERT_FALSE(a.is_empty()); - ASSERT_EQ(a.at(0), 1); + this->set_array(array); + this->dispatch_inner(modify, test, args); } +}; - // CHeap/CHeap - { - GrowableArrayCHeap* a = new GrowableArrayCHeap(0); -#ifdef ASSERT - ASSERT_TRUE(a->allocated_on_C_heap()); -#endif - ASSERT_TRUE(a->is_empty()); +#undef CASE +#define CASE(args, init) { \ + case args: \ + { \ + EmbeddedGrowableArrayCHeap embedded(init); \ + GrowableArrayCHeap* array = embedded.array(); \ + dispatch_impl_helper(modify, test, array, args); \ + break; \ + } \ +} - a->append(1); - ASSERT_FALSE(a->is_empty()); - ASSERT_EQ(a->at(0), 1); - delete a; - } +template +class AllocatorClosureEmbeddedCHeap : public AllocatorClosureGrowableArrayCHeap { +public: + virtual void dispatch_impl(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) override final { + ARGS_CASES(CASE) + // destructor called implicitly, and it first destructs all elements. 
+ }; - // CHeap/CHeap - nothrow new operator - { - GrowableArrayCHeap* a = new (std::nothrow) GrowableArrayCHeap(0); + void dispatch_impl_helper(ModifyClosure* modify, TestClosure* test, GrowableArrayCHeap* array, AllocatorArgs args) { #ifdef ASSERT - ASSERT_TRUE(a->allocated_on_C_heap()); + ASSERT_TRUE(array->allocated_on_stack_or_embedded()); // itself: embedded #endif - ASSERT_TRUE(a->is_empty()); - - a->append(1); - ASSERT_FALSE(a->is_empty()); - ASSERT_EQ(a->at(0), 1); - delete a; + this->set_array(array); + this->dispatch_inner(modify, test, args); } +}; + +#undef CASE +#define CASE(args, init) { \ + case args: \ + { \ + GrowableArrayCHeap* array = new GrowableArrayCHeap(init); \ + dispatch_impl_helper(modify, test, array, args); \ + delete array; \ + break; \ + } \ } -TEST(GrowableArrayCHeap, find_if) { - struct Element { - int value; +template +class AllocatorClosureCHeapCHeap : public AllocatorClosureGrowableArrayCHeap { +public: + virtual void dispatch_impl(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) override final { + ARGS_CASES(CASE) + // destruction explicit, recursively destructs all elements }; - GrowableArrayCHeap array; - array.push({1}); - array.push({2}); - array.push({3}); - - { - int index = array.find_if([&](const Element& elem) { - return elem.value == 1; - }); - ASSERT_EQ(index, 0); - } - { - int index = array.find_if([&](const Element& elem) { - return elem.value > 1; - }); - ASSERT_EQ(index, 1); + void dispatch_impl_helper(ModifyClosure* modify, TestClosure* test, GrowableArrayCHeap* array, AllocatorArgs args) { +#ifdef ASSERT + ASSERT_TRUE(array->allocated_on_C_heap()); // itself: cheap +#endif + this->set_array(array); + this->dispatch_inner(modify, test, args); } +}; - { - int index = array.find_if([&](const Element& elem) { - return elem.value == 4; - }); - ASSERT_EQ(index, -1); - } +#undef CASE +#define CASE(args, init) { \ + case args: \ + { \ + GrowableArrayCHeap* array = new (std::nothrow) 
GrowableArrayCHeap(init); \ + dispatch_impl_helper(modify, test, array, args); \ + delete array; \ + break; \ + } \ } -TEST(GrowableArrayCHeap, find_from_end_if) { - struct Element { - int value; +template +class AllocatorClosureCHeapCHeapNoThrow : public AllocatorClosureGrowableArrayCHeap { +public: + virtual void dispatch_impl(ModifyClosure* modify, TestClosure* test, AllocatorArgs args) override final { + ARGS_CASES(CASE) + // destruction explicit, recursively destructs all elements }; - GrowableArrayCHeap array; - array.push({1}); - array.push({2}); - array.push({3}); - { - int index = array.find_from_end_if([&](const Element& elem) { - return elem.value == 1; - }); - ASSERT_EQ(index, 0); + void dispatch_impl_helper(ModifyClosure* modify, TestClosure* test, GrowableArrayCHeap* array, AllocatorArgs args) { +#ifdef ASSERT + ASSERT_TRUE(array->allocated_on_C_heap()); // itself: cheap +#endif + this->set_array(array); + this->dispatch_inner(modify, test, args); } +}; - { - int index = array.find_from_end_if([&](const Element& elem) { - return elem.value > 1; - }); - ASSERT_EQ(index, 2); +// ------------ ModifyClosures ------------ + +template +class ModifyClosureEmpty : public ModifyClosure { +public: + virtual void do_modify(AllocatorClosure* a, AllocatorArgs args) override final { + // array is freshly initialized. 
Verify initialization: + switch(args) { + case CAP2: + { + ASSERT_TRUE(a->is_empty()); + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(a->capacity(), 2); + check_constructor_count_for_type(0); + break; + } + case CAP0: + { + ASSERT_TRUE(a->is_empty()); + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(a->capacity(), 0); + check_constructor_count_for_type(0); + break; + } + case CAP100: + { + ASSERT_TRUE(a->is_empty()); + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(a->capacity(), 100); + check_constructor_count_for_type(0); + break; + } + case CAP100LEN100: + { + ASSERT_TRUE(!a->is_empty()); + ASSERT_EQ(a->length(), 100); + ASSERT_EQ(a->capacity(), 100); + check_alive_elements_for_type(100); + // Check elements + for (int i = 0; i < 100; i++) { + EXPECT_EQ(a->at(i), value_factory(-42)); + } + break; + } + case CAP200LEN50: + { + ASSERT_TRUE(!a->is_empty()); + ASSERT_EQ(a->length(), 50); + ASSERT_EQ(a->capacity(), 200); + check_alive_elements_for_type(50); + // Check elements + for (int i = 0; i < 50; i++) { + EXPECT_EQ(a->at(i), value_factory(-42)); + } + break; + } + default: + { + ASSERT_TRUE(false); + break; + } + } } +}; - { - int index = array.find_from_end_if([&](const Element& elem) { - return elem.value == 4; - }); - ASSERT_EQ(index, -1); +template +class ModifyClosureAppend : public ModifyClosure { +public: + virtual void do_modify(AllocatorClosure* a, AllocatorArgs args) override final { + a->clear(); + ASSERT_EQ(a->length(), 0); + check_alive_elements_for_type(0); + + // Test append + for (int i = 0; i < 1000; i++) { + a->append(value_factory(i * 100)); + } + ASSERT_FALSE(a->is_empty()); + + ASSERT_EQ(a->length(), 1000); + check_alive_elements_for_type(1000); + + for (int i = 0; i < 1000; i++) { + ASSERT_EQ(a->at(i), value_factory(i * 100)); + } + + a->clear(); + ASSERT_EQ(a->length(), 0); + check_alive_elements_for_type(0); + + // Test push + for (int i = 0; i < 1000; i++) { + a->push(value_factory(i * 100)); + } + + ASSERT_EQ(a->length(), 1000); + 
check_alive_elements_for_type(1000); + + for (int i = 0; i < 1000; i++) { + ASSERT_EQ(a->at(i), value_factory(i * 100)); + } + + a->clear(); + ASSERT_EQ(a->length(), 0); + check_alive_elements_for_type(0); + + // Test append_if_missing + for (int i = 0; i < 1000; i++) { + ASSERT_TRUE(a->append_if_missing(value_factory(i * 3))); + } + check_alive_elements_for_type(1000); + for (int i = 0; i < 1000; i++) { + ASSERT_EQ(a->append_if_missing(value_factory(i)), i % 3 != 0); + } + check_alive_elements_for_type(1666); + + int j = 0; + for (int i = 0; i < 1000; i++) { + ASSERT_EQ(a->at(i), value_factory(i * 3)); + if (i % 3 != 0) { + ASSERT_EQ(a->at(1000 + j), value_factory(i)); + j++; + } + } + + a->clear(); + ASSERT_EQ(a->length(), 0); + check_alive_elements_for_type(0); + + // Test insert_before, single element + a->insert_before(0, value_factory(3)); + a->insert_before(1, value_factory(5)); + a->insert_before(0, value_factory(0)); + a->insert_before(1, value_factory(1)); + a->insert_before(2, value_factory(2)); + a->insert_before(4, value_factory(4)); + + for (int i = 0; i < 6; i++) { + ASSERT_EQ(a->at(i), value_factory(i)); + } + ASSERT_EQ(a->length(), 6); + check_alive_elements_for_type(6); + + a->clear(); + ASSERT_EQ(a->length(), 0); + check_alive_elements_for_type(0); + + // Test insert_before, with array + GrowableArrayCHeap array; + + for (int i = 0; i < 100; i++) { + array.append(value_factory(i)); + } + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(array.length(), 100); + check_alive_elements_for_type(100); + + a->insert_before(0, &array); + + ASSERT_EQ(a->length(), 100); + ASSERT_EQ(array.length(), 100); + check_alive_elements_for_type(200); + + array.clear(); + check_alive_elements_for_type(100); + + array.append(value_factory(42)); + array.append(value_factory(42)); + + a->insert_before(100, &array); + a->insert_before(0, &array); + + ASSERT_EQ(a->at(0), value_factory(42)); + ASSERT_EQ(a->at(1), value_factory(42)); + for (int i = 0; i < 100; i++) { + 
ASSERT_EQ(a->at(i + 2), value_factory(i)); + } + ASSERT_EQ(a->at(102), value_factory(42)); + ASSERT_EQ(a->at(103), value_factory(42)); + + a->clear(); + + for (int i = 0; i < 10; i++) { + array.clear(); + for (int j = 0; j < 10; j++) { + array.append(value_factory(i*10 + j)); + } + a->appendAll(&array); + } + array.clear(); + + for (int i = 0; i < 100; i++) { + ASSERT_EQ(a->at(i), value_factory(i)); + } + ASSERT_EQ(a->length(), 100); + ASSERT_EQ(array.length(), 0); + check_alive_elements_for_type(100); + } +}; + +template +class ModifyClosureClear : public ModifyClosure { +public: + virtual void do_modify(AllocatorClosure* a, AllocatorArgs args) override final { + a->clear(); + ASSERT_EQ(a->length(), 0); + + // Add elements + for (int i = 0; i < 1000; i++) { + a->append(value_factory(i * 100)); + } + + ASSERT_EQ(a->length(), 1000); + check_alive_elements_for_type(1000); + + int old_capacity = a->capacity(); + + // Clear + a->clear(); + check_alive_elements_for_type(0); + + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(a->capacity(), old_capacity); + } +}; + +template +class ModifyClosureClearAndDeallocate : public ModifyClosure { +public: + virtual void do_modify(AllocatorClosure* a, AllocatorArgs args) override final { + a->clear(); + ASSERT_EQ(a->length(), 0); + + // Add elements + for (int i = 0; i < 1000; i++) { + a->append(value_factory(i * 100)); + } + + ASSERT_EQ(a->length(), 1000); + check_alive_elements_for_type(1000); + + // Clear + if (a->is_C_heap()) { + a->clear_and_deallocate(); + ASSERT_EQ(a->capacity(), 0); + } else { + a->clear(); + } + ASSERT_EQ(a->length(), 0); + check_alive_elements_for_type(0); + } +}; + +template +class ModifyClosureAccess : public ModifyClosure { +public: + virtual void do_modify(AllocatorClosure* a, AllocatorArgs args) override final { + a->clear(); + ASSERT_EQ(a->length(), 0); + + // write + for (int i = 0; i < 1000; i++) { + a->append(value_factory(3*i)); + } + ASSERT_EQ(a->length(), 1000); + check_alive_elements_for_type(1000); 
+ + for (int i = 0; i < 1000; i++) { + ASSERT_EQ(a->at(i), value_factory(3*i)); + ASSERT_EQ(*a->adr_at(i), value_factory(3*i)); + } + + for (int i = 0; i < 1000; i++) { + if (i % 3 == 0) { + ASSERT_TRUE(a->contains(value_factory(i))); + ASSERT_EQ(a->find(value_factory(i)), i/3); + ASSERT_EQ(a->find_from_end(value_factory(i)), i/3); + } else { + ASSERT_FALSE(a->contains(value_factory(i))); + ASSERT_EQ(a->find(value_factory(i)), -1); + ASSERT_EQ(a->find_from_end(value_factory(i)), -1); + } + } + + a->append(value_factory(7)); + a->append(value_factory(31)); + a->append(value_factory(7)); + + ASSERT_EQ(a->find(value_factory(7)), 1000); + ASSERT_EQ(a->find_from_end(value_factory(7)), 1002); + + a->clear(); + ASSERT_EQ(a->length(), 0); + check_alive_elements_for_type(0); + + // write + for (int i = 0; i < 1000; i++) { + a->append(value_factory(i)); + } + + for (int i = 0; i < 1000; i++) { + ASSERT_EQ(a->at(i), value_factory(i)); + } + check_alive_elements_for_type(1000); + + // remove all even numbers: + for (int i = 0; i < 500; i++) { + a->remove(value_factory(2*i)); + check_alive_elements_for_type(1000 - i - 1); + ASSERT_EQ(a->length(), 1000 - i - 1); + } + + // remove rest: + for (int i = 0; i < 1000; i++) { + ASSERT_EQ(a->remove_if_existing(value_factory(i)), i % 2 == 1); + ASSERT_EQ(a->length(), 500 - (i+1)/2); + } + ASSERT_TRUE(a->is_empty()); + check_alive_elements_for_type(0); + + // write + for (int i = 0; i < 1000; i++) { + a->append(value_factory(i)); + } + check_alive_elements_for_type(1000); + + // Test remove_range + for (int i = 0; i < 10; i++) { + ASSERT_EQ(a->length(), 1000 - i*50); + check_alive_elements_for_type(1000 - i*50); + int start = i * 50 + 50; + a->remove_range(start, start + 50); + } + check_alive_elements_for_type(500); + ASSERT_EQ(a->length(), 500); + + for (int i = 0; i < 10; i++) { + for (int j = 0; j < 50; j++) { + ASSERT_EQ(a->at(i * 50 + j), value_factory(i * 100 + j)); + } + } + + // Test remove_till + for (int i = 0; i < 10; i++) { 
+ check_alive_elements_for_type(500 - i*50); + ASSERT_EQ(a->length(), 500 - i*50); + a->remove_till(50); + if (i < 9) { + ASSERT_EQ(a->at(0), value_factory(i*100+100)); + } + } + check_alive_elements_for_type(0); + + // write + for (int i = 0; i < 1000; i++) { + a->append(value_factory(i)); + } + ASSERT_EQ(a->length(), 1000); + check_alive_elements_for_type(1000); + + for (int i = 0; i < 100; i++) { + a->delete_at(100 + i); + } + ASSERT_EQ(a->length(), 900); + check_alive_elements_for_type(900); + + for (int i = 0; i < 100; i++) { + ASSERT_EQ(a->at(i), value_factory(i)); + } + for (int i = 0; i < 100; i++) { + ASSERT_EQ(a->at(100 + i), value_factory(999 - i)); + } + for (int i = 200; i < 900; i++) { + ASSERT_EQ(a->at(i), value_factory(i)); + } + } +}; + +// ------------ TestClosures ------------ + +template +class TestClosureAppend : public TestClosure { + virtual void do_test(AllocatorClosure* a) override final { + a->clear(); + check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + // Add elements + for (int i = 0; i < 10; i++) { + a->append(value_factory(i)); + EXPECT_EQ(a->top(), value_factory(i)); + EXPECT_EQ(a->last(), value_factory(i)); + EXPECT_EQ(a->first(), value_factory(0)); + EXPECT_EQ(a->at(i), value_factory(i)); + EXPECT_EQ(*a->adr_at(i), value_factory(i)); + } + + // Check size + ASSERT_EQ(a->length(), 10); + check_alive_elements_for_type(10); + + // Check elements + for (int i = 0; i < 10; i++) { + EXPECT_EQ(a->at(i), value_factory(i)); + } + }; +}; + +template +class TestClosureAssign : public TestClosure { + virtual void do_test(AllocatorClosure* a) override final { + a->clear(); + check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + a->at_grow(999, value_factory(-1)); + ASSERT_EQ(a->length(), 1000); + check_alive_elements_for_type(1000); + + // write over at + for (int i = 0; i < 1000; i++) { + a->at(i) = value_factory(i); + } + for (int i = 0; i < 1000; i++) { + ASSERT_EQ(a->at(i), value_factory(i)); + 
ASSERT_EQ(*a->adr_at(i), value_factory(i)); + } + check_alive_elements_for_type(1000); + + // write over adr_at + for (int i = 0; i < 1000; i++) { + *a->adr_at(i) = value_factory(2*i); + } + for (int i = 0; i < 1000; i++) { + ASSERT_EQ(a->at(i), value_factory(2*i)); + } + check_alive_elements_for_type(1000); + + // write + for (int i = 0; i < 1000; i++) { + a->at_put(i, value_factory(3*i)); + } + for (int i = 0; i < 1000; i++) { + ASSERT_EQ(a->at(i), value_factory(3*i)); + ASSERT_EQ(*a->adr_at(i), value_factory(3*i)); + } + check_alive_elements_for_type(1000); + + // all zero + for (int i = 0; i < 1000; i++) { + a->at_put(i, value_factory(0)); + } + a->at_put(42, value_factory(1)); + a->at_put(666, value_factory(2)); + + for (int i = 0; i < 1000; i++) { + if (i == 42) { + ASSERT_EQ(a->at(i), value_factory(1)); + } else if (i == 666) { + ASSERT_EQ(a->at(i), value_factory(2)); + } else { + ASSERT_EQ(a->at(i), value_factory(0)); + } + } + check_alive_elements_for_type(1000); + + a->at_swap(42, 666); + + for (int i = 0; i < 1000; i++) { + if (i == 42) { + ASSERT_EQ(a->at(i), value_factory(2)); + } else if (i == 666) { + ASSERT_EQ(a->at(i), value_factory(1)); + } else { + ASSERT_EQ(a->at(i), value_factory(0)); + } + } + check_alive_elements_for_type(1000); + }; +}; + +template +class TestClosureClear : public TestClosure { + virtual void do_test(AllocatorClosure* a) override final { + a->clear(); + check_alive_elements_for_type(0); + + // Check size + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(a->is_empty(), true); + + // Add elements + for (int i = 0; i < 10; i++) { + a->append(value_factory(i)); + } + + // Check size + ASSERT_EQ(a->length(), 10); + ASSERT_EQ(a->is_empty(), false); + check_alive_elements_for_type(10); + + // Clear elements + a->clear(); + check_alive_elements_for_type(0); + + // Check size + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(a->is_empty(), true); + + // Add element + a->append(value_factory(11)); + + // Check size + ASSERT_EQ(a->length(), 1); + 
ASSERT_EQ(a->is_empty(), false); + check_alive_elements_for_type(1); + + // Clear elements + a->clear(); + check_alive_elements_for_type(0); + + // Check size + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(a->is_empty(), true); + + // Add elements + for (int i = 0; i < 100; i++) { + a->append(value_factory(i)); + } + ASSERT_EQ(a->length(), 100); + check_alive_elements_for_type(100); + + int old_capacity = a->capacity(); + a->trunc_to(50); + ASSERT_EQ(a->length(), 50); + ASSERT_EQ(a->capacity(), old_capacity); + + // Add elements + for (int i = 0; i < 50; i++) { + ASSERT_EQ(a->at(i), value_factory(i)); + } + }; +}; + +template +class TestClosureIterator : public TestClosure { + virtual void do_test(AllocatorClosure* a) override final { + a->clear(); + check_alive_elements_for_type(0); + + // Add elements + for (int i = 0; i < 10; i++) { + a->append(value_factory(i)); + } + check_alive_elements_for_type(10); + + // Iterate + int counter = 0; + for (GrowableArrayIterator i = a->begin(); i != a->end(); ++i) { + ASSERT_EQ(*i, value_factory(counter++)); + } + + // Check count + ASSERT_EQ(counter, 10); + check_alive_elements_for_type(10); + }; +}; + +template +class TestClosureCapacity : public TestClosure { + virtual void do_test(AllocatorClosure* a) override final { + a->clear(); + check_alive_elements_for_type(0); + + int old_capacity = a->capacity(); + ASSERT_EQ(a->length(), 0); + a->reserve(50); + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(a->capacity(), MAX2(50, old_capacity)); + check_alive_elements_for_type(0); + + for (int i = 0; i < 50; ++i) { + a->append(value_factory(i)); + } + ASSERT_EQ(a->length(), 50); + ASSERT_EQ(a->capacity(), MAX2(50, old_capacity)); + check_alive_elements_for_type(50); + + a->append(value_factory(50)); + ASSERT_EQ(a->length(), 51); + check_alive_elements_for_type(51); + + int capacity = a->capacity(); + ASSERT_GE(capacity, 51); + for (int i = 0; i < 30; ++i) { + a->pop(); + } + ASSERT_EQ(a->length(), 21); + ASSERT_EQ(a->capacity(), capacity); + 
check_alive_elements_for_type(21); + + if (a->is_C_heap()) { + // shrink_to_fit only implemented on CHeap + a->shrink_to_fit(); + ASSERT_EQ(a->length(), 21); + ASSERT_EQ(a->capacity(), 21); + check_alive_elements_for_type(21); + + a->reserve(50); + ASSERT_EQ(a->length(), 21); + ASSERT_EQ(a->capacity(), 50); + check_alive_elements_for_type(21); + + a->clear(); + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(a->capacity(), 50); + check_alive_elements_for_type(0); + + a->shrink_to_fit(); + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(a->capacity(), 0); + check_alive_elements_for_type(0); + } + }; +}; + +template +class TestClosureCompare : public TestClosure { + virtual void do_test(AllocatorClosure* a) override final { + a->clear(); + check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + GrowableArrayCHeap array; + check_alive_elements_for_type(0); + + ASSERT_TRUE(a->view() == array); + ASSERT_FALSE(a->view() != array); + + for (int i = 0; i < 100; i++) { + array.append(value_factory(i)); + } + + ASSERT_EQ(a->length(), 0); + ASSERT_EQ(array.length(), 100); + check_alive_elements_for_type(100); + + ASSERT_FALSE(a->view() == array); + ASSERT_TRUE(a->view() != array); + + for (int i = 0; i < 100; i++) { + a->append(value_factory(i)); + } + + ASSERT_EQ(a->length(), 100); + ASSERT_EQ(array.length(), 100); + check_alive_elements_for_type(200); + + ASSERT_TRUE(a->view() == array); + ASSERT_FALSE(a->view() != array); + }; +}; + +template +class TestClosureFindIf : public TestClosure { + virtual void do_test(AllocatorClosure* a) override final { + a->clear(); + ASSERT_EQ(a->length(), 0); + check_alive_elements_for_type(0); + + // Add elements + for (int i = 0; i < 10; i++) { + a->append(value_factory(i)); + } + a->append(value_factory(20)); + a->append(value_factory(20)); + a->append(value_factory(42)); + check_alive_elements_for_type(13); + + for (int i = 0; i < 10; i++) { + int index = a->find_if([&](const E& elem) { + return elem == value_factory(i); + }); + 
ASSERT_EQ(index, i); + } + + { + int index = a->find_if([&](const E& elem) { + return elem == value_factory(20); + }); + ASSERT_EQ(index, 10); + } + + { + int index = a->find_if([&](const E& elem) { + return elem == value_factory(100); + }); + ASSERT_EQ(index, -1); + } + + { + int index = a->find_if([&](const E& elem) { + return elem == value_factory(-100); + }); + ASSERT_EQ(index, -1); + } + }; +}; + +template +class TestClosureFindFromEndIf : public TestClosure { + virtual void do_test(AllocatorClosure* a) override final { + a->clear(); + check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + // Add elements + for (int i = 0; i < 10; i++) { + a->append(value_factory(i)); + } + a->append(value_factory(20)); + a->append(value_factory(20)); + a->append(value_factory(42)); + check_alive_elements_for_type(13); + + for (int i = 0; i < 10; i++) { + int index = a->find_from_end_if([&](const E& elem) { + return elem == value_factory(i); + }); + ASSERT_EQ(index, i); + } + + { + int index = a->find_from_end_if([&](const E& elem) { + return elem == value_factory(20); + }); + ASSERT_EQ(index, 11); + } + + { + int index = a->find_from_end_if([&](const E& elem) { + return elem == value_factory(100); + }); + ASSERT_EQ(index, -1); + } + + { + int index = a->find_from_end_if([&](const E& elem) { + return elem == value_factory(-100); + }); + ASSERT_EQ(index, -1); + } + }; +}; + +template +class TestClosureAtGrow : public TestClosure { + virtual void do_test(AllocatorClosure* a) override final { + a->clear(); + check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + a->reserve(100); + + for (int j = 1; j < 100; j++) { + int new_len = j * 7; + a->at_grow(new_len - 1, value_factory(j)); + ASSERT_EQ(a->length(), new_len); + check_alive_elements_for_type(new_len); + + // Check elements + for (int k = 0; k < new_len; k++) { + EXPECT_EQ(a->at_grow(k, value_factory(-1)), value_factory(k / 7 + 1)); + } + ASSERT_EQ(a->length(), new_len); + } + + a->clear(); + 
check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + int old_capacity = a->capacity(); + a->at_grow(old_capacity - 1, value_factory(0)); + ASSERT_EQ(a->length(), old_capacity); + ASSERT_EQ(a->capacity(), old_capacity); + check_alive_elements_for_type(old_capacity); + + for (int j = 1; j < 100; j++) { + int target = j * 31; + a->at_put_grow(target, value_factory(target), value_factory(-2)); + int new_length = MAX2(target + 1, old_capacity); + ASSERT_EQ(a->length(), new_length); + + // Check elements + for (int k = 0; k < new_length; k++) { + if (k != 0 && (k % 31) == 0 && k <= target) { + EXPECT_EQ(a->at(k), value_factory(k)); + } else if (k < old_capacity) { + EXPECT_EQ(a->at(k), value_factory(0)); + } else { + EXPECT_EQ(a->at(k), value_factory(-2)); + } + } + } + }; +}; + +template +class TestClosureAtGrowDefault : public TestClosure { + virtual void do_test(AllocatorClosure* a) override final { + a->clear(); + check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + a->reserve(100); + + for (int j = 1; j < 100; j++) { + int new_len = j * 7; + a->at_grow(new_len - 1, E()); // simulate default argument + ASSERT_EQ(a->length(), new_len); + check_alive_elements_for_type(new_len); + + // Check elements + for (int k = 0; k < new_len; k++) { + EXPECT_EQ(a->at_grow(k, value_factory(-1)), E()); + } + ASSERT_EQ(a->length(), new_len); + } + + a->clear(); + check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + int old_capacity = a->capacity(); + a->at_grow(old_capacity - 1, value_factory(-3)); + ASSERT_EQ(a->length(), old_capacity); + ASSERT_EQ(a->capacity(), old_capacity); + check_alive_elements_for_type(old_capacity); + + for (int j = 1; j < 100; j++) { + int target = j * 31; + a->at_put_grow(target, value_factory(target), E()); // simulate default argument + int new_length = MAX2(target + 1, old_capacity); + ASSERT_EQ(a->length(), new_length); + + // Check elements + for (int k = 0; k < new_length; k++) { + if (k != 0 && (k % 31) == 0 
&& k <= target) { + EXPECT_EQ(a->at(k), value_factory(k)); + } else if (k < old_capacity) { + EXPECT_EQ(a->at(k), value_factory(-3)); + } else { + EXPECT_EQ(a->at(k), E()); + } + } + } + }; +}; + +template +class TestClosureSort : public TestClosure { + virtual void do_test(AllocatorClosure* a) override final { + a->clear(); + check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + // Test sort + for (int i = 0; i < 997; i++) { + a->append(value_factory((i * 31) % 997)); + } + check_alive_elements_for_type(997); + ASSERT_EQ(a->length(), 997); + + a->view().sort(value_compare_ptr); + + check_alive_elements_for_type(997); + ASSERT_EQ(a->length(), 997); + for (int i = 0; i < 977; i++) { + ASSERT_EQ(a->at(i), value_factory(i)); + } + + a->clear(); + check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + // Test sort, strided + for (int i = 0; i < 1000; i++) { + a->append(value_factory(-i)); + a->append(value_factory(i)); + a->append(value_factory(-i)); + } + check_alive_elements_for_type(3000); + ASSERT_EQ(a->length(), 3000); + + a->view().sort(value_compare_ptr, 3); + + check_alive_elements_for_type(3000); + ASSERT_EQ(a->length(), 3000); + for (int i = 0; i < 1000; i++) { + ASSERT_EQ(a->at(3 * i + 0), value_factory(-999 + i)); + ASSERT_EQ(a->at(3 * i + 1), value_factory(999 - i)); + ASSERT_EQ(a->at(3 * i + 2), value_factory(-999 + i)); + } + + a->clear(); + check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + // Test find_sorted + for (int i = 0; i < 1000; i++) { + a->append(value_factory(7 * i)); + } + + for (int i = 0; i < 1000; i++) { + bool found = (i % 2 == 0); // init random + int j = a->view().template find_sorted>(value_factory(i), found); + if (i % 7 == 0) { + ASSERT_TRUE(found); + ASSERT_EQ(j, i / 7); + } else { + ASSERT_FALSE(found); + } + } + + a->clear(); + check_alive_elements_for_type(0); + ASSERT_EQ(a->length(), 0); + + // Test insert_sorted + for (int i = 0; i < 997; i++) { + a->insert_sorted(value_factory((i * 31) 
% 997)); + } + + check_alive_elements_for_type(997); + ASSERT_EQ(a->length(), 997); + for (int i = 0; i < 977; i++) { + ASSERT_EQ(a->at(i), value_factory(i)); + } + }; +}; + +// Test fixture to work with TEST_VM_F +class GrowableArrayTest : public ::testing::Test { +protected: + template + static void run_test_modify_allocate_arena(TestClosure* test, ModifyClosure* modify, AllocatorArgs args) { + AllocatorClosureStackResourceArea allocator_s_r; + allocator_s_r.dispatch(modify, test, args); + + AllocatorClosureEmbeddedResourceArea allocator_e_r; + allocator_e_r.dispatch(modify, test, args); + + AllocatorClosureResourceAreaResourceArea allocator_r_r; + allocator_r_r.dispatch(modify, test, args); + + AllocatorClosureStackArena allocator_s_a; + allocator_s_a.dispatch(modify, test, args); + + AllocatorClosureEmbeddedArena allocator_e_a; + allocator_e_a.dispatch(modify, test, args); + + AllocatorClosureArenaArena allocator_a_a; + allocator_a_a.dispatch(modify, test, args); + } + + template + static void run_test_modify_allocate_arena(TestClosure* test, ModifyClosure* modify, AllocatorArgs args) { + // not enabled + } + + template + static void run_test_modify_allocate_cheap(TestClosure* test, ModifyClosure* modify, AllocatorArgs args) { + AllocatorClosureStackCHeap allocator_s_c; + allocator_s_c.dispatch(modify, test, args); + + AllocatorClosureEmbeddedCHeap allocator_e_c; + allocator_e_c.dispatch(modify, test, args); + + AllocatorClosureCHeapCHeap allocator_c_c; + allocator_c_c.dispatch(modify, test, args); + + AllocatorClosureCHeapCHeapNoThrow allocator_c_c_nt; + allocator_c_c_nt.dispatch(modify, test, args); + } + + template + static void run_test_modify_allocate_cheap(TestClosure* test, ModifyClosure* modify, AllocatorArgs args) { + // not enabled + } + + template + static void run_test_modify_allocate_args(TestClosure* test, ModifyClosure* modify, AllocatorArgs args) { + run_test_modify_allocate_arena(test, modify, args); + run_test_modify_allocate_cheap(test, 
modify, args); + } + + template + static void run_test_modify_allocate(TestClosure* test, ModifyClosure* modify) { + run_test_modify_allocate_args(test, modify, CAP2); + run_test_modify_allocate_args(test, modify, CAP0); + run_test_modify_allocate_args(test, modify, CAP100); + run_test_modify_allocate_args(test, modify, CAP100LEN100); + run_test_modify_allocate_args(test, modify, CAP200LEN50); + } + + template + static void run_test_modify(TestClosure* test) { + ModifyClosureEmpty modify_empty; + run_test_modify_allocate(test, &modify_empty); + + ModifyClosureAppend modify_append; + run_test_modify_allocate(test, &modify_append); + + ModifyClosureAccess modify_access; + run_test_modify_allocate(test, &modify_access); + + ModifyClosureClear modify_clear; + run_test_modify_allocate(test, &modify_clear); + + ModifyClosureClearAndDeallocate modify_deallocate; + run_test_modify_allocate(test, &modify_deallocate); + } + + template + static void run_test_append() { + TestClosureAppend test; + run_test_modify(&test); + } + + template + static void run_test_assign() { + TestClosureAssign test; + run_test_modify(&test); + } + + template + static void run_test_clear() { + TestClosureClear test; + run_test_modify(&test); + } + + template + static void run_test_iterator() { + TestClosureIterator test; + run_test_modify(&test); + } + + template + static void run_test_capacity() { + TestClosureCapacity test; + run_test_modify(&test); + } + + template + static void run_test_compare() { + TestClosureCompare test; + run_test_modify(&test); + } + + template + static void run_test_find_if() { + TestClosureFindIf test; + run_test_modify(&test); + } + + template + static void run_test_find_from_end_if() { + TestClosureFindFromEndIf test; + run_test_modify(&test); + } + + template + static void run_test_at_grow() { + TestClosureAtGrow test; + run_test_modify(&test); + } + + template + static void run_test_at_grow_default() { + TestClosureAtGrowDefault test; + run_test_modify(&test); + } 
+ + template + static void run_test_sort() { + TestClosureSort test; + run_test_modify(&test); + } +}; + +TEST_VM_F(GrowableArrayTest, append_int) { + run_test_append(); +} + +TEST_VM_F(GrowableArrayTest, append_ptr) { + run_test_append(); +} + +TEST_VM_F(GrowableArrayTest, append_point) { + run_test_append(); +} + +TEST_VM_F(GrowableArrayTest, append_point_with_default) { + run_test_append(); +} + +TEST_VM_F(GrowableArrayTest, append_point_no_assign) { + run_test_append(); +} + +TEST_VM_F(GrowableArrayTest, append_ctor_dtor) { + run_test_append(); +} + +TEST_VM_F(GrowableArrayTest, assign_int) { + run_test_assign(); +} + +TEST_VM_F(GrowableArrayTest, assign_ptr) { + run_test_assign(); +} + +TEST_VM_F(GrowableArrayTest, assign_point) { + run_test_assign(); +} + +TEST_VM_F(GrowableArrayTest, assign_point_with_default) { + run_test_assign(); +} + +// No assign test for PointNoAssign + +TEST_VM_F(GrowableArrayTest, assign_ctor_dtor) { + run_test_append(); +} + +TEST_VM_F(GrowableArrayTest, clear_int) { + run_test_clear(); +} + +TEST_VM_F(GrowableArrayTest, clear_ptr) { + run_test_clear(); +} + +TEST_VM_F(GrowableArrayTest, clear_point) { + run_test_clear(); +} + +TEST_VM_F(GrowableArrayTest, clear_point_with_default) { + run_test_clear(); +} + +TEST_VM_F(GrowableArrayTest, clear_point_no_assign) { + run_test_clear(); +} + +TEST_VM_F(GrowableArrayTest, clear_ctor_dtor) { + run_test_clear(); +} + +TEST_VM_F(GrowableArrayTest, iterator_int) { + run_test_iterator(); +} + +TEST_VM_F(GrowableArrayTest, iterator_ptr) { + run_test_iterator(); +} + +TEST_VM_F(GrowableArrayTest, iterator_point) { + run_test_iterator(); +} + +TEST_VM_F(GrowableArrayTest, iterator_point_with_default) { + run_test_iterator(); +} + +TEST_VM_F(GrowableArrayTest, iterator_point_no_assign) { + run_test_iterator(); +} + +TEST_VM_F(GrowableArrayTest, iterator_ctor_dtor) { + run_test_iterator(); +} + +TEST_VM_F(GrowableArrayTest, capacity_int) { + run_test_capacity(); +} + +TEST_VM_F(GrowableArrayTest, 
capacity_ptr) { + run_test_capacity(); +} + +TEST_VM_F(GrowableArrayTest, capacity_point) { + run_test_capacity(); +} + +TEST_VM_F(GrowableArrayTest, capacity_point_with_default) { + run_test_capacity(); +} + +TEST_VM_F(GrowableArrayTest, capacity_point_no_assign) { + run_test_capacity(); +} + +TEST_VM_F(GrowableArrayTest, capacity_ctor_dtor) { + run_test_capacity(); +} + +TEST_VM_F(GrowableArrayTest, compare_int) { + run_test_compare(); +} + +TEST_VM_F(GrowableArrayTest, compare_ptr) { + run_test_compare(); +} + +TEST_VM_F(GrowableArrayTest, compare_point) { + run_test_compare(); +} + +TEST_VM_F(GrowableArrayTest, compare_point_with_default) { + run_test_compare(); +} + +TEST_VM_F(GrowableArrayTest, compare_point_no_assign) { + run_test_compare(); +} + +TEST_VM_F(GrowableArrayTest, compare_ctor_dtor) { + run_test_compare(); +} + +TEST_VM_F(GrowableArrayTest, find_if_int) { + run_test_find_if(); +} + +TEST_VM_F(GrowableArrayTest, find_if_ptr) { + run_test_find_if(); +} + +TEST_VM_F(GrowableArrayTest, find_if_point) { + run_test_find_if(); +} + +TEST_VM_F(GrowableArrayTest, find_if_point_with_default) { + run_test_find_if(); +} + +TEST_VM_F(GrowableArrayTest, find_if_point_no_assign) { + run_test_find_if(); +} + +TEST_VM_F(GrowableArrayTest, find_if_ctor_dtor) { + run_test_find_if(); +} + +TEST_VM_F(GrowableArrayTest, find_from_end_if_int) { + run_test_find_from_end_if(); +} + +TEST_VM_F(GrowableArrayTest, find_from_end_if_ptr) { + run_test_find_from_end_if(); +} + +TEST_VM_F(GrowableArrayTest, find_from_end_if_point) { + run_test_find_from_end_if(); +} + +TEST_VM_F(GrowableArrayTest, find_from_end_if_point_with_default) { + run_test_find_from_end_if(); +} + +TEST_VM_F(GrowableArrayTest, find_from_end_if_point_no_assign) { + run_test_find_from_end_if(); +} + +TEST_VM_F(GrowableArrayTest, find_from_end_if_ctor_dtor) { + run_test_find_from_end_if(); +} + +TEST_VM_F(GrowableArrayTest, at_grow_int) { + run_test_at_grow(); +} + +TEST_VM_F(GrowableArrayTest, at_grow_ptr) 
{ + run_test_at_grow(); +} + +TEST_VM_F(GrowableArrayTest, at_grow_point) { + run_test_at_grow(); +} + +TEST_VM_F(GrowableArrayTest, at_grow_point_with_default) { + run_test_at_grow(); +} + +// PointNoAssign: assign not implemented, cannot test at_put_grow + +TEST_VM_F(GrowableArrayTest, at_grow_ctor_dtor) { + run_test_at_grow(); +} + +TEST_VM_F(GrowableArrayTest, at_grow_default_int) { + run_test_at_grow_default(); +} + +TEST_VM_F(GrowableArrayTest, at_grow_default_ptr) { + run_test_at_grow_default(); +} + +// Point: default not implemented, so cannot test! + +TEST_VM_F(GrowableArrayTest, at_grow_default_point_with_default) { + run_test_at_grow_default(); +} + +// PointNoAssign: default not implemented, so cannot test! +// PointNoAssign: assign not implemented, cannot test at_put_grow + +TEST_VM_F(GrowableArrayTest, at_grow_default_ctor_dtor) { + run_test_at_grow_default(); +} + +TEST_VM_F(GrowableArrayTest, sort_int) { + run_test_sort(); +} + +TEST_VM_F(GrowableArrayTest, sort_ptr) { + run_test_sort(); +} + +TEST_VM_F(GrowableArrayTest, sort_point) { + run_test_sort(); +} + +TEST_VM_F(GrowableArrayTest, sort_point_with_default) { + run_test_sort(); +} + +TEST_VM_F(GrowableArrayTest, sort_point_no_assign) { + run_test_sort(); +} + +TEST_VM_F(GrowableArrayTest, sort_ctor_dtor) { + run_test_sort(); +} + +#ifdef ASSERT +TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, unallowed_alloc_cheap_res_area, + ".*GrowableArray cannot be C heap allocated") { + GrowableArray* array = new (mtTest) GrowableArray(); +} + +TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, unallowed_alloc_cheap_arena, + ".*GrowableArray cannot be C heap allocated") { + Arena arena(mtTest); + GrowableArray* array = new (mtTest) GrowableArray(&arena); +} + +TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, unallowed_alloc_arena_res_area, + ".*if GrowableArray is arena allocated, then the elements must be from the same arena") { + Arena arena(mtTest); + GrowableArray* array = new (&arena) GrowableArray(); +} + 
+TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, unallowed_alloc_res_area_arena_leak, + ".*memory leak: allocating without ResourceMark") { + // Missing ResourceMark + Arena arena(mtTest); + GrowableArray* array = new GrowableArray(&arena); +} + +TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, unallowed_alloc_res_area_arena, + ".*The elements must be resource area allocated if the GrowableArray itself is") { + ResourceMark rm; + Arena arena(mtTest); + GrowableArray* array = new GrowableArray(&arena); +} + +TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, unallowed_alloc_arena_arena, + ".*if GrowableArray is arena allocated, then the elements must be from the same arena") { + Arena arena1(mtTest); + Arena arena2(mtTest); + GrowableArray* array = new (&arena1) GrowableArray(&arena2); +} +#endif + +#define TEST_SWAP(a1, a2) { \ + int* a1_adr = a1.adr_at(0); \ + int* a2_adr = a2.adr_at(0); \ + a1.swap(&a2); \ + ASSERT_EQ(a1.adr_at(0), a2_adr); \ + ASSERT_EQ(a2.adr_at(0), a1_adr); \ + ASSERT_EQ(a1.capacity(), 300); \ + ASSERT_EQ(a1.length(), 150); \ + ASSERT_EQ(a2.capacity(), 200); \ + ASSERT_EQ(a2.length(), 100); \ +} + +TEST_VM_F(GrowableArrayTest, swap_s_ra_s_ra) { + ResourceMark rm; + GrowableArray a1(200, 100, 1); + GrowableArray a2(300, 150, 2); + TEST_SWAP(a1, a2) +} + +TEST_VM_F(GrowableArrayTest, swap_ra_ra_s_ra) { + ResourceMark rm; + GrowableArray* a1 = new GrowableArray(200, 100, 1); + GrowableArray a2(300, 150, 2); + TEST_SWAP((*a1), a2) +} + +TEST_VM_F(GrowableArrayTest, swap_s_a_s_a) { + Arena arena(mtTest); + GrowableArray a1(&arena, 200, 100, 1); + GrowableArray a2(&arena, 300, 150, 2); + TEST_SWAP(a1, a2) +} + +TEST_VM_F(GrowableArrayTest, swap_a_a_s_a) { + Arena arena(mtTest); + GrowableArray *a1 = new (&arena) GrowableArray(&arena, 200, 100, 1); + GrowableArray a2(&arena, 300, 150, 2); + TEST_SWAP((*a1), a2) +} + +TEST_VM_F(GrowableArrayTest, swap_s_c_s_c) { + GrowableArrayCHeap a1(200, 100, 1); + GrowableArrayCHeap a2(300, 150, 2); + TEST_SWAP(a1, a2) 
+} + +TEST_VM_F(GrowableArrayTest, swap_c_c_s_c) { + GrowableArrayCHeap* a1 = new GrowableArrayCHeap(200, 100, 1); + GrowableArrayCHeap a2(300, 150, 2); + TEST_SWAP((*a1), a2) +} + +#ifdef ASSERT +TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, swap_s_ra_s_a, + ".*must have same arena") { + Arena arena(mtTest); + GrowableArray a1(100, 100, 1); + GrowableArray a2(&arena, 100, 100, 1); + a1.swap(&a2); +} + +TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, swap_s_a1_s_a2, + ".*must have same arena") { + Arena arena1(mtTest); + Arena arena2(mtTest); + GrowableArray a1(&arena1, 100, 100, 1); + GrowableArray a2(&arena2, 100, 100, 1); + a1.swap(&a2); +} + +TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, swap_s_ra_s_ra, + ".*same nesting if using resource area") { + ResourceMark rm1; + GrowableArray a1(100, 100, 1); + ResourceMark rm2; + GrowableArray a2(100, 100, 1); + a1.swap(&a2); +} + +TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, resource_area_realloc_scopes, + ".*allocation bug: GrowableArray could grow within nested ResourceMark") { + ResourceMark rm1; + GrowableArray a(10, 10, 1); + ResourceMark rm2; + for (int i = 0; i < 1000; i++) { + a.append(i); } } + +TEST_VM_ASSERT_MSG(GrowableArrayAssertingTest, insert_before_with_itself, + ".*cannot insert itself to itself") { + ResourceMark rm1; + GrowableArray a(10, 10, 1); + a.insert_before(5, &a); +} +#endif + +#define TEST_SHALLOW_ASSIGN(a1, a2) { \ + int* a1_adr = a1.adr_at(0); \ + int* a2_adr = a2.adr_at(0); \ + a2 = a1; \ + ASSERT_EQ(a1.adr_at(0), a1_adr); \ + ASSERT_EQ(a2.adr_at(0), a1_adr); \ + ASSERT_EQ(a1.capacity(), 200); \ + ASSERT_EQ(a1.length(), 100); \ + ASSERT_EQ(a2.capacity(), 200); \ + ASSERT_EQ(a2.length(), 100); \ +} + +#define TEST_SHALLOW_COPY(a1) { \ + int* a1_adr = a1.adr_at(0); \ + GrowableArray a2(a1); \ + ASSERT_EQ(a1.adr_at(0), a1_adr); \ + ASSERT_EQ(a2.adr_at(0), a1_adr); \ + ASSERT_EQ(a1.capacity(), 200); \ + ASSERT_EQ(a1.length(), 100); \ + ASSERT_EQ(a2.capacity(), 200); \ + 
ASSERT_EQ(a2.length(), 100); \ +} + +TEST_VM_F(GrowableArrayTest, shallow_assign_s_ra_s_ra) { + ResourceMark rm; + GrowableArray a1(200, 100, 1); + GrowableArray a2(300, 150, 2); + TEST_SHALLOW_ASSIGN(a1, a2); +} + +TEST_VM_F(GrowableArrayTest, shallow_assign_s_a_s_a) { + Arena arena(mtTest); + GrowableArray a1(&arena, 200, 100, 1); + GrowableArray a2(&arena, 300, 150, 2); + TEST_SHALLOW_ASSIGN(a1, a2); +} + +TEST_VM_F(GrowableArrayTest, shallow_copy_s_ra_s_ra) { + ResourceMark rm; + GrowableArray a1(200, 100, 1); + TEST_SHALLOW_COPY(a1); +} + +TEST_VM_F(GrowableArrayTest, shallow_copy_s_a_s_a) { + Arena arena(mtTest); + GrowableArray a1(&arena, 200, 100, 1); + TEST_SHALLOW_COPY(a1); +}