diff --git a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
index 425c4ceca74..d2843757402 100644
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
@@ -2016,11 +2016,11 @@ void MacroAssembler::warn(const char* msg) {
 
 // If a constant does not fit in an immediate field, generate some
 // number of MOV instructions and then perform the operation.
-void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
+void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm,
                                            add_sub_imm_insn insn1,
                                            add_sub_reg_insn insn2) {
   assert(Rd != zr, "Rd = zr and not setting flags?");
-  if (operand_valid_for_add_sub_immediate((int)imm)) {
+  if (operand_valid_for_add_sub_immediate(imm)) {
     (this->*insn1)(Rd, Rn, imm);
   } else {
     if (uabs(imm) < (1 << 24)) {
@@ -2036,15 +2036,15 @@ void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned im
 
 // Seperate vsn which sets the flags. Optimisations are more restricted
 // because we must set the flags correctly.
-void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
+void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm,
                                              add_sub_imm_insn insn1,
                                              add_sub_reg_insn insn2) {
-  if (operand_valid_for_add_sub_immediate((int)imm)) {
+  if (operand_valid_for_add_sub_immediate(imm)) {
     (this->*insn1)(Rd, Rn, imm);
   } else {
     assert_different_registers(Rd, Rn);
     assert(Rd != zr, "overflow in immediate operand");
-    mov(Rd, (uint64_t)imm);
+    mov(Rd, imm);
     (this->*insn2)(Rd, Rn, Rd, LSL, 0);
   }
 }
diff --git a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
index 593411634b8..33c003917b0 100644
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
@@ -181,7 +181,7 @@ class MacroAssembler: public Assembler {
 
   template<class T>
   inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
-  inline void cmp(Register Rd, unsigned imm) { subs(zr, Rd, imm); }
+  inline void cmp(Register Rd, unsigned imm) { Assembler::subs(zr, Rd, imm); }
 
   inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
   inline void cmn(Register Rd, unsigned imm) { adds(zr, Rd, imm); }
@@ -202,7 +202,7 @@ class MacroAssembler: public Assembler {
 
   inline void movw(Register Rd, Register Rn) {
     if (Rd == sp || Rn == sp) {
-      addw(Rd, Rn, 0U);
+      Assembler::addw(Rd, Rn, 0U);
     } else {
       orrw(Rd, zr, Rn);
     }
@@ -211,7 +211,7 @@ class MacroAssembler: public Assembler {
     assert(Rd != r31_sp && Rn != r31_sp, "should be");
     if (Rd == Rn) {
     } else if (Rd == sp || Rn == sp) {
-      add(Rd, Rn, 0U);
+      Assembler::add(Rd, Rn, 0U);
     } else {
       orr(Rd, zr, Rn);
     }
@@ -1074,16 +1074,16 @@ class MacroAssembler: public Assembler {
 
   // If a constant does not fit in an immediate field, generate some
   // number of MOV instructions and then perform the operation
-  void wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
+  void wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm,
                              add_sub_imm_insn insn1,
                              add_sub_reg_insn insn2);
   // Seperate vsn which sets the flags
-  void wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
+  void wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm,
                                add_sub_imm_insn insn1,
                                add_sub_reg_insn insn2);
 
 #define WRAP(INSN)                                                      \
-  void INSN(Register Rd, Register Rn, unsigned imm) {                   \
+  void INSN(Register Rd, Register Rn, uint64_t imm) {                   \
     wrap_add_sub_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
   }                                                                     \
                                                                         \
@@ -1105,7 +1105,7 @@ class MacroAssembler: public Assembler {
 #undef WRAP
 
 #define WRAP(INSN)                                                      \
-  void INSN(Register Rd, Register Rn, unsigned imm) {                   \
+  void INSN(Register Rd, Register Rn, uint64_t imm) {                   \
     wrap_adds_subs_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
   }                                                                     \
                                                                         \
diff --git a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
index dc04f4841b1..6668603bd92 100644
--- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
+++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
@@ -3449,7 +3449,7 @@ class StubGenerator: public StubCodeGenerator {
     assert(is_even(framesize/2), "sp not 16-byte aligned");
 
     // lr and fp are already in place
-    __ sub(sp, rfp, ((unsigned)framesize-4) << LogBytesPerInt); // prolog
+    __ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog
 
     int frame_complete = __ pc() - start;
 
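
Note on the widening (illustration only, not part of the patch): the core of the change is that the wrapped immediate goes from unsigned to uint64_t. With a 32-bit parameter, a caller passing a 64-bit value (for example a negative frame offset) has it truncated before the wrapper ever sees it, the old (int)imm cast then reinterprets anything with bit 31 set, and mov(Rd, (uint64_t)imm) zero-extends the truncated pattern rather than materializing the value the caller meant. The standalone sketch below shows the truncation at the call boundary; wrapper_u32 and wrapper_u64 are hypothetical stand-ins for the old and new wrapper signatures, not HotSpot functions.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for the old signature: unsigned imm.
    static void wrapper_u32(unsigned imm) {
      std::printf("u32 sees 0x%08x\n", imm);
    }

    // Hypothetical stand-in for the new signature: uint64_t imm.
    static void wrapper_u64(uint64_t imm) {
      std::printf("u64 sees 0x%016llx\n", (unsigned long long)imm);
    }

    int main() {
      int64_t offset = -8;            // e.g. a negative frame offset
      // Old path: conversion to 32-bit unsigned drops the high bits,
      // so the wrapper sees 0xfffffff8 instead of the 64-bit value.
      wrapper_u32((unsigned)offset);  // u32 sees 0xfffffff8
      // New path: the full 64-bit pattern survives, so a subsequent
      // mov(Rd, imm) would materialize exactly what the caller meant.
      wrapper_u64((uint64_t)offset);  // u64 sees 0xfffffffffffffff8
      return 0;
    }

The Assembler::subs, Assembler::addw, and Assembler::add qualifications in the .hpp hunks appear to follow from the same change: the WRAP overloads declared in MacroAssembler hide the base-class instructions under C++ name lookup, so the previously unqualified calls in cmp, movw, and mov would resolve to the multi-instruction wrappers. These call sites pass immediates that always fit the encoding (and cmp writes to zr, which the wrapper cannot use as a scratch register when an immediate does not fit, per the assert in wrap_adds_subs_imm_insn), so the explicit qualification presumably keeps them on the raw single-instruction path.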