diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 9bd8c6b8e9f88..615c8e19ac863 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -1212,7 +1212,8 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
                       arrayOopDesc::base_offset_in_bytes(op->type()),
                       array_element_size(op->type()),
                       op->klass()->as_register(),
-                      *op->stub()->entry());
+                      *op->stub()->entry(),
+                      op->zero_array());
   }
   __ bind(*op->stub()->continuation());
 }
@@ -2504,7 +2505,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     __ call_VM_leaf(entry, 3);
   }
 
-  __ bind(*stub->continuation());
+  if (stub != nullptr) {
+    __ bind(*stub->continuation());
+  }
 }
diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
index 568be65e1447e..8f1260feba3ea 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
@@ -878,7 +878,13 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
   assert(x->number_of_arguments() == 5, "wrong type");
 
   // Make all state_for calls early since they can emit code
-  CodeEmitInfo* info = state_for(x, x->state());
+  CodeEmitInfo* info = nullptr;
+  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
+    info = state_for(x, x->state_before());
+    info->set_force_reexecute();
+  } else {
+    info = state_for(x, x->state());
+  }
 
   LIRItem src(x->argument_at(0), this);
   LIRItem src_pos(x->argument_at(1), this);
@@ -911,6 +917,9 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
   int flags;
   ciArrayKlass* expected_type;
   arraycopy_helper(x, &flags, &expected_type);
+  if (x->check_flag(Instruction::OmitChecksFlag)) {
+    flags = 0;
+  }
 
   __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
 }
@@ -1132,7 +1141,13 @@ void LIRGenerator::do_NewInstance(NewInstance* x) {
 }
 
 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
-  CodeEmitInfo* info = state_for(x, x->state());
+  CodeEmitInfo* info = nullptr;
+  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
+    info = state_for(x, x->state_before());
+    info->set_force_reexecute();
+  } else {
+    info = state_for(x, x->state());
+  }
 
   LIRItem length(x->length(), this);
   length.load_item_force(FrameMap::r19_opr);
@@ -1149,7 +1164,7 @@ void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
 
   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
-  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
+  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, x->zero_array());
 
   LIR_Opr result = rlock_result(x);
   __ move(reg, result);
diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
index e48d64d90696c..c0455ad1bff69 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
@@ -272,7 +272,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
   verify_oop(obj);
 }
 
-void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case) {
+void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case, bool zero_array) {
   assert_different_registers(obj, len, t1, t2, klass);
 
   // determine alignment mask
@@ -297,7 +297,9 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
   // following the length field in initialize_header().
   int base_offset = align_up(base_offset_in_bytes, BytesPerWord);
   // clear rest of allocated space
-  initialize_body(obj, arr_size, base_offset, t1, t2);
+  if (zero_array) {
+    initialize_body(obj, arr_size, base_offset, t1, t2);
+  }
   if (Compilation::current()->bailed_out()) {
     return;
   }
diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp
index 3a4c868744c7c..fc8e83d706b50 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp
@@ -100,7 +100,8 @@ using MacroAssembler::null_check;
   //   base_offset_in_bytes: offset of first array element, in bytes
   //   f                   : element scale factor
   //   slow_case           : exit to slow case implementation if fast allocation fails
-  void allocate_array(Register obj, Register len, Register t, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case);
+  //   zero_array          : zero the allocated array or not
+  void allocate_array(Register obj, Register len, Register t, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case, bool zero_array);
 
   int rsp_offset() const { return _rsp_offset; }
   void set_rsp_offset(int n) { _rsp_offset = n; }
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index c279e3073af87..978708d03e66b 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -1621,7 +1621,8 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
                       arrayOopDesc::base_offset_in_bytes(op->type()),
                       array_element_size(op->type()),
                       op->klass()->as_register(),
-                      *op->stub()->entry());
+                      *op->stub()->entry(),
+                      op->zero_array());
   }
   __ bind(*op->stub()->continuation());
 }
@@ -3453,7 +3454,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
     __ call_VM_leaf(entry, 0);
 
-  __ bind(*stub->continuation());
+  if (stub != nullptr) {
+    __ bind(*stub->continuation());
+  }
 }
 
 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
index 7088cf33cf646..d3add6975b4f2 100644
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
@@ -1004,7 +1004,13 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
   assert(x->number_of_arguments() == 5, "wrong type");
 
   // Make all state_for calls early since they can emit code
-  CodeEmitInfo* info = state_for(x, x->state());
+  CodeEmitInfo* info = nullptr;
+  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
+    info = state_for(x, x->state_before());
+    info->set_force_reexecute();
+  } else {
+    info = state_for(x, x->state());
+  }
 
   LIRItem src(x->argument_at(0), this);
   LIRItem src_pos(x->argument_at(1), this);
@@ -1016,6 +1022,13 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
   // LinearScan will fail allocation (because arraycopy always needs a
   // call)
 
+  int flags;
+  ciArrayKlass* expected_type;
+  arraycopy_helper(x, &flags, &expected_type);
+  if (x->check_flag(Instruction::OmitChecksFlag)) {
+    flags = 0;
+  }
+
 #ifndef _LP64
   src.load_item_force     (FrameMap::rcx_oop_opr);
   src_pos.load_item_force (FrameMap::rdx_opr);
@@ -1023,6 +1036,11 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
   dst_pos.load_item_force (FrameMap::rbx_opr);
   length.load_item_force  (FrameMap::rdi_opr);
   LIR_Opr tmp =           (FrameMap::rsi_opr);
+
+  if (expected_type != nullptr && flags == 0) {
+    FrameMap* f = Compilation::current()->frame_map();
+    f->update_reserved_argument_area_size(3 * BytesPerWord);
+  }
 #else
 
   // The java calling convention will give us enough registers
@@ -1044,10 +1062,6 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
 
   set_no_result(x);
 
-  int flags;
-  ciArrayKlass* expected_type;
-  arraycopy_helper(x, &flags, &expected_type);
-
   __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
 }
@@ -1310,7 +1324,13 @@ void LIRGenerator::do_NewInstance(NewInstance* x) {
 
 
 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
-  CodeEmitInfo* info = state_for(x, x->state());
+  CodeEmitInfo* info = nullptr;
+  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
+    info = state_for(x, x->state_before());
+    info->set_force_reexecute();
+  } else {
+    info = state_for(x, x->state());
+  }
 
   LIRItem length(x->length(), this);
   length.load_item_force(FrameMap::rbx_opr);
@@ -1327,7 +1347,7 @@ void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
 
   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
-  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
+  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, x->zero_array());
 
   LIR_Opr result = rlock_result(x);
   __ move(reg, result);
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
index caca3a1528261..2374324ca7c13 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -278,7 +278,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
   verify_oop(obj);
 }
 
-void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case) {
+void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case, bool zero_array) {
   assert(obj == rax, "obj must be in rax, for cmpxchg");
   assert_different_registers(obj, len, t1, t2, klass);
@@ -300,11 +300,13 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
   initialize_header(obj, klass, len, t1, t2);
 
   // clear rest of allocated space
-  const Register len_zero = len;
-  // Align-up to word boundary, because we clear the 4 bytes potentially
-  // following the length field in initialize_header().
-  int base_offset = align_up(base_offset_in_bytes, BytesPerWord);
-  initialize_body(obj, arr_size, base_offset, len_zero);
+  if (zero_array) {
+    const Register len_zero = len;
+    // Align-up to word boundary, because we clear the 4 bytes potentially
+    // following the length field in initialize_header().
+    int base_offset = align_up(base_offset_in_bytes, BytesPerWord);
+    initialize_body(obj, arr_size, base_offset, len_zero);
+  }
 
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == rax, "must be");
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp
index a705dd70efd06..6344a7b6ef19e 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp
@@ -89,7 +89,8 @@
   //   base_offset_in_bytes: offset of the first array element, in bytes
   //   f                   : element scale factor
   //   slow_case           : exit to slow case implementation if fast allocation fails
-  void allocate_array(Register obj, Register len, Register t, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case);
+  //   zero_array          : zero the allocated array or not
+  void allocate_array(Register obj, Register len, Register t, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case, bool zero_array);
 
   int rsp_offset() const { return _rsp_offset; }
   void set_rsp_offset(int n) { _rsp_offset = n; }
diff --git a/src/hotspot/share/c1/c1_Compiler.cpp b/src/hotspot/share/c1/c1_Compiler.cpp
index bdbeb39f89a4a..6e518b0213b4a 100644
--- a/src/hotspot/share/c1/c1_Compiler.cpp
+++ b/src/hotspot/share/c1/c1_Compiler.cpp
@@ -235,6 +235,9 @@ bool Compiler::is_intrinsic_supported(vmIntrinsics::ID id) {
   case vmIntrinsics::_counterTime:
 #endif
   case vmIntrinsics::_getObjectSize:
+#if defined(X86) || defined(AARCH64)
+  case vmIntrinsics::_clone:
+#endif
     break;
   case vmIntrinsics::_blackhole:
     break;
diff --git a/src/hotspot/share/c1/c1_GraphBuilder.cpp b/src/hotspot/share/c1/c1_GraphBuilder.cpp
index a361f3da9c281..db025883b78ba 100644
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp
@@ -2026,8 +2026,11 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
       receiver = state()->stack_at(index);
       ciType* type = receiver->exact_type();
-      if (type != nullptr && type->is_loaded() &&
-          type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
+      if (type != nullptr && type->is_loaded()) {
+        assert(!type->is_instance_klass() || !type->as_instance_klass()->is_interface(), "Must not be an interface");
+        // Detects non-interface instances, primitive arrays, and some object arrays.
+        // Array receivers can only call Object methods, so we should be able to allow
+        // all object arrays here too, even those with unloaded types.
         receiver_klass = (ciInstanceKlass*) type;
         type_is_exact = true;
       }
@@ -2243,7 +2246,7 @@ void GraphBuilder::new_instance(int klass_index) {
 
 
 void GraphBuilder::new_type_array() {
   ValueStack* state_before = copy_state_exhandling();
-  apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before)));
+  apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true)));
 }
@@ -3650,9 +3653,13 @@ void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee, bool ignore_retur
   case vmIntrinsics::_getAndSetReference : append_unsafe_get_and_set(callee, false); return;
   case vmIntrinsics::_getCharStringU     : append_char_access(callee, false); return;
   case vmIntrinsics::_putCharStringU     : append_char_access(callee, true); return;
+  case vmIntrinsics::_clone              : append_alloc_array_copy(callee); return;
   default:
     break;
   }
+  if (_inline_bailout_msg != nullptr) {
+    return;
+  }
 
   // create intrinsic node
   const bool has_receiver = !callee->is_static();
@@ -3714,6 +3721,9 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee, bool ignore_return) {
     }
   }
   build_graph_for_intrinsic(callee, ignore_return);
+  if (_inline_bailout_msg != nullptr) {
+    return false;
+  }
   return true;
 }
@@ -4427,6 +4437,43 @@ void GraphBuilder::append_char_access(ciMethod* callee, bool is_store) {
   }
 }
 
+void GraphBuilder::append_alloc_array_copy(ciMethod* callee) {
+  const int args_base = state()->stack_size() - callee->arg_size();
+  ciType* receiver_type = state()->stack_at(args_base)->exact_type();
+  if (receiver_type == nullptr) {
+    inline_bailout("must have a receiver");
+    return;
+  }
+  if (!receiver_type->is_type_array_klass()) {
+    inline_bailout("clone array not primitive");
+    return;
+  }
+
+  ValueStack* state_before = copy_state_before();
+  state_before->set_force_reexecute();
+  Value src = apop();
+  BasicType basic_type = src->exact_type()->as_array_klass()->element_type()->basic_type();
+  Value length = append(new ArrayLength(src, state_before));
+  Value new_array = append_split(new NewTypeArray(length, basic_type, state_before, false));
+
+  ValueType* result_type = as_ValueType(callee->return_type());
+  vmIntrinsics::ID id = vmIntrinsics::_arraycopy;
+  Values* args = new Values(5);
+  args->push(src);
+  args->push(append(new Constant(new IntConstant(0))));
+  args->push(new_array);
+  args->push(append(new Constant(new IntConstant(0))));
+  args->push(length);
+  const bool has_receiver = true;
+  Intrinsic* array_copy = new Intrinsic(result_type, id,
+                                        args, has_receiver, state_before,
+                                        vmIntrinsics::preserves_state(id),
+                                        vmIntrinsics::can_trap(id));
+  array_copy->set_flag(Instruction::OmitChecksFlag, true);
+  append_split(array_copy);
+  apush(new_array);
+}
+
 void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) {
   CompileLog* log = compilation()->log();
   if (log != nullptr) {
diff --git a/src/hotspot/share/c1/c1_GraphBuilder.hpp b/src/hotspot/share/c1/c1_GraphBuilder.hpp
index 42233455d4c73..92b9a518a2070 100644
--- a/src/hotspot/share/c1/c1_GraphBuilder.hpp
+++ b/src/hotspot/share/c1/c1_GraphBuilder.hpp
@@ -379,6 +379,7 @@ class GraphBuilder {
   void append_unsafe_CAS(ciMethod* callee);
   void append_unsafe_get_and_set(ciMethod* callee, bool is_add);
   void append_char_access(ciMethod* callee, bool is_store);
+  void append_alloc_array_copy(ciMethod* callee);
 
   void print_inlining(ciMethod* callee, const char* msg, bool success = true);
diff --git a/src/hotspot/share/c1/c1_Instruction.hpp b/src/hotspot/share/c1/c1_Instruction.hpp
index 8f7fd698e7944..32ff3d9f61cab 100644
--- a/src/hotspot/share/c1/c1_Instruction.hpp
+++ b/src/hotspot/share/c1/c1_Instruction.hpp
@@ -364,6 +364,7 @@ class Instruction: public CompilationResourceObj {
     InWorkListFlag,
     DeoptimizeOnException,
     KillsMemoryFlag,
+    OmitChecksFlag,
     InstructionLastFlag
   };
@@ -1327,16 +1328,19 @@ BASE(NewArray, StateSplit)
 LEAF(NewTypeArray, NewArray)
  private:
   BasicType _elt_type;
+  bool _zero_array;
 
  public:
   // creation
-  NewTypeArray(Value length, BasicType elt_type, ValueStack* state_before)
+  NewTypeArray(Value length, BasicType elt_type, ValueStack* state_before, bool zero_array)
   : NewArray(length, state_before)
   , _elt_type(elt_type)
+  , _zero_array(zero_array)
   {}
 
   // accessors
   BasicType elt_type() const { return _elt_type; }
+  bool zero_array() const { return _zero_array; }
 
   ciType* exact_type() const;
 };
diff --git a/src/hotspot/share/c1/c1_LIR.cpp b/src/hotspot/share/c1/c1_LIR.cpp
index dee208c11bed3..4017a5324b53f 100644
--- a/src/hotspot/share/c1/c1_LIR.cpp
+++ b/src/hotspot/share/c1/c1_LIR.cpp
@@ -353,7 +353,15 @@ LIR_OpArrayCopy::LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_
   , _tmp(tmp)
   , _expected_type(expected_type)
   , _flags(flags) {
+#if defined(X86) || defined(AARCH64)
+  if (expected_type != nullptr && flags == 0) {
+    _stub = nullptr;
+  } else {
+    _stub = new ArrayCopyStub(this);
+  }
+#else
   _stub = new ArrayCopyStub(this);
+#endif
 }
 
 LIR_OpUpdateCRC32::LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res)
@@ -999,7 +1007,10 @@ void LIR_OpLabel::emit_code(LIR_Assembler* masm) {
 
 void LIR_OpArrayCopy::emit_code(LIR_Assembler* masm) {
   masm->emit_arraycopy(this);
-  masm->append_code_stub(stub());
+  ArrayCopyStub* code_stub = stub();
+  if (code_stub != nullptr) {
+    masm->append_code_stub(code_stub);
+  }
 }
 
 void LIR_OpUpdateCRC32::emit_code(LIR_Assembler* masm) {
@@ -1365,7 +1376,7 @@ void LIR_List::allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3,
                            stub));
 }
 
-void LIR_List::allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub) {
+void LIR_List::allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub, bool zero_array) {
   append(new LIR_OpAllocArray(
                            klass,
                            len,
@@ -1375,7 +1386,8 @@ void LIR_List::allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, L
                            t3,
                            t4,
                            type,
-                           stub));
+                           stub,
+                           zero_array));
 }
 
 void LIR_List::shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp) {
diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp
index 6f527135fbe1c..c69d29f8d619d 100644
--- a/src/hotspot/share/c1/c1_LIR.hpp
+++ b/src/hotspot/share/c1/c1_LIR.hpp
@@ -1750,9 +1750,10 @@ class LIR_OpAllocArray : public LIR_Op {
   LIR_Opr   _tmp4;
   BasicType _type;
   CodeStub* _stub;
+  bool      _zero_array;
 
  public:
-  LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
+  LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub, bool zero_array)
     : LIR_Op(lir_alloc_array, result, nullptr)
     , _klass(klass)
     , _len(len)
@@ -1761,7 +1762,8 @@ class LIR_OpAllocArray : public LIR_Op {
     , _tmp3(t3)
     , _tmp4(t4)
     , _type(type)
-    , _stub(stub) {}
+    , _stub(stub)
+    , _zero_array(zero_array) {}
 
   LIR_Opr   klass()  const { return _klass; }
   LIR_Opr   len()    const { return _len; }
@@ -1772,6 +1774,7 @@ class LIR_OpAllocArray : public LIR_Op {
   LIR_Opr   tmp4()   const { return _tmp4; }
   BasicType type()   const { return _type; }
   CodeStub* stub()   const { return _stub; }
+  bool      zero_array() const { return _zero_array; }
 
   virtual void emit_code(LIR_Assembler* masm);
   virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
@@ -2302,7 +2305,7 @@ class LIR_List: public CompilationResourceObj {
   void irem(LIR_Opr left, int right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
 
   void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
-  void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
+  void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub, bool zero_array = true);
 
   // jump is an unconditional branch
   void jump(BlockBegin* block) {
diff --git a/src/hotspot/share/c1/c1_ValueStack.hpp b/src/hotspot/share/c1/c1_ValueStack.hpp
index 0a75fa39bf607..bb0c475585c86 100644
--- a/src/hotspot/share/c1/c1_ValueStack.hpp
+++ b/src/hotspot/share/c1/c1_ValueStack.hpp
@@ -58,6 +58,7 @@ class ValueStack: public CompilationResourceObj {
   Values  _locals;  // the locals
   Values  _stack;   // the expression stack
   Values* _locks;   // the monitor stack (holding the locked values)
+  bool    _force_reexecute; // force the reexecute flag on, used for patching stub
 
   Value check(ValueTag tag, Value t) {
     assert(tag == t->type()->tag() || (tag == objectTag && t->type()->tag() == addressTag), "types must correspond");
@@ -225,6 +226,9 @@ class ValueStack: public CompilationResourceObj {
   void setup_phi_for_stack(BlockBegin* b, int index);
   void setup_phi_for_local(BlockBegin* b, int index);
 
+  bool force_reexecute() const { return _force_reexecute; }
+  void set_force_reexecute()   { _force_reexecute = true; }
+
   // debugging
   void print()  PRODUCT_RETURN;
   void verify() PRODUCT_RETURN;
diff --git a/test/hotspot/jtreg/compiler/c1/TestNullArrayClone.java b/test/hotspot/jtreg/compiler/c1/TestNullArrayClone.java
new file mode 100644
index 0000000000000..d153eac3abd35
--- /dev/null
+++ b/test/hotspot/jtreg/compiler/c1/TestNullArrayClone.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8302850
+ * @summary Tests that an array clone call that has been compiled with C1
+ *          handles null values correctly.
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:TieredStopAtLevel=1
+ *                   -XX:CompileOnly=compiler.c1.TestNullArrayClone::testClone* -XX:+UnlockExperimentalVMOptions
+ *                   compiler.c1.TestNullArrayClone
+ */
+package compiler.c1;
+
+import java.util.concurrent.ThreadLocalRandom;
+
+public class TestNullArrayClone {
+    static final int ITER = 2000; // ~ Tier3CompileThreshold
+    static final int ARRAY_SIZE = 999;
+
+    public static void main(String[] args) {
+        testInts();
+        testLongs();
+        testBytes();
+    }
+
+    private static void testInts() {
+        final int[] arr = new int[ARRAY_SIZE];
+        for (int i = 0; i < arr.length; i++) {
+            arr[i] = ThreadLocalRandom.current().nextInt();
+        }
+
+        for (int i = 0; i < ITER; i++) {
+            int[] result = testClonePrimitiveInt(arr);
+            if (result.length != arr.length) {
+                throw new RuntimeException("Unexpected clone length: source array length " + arr.length + " != clone array length " + result.length);
+            }
+            for (int j = 0; j < arr.length; j++) {
+                if (result[j] != arr[j]) {
+                    throw new RuntimeException("Unexpected result: " + result[j] + " != " + j);
+                }
+            }
+        }
+
+        try {
+            testClonePrimitiveInt(null);
+            throw new RuntimeException("Expected NullPointerException to be thrown");
+        } catch (NullPointerException e) {
+        }
+    }
+
+    private static void testLongs() {
+        final long[] arr = new long[ARRAY_SIZE];
+        for (int i = 0; i < arr.length; i++) {
+            arr[i] = ThreadLocalRandom.current().nextLong();
+        }
+
+        for (int i = 0; i < ITER; i++) {
+            long[] result = testClonePrimitiveLong(arr);
+            if (result.length != arr.length) {
+                throw new RuntimeException("Unexpected clone length: source array length " + arr.length + " != clone array length " + result.length);
+            }
+            for (int j = 0; j < arr.length; j++) {
+                if (result[j] != arr[j]) {
+                    throw new RuntimeException("Unexpected result: " + result[j] + " != " + j);
+                }
+            }
+        }
+
+        try {
+            testClonePrimitiveLong(null);
+            throw new RuntimeException("Expected NullPointerException to be thrown");
+        } catch (NullPointerException e) {
+        }
+    }
+
+    private static void testBytes() {
+        final byte[] arr = new byte[ARRAY_SIZE];
+        for (int i = 0; i < arr.length; i++) {
+            arr[i] = (byte) ThreadLocalRandom.current().nextInt();
+        }
+
+        for (int i = 0; i < ITER; i++) {
+            byte[] result = testClonePrimitiveBytes(arr);
+            if (result.length != arr.length) {
+                throw new RuntimeException("Unexpected clone length: source array length " + arr.length + " != clone array length " + result.length);
+            }
+            for (int j = 0; j < arr.length; j++) {
+                if (result[j] != arr[j]) {
+                    throw new RuntimeException("Unexpected result: " + result[j] + " != " + j);
+                }
+            }
+        }
+
+        try {
+            testClonePrimitiveBytes(null);
+            throw new RuntimeException("Expected NullPointerException to be thrown");
+        } catch (NullPointerException e) {
+        }
+    }
+
+    static int[] testClonePrimitiveInt(int[] ints) {
+        return ints.clone();
+    }
+
+    static long[] testClonePrimitiveLong(long[] longs) {
+        return longs.clone();
+    }
+
+    static byte[] testClonePrimitiveBytes(byte[] bytes) {
+        return bytes.clone();
+    }
+}
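For reference, a minimal Java sketch (illustrative only, not part of the patch) of what the new append_alloc_array_copy path builds for a primitive array receiver: the ArrayLength instruction supplies the implicit null check and the deoptimization state is forced to re-execute the clone bytecode, the NewTypeArray is created with zero_array == false because every element is overwritten right away, and the subsequent arraycopy runs with its runtime checks omitted (OmitChecksFlag) since source and destination are known compatible primitive arrays of equal length.

    // Hypothetical Java equivalent of the IR emitted for int[].clone(); names are made up.
    static int[] cloneLowered(int[] src) {
        int len = src.length;                   // implicit null check; on deopt the clone call is re-executed
        int[] dst = new int[len];               // allocation; the intrinsic skips zeroing of the new array
        System.arraycopy(src, 0, dst, 0, len);  // straight copy; type and bounds checks statically known to pass
        return dst;
    }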