diff --git a/make/modules/java.desktop/lib/Awt2dLibraries.gmk b/make/modules/java.desktop/lib/Awt2dLibraries.gmk index 2e203106d98..7bec6d5059f 100644 --- a/make/modules/java.desktop/lib/Awt2dLibraries.gmk +++ b/make/modules/java.desktop/lib/Awt2dLibraries.gmk @@ -751,7 +751,8 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false) DISABLED_WARNINGS_gcc := sign-compare type-limits unused-result \ maybe-uninitialized shift-negative-value implicit-fallthrough \ unused-function, \ - DISABLED_WARNINGS_clang := incompatible-pointer-types sign-compare, \ + DISABLED_WARNINGS_clang := incompatible-pointer-types sign-compare \ + deprecated-declarations, \ DISABLED_WARNINGS_microsoft := 4018 4244 4267, \ LDFLAGS := $(LDFLAGS_JDKLIB) \ $(call SET_SHARED_LIBRARY_ORIGIN), \ diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp index 318c9f424b4..e794bd48743 100644 --- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp @@ -575,7 +575,7 @@ class Address { static bool offset_ok_for_immed(int64_t offset, int shift) { unsigned mask = (1 << shift) - 1; if (offset < 0 || offset & mask) { - return (uabs(offset) < (1 << (20 - 12))); // Unscaled offset + return (uabs((long)offset) < (1 << (20 - 12))); // Unscaled offset } else { return ((offset >> shift) < (1 << (21 - 10 + 1))); // Scaled, unsigned offset } diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp index 791e7aacdef..112f3226f9c 100644 --- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp @@ -129,6 +129,11 @@ class VM_Version : public Abstract_VM_Version { return (1 << ((_psr_info.ctr_el0 >> 16) & 0x0f)) * 4; } static bool supports_fast_class_init_checks() { return true; } + +#ifdef __APPLE__ + // Is the CPU running emulated (for example macOS Rosetta running x86_64 code on M1 ARM (aarch64) + static bool is_cpu_emulated(); +#endif }; #endif // CPU_AARCH64_VM_VERSION_AARCH64_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp new file mode 100644 index 00000000000..20d47dc0084 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp @@ -0,0 +1,105 @@ +/* + * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. + * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#ifndef OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
+#define OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
+
+// Implementation of class atomic
+// Note that memory_order_conservative requires a full barrier after atomic stores.
+// See https://patchwork.kernel.org/patch/3575821/
+
+template<size_t byte_size>
+struct Atomic::PlatformAdd {
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
+    FULL_MEM_BARRIER;
+    return res;
+  }
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
+};
+
+template<size_t byte_size>
+template<typename T>
+inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
+                                                     T exchange_value,
+                                                     atomic_memory_order order) const {
+  STATIC_ASSERT(byte_size == sizeof(T));
+  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
+  FULL_MEM_BARRIER;
+  return res;
+}
+
+// __attribute__((unused)) on dest is to get rid of spurious GCC warnings.
+template<size_t byte_size>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attribute__((unused)),
+                                                        T compare_value,
+                                                        T exchange_value,
+                                                        atomic_memory_order order) const {
+  STATIC_ASSERT(byte_size == sizeof(T));
+  if (order == memory_order_relaxed) {
+    T value = compare_value;
+    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+    return value;
+  } else {
+    T value = compare_value;
+    FULL_MEM_BARRIER;
+    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+    FULL_MEM_BARRIER;
+    return value;
+  }
+}
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
+{
+  template <typename T>
+  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
+};
+
+
+#endif // OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
diff --git a/src/hotspot/os_cpu/bsd_aarch64/bytes_bsd_aarch64.inline.hpp b/src/hotspot/os_cpu/bsd_aarch64/bytes_bsd_aarch64.inline.hpp
new file mode 100644
index 00000000000..2ea41ef20ec
--- /dev/null
+++ b/src/hotspot/os_cpu/bsd_aarch64/bytes_bsd_aarch64.inline.hpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_AARCH64_BYTES_BSD_AARCH64_INLINE_HPP
+#define OS_CPU_BSD_AARCH64_BYTES_BSD_AARCH64_INLINE_HPP
+
+#ifdef __APPLE__
+#include <libkern/OSByteOrder.h>
+#endif
+
+#if defined(__APPLE__)
+# define bswap_16(x) OSSwapInt16(x)
+# define bswap_32(x) OSSwapInt32(x)
+# define bswap_64(x) OSSwapInt64(x)
+#else
+# error "Unimplemented"
+#endif
+
+// Efficient swapping of data bytes from Java byte
+// ordering to native byte ordering and vice versa.
+inline u2 Bytes::swap_u2(u2 x) {
+  return bswap_16(x);
+}
+
+inline u4 Bytes::swap_u4(u4 x) {
+  return bswap_32(x);
+}
+
+inline u8 Bytes::swap_u8(u8 x) {
+  return bswap_64(x);
+}
+
+#endif // OS_CPU_BSD_AARCH64_BYTES_BSD_AARCH64_INLINE_HPP
diff --git a/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.inline.hpp b/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.inline.hpp
new file mode 100644
index 00000000000..078a16149b1
--- /dev/null
+++ b/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.inline.hpp
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#ifndef OS_CPU_BSD_AARCH64_COPY_BSD_AARCH64_INLINE_HPP +#define OS_CPU_BSD_AARCH64_COPY_BSD_AARCH64_INLINE_HPP + +#define COPY_SMALL(from, to, count) \ +{ \ + long tmp0, tmp1, tmp2, tmp3; \ + long tmp4, tmp5, tmp6, tmp7; \ + __asm volatile( \ +" adr %[t0], 0f;\n" \ +" add %[t0], %[t0], %[cnt], lsl #5;\n" \ +" br %[t0];\n" \ +" .align 5;\n" \ +"0:" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldr %[t0], [%[s], #0];\n" \ +" str %[t0], [%[d], #0];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" ldr %[t2], [%[s], #16];\n" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" str %[t2], [%[d], #16];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" ldp %[t2], %[t3], [%[s], #16];\n" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" stp %[t2], %[t3], [%[d], #16];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" ldp %[t2], %[t3], [%[s], #16];\n" \ +" ldr %[t4], [%[s], #32];\n" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" stp %[t2], %[t3], [%[d], #16];\n" \ +" str %[t4], [%[d], #32];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" ldp %[t2], %[t3], [%[s], #16];\n" \ +" ldp %[t4], %[t5], [%[s], #32];\n" \ +"2:" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" stp %[t2], %[t3], [%[d], #16];\n" \ +" stp %[t4], %[t5], [%[d], #32];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldr %[t6], [%[s], #0];\n" \ +" ldp %[t0], %[t1], [%[s], #8];\n" \ +" ldp %[t2], %[t3], [%[s], #24];\n" \ +" ldp %[t4], %[t5], [%[s], #40];\n" \ +" str %[t6], [%[d]], #8;\n" \ +" b 2b;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" ldp %[t2], %[t3], [%[s], #16];\n" \ +" ldp %[t4], %[t5], [%[s], #32];\n" \ +" ldp %[t6], %[t7], [%[s], #48];\n" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" stp %[t2], %[t3], [%[d], #16];\n" \ +" stp %[t4], %[t5], [%[d], #32];\n" \ +" stp %[t6], %[t7], [%[d], #48];\n" \ +"1:" \ + \ + : [s]"+r"(from), [d]"+r"(to), [cnt]"+r"(count), \ + [t0]"=&r"(tmp0), [t1]"=&r"(tmp1), [t2]"=&r"(tmp2), [t3]"=&r"(tmp3), \ + [t4]"=&r"(tmp4), [t5]"=&r"(tmp5), [t6]"=&r"(tmp6), [t7]"=&r"(tmp7) \ + : \ + : "memory", "cc"); \ +} + +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { + __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory"); + if (__builtin_expect(count <= 8, 1)) { + COPY_SMALL(from, to, count); + return; + } + _Copy_conjoint_words(from, to, count); +} + +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { + if (__builtin_constant_p(count)) { + memcpy(to, from, count * sizeof(HeapWord)); + return; + } + __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory"); + if (__builtin_expect(count <= 8, 1)) { + COPY_SMALL(from, to, count); + return; + } + _Copy_disjoint_words(from, to, count); +} + +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { + __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory"); + if (__builtin_expect(count <= 8, 1)) { + COPY_SMALL(from, to, count); + return; + } + _Copy_disjoint_words(from, to, count); +} + +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_words(from, to, count); +} + +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { + pd_disjoint_words(from, to, count); +} + +static void pd_conjoint_bytes(const void* from, void* to, size_t 
count) { + (void)memmove(to, from, count); +} + +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { + pd_conjoint_bytes(from, to, count); +} + +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { + _Copy_conjoint_jshorts_atomic(from, to, count); +} + +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { + _Copy_conjoint_jints_atomic(from, to, count); +} + +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { + _Copy_conjoint_jlongs_atomic(from, to, count); +} + +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { + assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); +} + +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { + _Copy_arrayof_conjoint_bytes(from, to, count); +} + +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { + _Copy_arrayof_conjoint_jshorts(from, to, count); +} + +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { + _Copy_arrayof_conjoint_jints(from, to, count); +} + +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + _Copy_arrayof_conjoint_jlongs(from, to, count); +} + +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + assert(!UseCompressedOops, "foo!"); + assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); + _Copy_arrayof_conjoint_jlongs(from, to, count); +} + +#endif // OS_CPU_BSD_AARCH64_COPY_BSD_AARCH64_INLINE_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.s b/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.s new file mode 100644 index 00000000000..7b286820a9a --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.s @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2016, Linaro Ltd. All rights reserved. + * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#define CFUNC(x) _##x + + .global CFUNC(_Copy_conjoint_words) + .global CFUNC(_Copy_disjoint_words) + +s .req x0 +d .req x1 +count .req x2 +t0 .req x3 +t1 .req x4 +t2 .req x5 +t3 .req x6 +t4 .req x7 +t5 .req x8 +t6 .req x9 +t7 .req x10 + + .align 6 +CFUNC(_Copy_disjoint_words): + // Ensure 2 word aligned + tbz s, #3, fwd_copy_aligned + ldr t0, [s], #8 + str t0, [d], #8 + sub count, count, #1 + +fwd_copy_aligned: + // Bias s & d so we only pre index on the last copy + sub s, s, #16 + sub d, d, #16 + + ldp t0, t1, [s, #16] + ldp t2, t3, [s, #32] + ldp t4, t5, [s, #48] + ldp t6, t7, [s, #64]! + + subs count, count, #16 + blo fwd_copy_drain + +fwd_copy_again: + prfm pldl1keep, [s, #256] + stp t0, t1, [d, #16] + ldp t0, t1, [s, #16] + stp t2, t3, [d, #32] + ldp t2, t3, [s, #32] + stp t4, t5, [d, #48] + ldp t4, t5, [s, #48] + stp t6, t7, [d, #64]! + ldp t6, t7, [s, #64]! + subs count, count, #8 + bhs fwd_copy_again + +fwd_copy_drain: + stp t0, t1, [d, #16] + stp t2, t3, [d, #32] + stp t4, t5, [d, #48] + stp t6, t7, [d, #64]! + + // count is now -8..-1 for 0..7 words to copy + adr t0, 0f + add t0, t0, count, lsl #5 + br t0 + + .align 5 + ret // -8 == 0 words + .align 5 + ldr t0, [s, #16] // -7 == 1 word + str t0, [d, #16] + ret + .align 5 + ldp t0, t1, [s, #16] // -6 = 2 words + stp t0, t1, [d, #16] + ret + .align 5 + ldp t0, t1, [s, #16] // -5 = 3 words + ldr t2, [s, #32] + stp t0, t1, [d, #16] + str t2, [d, #32] + ret + .align 5 + ldp t0, t1, [s, #16] // -4 = 4 words + ldp t2, t3, [s, #32] + stp t0, t1, [d, #16] + stp t2, t3, [d, #32] + ret + .align 5 + ldp t0, t1, [s, #16] // -3 = 5 words + ldp t2, t3, [s, #32] + ldr t4, [s, #48] + stp t0, t1, [d, #16] + stp t2, t3, [d, #32] + str t4, [d, #48] + ret + .align 5 + ldp t0, t1, [s, #16] // -2 = 6 words + ldp t2, t3, [s, #32] + ldp t4, t5, [s, #48] + stp t0, t1, [d, #16] + stp t2, t3, [d, #32] + stp t4, t5, [d, #48] + ret + .align 5 + ldp t0, t1, [s, #16] // -1 = 7 words + ldp t2, t3, [s, #32] + ldp t4, t5, [s, #48] + ldr t6, [s, #64] + stp t0, t1, [d, #16] + stp t2, t3, [d, #32] + stp t4, t5, [d, #48] + str t6, [d, #64] + // Is always aligned here, code for 7 words is one instruction + // too large so it just falls through. + .align 5 +0: + ret + + .align 6 +CFUNC(_Copy_conjoint_words): + sub t0, d, s + cmp t0, count, lsl #3 + bhs CFUNC(_Copy_disjoint_words) + + add s, s, count, lsl #3 + add d, d, count, lsl #3 + + // Ensure 2 word aligned + tbz s, #3, bwd_copy_aligned + ldr t0, [s, #-8]! + str t0, [d, #-8]! + sub count, count, #1 + +bwd_copy_aligned: + ldp t0, t1, [s, #-16] + ldp t2, t3, [s, #-32] + ldp t4, t5, [s, #-48] + ldp t6, t7, [s, #-64]! + + subs count, count, #16 + blo bwd_copy_drain + +bwd_copy_again: + prfum pldl1keep, [s, #-256] + stp t0, t1, [d, #-16] + ldp t0, t1, [s, #-16] + stp t2, t3, [d, #-32] + ldp t2, t3, [s, #-32] + stp t4, t5, [d, #-48] + ldp t4, t5, [s, #-48] + stp t6, t7, [d, #-64]! + ldp t6, t7, [s, #-64]! + subs count, count, #8 + bhs bwd_copy_again + +bwd_copy_drain: + stp t0, t1, [d, #-16] + stp t2, t3, [d, #-32] + stp t4, t5, [d, #-48] + stp t6, t7, [d, #-64]! 
+ + // count is now -8..-1 for 0..7 words to copy + adr t0, 0f + add t0, t0, count, lsl #5 + br t0 + + .align 5 + ret // -8 == 0 words + .align 5 + ldr t0, [s, #-8] // -7 == 1 word + str t0, [d, #-8] + ret + .align 5 + ldp t0, t1, [s, #-16] // -6 = 2 words + stp t0, t1, [d, #-16] + ret + .align 5 + ldp t0, t1, [s, #-16] // -5 = 3 words + ldr t2, [s, #-24] + stp t0, t1, [d, #-16] + str t2, [d, #-24] + ret + .align 5 + ldp t0, t1, [s, #-16] // -4 = 4 words + ldp t2, t3, [s, #-32] + stp t0, t1, [d, #-16] + stp t2, t3, [d, #-32] + ret + .align 5 + ldp t0, t1, [s, #-16] // -3 = 5 words + ldp t2, t3, [s, #-32] + ldr t4, [s, #-40] + stp t0, t1, [d, #-16] + stp t2, t3, [d, #-32] + str t4, [d, #-40] + ret + .align 5 + ldp t0, t1, [s, #-16] // -2 = 6 words + ldp t2, t3, [s, #-32] + ldp t4, t5, [s, #-48] + stp t0, t1, [d, #-16] + stp t2, t3, [d, #-32] + stp t4, t5, [d, #-48] + ret + .align 5 + ldp t0, t1, [s, #-16] // -1 = 7 words + ldp t2, t3, [s, #-32] + ldp t4, t5, [s, #-48] + ldr t6, [s, #-56] + stp t0, t1, [d, #-16] + stp t2, t3, [d, #-32] + stp t4, t5, [d, #-48] + str t6, [d, #-56] + // Is always aligned here, code for 7 words is one instruction + // too large so it just falls through. + .align 5 +0: + ret diff --git a/src/hotspot/os_cpu/bsd_aarch64/globals_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/globals_bsd_aarch64.hpp new file mode 100644 index 00000000000..c18c4506ac9 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/globals_bsd_aarch64.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_BSD_AARCH64_GLOBALS_BSD_AARCH64_HPP +#define OS_CPU_BSD_AARCH64_GLOBALS_BSD_AARCH64_HPP + + +// Sets the default values for platform dependent flags used by the runtime system. 
+// (see globals.hpp) + +define_pd_global(bool, DontYieldALot, false); +define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default +define_pd_global(intx, VMThreadStackSize, 2048); + +define_pd_global(intx, CompilerThreadStackSize, 2048); + +define_pd_global(uintx,JVMInvokeMethodSlack, 8192); + +// Used on 64 bit platforms for UseCompressedOops base address +define_pd_global(uintx,HeapBaseMinAddress, 2*G); + +#endif // OS_CPU_BSD_AARCH64_GLOBALS_BSD_AARCH64_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/icache_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/icache_bsd_aarch64.hpp new file mode 100644 index 00000000000..7e9ca43efcf --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/icache_bsd_aarch64.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_BSD_AARCH64_ICACHE_AARCH64_HPP +#define OS_CPU_BSD_AARCH64_ICACHE_AARCH64_HPP + +// Interface for updating the instruction cache. Whenever the VM +// modifies code, part of the processor instruction cache potentially +// has to be flushed. + +class ICache : public AbstractICache { + public: + static void initialize(); + static void invalidate_word(address addr) { + __clear_cache((char *)addr, (char *)(addr + 4)); + } + static void invalidate_range(address start, int nbytes) { + __clear_cache((char *)start, (char *)(start + nbytes)); + } +}; + +#endif // OS_CPU_BSD_AARCH64_ICACHE_AARCH64_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/orderAccess_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/orderAccess_bsd_aarch64.hpp new file mode 100644 index 00000000000..b325cae8354 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/orderAccess_bsd_aarch64.hpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. + * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_BSD_AARCH64_ORDERACCESS_BSD_AARCH64_HPP +#define OS_CPU_BSD_AARCH64_ORDERACCESS_BSD_AARCH64_HPP + +// Included in orderAccess.hpp header file. + +// Implementation of class OrderAccess. + +inline void OrderAccess::loadload() { acquire(); } +inline void OrderAccess::storestore() { release(); } +inline void OrderAccess::loadstore() { acquire(); } +inline void OrderAccess::storeload() { fence(); } + +#define FULL_MEM_BARRIER __sync_synchronize() +#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE); +#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE); + +inline void OrderAccess::acquire() { + READ_MEM_BARRIER; +} + +inline void OrderAccess::release() { + WRITE_MEM_BARRIER; +} + +inline void OrderAccess::fence() { + FULL_MEM_BARRIER; +} + +inline void OrderAccess::cross_modify_fence() { } + +#endif // OS_CPU_BSD_AARCH64_ORDERACCESS_BSD_AARCH64_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp new file mode 100644 index 00000000000..18c09fc483a --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp @@ -0,0 +1,806 @@ +/* + * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +// no precompiled headers +#include "jvm.h" +#include "asm/macroAssembler.hpp" +#include "classfile/classLoader.hpp" +#include "classfile/systemDictionary.hpp" +#include "classfile/vmSymbols.hpp" +#include "code/codeCache.hpp" +#include "code/icBuffer.hpp" +#include "code/vtableStubs.hpp" +#include "interpreter/interpreter.hpp" +#include "logging/log.hpp" +#include "memory/allocation.inline.hpp" +#include "os_share_bsd.hpp" +#include "prims/jniFastGetField.hpp" +#include "prims/jvm_misc.hpp" +#include "runtime/arguments.hpp" +#include "runtime/extendedPC.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/java.hpp" +#include "runtime/javaCalls.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/osThread.hpp" +#include "runtime/safepointMechanism.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/stubRoutines.hpp" +#include "runtime/thread.inline.hpp" +#include "runtime/timer.hpp" +#include "utilities/align.hpp" +#include "utilities/events.hpp" +#include "utilities/vmError.hpp" + +// put OS-includes here +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +#ifndef __OpenBSD__ +# include +#endif + +#if !defined(__APPLE__) && !defined(__NetBSD__) +# include +#endif + +// needed by current_stack_region() workaround for Mavericks +#if defined(__APPLE__) +# include +# include +# include +# define DEFAULT_MAIN_THREAD_STACK_PAGES 2048 +# define OS_X_10_9_0_KERNEL_MAJOR_VERSION 13 +#endif + +#define SPELL_REG_SP "sp" +#define SPELL_REG_FP "fp" + +#ifdef __APPLE__ +// see darwin-xnu/osfmk/mach/arm/_structs.h + +# if __DARWIN_UNIX03 && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_5) + // 10.5 UNIX03 member name prefixes + #define DU3_PREFIX(s, m) __ ## s.__ ## m +# else + #define DU3_PREFIX(s, m) s ## . ## m +# endif +#endif + +#define context_x uc_mcontext->DU3_PREFIX(ss,x) +#define context_fp uc_mcontext->DU3_PREFIX(ss,fp) +#define context_lr uc_mcontext->DU3_PREFIX(ss,lr) +#define context_sp uc_mcontext->DU3_PREFIX(ss,sp) +#define context_pc uc_mcontext->DU3_PREFIX(ss,pc) +#define context_cpsr uc_mcontext->DU3_PREFIX(ss,cpsr) + +address os::current_stack_pointer() { +#if defined(__clang__) || defined(__llvm__) + void *sp; + __asm__("mov %0, " SPELL_REG_SP : "=r"(sp)); + return (address) sp; +#else + register void *sp __asm__ (SPELL_REG_SP); + return (address) sp; +#endif +} + +char* os::non_memory_address_word() { + // Must never look like an address returned by reserve_memory, + // even in its subfields (as defined by the CPU immediate fields, + // if the CPU splits constants across multiple instructions). + + // the return value used in computation of Universe::non_oop_word(), which + // is loaded by cpu/aarch64 by MacroAssembler::movptr(Register, uintptr_t) + return (char*) 0xffffffffffff; +} + +address os::Bsd::ucontext_get_pc(const ucontext_t * uc) { + return (address)uc->context_pc; +} + +void os::Bsd::ucontext_set_pc(ucontext_t * uc, address pc) { + uc->context_pc = (intptr_t)pc ; +} + +intptr_t* os::Bsd::ucontext_get_sp(const ucontext_t * uc) { + return (intptr_t*)uc->context_sp; +} + +intptr_t* os::Bsd::ucontext_get_fp(const ucontext_t * uc) { + return (intptr_t*)uc->context_fp; +} + +// For Forte Analyzer AsyncGetCallTrace profiling support - thread +// is currently interrupted by SIGPROF. 
+// os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal +// frames. Currently we don't do that on Bsd, so it's the same as +// os::fetch_frame_from_context(). +ExtendedPC os::Bsd::fetch_frame_from_ucontext(Thread* thread, + const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { + + assert(thread != NULL, "just checking"); + assert(ret_sp != NULL, "just checking"); + assert(ret_fp != NULL, "just checking"); + + return os::fetch_frame_from_context(uc, ret_sp, ret_fp); +} + +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, + intptr_t** ret_sp, intptr_t** ret_fp) { + + ExtendedPC epc; + const ucontext_t* uc = (const ucontext_t*)ucVoid; + + if (uc != NULL) { + epc = ExtendedPC(os::Bsd::ucontext_get_pc(uc)); + if (ret_sp) *ret_sp = os::Bsd::ucontext_get_sp(uc); + if (ret_fp) *ret_fp = os::Bsd::ucontext_get_fp(uc); + } else { + // construct empty ExtendedPC for return value checking + epc = ExtendedPC(NULL); + if (ret_sp) *ret_sp = (intptr_t *)NULL; + if (ret_fp) *ret_fp = (intptr_t *)NULL; + } + + return epc; +} + +frame os::fetch_frame_from_context(const void* ucVoid) { + intptr_t* sp; + intptr_t* fp; + ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); + return frame(sp, fp, epc.pc()); +} + +bool os::Bsd::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) { + address pc = (address) os::Bsd::ucontext_get_pc(uc); + if (Interpreter::contains(pc)) { + // interpreter performs stack banging after the fixed frame header has + // been generated while the compilers perform it before. To maintain + // semantic consistency between interpreted and compiled frames, the + // method returns the Java sender of the current frame. + *fr = os::fetch_frame_from_context(uc); + if (!fr->is_first_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + *fr = fr->java_sender(); + } + } else { + // more complex code with compiled code + assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); + CodeBlob* cb = CodeCache::find_blob(pc); + if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { + // Not sure where the pc points to, fallback to default + // stack overflow handling + return false; + } else { + // In compiled code, the stack banging is performed before LR + // has been saved in the frame. LR is live, and SP and FP + // belong to the caller. + intptr_t* fp = os::Bsd::ucontext_get_fp(uc); + intptr_t* sp = os::Bsd::ucontext_get_sp(uc); + address pc = (address)(uc->context_lr + - NativeInstruction::instruction_size); + *fr = frame(sp, fp, pc); + if (!fr->is_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + assert(!fr->is_first_frame(), "Safety check"); + *fr = fr->java_sender(); + } + } + } + assert(fr->is_java_frame(), "Safety check"); + return true; +} + +// JVM compiled with -fno-omit-frame-pointer, so RFP is saved on the stack. 
+frame os::get_sender_for_C_frame(frame* fr) { + return frame(fr->link(), fr->link(), fr->sender_pc()); +} + +NOINLINE frame os::current_frame() { + intptr_t *fp = *(intptr_t **)__builtin_frame_address(0); + frame myframe((intptr_t*)os::current_stack_pointer(), + (intptr_t*)fp, + CAST_FROM_FN_PTR(address, os::current_frame)); + if (os::is_first_C_frame(&myframe)) { + // stack is not walkable + return frame(); + } else { + return os::get_sender_for_C_frame(&myframe); + } +} + +extern "C" JNIEXPORT int +JVM_handle_bsd_signal(int sig, + siginfo_t* info, + void* ucVoid, + int abort_if_unrecognized) { + ucontext_t* uc = (ucontext_t*) ucVoid; + + Thread* t = Thread::current_or_null_safe(); + + // Enable WXWrite: this function is called by the signal handler at arbitrary + // point of execution. + ThreadWXEnable wx(WXWrite, t); + + // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away + // (no destructors can be run) + os::ThreadCrashProtection::check_crash_protection(sig, t); + + SignalHandlerMark shm(t); + + // Note: it's not uncommon that JNI code uses signal/sigset to install + // then restore certain signal handler (e.g. to temporarily block SIGPIPE, + // or have a SIGILL handler when detecting CPU type). When that happens, + // JVM_handle_bsd_signal() might be invoked with junk info/ucVoid. To + // avoid unnecessary crash when libjsig is not preloaded, try handle signals + // that do not require siginfo/ucontext first. + + if (sig == SIGPIPE || sig == SIGXFSZ) { + // allow chained handler to go first + if (os::Bsd::chained_handler(sig, info, ucVoid)) { + return true; + } else { + // Ignoring SIGPIPE/SIGXFSZ - see bugs 4229104 or 6499219 + return true; + } + } + +#ifdef CAN_SHOW_REGISTERS_ON_ASSERT + if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) { + if (handle_assert_poison_fault(ucVoid, info->si_addr)) { + return 1; + } + } +#endif + + JavaThread* thread = NULL; + VMThread* vmthread = NULL; + if (os::Bsd::signal_handlers_are_installed) { + if (t != NULL ){ + if(t->is_Java_thread()) { + thread = (JavaThread*)t; + } + else if(t->is_VM_thread()){ + vmthread = (VMThread *)t; + } + } + } +/* + NOTE: does not seem to work on bsd. + if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) { + // can't decode this kind of signal + info = NULL; + } else { + assert(sig == info->si_signo, "bad siginfo"); + } +*/ + // decide if this trap can be handled by a stub + address stub = NULL; + + address pc = NULL; + + //%note os_trap_1 + if (info != NULL && uc != NULL && thread != NULL) { + pc = (address) os::Bsd::ucontext_get_pc(uc); + + if (StubRoutines::is_safefetch_fault(pc)) { + os::Bsd::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc)); + return 1; + } + + // Handle ALL stack overflow variations here + if (sig == SIGSEGV || sig == SIGBUS) { + address addr = (address) info->si_addr; + + // Make sure the high order byte is sign extended, as it may be masked away by the hardware. 
+ if ((uintptr_t(addr) & (uintptr_t(1) << 55)) != 0) { + addr = address(uintptr_t(addr) | (uintptr_t(0xFF) << 56)); + } + + // check if fault address is within thread stack + if (thread->is_in_full_stack(addr)) { + // stack overflow + if (thread->in_stack_yellow_reserved_zone(addr)) { + if (thread->thread_state() == _thread_in_Java) { + if (thread->in_stack_reserved_zone(addr)) { + frame fr; + if (os::Bsd::get_frame_at_stack_banging_point(thread, uc, &fr)) { + assert(fr.is_java_frame(), "Must be a Java frame"); + frame activation = + SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); + if (activation.sp() != NULL) { + thread->disable_stack_reserved_zone(); + if (activation.is_interpreted_frame()) { + thread->set_reserved_stack_activation((address)( + activation.fp() + frame::interpreter_frame_initial_sp_offset)); + } else { + thread->set_reserved_stack_activation((address)activation.unextended_sp()); + } + return 1; + } + } + } + // Throw a stack overflow exception. Guard pages will be reenabled + // while unwinding the stack. + thread->disable_stack_yellow_reserved_zone(); + stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); + } else { + // Thread was in the vm or native code. Return and try to finish. + thread->disable_stack_yellow_reserved_zone(); + return 1; + } + } else if (thread->in_stack_red_zone(addr)) { + // Fatal red zone violation. Disable the guard pages and fall through + // to handle_unexpected_exception way down below. + thread->disable_stack_red_zone(); + tty->print_raw_cr("An irrecoverable stack overflow has occurred."); + } + } + } + + // We test if stub is already set (by the stack overflow code + // above) so it is not overwritten by the code that follows. This + // check is not required on other platforms, because on other + // platforms we check for SIGSEGV only or SIGBUS only, where here + // we have to check for both SIGSEGV and SIGBUS. + if (thread->thread_state() == _thread_in_Java && stub == NULL) { + // Java thread running in Java code => find exception handler if any + // a fault inside compiled code, the interpreter, or a stub + + // Handle signal from NativeJump::patch_verified_entry(). + if ((sig == SIGILL) + && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { + if (TraceTraps) { + tty->print_cr("trap: zombie_not_entrant"); + } + stub = SharedRuntime::get_handle_wrong_method_stub(); + } else if ((sig == SIGSEGV || sig == SIGBUS) && SafepointMechanism::is_poll_address((address)info->si_addr)) { + stub = SharedRuntime::get_poll_stub(pc); +#if defined(__APPLE__) + // 32-bit Darwin reports a SIGBUS for nearly all memory access exceptions. + // 64-bit Darwin may also use a SIGBUS (seen with compressed oops). + // Catching SIGBUS here prevents the implicit SIGBUS NULL check below from + // being called, so only do so if the implicit NULL check is not necessary. + } else if (sig == SIGBUS && !MacroAssembler::uses_implicit_null_check(info->si_addr)) { +#else + } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) { +#endif + // BugId 4454115: A read from a MappedByteBuffer can fault + // here if the underlying file has been truncated. + // Do not crash the VM in such a case. + CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CompiledMethod* nm = (cb != NULL) ? 
cb->as_compiled_method_or_null() : NULL; + bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); + if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) { + address next_pc = pc + NativeCall::instruction_size; + if (is_unsafe_arraycopy) { + next_pc = UnsafeCopyMemory::page_error_continue_pc(pc); + } + stub = SharedRuntime::handle_unsafe_access(thread, next_pc); + } + } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) { + // Pull a pointer to the error message out of the instruction + // stream. + const uint64_t *detail_msg_ptr + = (uint64_t*)(pc + NativeInstruction::instruction_size); + const char *detail_msg = (const char *)*detail_msg_ptr; + const char *msg = "stop"; + if (TraceTraps) { + tty->print_cr("trap: %s: (SIGILL)", msg); + } + +PRAGMA_DIAG_PUSH +PRAGMA_DISABLE_GCC_WARNING("-Wformat-nonliteral") +PRAGMA_DISABLE_GCC_WARNING("-Wuninitialized") + va_list detail_args; + VMError::report_and_die(INTERNAL_ERROR, msg, detail_msg, detail_args, thread, + pc, info, ucVoid, NULL, 0, 0); + va_end(detail_args); +PRAGMA_DIAG_POP + } else if (sig == SIGFPE && + (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) { + stub = + SharedRuntime:: + continuation_for_implicit_exception(thread, + pc, + SharedRuntime:: + IMPLICIT_DIVIDE_BY_ZERO); + } else if ((sig == SIGSEGV || sig == SIGBUS) && + MacroAssembler::uses_implicit_null_check(info->si_addr)) { + // Determination of interpreter/vtable stub/compiled code null exception + stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); + } + } else if ((thread->thread_state() == _thread_in_vm || + thread->thread_state() == _thread_in_native) && + sig == SIGBUS && /* info->si_code == BUS_OBJERR && */ + thread->doing_unsafe_access()) { + address next_pc = pc + NativeCall::instruction_size; + if (UnsafeCopyMemory::contains_pc(pc)) { + next_pc = UnsafeCopyMemory::page_error_continue_pc(pc); + } + stub = SharedRuntime::handle_unsafe_access(thread, next_pc); + } + + // jni_fast_GetField can trap at certain pc's if a GC kicks in + // and the heap gets shrunk before the field access. + if ((sig == SIGSEGV) || (sig == SIGBUS)) { + address addr = JNI_FastGetField::find_slowcase_pc(pc); + if (addr != (address)-1) { + stub = addr; + } + } + } + + if (stub != NULL) { + // save all thread context in case we need to restore it + if (thread != NULL) thread->set_saved_exception_pc(pc); + + os::Bsd::ucontext_set_pc(uc, stub); + return true; + } + + // signal-chaining + if (os::Bsd::chained_handler(sig, info, ucVoid)) { + return true; + } + + if (!abort_if_unrecognized) { + // caller wants another chance, so give it to him + return false; + } + + if (pc == NULL && uc != NULL) { + pc = os::Bsd::ucontext_get_pc(uc); + } + + // unmask current signal + sigset_t newset; + sigemptyset(&newset); + sigaddset(&newset, sig); + sigprocmask(SIG_UNBLOCK, &newset, NULL); + + VMError::report_and_die(t, sig, pc, info, ucVoid); + + ShouldNotReachHere(); + return true; // Mute compiler +} + +void os::Bsd::init_thread_fpu_state(void) { +} + +bool os::is_allocatable(size_t bytes) { + return true; +} + +//////////////////////////////////////////////////////////////////////////////// +// thread stack + +// Minimum usable stack sizes required to get to user code. Space for +// HotSpot guard pages is added later. 
+size_t os::Posix::_compiler_thread_min_stack_allowed = 72 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 72 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 72 * K; + +// return default stack size for thr_type +size_t os::Posix::default_stack_size(os::ThreadType thr_type) { + // default stack size (compiler thread needs larger stack) + size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); + return s; +} + +static void current_stack_region(address * bottom, size_t * size) { +#ifdef __APPLE__ + pthread_t self = pthread_self(); + void *stacktop = pthread_get_stackaddr_np(self); + *size = pthread_get_stacksize_np(self); + // workaround for OS X 10.9.0 (Mavericks) + // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages + if (pthread_main_np() == 1) { + // At least on Mac OS 10.12 we have observed stack sizes not aligned + // to pages boundaries. This can be provoked by e.g. setrlimit() (ulimit -s xxxx in the + // shell). Apparently Mac OS actually rounds upwards to next multiple of page size, + // however, we round downwards here to be on the safe side. + *size = align_down(*size, getpagesize()); + + if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) { + char kern_osrelease[256]; + size_t kern_osrelease_size = sizeof(kern_osrelease); + int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0); + if (ret == 0) { + // get the major number, atoi will ignore the minor amd micro portions of the version string + if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) { + *size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize()); + } + } + } + } + *bottom = (address) stacktop - *size; +#elif defined(__OpenBSD__) + stack_t ss; + int rslt = pthread_stackseg_np(pthread_self(), &ss); + + if (rslt != 0) + fatal("pthread_stackseg_np failed with error = %d", rslt); + + *bottom = (address)((char *)ss.ss_sp - ss.ss_size); + *size = ss.ss_size; +#else + pthread_attr_t attr; + + int rslt = pthread_attr_init(&attr); + + // JVM needs to know exact stack location, abort if it fails + if (rslt != 0) + fatal("pthread_attr_init failed with error = %d", rslt); + + rslt = pthread_attr_get_np(pthread_self(), &attr); + + if (rslt != 0) + fatal("pthread_attr_get_np failed with error = %d", rslt); + + if (pthread_attr_getstackaddr(&attr, (void **)bottom) != 0 || + pthread_attr_getstacksize(&attr, size) != 0) { + fatal("Can not locate current stack attributes!"); + } + + pthread_attr_destroy(&attr); +#endif + assert(os::current_stack_pointer() >= *bottom && + os::current_stack_pointer() < *bottom + *size, "just checking"); +} + +address os::current_stack_base() { + address bottom; + size_t size; + current_stack_region(&bottom, &size); + return (bottom + size); +} + +size_t os::current_stack_size() { + // stack size includes normal stack and HotSpot guard pages + address bottom; + size_t size; + current_stack_region(&bottom, &size); + return size; +} + +///////////////////////////////////////////////////////////////////////////// +// helper functions for fatal error handler + +void os::print_context(outputStream *st, const void *context) { + if (context == NULL) return; + + const ucontext_t *uc = (const ucontext_t*)context; + st->print_cr("Registers:"); + st->print( " x0=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 0]); + st->print(" x1=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 1]); + st->print(" x2=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 2]); + st->print(" x3=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 
3]); + st->cr(); + st->print( " x4=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 4]); + st->print(" x5=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 5]); + st->print(" x6=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 6]); + st->print(" x7=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 7]); + st->cr(); + st->print( " x8=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 8]); + st->print(" x9=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 9]); + st->print(" x10=" INTPTR_FORMAT, (intptr_t)uc->context_x[10]); + st->print(" x11=" INTPTR_FORMAT, (intptr_t)uc->context_x[11]); + st->cr(); + st->print( "x12=" INTPTR_FORMAT, (intptr_t)uc->context_x[12]); + st->print(" x13=" INTPTR_FORMAT, (intptr_t)uc->context_x[13]); + st->print(" x14=" INTPTR_FORMAT, (intptr_t)uc->context_x[14]); + st->print(" x15=" INTPTR_FORMAT, (intptr_t)uc->context_x[15]); + st->cr(); + st->print( "x16=" INTPTR_FORMAT, (intptr_t)uc->context_x[16]); + st->print(" x17=" INTPTR_FORMAT, (intptr_t)uc->context_x[17]); + st->print(" x18=" INTPTR_FORMAT, (intptr_t)uc->context_x[18]); + st->print(" x19=" INTPTR_FORMAT, (intptr_t)uc->context_x[19]); + st->cr(); + st->print( "x20=" INTPTR_FORMAT, (intptr_t)uc->context_x[20]); + st->print(" x21=" INTPTR_FORMAT, (intptr_t)uc->context_x[21]); + st->print(" x22=" INTPTR_FORMAT, (intptr_t)uc->context_x[22]); + st->print(" x23=" INTPTR_FORMAT, (intptr_t)uc->context_x[23]); + st->cr(); + st->print( "x24=" INTPTR_FORMAT, (intptr_t)uc->context_x[24]); + st->print(" x25=" INTPTR_FORMAT, (intptr_t)uc->context_x[25]); + st->print(" x26=" INTPTR_FORMAT, (intptr_t)uc->context_x[26]); + st->print(" x27=" INTPTR_FORMAT, (intptr_t)uc->context_x[27]); + st->cr(); + st->print( "x28=" INTPTR_FORMAT, (intptr_t)uc->context_x[28]); + st->print(" fp=" INTPTR_FORMAT, (intptr_t)uc->context_fp); + st->print(" lr=" INTPTR_FORMAT, (intptr_t)uc->context_lr); + st->print(" sp=" INTPTR_FORMAT, (intptr_t)uc->context_sp); + st->cr(); + st->print( "pc=" INTPTR_FORMAT, (intptr_t)uc->context_pc); + st->print(" cpsr=" INTPTR_FORMAT, (intptr_t)uc->context_cpsr); + st->cr(); + + intptr_t *sp = (intptr_t *)os::Bsd::ucontext_get_sp(uc); + st->print_cr("Top of Stack: (sp=" INTPTR_FORMAT ")", (intptr_t)sp); + print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t)); + st->cr(); + + // Note: it may be unsafe to inspect memory near pc. For example, pc may + // point to garbage if entry point in an nmethod is corrupted. Leave + // this at the end, and hope for the best. 
+ address pc = os::Bsd::ucontext_get_pc(uc); + print_instructions(st, pc, 4/*native instruction size*/); + st->cr(); +} + +void os::print_register_info(outputStream *st, const void *context) { + if (context == NULL) return; + + const ucontext_t *uc = (const ucontext_t*)context; + + st->print_cr("Register to memory mapping:"); + st->cr(); + + // this is horrendously verbose but the layout of the registers in the + // context does not match how we defined our abstract Register set, so + // we can't just iterate through the gregs area + + // this is only for the "general purpose" registers + + st->print(" x0="); print_location(st, uc->context_x[ 0]); + st->print(" x1="); print_location(st, uc->context_x[ 1]); + st->print(" x2="); print_location(st, uc->context_x[ 2]); + st->print(" x3="); print_location(st, uc->context_x[ 3]); + st->print(" x4="); print_location(st, uc->context_x[ 4]); + st->print(" x5="); print_location(st, uc->context_x[ 5]); + st->print(" x6="); print_location(st, uc->context_x[ 6]); + st->print(" x7="); print_location(st, uc->context_x[ 7]); + st->print(" x8="); print_location(st, uc->context_x[ 8]); + st->print(" x9="); print_location(st, uc->context_x[ 9]); + st->print("x10="); print_location(st, uc->context_x[10]); + st->print("x11="); print_location(st, uc->context_x[11]); + st->print("x12="); print_location(st, uc->context_x[12]); + st->print("x13="); print_location(st, uc->context_x[13]); + st->print("x14="); print_location(st, uc->context_x[14]); + st->print("x15="); print_location(st, uc->context_x[15]); + st->print("x16="); print_location(st, uc->context_x[16]); + st->print("x17="); print_location(st, uc->context_x[17]); + st->print("x18="); print_location(st, uc->context_x[18]); + st->print("x19="); print_location(st, uc->context_x[19]); + st->print("x20="); print_location(st, uc->context_x[20]); + st->print("x21="); print_location(st, uc->context_x[21]); + st->print("x22="); print_location(st, uc->context_x[22]); + st->print("x23="); print_location(st, uc->context_x[23]); + st->print("x24="); print_location(st, uc->context_x[24]); + st->print("x25="); print_location(st, uc->context_x[25]); + st->print("x26="); print_location(st, uc->context_x[26]); + st->print("x27="); print_location(st, uc->context_x[27]); + st->print("x28="); print_location(st, uc->context_x[28]); + + st->cr(); +} + +void os::setup_fpu() { +} + +#ifndef PRODUCT +void os::verify_stack_alignment() { + assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment"); +} +#endif + +int os::extra_bang_size_in_bytes() { + // AArch64 does not require the additional stack bang. 
+ return 0; +} + +void os::current_thread_enable_wx(WXMode mode) { + pthread_jit_write_protect_np(mode == WXExec); +} + +extern "C" { + int SpinPause() { + return 0; + } + + void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { + if (from > to) { + const jshort *end = from + count; + while (from < end) + *(to++) = *(from++); + } + else if (from < to) { + const jshort *end = from; + from += count - 1; + to += count - 1; + while (from >= end) + *(to--) = *(from--); + } + } + void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { + if (from > to) { + const jint *end = from + count; + while (from < end) + *(to++) = *(from++); + } + else if (from < to) { + const jint *end = from; + from += count - 1; + to += count - 1; + while (from >= end) + *(to--) = *(from--); + } + } + void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { + if (from > to) { + const jlong *end = from + count; + while (from < end) + os::atomic_copy64(from++, to++); + } + else if (from < to) { + const jlong *end = from; + from += count - 1; + to += count - 1; + while (from >= end) + os::atomic_copy64(from--, to--); + } + } + + void _Copy_arrayof_conjoint_bytes(const HeapWord* from, + HeapWord* to, + size_t count) { + memmove(to, from, count); + } + void _Copy_arrayof_conjoint_jshorts(const HeapWord* from, + HeapWord* to, + size_t count) { + memmove(to, from, count * 2); + } + void _Copy_arrayof_conjoint_jints(const HeapWord* from, + HeapWord* to, + size_t count) { + memmove(to, from, count * 4); + } + void _Copy_arrayof_conjoint_jlongs(const HeapWord* from, + HeapWord* to, + size_t count) { + memmove(to, from, count * 8); + } +}; diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.hpp new file mode 100644 index 00000000000..14638bc68d6 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.hpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_HPP +#define OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_HPP + + static void setup_fpu(); + + static bool is_allocatable(size_t bytes); + + // Used to register dynamic code cache area with the OS + // Note: Currently only used in 64 bit Windows implementations + static bool register_code_area(char *low, char *high) { return true; } + + // Atomically copy 64 bits of data + static void atomic_copy64(const volatile void *src, volatile void *dst) { + *(jlong *) dst = *(const jlong *) src; + } + +#endif // OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/prefetch_bsd_aarch64.inline.hpp b/src/hotspot/os_cpu/bsd_aarch64/prefetch_bsd_aarch64.inline.hpp new file mode 100644 index 00000000000..e24d0328e6d --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/prefetch_bsd_aarch64.inline.hpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP +#define OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP + +#include "runtime/prefetch.hpp" + + +inline void Prefetch::read (void *loc, intx interval) { + if (interval >= 0) + asm("prfm PLDL1KEEP, [%0, %1]" : : "r"(loc), "r"(interval)); +} + +inline void Prefetch::write(void *loc, intx interval) { + if (interval >= 0) + asm("prfm PSTL1KEEP, [%0, %1]" : : "r"(loc), "r"(interval)); +} + +#endif // OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.cpp new file mode 100644 index 00000000000..ffd31e8fed8 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.cpp @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/metaspaceShared.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/thread.inline.hpp"
+
+frame JavaThread::pd_last_frame() {
+  assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+  vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
+  return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
+}
+
+// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
+// currently interrupted by SIGPROF
+bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
+                                                     void* ucontext, bool isInJava) {
+  assert(Thread::current() == this, "caller must be current thread");
+  return pd_get_top_frame(fr_addr, ucontext, isInJava);
+}
+
+bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
+  return pd_get_top_frame(fr_addr, ucontext, isInJava);
+}
+
+bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
+  assert(this->is_Java_thread(), "must be JavaThread");
+  JavaThread* jt = (JavaThread *)this;
+
+  // If we have a last_Java_frame, then we should use it even if
+  // isInJava == true. It should be more reliable than ucontext info.
+  if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) {
+    *fr_addr = jt->pd_last_frame();
+    return true;
+  }
+
+  // At this point, we don't have a last_Java_frame, so
+  // we try to glean some information out of the ucontext
+  // if we were running Java code when SIGPROF came in.
+  if (isInJava) {
+    ucontext_t* uc = (ucontext_t*) ucontext;
+
+    intptr_t* ret_fp;
+    intptr_t* ret_sp;
+    ExtendedPC addr = os::Bsd::fetch_frame_from_ucontext(this, uc, &ret_sp, &ret_fp);
+    if (addr.pc() == NULL || ret_sp == NULL ) {
+      // ucontext wasn't useful
+      return false;
+    }
+
+    if (MetaspaceShared::is_in_trampoline_frame(addr.pc())) {
+      // In the middle of a trampoline call. Bail out for safety.
+      // This happens rarely so shouldn't affect profiling.
+      return false;
+    }
+
+    frame ret_frame(ret_sp, ret_fp, addr.pc());
+    if (!ret_frame.safe_for_sender(jt)) {
+#if COMPILER2_OR_JVMCI
+      // C2 and JVMCI use fp as a general register; see if a NULL fp helps
+      frame ret_frame2(ret_sp, NULL, addr.pc());
+      if (!ret_frame2.safe_for_sender(jt)) {
+        // nothing else to try if the frame isn't good
+        return false;
+      }
+      ret_frame = ret_frame2;
+#else
+      // nothing else to try if the frame isn't good
+      return false;
+#endif // COMPILER2_OR_JVMCI
+    }
+    *fr_addr = ret_frame;
+    return true;
+  }
+
+  // nothing else to try
+  return false;
+}
+
+void JavaThread::cache_global_variables() { }
+
diff --git a/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.hpp
new file mode 100644
index 00000000000..270d3cb13af
--- /dev/null
+++ b/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.hpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_AARCH64_THREAD_BSD_AARCH64_HPP
+#define OS_CPU_BSD_AARCH64_THREAD_BSD_AARCH64_HPP
+
+ private:
+  void pd_initialize() {
+    _anchor.clear();
+  }
+
+  frame pd_last_frame();
+
+ public:
+  // Mutators are highly dangerous....
+  intptr_t* last_Java_fp() { return _anchor.last_Java_fp(); }
+  void set_last_Java_fp(intptr_t* fp) { _anchor.set_last_Java_fp(fp); }
+
+  void set_base_of_stack_pointer(intptr_t* base_sp) {
+  }
+
+  static ByteSize last_Java_fp_offset() {
+    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
+  }
+
+  intptr_t* base_of_stack_pointer() {
+    return NULL;
+  }
+  void record_base_of_stack_pointer() {
+  }
+
+  bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
+                                           bool isInJava);
+
+  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
+
+ private:
+  bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
+ public:
+
+  static Thread *aarch64_get_thread_helper() {
+    return Thread::current();
+  }
+
+  // These routines are only used on cpu architectures that
+  // have separate register stacks (Itanium).
+  static bool register_stack_overflow() { return false; }
+  static void enable_register_stack_guard() {}
+  static void disable_register_stack_guard() {}
+
+#endif // OS_CPU_BSD_AARCH64_THREAD_BSD_AARCH64_HPP
diff --git a/src/hotspot/os_cpu/bsd_aarch64/vmStructs_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/vmStructs_bsd_aarch64.hpp
new file mode 100644
index 00000000000..07b878106cf
--- /dev/null
+++ b/src/hotspot/os_cpu/bsd_aarch64/vmStructs_bsd_aarch64.hpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_AARCH64_VMSTRUCTS_BSD_AARCH64_HPP
+#define OS_CPU_BSD_AARCH64_VMSTRUCTS_BSD_AARCH64_HPP
+
+// These are the OS and CPU-specific fields, types and integer
+// constants required by the Serviceability Agent. This file is
+// referenced by vmStructs.cpp.
+
+#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
+                                                                  \
+  /******************************/                                \
+  /* Threads (NOTE: incomplete) */                                \
+  /******************************/                                \
+  nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t)    \
+  nonstatic_field(OSThread, _unique_thread_id, uint64_t)
+
+
+#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
+                                                                  \
+  /**********************/                                        \
+  /* Thread IDs         */                                        \
+  /**********************/                                        \
+                                                                  \
+  declare_unsigned_integer_type(OSThread::thread_id_t)
+
+#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
+
+#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
+
+#endif // OS_CPU_BSD_AARCH64_VMSTRUCTS_BSD_AARCH64_HPP
diff --git a/src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp
new file mode 100644
index 00000000000..5cd6a4ca559
--- /dev/null
+++ b/src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.hpp"
+#include "runtime/vm_version.hpp"
+
+#include <sys/sysctl.h>
+
+static bool cpu_has(const char* optional) {
+  uint32_t val;
+  size_t len = sizeof(val);
+  if (sysctlbyname(optional, &val, &len, NULL, 0)) {
+    return false;
+  }
+  return val;
+}
+
+void VM_Version::get_os_cpu_info() {
+  size_t sysctllen;
+
+  // hw.optional.floatingpoint always returns 1, see
+  // https://github.com/apple/darwin-xnu/blob/master/bsd/kern/kern_mib.c#L416.
+  // In ID_AA64PFR0_EL1, the AdvSIMD field always equals the FP field.
+  assert(cpu_has("hw.optional.floatingpoint"), "should be");
+  assert(cpu_has("hw.optional.neon"), "should be");
+  _features = CPU_FP | CPU_ASIMD;
+
+  // Only a few features are available via sysctl; see line 614 of
+  // https://opensource.apple.com/source/xnu/xnu-6153.141.1/bsd/kern/kern_mib.c.auto.html
+  if (cpu_has("hw.optional.armv8_crc32")) _features |= CPU_CRC32;
+  if (cpu_has("hw.optional.armv8_1_atomics")) _features |= CPU_LSE;
+
+  int cache_line_size;
+  int hw_conf_cache_line[] = { CTL_HW, HW_CACHELINE };
+  sysctllen = sizeof(cache_line_size);
+  if (sysctl(hw_conf_cache_line, 2, &cache_line_size, &sysctllen, NULL, 0)) {
+    cache_line_size = 16;
+  }
+  _icache_line_size = 16; // minimal line length CCSIDR_EL1 can hold
+  _dcache_line_size = cache_line_size;
+
+  uint64_t dczid_el0;
+  __asm__ (
+    "mrs %0, DCZID_EL0\n"
+    : "=r"(dczid_el0)
+  );
+  if (!(dczid_el0 & 0x10)) {
+    _zva_length = 4 << (dczid_el0 & 0xf);
+  }
+
+  int family;
+  sysctllen = sizeof(family);
+  if (sysctlbyname("hw.cpufamily", &family, &sysctllen, NULL, 0)) {
+    family = 0;
+  }
+  _model = family;
+  _cpu = CPU_APPLE;
+}
+
+#ifdef __APPLE__
+
+bool VM_Version::is_cpu_emulated() {
+  return false;
+}
+
+#endif