diff options
Diffstat (limited to 'contrib/llvm-project/compiler-rt/lib/builtins/riscv')
6 files changed, 534 insertions, 0 deletions
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c
new file mode 100644
index 000000000000..1a5a3de95de9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c
@@ -0,0 +1,42 @@
+//=== lib/builtins/riscv/fp_mode.c - Floating-point mode utilities -*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "../fp_mode.h"
+
+#define RISCV_TONEAREST 0x0
+#define RISCV_TOWARDZERO 0x1
+#define RISCV_DOWNWARD 0x2
+#define RISCV_UPWARD 0x3
+
+#define RISCV_INEXACT 0x1
+
+CRT_FE_ROUND_MODE __fe_getround(void) {
+#if defined(__riscv_f) || defined(__riscv_zfinx)
+  int frm;
+  __asm__ __volatile__("frrm %0" : "=r" (frm));
+  switch (frm) {
+  case RISCV_TOWARDZERO:
+    return CRT_FE_TOWARDZERO;
+  case RISCV_DOWNWARD:
+    return CRT_FE_DOWNWARD;
+  case RISCV_UPWARD:
+    return CRT_FE_UPWARD;
+  case RISCV_TONEAREST:
+  default:
+    return CRT_FE_TONEAREST;
+  }
+#else
+  return CRT_FE_TONEAREST;
+#endif
+}
+
+int __fe_raise_inexact(void) {
+#if defined(__riscv_f) || defined(__riscv_zfinx)
+  __asm__ __volatile__("csrsi fflags, %0" :: "i" (RISCV_INEXACT));
+#endif
+  return 0;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/int_mul_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/int_mul_impl.inc
new file mode 100644
index 000000000000..53699b356f6a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/int_mul_impl.inc
@@ -0,0 +1,33 @@
+//===-- int_mul_impl.inc - Integer multiplication -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Helpers used by __mulsi3, __muldi3.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __mulxi3
+#error "__mulxi3 must be defined to use this generic implementation"
+#endif
+
+  .text
+  .align 2
+
+  .globl __mulxi3
+  .type __mulxi3, @function
+__mulxi3:
+  mv a2, a0
+  mv a0, zero
+.L1:
+  andi a3, a1, 1
+  beqz a3, .L2
+  add a0, a0, a2
+.L2:
+  srli a1, a1, 1
+  slli a2, a2, 1
+  bnez a1, .L1
+  ret
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/muldi3.S b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/muldi3.S
new file mode 100644
index 000000000000..9e292e8dd8b9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/muldi3.S
@@ -0,0 +1,11 @@
+//===--- muldi3.S - Integer multiplication routines -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#if __riscv_xlen == 64
+#define __mulxi3 __muldi3
+#include "int_mul_impl.inc"
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/mulsi3.S b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/mulsi3.S
new file mode 100644
index 000000000000..cfafb7a0d7b3
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/mulsi3.S
@@ -0,0 +1,12 @@
+//===--- mulsi3.S - Integer multiplication routines -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#if __riscv_xlen == 32
+#define __mulxi3 __mulsi3
+#include "int_mul_impl.inc"
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/restore.S b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/restore.S
new file mode 100644
index 000000000000..d87dfc1ac71d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/restore.S
@@ -0,0 +1,208 @@
+//===-- restore.S - restore up to 12 callee-save registers ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Multiple entry points depending on number of registers to restore
+//
+//===----------------------------------------------------------------------===//
+
+// All of the entry points are in the same section since we rely on many of
+// them falling through into each other and don't want the linker to
+// accidentally split them up, garbage collect, or reorder them.
+//
+// The entry points are grouped up into 2s for rv64 and 4s for rv32 since this
+// is the minimum grouping which will maintain the required 16-byte stack
+// alignment.
+ + .text + +#if __riscv_xlen == 32 + +#ifndef __riscv_abi_rve + + .globl __riscv_restore_12 + .type __riscv_restore_12,@function +__riscv_restore_12: + lw s11, 12(sp) + addi sp, sp, 16 + // fallthrough into __riscv_restore_11/10/9/8 + + .globl __riscv_restore_11 + .type __riscv_restore_11,@function + .globl __riscv_restore_10 + .type __riscv_restore_10,@function + .globl __riscv_restore_9 + .type __riscv_restore_9,@function + .globl __riscv_restore_8 + .type __riscv_restore_8,@function +__riscv_restore_11: +__riscv_restore_10: +__riscv_restore_9: +__riscv_restore_8: + lw s10, 0(sp) + lw s9, 4(sp) + lw s8, 8(sp) + lw s7, 12(sp) + addi sp, sp, 16 + // fallthrough into __riscv_restore_7/6/5/4 + + .globl __riscv_restore_7 + .type __riscv_restore_7,@function + .globl __riscv_restore_6 + .type __riscv_restore_6,@function + .globl __riscv_restore_5 + .type __riscv_restore_5,@function + .globl __riscv_restore_4 + .type __riscv_restore_4,@function +__riscv_restore_7: +__riscv_restore_6: +__riscv_restore_5: +__riscv_restore_4: + lw s6, 0(sp) + lw s5, 4(sp) + lw s4, 8(sp) + lw s3, 12(sp) + addi sp, sp, 16 + // fallthrough into __riscv_restore_3/2/1/0 + + .globl __riscv_restore_3 + .type __riscv_restore_3,@function + .globl __riscv_restore_2 + .type __riscv_restore_2,@function + .globl __riscv_restore_1 + .type __riscv_restore_1,@function + .globl __riscv_restore_0 + .type __riscv_restore_0,@function +__riscv_restore_3: +__riscv_restore_2: +__riscv_restore_1: +__riscv_restore_0: + lw s2, 0(sp) + lw s1, 4(sp) + lw s0, 8(sp) + lw ra, 12(sp) + addi sp, sp, 16 + ret + +#else + + .globl __riscv_restore_2 + .type __riscv_restore_2,@function + .globl __riscv_restore_1 + .type __riscv_restore_1,@function + .globl __riscv_restore_0 + .type __riscv_restore_0,@function +__riscv_restore_2: +__riscv_restore_1: +__riscv_restore_0: + lw s1, 0(sp) + lw s0, 4(sp) + lw ra, 8(sp) + addi sp, sp, 12 + ret + +#endif + +#elif __riscv_xlen == 64 + +#ifndef __riscv_abi_rve + + .globl 
__riscv_restore_12 + .type __riscv_restore_12,@function +__riscv_restore_12: + ld s11, 8(sp) + addi sp, sp, 16 + // fallthrough into __riscv_restore_11/10 + + .globl __riscv_restore_11 + .type __riscv_restore_11,@function + .globl __riscv_restore_10 + .type __riscv_restore_10,@function +__riscv_restore_11: +__riscv_restore_10: + ld s10, 0(sp) + ld s9, 8(sp) + addi sp, sp, 16 + // fallthrough into __riscv_restore_9/8 + + .globl __riscv_restore_9 + .type __riscv_restore_9,@function + .globl __riscv_restore_8 + .type __riscv_restore_8,@function +__riscv_restore_9: +__riscv_restore_8: + ld s8, 0(sp) + ld s7, 8(sp) + addi sp, sp, 16 + // fallthrough into __riscv_restore_7/6 + + .globl __riscv_restore_7 + .type __riscv_restore_7,@function + .globl __riscv_restore_6 + .type __riscv_restore_6,@function +__riscv_restore_7: +__riscv_restore_6: + ld s6, 0(sp) + ld s5, 8(sp) + addi sp, sp, 16 + // fallthrough into __riscv_restore_5/4 + + .globl __riscv_restore_5 + .type __riscv_restore_5,@function + .globl __riscv_restore_4 + .type __riscv_restore_4,@function +__riscv_restore_5: +__riscv_restore_4: + ld s4, 0(sp) + ld s3, 8(sp) + addi sp, sp, 16 + // fallthrough into __riscv_restore_3/2 + + .globl __riscv_restore_3 + .type __riscv_restore_3,@function + .globl __riscv_restore_2 + .type __riscv_restore_2,@function +__riscv_restore_3: +__riscv_restore_2: + ld s2, 0(sp) + ld s1, 8(sp) + addi sp, sp, 16 + // fallthrough into __riscv_restore_1/0 + + .globl __riscv_restore_1 + .type __riscv_restore_1,@function + .globl __riscv_restore_0 + .type __riscv_restore_0,@function +__riscv_restore_1: +__riscv_restore_0: + ld s0, 0(sp) + ld ra, 8(sp) + addi sp, sp, 16 + ret + +#else + + .globl __riscv_restore_2 + .type __riscv_restore_2,@function + .globl __riscv_restore_1 + .type __riscv_restore_1,@function + .globl __riscv_restore_0 + .type __riscv_restore_0,@function +__riscv_restore_2: +__riscv_restore_1: +__riscv_restore_0: + ld s1, 0(sp) + ld s0, 8(sp) + ld ra, 16(sp) + addi sp, sp, 24 + 
ret + +#endif + +#else +# error "xlen must be 32 or 64 for save-restore implementation +#endif diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/save.S b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/save.S new file mode 100644 index 000000000000..6324e05e9719 --- /dev/null +++ b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/save.S @@ -0,0 +1,228 @@ +//===-- save.S - save up to 12 callee-saved registers ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Multiple entry points depending on number of registers to save +// +//===----------------------------------------------------------------------===// + +// The entry points are grouped up into 2s for rv64 and 4s for rv32 since this +// is the minimum grouping which will maintain the required 16-byte stack +// alignment. 
+
+  .text
+
+#if __riscv_xlen == 32
+
+#ifndef __riscv_abi_rve
+
+  .globl __riscv_save_12
+  .type __riscv_save_12,@function
+__riscv_save_12:
+  addi sp, sp, -64
+  mv t1, zero
+  sw s11, 12(sp)
+  j .Lriscv_save_11_8
+
+  .globl __riscv_save_11
+  .type __riscv_save_11,@function
+  .globl __riscv_save_10
+  .type __riscv_save_10,@function
+  .globl __riscv_save_9
+  .type __riscv_save_9,@function
+  .globl __riscv_save_8
+  .type __riscv_save_8,@function
+__riscv_save_11:
+__riscv_save_10:
+__riscv_save_9:
+__riscv_save_8:
+  addi sp, sp, -64
+  li t1, 16
+.Lriscv_save_11_8:
+  sw s10, 16(sp)
+  sw s9, 20(sp)
+  sw s8, 24(sp)
+  sw s7, 28(sp)
+  j .Lriscv_save_7_4
+
+  .globl __riscv_save_7
+  .type __riscv_save_7,@function
+  .globl __riscv_save_6
+  .type __riscv_save_6,@function
+  .globl __riscv_save_5
+  .type __riscv_save_5,@function
+  .globl __riscv_save_4
+  .type __riscv_save_4,@function
+__riscv_save_7:
+__riscv_save_6:
+__riscv_save_5:
+__riscv_save_4:
+  addi sp, sp, -64
+  li t1, 32
+.Lriscv_save_7_4:
+  sw s6, 32(sp)
+  sw s5, 36(sp)
+  sw s4, 40(sp)
+  sw s3, 44(sp)
+  sw s2, 48(sp)
+  sw s1, 52(sp)
+  sw s0, 56(sp)
+  sw ra, 60(sp)
+  add sp, sp, t1
+  jr t0
+
+  .globl __riscv_save_3
+  .type __riscv_save_3,@function
+  .globl __riscv_save_2
+  .type __riscv_save_2,@function
+  .globl __riscv_save_1
+  .type __riscv_save_1,@function
+  .globl __riscv_save_0
+  .type __riscv_save_0,@function
+__riscv_save_3:
+__riscv_save_2:
+__riscv_save_1:
+__riscv_save_0:
+  addi sp, sp, -16
+  sw s2, 0(sp)
+  sw s1, 4(sp)
+  sw s0, 8(sp)
+  sw ra, 12(sp)
+  jr t0
+
+#else
+
+  .globl __riscv_save_2
+  .type __riscv_save_2,@function
+  .globl __riscv_save_1
+  .type __riscv_save_1,@function
+  .globl __riscv_save_0
+  .type __riscv_save_0,@function
+__riscv_save_2:
+__riscv_save_1:
+__riscv_save_0:
+  addi sp, sp, -12
+  sw s1, 0(sp)
+  sw s0, 4(sp)
+  sw ra, 8(sp)
+  jr t0
+
+#endif
+
+#elif __riscv_xlen == 64
+
+#ifndef __riscv_abi_rve
+
+  .globl __riscv_save_12
+  .type __riscv_save_12,@function
+__riscv_save_12:
+  addi sp, sp, -112
+  mv t1, zero
+  sd s11, 8(sp)
+  j .Lriscv_save_11_10
+
+  .globl __riscv_save_11
+  .type __riscv_save_11,@function
+  .globl __riscv_save_10
+  .type __riscv_save_10,@function
+__riscv_save_11:
+__riscv_save_10:
+  addi sp, sp, -112
+  li t1, 16
+.Lriscv_save_11_10:
+  sd s10, 16(sp)
+  sd s9, 24(sp)
+  j .Lriscv_save_9_8
+
+  .globl __riscv_save_9
+  .type __riscv_save_9,@function
+  .globl __riscv_save_8
+  .type __riscv_save_8,@function
+__riscv_save_9:
+__riscv_save_8:
+  addi sp, sp, -112
+  li t1, 32
+.Lriscv_save_9_8:
+  sd s8, 32(sp)
+  sd s7, 40(sp)
+  j .Lriscv_save_7_6
+
+  .globl __riscv_save_7
+  .type __riscv_save_7,@function
+  .globl __riscv_save_6
+  .type __riscv_save_6,@function
+__riscv_save_7:
+__riscv_save_6:
+  addi sp, sp, -112
+  li t1, 48
+.Lriscv_save_7_6:
+  sd s6, 48(sp)
+  sd s5, 56(sp)
+  j .Lriscv_save_5_4
+
+  .globl __riscv_save_5
+  .type __riscv_save_5,@function
+  .globl __riscv_save_4
+  .type __riscv_save_4,@function
+__riscv_save_5:
+__riscv_save_4:
+  addi sp, sp, -112
+  li t1, 64
+.Lriscv_save_5_4:
+  sd s4, 64(sp)
+  sd s3, 72(sp)
+  j .Lriscv_save_3_2
+
+  .globl __riscv_save_3
+  .type __riscv_save_3,@function
+  .globl __riscv_save_2
+  .type __riscv_save_2,@function
+__riscv_save_3:
+__riscv_save_2:
+  addi sp, sp, -112
+  li t1, 80
+.Lriscv_save_3_2:
+  sd s2, 80(sp)
+  sd s1, 88(sp)
+  sd s0, 96(sp)
+  sd ra, 104(sp)
+  add sp, sp, t1
+  jr t0
+
+  .globl __riscv_save_1
+  .type __riscv_save_1,@function
+  .globl __riscv_save_0
+  .type __riscv_save_0,@function
+__riscv_save_1:
+__riscv_save_0:
+  addi sp, sp, -16
+  sd s0, 0(sp)
+  sd ra, 8(sp)
+  jr t0
+
+#else
+
+  .globl __riscv_save_2
+  .type __riscv_save_2,@function
+  .globl __riscv_save_1
+  .type __riscv_save_1,@function
+  .globl __riscv_save_0
+  .type __riscv_save_0,@function
+__riscv_save_2:
+__riscv_save_1:
+__riscv_save_0:
+  addi sp, sp, -24
+  sd s1, 0(sp)
+  sd s0, 8(sp)
+  sd ra, 16(sp)
+  jr t0
+
+#endif
+
+#else
+# error "xlen must be 32 or 64 for save-restore implementation"
+#endif