From 4638c5b59698c34255bcdab9e2f91298599e8db7 Mon Sep 17 00:00:00 2001
From: Kiva
Date: Tue, 23 Apr 2024 10:30:35 +0800
Subject: [PATCH] [Clang][XTHeadVector] Implement 15.1-15.4 `vred/vfred/vfwred`
 intrinsic family (#104)

* [Clang][XTHeadVector] Implement 15.1-15.4 `vred/vfred/vfwred` intrinsic family

* [Clang][XTHeadVector] Test 15.1-15.4 `vred/vfred/vfwred` intrinsic family

* [Clang][XTHeadVector] Implement wrappers for 15.1-15.4 `vred/vfred/vfwred` intrinsic family
---
 .../clang/Basic/riscv_vector_xtheadv.td       | 111 +++
 .../Basic/riscv_vector_xtheadv_wrappers.td    | 581 ++++++++++++++++
 .../vector-reduction/thead/vfredmax.c         | 247 +++++++
 .../vector-reduction/thead/vfredmin.c         | 247 +++++++
 .../vector-reduction/thead/vfredosum.c        | 246 +++++++
 .../vector-reduction/thead/vfredsum.c         | 246 +++++++
 .../vector-reduction/thead/vfwredosum.c       | 166 +++++
 .../vector-reduction/thead/vfwredsum.c        | 166 +++++
 .../vector-reduction/thead/vredand.c          | 647 ++++++++++++++++++
 .../vector-reduction/thead/vredmax.c          | 327 +++++++++
 .../vector-reduction/thead/vredmaxu.c         | 327 +++++++++
 .../vector-reduction/thead/vredmin.c          | 327 +++++++++
 .../vector-reduction/thead/vredminu.c         | 327 +++++++++
 .../vector-reduction/thead/vredor.c           | 647 ++++++++++++++++++
 .../vector-reduction/thead/vredsum.c          | 647 ++++++++++++++++++
 .../vector-reduction/thead/vredxor.c          | 647 ++++++++++++++++++
 .../vector-reduction/thead/vwredsum.c         | 247 +++++++
 .../vector-reduction/thead/vwredsumu.c        | 367 ++++++++++
 .../vector-reduction/wrappers/vfredmax.c      | 247 +++++++
 .../vector-reduction/wrappers/vfredmin.c      | 247 +++++++
 .../vector-reduction/wrappers/vfredosum.c     | 246 +++++++
 .../vector-reduction/wrappers/vfredsum.c      | 246 +++++++
 .../vector-reduction/wrappers/vfwredosum.c    | 166 +++++
 .../vector-reduction/wrappers/vfwredsum.c     | 166 +++++
 .../vector-reduction/wrappers/vredand.c       | 647 ++++++++++++++++++
 .../vector-reduction/wrappers/vredmax.c       | 327 +++++++++
 .../vector-reduction/wrappers/vredmaxu.c      | 327 +++++++++
 .../vector-reduction/wrappers/vredmin.c       | 327 +++++++++
 .../vector-reduction/wrappers/vredminu.c      | 327 +++++++++
 .../vector-reduction/wrappers/vredor.c        | 647 ++++++++++++++++++
 .../vector-reduction/wrappers/vredsum.c       | 647 ++++++++++++++++++
 .../vector-reduction/wrappers/vredxor.c       | 647 ++++++++++++++++++
 .../vector-reduction/wrappers/vwredsum.c      | 247 +++++++
 .../vector-reduction/wrappers/vwredsumu.c     | 367 ++++++++++
 34 files changed, 12348 insertions(+)
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredmax.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredmin.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredosum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfwredosum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfwredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredand.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmax.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmaxu.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmin.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredminu.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredor.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredxor.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vwredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vwredsumu.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmax.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmin.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredosum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredosum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredand.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmax.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmaxu.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmin.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredminu.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredor.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredxor.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsumu.c
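The new intrinsics follow the usual RVV reduction shape: a full-LMUL `vector` operand, an LMUL-1 `scalar` operand whose element 0 seeds the reduction, and a `vl`; the result is an LMUL-1 value carrying the reduction in element 0. A minimal usage sketch of the wrapper macros added by this patch (the helper functions and the `<riscv_vector.h>` include are illustrative assumptions, not part of the diff):

    #include <riscv_vector.h>

    // Ordered float sum over the first vl elements of vec, seeded with
    // element 0 of init; the total lands in element 0 of the result.
    vfloat32m1_t osum_f32m4(vfloat32m4_t vec, vfloat32m1_t init, size_t vl) {
        return __riscv_vfredosum_vs_f32m4_f32m1(vec, init, vl);
    }

    // Masked variant: only elements whose mask bit is set participate.
    vfloat32m1_t osum_f32m4_m(vbool8_t mask, vfloat32m4_t vec,
                              vfloat32m1_t init, size_t vl) {
        return __riscv_vfredosum_vs_f32m4_f32m1_m(mask, vec, init, vl);
    }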
diff --git a/clang/include/clang/Basic/riscv_vector_xtheadv.td b/clang/include/clang/Basic/riscv_vector_xtheadv.td
index 2a0912455e917..1e0c830accd45 100644
--- a/clang/include/clang/Basic/riscv_vector_xtheadv.td
+++ b/clang/include/clang/Basic/riscv_vector_xtheadv.td
@@ -61,6 +61,11 @@ multiclass RVVOutBuiltinSet<string intrinsic_name, string type_range,
                             list<list<string>> suffixes_prototypes>
     : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1]>;
 
+// IntrinsicTypes is output, op1 [-1, 0]
+multiclass RVVOutOp0BuiltinSet<string intrinsic_name, string type_range,
+                               list<list<string>> suffixes_prototypes>
+    : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 0]>;
+
 // IntrinsicTypes is output, op1 [-1, 1]
 multiclass RVVOutOp1BuiltinSet<string intrinsic_name, string type_range,
                                list<list<string>> suffixes_prototypes>
     : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1]>;
@@ -256,6 +261,29 @@ class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> {
   let HasMaskedOffOperand = false;
 }
 
+let HasMaskedOffOperand = true in {
+  multiclass RVVSignedReductionBuiltin {
+    defm "" : RVVOutOp0BuiltinSet<NAME, "csil",
+                                  [["vs", "vSv", "SvvSv"]]>;
+  }
+  multiclass RVVUnsignedReductionBuiltin {
+    defm "" : RVVOutOp0BuiltinSet<NAME, "csil",
+                                  [["vs", "UvUSv", "USvUvUSv"]]>;
+  }
+  multiclass RVVFloatingReductionBuiltin {
+    defm "" : RVVOutOp0BuiltinSet<NAME, "xfd",
+                                  [["vs", "vSv", "SvvSv"]]>;
+  }
+  multiclass RVVFloatingWidenReductionBuiltin {
+    defm "" : RVVOutOp0BuiltinSet<NAME, "xf",
+                                  [["vs", "vSw", "SwvSw"]]>;
+  }
+}
+
+multiclass RVVIntReductionBuiltinSet
+    : RVVSignedReductionBuiltin,
+      RVVUnsignedReductionBuiltin;
+
 //===----------------------------------------------------------------------===//
 // 6. Configuration-Setting and Utility
 //===----------------------------------------------------------------------===//
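In the builtin records above, each `["vs", "vSv", "SvvSv"]` entry supplies the builtin-name suffix, the type suffixes used for mangling (a full-LMUL source plus an LMUL-1 destination), and the prototype, where `S` appears to pin a type to LMUL=1; `vl` is appended automatically. Read as C declarations, two of the builtins generated by `defm th_vredsum : RVVIntReductionBuiltinSet` should come out roughly as the sketch below; the shapes are reconstructed from the wrapper macros later in this patch, not copied from generated code:

    // Unmasked form: (vector, scalar, vl) -> LMUL-1 result.
    vint32m1_t __riscv_th_vredsum_vs_i32m2_i32m1(vint32m2_t vector,
                                                 vint32m1_t scalar,
                                                 size_t vl);

    // Masked form: the vbool16_t mask type matches SEW=32 / LMUL=2.
    vint32m1_t __riscv_th_vredsum_vs_i32m2_i32m1_m(vbool16_t mask,
                                                   vint32m2_t vector,
                                                   vint32m1_t scalar,
                                                   size_t vl);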
@@ -1323,6 +1351,89 @@ let UnMaskedPolicyScheme = HasPassthruOperand,
   defm th_vnclip : RVVSignedNShiftBuiltinSetRoundingMode;
 }
+
+// 15. Vector Reduction Operations
+// 15.1. Vector Single-Width Integer Reduction Instructions
+let UnMaskedPolicyScheme = HasPassthruOperand,
+    MaskedPolicyScheme = HasPassthruOperand,
+    HasMaskPolicy = false in {
+  defm th_vredsum : RVVIntReductionBuiltinSet;
+  defm th_vredmaxu : RVVUnsignedReductionBuiltin;
+  defm th_vredmax : RVVSignedReductionBuiltin;
+  defm th_vredminu : RVVUnsignedReductionBuiltin;
+  defm th_vredmin : RVVSignedReductionBuiltin;
+  defm th_vredand : RVVIntReductionBuiltinSet;
+  defm th_vredor : RVVIntReductionBuiltinSet;
+  defm th_vredxor : RVVIntReductionBuiltinSet;
+
+  // 15.2. Vector Widening Integer Reduction Instructions
+  // Vector Widening Integer Reduction Operations
+  let HasMaskedOffOperand = true in {
+    defm th_vwredsum : RVVOutOp0BuiltinSet<"th_vwredsum", "csi",
+                                           [["vs", "vSw", "SwvSw"]]>;
+    defm th_vwredsumu : RVVOutOp0BuiltinSet<"th_vwredsumu", "csi",
+                                            [["vs", "UvUSw", "USwUvUSw"]]>;
+  }
+
+  // 15.3. Vector Single-Width Floating-Point Reduction Instructions
+  defm th_vfredmax : RVVFloatingReductionBuiltin;
+  defm th_vfredmin : RVVFloatingReductionBuiltin;
+
+  let ManualCodegen = [{
+    {
+      // LLVM intrinsic
+      // Unmasked: (passthru, op0, op1, round_mode, vl)
+      // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl)
+
+      SmallVector<llvm::Value *, 7> Operands;
+      bool HasMaskedOff = !(
+          (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+          (!IsMasked && PolicyAttrs & RVV_VTA));
+      bool HasRoundModeOp = IsMasked ?
+          (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+          (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+      unsigned Offset = IsMasked ?
+          (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+      if (!HasMaskedOff)
+        Operands.push_back(llvm::PoisonValue::get(ResultType));
+      else
+        Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+      Operands.push_back(Ops[Offset]);     // op0
+      Operands.push_back(Ops[Offset + 1]); // op1
+
+      if (IsMasked)
+        Operands.push_back(Ops[0]); // mask
+
+      if (HasRoundModeOp) {
+        Operands.push_back(Ops[Offset + 2]); // frm
+        Operands.push_back(Ops[Offset + 3]); // vl
+      } else {
+        Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+        Operands.push_back(Ops[Offset + 2]); // vl
+      }
+
+      IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
+                        Ops.back()->getType()};
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      return Builder.CreateCall(F, Operands, "");
+    }
+  }] in {
+    // NOTE: there's no RoundingMode version (like `vfredosum_*_rm`)
+    // for floating point reduction in XTHeadVector.
+
+    // 15.3. Vector Single-Width Floating-Point Reduction Instructions
+    defm th_vfredsum : RVVFloatingReductionBuiltin;
+    defm th_vfredosum : RVVFloatingReductionBuiltin;
+
+    // 15.4. Vector Widening Floating-Point Reduction Instructions
+    defm th_vfwredsum : RVVFloatingWidenReductionBuiltin;
+    defm th_vfwredosum : RVVFloatingWidenReductionBuiltin;
+  }
+}
+
 // 16. Vector Mask Instructions
 // 16.1.
Vector Mask-Register Logical Instructions diff --git a/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td b/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td index 1a750552f7d55..8128849ed7f4d 100644 --- a/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td +++ b/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td @@ -2958,6 +2958,587 @@ let HeaderCode = }] in def th_narrowing_width_fixed_point_clip_wrapper_macros: RVVHeader; +// 15. Vector Reduction Operations + +let HeaderCode = +[{ +// Vector Reduction Operations +#define __riscv_vfredmax_vs_f16m1_f16m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f16m1_f16m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m2_f16m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f16m2_f16m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m4_f16m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f16m4_f16m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m8_f16m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f16m8_f16m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m1_f32m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f32m1_f32m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m2_f32m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f32m2_f32m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m4_f32m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f32m4_f32m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m8_f32m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f32m8_f32m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m1_f64m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f64m1_f64m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m2_f64m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f64m2_f64m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f64m4_f64m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f64m8_f64m1(vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m1_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m1_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m2_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m2_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m4_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m4_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m8_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m8_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m1_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m2_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m4_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m8_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m8_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m1_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m2_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m4_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m8_f64m1_m(mask, vector, scalar, vl) +#define 
__riscv_vfredmin_vs_f16m1_f16m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f16m1_f16m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m2_f16m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f16m2_f16m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m4_f16m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f16m4_f16m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m8_f16m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f16m8_f16m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m1_f32m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f32m1_f32m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m2_f32m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f32m2_f32m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m4_f32m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f32m4_f32m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m8_f32m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f32m8_f32m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m1_f64m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f64m1_f64m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m2_f64m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f64m2_f64m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f64m4_f64m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f64m8_f64m1(vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m1_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m1_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m2_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m2_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m4_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m4_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m8_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m8_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m1_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m2_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m4_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m8_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m8_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m1_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m2_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m4_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m1_f16m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f16m1_f16m1(vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m2_f16m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f16m2_f16m1(vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m4_f16m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f16m4_f16m1(vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m8_f16m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f16m8_f16m1(vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m1_f32m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f32m1_f32m1(vector, scalar, 
vl) +#define __riscv_vfredosum_vs_f32m2_f32m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f32m2_f32m1(vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m4_f32m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f32m4_f32m1(vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m8_f32m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f32m8_f32m1(vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m1_f64m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f64m1_f64m1(vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m2_f64m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f64m2_f64m1(vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f64m4_f64m1(vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f64m8_f64m1(vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m1_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m1_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m2_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m2_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m4_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m4_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m8_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m8_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m1_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m2_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m4_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m8_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m8_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m1_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m2_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m4_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m1_f16m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f16m1_f16m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m2_f16m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f16m2_f16m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m4_f16m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f16m4_f16m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m8_f16m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f16m8_f16m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m1_f32m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f32m1_f32m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m2_f32m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f32m2_f32m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m4_f32m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f32m4_f32m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m8_f32m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f32m8_f32m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m1_f64m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f64m1_f64m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m2_f64m1(vector, scalar, vl) 
__riscv_th_vfredsum_vs_f64m2_f64m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f64m4_f64m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f64m8_f64m1(vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m1_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m1_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m2_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m2_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m4_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m4_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m8_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m8_f16m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m1_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m2_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m4_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m8_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m8_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m1_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m2_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m4_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m1_f32m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m1_f32m1(vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m2_f32m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m2_f32m1(vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m4_f32m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m4_f32m1(vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m8_f32m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m8_f32m1(vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m1_f64m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m1_f64m1(vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m2_f64m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m2_f64m1(vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m4_f64m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m4_f64m1(vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m8_f64m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m8_f64m1(vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m1_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m2_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m4_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m8_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m8_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m1_f64m1_m(mask, vector, scalar, vl) +#define 
__riscv_vfwredosum_vs_f32m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m2_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m4_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m1_f32m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m1_f32m1(vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m2_f32m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m2_f32m1(vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m4_f32m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m4_f32m1(vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m8_f32m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m8_f32m1(vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m1_f64m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m1_f64m1(vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m2_f64m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m2_f64m1(vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m4_f64m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m4_f64m1(vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m8_f64m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m8_f64m1(vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m1_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m2_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m4_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m8_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m8_f32m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m1_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m2_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m4_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredand_vs_i8m1_i8m1(vector, scalar, vl) +#define __riscv_vredand_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredand_vs_i8m2_i8m1(vector, scalar, vl) +#define __riscv_vredand_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredand_vs_i8m4_i8m1(vector, scalar, vl) +#define __riscv_vredand_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredand_vs_i8m8_i8m1(vector, scalar, vl) +#define __riscv_vredand_vs_i16m1_i16m1(vector, scalar, vl) __riscv_th_vredand_vs_i16m1_i16m1(vector, scalar, vl) +#define __riscv_vredand_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredand_vs_i16m2_i16m1(vector, scalar, vl) +#define __riscv_vredand_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredand_vs_i16m4_i16m1(vector, scalar, vl) +#define __riscv_vredand_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredand_vs_i16m8_i16m1(vector, scalar, vl) +#define __riscv_vredand_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredand_vs_i32m1_i32m1(vector, scalar, vl) +#define __riscv_vredand_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredand_vs_i32m2_i32m1(vector, scalar, vl) +#define 
__riscv_vredand_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredand_vs_i32m4_i32m1(vector, scalar, vl) +#define __riscv_vredand_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredand_vs_i32m8_i32m1(vector, scalar, vl) +#define __riscv_vredand_vs_i64m1_i64m1(vector, scalar, vl) __riscv_th_vredand_vs_i64m1_i64m1(vector, scalar, vl) +#define __riscv_vredand_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredand_vs_i64m2_i64m1(vector, scalar, vl) +#define __riscv_vredand_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredand_vs_i64m4_i64m1(vector, scalar, vl) +#define __riscv_vredand_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredand_vs_i64m8_i64m1(vector, scalar, vl) +#define __riscv_vredand_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredand_vs_u8m1_u8m1(vector, scalar, vl) +#define __riscv_vredand_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredand_vs_u8m2_u8m1(vector, scalar, vl) +#define __riscv_vredand_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredand_vs_u8m4_u8m1(vector, scalar, vl) +#define __riscv_vredand_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredand_vs_u8m8_u8m1(vector, scalar, vl) +#define __riscv_vredand_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredand_vs_u16m1_u16m1(vector, scalar, vl) +#define __riscv_vredand_vs_u16m2_u16m1(vector, scalar, vl) __riscv_th_vredand_vs_u16m2_u16m1(vector, scalar, vl) +#define __riscv_vredand_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredand_vs_u16m4_u16m1(vector, scalar, vl) +#define __riscv_vredand_vs_u16m8_u16m1(vector, scalar, vl) __riscv_th_vredand_vs_u16m8_u16m1(vector, scalar, vl) +#define __riscv_vredand_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredand_vs_u32m1_u32m1(vector, scalar, vl) +#define __riscv_vredand_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredand_vs_u32m2_u32m1(vector, scalar, vl) +#define __riscv_vredand_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredand_vs_u32m4_u32m1(vector, scalar, vl) +#define __riscv_vredand_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredand_vs_u32m8_u32m1(vector, scalar, vl) +#define __riscv_vredand_vs_u64m1_u64m1(vector, scalar, vl) __riscv_th_vredand_vs_u64m1_u64m1(vector, scalar, vl) +#define __riscv_vredand_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredand_vs_u64m2_u64m1(vector, scalar, vl) +#define __riscv_vredand_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredand_vs_u64m4_u64m1(vector, scalar, vl) +#define __riscv_vredand_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredand_vs_u64m8_u64m1(vector, scalar, vl) +#define __riscv_vredand_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i8m1_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i8m2_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i8m4_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i8m8_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i16m1_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i16m2_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i16m4_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i16m8_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i32m1_i32m1_m(mask, 
vector, scalar, vl) __riscv_th_vredand_vs_i32m1_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i32m2_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i32m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i32m4_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i32m8_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i64m1_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i64m2_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i64m4_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i64m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i64m8_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u8m1_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u8m2_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u8m4_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u8m4_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u8m8_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u16m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u16m1_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u16m2_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u16m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u16m4_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u16m8_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u32m1_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u32m2_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u32m4_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u32m8_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u64m1_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u64m2_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u64m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u64m4_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u64m8_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredmax_vs_i8m1_i8m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredmax_vs_i8m2_i8m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredmax_vs_i8m4_i8m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredmax_vs_i8m8_i8m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i16m1_i16m1(vector, scalar, 
vl) __riscv_th_vredmax_vs_i16m1_i16m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredmax_vs_i16m2_i16m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredmax_vs_i16m4_i16m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredmax_vs_i16m8_i16m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredmax_vs_i32m1_i32m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredmax_vs_i32m2_i32m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredmax_vs_i32m4_i32m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredmax_vs_i32m8_i32m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i64m1_i64m1(vector, scalar, vl) __riscv_th_vredmax_vs_i64m1_i64m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredmax_vs_i64m2_i64m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredmax_vs_i64m4_i64m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredmax_vs_i64m8_i64m1(vector, scalar, vl) +#define __riscv_vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m1_u8m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m2_u8m1(vector, scalar, vl) 
__riscv_th_vredmaxu_vs_u8m2_u8m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m4_u8m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m8_u8m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m1_u16m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m2_u16m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m2_u16m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m4_u16m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m8_u16m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m8_u16m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m1_u32m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m2_u32m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m4_u32m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m8_u32m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m1_u64m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m1_u64m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m2_u64m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m4_u64m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m1_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m2_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m4_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m4_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m8_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m1_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m2_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m4_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m8_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m1_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m2_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m4_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m8_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m1_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m2_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m4_u64m1_m(mask, 
vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m4_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m8_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredmin_vs_i8m1_i8m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredmin_vs_i8m2_i8m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredmin_vs_i8m4_i8m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredmin_vs_i8m8_i8m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i16m1_i16m1(vector, scalar, vl) __riscv_th_vredmin_vs_i16m1_i16m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredmin_vs_i16m2_i16m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredmin_vs_i16m4_i16m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredmin_vs_i16m8_i16m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredmin_vs_i32m1_i32m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredmin_vs_i32m2_i32m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredmin_vs_i32m4_i32m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredmin_vs_i32m8_i32m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i64m1_i64m1(vector, scalar, vl) __riscv_th_vredmin_vs_i64m1_i64m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredmin_vs_i64m2_i64m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredmin_vs_i64m4_i64m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredmin_vs_i64m8_i64m1(vector, scalar, vl) +#define __riscv_vredmin_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i8m1_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i8m2_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i8m4_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i8m8_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i16m1_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i16m2_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i16m4_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i16m8_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i32m1_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i32m2_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i32m4_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i32m8_i32m1_m(mask, vector, 
scalar, vl) +#define __riscv_vredmin_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i64m1_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i64m2_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i64m4_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i64m8_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredminu_vs_u8m1_u8m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredminu_vs_u8m2_u8m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredminu_vs_u8m4_u8m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredminu_vs_u8m8_u8m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredminu_vs_u16m1_u16m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u16m2_u16m1(vector, scalar, vl) __riscv_th_vredminu_vs_u16m2_u16m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredminu_vs_u16m4_u16m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u16m8_u16m1(vector, scalar, vl) __riscv_th_vredminu_vs_u16m8_u16m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredminu_vs_u32m1_u32m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredminu_vs_u32m2_u32m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredminu_vs_u32m4_u32m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredminu_vs_u32m8_u32m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u64m1_u64m1(vector, scalar, vl) __riscv_th_vredminu_vs_u64m1_u64m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredminu_vs_u64m2_u64m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredminu_vs_u64m4_u64m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredminu_vs_u64m8_u64m1(vector, scalar, vl) +#define __riscv_vredminu_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u8m1_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u8m2_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m4_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u8m4_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u8m8_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u16m1_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u16m2_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u16m4_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u16m8_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u32m1_u32m1_m(mask, vector, scalar, vl) +#define 
__riscv_vredminu_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u32m2_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u32m4_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u32m8_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u64m1_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u64m2_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u64m4_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u64m8_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredor_vs_i8m1_i8m1(vector, scalar, vl) +#define __riscv_vredor_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredor_vs_i8m2_i8m1(vector, scalar, vl) +#define __riscv_vredor_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredor_vs_i8m4_i8m1(vector, scalar, vl) +#define __riscv_vredor_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredor_vs_i8m8_i8m1(vector, scalar, vl) +#define __riscv_vredor_vs_i16m1_i16m1(vector, scalar, vl) __riscv_th_vredor_vs_i16m1_i16m1(vector, scalar, vl) +#define __riscv_vredor_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredor_vs_i16m2_i16m1(vector, scalar, vl) +#define __riscv_vredor_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredor_vs_i16m4_i16m1(vector, scalar, vl) +#define __riscv_vredor_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredor_vs_i16m8_i16m1(vector, scalar, vl) +#define __riscv_vredor_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredor_vs_i32m1_i32m1(vector, scalar, vl) +#define __riscv_vredor_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredor_vs_i32m2_i32m1(vector, scalar, vl) +#define __riscv_vredor_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredor_vs_i32m4_i32m1(vector, scalar, vl) +#define __riscv_vredor_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredor_vs_i32m8_i32m1(vector, scalar, vl) +#define __riscv_vredor_vs_i64m1_i64m1(vector, scalar, vl) __riscv_th_vredor_vs_i64m1_i64m1(vector, scalar, vl) +#define __riscv_vredor_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredor_vs_i64m2_i64m1(vector, scalar, vl) +#define __riscv_vredor_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredor_vs_i64m4_i64m1(vector, scalar, vl) +#define __riscv_vredor_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredor_vs_i64m8_i64m1(vector, scalar, vl) +#define __riscv_vredor_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredor_vs_u8m1_u8m1(vector, scalar, vl) +#define __riscv_vredor_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredor_vs_u8m2_u8m1(vector, scalar, vl) +#define __riscv_vredor_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredor_vs_u8m4_u8m1(vector, scalar, vl) +#define __riscv_vredor_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredor_vs_u8m8_u8m1(vector, scalar, vl) +#define __riscv_vredor_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredor_vs_u16m1_u16m1(vector, scalar, vl) +#define __riscv_vredor_vs_u16m2_u16m1(vector, scalar, vl) __riscv_th_vredor_vs_u16m2_u16m1(vector, scalar, vl) +#define __riscv_vredor_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredor_vs_u16m4_u16m1(vector, scalar, vl) +#define __riscv_vredor_vs_u16m8_u16m1(vector, scalar, vl) 
__riscv_th_vredor_vs_u16m8_u16m1(vector, scalar, vl) +#define __riscv_vredor_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredor_vs_u32m1_u32m1(vector, scalar, vl) +#define __riscv_vredor_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredor_vs_u32m2_u32m1(vector, scalar, vl) +#define __riscv_vredor_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredor_vs_u32m4_u32m1(vector, scalar, vl) +#define __riscv_vredor_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredor_vs_u32m8_u32m1(vector, scalar, vl) +#define __riscv_vredor_vs_u64m1_u64m1(vector, scalar, vl) __riscv_th_vredor_vs_u64m1_u64m1(vector, scalar, vl) +#define __riscv_vredor_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredor_vs_u64m2_u64m1(vector, scalar, vl) +#define __riscv_vredor_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredor_vs_u64m4_u64m1(vector, scalar, vl) +#define __riscv_vredor_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredor_vs_u64m8_u64m1(vector, scalar, vl) +#define __riscv_vredor_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i8m1_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i8m2_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i8m4_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i8m8_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i16m1_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i16m2_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i16m4_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i16m8_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i32m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i32m1_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i32m2_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i32m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i32m4_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i32m8_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i64m1_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i64m2_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i64m4_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_i64m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i64m8_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u8m1_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u8m2_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u8m4_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u8m4_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u8m8_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u16m1_u16m1_m(mask, vector, scalar, vl) 
__riscv_th_vredor_vs_u16m1_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u16m2_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u16m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u16m4_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u16m8_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u32m1_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u32m2_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u32m4_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u32m8_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u64m1_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u64m2_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u64m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u64m4_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredor_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u64m8_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredsum_vs_i8m1_i8m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredsum_vs_i8m2_i8m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredsum_vs_i8m4_i8m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredsum_vs_i8m8_i8m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i16m1_i16m1(vector, scalar, vl) __riscv_th_vredsum_vs_i16m1_i16m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredsum_vs_i16m2_i16m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredsum_vs_i16m4_i16m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredsum_vs_i16m8_i16m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredsum_vs_i32m1_i32m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredsum_vs_i32m2_i32m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredsum_vs_i32m4_i32m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredsum_vs_i32m8_i32m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i64m1_i64m1(vector, scalar, vl) __riscv_th_vredsum_vs_i64m1_i64m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredsum_vs_i64m2_i64m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredsum_vs_i64m4_i64m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredsum_vs_i64m8_i64m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredsum_vs_u8m1_u8m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredsum_vs_u8m2_u8m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u8m4_u8m1(vector, scalar, vl) 
__riscv_th_vredsum_vs_u8m4_u8m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredsum_vs_u8m8_u8m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredsum_vs_u16m1_u16m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u16m2_u16m1(vector, scalar, vl) __riscv_th_vredsum_vs_u16m2_u16m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredsum_vs_u16m4_u16m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u16m8_u16m1(vector, scalar, vl) __riscv_th_vredsum_vs_u16m8_u16m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredsum_vs_u32m1_u32m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredsum_vs_u32m2_u32m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredsum_vs_u32m4_u32m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredsum_vs_u32m8_u32m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u64m1_u64m1(vector, scalar, vl) __riscv_th_vredsum_vs_u64m1_u64m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredsum_vs_u64m2_u64m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredsum_vs_u64m4_u64m1(vector, scalar, vl) +#define __riscv_vredsum_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredsum_vs_u64m8_u64m1(vector, scalar, vl) +#define __riscv_vredsum_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i8m1_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i8m2_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i8m4_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i8m8_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i16m1_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i16m2_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i16m4_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i16m8_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i32m1_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i32m2_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i32m4_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i32m8_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i64m1_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i64m2_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i64m4_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m8_i64m1_m(mask, vector, scalar, vl) 
__riscv_th_vredsum_vs_i64m8_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u8m1_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u8m2_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m4_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u8m4_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u8m8_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u16m1_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u16m2_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u16m4_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u16m8_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u32m1_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u32m2_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u32m4_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u32m8_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u64m1_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u64m2_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u64m4_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u64m8_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredxor_vs_i8m1_i8m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredxor_vs_i8m2_i8m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredxor_vs_i8m4_i8m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredxor_vs_i8m8_i8m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i16m1_i16m1(vector, scalar, vl) __riscv_th_vredxor_vs_i16m1_i16m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredxor_vs_i16m2_i16m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredxor_vs_i16m4_i16m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredxor_vs_i16m8_i16m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredxor_vs_i32m1_i32m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredxor_vs_i32m2_i32m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredxor_vs_i32m4_i32m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredxor_vs_i32m8_i32m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i64m1_i64m1(vector, scalar, vl) 
__riscv_th_vredxor_vs_i64m1_i64m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredxor_vs_i64m2_i64m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredxor_vs_i64m4_i64m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredxor_vs_i64m8_i64m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredxor_vs_u8m1_u8m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredxor_vs_u8m2_u8m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredxor_vs_u8m4_u8m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredxor_vs_u8m8_u8m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredxor_vs_u16m1_u16m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u16m2_u16m1(vector, scalar, vl) __riscv_th_vredxor_vs_u16m2_u16m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredxor_vs_u16m4_u16m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u16m8_u16m1(vector, scalar, vl) __riscv_th_vredxor_vs_u16m8_u16m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredxor_vs_u32m1_u32m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredxor_vs_u32m2_u32m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredxor_vs_u32m4_u32m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredxor_vs_u32m8_u32m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u64m1_u64m1(vector, scalar, vl) __riscv_th_vredxor_vs_u64m1_u64m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredxor_vs_u64m2_u64m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredxor_vs_u64m4_u64m1(vector, scalar, vl) +#define __riscv_vredxor_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredxor_vs_u64m8_u64m1(vector, scalar, vl) +#define __riscv_vredxor_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i8m1_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i8m2_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i8m4_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i8m8_i8m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i16m1_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i16m2_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i16m4_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i16m8_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i32m1_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i32m2_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m4_i32m1_m(mask, vector, scalar, 
vl) __riscv_th_vredxor_vs_i32m4_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i32m8_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i64m1_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i64m2_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i64m4_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i64m8_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u8m1_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u8m2_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m4_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u8m4_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u8m8_u8m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u16m1_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u16m2_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u16m4_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u16m8_u16m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u32m1_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u32m2_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u32m4_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u32m8_u32m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u64m1_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u64m2_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u64m4_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u64m8_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m1_i16m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i8m1_i16m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m2_i16m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i8m2_i16m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m4_i16m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i8m4_i16m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m8_i16m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i8m8_i16m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m1_i32m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i16m1_i32m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m2_i32m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i16m2_i32m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m4_i32m1(vector, scalar, vl) 
__riscv_th_vwredsum_vs_i16m4_i32m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m8_i32m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i16m8_i32m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m1_i64m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i32m1_i64m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m2_i64m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i32m2_i64m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m4_i64m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i32m4_i64m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m8_i64m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i32m8_i64m1(vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m1_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m2_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m4_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m8_i16m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m1_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m2_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m4_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m8_i32m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m1_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m2_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m4_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m8_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m1_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m1_u16m1(vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m2_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m2_u16m1(vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m4_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m4_u16m1(vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m8_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m8_u16m1(vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m1_u32m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m1_u32m1(vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m2_u32m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m2_u32m1(vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m4_u32m1(vector, 
scalar, vl) __riscv_th_vwredsumu_vs_u16m4_u32m1(vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u16m8_u32m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m8_u32m1(vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u32m1_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m1_u64m1(vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u32m2_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m2_u64m1(vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u32m4_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m4_u64m1(vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u32m8_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m8_u64m1(vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u8mf8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf8_u16m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u8mf4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf4_u16m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u8mf2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf2_u16m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u8m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m1_u16m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u8m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m2_u16m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u8m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m4_u16m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u8m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m8_u16m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u16mf4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf4_u32m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u16mf2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf2_u32m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u16m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m1_u32m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u16m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m2_u32m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u16m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m4_u32m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u16m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m8_u32m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u32mf2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32mf2_u64m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u32m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m1_u64m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u32m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m2_u64m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u32m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m4_u64m1_m(mask, vector, scalar, vl)
+#define __riscv_vwredsumu_vs_u32m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m8_u64m1_m(mask, vector, scalar, vl)
+
+}] in
+def th_vector_reduction_operations_wrapper_macros: RVVHeader;
+
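Note: the wrapper macros above are pure renames, so user code written against the standard `__riscv_*` reduction names compiles straight down to the `th_`-prefixed builtins. As a minimal usage sketch (not part of this patch; it assumes the usual companion intrinsics `__riscv_vsetvl_e8m1`/`_e16m1`, `__riscv_vle8_v_u8m1`, `__riscv_vmv_v_x_u16m1` and `__riscv_vmv_x_s_u16m1_u16` are exposed by the same compatibility headers):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Sum uint8 elements into a uint16 scalar via the widening reduction,
 * so the accumulation cannot overflow at 8 bits. */
uint16_t sum_u8(const uint8_t *src, size_t n) {
  size_t vl = __riscv_vsetvl_e16m1(1);               /* assumed helper */
  vuint16m1_t acc = __riscv_vmv_v_x_u16m1(0, vl);    /* assumed helper: acc[0] = 0 */
  for (size_t i = 0; i < n; i += vl) {
    vl = __riscv_vsetvl_e8m1(n - i);                 /* assumed helper */
    vuint8m1_t v = __riscv_vle8_v_u8m1(src + i, vl); /* assumed helper */
    /* Expands to __riscv_th_vwredsumu_vs_u8m1_u16m1(v, acc, vl):
     * acc[0] += sum of the vl active elements of v, zero-extended. */
    acc = __riscv_vwredsumu_vs_u8m1_u16m1(v, acc, vl);
  }
  return __riscv_vmv_x_s_u16m1_u16(acc);             /* assumed helper */
}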
 // 16. Vector Mask Instructions
 
 let HeaderCode =
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredmax.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredmax.c
new file mode 100644
index 0000000000000..2804c2dfbc64d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredmax.c
@@ -0,0 +1,247 @@
+// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \
+// RUN:   -target-feature +d -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredmax_vs_f16m1_f16m1
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredmax.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmax_vs_f16m1_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredmax_vs_f16m2_f16m1
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredmax.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmax_vs_f16m2_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredmax_vs_f16m4_f16m1
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredmax.nxv4f16.nxv16f16.i64(<vscale x 4 x half> poison, <vscale x 16 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmax_vs_f16m4_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredmax_vs_f16m8_f16m1
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredmax.nxv4f16.nxv32f16.i64(<vscale x 4 x half> poison, <vscale x 32 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmax_vs_f16m8_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfredmax_vs_f32m1_f32m1
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.th.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VECTOR]], <vscale x 2 x float> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmax_vs_f32m1_f32m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfredmax_vs_f32m2_f32m1
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = 
call @llvm.riscv.th.vfredmax.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f32m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f32m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f32m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f64m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f64m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f64m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m8_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f64m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfredmax_vs_f16m1_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m2_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m4_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m8_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmax_vs_f32m2_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
<vscale x 2 x float> @test_vfredmax_vs_f32m4_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> [[VECTOR]], <vscale x 2 x float> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmax_vs_f32m4_f32m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfredmax_vs_f32m8_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> poison, <vscale x 16 x float> [[VECTOR]], <vscale x 2 x float> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmax_vs_f32m8_f32m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfredmax_vs_f64m1_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VECTOR]], <vscale x 1 x double> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmax_vs_f64m1_f64m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfredmax_vs_f64m2_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VECTOR]], <vscale x 1 x double> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmax_vs_f64m2_f64m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfredmax_vs_f64m4_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> poison, <vscale x 4 x double> [[VECTOR]], <vscale x 1 x double> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmax_vs_f64m4_f64m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfredmax_vs_f64m8_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> poison, <vscale x 8 x double> [[VECTOR]], <vscale x 1 x double> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmax_vs_f64m8_f64m1_m(mask, vector, scalar, vl);
+}
+
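For reference, a hypothetical sketch of how the masked `_m` entry points tested above map onto user code; `__riscv_vmfgt_vf_f32m1_b32` is an assumed companion intrinsic and not part of this patch:

#include <riscv_vector.h>
#include <stddef.h>

/* Masked reduction: only lanes whose mask bit is set contribute; the
 * result lands in element 0 of the returned vector, seeded with init[0]. */
vfloat32m1_t max_above(vfloat32m1_t v, vfloat32m1_t init, float thr, size_t vl) {
  vbool32_t m = __riscv_vmfgt_vf_f32m1_b32(v, thr, vl); /* assumed helper */
  /* Expands to __riscv_th_vfredmax_vs_f32m1_f32m1_m(m, v, init, vl). */
  return __riscv_vfredmax_vs_f32m1_f32m1_m(m, v, init, vl);
}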
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredmin.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredmin.c
new file mode 100644
index 0000000000000..aeffaaf3cb350
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredmin.c
@@ -0,0 +1,247 @@
+// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \
+// RUN:   -target-feature +d -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredmin_vs_f16m1_f16m1
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredmin.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmin_vs_f16m1_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredmin_vs_f16m2_f16m1
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredmin.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmin_vs_f16m2_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredmin_vs_f16m4_f16m1
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredmin.nxv4f16.nxv16f16.i64(<vscale x 4 x half> poison, <vscale x 16 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmin_vs_f16m4_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredmin_vs_f16m8_f16m1
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredmin.nxv4f16.nxv32f16.i64(<vscale x 4 x half> poison, <vscale x 32 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmin_vs_f16m8_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfredmin_vs_f32m1_f32m1
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.th.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VECTOR]], <vscale x 2 x float> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return __riscv_th_vfredmin_vs_f32m1_f32m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfredmin_vs_f32m2_f32m1
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.th.vfredmin.nxv2f32.nxv4f32.i64(
poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f32m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f32m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f32m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f64m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f64m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f64m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m8_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f64m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m1_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m2_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m4_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m8_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f32m2_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m4_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f32m4_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m8_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f32m8_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m1_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f64m1_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m2_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f64m2_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m4_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f64m4_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m8_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredmin_vs_f64m8_f64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredosum.c 
b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredosum.c
new file mode 100644
index 0000000000000..a6dbb47d2cf96
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredosum.c
@@ -0,0 +1,246 @@
+// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \
+// RUN:   -target-feature +d -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredosum_vs_f16m1_f16m1
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredosum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredosum_vs_f16m1_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredosum_vs_f16m2_f16m1
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredosum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredosum_vs_f16m2_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredosum_vs_f16m4_f16m1
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredosum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> poison, <vscale x 16 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredosum_vs_f16m4_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfredosum_vs_f16m8_f16m1
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.th.vfredosum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> poison, <vscale x 32 x half> [[VECTOR]], <vscale x 4 x half> [[SCALAR]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return __riscv_th_vfredosum_vs_f16m8_f16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfredosum_vs_f32m1_f32m1
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.th.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VECTOR]], <vscale x 2 x float> [[SCALAR]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return __riscv_th_vfredosum_vs_f32m1_f32m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfredosum_vs_f32m2_f32m1
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.th.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VECTOR]], <vscale x 2 x float> [[SCALAR]], i64 7,
i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f32m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f32m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f32m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f64m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f64m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f64m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m8_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f64m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m1_f16m1_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m2_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m4_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m8_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f32m2_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfredosum_vs_f32m4_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m8_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m1_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m2_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m4_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m8_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredosum_vs_f64m8_f64m1_m(mask, vector, 
scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredsum.c new file mode 100644 index 0000000000000..0e712031abf26 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfredsum.c @@ -0,0 +1,246 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m1_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f16m1_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m2_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f16m2_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m4_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f16m4_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m8_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f16m8_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m1_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f32m1_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m2_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f32m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f32m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f32m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f64m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f64m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f64m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m8_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f64m8_f64m1(vector, scalar, vl); +} 
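The tests above exercise the unmasked th_vfredsum forms, which lower to llvm.riscv.th.vfredsum.* with a poison merge operand and a rounding-mode argument of i64 7 (dynamic rounding). As a brief usage sketch (the helper name is hypothetical and the riscv_vector.h include is an assumption; only the intrinsic call itself comes from the tests above), a caller can fold one strip-mined chunk into a one-element accumulator, relying on the RVV 0.7.1 reduction semantics that element 0 of the result receives scalar[0] combined with the first vl elements of vector:

#include <riscv_vector.h>
#include <stddef.h>

/* Hypothetical helper: fold one vl-element chunk into the running sum kept in
   element 0 of acc, using the unordered single-width FP sum reduction.
   On return, element 0 holds acc[0] + chunk[0] + ... + chunk[vl-1]. */
static vfloat32m1_t fold_chunk_f32m1(vfloat32m1_t acc, vfloat32m1_t chunk, size_t vl) {
  /* Operand order matches the test signatures: (vector, scalar, vl). */
  return __riscv_th_vfredsum_vs_f32m1_f32m1(chunk, acc, vl);
}

The masked _m variants tested next take the same operands preceded by a vboolN_t mask, so only the active elements of vector contribute to the sum.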
+ +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m1_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m2_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m4_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m8_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f32m2_f32m1_m(mask, 
vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m4_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m8_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m1_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m2_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m4_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfredsum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m8_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return 
__riscv_th_vfredsum_vs_f64m8_f64m1_m(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfwredosum.c new file mode 100644 index 0000000000000..5033ee93ee098 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfwredosum.c @@ -0,0 +1,166 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m1_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f16m1_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m2_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f16m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f16m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f16m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f32m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]],
i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f32m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f32m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m8_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f32m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f16m2_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m4_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m8_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m1_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m2_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m4_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m8_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredosum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfwredsum.c new file mode 100644 index 0000000000000..8a84528034c87 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vfwredsum.c @@ -0,0 +1,166 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m1_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef
[[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f16m1_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m2_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f16m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f16m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f16m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f32m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f32m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat32m4_t 
vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f32m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m8_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f32m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f16m2_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m4_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m8_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m1_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, 
vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m2_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m4_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m8_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_th_vfwredsum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredand.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredand.c new file mode 100644 index 0000000000000..c17683d8ab5fb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredand.c @@ -0,0 +1,647 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m1_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i8m1_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m2_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i8m2_i8m1(vector, scalar, vl); +} + +//
CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m4_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i8m4_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m8_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i8m8_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m1_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i16m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, 
size_t vl) { + return __riscv_th_vredand_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i32m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i64m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m1_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m4_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u16m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u32m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u32m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, 
vint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_i64m8_i64m1_m(mask, 
vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m4_u16m1_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m8_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredand_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmax.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmax.c new file mode 100644 index 0000000000000..1db51028d7305 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmax.c @@ -0,0 +1,327 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m1_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i8m1_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m2_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.th.vredmax.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i8m2_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m4_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i8m4_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m8_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i8m8_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m1_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i16m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i32m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i64m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vredmax_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredmax.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmaxu.c new file mode 100644 index 0000000000000..549f72d449a35 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmaxu.c @@ -0,0 +1,327 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m1_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv8i8.i64( poison, 
[[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m4_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u16m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u32m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u32m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vredmaxu_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m8_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredmaxu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmin.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmin.c new file mode 100644 index 0000000000000..c05b6471c0f88 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredmin.c @@ -0,0 +1,327 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m1_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i8m1_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m2_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i8m2_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m4_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i8m4_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m8_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i8m8_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m1_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i16m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i32m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i64m1_i64m1(vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredmin.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], 
i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t 
test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredmin_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredminu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredminu.c new file mode 100644 index 0000000000000..5d14af557d2ab --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredminu.c @@ -0,0 +1,327 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m1_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m4_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]])
#[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u16m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u32m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u32m8_u32m1(vector, scalar, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredminu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv2i32.i64( poison, 
[[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m8_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredminu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredor.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredor.c new file mode 100644 index 0000000000000..e31b6b46891fc --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredor.c @@ -0,0 +1,647 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m1_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i8m1_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m2_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i8m2_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m4_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i8m4_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m8_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i8m8_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m1_i16m1 +// CHECK-RV64-SAME: (
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i16m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i32m4_i32m1(vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i64m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m1_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return 
__riscv_th_vredor_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m4_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u16m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u32m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u32m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv8i64.i64( 
poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t 
scalar, size_t vl) { + return __riscv_th_vredor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv4i64.i64( 
poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_th_vredor_vs_u64m4_u64m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m8_u64m1_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_th_vredor_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredsum.c
new file mode 100644
index 0000000000000..c59f6c4fd2723
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredsum.c
@@ -0,0 +1,647 @@
+// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \
+// RUN:   -target-feature +d -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m1_i8m1
+// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_th_vredsum_vs_i8m1_i8m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m2_i8m1
+// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_th_vredsum_vs_i8m2_i8m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m4_i8m1
+// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_th_vredsum_vs_i8m4_i8m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m8_i8m1
+// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_th_vredsum_vs_i8m8_i8m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local 
@test_vredsum_vs_i16m1_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i16m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + 
return __riscv_th_vredsum_vs_i32m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i64m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m1_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
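// A minimal usage sketch (hedged; not part of the generated checks): each
// wrapper above folds all `vl` elements of `vector` into element 0 of an
// LMUL=1 result, seeded with element 0 of `scalar`:
//   result[0] = scalar[0] op vector[0] op ... op vector[vl-1]
// A strip-mined buffer sum built on the i32m1 wrapper could look like the
// code below. Only __riscv_th_vredsum_vs_i32m1_i32m1 is exercised by this
// file; __riscv_th_vsetvl_e32m1, __riscv_th_vle32_v_i32m1,
// __riscv_th_vmv_v_x_i32m1 and __riscv_th_vmv_x_s_i32m1_i32 are assumed
// th-prefixed analogues of the standard RVV helpers, and `sum_i32` is a
// hypothetical name.
//
//   #include <riscv_vector.h>
//   #include <stddef.h>
//   #include <stdint.h>
//
//   static int32_t sum_i32(const int32_t *p, size_t n) {
//     if (n == 0) return 0;
//     size_t vl0 = __riscv_th_vsetvl_e32m1(n);            // assumed helper
//     vint32m1_t acc = __riscv_th_vmv_v_x_i32m1(0, vl0);  // acc[0] = 0
//     for (size_t vl; n > 0; n -= vl, p += vl) {
//       vl = __riscv_th_vsetvl_e32m1(n);                  // assumed helper
//       vint32m1_t v = __riscv_th_vle32_v_i32m1(p, vl);   // assumed helper
//       // acc[0] += v[0] + ... + v[vl-1]; acc doubles as the running seed
//       acc = __riscv_th_vredsum_vs_i32m1_i32m1(v, acc, vl);
//     }
//     return __riscv_th_vmv_x_s_i32m1_i32(acc);           // assumed helper
//   }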
+vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m4_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u16m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u32m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u32m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t 
vl) { + return __riscv_th_vredsum_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredsum_vs_u64m2_u64m1_m(mask, 
vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m4_u64m1_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_th_vredsum_vs_u64m4_u64m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m8_u64m1_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_th_vredsum_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredxor.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredxor.c
new file mode 100644
index 0000000000000..fe41b880fb873
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vredxor.c
@@ -0,0 +1,647 @@
+// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \
+// RUN:   -target-feature +d -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m1_i8m1
+// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_th_vredxor_vs_i8m1_i8m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m2_i8m1
+// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_th_vredxor_vs_i8m2_i8m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m4_i8m1
+// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_th_vredxor_vs_i8m4_i8m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m8_i8m1
+// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i8m8_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m1_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i16m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i32m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i64m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m1_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vredxor_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m4_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u16m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + 
return __riscv_th_vredxor_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u32m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u32m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i16m1_i16m1_m(mask, vector, 
scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vredxor_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m8_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vredxor_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vwredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vwredsum.c new file mode 100644 index 0000000000000..02c7137726ede --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vwredsum.c @@ -0,0 +1,247 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m1_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i8m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i8m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t
test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i8m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i8m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i16m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i16m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i16m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i16m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i32m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vwredsum.nxv1i64.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i32m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i32m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i32m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i8m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i8m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i8m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint8m8_t vector, vint16m1_t 
scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i8m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i16m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i16m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i16m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i16m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i32m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return 
__riscv_th_vwredsum_vs_i32m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i32m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsum_vs_i32m8_i64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vwredsumu.c new file mode 100644 index 0000000000000..3e094d8d4181e --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/thead/vwredsumu.c @@ -0,0 +1,367 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv1i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv2i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv4i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m1_u16m1 +// CHECK-RV64-SAME: (
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16mf4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv1i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16mf2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv2i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar, size_t 
vl) { + return __riscv_th_vwredsumu_vs_u16m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u16m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u16m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u16m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32mf2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv1i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u32m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u32m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vwredsumu.nxv1i64.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u32m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u32m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv1i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8mf8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv2i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8mf4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv4i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8mf2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u8m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16mf4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv1i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u16mf4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16mf2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv2i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u16mf2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u16m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u16m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u16m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u16m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32mf2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u32mf2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u32m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u32m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u32m4_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m8_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_th_vwredsumu_vs_u32m8_u64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmax.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmax.c new file mode 100644 index 0000000000000..3be8fde8b2426 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmax.c @@ -0,0 +1,247 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m1_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m1_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m2_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m2_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m4_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m4_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m8_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m8_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL:
define dso_local @test_vfredmax_vs_f32m1_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m1_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m2_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t 
test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m8_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m1_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m2_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m4_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m8_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return 
__riscv_vfredmax_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m2_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m4_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m4_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m8_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m8_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m1_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m1_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m2_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m2_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m4_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m4_f64m1_m(mask, 
vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m8_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m8_f64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmin.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmin.c new file mode 100644 index 0000000000000..26001a2ba2e77 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmin.c @@ -0,0 +1,247 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m1_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m1_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m2_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m2_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m4_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m4_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m8_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m8_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m1_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m1_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m2_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfredmin_vs_f64m8_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m1_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m2_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m4_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m8_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m2_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m4_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m4_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m8_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m8_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m1_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m1_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m2_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m2_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m4_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m4_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m8_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m8_f64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredosum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredosum.c new file mode 100644 index 0000000000000..38f91cc5b1837 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredosum.c @@ -0,0 +1,246 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m1_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m1_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m2_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m2_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m4_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m4_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m8_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m8_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m1_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m1_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m2_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m8_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m1_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m2_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m4_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m8_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m2_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m4_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m8_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m1_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m2_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m4_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m8_f64m1_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m8_f64m1_m(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredsum.c new file mode 100644 index 0000000000000..9933d9d7ef5ed --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredsum.c @@ -0,0 +1,246 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m1_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m1_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m2_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m2_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m4_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m4_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m8_f16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m8_f16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m1_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv2f32.i64( 
poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m1_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m2_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m8_f64m1 +// 
CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m1_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m2_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m4_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m8_f16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfredsum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m2_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m4_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m8_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m1_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m2_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m4_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m8_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m8_f64m1_m(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredosum.c new file mode 100644 index 0000000000000..809ec0e8e8784 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredosum.c @@ -0,0 +1,166 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m1_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m1_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m2_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfwredosum.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m8_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m2_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m4_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
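// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the generated
// FileCheck tests in this patch): the masked widening wrapper exercised just
// below folds the active f16 elements of `v` into a single f32 result, seeded
// by element 0 of `acc`, per the usual vfwredosum.vs ordered-reduction
// semantics. Only the wrapper signature demonstrated in this file is assumed;
// the helper name is ours.
static inline vfloat32m1_t sum_f16m4_into_f32(vbool4_t mask, vfloat16m4_t v,
                                              vfloat32m1_t acc, size_t vl) {
  // Ordered (sequential) sum: result[0] = acc[0] + widen(v[i]) over active i;
  // masked-off elements do not participate.
  return __riscv_vfwredosum_vs_f16m4_f32m1_m(mask, v, acc, vl);
}
// ---------------------------------------------------------------------------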
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m8_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m1_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m2_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m4_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m8_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredsum.c new file mode 100644 index 0000000000000..38e2b9c2141e3 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredsum.c @@ -0,0 +1,166 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m1_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m1_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m2_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m2_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m4_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m4_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m8_f32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m8_f32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m1_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m1_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m2_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return 
__riscv_vfwredsum_vs_f32m2_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m4_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m4_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m8_f64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m8_f64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m1_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m2_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m2_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m4_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m8_f32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfwredsum_vs_f32m1_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m2_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m4_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m8_f64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredand.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredand.c new file mode 100644 index 0000000000000..f7bb80ab7a8c0 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredand.c @@ -0,0 +1,647 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m1_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m1_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vredand_vs_i8m2_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m2_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m4_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m4_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m8_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m8_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m1_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m8_i16m1(vector, 
scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, 
vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m1_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m4_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m8_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmax.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmax.c new file mode 100644 index 0000000000000..1ee510af5a5f1 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmax.c @@ -0,0 +1,327 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m1_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + 
return __riscv_vredmax_vs_i8m1_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m2_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m2_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m4_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m4_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m8_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m8_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m1_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t 
test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv4i64.i64( poison, 
[[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return 
__riscv_vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vredmax_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmaxu.c new file mode 100644 index 0000000000000..71725aab99590 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmaxu.c @@ -0,0 +1,327 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m1_u8m1 +// 
CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m4_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m4_u16m1(vector, 
scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vredmaxu_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m8_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmin.c 
b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmin.c new file mode 100644 index 0000000000000..08aef3f0b9597 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmin.c @@ -0,0 +1,327 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m1_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m1_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m2_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m2_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m4_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m4_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m8_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m8_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m1_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return 
__riscv_vredmin_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t 
test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredmin.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredminu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredminu.c new file mode 100644 index 0000000000000..2197567c9ff6f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredminu.c @@ -0,0 +1,327 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m1_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m4_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return 
__riscv_vredminu_vs_u32m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], 
[[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m8_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredor.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredor.c new file mode 100644 index 0000000000000..2f33290cd2bb0 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredor.c @@ -0,0 +1,647 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m1_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m1_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m2_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m2_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m4_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m4_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m8_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m8_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m1_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m8_i32m1 +// 
CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m1_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vredor_vs_u8m4_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return 
__riscv_vredor_vs_u32m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vredor_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, 
vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m8_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredsum.c new file mode 100644 index 0000000000000..fc7a5e7e7d8f8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredsum.c @@ -0,0 +1,647 @@ +// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \ +// RUN: -target-feature +d -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m1_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m1_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m2_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m2_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m4_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m4_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m8_i8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m8_i8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m1_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m1_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m4_u8m1 +// 
CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m1_u32m1(vector, 
scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} 
+ +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv2i32.i64( poison, 
[[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+//
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vredsum_vs_u64m4_u64m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredsum_vs_u64m8_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vredsum_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredxor.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredxor.c
new file mode 100644
index 0000000000000..c6d747e860302
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredxor.c
@@ -0,0 +1,647 @@
+// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \
+// RUN:   -target-feature +d -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredxor_vs_i8m1_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredxor_vs_i8m1_i8m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredxor_vs_i8m2_i8m1
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredxor_vs_i8m2_i8m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredxor_vs_i8m4_i8m1
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredxor_vs_i8m4_i8m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredxor_vs_i8m8_i8m1
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredxor_vs_i8m8_i8m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredxor_vs_i16m1_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64
noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m1_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m2_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m2_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m4_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m4_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m8_i16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vredxor_vs_i32m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m1_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m1_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m2_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return 
__riscv_vredxor_vs_u8m2_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m4_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m4_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m8_u8m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m8_u8m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m1_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m1_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m2_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m8_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m8_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m1_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m1_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m2_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m2_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m4_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m4_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m8_u64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m8_u64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m1_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m2_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m4_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m8_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t 
test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + 
return __riscv_vredxor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m8_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m1_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m2_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vredxor_vs_u8m4_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m8_u8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m1_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m2_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m4_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m8_u16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m1_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m2_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m4_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m8_u32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m1_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m2_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m4_u64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vredxor_vs_u64m4_u64m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredxor_vs_u64m8_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vredxor_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsum.c
new file mode 100644
index 0000000000000..a9a82f29f2a37
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsum.c
@@ -0,0 +1,247 @@
+// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \
+// RUN:   -target-feature +d -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsum_vs_i8m1_i16m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsum.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> poison, <vscale x 8 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsum_vs_i8m1_i16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsum_vs_i8m2_i16m1
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsum.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> poison, <vscale x 16 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsum_vs_i8m2_i16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsum_vs_i8m4_i16m1
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsum.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> poison, <vscale x 32 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsum_vs_i8m4_i16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsum_vs_i8m8_i16m1
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsum.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> poison, <vscale x 64 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t
scalar, size_t vl) { + return __riscv_vwredsum_vs_i8m8_i16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m1_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m1_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m2_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m2_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m4_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m4_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m8_i32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m8_i32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m1_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m1_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m2_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m2_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m4_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m4_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m8_i64m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m8_i64m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m1_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i8m1_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m2_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i8m2_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m4_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i8m4_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m8_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i8m8_i16m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m1_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return 
__riscv_vwredsum_vs_i16m1_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m2_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m2_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m4_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m4_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m8_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m8_i32m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m1_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m1_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m2_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m2_i64m1_m(mask, vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m4_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m4_i64m1_m(mask, vector, scalar, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwredsum_vs_i32m8_i64m1_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> poison, <vscale x 16 x i32> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl) {
+  return __riscv_vwredsum_vs_i32m8_i64m1_m(mask, vector, scalar, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsumu.c
new file mode 100644
index 0000000000000..89957cf9ea116
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsumu.c
@@ -0,0 +1,367 @@
+// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadvector \
+// RUN:   -target-feature +d -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8mf8_u16m1
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsumu.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> poison, <vscale x 1 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8mf4_u16m1
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsumu.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> poison, <vscale x 2 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8mf2_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsumu.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8m1_u16m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsumu.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> poison, <vscale x 8 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m1_u16m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8m2_u16m1
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16>
@llvm.riscv.th.vwredsumu.nxv4i16.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u8m2_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m4_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u8m4_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m8_u16m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u8m8_u16m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16mf4_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv1i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16mf2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv2i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m1_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16m1_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m2_u32m1 +// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16m2_u32m1(vector, scalar, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m4_u32m1 +// 
CHECK-RV64-SAME: (<vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.th.vwredsumu.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> poison, <vscale x 16 x i16> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m4_u32m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwredsumu_vs_u16m8_u32m1
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.th.vwredsumu.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> poison, <vscale x 32 x i16> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m8_u32m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwredsumu_vs_u32mf2_u64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwredsumu_vs_u32m1_u64m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vwredsumu.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> poison, <vscale x 2 x i32> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m1_u64m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwredsumu_vs_u32m2_u64m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vwredsumu.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> poison, <vscale x 4 x i32> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m2_u64m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwredsumu_vs_u32m4_u64m1
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vwredsumu.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> poison, <vscale x 8 x i32> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m4_u64m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwredsumu_vs_u32m8_u64m1
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vwredsumu.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> poison, <vscale x 16 x i32> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m8_u64m1(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8mf8_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> poison, <vscale x 1 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf8_u16m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8mf4_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> poison, <vscale x 2 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf4_u16m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8mf2_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf2_u16m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8m1_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> poison, <vscale x 8 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m1_u16m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8m2_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> poison, <vscale x 16 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m2_u16m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8m4_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> poison, <vscale x 32 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m4_u16m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwredsumu_vs_u8m8_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> poison, <vscale x 64 x i8> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 64 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m8_u16m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwredsumu_vs_u16mf4_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> poison, <vscale x 1 x i16> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u16mf4_u32m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwredsumu_vs_u16mf2_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u16mf2_u32m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwredsumu_vs_u16m1_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> poison, <vscale x 4 x i16> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m1_u32m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwredsumu_vs_u16m2_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> poison, <vscale x 8 x i16> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m2_u32m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwredsumu_vs_u16m4_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> poison, <vscale x 16 x i16> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m4_u32m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwredsumu_vs_u16m8_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> poison, <vscale x 32 x i16> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m8_u32m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwredsumu_vs_u32mf2_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u32mf2_u64m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwredsumu_vs_u32m1_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> poison, <vscale x 2 x i32> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m1_u64m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwredsumu_vs_u32m2_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> poison, <vscale x 4 x i32> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m2_u64m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwredsumu_vs_u32m4_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> poison, <vscale x 8 x i32> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m4_u64m1_m(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwredsumu_vs_u32m8_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> poison, <vscale x 16 x i32> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m8_u64m1_m(mask, vector, scalar, vl);
+}
+
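Note (editor, not part of the generated tests): the tests above only pin down codegen; a minimal usage sketch of the wrapper they exercise follows, assuming only the `__riscv_vwredsumu_vs_u32m8_u64m1` intrinsic introduced by this patch (the helper name `sum_u32_into_u64` is illustrative):

// Accumulate the first vl u32 elements of `v` into element 0 of the u64
// accumulator `acc`, zero-extending each element before summing.
// result[0] = acc[0] + sum(zext(v[0..vl-1])); other destination elements
// follow the target's tail policy.
static inline vuint64m1_t sum_u32_into_u64(vuint32m8_t v, vuint64m1_t acc,
                                           size_t vl) {
  return __riscv_vwredsumu_vs_u32m8_u64m1(v, acc, vl);
}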