diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
index a0107e44b421b..5096a8fcda8eb 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
@@ -1651,18 +1651,20 @@ def : Pat<(vector_insert v8i32:$xd, GRLenVT:$rj, uimm3:$imm),
           (XVINSGR2VR_W v8i32:$xd, GRLenVT:$rj, uimm3:$imm)>;
 def : Pat<(vector_insert v4i64:$xd, GRLenVT:$rj, uimm2:$imm),
           (XVINSGR2VR_D v4i64:$xd, GRLenVT:$rj, uimm2:$imm)>;
-def : Pat<(vector_insert v8f32:$vd, (loongarch_movgr2fr_w_la64 GPR:$rj), uimm3:$imm),
-          (XVINSGR2VR_W $vd, $rj, uimm3:$imm)>;
-def : Pat<(vector_insert v4f64:$vd, (f64 (bitconvert i64:$rj)), uimm2:$imm),
-          (XVINSGR2VR_D $vd, $rj, uimm2:$imm)>;
+def : Pat<(vector_insert v8f32:$xd, (loongarch_movgr2fr_w_la64 GPR:$rj), uimm3:$imm),
+          (XVINSGR2VR_W $xd, $rj, uimm3:$imm)>;
+def : Pat<(vector_insert v4f64:$xd, (f64 (bitconvert i64:$rj)), uimm2:$imm),
+          (XVINSGR2VR_D $xd, $rj, uimm2:$imm)>;
 def : Pat<(vector_insert v8f32:$xd, (f32 (vector_extract v8f32:$xj, uimm3:$imm1)), uimm3:$imm2),
           (XVINSGR2VR_W $xd, (XVPICKVE2GR_W v8f32:$xj, uimm3:$imm1), uimm3:$imm2)>;
 def : Pat<(vector_insert v4f64:$xd, (f64 (vector_extract v4f64:$xj, uimm2:$imm1)), uimm2:$imm2),
           (XVINSGR2VR_D $xd, (XVPICKVE2GR_D v4f64:$xj, uimm2:$imm1), uimm2:$imm2)>;
+
+// XVINSVE0_{W/D}
 def : Pat<(vector_insert v8f32:$xd, FPR32:$fj, uimm3:$imm),
-          (XVINSGR2VR_W $xd, (COPY_TO_REGCLASS FPR32:$fj, GPR), uimm3:$imm)>;
+          (XVINSVE0_W $xd, (SUBREG_TO_REG (i64 0), FPR32:$fj, sub_32), uimm3:$imm)>;
 def : Pat<(vector_insert v4f64:$xd, FPR64:$fj, uimm2:$imm),
-          (XVINSGR2VR_D $xd, (COPY_TO_REGCLASS FPR64:$fj, GPR), uimm2:$imm)>;
+          (XVINSVE0_D $xd, (SUBREG_TO_REG (i64 0), FPR64:$fj, sub_64), uimm2:$imm)>;
 
 // scalar_to_vector
 def : Pat<(v8f32 (scalar_to_vector FPR32:$fj)),
diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
index 962e7c21431b1..3c9defb0366ff 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
@@ -1842,10 +1842,19 @@ def : Pat<(vector_insert v4f32:$vd, (loongarch_movgr2fr_w_la64 GPR:$rj), uimm2:$
           (VINSGR2VR_W $vd, $rj, uimm2:$imm)>;
 def : Pat<(vector_insert v2f64:$vd, (f64 (bitconvert i64:$rj)), uimm1:$imm),
           (VINSGR2VR_D $vd, $rj, uimm1:$imm)>;
-def : Pat<(vector_insert v4f32:$vd, FPR32:$fj, uimm2:$imm),
-          (VINSGR2VR_W $vd, (COPY_TO_REGCLASS FPR32:$fj, GPR), uimm2:$imm)>;
-def : Pat<(vector_insert v2f64:$vd, FPR64:$fj, uimm1:$imm),
-          (VINSGR2VR_D $vd, (COPY_TO_REGCLASS FPR64:$fj, GPR), uimm1:$imm)>;
+
+// VEXTRINS_{W/D}
+foreach imm = 0...3 in {
+  defvar Imm = !shl(imm, 4);
+  def : Pat<(vector_insert v4f32:$vd, FPR32:$fj, imm),
+            (VEXTRINS_W $vd, (SUBREG_TO_REG (i64 0), FPR32:$fj, sub_32), Imm)>;
+}
+
+foreach imm = 0...1 in {
+  defvar Imm = !shl(imm, 4);
+  def : Pat<(vector_insert v2f64:$vd, FPR64:$fj, imm),
+            (VEXTRINS_D $vd, (SUBREG_TO_REG (i64 0), FPR64:$fj, sub_64), Imm)>;
+}
 
 // scalar_to_vector
 def : Pat<(v4f32 (scalar_to_vector FPR32:$fj)),
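(Editorial note, not part of the patch: in the vextrins.{w/d} immediate, the
high four bits select the destination lane and the low four bits select the
source lane, which is why the foreach patterns above scale the insertion index
with !shl(imm, 4) and always read lane 0 of the source. A minimal IR sketch of
what the new VEXTRINS_W pattern matches, with an illustrative function name:

  define <4 x float> @insert_lane1(<4 x float> %v, float %f) {
    %r = insertelement <4 x float> %v, float %f, i32 1
    ret <4 x float> %r
  }

Assuming %v arrives in $vr0 and %f in $f1, viewed as lane 0 of $vr1 via
SUBREG_TO_REG, the insert lowers to a single vextrins.w $vr0, $vr1, 16, where
16 == 1 << 4, instead of a movfr2gr.s/vinsgr2vr.w round trip through a
general-purpose register. The test updates below reflect this.)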
diff --git a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
index f25e988b52dc9..a4f3fe717ae25 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
@@ -467,22 +467,22 @@ entry:
 define void @buildvector_v8f32(ptr %dst, float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) nounwind {
 ; CHECK-LABEL: buildvector_v8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movfr2gr.s $a1, $fa0
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 0
-; CHECK-NEXT:    movfr2gr.s $a1, $fa1
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 1
-; CHECK-NEXT:    movfr2gr.s $a1, $fa2
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 2
-; CHECK-NEXT:    movfr2gr.s $a1, $fa3
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 3
-; CHECK-NEXT:    movfr2gr.s $a1, $fa4
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 4
-; CHECK-NEXT:    movfr2gr.s $a1, $fa5
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 5
-; CHECK-NEXT:    movfr2gr.s $a1, $fa6
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 6
-; CHECK-NEXT:    movfr2gr.s $a1, $fa7
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 7
+; CHECK-NEXT:    # kill: def $f7 killed $f7 def $xr7
+; CHECK-NEXT:    # kill: def $f6 killed $f6 def $xr6
+; CHECK-NEXT:    # kill: def $f5 killed $f5 def $xr5
+; CHECK-NEXT:    # kill: def $f4 killed $f4 def $xr4
+; CHECK-NEXT:    # kill: def $f3 killed $f3 def $xr3
+; CHECK-NEXT:    # kill: def $f2 killed $f2 def $xr2
+; CHECK-NEXT:    # kill: def $f1 killed $f1 def $xr1
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr0, 0
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr1, 1
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr2, 2
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr3, 3
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr4, 4
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr5, 5
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr6, 6
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr7, 7
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -501,14 +501,14 @@ entry:
 define void @buildvector_v4f64(ptr %dst, double %a0, double %a1, double %a2, double %a3) nounwind {
 ; CHECK-LABEL: buildvector_v4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movfr2gr.d $a1, $fa0
-; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a1, 0
-; CHECK-NEXT:    movfr2gr.d $a1, $fa1
-; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a1, 1
-; CHECK-NEXT:    movfr2gr.d $a1, $fa2
-; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a1, 2
-; CHECK-NEXT:    movfr2gr.d $a1, $fa3
-; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a1, 3
+; CHECK-NEXT:    # kill: def $f3_64 killed $f3_64 def $xr3
+; CHECK-NEXT:    # kill: def $f2_64 killed $f2_64 def $xr2
+; CHECK-NEXT:    # kill: def $f1_64 killed $f1_64 def $xr1
+; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $xr0
+; CHECK-NEXT:    xvinsve0.d $xr0, $xr0, 0
+; CHECK-NEXT:    xvinsve0.d $xr0, $xr1, 1
+; CHECK-NEXT:    xvinsve0.d $xr0, $xr2, 2
+; CHECK-NEXT:    xvinsve0.d $xr0, $xr3, 3
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll
index 9528280d181a3..6ea3efe3f84a3 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll
@@ -16,8 +16,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a0, 0
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr0, 0
 ; CHECK-NEXT:    xvst $xr0, $sp, 48 # 32-byte Folded Spill
 ; CHECK-NEXT:    xvld $xr0, $sp, 16 # 32-byte Folded Reload
 ; CHECK-NEXT:    xvpickve2gr.w $a0, $xr0, 1
@@ -25,69 +25,70 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a0, 1
-; CHECK-NEXT:    xvst $xr0, $sp, 48 # 32-byte Folded Spill
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT:    xvld $xr1, $sp, 48 # 32-byte Folded Reload
+; CHECK-NEXT:    xvinsve0.w $xr1, $xr0, 1
+; CHECK-NEXT:    xvst $xr1, $sp, 48 # 32-byte Folded Spill
 ; CHECK-NEXT:    xvld $xr0, $sp, 16 # 32-byte Folded Reload
 ; CHECK-NEXT:    xvpickve2gr.w $a0, $xr0, 2
 ; CHECK-NEXT:    movgr2fr.w $fa0, $a0
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a0, 2
-; CHECK-NEXT:    xvst $xr0, $sp, 48 # 32-byte Folded Spill
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT:    xvld $xr1, $sp, 48 # 32-byte Folded Reload
+; CHECK-NEXT:    xvinsve0.w $xr1, $xr0, 2
+; CHECK-NEXT:    xvst $xr1, $sp, 48 # 32-byte Folded Spill
 ; CHECK-NEXT:    xvld $xr0, $sp, 16 # 32-byte Folded Reload
 ; CHECK-NEXT:    xvpickve2gr.w $a0, $xr0, 3
 ; CHECK-NEXT:    movgr2fr.w $fa0, $a0
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a0, 3
-; CHECK-NEXT:    xvst $xr0, $sp, 48 # 32-byte Folded Spill
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT:    xvld $xr1, $sp, 48 # 32-byte Folded Reload
+; CHECK-NEXT:    xvinsve0.w $xr1, $xr0, 3
+; CHECK-NEXT:    xvst $xr1, $sp, 48 # 32-byte Folded Spill
 ; CHECK-NEXT:    xvld $xr0, $sp, 16 # 32-byte Folded Reload
 ; CHECK-NEXT:    xvpickve2gr.w $a0, $xr0, 4
 ; CHECK-NEXT:    movgr2fr.w $fa0, $a0
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a0, 4
-; CHECK-NEXT:    xvst $xr0, $sp, 48 # 32-byte Folded Spill
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT:    xvld $xr1, $sp, 48 # 32-byte Folded Reload
+; CHECK-NEXT:    xvinsve0.w $xr1, $xr0, 4
+; CHECK-NEXT:    xvst $xr1, $sp, 48 # 32-byte Folded Spill
 ; CHECK-NEXT:    xvld $xr0, $sp, 16 # 32-byte Folded Reload
 ; CHECK-NEXT:    xvpickve2gr.w $a0, $xr0, 5
 ; CHECK-NEXT:    movgr2fr.w $fa0, $a0
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a0, 5
-; CHECK-NEXT:    xvst $xr0, $sp, 48 # 32-byte Folded Spill
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT:    xvld $xr1, $sp, 48 # 32-byte Folded Reload
+; CHECK-NEXT:    xvinsve0.w $xr1, $xr0, 5
+; CHECK-NEXT:    xvst $xr1, $sp, 48 # 32-byte Folded Spill
 ; CHECK-NEXT:    xvld $xr0, $sp, 16 # 32-byte Folded Reload
 ; CHECK-NEXT:    xvpickve2gr.w $a0, $xr0, 6
 ; CHECK-NEXT:    movgr2fr.w $fa0, $a0
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a0, 6
-; CHECK-NEXT:    xvst $xr0, $sp, 48 # 32-byte Folded Spill
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT:    xvld $xr1, $sp, 48 # 32-byte Folded Reload
+; CHECK-NEXT:    xvinsve0.w $xr1, $xr0, 6
+; CHECK-NEXT:    xvst $xr1, $sp, 48 # 32-byte Folded Spill
 ; CHECK-NEXT:    xvld $xr0, $sp, 16 # 32-byte Folded Reload
 ; CHECK-NEXT:    xvpickve2gr.w $a0, $xr0, 7
 ; CHECK-NEXT:    movgr2fr.w $fa0, $a0
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a0, 7
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT:    xvld $xr1, $sp, 48 # 32-byte Folded Reload
+; CHECK-NEXT:    xvinsve0.w $xr1, $xr0, 7
+; CHECK-NEXT:    xvori.b $xr0, $xr1, 0
 ; CHECK-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
 ; CHECK-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 96
@@ -112,8 +113,8 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind {
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powidf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.d $a0, $fa0
-; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a0, 0
+; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $xr0
+; CHECK-NEXT:    xvinsve0.d $xr0, $xr0, 0
 ; CHECK-NEXT:    xvst $xr0, $sp, 48 # 32-byte Folded Spill
 ; CHECK-NEXT:    xvld $xr0, $sp, 16 # 32-byte Folded Reload
 ; CHECK-NEXT:    xvpickve2gr.d $a0, $xr0, 1
@@ -121,29 +122,30 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind {
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powidf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.d $a0, $fa0
-; CHECK-NEXT:    xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a0, 1
-; CHECK-NEXT:    xvst $xr0, $sp, 48 # 32-byte Folded Spill
+; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $xr0
+; CHECK-NEXT:    xvld $xr1, $sp, 48 # 32-byte Folded Reload
+; CHECK-NEXT:    xvinsve0.d $xr1, $xr0, 1
+; CHECK-NEXT:    xvst $xr1, $sp, 48 # 32-byte Folded Spill
 ; CHECK-NEXT:    xvld $xr0, $sp, 16 # 32-byte Folded Reload
 ; CHECK-NEXT:    xvpickve2gr.d $a0, $xr0, 2
 ; CHECK-NEXT:    movgr2fr.d $fa0, $a0
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powidf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.d $a0, $fa0
-; CHECK-NEXT:    xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a0, 2
-; CHECK-NEXT:    xvst $xr0, $sp, 48 # 32-byte Folded Spill
+; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $xr0
+; CHECK-NEXT:    xvld $xr1, $sp, 48 # 32-byte Folded Reload
+; CHECK-NEXT:    xvinsve0.d $xr1, $xr0, 2
+; CHECK-NEXT:    xvst $xr1, $sp, 48 # 32-byte Folded Spill
 ; CHECK-NEXT:    xvld $xr0, $sp, 16 # 32-byte Folded Reload
 ; CHECK-NEXT:    xvpickve2gr.d $a0, $xr0, 3
 ; CHECK-NEXT:    movgr2fr.d $fa0, $a0
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powidf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.d $a0, $fa0
-; CHECK-NEXT:    xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a0, 3
+; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $xr0
+; CHECK-NEXT:    xvld $xr1, $sp, 48 # 32-byte Folded Reload
+; CHECK-NEXT:    xvinsve0.d $xr1, $xr0, 3
+; CHECK-NEXT:    xvori.b $xr0, $xr1, 0
 ; CHECK-NEXT:    ld.d $fp, $sp, 80 # 8-byte Folded Reload
 ; CHECK-NEXT:    ld.d $ra, $sp, 88 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 96
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
index b24f95e676d10..c1d4220fc1166 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
@@ -87,8 +87,8 @@ define void @insert_8xfloat(ptr %src, ptr %dst, float %in) nounwind {
 ; CHECK-LABEL: insert_8xfloat:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr1, $a0, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    xvinsgr2vr.w $xr1, $a0, 1
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT:    xvinsve0.w $xr1, $xr0, 1
 ; CHECK-NEXT:    xvst $xr1, $a1, 0
 ; CHECK-NEXT:    ret
   %v = load volatile <8 x float>, ptr %src
@@ -101,8 +101,8 @@ define void @insert_4xdouble(ptr %src, ptr %dst, double %in) nounwind {
 ; CHECK-LABEL: insert_4xdouble:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr1, $a0, 0
-; CHECK-NEXT:    movfr2gr.d $a0, $fa0
-; CHECK-NEXT:    xvinsgr2vr.d $xr1, $a0, 1
+; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $xr0
+; CHECK-NEXT:    xvinsve0.d $xr1, $xr0, 1
 ; CHECK-NEXT:    xvst $xr1, $a1, 0
 ; CHECK-NEXT:    ret
   %v = load volatile <4 x double>, ptr %src
diff --git a/llvm/test/CodeGen/LoongArch/llvm.exp10.ll b/llvm/test/CodeGen/LoongArch/llvm.exp10.ll
index 7a52531daa802..c667a3609e7f1 100644
--- a/llvm/test/CodeGen/LoongArch/llvm.exp10.ll
+++ b/llvm/test/CodeGen/LoongArch/llvm.exp10.ll
@@ -200,17 +200,18 @@ define <2 x double> @exp10_v2f64(<2 x double> %x) #0 {
 ; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(exp10)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    movfr2gr.d $a0, $fa0
-; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 def $vr0
+; LA64-NEXT:    vextrins.d $vr0, $vr0, 0
 ; LA64-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 0 # 16-byte Folded Reload
 ; LA64-NEXT:    vreplvei.d $vr0, $vr0, 1
 ; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(exp10)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    movfr2gr.d $a0, $fa0
-; LA64-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
-; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 1
+; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 def $vr0
+; LA64-NEXT:    vld $vr1, $sp, 16 # 16-byte Folded Reload
+; LA64-NEXT:    vextrins.d $vr1, $vr0, 16
+; LA64-NEXT:    vori.b $vr0, $vr1, 0
 ; LA64-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 48
 ; LA64-NEXT:    ret
diff --git a/llvm/test/CodeGen/LoongArch/llvm.sincos.ll b/llvm/test/CodeGen/LoongArch/llvm.sincos.ll
index 648c19d509715..17e5969d2da81 100644
--- a/llvm/test/CodeGen/LoongArch/llvm.sincos.ll
+++ b/llvm/test/CodeGen/LoongArch/llvm.sincos.ll
@@ -576,8 +576,8 @@ define { <2 x double>, <2 x double> } @test_sincos_v2f64(<2 x double> %a) #0 {
 ; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(sin)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    movfr2gr.d $a0, $fa0
-; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 def $vr0
+; LA64-NEXT:    vextrins.d $vr0, $vr0, 0
 ; LA64-NEXT:    vst $vr0, $sp, 48 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
 ; LA64-NEXT:    vreplvei.d $vr0, $vr0, 1
@@ -585,24 +585,24 @@ define { <2 x double>, <2 x double> } @test_sincos_v2f64(<2 x double> %a) #0 {
 ; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(sin)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    movfr2gr.d $a0, $fa0
-; LA64-NEXT:    vld $vr0, $sp, 48 # 16-byte Folded Reload
-; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 1
-; LA64-NEXT:    vst $vr0, $sp, 48 # 16-byte Folded Spill
+; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 def $vr0
+; LA64-NEXT:    vld $vr1, $sp, 48 # 16-byte Folded Reload
+; LA64-NEXT:    vextrins.d $vr1, $vr0, 16
+; LA64-NEXT:    vst $vr1, $sp, 48 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 32 # 16-byte Folded Reload
 ; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(cos)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    movfr2gr.d $a0, $fa0
-; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 def $vr0
+; LA64-NEXT:    vextrins.d $vr0, $vr0, 0
 ; LA64-NEXT:    vst $vr0, $sp, 32 # 16-byte Folded Spill
 ; LA64-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
 ; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
 ; LA64-NEXT:    pcaddu18i $ra, %call36(cos)
 ; LA64-NEXT:    jirl $ra, $ra, 0
-; LA64-NEXT:    movfr2gr.d $a0, $fa0
+; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 def $vr0
 ; LA64-NEXT:    vld $vr1, $sp, 32 # 16-byte Folded Reload
-; LA64-NEXT:    vinsgr2vr.d $vr1, $a0, 1
+; LA64-NEXT:    vextrins.d $vr1, $vr0, 16
 ; LA64-NEXT:    vld $vr0, $sp, 48 # 16-byte Folded Reload
 ; LA64-NEXT:    ld.d $ra, $sp, 72 # 8-byte Folded Reload
 ; LA64-NEXT:    addi.d $sp, $sp, 80
diff --git a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
index d84e408cd28be..f723343964f5d 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
@@ -334,14 +334,14 @@ entry:
 define void @buildvector_v4f32(ptr %dst, float %a0, float %a1, float %a2, float %a3) nounwind {
 ; CHECK-LABEL: buildvector_v4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movfr2gr.s $a1, $fa0
-; CHECK-NEXT:    vinsgr2vr.w $vr0, $a1, 0
-; CHECK-NEXT:    movfr2gr.s $a1, $fa1
-; CHECK-NEXT:    vinsgr2vr.w $vr0, $a1, 1
-; CHECK-NEXT:    movfr2gr.s $a1, $fa2
-; CHECK-NEXT:    vinsgr2vr.w $vr0, $a1, 2
-; CHECK-NEXT:    movfr2gr.s $a1, $fa3
-; CHECK-NEXT:    vinsgr2vr.w $vr0, $a1, 3
+; CHECK-NEXT:    # kill: def $f3 killed $f3 def $vr3
+; CHECK-NEXT:    # kill: def $f2 killed $f2 def $vr2
+; CHECK-NEXT:    # kill: def $f1 killed $f1 def $vr1
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT:    vextrins.w $vr0, $vr0, 0
+; CHECK-NEXT:    vextrins.w $vr0, $vr1, 16
+; CHECK-NEXT:    vextrins.w $vr0, $vr2, 32
+; CHECK-NEXT:    vextrins.w $vr0, $vr3, 48
 ; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -356,10 +356,10 @@ entry:
 define void @buildvector_v2f64(ptr %dst, double %a0, double %a1) nounwind {
 ; CHECK-LABEL: buildvector_v2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movfr2gr.d $a1, $fa0
-; CHECK-NEXT:    vinsgr2vr.d $vr0, $a1, 0
-; CHECK-NEXT:    movfr2gr.d $a1, $fa1
-; CHECK-NEXT:    vinsgr2vr.d $vr0, $a1, 1
+; CHECK-NEXT:    # kill: def $f1_64 killed $f1_64 def $vr1
+; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr0, 0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
 ; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/LoongArch/lsx/fpowi.ll b/llvm/test/CodeGen/LoongArch/lsx/fpowi.ll
index aafef07fbb8f4..79663b63daf08 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/fpowi.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/fpowi.ll
@@ -16,8 +16,8 @@ define <4 x float> @powi_v4f32(<4 x float> %va, i32 %b) nounwind {
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT:    vextrins.w $vr0, $vr0, 0
 ; CHECK-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
 ; CHECK-NEXT:    vld $vr0, $sp, 0 # 16-byte Folded Reload
 ; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 1
@@ -25,29 +25,30 @@ define <4 x float> @powi_v4f32(<4 x float> %va, i32 %b) nounwind {
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
-; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 1
-; CHECK-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT:    vld $vr1, $sp, 16 # 16-byte Folded Reload
+; CHECK-NEXT:    vextrins.w $vr1, $vr0, 16
+; CHECK-NEXT:    vst $vr1, $sp, 16 # 16-byte Folded Spill
 ; CHECK-NEXT:    vld $vr0, $sp, 0 # 16-byte Folded Reload
 ; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 2
 ; CHECK-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
-; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 2
-; CHECK-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT:    vld $vr1, $sp, 16 # 16-byte Folded Reload
+; CHECK-NEXT:    vextrins.w $vr1, $vr0, 32
+; CHECK-NEXT:    vst $vr1, $sp, 16 # 16-byte Folded Spill
 ; CHECK-NEXT:    vld $vr0, $sp, 0 # 16-byte Folded Reload
 ; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 3
 ; CHECK-NEXT:    # kill: def $f0 killed $f0 killed $vr0
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powisf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
-; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 3
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT:    vld $vr1, $sp, 16 # 16-byte Folded Reload
+; CHECK-NEXT:    vextrins.w $vr1, $vr0, 48
+; CHECK-NEXT:    vori.b $vr0, $vr1, 0
 ; CHECK-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
 ; CHECK-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 48
@@ -72,8 +73,8 @@ define <2 x double> @powi_v2f64(<2 x double> %va, i32 %b) nounwind {
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powidf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.d $a0, $fa0
-; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $vr0
+; CHECK-NEXT:    vextrins.d $vr0, $vr0, 0
 ; CHECK-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
 ; CHECK-NEXT:    vld $vr0, $sp, 0 # 16-byte Folded Reload
 ; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 1
@@ -81,9 +82,10 @@ define <2 x double> @powi_v2f64(<2 x double> %va, i32 %b) nounwind {
 ; CHECK-NEXT:    move $a0, $fp
 ; CHECK-NEXT:    pcaddu18i $ra, %call36(__powidf2)
 ; CHECK-NEXT:    jirl $ra, $ra, 0
-; CHECK-NEXT:    movfr2gr.d $a0, $fa0
-; CHECK-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
-; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 1
+; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $vr0
+; CHECK-NEXT:    vld $vr1, $sp, 16 # 16-byte Folded Reload
+; CHECK-NEXT:    vextrins.d $vr1, $vr0, 16
+; CHECK-NEXT:    vori.b $vr0, $vr1, 0
 ; CHECK-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
 ; CHECK-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 48
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
index 7f232073ae129..c73252bd1335f 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
@@ -57,8 +57,8 @@ define void @insert_4xfloat(ptr %src, ptr %dst, float %ins) nounwind {
 ; CHECK-LABEL: insert_4xfloat:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr1, $a0, 0
-; CHECK-NEXT:    movfr2gr.s $a0, $fa0
-; CHECK-NEXT:    vinsgr2vr.w $vr1, $a0, 1
+; CHECK-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT:    vextrins.w $vr1, $vr0, 16
 ; CHECK-NEXT:    vst $vr1, $a1, 0
 ; CHECK-NEXT:    ret
   %v = load volatile <4 x float>, ptr %src
@@ -71,8 +71,8 @@ define void @insert_2xdouble(ptr %src, ptr %dst, double %ins) nounwind {
 ; CHECK-LABEL: insert_2xdouble:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr1, $a0, 0
-; CHECK-NEXT:    movfr2gr.d $a0, $fa0
-; CHECK-NEXT:    vinsgr2vr.d $vr1, $a0, 1
+; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $vr0
+; CHECK-NEXT:    vextrins.d $vr1, $vr0, 16
 ; CHECK-NEXT:    vst $vr1, $a1, 0
 ; CHECK-NEXT:    ret
   %v = load volatile <2 x double>, ptr %src
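(Editorial note, not part of the patch: the LASX side uses xvinsve0.{w/d},
which copies element 0 of the source vector register into the lane named by
the plain immediate, so no index shift is needed there. A minimal sketch with
an illustrative function name:

  define <8 x float> @insert_lane5(<8 x float> %v, float %f) {
    %r = insertelement <8 x float> %v, float %f, i32 5
    ret <8 x float> %r
  }

Assuming %v arrives in $xr0 and %f in $f1, viewed as $xr1, the insert becomes
a single xvinsve0.w $xr0, $xr1, 5. The vori.b/xvori.b with immediate 0 that
now appears before some returns in the updated tests is simply a vector
register-to-register copy moving the accumulated result back into $vr0/$xr0.)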