diff --git a/test/CodeGen/X86/avx2-gather.ll b/test/CodeGen/X86/avx2-gather.ll
index a7da2fcc91d..9983e7dcc83 100644
--- a/test/CodeGen/X86/avx2-gather.ll
+++ b/test/CodeGen/X86/avx2-gather.ll
@@ -145,3 +145,30 @@ define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
   %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> zeroinitializer, i8* %arg0, <4 x i32> %arg1, <2 x double> %mask, i8 2)
   ret <2 x double> %res
 }
+
+@x = global [1024 x float] zeroinitializer, align 16
+
+define <4 x float> @gather_global(<4 x i64>, i32* nocapture readnone) {
+; X32-LABEL: gather_global:
+; X32:       # %bb.0:
+; X32-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT:    movl $x, %eax
+; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vgatherqps %xmm2, (%eax,%ymm0,4), %xmm1
+; X32-NEXT:    vmovaps %xmm1, %xmm0
+; X32-NEXT:    vzeroupper
+; X32-NEXT:    retl
+;
+; X64-LABEL: gather_global:
+; X64:       # %bb.0:
+; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT:    movl $x, %eax
+; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vgatherqps %xmm2, (%rax,%ymm0,4), %xmm1
+; X64-NEXT:    vmovaps %xmm1, %xmm0
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+  %3 = tail call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> zeroinitializer, i8* bitcast ([1024 x float]* @x to i8*), <4 x i64> %0, <4 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 4)
+  ret <4 x float> %3
+}
+declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, i8*, <4 x i64>, <4 x float>, i8)
diff --git a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index 7228a129e9a..038abca371b 100644
--- a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -875,6 +875,21 @@ define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %b
   ret <16 x float> %res6
 }
 
+@x = global [1024 x float] zeroinitializer, align 16
+
+define <8 x float> @gather_global(<8 x i64>, i32* nocapture readnone) {
+; CHECK-LABEL: gather_global:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    kxnorw %k0, %k0, %k1
+; CHECK-NEXT:    movl $x, %eax
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vgatherqps (%rax,%zmm0,4), %ymm1 {%k1}
+; CHECK-NEXT:    vmovaps %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %3 = tail call <8 x float> @llvm.x86.avx512.mask.gather.qps.512(<8 x float> zeroinitializer, i8* bitcast ([1024 x float]* @x to i8*), <8 x i64> %0, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i32 4)
+  ret <8 x float> %3
+}
+
 declare <16 x float> @llvm.x86.avx512.mask.gather.dps.512(<16 x float>, i8*, <16 x i32>, <16 x i1>, i32)
 declare <8 x double> @llvm.x86.avx512.mask.gather.dpd.512(<8 x double>, i8*, <8 x i32>, <8 x i1>, i32)
 declare <8 x float> @llvm.x86.avx512.mask.gather.qps.512(<8 x float>, i8*, <8 x i64>, <8 x i1>, i32)
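
A minimal C sketch of the source-level pattern these tests pin down, assuming clang with -O2 -mavx2; the function name gather_from_global is illustrative, not part of the patch. _mm256_i64gather_ps is the user-facing intrinsic that clang lowers to @llvm.x86.avx2.gather.q.ps.256 with an all-ones mask, so gathering from a global array like this should reproduce the movl $x, %eax plus vgatherqps sequence checked in gather_global above.

#include <immintrin.h>

float x[1024] __attribute__((aligned(16)));

/* Gather four floats from the global array x at the four 64-bit indices
   in idx, scaled by sizeof(float) == 4. Under the assumptions above, the
   base address of x folds into the gather's memory operand. */
__m128 gather_from_global(__m256i idx) {
    return _mm256_i64gather_ps(x, idx, 4);
}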