The reference and GCC signatures (generated with https://gist.github.com/gnzlbg/3a27cca666f4a9f8a51d2eb008ce914c from the GCC docs):
fn add_a_b(i8x16, i8x16) -> i8x16
fn add_a_h(i16x8, i16x8) -> i16x8
fn add_a_w(i32x4, i32x4) -> i32x4
fn add_a_d(i64x2, i64x2) -> i64x2
fn adds_a_b(i8x16, i8x16) -> i8x16
fn adds_a_h(i16x8, i16x8) -> i16x8
fn adds_a_w(i32x4, i32x4) -> i32x4
fn adds_a_d(i64x2, i64x2) -> i64x2
fn adds_s_b(i8x16, i8x16) -> i8x16
fn adds_s_h(i16x8, i16x8) -> i16x8
fn adds_s_w(i32x4, i32x4) -> i32x4
fn adds_s_d(i64x2, i64x2) -> i64x2
fn adds_u_b(u8x16, u8x16) -> u8x16
fn adds_u_h(u16x8, u16x8) -> u16x8
fn adds_u_w(u32x4, u32x4) -> u32x4
fn adds_u_d(u64x2, u64x2) -> u64x2
fn addv_b(i8x16, i8x16) -> i8x16
fn addv_h(i16x8, i16x8) -> i16x8
fn addv_w(i32x4, i32x4) -> i32x4
fn addv_d(i64x2, i64x2) -> i64x2
fn addvi_b(i8x16, imm0_31) -> i8x16
fn addvi_h(i16x8, imm0_31) -> i16x8
fn addvi_w(i32x4, imm0_31) -> i32x4
fn addvi_d(i64x2, imm0_31) -> i64x2
fn and_v(u8x16, u8x16) -> u8x16
fn andi_b(u8x16, imm0_255) -> u8x16
fn asub_s_b(i8x16, i8x16) -> i8x16
fn asub_s_h(i16x8, i16x8) -> i16x8
fn asub_s_w(i32x4, i32x4) -> i32x4
fn asub_s_d(i64x2, i64x2) -> i64x2
fn asub_u_b(u8x16, u8x16) -> u8x16
fn asub_u_h(u16x8, u16x8) -> u16x8
fn asub_u_w(u32x4, u32x4) -> u32x4
fn asub_u_d(u64x2, u64x2) -> u64x2
fn ave_s_b(i8x16, i8x16) -> i8x16
fn ave_s_h(i16x8, i16x8) -> i16x8
fn ave_s_w(i32x4, i32x4) -> i32x4
fn ave_s_d(i64x2, i64x2) -> i64x2
fn ave_u_b(u8x16, u8x16) -> u8x16
fn ave_u_h(u16x8, u16x8) -> u16x8
fn ave_u_w(u32x4, u32x4) -> u32x4
fn ave_u_d(u64x2, u64x2) -> u64x2
fn aver_s_b(i8x16, i8x16) -> i8x16
fn aver_s_h(i16x8, i16x8) -> i16x8
fn aver_s_w(i32x4, i32x4) -> i32x4
fn aver_s_d(i64x2, i64x2) -> i64x2
fn aver_u_b(u8x16, u8x16) -> u8x16
fn aver_u_h(u16x8, u16x8) -> u16x8
fn aver_u_w(u32x4, u32x4) -> u32x4
fn aver_u_d(u64x2, u64x2) -> u64x2
fn bclr_b(u8x16, u8x16) -> u8x16
fn bclr_h(u16x8, u16x8) -> u16x8
fn bclr_w(u32x4, u32x4) -> u32x4
fn bclr_d(u64x2, u64x2) -> u64x2
fn bclri_b(u8x16, imm0_7) -> u8x16
fn bclri_h(u16x8, imm0_15) -> u16x8
fn bclri_w(u32x4, imm0_31) -> u32x4
fn bclri_d(u64x2, imm0_63) -> u64x2
fn binsl_b(u8x16, u8x16, u8x16) -> u8x16
fn binsl_h(u16x8, u16x8, u16x8) -> u16x8
fn binsl_w(u32x4, u32x4, u32x4) -> u32x4
fn binsl_d(u64x2, u64x2, u64x2) -> u64x2
fn binsli_b(u8x16, u8x16, imm0_7) -> u8x16
fn binsli_h(u16x8, u16x8, imm0_15) -> u16x8
fn binsli_w(u32x4, u32x4, imm0_31) -> u32x4
fn binsli_d(u64x2, u64x2, imm0_63) -> u64x2
fn binsr_b(u8x16, u8x16, u8x16) -> u8x16
fn binsr_h(u16x8, u16x8, u16x8) -> u16x8
fn binsr_w(u32x4, u32x4, u32x4) -> u32x4
fn binsr_d(u64x2, u64x2, u64x2) -> u64x2
fn binsri_b(u8x16, u8x16, imm0_7) -> u8x16
fn binsri_h(u16x8, u16x8, imm0_15) -> u16x8
fn binsri_w(u32x4, u32x4, imm0_31) -> u32x4
fn binsri_d(u64x2, u64x2, imm0_63) -> u64x2
fn bmnz_v(u8x16, u8x16, u8x16) -> u8x16
fn bmnzi_b(u8x16, u8x16, imm0_255) -> u8x16
fn bmz_v(u8x16, u8x16, u8x16) -> u8x16
fn bmzi_b(u8x16, u8x16, imm0_255) -> u8x16
fn bneg_b(u8x16, u8x16) -> u8x16
fn bneg_h(u16x8, u16x8) -> u16x8
fn bneg_w(u32x4, u32x4) -> u32x4
fn bneg_d(u64x2, u64x2) -> u64x2
fn bnegi_b(u8x16, imm0_7) -> u8x16
fn bnegi_h(u16x8, imm0_15) -> u16x8
fn bnegi_w(u32x4, imm0_31) -> u32x4
fn bnegi_d(u64x2, imm0_63) -> u64x2
fn bnz_b(u8x16) -> i32
fn bnz_h(u16x8) -> i32
fn bnz_w(u32x4) -> i32
fn bnz_d(u64x2) -> i32
fn bnz_v(u8x16) -> i32
fn bsel_v(u8x16, u8x16, u8x16) -> u8x16
fn bseli_b(u8x16, u8x16, imm0_255) -> u8x16
fn bset_b(u8x16, u8x16) -> u8x16
fn bset_h(u16x8, u16x8) -> u16x8
fn bset_w(u32x4, u32x4) -> u32x4
fn bset_d(u64x2, u64x2) -> u64x2
fn bseti_b(u8x16, imm0_7) -> u8x16
fn bseti_h(u16x8, imm0_15) -> u16x8
fn bseti_w(u32x4, imm0_31) -> u32x4
fn bseti_d(u64x2, imm0_63) -> u64x2
fn bz_b(u8x16) -> i32
fn bz_h(u16x8) -> i32
fn bz_w(u32x4) -> i32
fn bz_d(u64x2) -> i32
fn bz_v(u8x16) -> i32
fn ceq_b(i8x16, i8x16) -> i8x16
fn ceq_h(i16x8, i16x8) -> i16x8
fn ceq_w(i32x4, i32x4) -> i32x4
fn ceq_d(i64x2, i64x2) -> i64x2
fn ceqi_b(i8x16, imm_n16_15) -> i8x16
fn ceqi_h(i16x8, imm_n16_15) -> i16x8
fn ceqi_w(i32x4, imm_n16_15) -> i32x4
fn ceqi_d(i64x2, imm_n16_15) -> i64x2
fn cfcmsa(imm0_31) -> i32
fn cle_s_b(i8x16, i8x16) -> i8x16
fn cle_s_h(i16x8, i16x8) -> i16x8
fn cle_s_w(i32x4, i32x4) -> i32x4
fn cle_s_d(i64x2, i64x2) -> i64x2
fn cle_u_b(u8x16, u8x16) -> i8x16
fn cle_u_h(u16x8, u16x8) -> i16x8
fn cle_u_w(u32x4, u32x4) -> i32x4
fn cle_u_d(u64x2, u64x2) -> i64x2
fn clei_s_b(i8x16, imm_n16_15) -> i8x16
fn clei_s_h(i16x8, imm_n16_15) -> i16x8
fn clei_s_w(i32x4, imm_n16_15) -> i32x4
fn clei_s_d(i64x2, imm_n16_15) -> i64x2
fn clei_u_b(u8x16, imm0_31) -> i8x16
fn clei_u_h(u16x8, imm0_31) -> i16x8
fn clei_u_w(u32x4, imm0_31) -> i32x4
fn clei_u_d(u64x2, imm0_31) -> i64x2
fn clt_s_b(i8x16, i8x16) -> i8x16
fn clt_s_h(i16x8, i16x8) -> i16x8
fn clt_s_w(i32x4, i32x4) -> i32x4
fn clt_s_d(i64x2, i64x2) -> i64x2
fn clt_u_b(u8x16, u8x16) -> i8x16
fn clt_u_h(u16x8, u16x8) -> i16x8
fn clt_u_w(u32x4, u32x4) -> i32x4
fn clt_u_d(u64x2, u64x2) -> i64x2
fn clti_s_b(i8x16, imm_n16_15) -> i8x16
fn clti_s_h(i16x8, imm_n16_15) -> i16x8
fn clti_s_w(i32x4, imm_n16_15) -> i32x4
fn clti_s_d(i64x2, imm_n16_15) -> i64x2
fn clti_u_b(u8x16, imm0_31) -> i8x16
fn clti_u_h(u16x8, imm0_31) -> i16x8
fn clti_u_w(u32x4, imm0_31) -> i32x4
fn clti_u_d(u64x2, imm0_31) -> i64x2
fn copy_s_b(i8x16, imm0_15) -> i32
fn copy_s_h(i16x8, imm0_7) -> i32
fn copy_s_w(i32x4, imm0_3) -> i32
fn copy_s_d(i64x2, imm0_1) -> i64
fn copy_u_b(i8x16, imm0_15) -> u32
fn copy_u_h(i16x8, imm0_7) -> u32
fn copy_u_w(i32x4, imm0_3) -> u32
fn copy_u_d(i64x2, imm0_1) -> u64
fn ctcmsa(imm0_31, i32) -> ()
fn div_s_b(i8x16, i8x16) -> i8x16
fn div_s_h(i16x8, i16x8) -> i16x8
fn div_s_w(i32x4, i32x4) -> i32x4
fn div_s_d(i64x2, i64x2) -> i64x2
fn div_u_b(u8x16, u8x16) -> u8x16
fn div_u_h(u16x8, u16x8) -> u16x8
fn div_u_w(u32x4, u32x4) -> u32x4
fn div_u_d(u64x2, u64x2) -> u64x2
fn dotp_s_h(i8x16, i8x16) -> i16x8
fn dotp_s_w(i16x8, i16x8) -> i32x4
fn dotp_s_d(i32x4, i32x4) -> i64x2
fn dotp_u_h(u8x16, u8x16) -> u16x8
fn dotp_u_w(u16x8, u16x8) -> u32x4
fn dotp_u_d(u32x4, u32x4) -> u64x2
fn dpadd_s_h(i16x8, i8x16, i8x16) -> i16x8
fn dpadd_s_w(i32x4, i16x8, i16x8) -> i32x4
fn dpadd_s_d(i64x2, i32x4, i32x4) -> i64x2
fn dpadd_u_h(u16x8, u8x16, u8x16) -> u16x8
fn dpadd_u_w(u32x4, u16x8, u16x8) -> u32x4
fn dpadd_u_d(u64x2, u32x4, u32x4) -> u64x2
fn dpsub_s_h(i16x8, i8x16, i8x16) -> i16x8
fn dpsub_s_w(i32x4, i16x8, i16x8) -> i32x4
fn dpsub_s_d(i64x2, i32x4, i32x4) -> i64x2
fn dpsub_u_h(i16x8, u8x16, u8x16) -> i16x8
fn dpsub_u_w(i32x4, u16x8, u16x8) -> i32x4
fn dpsub_u_d(i64x2, u32x4, u32x4) -> i64x2
fn fadd_w(f32x4, f32x4) -> f32x4
fn fadd_d(f64x2, f64x2) -> f64x2
fn fcaf_w(f32x4, f32x4) -> i32x4
fn fcaf_d(f64x2, f64x2) -> i64x2
fn fceq_w(f32x4, f32x4) -> i32x4
fn fceq_d(f64x2, f64x2) -> i64x2
fn fclass_w(f32x4) -> i32x4
fn fclass_d(f64x2) -> i64x2
fn fcle_w(f32x4, f32x4) -> i32x4
fn fcle_d(f64x2, f64x2) -> i64x2
fn fclt_w(f32x4, f32x4) -> i32x4
fn fclt_d(f64x2, f64x2) -> i64x2
fn fcne_w(f32x4, f32x4) -> i32x4
fn fcne_d(f64x2, f64x2) -> i64x2
fn fcor_w(f32x4, f32x4) -> i32x4
fn fcor_d(f64x2, f64x2) -> i64x2
fn fcueq_w(f32x4, f32x4) -> i32x4
fn fcueq_d(f64x2, f64x2) -> i64x2
fn fcule_w(f32x4, f32x4) -> i32x4
fn fcule_d(f64x2, f64x2) -> i64x2
fn fcult_w(f32x4, f32x4) -> i32x4
fn fcult_d(f64x2, f64x2) -> i64x2
fn fcun_w(f32x4, f32x4) -> i32x4
fn fcun_d(f64x2, f64x2) -> i64x2
fn fcune_w(f32x4, f32x4) -> i32x4
fn fcune_d(f64x2, f64x2) -> i64x2
fn fdiv_w(f32x4, f32x4) -> f32x4
fn fdiv_d(f64x2, f64x2) -> f64x2
fn fexdo_h(f32x4, f32x4) -> i16x8
fn fexdo_w(f64x2, f64x2) -> f32x4
fn fexp2_w(f32x4, i32x4) -> f32x4
fn fexp2_d(f64x2, i64x2) -> f64x2
fn fexupl_w(i16x8) -> f32x4
fn fexupl_d(f32x4) -> f64x2
fn fexupr_w(i16x8) -> f32x4
fn fexupr_d(f32x4) -> f64x2
fn ffint_s_w(i32x4) -> f32x4
fn ffint_s_d(i64x2) -> f64x2
fn ffint_u_w(u32x4) -> f32x4
fn ffint_u_d(u64x2) -> f64x2
fn ffql_w(i16x8) -> f32x4
fn ffql_d(i32x4) -> f64x2
fn ffqr_w(i16x8) -> f32x4
fn ffqr_d(i32x4) -> f64x2
fn fill_b(i32) -> i8x16
fn fill_h(i32) -> i16x8
fn fill_w(i32) -> i32x4
fn fill_d(i64) -> i64x2
fn flog2_w(f32x4) -> f32x4
fn flog2_d(f64x2) -> f64x2
fn fmadd_w(f32x4, f32x4, f32x4) -> f32x4
fn fmadd_d(f64x2, f64x2, f64x2) -> f64x2
fn fmax_w(f32x4, f32x4) -> f32x4
fn fmax_d(f64x2, f64x2) -> f64x2
fn fmax_a_w(f32x4, f32x4) -> f32x4
fn fmax_a_d(f64x2, f64x2) -> f64x2
fn fmin_w(f32x4, f32x4) -> f32x4
fn fmin_d(f64x2, f64x2) -> f64x2
fn fmin_a_w(f32x4, f32x4) -> f32x4
fn fmin_a_d(f64x2, f64x2) -> f64x2
fn fmsub_w(f32x4, f32x4, f32x4) -> f32x4
fn fmsub_d(f64x2, f64x2, f64x2) -> f64x2
fn fmul_w(f32x4, f32x4) -> f32x4
fn fmul_d(f64x2, f64x2) -> f64x2
fn frint_w(f32x4) -> f32x4
fn frint_d(f64x2) -> f64x2
fn frcp_w(f32x4) -> f32x4
fn frcp_d(f64x2) -> f64x2
fn frsqrt_w(f32x4) -> f32x4
fn frsqrt_d(f64x2) -> f64x2
fn fsaf_w(f32x4, f32x4) -> i32x4
fn fsaf_d(f64x2, f64x2) -> i64x2
fn fseq_w(f32x4, f32x4) -> i32x4
fn fseq_d(f64x2, f64x2) -> i64x2
fn fsle_w(f32x4, f32x4) -> i32x4
fn fsle_d(f64x2, f64x2) -> i64x2
fn fslt_w(f32x4, f32x4) -> i32x4
fn fslt_d(f64x2, f64x2) -> i64x2
fn fsne_w(f32x4, f32x4) -> i32x4
fn fsne_d(f64x2, f64x2) -> i64x2
fn fsor_w(f32x4, f32x4) -> i32x4
fn fsor_d(f64x2, f64x2) -> i64x2
fn fsqrt_w(f32x4) -> f32x4
fn fsqrt_d(f64x2) -> f64x2
fn fsub_w(f32x4, f32x4) -> f32x4
fn fsub_d(f64x2, f64x2) -> f64x2
fn fsueq_w(f32x4, f32x4) -> i32x4
fn fsueq_d(f64x2, f64x2) -> i64x2
fn fsule_w(f32x4, f32x4) -> i32x4
fn fsule_d(f64x2, f64x2) -> i64x2
fn fsult_w(f32x4, f32x4) -> i32x4
fn fsult_d(f64x2, f64x2) -> i64x2
fn fsun_w(f32x4, f32x4) -> i32x4
fn fsun_d(f64x2, f64x2) -> i64x2
fn fsune_w(f32x4, f32x4) -> i32x4
fn fsune_d(f64x2, f64x2) -> i64x2
fn ftint_s_w(f32x4) -> i32x4
fn ftint_s_d(f64x2) -> i64x2
fn ftint_u_w(f32x4) -> u32x4
fn ftint_u_d(f64x2) -> u64x2
fn ftq_h(f32x4, f32x4) -> i16x8
fn ftq_w(f64x2, f64x2) -> i32x4
fn ftrunc_s_w(f32x4) -> i32x4
fn ftrunc_s_d(f64x2) -> i64x2
fn ftrunc_u_w(f32x4) -> u32x4
fn ftrunc_u_d(f64x2) -> u64x2
fn hadd_s_h(i8x16, i8x16) -> i16x8
fn hadd_s_w(i16x8, i16x8) -> i32x4
fn hadd_s_d(i32x4, i32x4) -> i64x2
fn hadd_u_h(u8x16, u8x16) -> u16x8
fn hadd_u_w(u16x8, u16x8) -> u32x4
fn hadd_u_d(u32x4, u32x4) -> u64x2
fn hsub_s_h(i8x16, i8x16) -> i16x8
fn hsub_s_w(i16x8, i16x8) -> i32x4
fn hsub_s_d(i32x4, i32x4) -> i64x2
fn hsub_u_h(u8x16, u8x16) -> i16x8
fn hsub_u_w(u16x8, u16x8) -> i32x4
fn hsub_u_d(u32x4, u32x4) -> i64x2
fn ilvev_b(i8x16, i8x16) -> i8x16
fn ilvev_h(i16x8, i16x8) -> i16x8
fn ilvev_w(i32x4, i32x4) -> i32x4
fn ilvev_d(i64x2, i64x2) -> i64x2
fn ilvl_b(i8x16, i8x16) -> i8x16
fn ilvl_h(i16x8, i16x8) -> i16x8
fn ilvl_w(i32x4, i32x4) -> i32x4
fn ilvl_d(i64x2, i64x2) -> i64x2
fn ilvod_b(i8x16, i8x16) -> i8x16
fn ilvod_h(i16x8, i16x8) -> i16x8
fn ilvod_w(i32x4, i32x4) -> i32x4
fn ilvod_d(i64x2, i64x2) -> i64x2
fn ilvr_b(i8x16, i8x16) -> i8x16
fn ilvr_h(i16x8, i16x8) -> i16x8
fn ilvr_w(i32x4, i32x4) -> i32x4
fn ilvr_d(i64x2, i64x2) -> i64x2
fn insert_b(i8x16, imm0_15, i32) -> i8x16
fn insert_h(i16x8, imm0_7, i32) -> i16x8
fn insert_w(i32x4, imm0_3, i32) -> i32x4
fn insert_d(i64x2, imm0_1, i64) -> i64x2
fn insve_b(i8x16, imm0_15, i8x16) -> i8x16
fn insve_h(i16x8, imm0_7, i16x8) -> i16x8
fn insve_w(i32x4, imm0_3, i32x4) -> i32x4
fn insve_d(i64x2, imm0_1, i64x2) -> i64x2
fn ld_b(*mut c_void, imm_n512_511) -> i8x16
fn ld_h(*mut c_void, imm_n1024_1022) -> i16x8
fn ld_w(*mut c_void, imm_n2048_2044) -> i32x4
fn ld_d(*mut c_void, imm_n4096_4088) -> i64x2
fn ldi_b(imm_n512_511) -> i8x16
fn ldi_h(imm_n512_511) -> i16x8
fn ldi_w(imm_n512_511) -> i32x4
fn ldi_d(imm_n512_511) -> i64x2
fn madd_q_h(i16x8, i16x8, i16x8) -> i16x8
fn madd_q_w(i32x4, i32x4, i32x4) -> i32x4
fn maddr_q_h(i16x8, i16x8, i16x8) -> i16x8
fn maddr_q_w(i32x4, i32x4, i32x4) -> i32x4
fn maddv_b(i8x16, i8x16, i8x16) -> i8x16
fn maddv_h(i16x8, i16x8, i16x8) -> i16x8
fn maddv_w(i32x4, i32x4, i32x4) -> i32x4
fn maddv_d(i64x2, i64x2, i64x2) -> i64x2
fn max_a_b(i8x16, i8x16) -> i8x16
fn max_a_h(i16x8, i16x8) -> i16x8
fn max_a_w(i32x4, i32x4) -> i32x4
fn max_a_d(i64x2, i64x2) -> i64x2
fn max_s_b(i8x16, i8x16) -> i8x16
fn max_s_h(i16x8, i16x8) -> i16x8
fn max_s_w(i32x4, i32x4) -> i32x4
fn max_s_d(i64x2, i64x2) -> i64x2
fn max_u_b(u8x16, u8x16) -> u8x16
fn max_u_h(u16x8, u16x8) -> u16x8
fn max_u_w(u32x4, u32x4) -> u32x4
fn max_u_d(u64x2, u64x2) -> u64x2
fn maxi_s_b(i8x16, imm_n16_15) -> i8x16
fn maxi_s_h(i16x8, imm_n16_15) -> i16x8
fn maxi_s_w(i32x4, imm_n16_15) -> i32x4
fn maxi_s_d(i64x2, imm_n16_15) -> i64x2
fn maxi_u_b(u8x16, imm0_31) -> u8x16
fn maxi_u_h(u16x8, imm0_31) -> u16x8
fn maxi_u_w(u32x4, imm0_31) -> u32x4
fn maxi_u_d(u64x2, imm0_31) -> u64x2
fn min_a_b(i8x16, i8x16) -> i8x16
fn min_a_h(i16x8, i16x8) -> i16x8
fn min_a_w(i32x4, i32x4) -> i32x4
fn min_a_d(i64x2, i64x2) -> i64x2
fn min_s_b(i8x16, i8x16) -> i8x16
fn min_s_h(i16x8, i16x8) -> i16x8
fn min_s_w(i32x4, i32x4) -> i32x4
fn min_s_d(i64x2, i64x2) -> i64x2
fn min_u_b(u8x16, u8x16) -> u8x16
fn min_u_h(u16x8, u16x8) -> u16x8
fn min_u_w(u32x4, u32x4) -> u32x4
fn min_u_d(u64x2, u64x2) -> u64x2
fn mini_s_b(i8x16, imm_n16_15) -> i8x16
fn mini_s_h(i16x8, imm_n16_15) -> i16x8
fn mini_s_w(i32x4, imm_n16_15) -> i32x4
fn mini_s_d(i64x2, imm_n16_15) -> i64x2
fn mini_u_b(u8x16, imm0_31) -> u8x16
fn mini_u_h(u16x8, imm0_31) -> u16x8
fn mini_u_w(u32x4, imm0_31) -> u32x4
fn mini_u_d(u64x2, imm0_31) -> u64x2
fn mod_s_b(i8x16, i8x16) -> i8x16
fn mod_s_h(i16x8, i16x8) -> i16x8
fn mod_s_w(i32x4, i32x4) -> i32x4
fn mod_s_d(i64x2, i64x2) -> i64x2
fn mod_u_b(u8x16, u8x16) -> u8x16
fn mod_u_h(u16x8, u16x8) -> u16x8
fn mod_u_w(u32x4, u32x4) -> u32x4
fn mod_u_d(u64x2, u64x2) -> u64x2
fn move_v(i8x16) -> i8x16
fn msub_q_h(i16x8, i16x8, i16x8) -> i16x8
fn msub_q_w(i32x4, i32x4, i32x4) -> i32x4
fn msubr_q_h(i16x8, i16x8, i16x8) -> i16x8
fn msubr_q_w(i32x4, i32x4, i32x4) -> i32x4
fn msubv_b(i8x16, i8x16, i8x16) -> i8x16
fn msubv_h(i16x8, i16x8, i16x8) -> i16x8
fn msubv_w(i32x4, i32x4, i32x4) -> i32x4
fn msubv_d(i64x2, i64x2, i64x2) -> i64x2
fn mul_q_h(i16x8, i16x8) -> i16x8
fn mul_q_w(i32x4, i32x4) -> i32x4
fn mulr_q_h(i16x8, i16x8) -> i16x8
fn mulr_q_w(i32x4, i32x4) -> i32x4
fn mulv_b(i8x16, i8x16) -> i8x16
fn mulv_h(i16x8, i16x8) -> i16x8
fn mulv_w(i32x4, i32x4) -> i32x4
fn mulv_d(i64x2, i64x2) -> i64x2
fn nloc_b(i8x16) -> i8x16
fn nloc_h(i16x8) -> i16x8
fn nloc_w(i32x4) -> i32x4
fn nloc_d(i64x2) -> i64x2
fn nlzc_b(i8x16) -> i8x16
fn nlzc_h(i16x8) -> i16x8
fn nlzc_w(i32x4) -> i32x4
fn nlzc_d(i64x2) -> i64x2
fn nor_v(u8x16, u8x16) -> u8x16
fn nori_b(u8x16, imm0_255) -> u8x16
fn or_v(u8x16, u8x16) -> u8x16
fn ori_b(u8x16, imm0_255) -> u8x16
fn pckev_b(i8x16, i8x16) -> i8x16
fn pckev_h(i16x8, i16x8) -> i16x8
fn pckev_w(i32x4, i32x4) -> i32x4
fn pckev_d(i64x2, i64x2) -> i64x2
fn pckod_b(i8x16, i8x16) -> i8x16
fn pckod_h(i16x8, i16x8) -> i16x8
fn pckod_w(i32x4, i32x4) -> i32x4
fn pckod_d(i64x2, i64x2) -> i64x2
fn pcnt_b(i8x16) -> i8x16
fn pcnt_h(i16x8) -> i16x8
fn pcnt_w(i32x4) -> i32x4
fn pcnt_d(i64x2) -> i64x2
fn sat_s_b(i8x16, imm0_7) -> i8x16
fn sat_s_h(i16x8, imm0_15) -> i16x8
fn sat_s_w(i32x4, imm0_31) -> i32x4
fn sat_s_d(i64x2, imm0_63) -> i64x2
fn sat_u_b(u8x16, imm0_7) -> u8x16
fn sat_u_h(u16x8, imm0_15) -> u16x8
fn sat_u_w(u32x4, imm0_31) -> u32x4
fn sat_u_d(u64x2, imm0_63) -> u64x2
fn shf_b(i8x16, imm0_255) -> i8x16
fn shf_h(i16x8, imm0_255) -> i16x8
fn shf_w(i32x4, imm0_255) -> i32x4
fn sld_b(i8x16, i8x16, i32) -> i8x16
fn sld_h(i16x8, i16x8, i32) -> i16x8
fn sld_w(i32x4, i32x4, i32) -> i32x4
fn sld_d(i64x2, i64x2, i32) -> i64x2
fn sldi_b(i8x16, i8x16, imm0_15) -> i8x16
fn sldi_h(i16x8, i16x8, imm0_7) -> i16x8
fn sldi_w(i32x4, i32x4, imm0_3) -> i32x4
fn sldi_d(i64x2, i64x2, imm0_1) -> i64x2
fn sll_b(i8x16, i8x16) -> i8x16
fn sll_h(i16x8, i16x8) -> i16x8
fn sll_w(i32x4, i32x4) -> i32x4
fn sll_d(i64x2, i64x2) -> i64x2
fn slli_b(i8x16, imm0_7) -> i8x16
fn slli_h(i16x8, imm0_15) -> i16x8
fn slli_w(i32x4, imm0_31) -> i32x4
fn slli_d(i64x2, imm0_63) -> i64x2
fn splat_b(i8x16, i32) -> i8x16
fn splat_h(i16x8, i32) -> i16x8
fn splat_w(i32x4, i32) -> i32x4
fn splat_d(i64x2, i32) -> i64x2
fn splati_b(i8x16, imm0_15) -> i8x16
fn splati_h(i16x8, imm0_7) -> i16x8
fn splati_w(i32x4, imm0_3) -> i32x4
fn splati_d(i64x2, imm0_1) -> i64x2
fn sra_b(i8x16, i8x16) -> i8x16
fn sra_h(i16x8, i16x8) -> i16x8
fn sra_w(i32x4, i32x4) -> i32x4
fn sra_d(i64x2, i64x2) -> i64x2
fn srai_b(i8x16, imm0_7) -> i8x16
fn srai_h(i16x8, imm0_15) -> i16x8
fn srai_w(i32x4, imm0_31) -> i32x4
fn srai_d(i64x2, imm0_63) -> i64x2
fn srar_b(i8x16, i8x16) -> i8x16
fn srar_h(i16x8, i16x8) -> i16x8
fn srar_w(i32x4, i32x4) -> i32x4
fn srar_d(i64x2, i64x2) -> i64x2
fn srari_b(i8x16, imm0_7) -> i8x16
fn srari_h(i16x8, imm0_15) -> i16x8
fn srari_w(i32x4, imm0_31) -> i32x4
fn srari_d(i64x2, imm0_63) -> i64x2
fn srl_b(i8x16, i8x16) -> i8x16
fn srl_h(i16x8, i16x8) -> i16x8
fn srl_w(i32x4, i32x4) -> i32x4
fn srl_d(i64x2, i64x2) -> i64x2
fn srli_b(i8x16, imm0_7) -> i8x16
fn srli_h(i16x8, imm0_15) -> i16x8
fn srli_w(i32x4, imm0_31) -> i32x4
fn srli_d(i64x2, imm0_63) -> i64x2
fn srlr_b(i8x16, i8x16) -> i8x16
fn srlr_h(i16x8, i16x8) -> i16x8
fn srlr_w(i32x4, i32x4) -> i32x4
fn srlr_d(i64x2, i64x2) -> i64x2
fn srlri_b(i8x16, imm0_7) -> i8x16
fn srlri_h(i16x8, imm0_15) -> i16x8
fn srlri_w(i32x4, imm0_31) -> i32x4
fn srlri_d(i64x2, imm0_63) -> i64x2
fn st_b(i8x16, *mut c_void, imm_n512_511) -> ()
fn st_h(i16x8, *mut c_void, imm_n1024_1022) -> ()
fn st_w(i32x4, *mut c_void, imm_n2048_2044) -> ()
fn st_d(i64x2, *mut c_void, imm_n4096_4088) -> ()
fn subs_s_b(i8x16, i8x16) -> i8x16
fn subs_s_h(i16x8, i16x8) -> i16x8
fn subs_s_w(i32x4, i32x4) -> i32x4
fn subs_s_d(i64x2, i64x2) -> i64x2
fn subs_u_b(u8x16, u8x16) -> u8x16
fn subs_u_h(u16x8, u16x8) -> u16x8
fn subs_u_w(u32x4, u32x4) -> u32x4
fn subs_u_d(u64x2, u64x2) -> u64x2
fn subsus_u_b(u8x16, i8x16) -> u8x16
fn subsus_u_h(u16x8, i16x8) -> u16x8
fn subsus_u_w(u32x4, i32x4) -> u32x4
fn subsus_u_d(u64x2, i64x2) -> u64x2
fn subsuu_s_b(u8x16, u8x16) -> i8x16
fn subsuu_s_h(u16x8, u16x8) -> i16x8
fn subsuu_s_w(u32x4, u32x4) -> i32x4
fn subsuu_s_d(u64x2, u64x2) -> i64x2
fn subv_b(i8x16, i8x16) -> i8x16
fn subv_h(i16x8, i16x8) -> i16x8
fn subv_w(i32x4, i32x4) -> i32x4
fn subv_d(i64x2, i64x2) -> i64x2
fn subvi_b(i8x16, imm0_31) -> i8x16
fn subvi_h(i16x8, imm0_31) -> i16x8
fn subvi_w(i32x4, imm0_31) -> i32x4
fn subvi_d(i64x2, imm0_31) -> i64x2
fn vshf_b(i8x16, i8x16, i8x16) -> i8x16
fn vshf_h(i16x8, i16x8, i16x8) -> i16x8
fn vshf_w(i32x4, i32x4, i32x4) -> i32x4
fn vshf_d(i64x2, i64x2, i64x2) -> i64x2
fn xor_v(u8x16, u8x16) -> u8x16
fn xori_b(u8x16, imm0_255) -> u8x16
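For context, here is roughly what binding one of these would look like — a minimal sketch, assuming the usual stdsimd pattern of declaring the LLVM intrinsic (llvm.mips.add.a.b, which should correspond to GCC's __builtin_msa_add_a_b) and wrapping it in a #[target_feature(enable = "msa")] function. The i8x16 type, feature gates, and wrapper name below are illustrative, not the final API:

#![feature(repr_simd, simd_ffi, link_llvm_intrinsics)]

// Stand-in for the crate's internal 16-lane i8 vector type.
#[allow(non_camel_case_types)]
#[repr(simd)]
#[derive(Copy, Clone)]
pub struct i8x16(
    i8, i8, i8, i8, i8, i8, i8, i8,
    i8, i8, i8, i8, i8, i8, i8, i8,
);

#[allow(improper_ctypes)]
extern "C" {
    // LLVM's MSA intrinsic for ADD_A.B (vector add of absolute values).
    #[link_name = "llvm.mips.add.a.b"]
    fn msa_add_a_b(a: i8x16, b: i8x16) -> i8x16;
}

/// Adds the absolute values of corresponding i8 lanes of `a` and `b`.
#[inline]
#[target_feature(enable = "msa")]
pub unsafe fn __msa_add_a_b(a: i8x16, b: i8x16) -> i8x16 {
    msa_add_a_b(a, b)
}

The *i (immediate) variants, e.g. addvi_b(i8x16, imm0_31), additionally require their immediate operand to be a compile-time constant in the stated range, so those wrappers would presumably need a constant-range check on top of this pattern (via a macro or const generics).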