qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

target/arm: Split VMINMAXNM decode

Passing the raw op field from the manual is less instructive
than it might be. Do the full decode and use the existing
helpers to perform the expansion.

Since these are v8 insns, VECLEN+VECSTRIDE are already RES0.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200224222232.13807-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

Authored by Richard Henderson and committed by Peter Maydell.
f2eafb75 d486f830

+44 -77
+36 -73
target/arm/translate-vfp.inc.c
··· 322 322 return true; 323 323 } 324 324 325 - static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a) 326 - { 327 - uint32_t rd, rn, rm; 328 - bool dp = a->dp; 329 - bool vmin = a->op; 330 - TCGv_ptr fpst; 331 - 332 - if (!dc_isar_feature(aa32_vminmaxnm, s)) { 333 - return false; 334 - } 335 - 336 - if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) { 337 - return false; 338 - } 339 - 340 - /* UNDEF accesses to D16-D31 if they don't exist */ 341 - if (dp && !dc_isar_feature(aa32_simd_r32, s) && 342 - ((a->vm | a->vn | a->vd) & 0x10)) { 343 - return false; 344 - } 345 - 346 - rd = a->vd; 347 - rn = a->vn; 348 - rm = a->vm; 349 - 350 - if (!vfp_access_check(s)) { 351 - return true; 352 - } 353 - 354 - fpst = get_fpstatus_ptr(0); 355 - 356 - if (dp) { 357 - TCGv_i64 frn, frm, dest; 358 - 359 - frn = tcg_temp_new_i64(); 360 - frm = tcg_temp_new_i64(); 361 - dest = tcg_temp_new_i64(); 362 - 363 - neon_load_reg64(frn, rn); 364 - neon_load_reg64(frm, rm); 365 - if (vmin) { 366 - gen_helper_vfp_minnumd(dest, frn, frm, fpst); 367 - } else { 368 - gen_helper_vfp_maxnumd(dest, frn, frm, fpst); 369 - } 370 - neon_store_reg64(dest, rd); 371 - tcg_temp_free_i64(frn); 372 - tcg_temp_free_i64(frm); 373 - tcg_temp_free_i64(dest); 374 - } else { 375 - TCGv_i32 frn, frm, dest; 376 - 377 - frn = tcg_temp_new_i32(); 378 - frm = tcg_temp_new_i32(); 379 - dest = tcg_temp_new_i32(); 380 - 381 - neon_load_reg32(frn, rn); 382 - neon_load_reg32(frm, rm); 383 - if (vmin) { 384 - gen_helper_vfp_minnums(dest, frn, frm, fpst); 385 - } else { 386 - gen_helper_vfp_maxnums(dest, frn, frm, fpst); 387 - } 388 - neon_store_reg32(dest, rd); 389 - tcg_temp_free_i32(frn); 390 - tcg_temp_free_i32(frm); 391 - tcg_temp_free_i32(dest); 392 - } 393 - 394 - tcg_temp_free_ptr(fpst); 395 - return true; 396 - } 397 - 398 325 /* 399 326 * Table for converting the most common AArch32 encoding of 400 327 * rounding mode to arm_fprounding order (which matches the ··· 1782 1709 static bool 
trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a) 1783 1710 { 1784 1711 return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false); 1712 + } 1713 + 1714 + static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a) 1715 + { 1716 + if (!dc_isar_feature(aa32_vminmaxnm, s)) { 1717 + return false; 1718 + } 1719 + return do_vfp_3op_sp(s, gen_helper_vfp_minnums, 1720 + a->vd, a->vn, a->vm, false); 1721 + } 1722 + 1723 + static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a) 1724 + { 1725 + if (!dc_isar_feature(aa32_vminmaxnm, s)) { 1726 + return false; 1727 + } 1728 + return do_vfp_3op_sp(s, gen_helper_vfp_maxnums, 1729 + a->vd, a->vn, a->vm, false); 1730 + } 1731 + 1732 + static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a) 1733 + { 1734 + if (!dc_isar_feature(aa32_vminmaxnm, s)) { 1735 + return false; 1736 + } 1737 + return do_vfp_3op_dp(s, gen_helper_vfp_minnumd, 1738 + a->vd, a->vn, a->vm, false); 1739 + } 1740 + 1741 + static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a) 1742 + { 1743 + if (!dc_isar_feature(aa32_vminmaxnm, s)) { 1744 + return false; 1745 + } 1746 + return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd, 1747 + a->vd, a->vn, a->vm, false); 1785 1748 } 1786 1749 1787 1750 static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
+8 -4
target/arm/vfp-uncond.decode
··· 41 41 %vd_dp 22:1 12:4 42 42 %vd_sp 12:4 22:1 43 43 44 + @vfp_dnm_s ................................ vm=%vm_sp vn=%vn_sp vd=%vd_sp 45 + @vfp_dnm_d ................................ vm=%vm_dp vn=%vn_dp vd=%vd_dp 46 + 44 47 VSEL 1111 1110 0. cc:2 .... .... 1010 .0.0 .... \ 45 48 vm=%vm_sp vn=%vn_sp vd=%vd_sp dp=0 46 49 VSEL 1111 1110 0. cc:2 .... .... 1011 .0.0 .... \ 47 50 vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1 48 51 49 - VMINMAXNM 1111 1110 1.00 .... .... 1010 . op:1 .0 .... \ 50 - vm=%vm_sp vn=%vn_sp vd=%vd_sp dp=0 51 - VMINMAXNM 1111 1110 1.00 .... .... 1011 . op:1 .0 .... \ 52 - vm=%vm_dp vn=%vn_dp vd=%vd_dp dp=1 52 + VMAXNM_sp 1111 1110 1.00 .... .... 1010 .0.0 .... @vfp_dnm_s 53 + VMINNM_sp 1111 1110 1.00 .... .... 1010 .1.0 .... @vfp_dnm_s 54 + 55 + VMAXNM_dp 1111 1110 1.00 .... .... 1011 .0.0 .... @vfp_dnm_d 56 + VMINNM_dp 1111 1110 1.00 .... .... 1011 .1.0 .... @vfp_dnm_d 53 57 54 58 VRINT 1111 1110 1.11 10 rm:2 .... 1010 01.0 .... \ 55 59 vm=%vm_sp vd=%vd_sp dp=0