qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

target/ppc: improve VSX_FMADD with new GEN_VSX_HELPER_VSX_MADD macro

Introduce a new GEN_VSX_HELPER_VSX_MADD macro for the generator function which
enables the source and destination registers to be decoded at translation time.

This enables the determination of the A-form or M-form variant to be made at
translation time, so that a single helper function can now be used for both variants.

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20190616123751.781-16-mark.cave-ayland@ilande.co.uk>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>

Authored by Mark Cave-Ayland and committed by David Gibson
c9f4e4d8 5ba5335d

+120 -143
+19 -45
target/ppc/fpu_helper.c
··· 2280 2280 * fld - vsr_t field (VsrD(*) or VsrW(*)) 2281 2281 * maddflgs - flags for the float*muladd routine that control the 2282 2282 * various forms (madd, msub, nmadd, nmsub) 2283 - * afrm - A form (1=A, 0=M) 2284 2283 * sfprf - set FPRF 2285 2284 */ 2286 - #define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp) \ 2285 + #define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp) \ 2287 2286 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ 2288 - ppc_vsr_t *xa, ppc_vsr_t *xb) \ 2287 + ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \ 2289 2288 { \ 2290 - ppc_vsr_t t = *xt, *b, *c; \ 2289 + ppc_vsr_t t = *xt; \ 2291 2290 int i; \ 2292 - \ 2293 - if (afrm) { /* AxB + T */ \ 2294 - b = xb; \ 2295 - c = xt; \ 2296 - } else { /* AxT + B */ \ 2297 - b = xt; \ 2298 - c = xb; \ 2299 - } \ 2300 2291 \ 2301 2292 helper_reset_fpstatus(env); \ 2302 2293 \ ··· 2336 2327 do_float_check_status(env, GETPC()); \ 2337 2328 } 2338 2329 2339 - VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0) 2340 - VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0) 2341 - VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0) 2342 - VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0) 2343 - VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0) 2344 - VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0) 2345 - VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0) 2346 - VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0) 2347 - 2348 - VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1) 2349 - VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1) 2350 - VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1) 2351 - VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1) 2352 - VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1) 2353 - VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1) 2354 - VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1) 2355 - 
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1) 2330 + VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0) 2331 + VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0) 2332 + VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0) 2333 + VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0) 2334 + VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1) 2335 + VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1) 2336 + VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1) 2337 + VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1) 2356 2338 2357 - VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0) 2358 - VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0) 2359 - VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0) 2360 - VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0) 2361 - VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0) 2362 - VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0) 2363 - VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0) 2364 - VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0) 2339 + VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0) 2340 + VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0) 2341 + VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0) 2342 + VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0) 2365 2343 2366 - VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0) 2367 - VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0) 2368 - VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0) 2369 - VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0) 2370 - VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0) 2371 - VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0) 2372 - VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0) 2373 - VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0) 2344 + VSX_MADD(xvmaddsp, 4, 
float32, VsrW(i), MADD_FLGS, 0, 0) 2345 + VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0) 2346 + VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0) 2347 + VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0) 2374 2348 2375 2349 /* 2376 2350 * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
+16 -32
target/ppc/helper.h
··· 377 377 DEF_HELPER_3(xsrsqrtedp, void, env, vsr, vsr) 378 378 DEF_HELPER_4(xstdivdp, void, env, i32, vsr, vsr) 379 379 DEF_HELPER_3(xstsqrtdp, void, env, i32, vsr) 380 - DEF_HELPER_4(xsmaddadp, void, env, vsr, vsr, vsr) 381 - DEF_HELPER_4(xsmaddmdp, void, env, vsr, vsr, vsr) 382 - DEF_HELPER_4(xsmsubadp, void, env, vsr, vsr, vsr) 383 - DEF_HELPER_4(xsmsubmdp, void, env, vsr, vsr, vsr) 384 - DEF_HELPER_4(xsnmaddadp, void, env, vsr, vsr, vsr) 385 - DEF_HELPER_4(xsnmaddmdp, void, env, vsr, vsr, vsr) 386 - DEF_HELPER_4(xsnmsubadp, void, env, vsr, vsr, vsr) 387 - DEF_HELPER_4(xsnmsubmdp, void, env, vsr, vsr, vsr) 380 + DEF_HELPER_5(xsmadddp, void, env, vsr, vsr, vsr, vsr) 381 + DEF_HELPER_5(xsmsubdp, void, env, vsr, vsr, vsr, vsr) 382 + DEF_HELPER_5(xsnmadddp, void, env, vsr, vsr, vsr, vsr) 383 + DEF_HELPER_5(xsnmsubdp, void, env, vsr, vsr, vsr, vsr) 388 384 DEF_HELPER_4(xscmpeqdp, void, env, vsr, vsr, vsr) 389 385 DEF_HELPER_4(xscmpgtdp, void, env, vsr, vsr, vsr) 390 386 DEF_HELPER_4(xscmpgedp, void, env, vsr, vsr, vsr) ··· 444 440 DEF_HELPER_2(xsrsp, i64, env, i64) 445 441 DEF_HELPER_3(xssqrtsp, void, env, vsr, vsr) 446 442 DEF_HELPER_3(xsrsqrtesp, void, env, vsr, vsr) 447 - DEF_HELPER_4(xsmaddasp, void, env, vsr, vsr, vsr) 448 - DEF_HELPER_4(xsmaddmsp, void, env, vsr, vsr, vsr) 449 - DEF_HELPER_4(xsmsubasp, void, env, vsr, vsr, vsr) 450 - DEF_HELPER_4(xsmsubmsp, void, env, vsr, vsr, vsr) 451 - DEF_HELPER_4(xsnmaddasp, void, env, vsr, vsr, vsr) 452 - DEF_HELPER_4(xsnmaddmsp, void, env, vsr, vsr, vsr) 453 - DEF_HELPER_4(xsnmsubasp, void, env, vsr, vsr, vsr) 454 - DEF_HELPER_4(xsnmsubmsp, void, env, vsr, vsr, vsr) 443 + DEF_HELPER_5(xsmaddsp, void, env, vsr, vsr, vsr, vsr) 444 + DEF_HELPER_5(xsmsubsp, void, env, vsr, vsr, vsr, vsr) 445 + DEF_HELPER_5(xsnmaddsp, void, env, vsr, vsr, vsr, vsr) 446 + DEF_HELPER_5(xsnmsubsp, void, env, vsr, vsr, vsr, vsr) 455 447 456 448 DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr) 457 449 DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, 
vsr) ··· 462 454 DEF_HELPER_3(xvrsqrtedp, void, env, vsr, vsr) 463 455 DEF_HELPER_4(xvtdivdp, void, env, i32, vsr, vsr) 464 456 DEF_HELPER_3(xvtsqrtdp, void, env, i32, vsr) 465 - DEF_HELPER_4(xvmaddadp, void, env, vsr, vsr, vsr) 466 - DEF_HELPER_4(xvmaddmdp, void, env, vsr, vsr, vsr) 467 - DEF_HELPER_4(xvmsubadp, void, env, vsr, vsr, vsr) 468 - DEF_HELPER_4(xvmsubmdp, void, env, vsr, vsr, vsr) 469 - DEF_HELPER_4(xvnmaddadp, void, env, vsr, vsr, vsr) 470 - DEF_HELPER_4(xvnmaddmdp, void, env, vsr, vsr, vsr) 471 - DEF_HELPER_4(xvnmsubadp, void, env, vsr, vsr, vsr) 472 - DEF_HELPER_4(xvnmsubmdp, void, env, vsr, vsr, vsr) 457 + DEF_HELPER_5(xvmadddp, void, env, vsr, vsr, vsr, vsr) 458 + DEF_HELPER_5(xvmsubdp, void, env, vsr, vsr, vsr, vsr) 459 + DEF_HELPER_5(xvnmadddp, void, env, vsr, vsr, vsr, vsr) 460 + DEF_HELPER_5(xvnmsubdp, void, env, vsr, vsr, vsr, vsr) 473 461 DEF_HELPER_4(xvmaxdp, void, env, vsr, vsr, vsr) 474 462 DEF_HELPER_4(xvmindp, void, env, vsr, vsr, vsr) 475 463 DEF_HELPER_FLAGS_4(xvcmpeqdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr) ··· 500 488 DEF_HELPER_3(xvrsqrtesp, void, env, vsr, vsr) 501 489 DEF_HELPER_4(xvtdivsp, void, env, i32, vsr, vsr) 502 490 DEF_HELPER_3(xvtsqrtsp, void, env, i32, vsr) 503 - DEF_HELPER_4(xvmaddasp, void, env, vsr, vsr, vsr) 504 - DEF_HELPER_4(xvmaddmsp, void, env, vsr, vsr, vsr) 505 - DEF_HELPER_4(xvmsubasp, void, env, vsr, vsr, vsr) 506 - DEF_HELPER_4(xvmsubmsp, void, env, vsr, vsr, vsr) 507 - DEF_HELPER_4(xvnmaddasp, void, env, vsr, vsr, vsr) 508 - DEF_HELPER_4(xvnmaddmsp, void, env, vsr, vsr, vsr) 509 - DEF_HELPER_4(xvnmsubasp, void, env, vsr, vsr, vsr) 510 - DEF_HELPER_4(xvnmsubmsp, void, env, vsr, vsr, vsr) 491 + DEF_HELPER_5(xvmaddsp, void, env, vsr, vsr, vsr, vsr) 492 + DEF_HELPER_5(xvmsubsp, void, env, vsr, vsr, vsr, vsr) 493 + DEF_HELPER_5(xvnmaddsp, void, env, vsr, vsr, vsr, vsr) 494 + DEF_HELPER_5(xvnmsubsp, void, env, vsr, vsr, vsr, vsr) 511 495 DEF_HELPER_4(xvmaxsp, void, env, vsr, vsr, vsr) 512 496 
DEF_HELPER_4(xvminsp, void, env, vsr, vsr, vsr) 513 497 DEF_HELPER_FLAGS_4(xvcmpeqsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+47 -34
target/ppc/translate/vsx-impl.inc.c
··· 1182 1182 GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX) 1183 1183 GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX) 1184 1184 GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX) 1185 - GEN_VSX_HELPER_X3(xsmaddadp, 0x04, 0x04, 0, PPC2_VSX) 1186 - GEN_VSX_HELPER_X3(xsmaddmdp, 0x04, 0x05, 0, PPC2_VSX) 1187 - GEN_VSX_HELPER_X3(xsmsubadp, 0x04, 0x06, 0, PPC2_VSX) 1188 - GEN_VSX_HELPER_X3(xsmsubmdp, 0x04, 0x07, 0, PPC2_VSX) 1189 - GEN_VSX_HELPER_X3(xsnmaddadp, 0x04, 0x14, 0, PPC2_VSX) 1190 - GEN_VSX_HELPER_X3(xsnmaddmdp, 0x04, 0x15, 0, PPC2_VSX) 1191 - GEN_VSX_HELPER_X3(xsnmsubadp, 0x04, 0x16, 0, PPC2_VSX) 1192 - GEN_VSX_HELPER_X3(xsnmsubmdp, 0x04, 0x17, 0, PPC2_VSX) 1193 1185 GEN_VSX_HELPER_X3(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300) 1194 1186 GEN_VSX_HELPER_X3(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300) 1195 1187 GEN_VSX_HELPER_X3(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300) ··· 1231 1223 GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX) 1232 1224 GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX) 1233 1225 GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207) 1234 - 1235 1226 GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300) 1236 1227 GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300) 1237 1228 GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300) 1238 1229 GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300) 1239 - 1240 1230 GEN_VSX_HELPER_X3(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207) 1241 1231 GEN_VSX_HELPER_X3(xssubsp, 0x00, 0x01, 0, PPC2_VSX207) 1242 1232 GEN_VSX_HELPER_X3(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207) ··· 1244 1234 GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207) 1245 1235 GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207) 1246 1236 GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207) 1247 - GEN_VSX_HELPER_X3(xsmaddasp, 0x04, 0x00, 0, PPC2_VSX207) 1248 - GEN_VSX_HELPER_X3(xsmaddmsp, 0x04, 0x01, 0, PPC2_VSX207) 1249 - GEN_VSX_HELPER_X3(xsmsubasp, 0x04, 0x02, 0, PPC2_VSX207) 1250 - GEN_VSX_HELPER_X3(xsmsubmsp, 0x04, 
0x03, 0, PPC2_VSX207) 1251 - GEN_VSX_HELPER_X3(xsnmaddasp, 0x04, 0x10, 0, PPC2_VSX207) 1252 - GEN_VSX_HELPER_X3(xsnmaddmsp, 0x04, 0x11, 0, PPC2_VSX207) 1253 - GEN_VSX_HELPER_X3(xsnmsubasp, 0x04, 0x12, 0, PPC2_VSX207) 1254 - GEN_VSX_HELPER_X3(xsnmsubmsp, 0x04, 0x13, 0, PPC2_VSX207) 1255 1237 GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207) 1256 1238 GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207) 1257 1239 GEN_VSX_HELPER_X1(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300) ··· 1267 1249 GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX) 1268 1250 GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX) 1269 1251 GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX) 1270 - GEN_VSX_HELPER_X3(xvmaddadp, 0x04, 0x0C, 0, PPC2_VSX) 1271 - GEN_VSX_HELPER_X3(xvmaddmdp, 0x04, 0x0D, 0, PPC2_VSX) 1272 - GEN_VSX_HELPER_X3(xvmsubadp, 0x04, 0x0E, 0, PPC2_VSX) 1273 - GEN_VSX_HELPER_X3(xvmsubmdp, 0x04, 0x0F, 0, PPC2_VSX) 1274 - GEN_VSX_HELPER_X3(xvnmaddadp, 0x04, 0x1C, 0, PPC2_VSX) 1275 - GEN_VSX_HELPER_X3(xvnmaddmdp, 0x04, 0x1D, 0, PPC2_VSX) 1276 - GEN_VSX_HELPER_X3(xvnmsubadp, 0x04, 0x1E, 0, PPC2_VSX) 1277 - GEN_VSX_HELPER_X3(xvnmsubmdp, 0x04, 0x1F, 0, PPC2_VSX) 1278 1252 GEN_VSX_HELPER_X3(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX) 1279 1253 GEN_VSX_HELPER_X3(xvmindp, 0x00, 0x1D, 0, PPC2_VSX) 1280 1254 GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX) ··· 1301 1275 GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX) 1302 1276 GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX) 1303 1277 GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX) 1304 - GEN_VSX_HELPER_X3(xvmaddasp, 0x04, 0x08, 0, PPC2_VSX) 1305 - GEN_VSX_HELPER_X3(xvmaddmsp, 0x04, 0x09, 0, PPC2_VSX) 1306 - GEN_VSX_HELPER_X3(xvmsubasp, 0x04, 0x0A, 0, PPC2_VSX) 1307 - GEN_VSX_HELPER_X3(xvmsubmsp, 0x04, 0x0B, 0, PPC2_VSX) 1308 - GEN_VSX_HELPER_X3(xvnmaddasp, 0x04, 0x18, 0, PPC2_VSX) 1309 - GEN_VSX_HELPER_X3(xvnmaddmsp, 0x04, 0x19, 0, PPC2_VSX) 1310 - GEN_VSX_HELPER_X3(xvnmsubasp, 0x04, 0x1A, 0, PPC2_VSX) 1311 - 
GEN_VSX_HELPER_X3(xvnmsubmsp, 0x04, 0x1B, 0, PPC2_VSX) 1312 1278 GEN_VSX_HELPER_X3(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX) 1313 1279 GEN_VSX_HELPER_X3(xvminsp, 0x00, 0x19, 0, PPC2_VSX) 1314 1280 GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX) ··· 1331 1297 GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX) 1332 1298 GEN_VSX_HELPER_X3(xxperm, 0x08, 0x03, 0, PPC2_ISA300) 1333 1299 GEN_VSX_HELPER_X3(xxpermr, 0x08, 0x07, 0, PPC2_ISA300) 1300 + 1301 + #define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \ 1302 + static void gen_##name(DisasContext *ctx) \ 1303 + { \ 1304 + TCGv_ptr xt, xa, b, c; \ 1305 + if (unlikely(!ctx->vsx_enabled)) { \ 1306 + gen_exception(ctx, POWERPC_EXCP_VSXU); \ 1307 + return; \ 1308 + } \ 1309 + xt = gen_vsr_ptr(xT(ctx->opcode)); \ 1310 + xa = gen_vsr_ptr(xA(ctx->opcode)); \ 1311 + if (ctx->opcode & PPC_BIT(25)) { \ 1312 + /* \ 1313 + * AxT + B \ 1314 + */ \ 1315 + b = gen_vsr_ptr(xT(ctx->opcode)); \ 1316 + c = gen_vsr_ptr(xB(ctx->opcode)); \ 1317 + } else { \ 1318 + /* \ 1319 + * AxB + T \ 1320 + */ \ 1321 + b = gen_vsr_ptr(xB(ctx->opcode)); \ 1322 + c = gen_vsr_ptr(xT(ctx->opcode)); \ 1323 + } \ 1324 + gen_helper_##name(cpu_env, xt, xa, b, c); \ 1325 + tcg_temp_free_ptr(xt); \ 1326 + tcg_temp_free_ptr(xa); \ 1327 + tcg_temp_free_ptr(b); \ 1328 + tcg_temp_free_ptr(c); \ 1329 + } 1330 + 1331 + GEN_VSX_HELPER_VSX_MADD(xsmadddp, 0x04, 0x04, 0x05, 0, PPC2_VSX) 1332 + GEN_VSX_HELPER_VSX_MADD(xsmsubdp, 0x04, 0x06, 0x07, 0, PPC2_VSX) 1333 + GEN_VSX_HELPER_VSX_MADD(xsnmadddp, 0x04, 0x14, 0x15, 0, PPC2_VSX) 1334 + GEN_VSX_HELPER_VSX_MADD(xsnmsubdp, 0x04, 0x16, 0x17, 0, PPC2_VSX) 1335 + GEN_VSX_HELPER_VSX_MADD(xsmaddsp, 0x04, 0x00, 0x01, 0, PPC2_VSX207) 1336 + GEN_VSX_HELPER_VSX_MADD(xsmsubsp, 0x04, 0x02, 0x03, 0, PPC2_VSX207) 1337 + GEN_VSX_HELPER_VSX_MADD(xsnmaddsp, 0x04, 0x10, 0x11, 0, PPC2_VSX207) 1338 + GEN_VSX_HELPER_VSX_MADD(xsnmsubsp, 0x04, 0x12, 0x13, 0, PPC2_VSX207) 1339 + GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, 
PPC2_VSX) 1340 + GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX) 1341 + GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX) 1342 + GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX) 1343 + GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX) 1344 + GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX) 1345 + GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX) 1346 + GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX) 1334 1347 1335 1348 static void gen_xxbrd(DisasContext *ctx) 1336 1349 {
+38 -32
target/ppc/translate/vsx-ops.inc.c
··· 63 63 GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \ 64 64 GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2) 65 65 66 + #define GEN_XX3FORM_NAME(name, opcname, opc2, opc3, fl2) \ 67 + GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \ 68 + GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \ 69 + GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \ 70 + GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2) 71 + 66 72 #define GEN_XX2IFORM(name, opc2, opc3, fl2) \ 67 73 GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 1, PPC_NONE, fl2), \ 68 74 GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 1, PPC_NONE, fl2), \ ··· 182 188 GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX), 183 189 GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX), 184 190 GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX), 185 - GEN_XX3FORM(xsmaddadp, 0x04, 0x04, PPC2_VSX), 186 - GEN_XX3FORM(xsmaddmdp, 0x04, 0x05, PPC2_VSX), 187 - GEN_XX3FORM(xsmsubadp, 0x04, 0x06, PPC2_VSX), 188 - GEN_XX3FORM(xsmsubmdp, 0x04, 0x07, PPC2_VSX), 189 - GEN_XX3FORM(xsnmaddadp, 0x04, 0x14, PPC2_VSX), 190 - GEN_XX3FORM(xsnmaddmdp, 0x04, 0x15, PPC2_VSX), 191 - GEN_XX3FORM(xsnmsubadp, 0x04, 0x16, PPC2_VSX), 192 - GEN_XX3FORM(xsnmsubmdp, 0x04, 0x17, PPC2_VSX), 191 + GEN_XX3FORM_NAME(xsmadddp, "xsmaddadp", 0x04, 0x04, PPC2_VSX), 192 + GEN_XX3FORM_NAME(xsmadddp, "xsmaddmdp", 0x04, 0x05, PPC2_VSX), 193 + GEN_XX3FORM_NAME(xsmsubdp, "xsmsubadp", 0x04, 0x06, PPC2_VSX), 194 + GEN_XX3FORM_NAME(xsmsubdp, "xsmsubmdp", 0x04, 0x07, PPC2_VSX), 195 + GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddadp", 0x04, 0x14, PPC2_VSX), 196 + GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddmdp", 0x04, 0x15, PPC2_VSX), 197 + GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubadp", 0x04, 0x16, PPC2_VSX), 198 + GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubmdp", 0x04, 0x17, PPC2_VSX), 193 199 GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300), 194 200 GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, 
PPC2_ISA300), 195 201 GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300), ··· 235 241 GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207), 236 242 GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207), 237 243 GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207), 238 - GEN_XX3FORM(xsmaddasp, 0x04, 0x00, PPC2_VSX207), 239 - GEN_XX3FORM(xsmaddmsp, 0x04, 0x01, PPC2_VSX207), 240 - GEN_XX3FORM(xsmsubasp, 0x04, 0x02, PPC2_VSX207), 241 - GEN_XX3FORM(xsmsubmsp, 0x04, 0x03, PPC2_VSX207), 242 - GEN_XX3FORM(xsnmaddasp, 0x04, 0x10, PPC2_VSX207), 243 - GEN_XX3FORM(xsnmaddmsp, 0x04, 0x11, PPC2_VSX207), 244 - GEN_XX3FORM(xsnmsubasp, 0x04, 0x12, PPC2_VSX207), 245 - GEN_XX3FORM(xsnmsubmsp, 0x04, 0x13, PPC2_VSX207), 244 + GEN_XX3FORM_NAME(xsmaddsp, "xsmaddasp", 0x04, 0x00, PPC2_VSX207), 245 + GEN_XX3FORM_NAME(xsmaddsp, "xsmaddmsp", 0x04, 0x01, PPC2_VSX207), 246 + GEN_XX3FORM_NAME(xsmsubsp, "xsmsubasp", 0x04, 0x02, PPC2_VSX207), 247 + GEN_XX3FORM_NAME(xsmsubsp, "xsmsubmsp", 0x04, 0x03, PPC2_VSX207), 248 + GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddasp", 0x04, 0x10, PPC2_VSX207), 249 + GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddmsp", 0x04, 0x11, PPC2_VSX207), 250 + GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubasp", 0x04, 0x12, PPC2_VSX207), 251 + GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubmsp", 0x04, 0x13, PPC2_VSX207), 246 252 GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207), 247 253 GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207), 248 254 ··· 255 261 GEN_XX2FORM(xvrsqrtedp, 0x14, 0x0C, PPC2_VSX), 256 262 GEN_XX3FORM(xvtdivdp, 0x14, 0x0F, PPC2_VSX), 257 263 GEN_XX2FORM(xvtsqrtdp, 0x14, 0x0E, PPC2_VSX), 258 - GEN_XX3FORM(xvmaddadp, 0x04, 0x0C, PPC2_VSX), 259 - GEN_XX3FORM(xvmaddmdp, 0x04, 0x0D, PPC2_VSX), 260 - GEN_XX3FORM(xvmsubadp, 0x04, 0x0E, PPC2_VSX), 261 - GEN_XX3FORM(xvmsubmdp, 0x04, 0x0F, PPC2_VSX), 262 - GEN_XX3FORM(xvnmaddadp, 0x04, 0x1C, PPC2_VSX), 263 - GEN_XX3FORM(xvnmaddmdp, 0x04, 0x1D, PPC2_VSX), 264 - GEN_XX3FORM(xvnmsubadp, 0x04, 0x1E, PPC2_VSX), 265 - GEN_XX3FORM(xvnmsubmdp, 0x04, 0x1F, PPC2_VSX), 264 + 
GEN_XX3FORM_NAME(xvmadddp, "xvmaddadp", 0x04, 0x0C, PPC2_VSX), 265 + GEN_XX3FORM_NAME(xvmadddp, "xvmaddmdp", 0x04, 0x0D, PPC2_VSX), 266 + GEN_XX3FORM_NAME(xvmsubdp, "xvmsubadp", 0x04, 0x0E, PPC2_VSX), 267 + GEN_XX3FORM_NAME(xvmsubdp, "xvmsubmdp", 0x04, 0x0F, PPC2_VSX), 268 + GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddadp", 0x04, 0x1C, PPC2_VSX), 269 + GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddmdp", 0x04, 0x1D, PPC2_VSX), 270 + GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubadp", 0x04, 0x1E, PPC2_VSX), 271 + GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubmdp", 0x04, 0x1F, PPC2_VSX), 266 272 GEN_XX3FORM(xvmaxdp, 0x00, 0x1C, PPC2_VSX), 267 273 GEN_XX3FORM(xvmindp, 0x00, 0x1D, PPC2_VSX), 268 274 GEN_XX3_RC_FORM(xvcmpeqdp, 0x0C, 0x0C, PPC2_VSX), ··· 293 299 GEN_XX2FORM(xvrsqrtesp, 0x14, 0x08, PPC2_VSX), 294 300 GEN_XX3FORM(xvtdivsp, 0x14, 0x0B, PPC2_VSX), 295 301 GEN_XX2FORM(xvtsqrtsp, 0x14, 0x0A, PPC2_VSX), 296 - GEN_XX3FORM(xvmaddasp, 0x04, 0x08, PPC2_VSX), 297 - GEN_XX3FORM(xvmaddmsp, 0x04, 0x09, PPC2_VSX), 298 - GEN_XX3FORM(xvmsubasp, 0x04, 0x0A, PPC2_VSX), 299 - GEN_XX3FORM(xvmsubmsp, 0x04, 0x0B, PPC2_VSX), 300 - GEN_XX3FORM(xvnmaddasp, 0x04, 0x18, PPC2_VSX), 301 - GEN_XX3FORM(xvnmaddmsp, 0x04, 0x19, PPC2_VSX), 302 - GEN_XX3FORM(xvnmsubasp, 0x04, 0x1A, PPC2_VSX), 303 - GEN_XX3FORM(xvnmsubmsp, 0x04, 0x1B, PPC2_VSX), 302 + GEN_XX3FORM_NAME(xvmaddsp, "xvmaddasp", 0x04, 0x08, PPC2_VSX), 303 + GEN_XX3FORM_NAME(xvmaddsp, "xvmaddmsp", 0x04, 0x09, PPC2_VSX), 304 + GEN_XX3FORM_NAME(xvmsubsp, "xvmsubasp", 0x04, 0x0A, PPC2_VSX), 305 + GEN_XX3FORM_NAME(xvmsubsp, "xvmsubmsp", 0x04, 0x0B, PPC2_VSX), 306 + GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddasp", 0x04, 0x18, PPC2_VSX), 307 + GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddmsp", 0x04, 0x19, PPC2_VSX), 308 + GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubasp", 0x04, 0x1A, PPC2_VSX), 309 + GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubmsp", 0x04, 0x1B, PPC2_VSX), 304 310 GEN_XX3FORM(xvmaxsp, 0x00, 0x18, PPC2_VSX), 305 311 GEN_XX3FORM(xvminsp, 0x00, 0x19, PPC2_VSX), 306 312 
GEN_XX3_RC_FORM(xvcmpeqsp, 0x0C, 0x08, PPC2_VSX),