qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

tcg/aarch64: Implement INDEX_op_rotl{i,v}_vec

For immediate rotate, we can implement this in two instructions,
using SLI. For variable rotate, the oddness of aarch64 right-shift-
as-negative-left-shift means a backend-specific expansion works best.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

+52 -2
+51 -2
tcg/aarch64/tcg-target.inc.c
··· 557 557 I3614_SSHR = 0x0f000400, 558 558 I3614_SSRA = 0x0f001400, 559 559 I3614_SHL = 0x0f005400, 560 + I3614_SLI = 0x2f005400, 560 561 I3614_USHR = 0x2f000400, 561 562 I3614_USRA = 0x2f001400, 562 563 ··· 2411 2412 case INDEX_op_sari_vec: 2412 2413 tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2); 2413 2414 break; 2415 + case INDEX_op_aa64_sli_vec: 2416 + tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece)); 2417 + break; 2414 2418 case INDEX_op_shlv_vec: 2415 2419 tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2); 2416 2420 break; ··· 2498 2502 case INDEX_op_shlv_vec: 2499 2503 case INDEX_op_bitsel_vec: 2500 2504 return 1; 2505 + case INDEX_op_rotli_vec: 2501 2506 case INDEX_op_shrv_vec: 2502 2507 case INDEX_op_sarv_vec: 2508 + case INDEX_op_rotlv_vec: 2509 + case INDEX_op_rotrv_vec: 2503 2510 return -1; 2504 2511 case INDEX_op_mul_vec: 2505 2512 case INDEX_op_smax_vec: ··· 2517 2524 TCGArg a0, ...) 2518 2525 { 2519 2526 va_list va; 2520 - TCGv_vec v0, v1, v2, t1; 2527 + TCGv_vec v0, v1, v2, t1, t2; 2528 + TCGArg a2; 2521 2529 2522 2530 va_start(va, a0); 2523 2531 v0 = temp_tcgv_vec(arg_temp(a0)); 2524 2532 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); 2525 - v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); 2533 + a2 = va_arg(va, TCGArg); 2534 + v2 = temp_tcgv_vec(arg_temp(a2)); 2526 2535 2527 2536 switch (opc) { 2537 + case INDEX_op_rotli_vec: 2538 + t1 = tcg_temp_new_vec(type); 2539 + tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1)); 2540 + vec_gen_4(INDEX_op_aa64_sli_vec, type, vece, 2541 + tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2); 2542 + tcg_temp_free_vec(t1); 2543 + break; 2544 + 2528 2545 case INDEX_op_shrv_vec: 2529 2546 case INDEX_op_sarv_vec: 2530 2547 /* Right shifts are negative left shifts for AArch64. 
*/ ··· 2537 2554 tcg_temp_free_vec(t1); 2538 2555 break; 2539 2556 2557 + case INDEX_op_rotlv_vec: 2558 + t1 = tcg_temp_new_vec(type); 2559 + tcg_gen_dupi_vec(vece, t1, 8 << vece); 2560 + tcg_gen_sub_vec(vece, t1, v2, t1); 2561 + /* Right shifts are negative left shifts for AArch64. */ 2562 + vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1), 2563 + tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 2564 + vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(v0), 2565 + tcgv_vec_arg(v1), tcgv_vec_arg(v2)); 2566 + tcg_gen_or_vec(vece, v0, v0, t1); 2567 + tcg_temp_free_vec(t1); 2568 + break; 2569 + 2570 + case INDEX_op_rotrv_vec: 2571 + t1 = tcg_temp_new_vec(type); 2572 + t2 = tcg_temp_new_vec(type); 2573 + tcg_gen_neg_vec(vece, t1, v2); 2574 + tcg_gen_dupi_vec(vece, t2, 8 << vece); 2575 + tcg_gen_add_vec(vece, t2, t1, t2); 2576 + /* Right shifts are negative left shifts for AArch64. */ 2577 + vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1), 2578 + tcgv_vec_arg(v1), tcgv_vec_arg(t1)); 2579 + vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t2), 2580 + tcgv_vec_arg(v1), tcgv_vec_arg(t2)); 2581 + tcg_gen_or_vec(vece, v0, t1, t2); 2582 + tcg_temp_free_vec(t1); 2583 + tcg_temp_free_vec(t2); 2584 + break; 2585 + 2540 2586 default: 2541 2587 g_assert_not_reached(); 2542 2588 } ··· 2557 2603 static const TCGTargetOpDef lZ_l = { .args_ct_str = { "lZ", "l" } }; 2558 2604 static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } }; 2559 2605 static const TCGTargetOpDef w_w_w = { .args_ct_str = { "w", "w", "w" } }; 2606 + static const TCGTargetOpDef w_0_w = { .args_ct_str = { "w", "0", "w" } }; 2560 2607 static const TCGTargetOpDef w_w_wO = { .args_ct_str = { "w", "w", "wO" } }; 2561 2608 static const TCGTargetOpDef w_w_wN = { .args_ct_str = { "w", "w", "wN" } }; 2562 2609 static const TCGTargetOpDef w_w_wZ = { .args_ct_str = { "w", "w", "wZ" } }; ··· 2751 2798 return &w_w_wZ; 2752 2799 case INDEX_op_bitsel_vec: 2753 2800 return &w_w_w_w; 2801 + case 
INDEX_op_aa64_sli_vec: 2802 + return &w_0_w; 2754 2803 2755 2804 default: 2756 2805 return NULL;
+1
tcg/aarch64/tcg-target.opc.h
··· 12 12 */ 13 13 14 14 DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC) 15 + DEF(aa64_sli_vec, 1, 2, 1, IMPLVEC)