qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

tcg: Implement gvec support for rotate by vector

No host backend support yet, but the interfaces for rotlv
and rotrv are in place.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v3: Drop the generic expansion from rot to shift; we can do better
for each backend, and then this code becomes unused.

+256 -1
+96
accel/tcg/tcg-runtime-gvec.c
··· 908 908 clear_high(d, oprsz, desc); 909 909 } 910 910 911 + void HELPER(gvec_rotl8v)(void *d, void *a, void *b, uint32_t desc) 912 + { 913 + intptr_t oprsz = simd_oprsz(desc); 914 + intptr_t i; 915 + 916 + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { 917 + uint8_t sh = *(uint8_t *)(b + i) & 7; 918 + *(uint8_t *)(d + i) = rol8(*(uint8_t *)(a + i), sh); 919 + } 920 + clear_high(d, oprsz, desc); 921 + } 922 + 923 + void HELPER(gvec_rotl16v)(void *d, void *a, void *b, uint32_t desc) 924 + { 925 + intptr_t oprsz = simd_oprsz(desc); 926 + intptr_t i; 927 + 928 + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { 929 + uint8_t sh = *(uint16_t *)(b + i) & 15; 930 + *(uint16_t *)(d + i) = rol16(*(uint16_t *)(a + i), sh); 931 + } 932 + clear_high(d, oprsz, desc); 933 + } 934 + 935 + void HELPER(gvec_rotl32v)(void *d, void *a, void *b, uint32_t desc) 936 + { 937 + intptr_t oprsz = simd_oprsz(desc); 938 + intptr_t i; 939 + 940 + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { 941 + uint8_t sh = *(uint32_t *)(b + i) & 31; 942 + *(uint32_t *)(d + i) = rol32(*(uint32_t *)(a + i), sh); 943 + } 944 + clear_high(d, oprsz, desc); 945 + } 946 + 947 + void HELPER(gvec_rotl64v)(void *d, void *a, void *b, uint32_t desc) 948 + { 949 + intptr_t oprsz = simd_oprsz(desc); 950 + intptr_t i; 951 + 952 + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { 953 + uint8_t sh = *(uint64_t *)(b + i) & 63; 954 + *(uint64_t *)(d + i) = rol64(*(uint64_t *)(a + i), sh); 955 + } 956 + clear_high(d, oprsz, desc); 957 + } 958 + 959 + void HELPER(gvec_rotr8v)(void *d, void *a, void *b, uint32_t desc) 960 + { 961 + intptr_t oprsz = simd_oprsz(desc); 962 + intptr_t i; 963 + 964 + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { 965 + uint8_t sh = *(uint8_t *)(b + i) & 7; 966 + *(uint8_t *)(d + i) = ror8(*(uint8_t *)(a + i), sh); 967 + } 968 + clear_high(d, oprsz, desc); 969 + } 970 + 971 + void HELPER(gvec_rotr16v)(void *d, void *a, void *b, uint32_t desc) 972 + { 973 + intptr_t oprsz = simd_oprsz(desc); 974 + 
intptr_t i; 975 + 976 + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { 977 + uint8_t sh = *(uint16_t *)(b + i) & 15; 978 + *(uint16_t *)(d + i) = ror16(*(uint16_t *)(a + i), sh); 979 + } 980 + clear_high(d, oprsz, desc); 981 + } 982 + 983 + void HELPER(gvec_rotr32v)(void *d, void *a, void *b, uint32_t desc) 984 + { 985 + intptr_t oprsz = simd_oprsz(desc); 986 + intptr_t i; 987 + 988 + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { 989 + uint8_t sh = *(uint32_t *)(b + i) & 31; 990 + *(uint32_t *)(d + i) = ror32(*(uint32_t *)(a + i), sh); 991 + } 992 + clear_high(d, oprsz, desc); 993 + } 994 + 995 + void HELPER(gvec_rotr64v)(void *d, void *a, void *b, uint32_t desc) 996 + { 997 + intptr_t oprsz = simd_oprsz(desc); 998 + intptr_t i; 999 + 1000 + for (i = 0; i < oprsz; i += sizeof(uint64_t)) { 1001 + uint8_t sh = *(uint64_t *)(b + i) & 63; 1002 + *(uint64_t *)(d + i) = ror64(*(uint64_t *)(a + i), sh); 1003 + } 1004 + clear_high(d, oprsz, desc); 1005 + } 1006 + 911 1007 #define DO_CMP1(NAME, TYPE, OP) \ 912 1008 void HELPER(NAME)(void *d, void *a, void *b, uint32_t desc) \ 913 1009 { \
+10
accel/tcg/tcg-runtime.h
··· 279 279 DEF_HELPER_FLAGS_4(gvec_sar32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
280 280 DEF_HELPER_FLAGS_4(gvec_sar64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
281 281
282 + DEF_HELPER_FLAGS_4(gvec_rotl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
283 + DEF_HELPER_FLAGS_4(gvec_rotl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
284 + DEF_HELPER_FLAGS_4(gvec_rotl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
285 + DEF_HELPER_FLAGS_4(gvec_rotl64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
286 +
287 + DEF_HELPER_FLAGS_4(gvec_rotr8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
288 + DEF_HELPER_FLAGS_4(gvec_rotr16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
289 + DEF_HELPER_FLAGS_4(gvec_rotr32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
290 + DEF_HELPER_FLAGS_4(gvec_rotr64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
291 +
282 292 DEF_HELPER_FLAGS_4(gvec_eq8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
283 293 DEF_HELPER_FLAGS_4(gvec_eq16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
284 294 DEF_HELPER_FLAGS_4(gvec_eq32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+4
include/tcg/tcg-op-gvec.h
··· 356 356 uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
357 357 void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
358 358 uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
359 + void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs,
360 + uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
361 + void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
362 + uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
359 363
360 364 void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
361 365 uint32_t aofs, uint32_t bofs,
+2
include/tcg/tcg-op.h
··· 1009 1009 void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
1010 1010 void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
1011 1011 void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
1012 + void tcg_gen_rotlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
1013 + void tcg_gen_rotrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
1012 1014
1013 1015 void tcg_gen_cmp_vec(TCGCond cond, unsigned vece, TCGv_vec r,
1014 1016 TCGv_vec a, TCGv_vec b);
+2
include/tcg/tcg-opc.h
··· 257 257 DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
258 258 DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
259 259 DEF(sarv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
260 + DEF(rotlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
261 + DEF(rotrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
260 262
261 263 DEF(cmp_vec, 1, 2, 1, IMPLVEC)
262 264
+1
include/tcg/tcg.h
··· 183 183 #define TCG_TARGET_HAS_andc_vec 0
184 184 #define TCG_TARGET_HAS_orc_vec 0
185 185 #define TCG_TARGET_HAS_roti_vec 0
186 + #define TCG_TARGET_HAS_rotv_vec 0
186 187 #define TCG_TARGET_HAS_shi_vec 0
187 188 #define TCG_TARGET_HAS_shs_vec 0
188 189 #define TCG_TARGET_HAS_shv_vec 0
+3 -1
tcg/README
··· 621 621
622 622 * shrv_vec v0, v1, v2
623 623 * sarv_vec v0, v1, v2
624 + * rotlv_vec v0, v1, v2
625 + * rotrv_vec v0, v1, v2
624 626
625 - Similarly for logical and arithmetic right shift.
627 + Similarly for logical and arithmetic right shift, and rotates.
626 628
627 629 * cmp_vec v0, v1, v2, cond
628 630
+1
tcg/aarch64/tcg-target.h
··· 134 134 #define TCG_TARGET_HAS_neg_vec 1
135 135 #define TCG_TARGET_HAS_abs_vec 1
136 136 #define TCG_TARGET_HAS_roti_vec 0
137 + #define TCG_TARGET_HAS_rotv_vec 0
137 138 #define TCG_TARGET_HAS_shi_vec 1
138 139 #define TCG_TARGET_HAS_shs_vec 0
139 140 #define TCG_TARGET_HAS_shv_vec 1
+1
tcg/i386/tcg-target.h
··· 184 184 #define TCG_TARGET_HAS_neg_vec 0
185 185 #define TCG_TARGET_HAS_abs_vec 1
186 186 #define TCG_TARGET_HAS_roti_vec 0
187 + #define TCG_TARGET_HAS_rotv_vec 0
187 188 #define TCG_TARGET_HAS_shi_vec 1
188 189 #define TCG_TARGET_HAS_shs_vec 1
189 190 #define TCG_TARGET_HAS_shv_vec have_avx2
+1
tcg/ppc/tcg-target.h
··· 162 162 #define TCG_TARGET_HAS_neg_vec have_isa_3_00
163 163 #define TCG_TARGET_HAS_abs_vec 0
164 164 #define TCG_TARGET_HAS_roti_vec 0
165 + #define TCG_TARGET_HAS_rotv_vec 0
165 166 #define TCG_TARGET_HAS_shi_vec 0
166 167 #define TCG_TARGET_HAS_shs_vec 0
167 168 #define TCG_TARGET_HAS_shv_vec 1
+122
tcg/tcg-op-gvec.c
··· 3171 3171 tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]); 3172 3172 } 3173 3173 3174 + /* 3175 + * Similarly for rotates. 3176 + */ 3177 + 3178 + static void tcg_gen_rotlv_mod_vec(unsigned vece, TCGv_vec d, 3179 + TCGv_vec a, TCGv_vec b) 3180 + { 3181 + TCGv_vec t = tcg_temp_new_vec_matching(d); 3182 + 3183 + tcg_gen_dupi_vec(vece, t, (8 << vece) - 1); 3184 + tcg_gen_and_vec(vece, t, t, b); 3185 + tcg_gen_rotlv_vec(vece, d, a, t); 3186 + tcg_temp_free_vec(t); 3187 + } 3188 + 3189 + static void tcg_gen_rotl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) 3190 + { 3191 + TCGv_i32 t = tcg_temp_new_i32(); 3192 + 3193 + tcg_gen_andi_i32(t, b, 31); 3194 + tcg_gen_rotl_i32(d, a, t); 3195 + tcg_temp_free_i32(t); 3196 + } 3197 + 3198 + static void tcg_gen_rotl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) 3199 + { 3200 + TCGv_i64 t = tcg_temp_new_i64(); 3201 + 3202 + tcg_gen_andi_i64(t, b, 63); 3203 + tcg_gen_rotl_i64(d, a, t); 3204 + tcg_temp_free_i64(t); 3205 + } 3206 + 3207 + void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs, 3208 + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) 3209 + { 3210 + static const TCGOpcode vecop_list[] = { INDEX_op_rotlv_vec, 0 }; 3211 + static const GVecGen3 g[4] = { 3212 + { .fniv = tcg_gen_rotlv_mod_vec, 3213 + .fno = gen_helper_gvec_rotl8v, 3214 + .opt_opc = vecop_list, 3215 + .vece = MO_8 }, 3216 + { .fniv = tcg_gen_rotlv_mod_vec, 3217 + .fno = gen_helper_gvec_rotl16v, 3218 + .opt_opc = vecop_list, 3219 + .vece = MO_16 }, 3220 + { .fni4 = tcg_gen_rotl_mod_i32, 3221 + .fniv = tcg_gen_rotlv_mod_vec, 3222 + .fno = gen_helper_gvec_rotl32v, 3223 + .opt_opc = vecop_list, 3224 + .vece = MO_32 }, 3225 + { .fni8 = tcg_gen_rotl_mod_i64, 3226 + .fniv = tcg_gen_rotlv_mod_vec, 3227 + .fno = gen_helper_gvec_rotl64v, 3228 + .opt_opc = vecop_list, 3229 + .prefer_i64 = TCG_TARGET_REG_BITS == 64, 3230 + .vece = MO_64 }, 3231 + }; 3232 + 3233 + tcg_debug_assert(vece <= MO_64); 3234 + tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, 
&g[vece]); 3235 + } 3236 + 3237 + static void tcg_gen_rotrv_mod_vec(unsigned vece, TCGv_vec d, 3238 + TCGv_vec a, TCGv_vec b) 3239 + { 3240 + TCGv_vec t = tcg_temp_new_vec_matching(d); 3241 + 3242 + tcg_gen_dupi_vec(vece, t, (8 << vece) - 1); 3243 + tcg_gen_and_vec(vece, t, t, b); 3244 + tcg_gen_rotrv_vec(vece, d, a, t); 3245 + tcg_temp_free_vec(t); 3246 + } 3247 + 3248 + static void tcg_gen_rotr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) 3249 + { 3250 + TCGv_i32 t = tcg_temp_new_i32(); 3251 + 3252 + tcg_gen_andi_i32(t, b, 31); 3253 + tcg_gen_rotr_i32(d, a, t); 3254 + tcg_temp_free_i32(t); 3255 + } 3256 + 3257 + static void tcg_gen_rotr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) 3258 + { 3259 + TCGv_i64 t = tcg_temp_new_i64(); 3260 + 3261 + tcg_gen_andi_i64(t, b, 63); 3262 + tcg_gen_rotr_i64(d, a, t); 3263 + tcg_temp_free_i64(t); 3264 + } 3265 + 3266 + void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs, 3267 + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) 3268 + { 3269 + static const TCGOpcode vecop_list[] = { INDEX_op_rotrv_vec, 0 }; 3270 + static const GVecGen3 g[4] = { 3271 + { .fniv = tcg_gen_rotrv_mod_vec, 3272 + .fno = gen_helper_gvec_rotr8v, 3273 + .opt_opc = vecop_list, 3274 + .vece = MO_8 }, 3275 + { .fniv = tcg_gen_rotrv_mod_vec, 3276 + .fno = gen_helper_gvec_rotr16v, 3277 + .opt_opc = vecop_list, 3278 + .vece = MO_16 }, 3279 + { .fni4 = tcg_gen_rotr_mod_i32, 3280 + .fniv = tcg_gen_rotrv_mod_vec, 3281 + .fno = gen_helper_gvec_rotr32v, 3282 + .opt_opc = vecop_list, 3283 + .vece = MO_32 }, 3284 + { .fni8 = tcg_gen_rotr_mod_i64, 3285 + .fniv = tcg_gen_rotrv_mod_vec, 3286 + .fno = gen_helper_gvec_rotr64v, 3287 + .opt_opc = vecop_list, 3288 + .prefer_i64 = TCG_TARGET_REG_BITS == 64, 3289 + .vece = MO_64 }, 3290 + }; 3291 + 3292 + tcg_debug_assert(vece <= MO_64); 3293 + tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]); 3294 + } 3295 + 3174 3296 /* Expand OPSZ bytes worth of three-operand operations using i32 elements. 
*/ 3175 3297 static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs, 3176 3298 uint32_t oprsz, TCGCond cond)
+10
tcg/tcg-op-vec.c
··· 696 696 do_op3_nofail(vece, r, a, b, INDEX_op_sarv_vec); 697 697 } 698 698 699 + void tcg_gen_rotlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) 700 + { 701 + do_op3_nofail(vece, r, a, b, INDEX_op_rotlv_vec); 702 + } 703 + 704 + void tcg_gen_rotrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) 705 + { 706 + do_op3_nofail(vece, r, a, b, INDEX_op_rotrv_vec); 707 + } 708 + 699 709 static void do_shifts(unsigned vece, TCGv_vec r, TCGv_vec a, 700 710 TCGv_i32 s, TCGOpcode opc_s, TCGOpcode opc_v) 701 711 {
+3
tcg/tcg.c
··· 1663 1663 return have_vec && TCG_TARGET_HAS_shv_vec; 1664 1664 case INDEX_op_rotli_vec: 1665 1665 return have_vec && TCG_TARGET_HAS_roti_vec; 1666 + case INDEX_op_rotlv_vec: 1667 + case INDEX_op_rotrv_vec: 1668 + return have_vec && TCG_TARGET_HAS_rotv_vec; 1666 1669 case INDEX_op_ssadd_vec: 1667 1670 case INDEX_op_usadd_vec: 1668 1671 case INDEX_op_sssub_vec: