qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

tcg: Dynamically allocate TCGOps

With no fixed-size array allocation, we can no longer overflow the op buffer.
This will be important as upcoming optimizations related to host vectors
may expand the number of ops used.

Use QTAILQ to link the ops together.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

+77 -157
+3 -6
include/exec/gen-icount.h
··· 5 5 6 6 /* Helpers for instruction counting code generation. */ 7 7 8 - static int icount_start_insn_idx; 8 + static TCGOp *icount_start_insn; 9 9 10 10 static inline void gen_tb_start(TranslationBlock *tb) 11 11 { ··· 26 26 /* We emit a movi with a dummy immediate argument. Keep the insn index 27 27 * of the movi so that we later (when we know the actual insn count) 28 28 * can update the immediate argument with the actual insn count. */ 29 - icount_start_insn_idx = tcg_op_buf_count(); 30 29 tcg_gen_movi_i32(imm, 0xdeadbeef); 30 + icount_start_insn = tcg_last_op(); 31 31 32 32 tcg_gen_sub_i32(count, count, imm); 33 33 tcg_temp_free_i32(imm); ··· 48 48 if (tb_cflags(tb) & CF_USE_ICOUNT) { 49 49 /* Update the num_insn immediate parameter now that we know 50 50 * the actual insn count. */ 51 - tcg_set_insn_param(icount_start_insn_idx, 1, num_insns); 51 + tcg_set_insn_param(icount_start_insn, 1, num_insns); 52 52 } 53 53 54 54 gen_set_label(tcg_ctx->exitreq_label); 55 55 tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED); 56 - 57 - /* Terminate the linked list. */ 58 - tcg_ctx->gen_op_buf[tcg_ctx->gen_op_buf[0].prev].next = 0; 59 56 } 60 57 61 58 static inline void gen_io_start(void)
+5
include/qemu/queue.h
··· 425 425 (var); \ 426 426 (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) 427 427 428 + #define QTAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev_var) \ 429 + for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ 430 + (var) && ((prev_var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)), 1); \ 431 + (var) = (prev_var)) 432 + 428 433 /* 429 434 * Tail queue access methods. 430 435 */
+1 -1
target/arm/translate-a64.c
··· 11290 11290 { 11291 11291 DisasContext *dc = container_of(dcbase, DisasContext, base); 11292 11292 11293 - dc->insn_start_idx = tcg_op_buf_count(); 11294 11293 tcg_gen_insn_start(dc->pc, 0, 0); 11294 + dc->insn_start = tcg_last_op(); 11295 11295 } 11296 11296 11297 11297 static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
+1 -1
target/arm/translate.c
··· 12096 12096 { 12097 12097 DisasContext *dc = container_of(dcbase, DisasContext, base); 12098 12098 12099 - dc->insn_start_idx = tcg_op_buf_count(); 12100 12099 tcg_gen_insn_start(dc->pc, 12101 12100 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1), 12102 12101 0); 12102 + dc->insn_start = tcg_last_op(); 12103 12103 } 12104 12104 12105 12105 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
+5 -5
target/arm/translate.h
··· 66 66 bool ss_same_el; 67 67 /* Bottom two bits of XScale c15_cpar coprocessor access control reg */ 68 68 int c15_cpar; 69 - /* TCG op index of the current insn_start. */ 70 - int insn_start_idx; 69 + /* TCG op of the current insn_start. */ 70 + TCGOp *insn_start; 71 71 #define TMP_A64_MAX 16 72 72 int tmp_a64_count; 73 73 TCGv_i64 tmp_a64[TMP_A64_MAX]; ··· 117 117 syn >>= ARM_INSN_START_WORD2_SHIFT; 118 118 119 119 /* We check and clear insn_start_idx to catch multiple updates. */ 120 - assert(s->insn_start_idx != 0); 121 - tcg_set_insn_param(s->insn_start_idx, 2, syn); 122 - s->insn_start_idx = 0; 120 + assert(s->insn_start != NULL); 121 + tcg_set_insn_param(s->insn_start, 2, syn); 122 + s->insn_start = NULL; 123 123 } 124 124 125 125 /* is_jmp field values */
-2
target/cris/translate.c
··· 3297 3297 qemu_log("--------------\n"); 3298 3298 qemu_log("IN: %s\n", lookup_symbol(pc_start)); 3299 3299 log_target_disas(cs, pc_start, dc->pc - pc_start); 3300 - qemu_log("\nisize=%d osize=%d\n", 3301 - dc->pc - pc_start, tcg_op_buf_count()); 3302 3300 qemu_log_unlock(); 3303 3301 } 3304 3302 #endif
-2
target/lm32/translate.c
··· 1156 1156 qemu_log_lock(); 1157 1157 qemu_log("\n"); 1158 1158 log_target_disas(cs, pc_start, dc->pc - pc_start); 1159 - qemu_log("\nisize=%d osize=%d\n", 1160 - dc->pc - pc_start, tcg_op_buf_count()); 1161 1159 qemu_log_unlock(); 1162 1160 } 1163 1161 #endif
-4
target/microblaze/translate.c
··· 1808 1808 && qemu_log_in_addr_range(pc_start)) { 1809 1809 qemu_log_lock(); 1810 1810 qemu_log("--------------\n"); 1811 - #if DISAS_GNU 1812 1811 log_target_disas(cs, pc_start, dc->pc - pc_start); 1813 - #endif 1814 - qemu_log("\nisize=%d osize=%d\n", 1815 - dc->pc - pc_start, tcg_op_buf_count()); 1816 1812 qemu_log_unlock(); 1817 1813 } 1818 1814 #endif
+3 -13
tcg/optimize.c
··· 602 602 /* Propagate constants and copies, fold constant expressions. */ 603 603 void tcg_optimize(TCGContext *s) 604 604 { 605 - int oi, oi_next, nb_temps, nb_globals; 606 - TCGOp *prev_mb = NULL; 605 + int nb_temps, nb_globals; 606 + TCGOp *op, *op_next, *prev_mb = NULL; 607 607 struct tcg_temp_info *infos; 608 608 TCGTempSet temps_used; 609 609 ··· 617 617 bitmap_zero(temps_used.l, nb_temps); 618 618 infos = tcg_malloc(sizeof(struct tcg_temp_info) * nb_temps); 619 619 620 - for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) { 620 + QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { 621 621 tcg_target_ulong mask, partmask, affected; 622 622 int nb_oargs, nb_iargs, i; 623 623 TCGArg tmp; 624 - 625 - TCGOp * const op = &s->gen_op_buf[oi]; 626 624 TCGOpcode opc = op->opc; 627 625 const TCGOpDef *def = &tcg_op_defs[opc]; 628 - 629 - oi_next = op->next; 630 626 631 627 /* Count the arguments, and initialize the temps that are 632 628 going to be used */ ··· 1261 1257 rh = op->args[1]; 1262 1258 tcg_opt_gen_movi(s, op, rl, (int32_t)a); 1263 1259 tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32)); 1264 - 1265 - /* We've done all we need to do with the movi. Skip it. */ 1266 - oi_next = op2->next; 1267 1260 break; 1268 1261 } 1269 1262 goto do_default; ··· 1280 1273 rh = op->args[1]; 1281 1274 tcg_opt_gen_movi(s, op, rl, (int32_t)r); 1282 1275 tcg_opt_gen_movi(s, op2, rh, (int32_t)(r >> 32)); 1283 - 1284 - /* We've done all we need to do with the movi. Skip it. */ 1285 - oi_next = op2->next; 1286 1276 break; 1287 1277 } 1288 1278 goto do_default;
-24
tcg/tcg-op.c
··· 42 42 #define TCGV_HIGH TCGV_HIGH_link_error 43 43 #endif 44 44 45 - /* Note that this is optimized for sequential allocation during translate. 46 - Up to and including filling in the forward link immediately. We'll do 47 - proper termination of the end of the list after we finish translation. */ 48 - 49 - static inline TCGOp *tcg_emit_op(TCGOpcode opc) 50 - { 51 - TCGContext *ctx = tcg_ctx; 52 - int oi = ctx->gen_next_op_idx; 53 - int ni = oi + 1; 54 - int pi = oi - 1; 55 - TCGOp *op = &ctx->gen_op_buf[oi]; 56 - 57 - tcg_debug_assert(oi < OPC_BUF_SIZE); 58 - ctx->gen_op_buf[0].prev = oi; 59 - ctx->gen_next_op_idx = ni; 60 - 61 - memset(op, 0, offsetof(TCGOp, args)); 62 - op->opc = opc; 63 - op->prev = pi; 64 - op->next = ni; 65 - 66 - return op; 67 - } 68 - 69 45 void tcg_gen_op1(TCGOpcode opc, TCGArg a1) 70 46 { 71 47 TCGOp *op = tcg_emit_op(opc);
+45 -78
tcg/tcg.c
··· 862 862 s->goto_tb_issue_mask = 0; 863 863 #endif 864 864 865 - s->gen_op_buf[0].next = 1; 866 - s->gen_op_buf[0].prev = 0; 867 - s->gen_next_op_idx = 1; 865 + QTAILQ_INIT(&s->ops); 866 + QTAILQ_INIT(&s->free_ops); 868 867 } 869 868 870 869 static inline TCGTemp *tcg_temp_alloc(TCGContext *s) ··· 1339 1338 and endian swap in tcg_reg_alloc_call(). */ 1340 1339 void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) 1341 1340 { 1342 - TCGContext *s = tcg_ctx; 1343 1341 int i, real_args, nb_rets, pi; 1344 1342 unsigned sizemask, flags; 1345 1343 TCGHelperInfo *info; ··· 1395 1393 } 1396 1394 #endif /* TCG_TARGET_EXTEND_ARGS */ 1397 1395 1398 - i = s->gen_next_op_idx; 1399 - tcg_debug_assert(i < OPC_BUF_SIZE); 1400 - s->gen_op_buf[0].prev = i; 1401 - s->gen_next_op_idx = i + 1; 1402 - op = &s->gen_op_buf[i]; 1403 - 1404 - /* Set links for sequential allocation during translation. */ 1405 - memset(op, 0, offsetof(TCGOp, args)); 1406 - op->opc = INDEX_op_call; 1407 - op->prev = i - 1; 1408 - op->next = i + 1; 1396 + op = tcg_emit_op(INDEX_op_call); 1409 1397 1410 1398 pi = 0; 1411 1399 if (ret != NULL) { ··· 1622 1610 { 1623 1611 char buf[128]; 1624 1612 TCGOp *op; 1625 - int oi; 1626 1613 1627 - for (oi = s->gen_op_buf[0].next; oi != 0; oi = op->next) { 1614 + QTAILQ_FOREACH(op, &s->ops, link) { 1628 1615 int i, k, nb_oargs, nb_iargs, nb_cargs; 1629 1616 const TCGOpDef *def; 1630 1617 TCGOpcode c; 1631 1618 int col = 0; 1632 1619 1633 - op = &s->gen_op_buf[oi]; 1634 1620 c = op->opc; 1635 1621 def = &tcg_op_defs[c]; 1636 1622 1637 1623 if (c == INDEX_op_insn_start) { 1638 - col += qemu_log("%s ----", oi != s->gen_op_buf[0].next ? 
"\n" : ""); 1624 + col += qemu_log("\n ----"); 1639 1625 1640 1626 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { 1641 1627 target_ulong a; ··· 1898 1884 1899 1885 void tcg_op_remove(TCGContext *s, TCGOp *op) 1900 1886 { 1901 - int next = op->next; 1902 - int prev = op->prev; 1903 - 1904 - /* We should never attempt to remove the list terminator. */ 1905 - tcg_debug_assert(op != &s->gen_op_buf[0]); 1906 - 1907 - s->gen_op_buf[next].prev = prev; 1908 - s->gen_op_buf[prev].next = next; 1909 - 1910 - memset(op, 0, sizeof(*op)); 1887 + QTAILQ_REMOVE(&s->ops, op, link); 1888 + QTAILQ_INSERT_TAIL(&s->free_ops, op, link); 1911 1889 1912 1890 #ifdef CONFIG_PROFILER 1913 1891 atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1); 1914 1892 #endif 1915 1893 } 1916 1894 1917 - TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, 1918 - TCGOpcode opc, int nargs) 1895 + static TCGOp *tcg_op_alloc(TCGOpcode opc) 1919 1896 { 1920 - int oi = s->gen_next_op_idx; 1921 - int prev = old_op->prev; 1922 - int next = old_op - s->gen_op_buf; 1923 - TCGOp *new_op; 1897 + TCGContext *s = tcg_ctx; 1898 + TCGOp *op; 1899 + 1900 + if (likely(QTAILQ_EMPTY(&s->free_ops))) { 1901 + op = tcg_malloc(sizeof(TCGOp)); 1902 + } else { 1903 + op = QTAILQ_FIRST(&s->free_ops); 1904 + QTAILQ_REMOVE(&s->free_ops, op, link); 1905 + } 1906 + memset(op, 0, offsetof(TCGOp, link)); 1907 + op->opc = opc; 1924 1908 1925 - tcg_debug_assert(oi < OPC_BUF_SIZE); 1926 - s->gen_next_op_idx = oi + 1; 1909 + return op; 1910 + } 1927 1911 1928 - new_op = &s->gen_op_buf[oi]; 1929 - *new_op = (TCGOp){ 1930 - .opc = opc, 1931 - .prev = prev, 1932 - .next = next 1933 - }; 1934 - s->gen_op_buf[prev].next = oi; 1935 - old_op->prev = oi; 1912 + TCGOp *tcg_emit_op(TCGOpcode opc) 1913 + { 1914 + TCGOp *op = tcg_op_alloc(opc); 1915 + QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link); 1916 + return op; 1917 + } 1936 1918 1919 + TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, 1920 + TCGOpcode opc, int nargs) 1921 + { 
1922 + TCGOp *new_op = tcg_op_alloc(opc); 1923 + QTAILQ_INSERT_BEFORE(old_op, new_op, link); 1937 1924 return new_op; 1938 1925 } 1939 1926 1940 1927 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, 1941 1928 TCGOpcode opc, int nargs) 1942 1929 { 1943 - int oi = s->gen_next_op_idx; 1944 - int prev = old_op - s->gen_op_buf; 1945 - int next = old_op->next; 1946 - TCGOp *new_op; 1947 - 1948 - tcg_debug_assert(oi < OPC_BUF_SIZE); 1949 - s->gen_next_op_idx = oi + 1; 1950 - 1951 - new_op = &s->gen_op_buf[oi]; 1952 - *new_op = (TCGOp){ 1953 - .opc = opc, 1954 - .prev = prev, 1955 - .next = next 1956 - }; 1957 - s->gen_op_buf[next].prev = oi; 1958 - old_op->next = oi; 1959 - 1930 + TCGOp *new_op = tcg_op_alloc(opc); 1931 + QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link); 1960 1932 return new_op; 1961 1933 } 1962 1934 ··· 2006 1978 static void liveness_pass_1(TCGContext *s) 2007 1979 { 2008 1980 int nb_globals = s->nb_globals; 2009 - int oi, oi_prev; 1981 + TCGOp *op, *op_prev; 2010 1982 2011 1983 tcg_la_func_end(s); 2012 1984 2013 - for (oi = s->gen_op_buf[0].prev; oi != 0; oi = oi_prev) { 1985 + QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, TCGOpHead, link, op_prev) { 2014 1986 int i, nb_iargs, nb_oargs; 2015 1987 TCGOpcode opc_new, opc_new2; 2016 1988 bool have_opc_new2; 2017 1989 TCGLifeData arg_life = 0; 2018 1990 TCGTemp *arg_ts; 2019 - 2020 - TCGOp * const op = &s->gen_op_buf[oi]; 2021 1991 TCGOpcode opc = op->opc; 2022 1992 const TCGOpDef *def = &tcg_op_defs[opc]; 2023 - 2024 - oi_prev = op->prev; 2025 1993 2026 1994 switch (opc) { 2027 1995 case INDEX_op_call: ··· 2233 2201 static bool liveness_pass_2(TCGContext *s) 2234 2202 { 2235 2203 int nb_globals = s->nb_globals; 2236 - int nb_temps, i, oi, oi_next; 2204 + int nb_temps, i; 2237 2205 bool changes = false; 2206 + TCGOp *op, *op_next; 2238 2207 2239 2208 /* Create a temporary for each indirect global. 
*/ 2240 2209 for (i = 0; i < nb_globals; ++i) { ··· 2256 2225 its->state = TS_DEAD; 2257 2226 } 2258 2227 2259 - for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) { 2260 - TCGOp *op = &s->gen_op_buf[oi]; 2228 + QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { 2261 2229 TCGOpcode opc = op->opc; 2262 2230 const TCGOpDef *def = &tcg_op_defs[opc]; 2263 2231 TCGLifeData arg_life = op->life; 2264 2232 int nb_iargs, nb_oargs, call_flags; 2265 2233 TCGTemp *arg_ts, *dir_ts; 2266 - 2267 - oi_next = op->next; 2268 2234 2269 2235 if (opc == INDEX_op_call) { 2270 2236 nb_oargs = op->callo; ··· 3168 3134 #ifdef CONFIG_PROFILER 3169 3135 TCGProfile *prof = &s->prof; 3170 3136 #endif 3171 - int i, oi, oi_next, num_insns; 3137 + int i, num_insns; 3138 + TCGOp *op; 3172 3139 3173 3140 #ifdef CONFIG_PROFILER 3174 3141 { 3175 3142 int n; 3176 3143 3177 - n = s->gen_op_buf[0].prev + 1; 3144 + QTAILQ_FOREACH(op, &s->ops, link) { 3145 + n++; 3146 + } 3178 3147 atomic_set(&prof->op_count, prof->op_count + n); 3179 3148 if (n > prof->op_count_max) { 3180 3149 atomic_set(&prof->op_count_max, n); ··· 3260 3229 #endif 3261 3230 3262 3231 num_insns = -1; 3263 - for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) { 3264 - TCGOp * const op = &s->gen_op_buf[oi]; 3232 + QTAILQ_FOREACH(op, &s->ops, link) { 3265 3233 TCGOpcode opc = op->opc; 3266 3234 3267 - oi_next = op->next; 3268 3235 #ifdef CONFIG_PROFILER 3269 3236 atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1); 3270 3237 #endif
+14 -21
tcg/tcg.h
··· 29 29 #include "cpu.h" 30 30 #include "exec/tb-context.h" 31 31 #include "qemu/bitops.h" 32 + #include "qemu/queue.h" 32 33 #include "tcg-mo.h" 33 34 #include "tcg-target.h" 34 35 ··· 48 49 * and up to 4 + N parameters on 64-bit archs 49 50 * (N = number of input arguments + output arguments). */ 50 51 #define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS)) 51 - #define OPC_BUF_SIZE 640 52 - #define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR) 53 52 54 53 #define CPU_TEMP_BUF_NLONGS 128 55 54 ··· 572 571 unsigned callo : 2; /* 14 */ 573 572 unsigned : 2; /* 16 */ 574 573 575 - /* Index of the prev/next op, or 0 for the end of the list. */ 576 - unsigned prev : 16; /* 32 */ 577 - unsigned next : 16; /* 48 */ 574 + /* Lifetime data of the operands. */ 575 + unsigned life : 16; /* 32 */ 578 576 579 - /* Lifetime data of the operands. */ 580 - unsigned life : 16; /* 64 */ 577 + /* Next and previous opcodes. */ 578 + QTAILQ_ENTRY(TCGOp) link; 581 579 582 580 /* Arguments for the opcode. */ 583 581 TCGArg args[MAX_OPC_PARAM]; 584 582 } TCGOp; 585 583 586 - /* Make sure that we don't expand the structure without noticing. */ 587 - QEMU_BUILD_BUG_ON(sizeof(TCGOp) != 8 + sizeof(TCGArg) * MAX_OPC_PARAM); 588 - 589 584 /* Make sure operands fit in the bitfields above. */ 590 585 QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8)); 591 - QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 16)); 592 586 593 587 typedef struct TCGProfile { 594 588 int64_t tb_count1; ··· 642 636 int goto_tb_issue_mask; 643 637 #endif 644 638 645 - int gen_next_op_idx; 646 - 647 639 /* Code generation. Note that we specifically do not use tcg_insn_unit 648 640 here, because there's too much arithmetic throughout that relies 649 641 on addition and subtraction working on bytes. 
Rely on the GCC ··· 674 666 TCGTempSet free_temps[TCG_TYPE_COUNT * 2]; 675 667 TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ 676 668 669 + QTAILQ_HEAD(TCGOpHead, TCGOp) ops, free_ops; 670 + 677 671 /* Tells which temporary holds a given register. 678 672 It does not take into account fixed registers */ 679 673 TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS]; 680 - 681 - TCGOp gen_op_buf[OPC_BUF_SIZE]; 682 674 683 675 uint16_t gen_insn_end_off[TCG_MAX_INSNS]; 684 676 target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS]; ··· 769 761 } 770 762 #endif 771 763 772 - static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v) 764 + static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v) 773 765 { 774 - tcg_ctx->gen_op_buf[op_idx].args[arg] = v; 766 + op->args[arg] = v; 775 767 } 776 768 777 - /* The number of opcodes emitted so far. */ 778 - static inline int tcg_op_buf_count(void) 769 + /* The last op that was emitted. */ 770 + static inline TCGOp *tcg_last_op(void) 779 771 { 780 - return tcg_ctx->gen_next_op_idx; 772 + return QTAILQ_LAST(&tcg_ctx->ops, TCGOpHead); 781 773 } 782 774 783 775 /* Test for whether to terminate the TB for using too many opcodes. */ 784 776 static inline bool tcg_op_buf_full(void) 785 777 { 786 - return tcg_op_buf_count() >= OPC_MAX_SIZE; 778 + return false; 787 779 } 788 780 789 781 /* pool based memory allocation */ ··· 967 959 968 960 void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args); 969 961 962 + TCGOp *tcg_emit_op(TCGOpcode opc); 970 963 void tcg_op_remove(TCGContext *s, TCGOp *op); 971 964 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg); 972 965 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);