qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

Merge remote-tracking branch 'remotes/philmd-gitlab/tags/target_renesas_rx-20200320' into staging

Introduce the architectural part of the Renesas RX
architecture emulation, developed by Yoshinori Sato.

CI jobs results:
https://gitlab.com/philmd/qemu/pipelines/127886344
https://travis-ci.org/github/philmd/qemu/builds/664579420

# gpg: Signature made Fri 20 Mar 2020 10:27:32 GMT
# gpg: using RSA key FAABE75E12917221DCFD6BB2E3E32C2CDEADC0DE
# gpg: Good signature from "Philippe Mathieu-Daudé (F4BUG) <f4bug@amsat.org>" [full]
# Primary key fingerprint: FAAB E75E 1291 7221 DCFD 6BB2 E3E3 2C2C DEAD C0DE

* remotes/philmd-gitlab/tags/target_renesas_rx-20200320:
Add rx-softmmu
target/rx: Dump bytes for each insn during disassembly
target/rx: Collect all bytes during disassembly
target/rx: Emit all disassembly in one prt()
target/rx: Use prt_ldmi for XCHG_mr disassembly
target/rx: Replace operand with prt_ldmi in disassembler
target/rx: Disassemble rx_index_addr into a string
target/rx: RX disassembler
target/rx: CPU definitions
target/rx: TCG helpers
target/rx: TCG translation
MAINTAINERS: Add entry for the Renesas RX architecture
hw/registerfields.h: Add 8bit and 16bit register macros

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

+5897 -2
+5
MAINTAINERS
··· 277 277 F: linux-user/host/riscv32/ 278 278 F: linux-user/host/riscv64/ 279 279 280 + RENESAS RX CPUs 281 + M: Yoshinori Sato <ysato@users.sourceforge.jp> 282 + S: Maintained 283 + F: target/rx/ 284 + 280 285 S390 TCG CPUs 281 286 M: Richard Henderson <rth@twiddle.net> 282 287 M: David Hildenbrand <david@redhat.com>
+2
arch_init.c
··· 77 77 #define QEMU_ARCH QEMU_ARCH_PPC 78 78 #elif defined(TARGET_RISCV) 79 79 #define QEMU_ARCH QEMU_ARCH_RISCV 80 + #elif defined(TARGET_RX) 81 + #define QEMU_ARCH QEMU_ARCH_RX 80 82 #elif defined(TARGET_S390X) 81 83 #define QEMU_ARCH QEMU_ARCH_S390X 82 84 #elif defined(TARGET_SH4)
+10 -1
configure
··· 4227 4227 fdt_required=no 4228 4228 for target in $target_list; do 4229 4229 case $target in 4230 - aarch64*-softmmu|arm*-softmmu|ppc*-softmmu|microblaze*-softmmu|mips64el-softmmu|riscv*-softmmu) 4230 + aarch64*-softmmu|arm*-softmmu|ppc*-softmmu|microblaze*-softmmu|mips64el-softmmu|riscv*-softmmu|rx-softmmu) 4231 4231 fdt_required=yes 4232 4232 ;; 4233 4233 esac ··· 7912 7912 mttcg=yes 7913 7913 gdb_xml_files="riscv-64bit-cpu.xml riscv-32bit-fpu.xml riscv-64bit-fpu.xml riscv-64bit-csr.xml riscv-64bit-virtual.xml" 7914 7914 ;; 7915 + rx) 7916 + TARGET_ARCH=rx 7917 + bflt="yes" 7918 + target_compiler=$cross_cc_rx 7919 + gdb_xml_files="rx-core.xml" 7920 + ;; 7915 7921 sh4|sh4eb) 7916 7922 TARGET_ARCH=sh4 7917 7923 bflt="yes" ··· 8092 8098 ;; 8093 8099 riscv*) 8094 8100 disas_config "RISCV" 8101 + ;; 8102 + rx) 8103 + disas_config "RX" 8095 8104 ;; 8096 8105 s390*) 8097 8106 disas_config "S390"
+2
default-configs/rx-softmmu.mak
··· 1 + # Default configuration for rx-softmmu 2 +
+70
gdb-xml/rx-core.xml
··· 1 + <?xml version="1.0"?> 2 + <!-- Copyright (C) 2019 Free Software Foundation, Inc. 3 + 4 + Copying and distribution of this file, with or without modification, 5 + are permitted in any medium without royalty provided the copyright 6 + notice and this notice are preserved. --> 7 + 8 + <!DOCTYPE feature SYSTEM "gdb-target.dtd"> 9 + <feature name="org.gnu.gdb.rx.core"> 10 + <reg name="r0" bitsize="32" type="data_ptr"/> 11 + <reg name="r1" bitsize="32" type="uint32"/> 12 + <reg name="r2" bitsize="32" type="uint32"/> 13 + <reg name="r3" bitsize="32" type="uint32"/> 14 + <reg name="r4" bitsize="32" type="uint32"/> 15 + <reg name="r5" bitsize="32" type="uint32"/> 16 + <reg name="r6" bitsize="32" type="uint32"/> 17 + <reg name="r7" bitsize="32" type="uint32"/> 18 + <reg name="r8" bitsize="32" type="uint32"/> 19 + <reg name="r9" bitsize="32" type="uint32"/> 20 + <reg name="r10" bitsize="32" type="uint32"/> 21 + <reg name="r11" bitsize="32" type="uint32"/> 22 + <reg name="r12" bitsize="32" type="uint32"/> 23 + <reg name="r13" bitsize="32" type="uint32"/> 24 + <reg name="r14" bitsize="32" type="uint32"/> 25 + <reg name="r15" bitsize="32" type="uint32"/> 26 + 27 + <flags id="psw_flags" size="4"> 28 + <field name="C" start="0" end="0"/> 29 + <field name="Z" start="1" end="1"/> 30 + <field name="S" start="2" end="2"/> 31 + <field name="O" start="3" end="3"/> 32 + <field name="I" start="16" end="16"/> 33 + <field name="U" start="17" end="17"/> 34 + <field name="PM" start="20" end="20"/> 35 + <field name="IPL" start="24" end="27"/> 36 + </flags> 37 + 38 + <flags id="fpsw_flags" size="4"> 39 + <field name="RM" start="0" end="1"/> 40 + <field name="CV" start="2" end="2"/> 41 + <field name="CO" start="3" end="3"/> 42 + <field name="CZ" start="4" end="4"/> 43 + <field name="CU" start="5" end="5"/> 44 + <field name="CX" start="6" end="6"/> 45 + <field name="CE" start="7" end="7"/> 46 + <field name="DN" start="8" end="8"/> 47 + <field name="EV" start="10" end="10"/> 48 + <field 
name="EO" start="11" end="11"/> 49 + <field name="EZ" start="12" end="12"/> 50 + <field name="EU" start="13" end="13"/> 51 + <field name="EX" start="14" end="14"/> 52 + <field name="FV" start="26" end="26"/> 53 + <field name="FO" start="27" end="27"/> 54 + <field name="FZ" start="28" end="28"/> 55 + <field name="FU" start="29" end="29"/> 56 + <field name="FX" start="30" end="30"/> 57 + <field name="FS" start="31" end="31"/> 58 + </flags> 59 + 60 + <reg name="usp" bitsize="32" type="data_ptr"/> 61 + <reg name="isp" bitsize="32" type="data_ptr"/> 62 + <reg name="psw" bitsize="32" type="psw_flags"/> 63 + <reg name="pc" bitsize="32" type="code_ptr"/> 64 + <reg name="intb" bitsize="32" type="data_ptr"/> 65 + <reg name="bpsw" bitsize="32" type="psw_flags"/> 66 + <reg name="bpc" bitsize="32" type="code_ptr"/> 67 + <reg name="fintv" bitsize="32" type="code_ptr"/> 68 + <reg name="fpsw" bitsize="32" type="fpsw_flags"/> 69 + <reg name="acc" bitsize="64" type="uint64"/> 70 + </feature>
+5
include/disas/dis-asm.h
··· 226 226 #define bfd_mach_nios2r2 2 227 227 bfd_arch_lm32, /* Lattice Mico32 */ 228 228 #define bfd_mach_lm32 1 229 + bfd_arch_rx, /* Renesas RX */ 230 + #define bfd_mach_rx 0x75 231 + #define bfd_mach_rx_v2 0x76 232 + #define bfd_mach_rx_v3 0x77 229 233 bfd_arch_last 230 234 }; 231 235 #define bfd_mach_s390_31 31 ··· 436 440 int print_insn_xtensa (bfd_vma, disassemble_info*); 437 441 int print_insn_riscv32 (bfd_vma, disassemble_info*); 438 442 int print_insn_riscv64 (bfd_vma, disassemble_info*); 443 + int print_insn_rx(bfd_vma, disassemble_info *); 439 444 440 445 #if 0 441 446 /* Fetch the disassembler for a given BFD, if that support is available. */
+1
include/exec/poison.h
··· 26 26 #pragma GCC poison TARGET_PPC 27 27 #pragma GCC poison TARGET_PPC64 28 28 #pragma GCC poison TARGET_ABI32 29 + #pragma GCC poison TARGET_RX 29 30 #pragma GCC poison TARGET_S390X 30 31 #pragma GCC poison TARGET_SH4 31 32 #pragma GCC poison TARGET_SPARC
+30
include/hw/registerfields.h
··· 22 22 enum { A_ ## reg = (addr) }; \ 23 23 enum { R_ ## reg = (addr) / 4 }; 24 24 25 + #define REG8(reg, addr) \ 26 + enum { A_ ## reg = (addr) }; \ 27 + enum { R_ ## reg = (addr) }; 28 + 29 + #define REG16(reg, addr) \ 30 + enum { A_ ## reg = (addr) }; \ 31 + enum { R_ ## reg = (addr) / 2 }; 32 + 25 33 /* Define SHIFT, LENGTH and MASK constants for a field within a register */ 26 34 27 35 /* This macro will define R_FOO_BAR_MASK, R_FOO_BAR_SHIFT and R_FOO_BAR_LENGTH ··· 34 42 MAKE_64BIT_MASK(shift, length)}; 35 43 36 44 /* Extract a field from a register */ 45 + #define FIELD_EX8(storage, reg, field) \ 46 + extract8((storage), R_ ## reg ## _ ## field ## _SHIFT, \ 47 + R_ ## reg ## _ ## field ## _LENGTH) 48 + #define FIELD_EX16(storage, reg, field) \ 49 + extract16((storage), R_ ## reg ## _ ## field ## _SHIFT, \ 50 + R_ ## reg ## _ ## field ## _LENGTH) 37 51 #define FIELD_EX32(storage, reg, field) \ 38 52 extract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ 39 53 R_ ## reg ## _ ## field ## _LENGTH) ··· 49 63 * Assigning values larger then the target field will result in 50 64 * compilation warnings. 51 65 */ 66 + #define FIELD_DP8(storage, reg, field, val) ({ \ 67 + struct { \ 68 + unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \ 69 + } v = { .v = val }; \ 70 + uint8_t d; \ 71 + d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ 72 + R_ ## reg ## _ ## field ## _LENGTH, v.v); \ 73 + d; }) 74 + #define FIELD_DP16(storage, reg, field, val) ({ \ 75 + struct { \ 76 + unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \ 77 + } v = { .v = val }; \ 78 + uint16_t d; \ 79 + d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \ 80 + R_ ## reg ## _ ## field ## _LENGTH, v.v); \ 81 + d; }) 52 82 #define FIELD_DP32(storage, reg, field, val) ({ \ 53 83 struct { \ 54 84 unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
+1
include/sysemu/arch_init.h
··· 24 24 QEMU_ARCH_NIOS2 = (1 << 17), 25 25 QEMU_ARCH_HPPA = (1 << 18), 26 26 QEMU_ARCH_RISCV = (1 << 19), 27 + QEMU_ARCH_RX = (1 << 20), 27 28 28 29 QEMU_ARCH_NONE = (1 << 31), 29 30 };
+3 -1
qapi/machine.json
··· 16 16 # individual target constants are not documented here, for the time 17 17 # being. 18 18 # 19 + # @rx: since 5.0 20 + # 19 21 # Notes: The resulting QMP strings can be appended to the "qemu-system-" 20 22 # prefix to produce the corresponding QEMU executable name. This 21 23 # is true even for "qemu-system-x86_64". ··· 26 28 'data' : [ 'aarch64', 'alpha', 'arm', 'cris', 'hppa', 'i386', 'lm32', 27 29 'm68k', 'microblaze', 'microblazeel', 'mips', 'mips64', 28 30 'mips64el', 'mipsel', 'moxie', 'nios2', 'or1k', 'ppc', 29 - 'ppc64', 'riscv32', 'riscv64', 's390x', 'sh4', 31 + 'ppc64', 'riscv32', 'riscv64', 'rx', 's390x', 'sh4', 30 32 'sh4eb', 'sparc', 'sparc64', 'tricore', 'unicore32', 31 33 'x86_64', 'xtensa', 'xtensaeb' ] } 32 34
+11
target/rx/Makefile.objs
··· 1 + obj-y += translate.o op_helper.o helper.o cpu.o gdbstub.o disas.o 2 + 3 + DECODETREE = $(SRC_PATH)/scripts/decodetree.py 4 + 5 + target/rx/decode.inc.c: \ 6 + $(SRC_PATH)/target/rx/insns.decode $(DECODETREE) 7 + $(call quiet-command,\ 8 + $(PYTHON) $(DECODETREE) --varinsnwidth 32 -o $@ $<, "GEN", $(TARGET_DIR)$@) 9 + 10 + target/rx/translate.o: target/rx/decode.inc.c 11 + target/rx/disas.o: target/rx/decode.inc.c
+30
target/rx/cpu-param.h
··· 1 + /* 2 + * RX cpu parameters 3 + * 4 + * Copyright (c) 2019 Yoshinori Sato 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2 or later, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #ifndef RX_CPU_PARAM_H 20 + #define RX_CPU_PARAM_H 21 + 22 + #define TARGET_LONG_BITS 32 23 + #define TARGET_PAGE_BITS 12 24 + 25 + #define TARGET_PHYS_ADDR_SPACE_BITS 32 26 + #define TARGET_VIRT_ADDR_SPACE_BITS 32 27 + 28 + #define NB_MMU_MODES 1 29 + 30 + #endif
+53
target/rx/cpu-qom.h
··· 1 + /* 2 + * RX CPU 3 + * 4 + * Copyright (c) 2019 Yoshinori Sato 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2 or later, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #ifndef RX_CPU_QOM_H 20 + #define RX_CPU_QOM_H 21 + 22 + #include "hw/core/cpu.h" 23 + 24 + #define TYPE_RX_CPU "rx-cpu" 25 + 26 + #define TYPE_RX62N_CPU RX_CPU_TYPE_NAME("rx62n") 27 + 28 + #define RXCPU_CLASS(klass) \ 29 + OBJECT_CLASS_CHECK(RXCPUClass, (klass), TYPE_RX_CPU) 30 + #define RXCPU(obj) \ 31 + OBJECT_CHECK(RXCPU, (obj), TYPE_RX_CPU) 32 + #define RXCPU_GET_CLASS(obj) \ 33 + OBJECT_GET_CLASS(RXCPUClass, (obj), TYPE_RX_CPU) 34 + 35 + /* 36 + * RXCPUClass: 37 + * @parent_realize: The parent class' realize handler. 38 + * @parent_reset: The parent class' reset handler. 39 + * 40 + * A RX CPU model. 41 + */ 42 + typedef struct RXCPUClass { 43 + /*< private >*/ 44 + CPUClass parent_class; 45 + /*< public >*/ 46 + 47 + DeviceRealize parent_realize; 48 + DeviceReset parent_reset; 49 + } RXCPUClass; 50 + 51 + #define CPUArchState struct CPURXState 52 + 53 + #endif
+225
target/rx/cpu.c
··· 1 + /* 2 + * QEMU RX CPU 3 + * 4 + * Copyright (c) 2019 Yoshinori Sato 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2 or later, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #include "qemu/osdep.h" 20 + #include "qemu/qemu-print.h" 21 + #include "qapi/error.h" 22 + #include "cpu.h" 23 + #include "qemu-common.h" 24 + #include "migration/vmstate.h" 25 + #include "exec/exec-all.h" 26 + #include "hw/loader.h" 27 + #include "fpu/softfloat.h" 28 + 29 + static void rx_cpu_set_pc(CPUState *cs, vaddr value) 30 + { 31 + RXCPU *cpu = RXCPU(cs); 32 + 33 + cpu->env.pc = value; 34 + } 35 + 36 + static void rx_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) 37 + { 38 + RXCPU *cpu = RXCPU(cs); 39 + 40 + cpu->env.pc = tb->pc; 41 + } 42 + 43 + static bool rx_cpu_has_work(CPUState *cs) 44 + { 45 + return cs->interrupt_request & 46 + (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR); 47 + } 48 + 49 + static void rx_cpu_reset(DeviceState *dev) 50 + { 51 + RXCPU *cpu = RXCPU(dev); 52 + RXCPUClass *rcc = RXCPU_GET_CLASS(cpu); 53 + CPURXState *env = &cpu->env; 54 + uint32_t *resetvec; 55 + 56 + rcc->parent_reset(dev); 57 + 58 + memset(env, 0, offsetof(CPURXState, end_reset_fields)); 59 + 60 + resetvec = rom_ptr(0xfffffffc, 4); 61 + if (resetvec) { 62 + /* In the case of kernel, it is ignored because it is not set. 
*/ 63 + env->pc = ldl_p(resetvec); 64 + } 65 + rx_cpu_unpack_psw(env, 0, 1); 66 + env->regs[0] = env->isp = env->usp = 0; 67 + env->fpsw = 0; 68 + set_flush_to_zero(1, &env->fp_status); 69 + set_flush_inputs_to_zero(1, &env->fp_status); 70 + } 71 + 72 + static void rx_cpu_list_entry(gpointer data, gpointer user_data) 73 + { 74 + ObjectClass *oc = data; 75 + 76 + qemu_printf(" %s\n", object_class_get_name(oc)); 77 + } 78 + 79 + void rx_cpu_list(void) 80 + { 81 + GSList *list; 82 + list = object_class_get_list_sorted(TYPE_RX_CPU, false); 83 + qemu_printf("Available CPUs:\n"); 84 + g_slist_foreach(list, rx_cpu_list_entry, NULL); 85 + g_slist_free(list); 86 + } 87 + 88 + static ObjectClass *rx_cpu_class_by_name(const char *cpu_model) 89 + { 90 + ObjectClass *oc; 91 + char *typename; 92 + 93 + oc = object_class_by_name(cpu_model); 94 + if (oc != NULL && object_class_dynamic_cast(oc, TYPE_RX_CPU) != NULL && 95 + !object_class_is_abstract(oc)) { 96 + return oc; 97 + } 98 + typename = g_strdup_printf(RX_CPU_TYPE_NAME("%s"), cpu_model); 99 + oc = object_class_by_name(typename); 100 + g_free(typename); 101 + if (oc != NULL && object_class_is_abstract(oc)) { 102 + oc = NULL; 103 + } 104 + 105 + return oc; 106 + } 107 + 108 + static void rx_cpu_realize(DeviceState *dev, Error **errp) 109 + { 110 + CPUState *cs = CPU(dev); 111 + RXCPUClass *rcc = RXCPU_GET_CLASS(dev); 112 + Error *local_err = NULL; 113 + 114 + cpu_exec_realizefn(cs, &local_err); 115 + if (local_err != NULL) { 116 + error_propagate(errp, local_err); 117 + return; 118 + } 119 + 120 + qemu_init_vcpu(cs); 121 + cpu_reset(cs); 122 + 123 + rcc->parent_realize(dev, errp); 124 + } 125 + 126 + static void rx_cpu_set_irq(void *opaque, int no, int request) 127 + { 128 + RXCPU *cpu = opaque; 129 + CPUState *cs = CPU(cpu); 130 + int irq = request & 0xff; 131 + 132 + static const int mask[] = { 133 + [RX_CPU_IRQ] = CPU_INTERRUPT_HARD, 134 + [RX_CPU_FIR] = CPU_INTERRUPT_FIR, 135 + }; 136 + if (irq) { 137 + cpu->env.req_irq = 
irq; 138 + cpu->env.req_ipl = (request >> 8) & 0x0f; 139 + cpu_interrupt(cs, mask[no]); 140 + } else { 141 + cpu_reset_interrupt(cs, mask[no]); 142 + } 143 + } 144 + 145 + static void rx_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) 146 + { 147 + info->mach = bfd_mach_rx; 148 + info->print_insn = print_insn_rx; 149 + } 150 + 151 + static bool rx_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, 152 + MMUAccessType access_type, int mmu_idx, 153 + bool probe, uintptr_t retaddr) 154 + { 155 + uint32_t address, physical, prot; 156 + 157 + /* Linear mapping */ 158 + address = physical = addr & TARGET_PAGE_MASK; 159 + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 160 + tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE); 161 + return true; 162 + } 163 + 164 + static void rx_cpu_init(Object *obj) 165 + { 166 + CPUState *cs = CPU(obj); 167 + RXCPU *cpu = RXCPU(obj); 168 + CPURXState *env = &cpu->env; 169 + 170 + cpu_set_cpustate_pointers(cpu); 171 + cs->env_ptr = env; 172 + qdev_init_gpio_in(DEVICE(cpu), rx_cpu_set_irq, 2); 173 + } 174 + 175 + static void rx_cpu_class_init(ObjectClass *klass, void *data) 176 + { 177 + DeviceClass *dc = DEVICE_CLASS(klass); 178 + CPUClass *cc = CPU_CLASS(klass); 179 + RXCPUClass *rcc = RXCPU_CLASS(klass); 180 + 181 + device_class_set_parent_realize(dc, rx_cpu_realize, 182 + &rcc->parent_realize); 183 + device_class_set_parent_reset(dc, rx_cpu_reset, 184 + &rcc->parent_reset); 185 + 186 + cc->class_by_name = rx_cpu_class_by_name; 187 + cc->has_work = rx_cpu_has_work; 188 + cc->do_interrupt = rx_cpu_do_interrupt; 189 + cc->cpu_exec_interrupt = rx_cpu_exec_interrupt; 190 + cc->dump_state = rx_cpu_dump_state; 191 + cc->set_pc = rx_cpu_set_pc; 192 + cc->synchronize_from_tb = rx_cpu_synchronize_from_tb; 193 + cc->gdb_read_register = rx_cpu_gdb_read_register; 194 + cc->gdb_write_register = rx_cpu_gdb_write_register; 195 + cc->get_phys_page_debug = rx_cpu_get_phys_page_debug; 196 + cc->disas_set_info = 
rx_cpu_disas_set_info; 197 + cc->tcg_initialize = rx_translate_init; 198 + cc->tlb_fill = rx_cpu_tlb_fill; 199 + 200 + cc->gdb_num_core_regs = 26; 201 + cc->gdb_core_xml_file = "rx-core.xml"; 202 + } 203 + 204 + static const TypeInfo rx_cpu_info = { 205 + .name = TYPE_RX_CPU, 206 + .parent = TYPE_CPU, 207 + .instance_size = sizeof(RXCPU), 208 + .instance_init = rx_cpu_init, 209 + .abstract = true, 210 + .class_size = sizeof(RXCPUClass), 211 + .class_init = rx_cpu_class_init, 212 + }; 213 + 214 + static const TypeInfo rx62n_rx_cpu_info = { 215 + .name = TYPE_RX62N_CPU, 216 + .parent = TYPE_RX_CPU, 217 + }; 218 + 219 + static void rx_cpu_register_types(void) 220 + { 221 + type_register_static(&rx_cpu_info); 222 + type_register_static(&rx62n_rx_cpu_info); 223 + } 224 + 225 + type_init(rx_cpu_register_types)
+180
target/rx/cpu.h
··· 1 + /* 2 + * RX emulation definition 3 + * 4 + * Copyright (c) 2019 Yoshinori Sato 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2 or later, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #ifndef RX_CPU_H 20 + #define RX_CPU_H 21 + 22 + #include "qemu/bitops.h" 23 + #include "qemu-common.h" 24 + #include "hw/registerfields.h" 25 + #include "cpu-qom.h" 26 + 27 + #include "exec/cpu-defs.h" 28 + 29 + /* PSW define */ 30 + REG32(PSW, 0) 31 + FIELD(PSW, C, 0, 1) 32 + FIELD(PSW, Z, 1, 1) 33 + FIELD(PSW, S, 2, 1) 34 + FIELD(PSW, O, 3, 1) 35 + FIELD(PSW, I, 16, 1) 36 + FIELD(PSW, U, 17, 1) 37 + FIELD(PSW, PM, 20, 1) 38 + FIELD(PSW, IPL, 24, 4) 39 + 40 + /* FPSW define */ 41 + REG32(FPSW, 0) 42 + FIELD(FPSW, RM, 0, 2) 43 + FIELD(FPSW, CV, 2, 1) 44 + FIELD(FPSW, CO, 3, 1) 45 + FIELD(FPSW, CZ, 4, 1) 46 + FIELD(FPSW, CU, 5, 1) 47 + FIELD(FPSW, CX, 6, 1) 48 + FIELD(FPSW, CE, 7, 1) 49 + FIELD(FPSW, CAUSE, 2, 6) 50 + FIELD(FPSW, DN, 8, 1) 51 + FIELD(FPSW, EV, 10, 1) 52 + FIELD(FPSW, EO, 11, 1) 53 + FIELD(FPSW, EZ, 12, 1) 54 + FIELD(FPSW, EU, 13, 1) 55 + FIELD(FPSW, EX, 14, 1) 56 + FIELD(FPSW, ENABLE, 10, 5) 57 + FIELD(FPSW, FV, 26, 1) 58 + FIELD(FPSW, FO, 27, 1) 59 + FIELD(FPSW, FZ, 28, 1) 60 + FIELD(FPSW, FU, 29, 1) 61 + FIELD(FPSW, FX, 30, 1) 62 + FIELD(FPSW, FLAGS, 26, 4) 63 + FIELD(FPSW, FS, 31, 1) 64 + 65 + enum { 66 + NUM_REGS = 16, 67 + }; 68 + 69 + typedef struct CPURXState { 70 + /* CPU registers */ 71 + uint32_t 
regs[NUM_REGS]; /* general registers */ 72 + uint32_t psw_o; /* O bit of status register */ 73 + uint32_t psw_s; /* S bit of status register */ 74 + uint32_t psw_z; /* Z bit of status register */ 75 + uint32_t psw_c; /* C bit of status register */ 76 + uint32_t psw_u; 77 + uint32_t psw_i; 78 + uint32_t psw_pm; 79 + uint32_t psw_ipl; 80 + uint32_t bpsw; /* backup status */ 81 + uint32_t bpc; /* backup pc */ 82 + uint32_t isp; /* global base register */ 83 + uint32_t usp; /* vector base register */ 84 + uint32_t pc; /* program counter */ 85 + uint32_t intb; /* interrupt vector */ 86 + uint32_t fintv; 87 + uint32_t fpsw; 88 + uint64_t acc; 89 + 90 + /* Fields up to this point are cleared by a CPU reset */ 91 + struct {} end_reset_fields; 92 + 93 + /* Internal use */ 94 + uint32_t in_sleep; 95 + uint32_t req_irq; /* Requested interrupt no (hard) */ 96 + uint32_t req_ipl; /* Requested interrupt level */ 97 + uint32_t ack_irq; /* execute irq */ 98 + uint32_t ack_ipl; /* execute ipl */ 99 + float_status fp_status; 100 + qemu_irq ack; /* Interrupt acknowledge */ 101 + } CPURXState; 102 + 103 + /* 104 + * RXCPU: 105 + * @env: #CPURXState 106 + * 107 + * A RX CPU 108 + */ 109 + struct RXCPU { 110 + /*< private >*/ 111 + CPUState parent_obj; 112 + /*< public >*/ 113 + 114 + CPUNegativeOffsetState neg; 115 + CPURXState env; 116 + }; 117 + 118 + typedef struct RXCPU RXCPU; 119 + typedef RXCPU ArchCPU; 120 + 121 + #define ENV_OFFSET offsetof(RXCPU, env) 122 + 123 + #define RX_CPU_TYPE_SUFFIX "-" TYPE_RX_CPU 124 + #define RX_CPU_TYPE_NAME(model) model RX_CPU_TYPE_SUFFIX 125 + #define CPU_RESOLVING_TYPE TYPE_RX_CPU 126 + 127 + const char *rx_crname(uint8_t cr); 128 + void rx_cpu_do_interrupt(CPUState *cpu); 129 + bool rx_cpu_exec_interrupt(CPUState *cpu, int int_req); 130 + void rx_cpu_dump_state(CPUState *cpu, FILE *f, int flags); 131 + int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); 132 + int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); 
133 + hwaddr rx_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); 134 + 135 + void rx_translate_init(void); 136 + int cpu_rx_signal_handler(int host_signum, void *pinfo, 137 + void *puc); 138 + 139 + void rx_cpu_list(void); 140 + void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte); 141 + 142 + #define cpu_signal_handler cpu_rx_signal_handler 143 + #define cpu_list rx_cpu_list 144 + 145 + #include "exec/cpu-all.h" 146 + 147 + #define CPU_INTERRUPT_SOFT CPU_INTERRUPT_TGT_INT_0 148 + #define CPU_INTERRUPT_FIR CPU_INTERRUPT_TGT_INT_1 149 + 150 + #define RX_CPU_IRQ 0 151 + #define RX_CPU_FIR 1 152 + 153 + static inline void cpu_get_tb_cpu_state(CPURXState *env, target_ulong *pc, 154 + target_ulong *cs_base, uint32_t *flags) 155 + { 156 + *pc = env->pc; 157 + *cs_base = 0; 158 + *flags = FIELD_DP32(0, PSW, PM, env->psw_pm); 159 + } 160 + 161 + static inline int cpu_mmu_index(CPURXState *env, bool ifetch) 162 + { 163 + return 0; 164 + } 165 + 166 + static inline uint32_t rx_cpu_pack_psw(CPURXState *env) 167 + { 168 + uint32_t psw = 0; 169 + psw = FIELD_DP32(psw, PSW, IPL, env->psw_ipl); 170 + psw = FIELD_DP32(psw, PSW, PM, env->psw_pm); 171 + psw = FIELD_DP32(psw, PSW, U, env->psw_u); 172 + psw = FIELD_DP32(psw, PSW, I, env->psw_i); 173 + psw = FIELD_DP32(psw, PSW, O, env->psw_o >> 31); 174 + psw = FIELD_DP32(psw, PSW, S, env->psw_s >> 31); 175 + psw = FIELD_DP32(psw, PSW, Z, env->psw_z == 0); 176 + psw = FIELD_DP32(psw, PSW, C, env->psw_c); 177 + return psw; 178 + } 179 + 180 + #endif /* RX_CPU_H */
+1446
target/rx/disas.c
··· 1 + /* 2 + * Renesas RX Disassembler 3 + * 4 + * Copyright (c) 2019 Yoshinori Sato <ysato@users.sourceforge.jp> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2 or later, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #include "qemu/osdep.h" 20 + #include "disas/dis-asm.h" 21 + #include "qemu/bitops.h" 22 + #include "cpu.h" 23 + 24 + typedef struct DisasContext { 25 + disassemble_info *dis; 26 + uint32_t addr; 27 + uint32_t pc; 28 + uint8_t len; 29 + uint8_t bytes[8]; 30 + } DisasContext; 31 + 32 + 33 + static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn, 34 + int i, int n) 35 + { 36 + uint32_t addr = ctx->addr; 37 + 38 + g_assert(ctx->len == i); 39 + g_assert(n <= ARRAY_SIZE(ctx->bytes)); 40 + 41 + while (++i <= n) { 42 + ctx->dis->read_memory_func(addr++, &ctx->bytes[i - 1], 1, ctx->dis); 43 + insn |= ctx->bytes[i - 1] << (32 - i * 8); 44 + } 45 + ctx->addr = addr; 46 + ctx->len = n; 47 + 48 + return insn; 49 + } 50 + 51 + static int32_t li(DisasContext *ctx, int sz) 52 + { 53 + uint32_t addr = ctx->addr; 54 + uintptr_t len = ctx->len; 55 + 56 + switch (sz) { 57 + case 1: 58 + g_assert(len + 1 <= ARRAY_SIZE(ctx->bytes)); 59 + ctx->addr += 1; 60 + ctx->len += 1; 61 + ctx->dis->read_memory_func(addr, ctx->bytes + len, 1, ctx->dis); 62 + return (int8_t)ctx->bytes[len]; 63 + case 2: 64 + g_assert(len + 2 <= ARRAY_SIZE(ctx->bytes)); 65 + ctx->addr += 2; 66 + ctx->len += 2; 67 + 
ctx->dis->read_memory_func(addr, ctx->bytes + len, 2, ctx->dis); 68 + return ldsw_le_p(ctx->bytes + len); 69 + case 3: 70 + g_assert(len + 3 <= ARRAY_SIZE(ctx->bytes)); 71 + ctx->addr += 3; 72 + ctx->len += 3; 73 + ctx->dis->read_memory_func(addr, ctx->bytes + len, 3, ctx->dis); 74 + return (int8_t)ctx->bytes[len + 2] << 16 | lduw_le_p(ctx->bytes + len); 75 + case 0: 76 + g_assert(len + 4 <= ARRAY_SIZE(ctx->bytes)); 77 + ctx->addr += 4; 78 + ctx->len += 4; 79 + ctx->dis->read_memory_func(addr, ctx->bytes + len, 4, ctx->dis); 80 + return ldl_le_p(ctx->bytes + len); 81 + default: 82 + g_assert_not_reached(); 83 + } 84 + } 85 + 86 + static int bdsp_s(DisasContext *ctx, int d) 87 + { 88 + /* 89 + * 0 -> 8 90 + * 1 -> 9 91 + * 2 -> 10 92 + * 3 -> 3 93 + * : 94 + * 7 -> 7 95 + */ 96 + if (d < 3) { 97 + d += 8; 98 + } 99 + return d; 100 + } 101 + 102 + /* Include the auto-generated decoder. */ 103 + #include "decode.inc.c" 104 + 105 + static void dump_bytes(DisasContext *ctx) 106 + { 107 + int i, len = ctx->len; 108 + 109 + for (i = 0; i < len; ++i) { 110 + ctx->dis->fprintf_func(ctx->dis->stream, "%02x ", ctx->bytes[i]); 111 + } 112 + ctx->dis->fprintf_func(ctx->dis->stream, "%*c", (8 - i) * 3, '\t'); 113 + } 114 + 115 + #define prt(...) 
\ 116 + do { \ 117 + dump_bytes(ctx); \ 118 + ctx->dis->fprintf_func(ctx->dis->stream, __VA_ARGS__); \ 119 + } while (0) 120 + 121 + #define RX_MEMORY_BYTE 0 122 + #define RX_MEMORY_WORD 1 123 + #define RX_MEMORY_LONG 2 124 + 125 + #define RX_IM_BYTE 0 126 + #define RX_IM_WORD 1 127 + #define RX_IM_LONG 2 128 + #define RX_IM_UWORD 3 129 + 130 + static const char size[] = {'b', 'w', 'l'}; 131 + static const char cond[][4] = { 132 + "eq", "ne", "c", "nc", "gtu", "leu", "pz", "n", 133 + "ge", "lt", "gt", "le", "o", "no", "ra", "f" 134 + }; 135 + static const char psw[] = { 136 + 'c', 'z', 's', 'o', 0, 0, 0, 0, 137 + 'i', 'u', 0, 0, 0, 0, 0, 0, 138 + }; 139 + 140 + static void rx_index_addr(DisasContext *ctx, char out[8], int ld, int mi) 141 + { 142 + uint32_t addr = ctx->addr; 143 + uintptr_t len = ctx->len; 144 + uint16_t dsp; 145 + 146 + switch (ld) { 147 + case 0: 148 + /* No index; return empty string. */ 149 + out[0] = '\0'; 150 + return; 151 + case 1: 152 + g_assert(len + 1 <= ARRAY_SIZE(ctx->bytes)); 153 + ctx->addr += 1; 154 + ctx->len += 1; 155 + ctx->dis->read_memory_func(addr, ctx->bytes + len, 1, ctx->dis); 156 + dsp = ctx->bytes[len]; 157 + break; 158 + case 2: 159 + g_assert(len + 2 <= ARRAY_SIZE(ctx->bytes)); 160 + ctx->addr += 2; 161 + ctx->len += 2; 162 + ctx->dis->read_memory_func(addr, ctx->bytes + len, 2, ctx->dis); 163 + dsp = lduw_le_p(ctx->bytes + len); 164 + break; 165 + default: 166 + g_assert_not_reached(); 167 + } 168 + 169 + sprintf(out, "%u", dsp << (mi < 3 ? 
mi : 4 - mi)); 170 + } 171 + 172 + static void prt_ldmi(DisasContext *ctx, const char *insn, 173 + int ld, int mi, int rs, int rd) 174 + { 175 + static const char sizes[][4] = {".b", ".w", ".l", ".uw", ".ub"}; 176 + char dsp[8]; 177 + 178 + if (ld < 3) { 179 + rx_index_addr(ctx, dsp, ld, mi); 180 + prt("%s\t%s[r%d]%s, r%d", insn, dsp, rs, sizes[mi], rd); 181 + } else { 182 + prt("%s\tr%d, r%d", insn, rs, rd); 183 + } 184 + } 185 + 186 + static void prt_ir(DisasContext *ctx, const char *insn, int imm, int rd) 187 + { 188 + if (imm < 0x100) { 189 + prt("%s\t#%d, r%d", insn, imm, rd); 190 + } else { 191 + prt("%s\t#0x%08x, r%d", insn, imm, rd); 192 + } 193 + } 194 + 195 + /* mov.[bwl] rs,dsp:[rd] */ 196 + static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a) 197 + { 198 + if (a->dsp > 0) { 199 + prt("mov.%c\tr%d,%d[r%d]", 200 + size[a->sz], a->rs, a->dsp << a->sz, a->rd); 201 + } else { 202 + prt("mov.%c\tr%d,[r%d]", 203 + size[a->sz], a->rs, a->rd); 204 + } 205 + return true; 206 + } 207 + 208 + /* mov.[bwl] dsp:[rs],rd */ 209 + static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a) 210 + { 211 + if (a->dsp > 0) { 212 + prt("mov.%c\t%d[r%d], r%d", 213 + size[a->sz], a->dsp << a->sz, a->rs, a->rd); 214 + } else { 215 + prt("mov.%c\t[r%d], r%d", 216 + size[a->sz], a->rs, a->rd); 217 + } 218 + return true; 219 + } 220 + 221 + /* mov.l #uimm4,rd */ 222 + /* mov.l #uimm8,rd */ 223 + /* mov.l #imm,rd */ 224 + static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a) 225 + { 226 + prt_ir(ctx, "mov.l", a->imm, a->rd); 227 + return true; 228 + } 229 + 230 + /* mov.[bwl] #uimm8,dsp:[rd] */ 231 + /* mov #imm, dsp:[rd] */ 232 + static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a) 233 + { 234 + if (a->dsp > 0) { 235 + prt("mov.%c\t#%d,%d[r%d]", 236 + size[a->sz], a->imm, a->dsp << a->sz, a->rd); 237 + } else { 238 + prt("mov.%c\t#%d,[r%d]", 239 + size[a->sz], a->imm, a->rd); 240 + } 241 + return true; 242 + } 243 + 244 + /* mov.[bwl] [ri,rb],rd */ 245 + static 
bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a) 246 + { 247 + prt("mov.%c\t[r%d,r%d], r%d", size[a->sz], a->ri, a->rb, a->rd); 248 + return true; 249 + } 250 + 251 + /* mov.[bwl] rd,[ri,rb] */ 252 + static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a) 253 + { 254 + prt("mov.%c\tr%d, [r%d, r%d]", size[a->sz], a->rs, a->ri, a->rb); 255 + return true; 256 + } 257 + 258 + 259 + /* mov.[bwl] dsp:[rs],dsp:[rd] */ 260 + /* mov.[bwl] rs,dsp:[rd] */ 261 + /* mov.[bwl] dsp:[rs],rd */ 262 + /* mov.[bwl] rs,rd */ 263 + static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a) 264 + { 265 + char dspd[8], dsps[8], szc = size[a->sz]; 266 + 267 + if (a->lds == 3 && a->ldd == 3) { 268 + /* mov.[bwl] rs,rd */ 269 + prt("mov.%c\tr%d, r%d", szc, a->rs, a->rd); 270 + } else if (a->lds == 3) { 271 + rx_index_addr(ctx, dspd, a->ldd, a->sz); 272 + prt("mov.%c\tr%d, %s[r%d]", szc, a->rs, dspd, a->rd); 273 + } else if (a->ldd == 3) { 274 + rx_index_addr(ctx, dsps, a->lds, a->sz); 275 + prt("mov.%c\t%s[r%d], r%d", szc, dsps, a->rs, a->rd); 276 + } else { 277 + rx_index_addr(ctx, dsps, a->lds, a->sz); 278 + rx_index_addr(ctx, dspd, a->ldd, a->sz); 279 + prt("mov.%c\t%s[r%d], %s[r%d]", szc, dsps, a->rs, dspd, a->rd); 280 + } 281 + return true; 282 + } 283 + 284 + /* mov.[bwl] rs,[rd+] */ 285 + /* mov.[bwl] rs,[-rd] */ 286 + static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a) 287 + { 288 + if (a->ad) { 289 + prt("mov.%c\tr%d, [-r%d]", size[a->sz], a->rs, a->rd); 290 + } else { 291 + prt("mov.%c\tr%d, [r%d+]", size[a->sz], a->rs, a->rd); 292 + } 293 + return true; 294 + } 295 + 296 + /* mov.[bwl] [rd+],rs */ 297 + /* mov.[bwl] [-rd],rs */ 298 + static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a) 299 + { 300 + if (a->ad) { 301 + prt("mov.%c\t[-r%d], r%d", size[a->sz], a->rd, a->rs); 302 + } else { 303 + prt("mov.%c\t[r%d+], r%d", size[a->sz], a->rd, a->rs); 304 + } 305 + return true; 306 + } 307 + 308 + /* movu.[bw] dsp5:[rs],rd */ 309 + static bool 
trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a) 310 + { 311 + if (a->dsp > 0) { 312 + prt("movu.%c\t%d[r%d], r%d", size[a->sz], 313 + a->dsp << a->sz, a->rs, a->rd); 314 + } else { 315 + prt("movu.%c\t[r%d], r%d", size[a->sz], a->rs, a->rd); 316 + } 317 + return true; 318 + } 319 + 320 + /* movu.[bw] rs,rd */ 321 + static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a) 322 + { 323 + prt("movu.%c\tr%d, r%d", size[a->sz], a->rs, a->rd); 324 + return true; 325 + } 326 + 327 + /* movu.[bw] [ri,rb],rd */ 328 + static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a) 329 + { 330 + prt("mov.%c\t[r%d,r%d], r%d", size[a->sz], a->ri, a->rb, a->rd); 331 + return true; 332 + } 333 + 334 + /* movu.[bw] [rs+],rd */ 335 + /* movu.[bw] [-rs],rd */ 336 + static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a) 337 + { 338 + if (a->ad) { 339 + prt("movu.%c\t[-r%d], r%d", size[a->sz], a->rd, a->rs); 340 + } else { 341 + prt("movu.%c\t[r%d+], r%d", size[a->sz], a->rd, a->rs); 342 + } 343 + return true; 344 + } 345 + 346 + /* pop rd */ 347 + static bool trans_POP(DisasContext *ctx, arg_POP *a) 348 + { 349 + prt("pop\tr%d", a->rd); 350 + return true; 351 + } 352 + 353 + /* popc rx */ 354 + static bool trans_POPC(DisasContext *ctx, arg_POPC *a) 355 + { 356 + prt("pop\tr%s", rx_crname(a->cr)); 357 + return true; 358 + } 359 + 360 + /* popm rd-rd2 */ 361 + static bool trans_POPM(DisasContext *ctx, arg_POPM *a) 362 + { 363 + prt("popm\tr%d-r%d", a->rd, a->rd2); 364 + return true; 365 + } 366 + 367 + /* push rs */ 368 + static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a) 369 + { 370 + prt("push\tr%d", a->rs); 371 + return true; 372 + } 373 + 374 + /* push dsp[rs] */ 375 + static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a) 376 + { 377 + char dsp[8]; 378 + 379 + rx_index_addr(ctx, dsp, a->ld, a->sz); 380 + prt("push\t%s[r%d]", dsp, a->rs); 381 + return true; 382 + } 383 + 384 + /* pushc rx */ 385 + static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a) 386 + { 
387 + prt("push\t%s", rx_crname(a->cr)); 388 + return true; 389 + } 390 + 391 + /* pushm rs-rs2*/ 392 + static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a) 393 + { 394 + prt("pushm\tr%d-r%d", a->rs, a->rs2); 395 + return true; 396 + } 397 + 398 + /* xchg rs,rd */ 399 + static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a) 400 + { 401 + prt("xchg\tr%d, r%d", a->rs, a->rd); 402 + return true; 403 + } 404 + /* xchg dsp[rs].<mi>,rd */ 405 + static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a) 406 + { 407 + prt_ldmi(ctx, "xchg", a->ld, a->mi, a->rs, a->rd); 408 + return true; 409 + } 410 + 411 + /* stz #imm,rd */ 412 + static bool trans_STZ(DisasContext *ctx, arg_STZ *a) 413 + { 414 + prt_ir(ctx, "stz", a->imm, a->rd); 415 + return true; 416 + } 417 + 418 + /* stnz #imm,rd */ 419 + static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a) 420 + { 421 + prt_ir(ctx, "stnz", a->imm, a->rd); 422 + return true; 423 + } 424 + 425 + /* rtsd #imm */ 426 + static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a) 427 + { 428 + prt("rtsd\t#%d", a->imm << 2); 429 + return true; 430 + } 431 + 432 + /* rtsd #imm, rd-rd2 */ 433 + static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a) 434 + { 435 + prt("rtsd\t#%d, r%d - r%d", a->imm << 2, a->rd, a->rd2); 436 + return true; 437 + } 438 + 439 + /* and #uimm:4, rd */ 440 + /* and #imm, rd */ 441 + static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a) 442 + { 443 + prt_ir(ctx, "and", a->imm, a->rd); 444 + return true; 445 + } 446 + 447 + /* and dsp[rs], rd */ 448 + /* and rs,rd */ 449 + static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a) 450 + { 451 + prt_ldmi(ctx, "and", a->ld, a->mi, a->rs, a->rd); 452 + return true; 453 + } 454 + 455 + /* and rs,rs2,rd */ 456 + static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a) 457 + { 458 + prt("and\tr%d,r%d, r%d", a->rs, a->rs2, a->rd); 459 + return true; 460 + } 461 + 462 + /* or #uimm:4, rd */ 463 + /* or #imm, rd */ 464 + static bool 
trans_OR_ir(DisasContext *ctx, arg_OR_ir *a) 465 + { 466 + prt_ir(ctx, "or", a->imm, a->rd); 467 + return true; 468 + } 469 + 470 + /* or dsp[rs], rd */ 471 + /* or rs,rd */ 472 + static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a) 473 + { 474 + prt_ldmi(ctx, "or", a->ld, a->mi, a->rs, a->rd); 475 + return true; 476 + } 477 + 478 + /* or rs,rs2,rd */ 479 + static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a) 480 + { 481 + prt("or\tr%d, r%d, r%d", a->rs, a->rs2, a->rd); 482 + return true; 483 + } 484 + 485 + /* xor #imm, rd */ 486 + static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a) 487 + { 488 + prt_ir(ctx, "xor", a->imm, a->rd); 489 + return true; 490 + } 491 + 492 + /* xor dsp[rs], rd */ 493 + /* xor rs,rd */ 494 + static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a) 495 + { 496 + prt_ldmi(ctx, "xor", a->ld, a->mi, a->rs, a->rd); 497 + return true; 498 + } 499 + 500 + /* tst #imm, rd */ 501 + static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a) 502 + { 503 + prt_ir(ctx, "tst", a->imm, a->rd); 504 + return true; 505 + } 506 + 507 + /* tst dsp[rs], rd */ 508 + /* tst rs, rd */ 509 + static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a) 510 + { 511 + prt_ldmi(ctx, "tst", a->ld, a->mi, a->rs, a->rd); 512 + return true; 513 + } 514 + 515 + /* not rd */ 516 + /* not rs, rd */ 517 + static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a) 518 + { 519 + if (a->rs != a->rd) { 520 + prt("not\tr%d, r%d", a->rs, a->rd); 521 + } else { 522 + prt("not\tr%d", a->rs); 523 + } 524 + return true; 525 + } 526 + 527 + /* neg rd */ 528 + /* neg rs, rd */ 529 + static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a) 530 + { 531 + if (a->rs != a->rd) { 532 + prt("neg\tr%d, r%d", a->rs, a->rd); 533 + } else { 534 + prt("neg\tr%d", a->rs); 535 + } 536 + return true; 537 + } 538 + 539 + /* adc #imm, rd */ 540 + static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a) 541 + { 542 + prt_ir(ctx, "adc", a->imm, a->rd); 543 + return true; 544 + } 545 
+ 546 + /* adc rs, rd */ 547 + static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a) 548 + { 549 + prt("adc\tr%d, r%d", a->rs, a->rd); 550 + return true; 551 + } 552 + 553 + /* adc dsp[rs], rd */ 554 + static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a) 555 + { 556 + char dsp[8]; 557 + 558 + rx_index_addr(ctx, dsp, a->ld, 2); 559 + prt("adc\t%s[r%d], r%d", dsp, a->rs, a->rd); 560 + return true; 561 + } 562 + 563 + /* add #uimm4, rd */ 564 + /* add #imm, rs, rd */ 565 + static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a) 566 + { 567 + if (a->imm < 0x10 && a->rs2 == a->rd) { 568 + prt("add\t#%d, r%d", a->imm, a->rd); 569 + } else { 570 + prt("add\t#0x%08x, r%d, r%d", a->imm, a->rs2, a->rd); 571 + } 572 + return true; 573 + } 574 + 575 + /* add rs, rd */ 576 + /* add dsp[rs], rd */ 577 + static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a) 578 + { 579 + prt_ldmi(ctx, "add", a->ld, a->mi, a->rs, a->rd); 580 + return true; 581 + } 582 + 583 + /* add rs, rs2, rd */ 584 + static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a) 585 + { 586 + prt("add\tr%d, r%d, r%d", a->rs, a->rs2, a->rd); 587 + return true; 588 + } 589 + 590 + /* cmp #imm4, rd */ 591 + /* cmp #imm8, rd */ 592 + /* cmp #imm, rs2 */ 593 + static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a) 594 + { 595 + prt_ir(ctx, "cmp", a->imm, a->rs2); 596 + return true; 597 + } 598 + 599 + /* cmp rs, rs2 */ 600 + /* cmp dsp[rs], rs2 */ 601 + static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a) 602 + { 603 + prt_ldmi(ctx, "cmp", a->ld, a->mi, a->rs, a->rd); 604 + return true; 605 + } 606 + 607 + /* sub #imm4, rd */ 608 + static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a) 609 + { 610 + prt("sub\t#%d, r%d", a->imm, a->rd); 611 + return true; 612 + } 613 + 614 + /* sub rs, rd */ 615 + /* sub dsp[rs], rd */ 616 + static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a) 617 + { 618 + prt_ldmi(ctx, "sub", a->ld, a->mi, a->rs, a->rd); 619 + return true; 620 + } 621 + 622 + 
/* sub rs, rs2, rd */ 623 + static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a) 624 + { 625 + prt("sub\tr%d, r%d, r%d", a->rs, a->rs2, a->rd); 626 + return true; 627 + } 628 + 629 + /* sbb rs, rd */ 630 + static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a) 631 + { 632 + prt("sbb\tr%d, r%d", a->rs, a->rd); 633 + return true; 634 + } 635 + 636 + /* sbb dsp[rs], rd */ 637 + static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a) 638 + { 639 + prt_ldmi(ctx, "sbb", a->ld, RX_IM_LONG, a->rs, a->rd); 640 + return true; 641 + } 642 + 643 + /* abs rd */ 644 + /* abs rs, rd */ 645 + static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a) 646 + { 647 + if (a->rs != a->rd) { 648 + prt("abs\tr%d, r%d", a->rs, a->rd); 649 + } else { 650 + prt("abs\tr%d", a->rs); 651 + } 652 + return true; 653 + } 654 + 655 + /* max #imm, rd */ 656 + static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a) 657 + { 658 + prt_ir(ctx, "max", a->imm, a->rd); 659 + return true; 660 + } 661 + 662 + /* max rs, rd */ 663 + /* max dsp[rs], rd */ 664 + static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a) 665 + { 666 + prt_ldmi(ctx, "max", a->ld, a->mi, a->rs, a->rd); 667 + return true; 668 + } 669 + 670 + /* min #imm, rd */ 671 + static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a) 672 + { 673 + prt_ir(ctx, "min", a->imm, a->rd); 674 + return true; 675 + } 676 + 677 + /* min rs, rd */ 678 + /* min dsp[rs], rd */ 679 + static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a) 680 + { 681 + prt_ldmi(ctx, "min", a->ld, a->mi, a->rs, a->rd); 682 + return true; 683 + } 684 + 685 + /* mul #uimm4, rd */ 686 + /* mul #imm, rd */ 687 + static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a) 688 + { 689 + prt_ir(ctx, "mul", a->imm, a->rd); 690 + return true; 691 + } 692 + 693 + /* mul rs, rd */ 694 + /* mul dsp[rs], rd */ 695 + static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a) 696 + { 697 + prt_ldmi(ctx, "mul", a->ld, a->mi, a->rs, a->rd); 698 + return true; 699 + } 
700 + 701 + /* mul rs, rs2, rd */ 702 + static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a) 703 + { 704 + prt("mul\tr%d,r%d,r%d", a->rs, a->rs2, a->rd); 705 + return true; 706 + } 707 + 708 + /* emul #imm, rd */ 709 + static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a) 710 + { 711 + prt_ir(ctx, "emul", a->imm, a->rd); 712 + return true; 713 + } 714 + 715 + /* emul rs, rd */ 716 + /* emul dsp[rs], rd */ 717 + static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a) 718 + { 719 + prt_ldmi(ctx, "emul", a->ld, a->mi, a->rs, a->rd); 720 + return true; 721 + } 722 + 723 + /* emulu #imm, rd */ 724 + static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a) 725 + { 726 + prt_ir(ctx, "emulu", a->imm, a->rd); 727 + return true; 728 + } 729 + 730 + /* emulu rs, rd */ 731 + /* emulu dsp[rs], rd */ 732 + static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a) 733 + { 734 + prt_ldmi(ctx, "emulu", a->ld, a->mi, a->rs, a->rd); 735 + return true; 736 + } 737 + 738 + /* div #imm, rd */ 739 + static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a) 740 + { 741 + prt_ir(ctx, "div", a->imm, a->rd); 742 + return true; 743 + } 744 + 745 + /* div rs, rd */ 746 + /* div dsp[rs], rd */ 747 + static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a) 748 + { 749 + prt_ldmi(ctx, "div", a->ld, a->mi, a->rs, a->rd); 750 + return true; 751 + } 752 + 753 + /* divu #imm, rd */ 754 + static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a) 755 + { 756 + prt_ir(ctx, "divu", a->imm, a->rd); 757 + return true; 758 + } 759 + 760 + /* divu rs, rd */ 761 + /* divu dsp[rs], rd */ 762 + static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a) 763 + { 764 + prt_ldmi(ctx, "divu", a->ld, a->mi, a->rs, a->rd); 765 + return true; 766 + } 767 + 768 + 769 + /* shll #imm:5, rd */ 770 + /* shll #imm:5, rs, rd */ 771 + static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a) 772 + { 773 + if (a->rs2 != a->rd) { 774 + prt("shll\t#%d, r%d, r%d", a->imm, a->rs2, a->rd); 
775 + } else { 776 + prt("shll\t#%d, r%d", a->imm, a->rd); 777 + } 778 + return true; 779 + } 780 + 781 + /* shll rs, rd */ 782 + static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a) 783 + { 784 + prt("shll\tr%d, r%d", a->rs, a->rd); 785 + return true; 786 + } 787 + 788 + /* shar #imm:5, rd */ 789 + /* shar #imm:5, rs, rd */ 790 + static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a) 791 + { 792 + if (a->rs2 != a->rd) { 793 + prt("shar\t#%d, r%d, r%d", a->imm, a->rs2, a->rd); 794 + } else { 795 + prt("shar\t#%d, r%d", a->imm, a->rd); 796 + } 797 + return true; 798 + } 799 + 800 + /* shar rs, rd */ 801 + static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a) 802 + { 803 + prt("shar\tr%d, r%d", a->rs, a->rd); 804 + return true; 805 + } 806 + 807 + /* shlr #imm:5, rd */ 808 + /* shlr #imm:5, rs, rd */ 809 + static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a) 810 + { 811 + if (a->rs2 != a->rd) { 812 + prt("shlr\t#%d, r%d, r%d", a->imm, a->rs2, a->rd); 813 + } else { 814 + prt("shlr\t#%d, r%d", a->imm, a->rd); 815 + } 816 + return true; 817 + } 818 + 819 + /* shlr rs, rd */ 820 + static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a) 821 + { 822 + prt("shlr\tr%d, r%d", a->rs, a->rd); 823 + return true; 824 + } 825 + 826 + /* rolc rd */ 827 + static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a) 828 + { 829 + prt("rorc\tr%d", a->rd); 830 + return true; 831 + } 832 + 833 + /* rorc rd */ 834 + static bool trans_RORC(DisasContext *ctx, arg_RORC *a) 835 + { 836 + prt("rorc\tr%d", a->rd); 837 + return true; 838 + } 839 + 840 + /* rotl #imm, rd */ 841 + static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a) 842 + { 843 + prt("rotl\t#%d, r%d", a->imm, a->rd); 844 + return true; 845 + } 846 + 847 + /* rotl rs, rd */ 848 + static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a) 849 + { 850 + prt("rotl\tr%d, r%d", a->rs, a->rd); 851 + return true; 852 + } 853 + 854 + /* rotr #imm, rd */ 855 + static bool 
trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a) 856 + { 857 + prt("rotr\t#%d, r%d", a->imm, a->rd); 858 + return true; 859 + } 860 + 861 + /* rotr rs, rd */ 862 + static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a) 863 + { 864 + prt("rotr\tr%d, r%d", a->rs, a->rd); 865 + return true; 866 + } 867 + 868 + /* revl rs, rd */ 869 + static bool trans_REVL(DisasContext *ctx, arg_REVL *a) 870 + { 871 + prt("revl\tr%d, r%d", a->rs, a->rd); 872 + return true; 873 + } 874 + 875 + /* revw rs, rd */ 876 + static bool trans_REVW(DisasContext *ctx, arg_REVW *a) 877 + { 878 + prt("revw\tr%d, r%d", a->rs, a->rd); 879 + return true; 880 + } 881 + 882 + /* conditional branch helper */ 883 + static void rx_bcnd_main(DisasContext *ctx, int cd, int len, int dst) 884 + { 885 + static const char sz[] = {'s', 'b', 'w', 'a'}; 886 + prt("b%s.%c\t%08x", cond[cd], sz[len - 1], ctx->pc + dst); 887 + } 888 + 889 + /* beq dsp:3 / bne dsp:3 */ 890 + /* beq dsp:8 / bne dsp:8 */ 891 + /* bc dsp:8 / bnc dsp:8 */ 892 + /* bgtu dsp:8 / bleu dsp:8 */ 893 + /* bpz dsp:8 / bn dsp:8 */ 894 + /* bge dsp:8 / blt dsp:8 */ 895 + /* bgt dsp:8 / ble dsp:8 */ 896 + /* bo dsp:8 / bno dsp:8 */ 897 + /* beq dsp:16 / bne dsp:16 */ 898 + static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a) 899 + { 900 + rx_bcnd_main(ctx, a->cd, a->sz, a->dsp); 901 + return true; 902 + } 903 + 904 + /* bra dsp:3 */ 905 + /* bra dsp:8 */ 906 + /* bra dsp:16 */ 907 + /* bra dsp:24 */ 908 + static bool trans_BRA(DisasContext *ctx, arg_BRA *a) 909 + { 910 + rx_bcnd_main(ctx, 14, a->sz, a->dsp); 911 + return true; 912 + } 913 + 914 + /* bra rs */ 915 + static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a) 916 + { 917 + prt("bra.l\tr%d", a->rd); 918 + return true; 919 + } 920 + 921 + /* jmp rs */ 922 + static bool trans_JMP(DisasContext *ctx, arg_JMP *a) 923 + { 924 + prt("jmp\tr%d", a->rs); 925 + return true; 926 + } 927 + 928 + /* jsr rs */ 929 + static bool trans_JSR(DisasContext *ctx, arg_JSR *a) 930 + { 931 + 
prt("jsr\tr%d", a->rs); 932 + return true; 933 + } 934 + 935 + /* bsr dsp:16 */ 936 + /* bsr dsp:24 */ 937 + static bool trans_BSR(DisasContext *ctx, arg_BSR *a) 938 + { 939 + static const char sz[] = {'w', 'a'}; 940 + prt("bsr.%c\t%08x", sz[a->sz - 3], ctx->pc + a->dsp); 941 + return true; 942 + } 943 + 944 + /* bsr rs */ 945 + static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a) 946 + { 947 + prt("bsr.l\tr%d", a->rd); 948 + return true; 949 + } 950 + 951 + /* rts */ 952 + static bool trans_RTS(DisasContext *ctx, arg_RTS *a) 953 + { 954 + prt("rts"); 955 + return true; 956 + } 957 + 958 + /* nop */ 959 + static bool trans_NOP(DisasContext *ctx, arg_NOP *a) 960 + { 961 + prt("nop"); 962 + return true; 963 + } 964 + 965 + /* scmpu */ 966 + static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a) 967 + { 968 + prt("scmpu"); 969 + return true; 970 + } 971 + 972 + /* smovu */ 973 + static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a) 974 + { 975 + prt("smovu"); 976 + return true; 977 + } 978 + 979 + /* smovf */ 980 + static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a) 981 + { 982 + prt("smovf"); 983 + return true; 984 + } 985 + 986 + /* smovb */ 987 + static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a) 988 + { 989 + prt("smovb"); 990 + return true; 991 + } 992 + 993 + /* suntile */ 994 + static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a) 995 + { 996 + prt("suntil.%c", size[a->sz]); 997 + return true; 998 + } 999 + 1000 + /* swhile */ 1001 + static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a) 1002 + { 1003 + prt("swhile.%c", size[a->sz]); 1004 + return true; 1005 + } 1006 + /* sstr */ 1007 + static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a) 1008 + { 1009 + prt("sstr.%c", size[a->sz]); 1010 + return true; 1011 + } 1012 + 1013 + /* rmpa */ 1014 + static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a) 1015 + { 1016 + prt("rmpa.%c", size[a->sz]); 1017 + return true; 1018 + } 1019 + 1020 + /* mulhi rs,rs2 */ 1021 + static bool 
trans_MULHI(DisasContext *ctx, arg_MULHI *a) 1022 + { 1023 + prt("mulhi\tr%d,r%d", a->rs, a->rs2); 1024 + return true; 1025 + } 1026 + 1027 + /* mullo rs,rs2 */ 1028 + static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a) 1029 + { 1030 + prt("mullo\tr%d, r%d", a->rs, a->rs2); 1031 + return true; 1032 + } 1033 + 1034 + /* machi rs,rs2 */ 1035 + static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a) 1036 + { 1037 + prt("machi\tr%d, r%d", a->rs, a->rs2); 1038 + return true; 1039 + } 1040 + 1041 + /* maclo rs,rs2 */ 1042 + static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a) 1043 + { 1044 + prt("maclo\tr%d, r%d", a->rs, a->rs2); 1045 + return true; 1046 + } 1047 + 1048 + /* mvfachi rd */ 1049 + static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a) 1050 + { 1051 + prt("mvfachi\tr%d", a->rd); 1052 + return true; 1053 + } 1054 + 1055 + /* mvfacmi rd */ 1056 + static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a) 1057 + { 1058 + prt("mvfacmi\tr%d", a->rd); 1059 + return true; 1060 + } 1061 + 1062 + /* mvtachi rs */ 1063 + static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a) 1064 + { 1065 + prt("mvtachi\tr%d", a->rs); 1066 + return true; 1067 + } 1068 + 1069 + /* mvtaclo rs */ 1070 + static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a) 1071 + { 1072 + prt("mvtaclo\tr%d", a->rs); 1073 + return true; 1074 + } 1075 + 1076 + /* racw #imm */ 1077 + static bool trans_RACW(DisasContext *ctx, arg_RACW *a) 1078 + { 1079 + prt("racw\t#%d", a->imm + 1); 1080 + return true; 1081 + } 1082 + 1083 + /* sat rd */ 1084 + static bool trans_SAT(DisasContext *ctx, arg_SAT *a) 1085 + { 1086 + prt("sat\tr%d", a->rd); 1087 + return true; 1088 + } 1089 + 1090 + /* satr */ 1091 + static bool trans_SATR(DisasContext *ctx, arg_SATR *a) 1092 + { 1093 + prt("satr"); 1094 + return true; 1095 + } 1096 + 1097 + /* fadd #imm, rd */ 1098 + static bool trans_FADD_ir(DisasContext *ctx, arg_FADD_ir *a) 1099 + { 1100 + prt("fadd\t#%d,r%d", li(ctx, 0), a->rd); 1101 + return 
true; 1102 + } 1103 + 1104 + /* fadd dsp[rs], rd */ 1105 + /* fadd rs, rd */ 1106 + static bool trans_FADD_mr(DisasContext *ctx, arg_FADD_mr *a) 1107 + { 1108 + prt_ldmi(ctx, "fadd", a->ld, RX_IM_LONG, a->rs, a->rd); 1109 + return true; 1110 + } 1111 + 1112 + /* fcmp #imm, rd */ 1113 + static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir *a) 1114 + { 1115 + prt("fadd\t#%d,r%d", li(ctx, 0), a->rd); 1116 + return true; 1117 + } 1118 + 1119 + /* fcmp dsp[rs], rd */ 1120 + /* fcmp rs, rd */ 1121 + static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a) 1122 + { 1123 + prt_ldmi(ctx, "fcmp", a->ld, RX_IM_LONG, a->rs, a->rd); 1124 + return true; 1125 + } 1126 + 1127 + /* fsub #imm, rd */ 1128 + static bool trans_FSUB_ir(DisasContext *ctx, arg_FSUB_ir *a) 1129 + { 1130 + prt("fsub\t#%d,r%d", li(ctx, 0), a->rd); 1131 + return true; 1132 + } 1133 + 1134 + /* fsub dsp[rs], rd */ 1135 + /* fsub rs, rd */ 1136 + static bool trans_FSUB_mr(DisasContext *ctx, arg_FSUB_mr *a) 1137 + { 1138 + prt_ldmi(ctx, "fsub", a->ld, RX_IM_LONG, a->rs, a->rd); 1139 + return true; 1140 + } 1141 + 1142 + /* ftoi dsp[rs], rd */ 1143 + /* ftoi rs, rd */ 1144 + static bool trans_FTOI(DisasContext *ctx, arg_FTOI *a) 1145 + { 1146 + prt_ldmi(ctx, "ftoi", a->ld, RX_IM_LONG, a->rs, a->rd); 1147 + return true; 1148 + } 1149 + 1150 + /* fmul #imm, rd */ 1151 + static bool trans_FMUL_ir(DisasContext *ctx, arg_FMUL_ir *a) 1152 + { 1153 + prt("fmul\t#%d,r%d", li(ctx, 0), a->rd); 1154 + return true; 1155 + } 1156 + 1157 + /* fmul dsp[rs], rd */ 1158 + /* fmul rs, rd */ 1159 + static bool trans_FMUL_mr(DisasContext *ctx, arg_FMUL_mr *a) 1160 + { 1161 + prt_ldmi(ctx, "fmul", a->ld, RX_IM_LONG, a->rs, a->rd); 1162 + return true; 1163 + } 1164 + 1165 + /* fdiv #imm, rd */ 1166 + static bool trans_FDIV_ir(DisasContext *ctx, arg_FDIV_ir *a) 1167 + { 1168 + prt("fdiv\t#%d,r%d", li(ctx, 0), a->rd); 1169 + return true; 1170 + } 1171 + 1172 + /* fdiv dsp[rs], rd */ 1173 + /* fdiv rs, rd */ 1174 + static bool 
trans_FDIV_mr(DisasContext *ctx, arg_FDIV_mr *a) 1175 + { 1176 + prt_ldmi(ctx, "fdiv", a->ld, RX_IM_LONG, a->rs, a->rd); 1177 + return true; 1178 + } 1179 + 1180 + /* round dsp[rs], rd */ 1181 + /* round rs, rd */ 1182 + static bool trans_ROUND(DisasContext *ctx, arg_ROUND *a) 1183 + { 1184 + prt_ldmi(ctx, "round", a->ld, RX_IM_LONG, a->rs, a->rd); 1185 + return true; 1186 + } 1187 + 1188 + /* itof rs, rd */ 1189 + /* itof dsp[rs], rd */ 1190 + static bool trans_ITOF(DisasContext *ctx, arg_ITOF *a) 1191 + { 1192 + prt_ldmi(ctx, "itof", a->ld, RX_IM_LONG, a->rs, a->rd); 1193 + return true; 1194 + } 1195 + 1196 + #define BOP_IM(name, reg) \ 1197 + do { \ 1198 + char dsp[8]; \ 1199 + rx_index_addr(ctx, dsp, a->ld, RX_MEMORY_BYTE); \ 1200 + prt("b%s\t#%d, %s[r%d]", #name, a->imm, dsp, reg); \ 1201 + return true; \ 1202 + } while (0) 1203 + 1204 + #define BOP_RM(name) \ 1205 + do { \ 1206 + char dsp[8]; \ 1207 + rx_index_addr(ctx, dsp, a->ld, RX_MEMORY_BYTE); \ 1208 + prt("b%s\tr%d, %s[r%d]", #name, a->rd, dsp, a->rs); \ 1209 + return true; \ 1210 + } while (0) 1211 + 1212 + /* bset #imm, dsp[rd] */ 1213 + static bool trans_BSET_im(DisasContext *ctx, arg_BSET_im *a) 1214 + { 1215 + BOP_IM(bset, a->rs); 1216 + } 1217 + 1218 + /* bset rs, dsp[rd] */ 1219 + static bool trans_BSET_rm(DisasContext *ctx, arg_BSET_rm *a) 1220 + { 1221 + BOP_RM(set); 1222 + } 1223 + 1224 + /* bset rs, rd */ 1225 + static bool trans_BSET_rr(DisasContext *ctx, arg_BSET_rr *a) 1226 + { 1227 + prt("bset\tr%d,r%d", a->rs, a->rd); 1228 + return true; 1229 + } 1230 + 1231 + /* bset #imm, rd */ 1232 + static bool trans_BSET_ir(DisasContext *ctx, arg_BSET_ir *a) 1233 + { 1234 + prt("bset\t#%d, r%d", a->imm, a->rd); 1235 + return true; 1236 + } 1237 + 1238 + /* bclr #imm, dsp[rd] */ 1239 + static bool trans_BCLR_im(DisasContext *ctx, arg_BCLR_im *a) 1240 + { 1241 + BOP_IM(clr, a->rs); 1242 + } 1243 + 1244 + /* bclr rs, dsp[rd] */ 1245 + static bool trans_BCLR_rm(DisasContext *ctx, arg_BCLR_rm *a) 1246 + 
{ 1247 + BOP_RM(clr); 1248 + } 1249 + 1250 + /* bclr rs, rd */ 1251 + static bool trans_BCLR_rr(DisasContext *ctx, arg_BCLR_rr *a) 1252 + { 1253 + prt("bclr\tr%d, r%d", a->rs, a->rd); 1254 + return true; 1255 + } 1256 + 1257 + /* bclr #imm, rd */ 1258 + static bool trans_BCLR_ir(DisasContext *ctx, arg_BCLR_ir *a) 1259 + { 1260 + prt("bclr\t#%d,r%d", a->imm, a->rd); 1261 + return true; 1262 + } 1263 + 1264 + /* btst #imm, dsp[rd] */ 1265 + static bool trans_BTST_im(DisasContext *ctx, arg_BTST_im *a) 1266 + { 1267 + BOP_IM(tst, a->rs); 1268 + } 1269 + 1270 + /* btst rs, dsp[rd] */ 1271 + static bool trans_BTST_rm(DisasContext *ctx, arg_BTST_rm *a) 1272 + { 1273 + BOP_RM(tst); 1274 + } 1275 + 1276 + /* btst rs, rd */ 1277 + static bool trans_BTST_rr(DisasContext *ctx, arg_BTST_rr *a) 1278 + { 1279 + prt("btst\tr%d, r%d", a->rs, a->rd); 1280 + return true; 1281 + } 1282 + 1283 + /* btst #imm, rd */ 1284 + static bool trans_BTST_ir(DisasContext *ctx, arg_BTST_ir *a) 1285 + { 1286 + prt("btst\t#%d, r%d", a->imm, a->rd); 1287 + return true; 1288 + } 1289 + 1290 + /* bnot rs, dsp[rd] */ 1291 + static bool trans_BNOT_rm(DisasContext *ctx, arg_BNOT_rm *a) 1292 + { 1293 + BOP_RM(not); 1294 + } 1295 + 1296 + /* bnot rs, rd */ 1297 + static bool trans_BNOT_rr(DisasContext *ctx, arg_BNOT_rr *a) 1298 + { 1299 + prt("bnot\tr%d, r%d", a->rs, a->rd); 1300 + return true; 1301 + } 1302 + 1303 + /* bnot #imm, dsp[rd] */ 1304 + static bool trans_BNOT_im(DisasContext *ctx, arg_BNOT_im *a) 1305 + { 1306 + BOP_IM(not, a->rs); 1307 + } 1308 + 1309 + /* bnot #imm, rd */ 1310 + static bool trans_BNOT_ir(DisasContext *ctx, arg_BNOT_ir *a) 1311 + { 1312 + prt("bnot\t#%d, r%d", a->imm, a->rd); 1313 + return true; 1314 + } 1315 + 1316 + /* bmcond #imm, dsp[rd] */ 1317 + static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a) 1318 + { 1319 + char dsp[8]; 1320 + 1321 + rx_index_addr(ctx, dsp, a->ld, RX_MEMORY_BYTE); 1322 + prt("bm%s\t#%d, %s[r%d]", cond[a->cd], a->imm, dsp, a->rd); 1323 + 
return true; 1324 + } 1325 + 1326 + /* bmcond #imm, rd */ 1327 + static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a) 1328 + { 1329 + prt("bm%s\t#%d, r%d", cond[a->cd], a->imm, a->rd); 1330 + return true; 1331 + } 1332 + 1333 + /* clrpsw psw */ 1334 + static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a) 1335 + { 1336 + prt("clrpsw\t%c", psw[a->cb]); 1337 + return true; 1338 + } 1339 + 1340 + /* setpsw psw */ 1341 + static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a) 1342 + { 1343 + prt("setpsw\t%c", psw[a->cb]); 1344 + return true; 1345 + } 1346 + 1347 + /* mvtipl #imm */ 1348 + static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a) 1349 + { 1350 + prt("movtipl\t#%d", a->imm); 1351 + return true; 1352 + } 1353 + 1354 + /* mvtc #imm, rd */ 1355 + static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a) 1356 + { 1357 + prt("mvtc\t#0x%08x, %s", a->imm, rx_crname(a->cr)); 1358 + return true; 1359 + } 1360 + 1361 + /* mvtc rs, rd */ 1362 + static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a) 1363 + { 1364 + prt("mvtc\tr%d, %s", a->rs, rx_crname(a->cr)); 1365 + return true; 1366 + } 1367 + 1368 + /* mvfc rs, rd */ 1369 + static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a) 1370 + { 1371 + prt("mvfc\t%s, r%d", rx_crname(a->cr), a->rd); 1372 + return true; 1373 + } 1374 + 1375 + /* rtfi */ 1376 + static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a) 1377 + { 1378 + prt("rtfi"); 1379 + return true; 1380 + } 1381 + 1382 + /* rte */ 1383 + static bool trans_RTE(DisasContext *ctx, arg_RTE *a) 1384 + { 1385 + prt("rte"); 1386 + return true; 1387 + } 1388 + 1389 + /* brk */ 1390 + static bool trans_BRK(DisasContext *ctx, arg_BRK *a) 1391 + { 1392 + prt("brk"); 1393 + return true; 1394 + } 1395 + 1396 + /* int #imm */ 1397 + static bool trans_INT(DisasContext *ctx, arg_INT *a) 1398 + { 1399 + prt("int\t#%d", a->imm); 1400 + return true; 1401 + } 1402 + 1403 + /* wait */ 1404 + static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a) 1405 + { 
1406 + prt("wait"); 1407 + return true; 1408 + } 1409 + 1410 + /* sccnd.[bwl] rd */ 1411 + /* sccnd.[bwl] dsp:[rd] */ 1412 + static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a) 1413 + { 1414 + if (a->ld < 3) { 1415 + char dsp[8]; 1416 + rx_index_addr(ctx, dsp, a->sz, a->ld); 1417 + prt("sc%s.%c\t%s[r%d]", cond[a->cd], size[a->sz], dsp, a->rd); 1418 + } else { 1419 + prt("sc%s.%c\tr%d", cond[a->cd], size[a->sz], a->rd); 1420 + } 1421 + return true; 1422 + } 1423 + 1424 + int print_insn_rx(bfd_vma addr, disassemble_info *dis) 1425 + { 1426 + DisasContext ctx; 1427 + uint32_t insn; 1428 + int i; 1429 + 1430 + ctx.dis = dis; 1431 + ctx.pc = ctx.addr = addr; 1432 + ctx.len = 0; 1433 + 1434 + insn = decode_load(&ctx); 1435 + if (!decode(&ctx, insn)) { 1436 + ctx.dis->fprintf_func(ctx.dis->stream, ".byte\t"); 1437 + for (i = 0; i < ctx.addr - addr; i++) { 1438 + if (i > 0) { 1439 + ctx.dis->fprintf_func(ctx.dis->stream, ","); 1440 + } 1441 + ctx.dis->fprintf_func(ctx.dis->stream, "0x%02x", insn >> 24); 1442 + insn <<= 8; 1443 + } 1444 + } 1445 + return ctx.addr - addr; 1446 + }
+112
target/rx/gdbstub.c
··· 1 + /* 2 + * RX gdb server stub 3 + * 4 + * Copyright (c) 2019 Yoshinori Sato 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2 or later, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + #include "qemu/osdep.h" 19 + #include "qemu-common.h" 20 + #include "cpu.h" 21 + #include "exec/gdbstub.h" 22 + 23 + int rx_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) 24 + { 25 + RXCPU *cpu = RXCPU(cs); 26 + CPURXState *env = &cpu->env; 27 + 28 + switch (n) { 29 + case 0 ... 15: 30 + return gdb_get_regl(mem_buf, env->regs[n]); 31 + case 16: 32 + return gdb_get_regl(mem_buf, (env->psw_u) ? env->regs[0] : env->usp); 33 + case 17: 34 + return gdb_get_regl(mem_buf, (!env->psw_u) ? env->regs[0] : env->isp); 35 + case 18: 36 + return gdb_get_regl(mem_buf, rx_cpu_pack_psw(env)); 37 + case 19: 38 + return gdb_get_regl(mem_buf, env->pc); 39 + case 20: 40 + return gdb_get_regl(mem_buf, env->intb); 41 + case 21: 42 + return gdb_get_regl(mem_buf, env->bpsw); 43 + case 22: 44 + return gdb_get_regl(mem_buf, env->bpc); 45 + case 23: 46 + return gdb_get_regl(mem_buf, env->fintv); 47 + case 24: 48 + return gdb_get_regl(mem_buf, env->fpsw); 49 + case 25: 50 + return 0; 51 + } 52 + return 0; 53 + } 54 + 55 + int rx_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) 56 + { 57 + RXCPU *cpu = RXCPU(cs); 58 + CPURXState *env = &cpu->env; 59 + uint32_t psw; 60 + switch (n) { 61 + case 0 ... 
15: 62 + env->regs[n] = ldl_p(mem_buf); 63 + if (n == 0) { 64 + if (env->psw_u) { 65 + env->usp = env->regs[0]; 66 + } else { 67 + env->isp = env->regs[0]; 68 + } 69 + } 70 + break; 71 + case 16: 72 + env->usp = ldl_p(mem_buf); 73 + if (env->psw_u) { 74 + env->regs[0] = ldl_p(mem_buf); 75 + } 76 + break; 77 + case 17: 78 + env->isp = ldl_p(mem_buf); 79 + if (!env->psw_u) { 80 + env->regs[0] = ldl_p(mem_buf); 81 + } 82 + break; 83 + case 18: 84 + psw = ldl_p(mem_buf); 85 + rx_cpu_unpack_psw(env, psw, 1); 86 + break; 87 + case 19: 88 + env->pc = ldl_p(mem_buf); 89 + break; 90 + case 20: 91 + env->intb = ldl_p(mem_buf); 92 + break; 93 + case 21: 94 + env->bpsw = ldl_p(mem_buf); 95 + break; 96 + case 22: 97 + env->bpc = ldl_p(mem_buf); 98 + break; 99 + case 23: 100 + env->fintv = ldl_p(mem_buf); 101 + break; 102 + case 24: 103 + env->fpsw = ldl_p(mem_buf); 104 + break; 105 + case 25: 106 + return 8; 107 + default: 108 + return 0; 109 + } 110 + 111 + return 4; 112 + }
+149
target/rx/helper.c
/*
 * RX emulation
 *
 * Copyright (c) 2019 Yoshinori Sato
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "cpu.h"
#include "exec/log.h"
#include "exec/cpu_ldst.h"
#include "sysemu/sysemu.h"
#include "hw/irq.h"

/*
 * Scatter a packed PSW value into the split flag fields of env.
 * The privileged fields (IPL, U, I and - only on RTE/RTFI, when rte is
 * non-zero - PM) are writable only while PSW.PM == 0 (supervisor mode).
 */
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte)
{
    if (env->psw_pm == 0) {
        env->psw_ipl = FIELD_EX32(psw, PSW, IPL);
        if (rte) {
            /* PSW.PM can write RTE and RTFI */
            env->psw_pm = FIELD_EX32(psw, PSW, PM);
        }
        env->psw_u = FIELD_EX32(psw, PSW, U);
        env->psw_i = FIELD_EX32(psw, PSW, I);
    }
    /* O and S are kept in bit 31 of their fields. */
    env->psw_o = FIELD_EX32(psw, PSW, O) << 31;
    env->psw_s = FIELD_EX32(psw, PSW, S) << 31;
    /* psw_z is stored inverted: Z flag set <=> psw_z == 0. */
    env->psw_z = 1 - FIELD_EX32(psw, PSW, Z);
    env->psw_c = FIELD_EX32(psw, PSW, C);
}

#define INT_FLAGS (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR)
/*
 * Enter an interrupt or exception handler.
 * Saves PC/PSW (to bpc/bpsw for fast interrupts, to the interrupt stack
 * otherwise), switches to supervisor/ISP mode, and loads the new PC from
 * the appropriate vector.  The distinction interrupt vs. exception is
 * made from cs->interrupt_request; exceptions use cs->exception_index.
 */
void rx_cpu_do_interrupt(CPUState *cs)
{
    RXCPU *cpu = RXCPU(cs);
    CPURXState *env = &cpu->env;
    int do_irq = cs->interrupt_request & INT_FLAGS;
    uint32_t save_psw;

    env->in_sleep = 0;

    /* Sync the banked stack pointer with r0 before switching stacks. */
    if (env->psw_u) {
        env->usp = env->regs[0];
    } else {
        env->isp = env->regs[0];
    }
    save_psw = rx_cpu_pack_psw(env);
    /* Handlers run in supervisor mode, on ISP, with interrupts masked. */
    env->psw_pm = env->psw_i = env->psw_u = 0;

    if (do_irq) {
        if (do_irq & CPU_INTERRUPT_FIR) {
            /* Fast interrupt: state is saved in bpc/bpsw, not on stack. */
            env->bpc = env->pc;
            env->bpsw = save_psw;
            env->pc = env->fintv;
            env->psw_ipl = 15;
            cs->interrupt_request &= ~CPU_INTERRUPT_FIR;
            qemu_set_irq(env->ack, env->ack_irq);
            qemu_log_mask(CPU_LOG_INT, "fast interrupt raised\n");
        } else if (do_irq & CPU_INTERRUPT_HARD) {
            /* Normal interrupt: push PSW then PC on the interrupt stack. */
            env->isp -= 4;
            cpu_stl_data(env, env->isp, save_psw);
            env->isp -= 4;
            cpu_stl_data(env, env->isp, env->pc);
            /* Vector through INTB, indexed by the acknowledged irq. */
            env->pc = cpu_ldl_data(env, env->intb + env->ack_irq * 4);
            env->psw_ipl = env->ack_ipl;
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
            qemu_set_irq(env->ack, env->ack_irq);
            qemu_log_mask(CPU_LOG_INT,
                          "interrupt 0x%02x raised\n", env->ack_irq);
        }
    } else {
        uint32_t vec = cs->exception_index;
        const char *expname = "unknown exception";

        env->isp -= 4;
        cpu_stl_data(env, env->isp, save_psw);
        env->isp -= 4;
        cpu_stl_data(env, env->isp, env->pc);

        if (vec < 0x100) {
            /* CPU exceptions vector from the fixed table at 0xffffffc0. */
            env->pc = cpu_ldl_data(env, 0xffffffc0 + vec * 4);
        } else {
            /* 0x100-0x1ff are INT instruction traps, vectored via INTB. */
            env->pc = cpu_ldl_data(env, env->intb + (vec & 0xff) * 4);
        }
        /* The switch below only selects the name used for logging. */
        switch (vec) {
        case 20:
            expname = "privilege violation";
            break;
        case 21:
            expname = "access exception";
            break;
        case 23:
            expname = "illegal instruction";
            break;
        case 25:
            expname = "fpu exception";
            break;
        case 30:
            expname = "non-maskable interrupt";
            break;
        case 0x100 ... 0x1ff:
            expname = "unconditional trap";
        }
        qemu_log_mask(CPU_LOG_INT, "exception 0x%02x [%s] raised\n",
                      (vec & 0xff), expname);
    }
    /* r0 now mirrors the interrupt stack pointer. */
    env->regs[0] = env->isp;
}

/*
 * Decide whether a pending interrupt can be taken now, and take it.
 * Normal interrupts require PSW.I set and a request level above the
 * current IPL; fast interrupts require PSW.I and IPL below 15.
 * Returns true when an interrupt was delivered.
 */
bool rx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    RXCPU *cpu = RXCPU(cs);
    CPURXState *env = &cpu->env;
    int accept = 0;
    /* hardware interrupt (Normal) */
    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
        env->psw_i && (env->psw_ipl < env->req_ipl)) {
        env->ack_irq = env->req_irq;
        env->ack_ipl = env->req_ipl;
        accept = 1;
    }
    /* hardware interrupt (FIR) */
    if ((interrupt_request & CPU_INTERRUPT_FIR) &&
        env->psw_i && (env->psw_ipl < 15)) {
        accept = 1;
    }
    if (accept) {
        rx_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

/* RX has no MMU: virtual and physical addresses are identical. */
hwaddr rx_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    return addr;
}
+31
target/rx/helper.h
/* Exception/trap raising helpers; all are noreturn and end the TB. */
DEF_HELPER_1(raise_illegal_instruction, noreturn, env)
DEF_HELPER_1(raise_access_fault, noreturn, env)
DEF_HELPER_1(raise_privilege_violation, noreturn, env)
DEF_HELPER_1(wait, noreturn, env)
DEF_HELPER_1(debug, noreturn, env)
DEF_HELPER_2(rxint, noreturn, env, i32)
DEF_HELPER_1(rxbrk, noreturn, env)
/* Single-precision floating point operations and conversions. */
DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fsub, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fmul, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fdiv, TCG_CALL_NO_WG, f32, env, f32, f32)
DEF_HELPER_FLAGS_3(fcmp, TCG_CALL_NO_WG, void, env, f32, f32)
DEF_HELPER_FLAGS_2(ftoi, TCG_CALL_NO_WG, i32, env, f32)
DEF_HELPER_FLAGS_2(round, TCG_CALL_NO_WG, i32, env, f32)
DEF_HELPER_FLAGS_2(itof, TCG_CALL_NO_WG, f32, env, i32)
DEF_HELPER_2(set_fpsw, void, env, i32)
/* Accumulator rounding (RACW instruction). */
DEF_HELPER_FLAGS_2(racw, TCG_CALL_NO_WG, void, env, i32)
/* PSW pack/unpack between packed register form and split env fields. */
DEF_HELPER_FLAGS_2(set_psw_rte, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_2(set_psw, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_1(pack_psw, i32, env)
/* Signed/unsigned division (out-of-line to handle flags/edge cases). */
DEF_HELPER_FLAGS_3(div, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_3(divu, TCG_CALL_NO_WG, i32, env, i32, i32)
/* String/block instructions (SCMPU, SMOV*, SSTR, SWHILE, SUNTIL, RMPA). */
DEF_HELPER_FLAGS_1(scmpu, TCG_CALL_NO_WG, void, env)
DEF_HELPER_1(smovu, void, env)
DEF_HELPER_1(smovf, void, env)
DEF_HELPER_1(smovb, void, env)
DEF_HELPER_2(sstr, void, env, i32)
DEF_HELPER_FLAGS_2(swhile, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_2(suntil, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_FLAGS_2(rmpa, TCG_CALL_NO_WG, void, env, i32)
DEF_HELPER_1(satr, void, env)
+621
target/rx/insns.decode
··· 1 + # 2 + # Renesas RX instruction decode definitions. 3 + # 4 + # Copyright (c) 2019 Richard Henderson <richard.henderson@linaro.org> 5 + # Copyright (c) 2019 Yoshinori Sato <ysato@users.sourceforge.jp> 6 + # 7 + # This library is free software; you can redistribute it and/or 8 + # modify it under the terms of the GNU Lesser General Public 9 + # License as published by the Free Software Foundation; either 10 + # version 2 of the License, or (at your option) any later version. 11 + # 12 + # This library is distributed in the hope that it will be useful, 13 + # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 + # Lesser General Public License for more details. 16 + # 17 + # You should have received a copy of the GNU Lesser General Public 18 + # License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 + # 20 + 21 + &bcnd cd dsp sz 22 + &jdsp dsp sz 23 + &jreg rs 24 + &rr rd rs 25 + &ri rd imm 26 + &rrr rd rs rs2 27 + &rri rd imm rs2 28 + &rm rd rs ld mi 29 + &mi rs ld mi imm 30 + &mr rs ld mi rs2 31 + &mcnd ld sz rd cd 32 + ######## 33 + %b1_bdsp 24:3 !function=bdsp_s 34 + 35 + @b1_bcnd_s .... cd:1 ... &bcnd dsp=%b1_bdsp sz=1 36 + @b1_bra_s .... .... &jdsp dsp=%b1_bdsp sz=1 37 + 38 + %b2_r_0 16:4 39 + %b2_li_2 18:2 !function=li 40 + %b2_li_8 24:2 !function=li 41 + %b2_dsp5_3 23:4 19:1 42 + 43 + @b2_rds .... .... .... rd:4 &rr rs=%b2_r_0 44 + @b2_rds_li .... .... .... rd:4 &rri rs2=%b2_r_0 imm=%b2_li_8 45 + @b2_rds_uimm4 .... .... imm:4 rd:4 &rri rs2=%b2_r_0 46 + @b2_rs2_uimm4 .... .... imm:4 rs2:4 &rri rd=0 47 + @b2_rds_imm5 .... ... imm:5 rd:4 &rri rs2=%b2_r_0 48 + @b2_rd_rs_li .... .... rs2:4 rd:4 &rri imm=%b2_li_8 49 + @b2_rd_ld_ub .... .. ld:2 rs:4 rd:4 &rm mi=4 50 + @b2_ld_imm3 .... .. ld:2 rs:4 . imm:3 &mi mi=4 51 + @b2_bcnd_b .... cd:4 dsp:s8 &bcnd sz=2 52 + @b2_bra_b .... .... 
dsp:s8 &jdsp sz=2 53 + 54 + ######## 55 + 56 + %b3_r_0 8:4 57 + %b3_li_10 18:2 !function=li 58 + %b3_dsp5_8 23:1 16:4 59 + %b3_bdsp 8:s8 16:8 60 + 61 + @b3_rd_rs .... .... .... .... rs:4 rd:4 &rr 62 + @b3_rs_rd .... .... .... .... rd:4 rs:4 &rr 63 + @b3_rd_li .... .... .... .... .... rd:4 \ 64 + &rri rs2=%b3_r_0 imm=%b3_li_10 65 + @b3_rd_ld .... .... mi:2 .... ld:2 rs:4 rd:4 &rm 66 + @b3_rd_ld_ub .... .... .... .. ld:2 rs:4 rd:4 &rm mi=4 67 + @b3_rd_ld_ul .... .... .... .. ld:2 rs:4 rd:4 &rm mi=2 68 + @b3_rd_rs_rs2 .... .... .... rd:4 rs:4 rs2:4 &rrr 69 + @b3_rds_imm5 .... .... ....... imm:5 rd:4 &rri rs2=%b3_r_0 70 + @b3_rd_rs_imm5 .... .... ... imm:5 rs2:4 rd:4 &rri 71 + @b3_bcnd_w .... ... cd:1 .... .... .... .... &bcnd dsp=%b3_bdsp sz=3 72 + @b3_bra_w .... .... .... .... .... .... &jdsp dsp=%b3_bdsp sz=3 73 + @b3_ld_rd_rs .... .... .... .. ld:2 rs:4 rd:4 &rm mi=0 74 + @b3_sz_ld_rd_cd .... .... .... sz:2 ld:2 rd:4 cd:4 &mcnd 75 + 76 + ######## 77 + 78 + %b4_li_18 18:2 !function=li 79 + %b4_dsp_16 0:s8 8:8 80 + %b4_bdsp 0:s8 8:8 16:8 81 + 82 + @b4_rd_ldmi .... .... mi:2 .... ld:2 .... .... rs:4 rd:4 &rm 83 + @b4_bra_a .... .... .... .... .... .... .... .... \ 84 + &jdsp dsp=%b4_bdsp sz=4 85 + ######## 86 + # ABS rd 87 + ABS_rr 0111 1110 0010 .... @b2_rds 88 + # ABS rs, rd 89 + ABS_rr 1111 1100 0000 1111 .... .... @b3_rd_rs 90 + 91 + # ADC #imm, rd 92 + ADC_ir 1111 1101 0111 ..00 0010 .... @b3_rd_li 93 + # ADC rs, rd 94 + ADC_rr 1111 1100 0000 1011 .... .... @b3_rd_rs 95 + # ADC dsp[rs].l, rd 96 + # Note only mi==2 allowed. 97 + ADC_mr 0000 0110 ..10 00.. 0000 0010 .... .... @b4_rd_ldmi 98 + 99 + # ADD #uimm4, rd 100 + ADD_irr 0110 0010 .... .... @b2_rds_uimm4 101 + # ADD #imm, rs, rd 102 + ADD_irr 0111 00.. .... .... @b2_rd_rs_li 103 + # ADD dsp[rs].ub, rd 104 + # ADD rs, rd 105 + ADD_mr 0100 10.. .... .... @b2_rd_ld_ub 106 + # ADD dsp[rs], rd 107 + ADD_mr 0000 0110 ..00 10.. .... .... @b3_rd_ld 108 + # ADD rs, rs2, rd 109 + ADD_rrr 1111 1111 0010 .... .... .... 
@b3_rd_rs_rs2 110 + 111 + # AND #uimm4, rd 112 + AND_ir 0110 0100 .... .... @b2_rds_uimm4 113 + # AND #imm, rd 114 + AND_ir 0111 01.. 0010 .... @b2_rds_li 115 + # AND dsp[rs].ub, rd 116 + # AND rs, rd 117 + AND_mr 0101 00.. .... .... @b2_rd_ld_ub 118 + # AND dsp[rs], rd 119 + AND_mr 0000 0110 ..01 00.. .... .... @b3_rd_ld 120 + # AND rs, rs2, rd 121 + AND_rrr 1111 1111 0100 .... .... .... @b3_rd_rs_rs2 122 + 123 + # BCLR #imm, dsp[rd] 124 + BCLR_im 1111 00.. .... 1... @b2_ld_imm3 125 + # BCLR #imm, rs 126 + BCLR_ir 0111 101. .... .... @b2_rds_imm5 127 + # BCLR rs, rd 128 + # BCLR rs, dsp[rd] 129 + { 130 + BCLR_rr 1111 1100 0110 0111 .... .... @b3_rs_rd 131 + BCLR_rm 1111 1100 0110 01.. .... .... @b3_rd_ld_ub 132 + } 133 + 134 + # BCnd.s dsp 135 + BCnd 0001 .... @b1_bcnd_s 136 + # BRA.b dsp 137 + # BCnd.b dsp 138 + { 139 + BRA 0010 1110 .... .... @b2_bra_b 140 + BCnd 0010 .... .... .... @b2_bcnd_b 141 + } 142 + 143 + # BCnd.w dsp 144 + BCnd 0011 101 . .... .... .... .... @b3_bcnd_w 145 + 146 + # BNOT #imm, dsp[rd] 147 + # BMCnd #imm, dsp[rd] 148 + { 149 + BNOT_im 1111 1100 111 imm:3 ld:2 rs:4 1111 150 + BMCnd_im 1111 1100 111 imm:3 ld:2 rd:4 cd:4 151 + } 152 + 153 + # BNOT #imm, rd 154 + # BMCnd #imm, rd 155 + { 156 + BNOT_ir 1111 1101 111 imm:5 1111 rd:4 157 + BMCnd_ir 1111 1101 111 imm:5 cd:4 rd:4 158 + } 159 + 160 + # BNOT rs, rd 161 + # BNOT rs, dsp[rd] 162 + { 163 + BNOT_rr 1111 1100 0110 1111 .... .... @b3_rs_rd 164 + BNOT_rm 1111 1100 0110 11.. .... .... @b3_rd_ld_ub 165 + } 166 + 167 + # BRA.s dsp 168 + BRA 0000 1 ... @b1_bra_s 169 + # BRA.w dsp 170 + BRA 0011 1000 .... .... .... .... @b3_bra_w 171 + # BRA.a dsp 172 + BRA 0000 0100 .... .... .... .... .... .... @b4_bra_a 173 + # BRA.l rs 174 + BRA_l 0111 1111 0100 rd:4 175 + 176 + BRK 0000 0000 177 + 178 + # BSET #imm, dsp[rd] 179 + BSET_im 1111 00.. .... 0... @b2_ld_imm3 180 + # BSET #imm, rd 181 + BSET_ir 0111 100. .... .... 
@b2_rds_imm5
# BSET rs, rd
# BSET rs, dsp[rd]
{
  BSET_rr       1111 1100 0110 0011 .... ....          @b3_rs_rd
  BSET_rm       1111 1100 0110 00.. .... ....          @b3_rd_ld_ub
}

# BSR.w dsp
BSR             0011 1001 .... .... .... ....          @b3_bra_w
# BSR.a dsp
BSR             0000 0101 .... .... .... .... .... .... @b4_bra_a
# BSR.l rs
BSR_l           0111 1111 0101 rd:4

# BTST #imm, dsp[rd]
BTST_im         1111 01.. .... 0...                    @b2_ld_imm3
# BTST #imm, rd
BTST_ir         0111 110. .... ....                    @b2_rds_imm5
# BTST rs, rd
# BTST rs, dsp[rd]
{
  BTST_rr       1111 1100 0110 1011 .... ....          @b3_rs_rd
  BTST_rm       1111 1100 0110 10.. .... ....          @b3_rd_ld_ub
}

# CLRPSW psw
CLRPSW          0111 1111 1011 cb:4

# CMP #uimm4, rs2
CMP_ir          0110 0001 .... ....                    @b2_rs2_uimm4
# CMP #uimm8, rs2
CMP_ir          0111 0101 0101 rs2:4 imm:8             &rri rd=0
# CMP #imm, rs2
CMP_ir          0111 01.. 0000 rs2:4                   &rri imm=%b2_li_8 rd=0
# CMP dsp[rs].ub, rs2
# CMP rs, rs2
CMP_mr          0100 01.. .... ....                    @b2_rd_ld_ub
# CMP dsp[rs], rs2
CMP_mr          0000 0110 ..00 01.. .... ....          @b3_rd_ld

# DIV #imm, rd
DIV_ir          1111 1101 0111 ..00 1000 ....          @b3_rd_li
# DIV dsp[rs].ub, rd
# DIV rs, rd
DIV_mr          1111 1100 0010 00.. .... ....          @b3_rd_ld_ub
# DIV dsp[rs], rd
DIV_mr          0000 0110 ..10 00.. 0000 1000 .... .... @b4_rd_ldmi

# DIVU #imm, rd
DIVU_ir         1111 1101 0111 ..00 1001 ....          @b3_rd_li
# DIVU dsp[rs].ub, rd
# DIVU rs, rd
DIVU_mr         1111 1100 0010 01.. .... ....          @b3_rd_ld_ub
# DIVU dsp[rs], rd
DIVU_mr         0000 0110 ..10 00.. 0000 1001 .... .... @b4_rd_ldmi

# EMUL #imm, rd
EMUL_ir         1111 1101 0111 ..00 0110 ....          @b3_rd_li
# EMUL dsp[rs].ub, rd
# EMUL rs, rd
EMUL_mr         1111 1100 0001 10.. .... ....          @b3_rd_ld_ub
# EMUL dsp[rs], rd
EMUL_mr         0000 0110 ..10 00.. 0000 0110 .... ....
@b4_rd_ldmi 245 + 246 + # EMULU #imm, rd 247 + EMULU_ir 1111 1101 0111 ..00 0111 .... @b3_rd_li 248 + # EMULU dsp[rs].ub, rd 249 + # EMULU rs, rd 250 + EMULU_mr 1111 1100 0001 11.. .... .... @b3_rd_ld_ub 251 + # EMULU dsp[rs], rd 252 + EMULU_mr 0000 0110 ..10 00.. 0000 0111 .... .... @b4_rd_ldmi 253 + 254 + # FADD #imm, rd 255 + FADD_ir 1111 1101 0111 0010 0010 rd:4 256 + # FADD rs, rd 257 + # FADD dsp[rs], rd 258 + FADD_mr 1111 1100 1000 10.. .... .... @b3_rd_ld_ul 259 + 260 + # FCMP #imm, rd 261 + FCMP_ir 1111 1101 0111 0010 0001 rd:4 262 + # FCMP rs, rd 263 + # FCMP dsp[rs], rd 264 + FCMP_mr 1111 1100 1000 01.. .... .... @b3_rd_ld_ul 265 + 266 + # FDIV #imm, rd 267 + FDIV_ir 1111 1101 0111 0010 0100 rd:4 268 + # FDIV rs, rd 269 + # FDIV dsp[rs], rd 270 + FDIV_mr 1111 1100 1001 00.. .... .... @b3_rd_ld_ul 271 + 272 + # FMUL #imm, rd 273 + FMUL_ir 1111 1101 0111 0010 0011 rd:4 274 + # FMUL rs, rd 275 + # FMUL dsp[rs], rd 276 + FMUL_mr 1111 1100 1000 11.. .... .... @b3_rd_ld_ul 277 + 278 + # FSUB #imm, rd 279 + FSUB_ir 1111 1101 0111 0010 0000 rd:4 280 + # FSUB rs, rd 281 + # FSUB dsp[rs], rd 282 + FSUB_mr 1111 1100 1000 00.. .... .... @b3_rd_ld_ul 283 + 284 + # FTOI rs, rd 285 + # FTOI dsp[rs], rd 286 + FTOI 1111 1100 1001 01.. .... .... @b3_rd_ld_ul 287 + 288 + # INT #uimm8 289 + INT 0111 0101 0110 0000 imm:8 290 + 291 + # ITOF dsp[rs].ub, rd 292 + # ITOF rs, rd 293 + ITOF 1111 1100 0100 01.. .... .... @b3_rd_ld_ub 294 + # ITOF dsp[rs], rd 295 + ITOF 0000 0110 ..10 00.. 0001 0001 .... .... @b4_rd_ldmi 296 + 297 + # JMP rs 298 + JMP 0111 1111 0000 rs:4 &jreg 299 + # JSR rs 300 + JSR 0111 1111 0001 rs:4 &jreg 301 + 302 + # MACHI rs, rs2 303 + MACHI 1111 1101 0000 0100 rs:4 rs2:4 304 + # MACLO rs, rs2 305 + MACLO 1111 1101 0000 0101 rs:4 rs2:4 306 + 307 + # MAX #imm, rd 308 + MAX_ir 1111 1101 0111 ..00 0100 .... @b3_rd_li 309 + # MAX dsp[rs].ub, rd 310 + # MAX rs, rd 311 + MAX_mr 1111 1100 0001 00.. .... .... 
@b3_rd_ld_ub 312 + # MAX dsp[rs], rd 313 + MAX_mr 0000 0110 ..10 00.. 0000 0100 .... .... @b4_rd_ldmi 314 + 315 + # MIN #imm, rd 316 + MIN_ir 1111 1101 0111 ..00 0101 .... @b3_rd_li 317 + # MIN dsp[rs].ub, rd 318 + # MIN rs, rd 319 + MIN_mr 1111 1100 0001 01.. .... .... @b3_rd_ld_ub 320 + # MIN dsp[rs], rd 321 + MIN_mr 0000 0110 ..10 00.. 0000 0101 .... .... @b4_rd_ldmi 322 + 323 + # MOV.b rs, dsp5[rd] 324 + MOV_rm 1000 0 .... rd:3 . rs:3 dsp=%b2_dsp5_3 sz=0 325 + # MOV.w rs, dsp5[rd] 326 + MOV_rm 1001 0 .... rd:3 . rs:3 dsp=%b2_dsp5_3 sz=1 327 + # MOV.l rs, dsp5[rd] 328 + MOV_rm 1010 0 .... rd:3 . rs:3 dsp=%b2_dsp5_3 sz=2 329 + # MOV.b dsp5[rs], rd 330 + MOV_mr 1000 1 .... rs:3 . rd:3 dsp=%b2_dsp5_3 sz=0 331 + # MOV.w dsp5[rs], rd 332 + MOV_mr 1001 1 .... rs:3 . rd:3 dsp=%b2_dsp5_3 sz=1 333 + # MOV.l dsp5[rs], rd 334 + MOV_mr 1010 1 .... rs:3 . rd:3 dsp=%b2_dsp5_3 sz=2 335 + # MOV.l #uimm4, rd 336 + MOV_ir 0110 0110 imm:4 rd:4 337 + # MOV.b #imm8, dsp5[rd] 338 + MOV_im 0011 1100 . rd:3 .... imm:8 sz=0 dsp=%b3_dsp5_8 339 + # MOV.w #imm8, dsp5[rd] 340 + MOV_im 0011 1101 . rd:3 .... imm:8 sz=1 dsp=%b3_dsp5_8 341 + # MOV.l #imm8, dsp5[rd] 342 + MOV_im 0011 1110 . rd:3 .... imm:8 sz=2 dsp=%b3_dsp5_8 343 + # MOV.l #imm8, rd 344 + MOV_ir 0111 0101 0100 rd:4 imm:8 345 + # MOV.l #mm8, rd 346 + MOV_ir 1111 1011 rd:4 .. 10 imm=%b2_li_2 347 + # MOV.<bwl> #imm, [rd] 348 + MOV_im 1111 1000 rd:4 .. sz:2 dsp=0 imm=%b2_li_2 349 + # MOV.<bwl> #imm, dsp8[rd] 350 + MOV_im 1111 1001 rd:4 .. sz:2 dsp:8 imm=%b3_li_10 351 + # MOV.<bwl> #imm, dsp16[rd] 352 + MOV_im 1111 1010 rd:4 .. sz:2 .... .... .... .... 
\ 353 + imm=%b4_li_18 dsp=%b4_dsp_16 354 + # MOV.<bwl> [ri,rb], rd 355 + MOV_ar 1111 1110 01 sz:2 ri:4 rb:4 rd:4 356 + # MOV.<bwl> rs, [ri,rb] 357 + MOV_ra 1111 1110 00 sz:2 ri:4 rb:4 rs:4 358 + # Note ldd=3 and lds=3 indicate register src or dst 359 + # MOV.b rs, rd 360 + # MOV.b rs, dsp[rd] 361 + # MOV.b dsp[rs], rd 362 + # MOV.b dsp[rs], dsp[rd] 363 + MOV_mm 1100 ldd:2 lds:2 rs:4 rd:4 sz=0 364 + # MOV.w rs, rd 365 + # MOV.w rs, dsp[rd] 366 + # MOV.w dsp[rs], rd 367 + # MOV.w dsp[rs], dsp[rd] 368 + MOV_mm 1101 ldd:2 lds:2 rs:4 rd:4 sz=1 369 + # MOV.l rs, rd 370 + # MOV.l rs, dsp[rd] 371 + # MOV.l dsp[rs], rd 372 + # MOV.l dsp[rs], dsp[rd] 373 + MOV_mm 1110 ldd:2 lds:2 rs:4 rd:4 sz=2 374 + # MOV.l rs, [rd+] 375 + # MOV.l rs, [-rd] 376 + MOV_rp 1111 1101 0010 0 ad:1 sz:2 rd:4 rs:4 377 + # MOV.l [rs+], rd 378 + # MOV.l [-rs], rd 379 + MOV_pr 1111 1101 0010 1 ad:1 sz:2 rd:4 rs:4 380 + 381 + # MOVU.<bw> dsp5[rs], rd 382 + MOVU_mr 1011 sz:1 ... . rs:3 . rd:3 dsp=%b2_dsp5_3 383 + # MOVU.<bw> [rs], rd 384 + MOVU_mr 0101 1 sz:1 00 rs:4 rd:4 dsp=0 385 + # MOVU.<bw> dsp8[rs], rd 386 + MOVU_mr 0101 1 sz:1 01 rs:4 rd:4 dsp:8 387 + # MOVU.<bw> dsp16[rs], rd 388 + MOVU_mr 0101 1 sz:1 10 rs:4 rd:4 .... .... .... .... dsp=%b4_dsp_16 389 + # MOVU.<bw> rs, rd 390 + MOVU_rr 0101 1 sz:1 11 rs:4 rd:4 391 + # MOVU.<bw> [ri, rb], rd 392 + MOVU_ar 1111 1110 110 sz:1 ri:4 rb:4 rd:4 393 + # MOVU.<bw> [rs+], rd 394 + MOVU_pr 1111 1101 0011 1 ad:1 0 sz:1 rd:4 rs:4 395 + 396 + # MUL #uimm4, rd 397 + MUL_ir 0110 0011 .... .... @b2_rds_uimm4 398 + # MUL #imm4, rd 399 + MUL_ir 0111 01.. 0001 .... @b2_rds_li 400 + # MUL dsp[rs].ub, rd 401 + # MUL rs, rd 402 + MUL_mr 0100 11.. .... .... @b2_rd_ld_ub 403 + # MUL dsp[rs], rd 404 + MUL_mr 0000 0110 ..00 11.. .... .... @b3_rd_ld 405 + # MOV rs, rs2, rd 406 + MUL_rrr 1111 1111 0011 .... .... .... 
@b3_rd_rs_rs2 407 + 408 + # MULHI rs, rs2 409 + MULHI 1111 1101 0000 0000 rs:4 rs2:4 410 + # MULLO rs, rs2 411 + MULLO 1111 1101 0000 0001 rs:4 rs2:4 412 + 413 + # MVFACHI rd 414 + MVFACHI 1111 1101 0001 1111 0000 rd:4 415 + # MVFACMI rd 416 + MVFACMI 1111 1101 0001 1111 0010 rd:4 417 + 418 + # MVFC cr, rd 419 + MVFC 1111 1101 0110 1010 cr:4 rd:4 420 + 421 + # MVTACHI rs 422 + MVTACHI 1111 1101 0001 0111 0000 rs:4 423 + # MVTACLO rs 424 + MVTACLO 1111 1101 0001 0111 0001 rs:4 425 + 426 + # MVTC #imm, cr 427 + MVTC_i 1111 1101 0111 ..11 0000 cr:4 imm=%b3_li_10 428 + # MVTC rs, cr 429 + MVTC_r 1111 1101 0110 1000 rs:4 cr:4 430 + 431 + # MVTIPL #imm 432 + MVTIPL 0111 0101 0111 0000 0000 imm:4 433 + 434 + # NEG rd 435 + NEG_rr 0111 1110 0001 .... @b2_rds 436 + # NEG rs, rd 437 + NEG_rr 1111 1100 0000 0111 .... .... @b3_rd_rs 438 + 439 + NOP 0000 0011 440 + 441 + # NOT rd 442 + NOT_rr 0111 1110 0000 .... @b2_rds 443 + # NOT rs, rd 444 + NOT_rr 1111 1100 0011 1011 .... .... @b3_rd_rs 445 + 446 + # OR #uimm4, rd 447 + OR_ir 0110 0101 .... .... @b2_rds_uimm4 448 + # OR #imm, rd 449 + OR_ir 0111 01.. 0011 .... @b2_rds_li 450 + # OR dsp[rs].ub, rd 451 + # OR rs, rd 452 + OR_mr 0101 01.. .... .... @b2_rd_ld_ub 453 + # OR dsp[rs], rd 454 + OR_mr 0000 0110 .. 0101 .. .... .... @b3_rd_ld 455 + # OR rs, rs2, rd 456 + OR_rrr 1111 1111 0101 .... .... .... @b3_rd_rs_rs2 457 + 458 + # POP cr 459 + POPC 0111 1110 1110 cr:4 460 + # POP rd-rd2 461 + POPM 0110 1111 rd:4 rd2:4 462 + 463 + # POP rd 464 + # PUSH.<bwl> rs 465 + { 466 + POP 0111 1110 1011 rd:4 467 + PUSH_r 0111 1110 10 sz:2 rs:4 468 + } 469 + # PUSH.<bwl> dsp[rs] 470 + PUSH_m 1111 01 ld:2 rs:4 10 sz:2 471 + # PUSH cr 472 + PUSHC 0111 1110 1100 cr:4 473 + # PUSHM rs-rs2 474 + PUSHM 0110 1110 rs:4 rs2:4 475 + 476 + # RACW #imm 477 + RACW 1111 1101 0001 1000 000 imm:1 0000 478 + 479 + # REVL rs,rd 480 + REVL 1111 1101 0110 0111 .... .... @b3_rd_rs 481 + # REVW rs,rd 482 + REVW 1111 1101 0110 0101 .... .... 
@b3_rd_rs

# SMOVF
# RMPA.<bwl>
{
  SMOVF         0111 1111 1000 1111
  RMPA          0111 1111 1000 11 sz:2
}

# ROLC rd
ROLC            0111 1110 0101 ....                    @b2_rds
# RORC rd
RORC            0111 1110 0100 ....                    @b2_rds

# ROTL #imm, rd
ROTL_ir         1111 1101 0110 111. .... ....          @b3_rds_imm5
# ROTL rs, rd
ROTL_rr         1111 1101 0110 0110 .... ....          @b3_rd_rs

# ROTR #imm, rd
ROTR_ir         1111 1101 0110 110. .... ....          @b3_rds_imm5
# ROTR rs, rd
ROTR_rr         1111 1101 0110 0100 .... ....          @b3_rd_rs

# ROUND rs,rd
# ROUND dsp[rs],rd
ROUND           1111 1100 1001 10 .. .... ....         @b3_ld_rd_rs

RTE             0111 1111 1001 0101

RTFI            0111 1111 1001 0100

RTS             0000 0010

# RTSD #imm
RTSD_i          0110 0111 imm:8
# RTSD #imm, rd-rd2
RTSD_irr        0011 1111 rd:4 rd2:4 imm:8

# SAT rd
SAT             0111 1110 0011 ....                    @b2_rds
# SATR
SATR            0111 1111 1001 0011

# SBB rs, rd
SBB_rr          1111 1100 0000 0011 .... ....          @b3_rd_rs
# SBB dsp[rs].l, rd
# Note only mi==2 allowed.
SBB_mr          0000 0110 ..10 00.. 0000 0000 .... .... @b4_rd_ldmi

# SCCnd dsp[rd]
# SCCnd rd
SCCnd           1111 1100 1101 .... .... ....          @b3_sz_ld_rd_cd

# SETPSW psw
SETPSW          0111 1111 1010 cb:4

# SHAR #imm, rd
SHAR_irr        0110 101. .... ....                    @b2_rds_imm5
# SHAR #imm, rs, rd
SHAR_irr        1111 1101 101. .... .... ....          @b3_rd_rs_imm5
# SHAR rs, rd
SHAR_rr         1111 1101 0110 0001 .... ....          @b3_rd_rs

# SHLL #imm, rd
SHLL_irr        0110 110. .... ....                    @b2_rds_imm5
# SHLL #imm, rs, rd
SHLL_irr        1111 1101 110. .... .... ....          @b3_rd_rs_imm5
# SHLL rs, rd
SHLL_rr         1111 1101 0110 0010 .... ....          @b3_rd_rs

# SHLR #imm, rd
SHLR_irr        0110 100. .... ....                    @b2_rds_imm5
# SHLR #imm, rs, rd
SHLR_irr        1111 1101 100. .... .... ....
@b3_rd_rs_imm5 557 + # SHLR rs, rd 558 + SHLR_rr 1111 1101 0110 0000 .... .... @b3_rd_rs 559 + 560 + # SMOVB 561 + # SSTR.<bwl> 562 + { 563 + SMOVB 0111 1111 1000 1011 564 + SSTR 0111 1111 1000 10 sz:2 565 + } 566 + 567 + # STNZ #imm, rd 568 + STNZ 1111 1101 0111 ..00 1111 .... @b3_rd_li 569 + # STZ #imm, rd 570 + STZ 1111 1101 0111 ..00 1110 .... @b3_rd_li 571 + 572 + # SUB #uimm4, rd 573 + SUB_ir 0110 0000 .... .... @b2_rds_uimm4 574 + # SUB dsp[rs].ub, rd 575 + # SUB rs, rd 576 + SUB_mr 0100 00.. .... .... @b2_rd_ld_ub 577 + # SUB dsp[rs], rd 578 + SUB_mr 0000 0110 ..00 00.. .... .... @b3_rd_ld 579 + # SUB rs, rs2, rd 580 + SUB_rrr 1111 1111 0000 .... .... .... @b3_rd_rs_rs2 581 + 582 + # SCMPU 583 + # SUNTIL.<bwl> 584 + { 585 + SCMPU 0111 1111 1000 0011 586 + SUNTIL 0111 1111 1000 00 sz:2 587 + } 588 + 589 + # SMOVU 590 + # SWHILE.<bwl> 591 + { 592 + SMOVU 0111 1111 1000 0111 593 + SWHILE 0111 1111 1000 01 sz:2 594 + } 595 + 596 + # TST #imm, rd 597 + TST_ir 1111 1101 0111 ..00 1100 .... @b3_rd_li 598 + # TST dsp[rs].ub, rd 599 + # TST rs, rd 600 + TST_mr 1111 1100 0011 00.. .... .... @b3_rd_ld_ub 601 + # TST dsp[rs], rd 602 + TST_mr 0000 0110 ..10 00.. 0000 1100 .... .... @b4_rd_ldmi 603 + 604 + WAIT 0111 1111 1001 0110 605 + 606 + # XCHG rs, rd 607 + # XCHG dsp[rs].ub, rd 608 + { 609 + XCHG_rr 1111 1100 0100 0011 .... .... @b3_rd_rs 610 + XCHG_mr 1111 1100 0100 00.. .... .... @b3_rd_ld_ub 611 + } 612 + # XCHG dsp[rs], rd 613 + XCHG_mr 0000 0110 ..10 00.. 0001 0000 .... .... @b4_rd_ldmi 614 + 615 + # XOR #imm, rd 616 + XOR_ir 1111 1101 0111 ..00 1101 .... @b3_rd_li 617 + # XOR dsp[rs].ub, rd 618 + # XOR rs, rd 619 + XOR_mr 1111 1100 0011 01.. .... .... @b3_rd_ld_ub 620 + # XOR dsp[rs], rd 621 + XOR_mr 0000 0110 ..10 00.. 0000 1101 .... .... @b4_rd_ldmi
+470
target/rx/op_helper.c
··· 1 + /* 2 + * RX helper functions 3 + * 4 + * Copyright (c) 2019 Yoshinori Sato 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2 or later, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #include "qemu/osdep.h" 20 + #include "qemu/bitops.h" 21 + #include "cpu.h" 22 + #include "exec/exec-all.h" 23 + #include "exec/helper-proto.h" 24 + #include "exec/cpu_ldst.h" 25 + #include "fpu/softfloat.h" 26 + 27 + static inline void QEMU_NORETURN raise_exception(CPURXState *env, int index, 28 + uintptr_t retaddr); 29 + 30 + static void _set_psw(CPURXState *env, uint32_t psw, uint32_t rte) 31 + { 32 + uint32_t prev_u; 33 + prev_u = env->psw_u; 34 + rx_cpu_unpack_psw(env, psw, rte); 35 + if (prev_u != env->psw_u) { 36 + /* switch r0 */ 37 + if (env->psw_u) { 38 + env->isp = env->regs[0]; 39 + env->regs[0] = env->usp; 40 + } else { 41 + env->usp = env->regs[0]; 42 + env->regs[0] = env->isp; 43 + } 44 + } 45 + } 46 + 47 + void helper_set_psw(CPURXState *env, uint32_t psw) 48 + { 49 + _set_psw(env, psw, 0); 50 + } 51 + 52 + void helper_set_psw_rte(CPURXState *env, uint32_t psw) 53 + { 54 + _set_psw(env, psw, 1); 55 + } 56 + 57 + uint32_t helper_pack_psw(CPURXState *env) 58 + { 59 + return rx_cpu_pack_psw(env); 60 + } 61 + 62 + #define SET_FPSW(b) \ 63 + do { \ 64 + env->fpsw = FIELD_DP32(env->fpsw, FPSW, C ## b, 1); \ 65 + if (!FIELD_EX32(env->fpsw, FPSW, E ## b)) { \ 66 + env->fpsw = FIELD_DP32(env->fpsw, FPSW, F ## b, 1); \ 67 + } 
\ 68 + } while (0) 69 + 70 + /* fp operations */ 71 + static void update_fpsw(CPURXState *env, float32 ret, uintptr_t retaddr) 72 + { 73 + int xcpt, cause, enable; 74 + 75 + env->psw_z = ret & ~(1 << 31); /* mask sign bit */ 76 + env->psw_s = ret; 77 + 78 + xcpt = get_float_exception_flags(&env->fp_status); 79 + 80 + /* Clear the cause entries */ 81 + env->fpsw = FIELD_DP32(env->fpsw, FPSW, CAUSE, 0); 82 + 83 + /* set FPSW */ 84 + if (unlikely(xcpt)) { 85 + if (xcpt & float_flag_invalid) { 86 + SET_FPSW(V); 87 + } 88 + if (xcpt & float_flag_divbyzero) { 89 + SET_FPSW(Z); 90 + } 91 + if (xcpt & float_flag_overflow) { 92 + SET_FPSW(O); 93 + } 94 + if (xcpt & float_flag_underflow) { 95 + SET_FPSW(U); 96 + } 97 + if (xcpt & float_flag_inexact) { 98 + SET_FPSW(X); 99 + } 100 + if ((xcpt & (float_flag_input_denormal 101 + | float_flag_output_denormal)) 102 + && !FIELD_EX32(env->fpsw, FPSW, DN)) { 103 + env->fpsw = FIELD_DP32(env->fpsw, FPSW, CE, 1); 104 + } 105 + 106 + /* update FPSW_FLAG_S */ 107 + if (FIELD_EX32(env->fpsw, FPSW, FLAGS) != 0) { 108 + env->fpsw = FIELD_DP32(env->fpsw, FPSW, FS, 1); 109 + } 110 + 111 + /* Generate an exception if enabled */ 112 + cause = FIELD_EX32(env->fpsw, FPSW, CAUSE); 113 + enable = FIELD_EX32(env->fpsw, FPSW, ENABLE); 114 + enable |= 1 << 5; /* CE always enabled */ 115 + if (cause & enable) { 116 + raise_exception(env, 21, retaddr); 117 + } 118 + } 119 + } 120 + 121 + void helper_set_fpsw(CPURXState *env, uint32_t val) 122 + { 123 + static const int roundmode[] = { 124 + float_round_nearest_even, 125 + float_round_to_zero, 126 + float_round_up, 127 + float_round_down, 128 + }; 129 + uint32_t fpsw = env->fpsw; 130 + fpsw |= 0x7fffff03; 131 + val &= ~0x80000000; 132 + fpsw &= val; 133 + FIELD_DP32(fpsw, FPSW, FS, FIELD_EX32(fpsw, FPSW, FLAGS) != 0); 134 + env->fpsw = fpsw; 135 + set_float_rounding_mode(roundmode[FIELD_EX32(env->fpsw, FPSW, RM)], 136 + &env->fp_status); 137 + } 138 + 139 + #define FLOATOP(op, func) \ 140 + float32 
helper_##op(CPURXState *env, float32 t0, float32 t1) \ 141 + { \ 142 + float32 ret; \ 143 + ret = func(t0, t1, &env->fp_status); \ 144 + update_fpsw(env, *(uint32_t *)&ret, GETPC()); \ 145 + return ret; \ 146 + } 147 + 148 + FLOATOP(fadd, float32_add) 149 + FLOATOP(fsub, float32_sub) 150 + FLOATOP(fmul, float32_mul) 151 + FLOATOP(fdiv, float32_div) 152 + 153 + void helper_fcmp(CPURXState *env, float32 t0, float32 t1) 154 + { 155 + int st; 156 + st = float32_compare(t0, t1, &env->fp_status); 157 + update_fpsw(env, 0, GETPC()); 158 + env->psw_z = 1; 159 + env->psw_s = env->psw_o = 0; 160 + switch (st) { 161 + case float_relation_equal: 162 + env->psw_z = 0; 163 + break; 164 + case float_relation_less: 165 + env->psw_s = -1; 166 + break; 167 + case float_relation_unordered: 168 + env->psw_o = -1; 169 + break; 170 + } 171 + } 172 + 173 + uint32_t helper_ftoi(CPURXState *env, float32 t0) 174 + { 175 + uint32_t ret; 176 + ret = float32_to_int32_round_to_zero(t0, &env->fp_status); 177 + update_fpsw(env, ret, GETPC()); 178 + return ret; 179 + } 180 + 181 + uint32_t helper_round(CPURXState *env, float32 t0) 182 + { 183 + uint32_t ret; 184 + ret = float32_to_int32(t0, &env->fp_status); 185 + update_fpsw(env, ret, GETPC()); 186 + return ret; 187 + } 188 + 189 + float32 helper_itof(CPURXState *env, uint32_t t0) 190 + { 191 + float32 ret; 192 + ret = int32_to_float32(t0, &env->fp_status); 193 + update_fpsw(env, ret, GETPC()); 194 + return ret; 195 + } 196 + 197 + /* string operations */ 198 + void helper_scmpu(CPURXState *env) 199 + { 200 + uint8_t tmp0, tmp1; 201 + if (env->regs[3] == 0) { 202 + return; 203 + } 204 + while (env->regs[3] != 0) { 205 + tmp0 = cpu_ldub_data_ra(env, env->regs[1]++, GETPC()); 206 + tmp1 = cpu_ldub_data_ra(env, env->regs[2]++, GETPC()); 207 + env->regs[3]--; 208 + if (tmp0 != tmp1 || tmp0 == '\0') { 209 + break; 210 + } 211 + } 212 + env->psw_z = tmp0 - tmp1; 213 + env->psw_c = (tmp0 >= tmp1); 214 + } 215 + 216 + static uint32_t (* const 
cpu_ldufn[])(CPUArchState *env, 217 + target_ulong ptr, 218 + uintptr_t retaddr) = { 219 + cpu_ldub_data_ra, cpu_lduw_data_ra, cpu_ldl_data_ra, 220 + }; 221 + 222 + static uint32_t (* const cpu_ldfn[])(CPUArchState *env, 223 + target_ulong ptr, 224 + uintptr_t retaddr) = { 225 + cpu_ldub_data_ra, cpu_lduw_data_ra, cpu_ldl_data_ra, 226 + }; 227 + 228 + static void (* const cpu_stfn[])(CPUArchState *env, 229 + target_ulong ptr, 230 + uint32_t val, 231 + uintptr_t retaddr) = { 232 + cpu_stb_data_ra, cpu_stw_data_ra, cpu_stl_data_ra, 233 + }; 234 + 235 + void helper_sstr(CPURXState *env, uint32_t sz) 236 + { 237 + tcg_debug_assert(sz < 3); 238 + while (env->regs[3] != 0) { 239 + cpu_stfn[sz](env, env->regs[1], env->regs[2], GETPC()); 240 + env->regs[1] += 1 << sz; 241 + env->regs[3]--; 242 + } 243 + } 244 + 245 + #define OP_SMOVU 1 246 + #define OP_SMOVF 0 247 + #define OP_SMOVB 2 248 + 249 + static void smov(uint32_t mode, CPURXState *env) 250 + { 251 + uint8_t tmp; 252 + int dir; 253 + 254 + dir = (mode & OP_SMOVB) ? 
-1 : 1; 255 + while (env->regs[3] != 0) { 256 + tmp = cpu_ldub_data_ra(env, env->regs[2], GETPC()); 257 + cpu_stb_data_ra(env, env->regs[1], tmp, GETPC()); 258 + env->regs[1] += dir; 259 + env->regs[2] += dir; 260 + env->regs[3]--; 261 + if ((mode & OP_SMOVU) && tmp == 0) { 262 + break; 263 + } 264 + } 265 + } 266 + 267 + void helper_smovu(CPURXState *env) 268 + { 269 + smov(OP_SMOVU, env); 270 + } 271 + 272 + void helper_smovf(CPURXState *env) 273 + { 274 + smov(OP_SMOVF, env); 275 + } 276 + 277 + void helper_smovb(CPURXState *env) 278 + { 279 + smov(OP_SMOVB, env); 280 + } 281 + 282 + 283 + void helper_suntil(CPURXState *env, uint32_t sz) 284 + { 285 + uint32_t tmp; 286 + tcg_debug_assert(sz < 3); 287 + if (env->regs[3] == 0) { 288 + return ; 289 + } 290 + while (env->regs[3] != 0) { 291 + tmp = cpu_ldufn[sz](env, env->regs[1], GETPC()); 292 + env->regs[1] += 1 << sz; 293 + env->regs[3]--; 294 + if (tmp == env->regs[2]) { 295 + break; 296 + } 297 + } 298 + env->psw_z = tmp - env->regs[2]; 299 + env->psw_c = (tmp <= env->regs[2]); 300 + } 301 + 302 + void helper_swhile(CPURXState *env, uint32_t sz) 303 + { 304 + uint32_t tmp; 305 + tcg_debug_assert(sz < 3); 306 + if (env->regs[3] == 0) { 307 + return ; 308 + } 309 + while (env->regs[3] != 0) { 310 + tmp = cpu_ldufn[sz](env, env->regs[1], GETPC()); 311 + env->regs[1] += 1 << sz; 312 + env->regs[3]--; 313 + if (tmp != env->regs[2]) { 314 + break; 315 + } 316 + } 317 + env->psw_z = env->regs[3]; 318 + env->psw_c = (tmp <= env->regs[2]); 319 + } 320 + 321 + /* accumlator operations */ 322 + void helper_rmpa(CPURXState *env, uint32_t sz) 323 + { 324 + uint64_t result_l, prev; 325 + int32_t result_h; 326 + int64_t tmp0, tmp1; 327 + 328 + if (env->regs[3] == 0) { 329 + return; 330 + } 331 + result_l = env->regs[5]; 332 + result_l <<= 32; 333 + result_l |= env->regs[4]; 334 + result_h = env->regs[6]; 335 + env->psw_o = 0; 336 + 337 + while (env->regs[3] != 0) { 338 + tmp0 = cpu_ldfn[sz](env, env->regs[1], GETPC()); 339 + 
tmp1 = cpu_ldfn[sz](env, env->regs[2], GETPC()); 340 + tmp0 *= tmp1; 341 + prev = result_l; 342 + result_l += tmp0; 343 + /* carry / bollow */ 344 + if (tmp0 < 0) { 345 + if (prev > result_l) { 346 + result_h--; 347 + } 348 + } else { 349 + if (prev < result_l) { 350 + result_h++; 351 + } 352 + } 353 + 354 + env->regs[1] += 1 << sz; 355 + env->regs[2] += 1 << sz; 356 + } 357 + env->psw_s = result_h; 358 + env->psw_o = (result_h != 0 && result_h != -1) << 31; 359 + env->regs[6] = result_h; 360 + env->regs[5] = result_l >> 32; 361 + env->regs[4] = result_l & 0xffffffff; 362 + } 363 + 364 + void helper_racw(CPURXState *env, uint32_t imm) 365 + { 366 + int64_t acc; 367 + acc = env->acc; 368 + acc <<= (imm + 1); 369 + acc += 0x0000000080000000LL; 370 + if (acc > 0x00007fff00000000LL) { 371 + acc = 0x00007fff00000000LL; 372 + } else if (acc < -0x800000000000LL) { 373 + acc = -0x800000000000LL; 374 + } else { 375 + acc &= 0xffffffff00000000LL; 376 + } 377 + env->acc = acc; 378 + } 379 + 380 + void helper_satr(CPURXState *env) 381 + { 382 + if (env->psw_o >> 31) { 383 + if ((int)env->psw_s < 0) { 384 + env->regs[6] = 0x00000000; 385 + env->regs[5] = 0x7fffffff; 386 + env->regs[4] = 0xffffffff; 387 + } else { 388 + env->regs[6] = 0xffffffff; 389 + env->regs[5] = 0x80000000; 390 + env->regs[4] = 0x00000000; 391 + } 392 + } 393 + } 394 + 395 + /* div */ 396 + uint32_t helper_div(CPURXState *env, uint32_t num, uint32_t den) 397 + { 398 + uint32_t ret = num; 399 + if (!((num == INT_MIN && den == -1) || den == 0)) { 400 + ret = (int32_t)num / (int32_t)den; 401 + env->psw_o = 0; 402 + } else { 403 + env->psw_o = -1; 404 + } 405 + return ret; 406 + } 407 + 408 + uint32_t helper_divu(CPURXState *env, uint32_t num, uint32_t den) 409 + { 410 + uint32_t ret = num; 411 + if (den != 0) { 412 + ret = num / den; 413 + env->psw_o = 0; 414 + } else { 415 + env->psw_o = -1; 416 + } 417 + return ret; 418 + } 419 + 420 + /* exception */ 421 + static inline void QEMU_NORETURN 
raise_exception(CPURXState *env, int index, 422 + uintptr_t retaddr) 423 + { 424 + CPUState *cs = env_cpu(env); 425 + 426 + cs->exception_index = index; 427 + cpu_loop_exit_restore(cs, retaddr); 428 + } 429 + 430 + void QEMU_NORETURN helper_raise_privilege_violation(CPURXState *env) 431 + { 432 + raise_exception(env, 20, GETPC()); 433 + } 434 + 435 + void QEMU_NORETURN helper_raise_access_fault(CPURXState *env) 436 + { 437 + raise_exception(env, 21, GETPC()); 438 + } 439 + 440 + void QEMU_NORETURN helper_raise_illegal_instruction(CPURXState *env) 441 + { 442 + raise_exception(env, 23, GETPC()); 443 + } 444 + 445 + void QEMU_NORETURN helper_wait(CPURXState *env) 446 + { 447 + CPUState *cs = env_cpu(env); 448 + 449 + cs->halted = 1; 450 + env->in_sleep = 1; 451 + raise_exception(env, EXCP_HLT, 0); 452 + } 453 + 454 + void QEMU_NORETURN helper_debug(CPURXState *env) 455 + { 456 + CPUState *cs = env_cpu(env); 457 + 458 + cs->exception_index = EXCP_DEBUG; 459 + cpu_loop_exit(cs); 460 + } 461 + 462 + void QEMU_NORETURN helper_rxint(CPURXState *env, uint32_t vec) 463 + { 464 + raise_exception(env, 0x100 + vec, 0); 465 + } 466 + 467 + void QEMU_NORETURN helper_rxbrk(CPURXState *env) 468 + { 469 + raise_exception(env, 0x100, 0); 470 + }
+2439
target/rx/translate.c
··· 1 + /* 2 + * RX translation 3 + * 4 + * Copyright (c) 2019 Yoshinori Sato 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2 or later, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #include "qemu/osdep.h" 20 + #include "qemu/bswap.h" 21 + #include "qemu/qemu-print.h" 22 + #include "cpu.h" 23 + #include "exec/exec-all.h" 24 + #include "tcg/tcg-op.h" 25 + #include "exec/cpu_ldst.h" 26 + #include "exec/helper-proto.h" 27 + #include "exec/helper-gen.h" 28 + #include "exec/translator.h" 29 + #include "trace-tcg.h" 30 + #include "exec/log.h" 31 + 32 + typedef struct DisasContext { 33 + DisasContextBase base; 34 + CPURXState *env; 35 + uint32_t pc; 36 + } DisasContext; 37 + 38 + typedef struct DisasCompare { 39 + TCGv value; 40 + TCGv temp; 41 + TCGCond cond; 42 + } DisasCompare; 43 + 44 + const char *rx_crname(uint8_t cr) 45 + { 46 + static const char *cr_names[] = { 47 + "psw", "pc", "usp", "fpsw", "", "", "", "", 48 + "bpsw", "bpc", "isp", "fintv", "intb", "", "", "" 49 + }; 50 + if (cr >= ARRAY_SIZE(cr_names)) { 51 + return "illegal"; 52 + } 53 + return cr_names[cr]; 54 + } 55 + 56 + /* Target-specific values for dc->base.is_jmp. 
*/ 57 + #define DISAS_JUMP DISAS_TARGET_0 58 + #define DISAS_UPDATE DISAS_TARGET_1 59 + #define DISAS_EXIT DISAS_TARGET_2 60 + 61 + /* global register indexes */ 62 + static TCGv cpu_regs[16]; 63 + static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c; 64 + static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl; 65 + static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp; 66 + static TCGv cpu_fintv, cpu_intb, cpu_pc; 67 + static TCGv_i64 cpu_acc; 68 + 69 + #define cpu_sp cpu_regs[0] 70 + 71 + #include "exec/gen-icount.h" 72 + 73 + /* decoder helper */ 74 + static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn, 75 + int i, int n) 76 + { 77 + while (++i <= n) { 78 + uint8_t b = cpu_ldub_code(ctx->env, ctx->base.pc_next++); 79 + insn |= b << (32 - i * 8); 80 + } 81 + return insn; 82 + } 83 + 84 + static uint32_t li(DisasContext *ctx, int sz) 85 + { 86 + int32_t tmp, addr; 87 + CPURXState *env = ctx->env; 88 + addr = ctx->base.pc_next; 89 + 90 + tcg_debug_assert(sz < 4); 91 + switch (sz) { 92 + case 1: 93 + ctx->base.pc_next += 1; 94 + return cpu_ldsb_code(env, addr); 95 + case 2: 96 + ctx->base.pc_next += 2; 97 + return cpu_ldsw_code(env, addr); 98 + case 3: 99 + ctx->base.pc_next += 3; 100 + tmp = cpu_ldsb_code(env, addr + 2) << 16; 101 + tmp |= cpu_lduw_code(env, addr) & 0xffff; 102 + return tmp; 103 + case 0: 104 + ctx->base.pc_next += 4; 105 + return cpu_ldl_code(env, addr); 106 + } 107 + return 0; 108 + } 109 + 110 + static int bdsp_s(DisasContext *ctx, int d) 111 + { 112 + /* 113 + * 0 -> 8 114 + * 1 -> 9 115 + * 2 -> 10 116 + * 3 -> 3 117 + * : 118 + * 7 -> 7 119 + */ 120 + if (d < 3) { 121 + d += 8; 122 + } 123 + return d; 124 + } 125 + 126 + /* Include the auto-generated decoder. 
*/ 127 + #include "decode.inc.c" 128 + 129 + void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags) 130 + { 131 + RXCPU *cpu = RXCPU(cs); 132 + CPURXState *env = &cpu->env; 133 + int i; 134 + uint32_t psw; 135 + 136 + psw = rx_cpu_pack_psw(env); 137 + qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n", 138 + env->pc, psw); 139 + for (i = 0; i < 16; i += 4) { 140 + qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n", 141 + i, env->regs[i], i + 1, env->regs[i + 1], 142 + i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]); 143 + } 144 + } 145 + 146 + static bool use_goto_tb(DisasContext *dc, target_ulong dest) 147 + { 148 + if (unlikely(dc->base.singlestep_enabled)) { 149 + return false; 150 + } else { 151 + return true; 152 + } 153 + } 154 + 155 + static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) 156 + { 157 + if (use_goto_tb(dc, dest)) { 158 + tcg_gen_goto_tb(n); 159 + tcg_gen_movi_i32(cpu_pc, dest); 160 + tcg_gen_exit_tb(dc->base.tb, n); 161 + } else { 162 + tcg_gen_movi_i32(cpu_pc, dest); 163 + if (dc->base.singlestep_enabled) { 164 + gen_helper_debug(cpu_env); 165 + } else { 166 + tcg_gen_lookup_and_goto_ptr(); 167 + } 168 + } 169 + dc->base.is_jmp = DISAS_NORETURN; 170 + } 171 + 172 + /* generic load wrapper */ 173 + static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem) 174 + { 175 + tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE); 176 + } 177 + 178 + /* unsigned load wrapper */ 179 + static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem) 180 + { 181 + tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE); 182 + } 183 + 184 + /* generic store wrapper */ 185 + static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem) 186 + { 187 + tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE); 188 + } 189 + 190 + /* [ri, rb] */ 191 + static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem, 192 + int size, int ri, int rb) 193 + { 194 + tcg_gen_shli_i32(mem, cpu_regs[ri], size); 195 + tcg_gen_add_i32(mem, mem, 
cpu_regs[rb]); 196 + } 197 + 198 + /* dsp[reg] */ 199 + static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem, 200 + int ld, int size, int reg) 201 + { 202 + uint32_t dsp; 203 + 204 + tcg_debug_assert(ld < 3); 205 + switch (ld) { 206 + case 0: 207 + return cpu_regs[reg]; 208 + case 1: 209 + dsp = cpu_ldub_code(ctx->env, ctx->base.pc_next) << size; 210 + tcg_gen_addi_i32(mem, cpu_regs[reg], dsp); 211 + ctx->base.pc_next += 1; 212 + return mem; 213 + case 2: 214 + dsp = cpu_lduw_code(ctx->env, ctx->base.pc_next) << size; 215 + tcg_gen_addi_i32(mem, cpu_regs[reg], dsp); 216 + ctx->base.pc_next += 2; 217 + return mem; 218 + } 219 + return NULL; 220 + } 221 + 222 + static inline MemOp mi_to_mop(unsigned mi) 223 + { 224 + static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB }; 225 + tcg_debug_assert(mi < 5); 226 + return mop[mi]; 227 + } 228 + 229 + /* load source operand */ 230 + static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem, 231 + int ld, int mi, int rs) 232 + { 233 + TCGv addr; 234 + MemOp mop; 235 + if (ld < 3) { 236 + mop = mi_to_mop(mi); 237 + addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs); 238 + tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE); 239 + return mem; 240 + } else { 241 + return cpu_regs[rs]; 242 + } 243 + } 244 + 245 + /* Processor mode check */ 246 + static int is_privileged(DisasContext *ctx, int is_exception) 247 + { 248 + if (FIELD_EX32(ctx->base.tb->flags, PSW, PM)) { 249 + if (is_exception) { 250 + gen_helper_raise_privilege_violation(cpu_env); 251 + } 252 + return 0; 253 + } else { 254 + return 1; 255 + } 256 + } 257 + 258 + /* generate QEMU condition */ 259 + static void psw_cond(DisasCompare *dc, uint32_t cond) 260 + { 261 + tcg_debug_assert(cond < 16); 262 + switch (cond) { 263 + case 0: /* z */ 264 + dc->cond = TCG_COND_EQ; 265 + dc->value = cpu_psw_z; 266 + break; 267 + case 1: /* nz */ 268 + dc->cond = TCG_COND_NE; 269 + dc->value = cpu_psw_z; 270 + break; 271 + case 2: /* c */ 272 + dc->cond = 
TCG_COND_NE; 273 + dc->value = cpu_psw_c; 274 + break; 275 + case 3: /* nc */ 276 + dc->cond = TCG_COND_EQ; 277 + dc->value = cpu_psw_c; 278 + break; 279 + case 4: /* gtu (C& ~Z) == 1 */ 280 + case 5: /* leu (C& ~Z) == 0 */ 281 + tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0); 282 + tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c); 283 + dc->cond = (cond == 4) ? TCG_COND_NE : TCG_COND_EQ; 284 + dc->value = dc->temp; 285 + break; 286 + case 6: /* pz (S == 0) */ 287 + dc->cond = TCG_COND_GE; 288 + dc->value = cpu_psw_s; 289 + break; 290 + case 7: /* n (S == 1) */ 291 + dc->cond = TCG_COND_LT; 292 + dc->value = cpu_psw_s; 293 + break; 294 + case 8: /* ge (S^O)==0 */ 295 + case 9: /* lt (S^O)==1 */ 296 + tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s); 297 + dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT; 298 + dc->value = dc->temp; 299 + break; 300 + case 10: /* gt ((S^O)|Z)==0 */ 301 + case 11: /* le ((S^O)|Z)==1 */ 302 + tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s); 303 + tcg_gen_sari_i32(dc->temp, dc->temp, 31); 304 + tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp); 305 + dc->cond = (cond == 10) ? 
TCG_COND_NE : TCG_COND_EQ; 306 + dc->value = dc->temp; 307 + break; 308 + case 12: /* o */ 309 + dc->cond = TCG_COND_LT; 310 + dc->value = cpu_psw_o; 311 + break; 312 + case 13: /* no */ 313 + dc->cond = TCG_COND_GE; 314 + dc->value = cpu_psw_o; 315 + break; 316 + case 14: /* always true */ 317 + dc->cond = TCG_COND_ALWAYS; 318 + dc->value = dc->temp; 319 + break; 320 + case 15: /* always false */ 321 + dc->cond = TCG_COND_NEVER; 322 + dc->value = dc->temp; 323 + break; 324 + } 325 + } 326 + 327 + static void move_from_cr(TCGv ret, int cr, uint32_t pc) 328 + { 329 + TCGv z = tcg_const_i32(0); 330 + switch (cr) { 331 + case 0: /* PSW */ 332 + gen_helper_pack_psw(ret, cpu_env); 333 + break; 334 + case 1: /* PC */ 335 + tcg_gen_movi_i32(ret, pc); 336 + break; 337 + case 2: /* USP */ 338 + tcg_gen_movcond_i32(TCG_COND_NE, ret, 339 + cpu_psw_u, z, cpu_sp, cpu_usp); 340 + break; 341 + case 3: /* FPSW */ 342 + tcg_gen_mov_i32(ret, cpu_fpsw); 343 + break; 344 + case 8: /* BPSW */ 345 + tcg_gen_mov_i32(ret, cpu_bpsw); 346 + break; 347 + case 9: /* BPC */ 348 + tcg_gen_mov_i32(ret, cpu_bpc); 349 + break; 350 + case 10: /* ISP */ 351 + tcg_gen_movcond_i32(TCG_COND_EQ, ret, 352 + cpu_psw_u, z, cpu_sp, cpu_isp); 353 + break; 354 + case 11: /* FINTV */ 355 + tcg_gen_mov_i32(ret, cpu_fintv); 356 + break; 357 + case 12: /* INTB */ 358 + tcg_gen_mov_i32(ret, cpu_intb); 359 + break; 360 + default: 361 + qemu_log_mask(LOG_GUEST_ERROR, "Unimplement control register %d", cr); 362 + /* Unimplement registers return 0 */ 363 + tcg_gen_movi_i32(ret, 0); 364 + break; 365 + } 366 + tcg_temp_free(z); 367 + } 368 + 369 + static void move_to_cr(DisasContext *ctx, TCGv val, int cr) 370 + { 371 + TCGv z; 372 + if (cr >= 8 && !is_privileged(ctx, 0)) { 373 + /* Some control registers can only be written in privileged mode. 
*/ 374 + qemu_log_mask(LOG_GUEST_ERROR, 375 + "disallow control register write %s", rx_crname(cr)); 376 + return; 377 + } 378 + z = tcg_const_i32(0); 379 + switch (cr) { 380 + case 0: /* PSW */ 381 + gen_helper_set_psw(cpu_env, val); 382 + break; 383 + /* case 1: to PC not supported */ 384 + case 2: /* USP */ 385 + tcg_gen_mov_i32(cpu_usp, val); 386 + tcg_gen_movcond_i32(TCG_COND_NE, cpu_sp, 387 + cpu_psw_u, z, cpu_usp, cpu_sp); 388 + break; 389 + case 3: /* FPSW */ 390 + gen_helper_set_fpsw(cpu_env, val); 391 + break; 392 + case 8: /* BPSW */ 393 + tcg_gen_mov_i32(cpu_bpsw, val); 394 + break; 395 + case 9: /* BPC */ 396 + tcg_gen_mov_i32(cpu_bpc, val); 397 + break; 398 + case 10: /* ISP */ 399 + tcg_gen_mov_i32(cpu_isp, val); 400 + /* if PSW.U is 0, copy isp to r0 */ 401 + tcg_gen_movcond_i32(TCG_COND_EQ, cpu_sp, 402 + cpu_psw_u, z, cpu_isp, cpu_sp); 403 + break; 404 + case 11: /* FINTV */ 405 + tcg_gen_mov_i32(cpu_fintv, val); 406 + break; 407 + case 12: /* INTB */ 408 + tcg_gen_mov_i32(cpu_intb, val); 409 + break; 410 + default: 411 + qemu_log_mask(LOG_GUEST_ERROR, 412 + "Unimplement control register %d", cr); 413 + break; 414 + } 415 + tcg_temp_free(z); 416 + } 417 + 418 + static void push(TCGv val) 419 + { 420 + tcg_gen_subi_i32(cpu_sp, cpu_sp, 4); 421 + rx_gen_st(MO_32, val, cpu_sp); 422 + } 423 + 424 + static void pop(TCGv ret) 425 + { 426 + rx_gen_ld(MO_32, ret, cpu_sp); 427 + tcg_gen_addi_i32(cpu_sp, cpu_sp, 4); 428 + } 429 + 430 + /* mov.<bwl> rs,dsp5[rd] */ 431 + static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a) 432 + { 433 + TCGv mem; 434 + mem = tcg_temp_new(); 435 + tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz); 436 + rx_gen_st(a->sz, cpu_regs[a->rs], mem); 437 + tcg_temp_free(mem); 438 + return true; 439 + } 440 + 441 + /* mov.<bwl> dsp5[rs],rd */ 442 + static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a) 443 + { 444 + TCGv mem; 445 + mem = tcg_temp_new(); 446 + tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz); 447 + 
rx_gen_ld(a->sz, cpu_regs[a->rd], mem); 448 + tcg_temp_free(mem); 449 + return true; 450 + } 451 + 452 + /* mov.l #uimm4,rd */ 453 + /* mov.l #uimm8,rd */ 454 + /* mov.l #imm,rd */ 455 + static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a) 456 + { 457 + tcg_gen_movi_i32(cpu_regs[a->rd], a->imm); 458 + return true; 459 + } 460 + 461 + /* mov.<bwl> #uimm8,dsp[rd] */ 462 + /* mov.<bwl> #imm, dsp[rd] */ 463 + static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a) 464 + { 465 + TCGv imm, mem; 466 + imm = tcg_const_i32(a->imm); 467 + mem = tcg_temp_new(); 468 + tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz); 469 + rx_gen_st(a->sz, imm, mem); 470 + tcg_temp_free(imm); 471 + tcg_temp_free(mem); 472 + return true; 473 + } 474 + 475 + /* mov.<bwl> [ri,rb],rd */ 476 + static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a) 477 + { 478 + TCGv mem; 479 + mem = tcg_temp_new(); 480 + rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); 481 + rx_gen_ld(a->sz, cpu_regs[a->rd], mem); 482 + tcg_temp_free(mem); 483 + return true; 484 + } 485 + 486 + /* mov.<bwl> rd,[ri,rb] */ 487 + static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a) 488 + { 489 + TCGv mem; 490 + mem = tcg_temp_new(); 491 + rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); 492 + rx_gen_st(a->sz, cpu_regs[a->rs], mem); 493 + tcg_temp_free(mem); 494 + return true; 495 + } 496 + 497 + /* mov.<bwl> dsp[rs],dsp[rd] */ 498 + /* mov.<bwl> rs,dsp[rd] */ 499 + /* mov.<bwl> dsp[rs],rd */ 500 + /* mov.<bwl> rs,rd */ 501 + static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a) 502 + { 503 + static void (* const mov[])(TCGv ret, TCGv arg) = { 504 + tcg_gen_ext8s_i32, tcg_gen_ext16s_i32, tcg_gen_mov_i32, 505 + }; 506 + TCGv tmp, mem, addr; 507 + if (a->lds == 3 && a->ldd == 3) { 508 + /* mov.<bwl> rs,rd */ 509 + mov[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]); 510 + return true; 511 + } 512 + 513 + mem = tcg_temp_new(); 514 + if (a->lds == 3) { 515 + /* mov.<bwl> rs,dsp[rd] */ 516 + addr = rx_index_addr(ctx, 
mem, a->ldd, a->sz, a->rs); 517 + rx_gen_st(a->sz, cpu_regs[a->rd], addr); 518 + } else if (a->ldd == 3) { 519 + /* mov.<bwl> dsp[rs],rd */ 520 + addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs); 521 + rx_gen_ld(a->sz, cpu_regs[a->rd], addr); 522 + } else { 523 + /* mov.<bwl> dsp[rs],dsp[rd] */ 524 + tmp = tcg_temp_new(); 525 + addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs); 526 + rx_gen_ld(a->sz, tmp, addr); 527 + addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd); 528 + rx_gen_st(a->sz, tmp, addr); 529 + tcg_temp_free(tmp); 530 + } 531 + tcg_temp_free(mem); 532 + return true; 533 + } 534 + 535 + /* mov.<bwl> rs,[rd+] */ 536 + /* mov.<bwl> rs,[-rd] */ 537 + static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a) 538 + { 539 + TCGv val; 540 + val = tcg_temp_new(); 541 + tcg_gen_mov_i32(val, cpu_regs[a->rs]); 542 + if (a->ad == 1) { 543 + tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); 544 + } 545 + rx_gen_st(a->sz, val, cpu_regs[a->rd]); 546 + if (a->ad == 0) { 547 + tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); 548 + } 549 + tcg_temp_free(val); 550 + return true; 551 + } 552 + 553 + /* mov.<bwl> [rd+],rs */ 554 + /* mov.<bwl> [-rd],rs */ 555 + static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a) 556 + { 557 + TCGv val; 558 + val = tcg_temp_new(); 559 + if (a->ad == 1) { 560 + tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); 561 + } 562 + rx_gen_ld(a->sz, val, cpu_regs[a->rd]); 563 + if (a->ad == 0) { 564 + tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); 565 + } 566 + tcg_gen_mov_i32(cpu_regs[a->rs], val); 567 + tcg_temp_free(val); 568 + return true; 569 + } 570 + 571 + /* movu.<bw> dsp5[rs],rd */ 572 + /* movu.<bw> dsp[rs],rd */ 573 + static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a) 574 + { 575 + TCGv mem; 576 + mem = tcg_temp_new(); 577 + tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz); 578 + rx_gen_ldu(a->sz, cpu_regs[a->rd], mem); 579 + tcg_temp_free(mem); 
580 + return true; 581 + } 582 + 583 + /* movu.<bw> rs,rd */ 584 + static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a) 585 + { 586 + static void (* const ext[])(TCGv ret, TCGv arg) = { 587 + tcg_gen_ext8u_i32, tcg_gen_ext16u_i32, 588 + }; 589 + ext[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]); 590 + return true; 591 + } 592 + 593 + /* movu.<bw> [ri,rb],rd */ 594 + static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a) 595 + { 596 + TCGv mem; 597 + mem = tcg_temp_new(); 598 + rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); 599 + rx_gen_ldu(a->sz, cpu_regs[a->rd], mem); 600 + tcg_temp_free(mem); 601 + return true; 602 + } 603 + 604 + /* movu.<bw> [rd+],rs */ 605 + /* mov.<bw> [-rd],rs */ 606 + static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a) 607 + { 608 + TCGv val; 609 + val = tcg_temp_new(); 610 + if (a->ad == 1) { 611 + tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); 612 + } 613 + rx_gen_ldu(a->sz, val, cpu_regs[a->rd]); 614 + if (a->ad == 0) { 615 + tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); 616 + } 617 + tcg_gen_mov_i32(cpu_regs[a->rs], val); 618 + tcg_temp_free(val); 619 + return true; 620 + } 621 + 622 + 623 + /* pop rd */ 624 + static bool trans_POP(DisasContext *ctx, arg_POP *a) 625 + { 626 + /* mov.l [r0+], rd */ 627 + arg_MOV_rp mov_a; 628 + mov_a.rd = 0; 629 + mov_a.rs = a->rd; 630 + mov_a.ad = 0; 631 + mov_a.sz = MO_32; 632 + trans_MOV_pr(ctx, &mov_a); 633 + return true; 634 + } 635 + 636 + /* popc cr */ 637 + static bool trans_POPC(DisasContext *ctx, arg_POPC *a) 638 + { 639 + TCGv val; 640 + val = tcg_temp_new(); 641 + pop(val); 642 + move_to_cr(ctx, val, a->cr); 643 + if (a->cr == 0 && is_privileged(ctx, 0)) { 644 + /* PSW.I may be updated here. exit TB. 
*/ 645 + ctx->base.is_jmp = DISAS_UPDATE; 646 + } 647 + tcg_temp_free(val); 648 + return true; 649 + } 650 + 651 + /* popm rd-rd2 */ 652 + static bool trans_POPM(DisasContext *ctx, arg_POPM *a) 653 + { 654 + int r; 655 + if (a->rd == 0 || a->rd >= a->rd2) { 656 + qemu_log_mask(LOG_GUEST_ERROR, 657 + "Invalid register ranges r%d-r%d", a->rd, a->rd2); 658 + } 659 + r = a->rd; 660 + while (r <= a->rd2 && r < 16) { 661 + pop(cpu_regs[r++]); 662 + } 663 + return true; 664 + } 665 + 666 + 667 + /* push.<bwl> rs */ 668 + static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a) 669 + { 670 + TCGv val; 671 + val = tcg_temp_new(); 672 + tcg_gen_mov_i32(val, cpu_regs[a->rs]); 673 + tcg_gen_subi_i32(cpu_sp, cpu_sp, 4); 674 + rx_gen_st(a->sz, val, cpu_sp); 675 + tcg_temp_free(val); 676 + return true; 677 + } 678 + 679 + /* push.<bwl> dsp[rs] */ 680 + static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a) 681 + { 682 + TCGv mem, val, addr; 683 + mem = tcg_temp_new(); 684 + val = tcg_temp_new(); 685 + addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs); 686 + rx_gen_ld(a->sz, val, addr); 687 + tcg_gen_subi_i32(cpu_sp, cpu_sp, 4); 688 + rx_gen_st(a->sz, val, cpu_sp); 689 + tcg_temp_free(mem); 690 + tcg_temp_free(val); 691 + return true; 692 + } 693 + 694 + /* pushc rx */ 695 + static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a) 696 + { 697 + TCGv val; 698 + val = tcg_temp_new(); 699 + move_from_cr(val, a->cr, ctx->pc); 700 + push(val); 701 + tcg_temp_free(val); 702 + return true; 703 + } 704 + 705 + /* pushm rs-rs2 */ 706 + static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a) 707 + { 708 + int r; 709 + 710 + if (a->rs == 0 || a->rs >= a->rs2) { 711 + qemu_log_mask(LOG_GUEST_ERROR, 712 + "Invalid register ranges r%d-r%d", a->rs, a->rs2); 713 + } 714 + r = a->rs2; 715 + while (r >= a->rs && r >= 0) { 716 + push(cpu_regs[r--]); 717 + } 718 + return true; 719 + } 720 + 721 + /* xchg rs,rd */ 722 + static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a) 723 + { 724 
+ TCGv tmp; 725 + tmp = tcg_temp_new(); 726 + tcg_gen_mov_i32(tmp, cpu_regs[a->rs]); 727 + tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]); 728 + tcg_gen_mov_i32(cpu_regs[a->rd], tmp); 729 + tcg_temp_free(tmp); 730 + return true; 731 + } 732 + 733 + /* xchg dsp[rs].<mi>,rd */ 734 + static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a) 735 + { 736 + TCGv mem, addr; 737 + mem = tcg_temp_new(); 738 + switch (a->mi) { 739 + case 0: /* dsp[rs].b */ 740 + case 1: /* dsp[rs].w */ 741 + case 2: /* dsp[rs].l */ 742 + addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs); 743 + break; 744 + case 3: /* dsp[rs].uw */ 745 + case 4: /* dsp[rs].ub */ 746 + addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs); 747 + break; 748 + default: 749 + g_assert_not_reached(); 750 + } 751 + tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd], 752 + 0, mi_to_mop(a->mi)); 753 + tcg_temp_free(mem); 754 + return true; 755 + } 756 + 757 + static inline void stcond(TCGCond cond, int rd, int imm) 758 + { 759 + TCGv z; 760 + TCGv _imm; 761 + z = tcg_const_i32(0); 762 + _imm = tcg_const_i32(imm); 763 + tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z, 764 + _imm, cpu_regs[rd]); 765 + tcg_temp_free(z); 766 + tcg_temp_free(_imm); 767 + } 768 + 769 + /* stz #imm,rd */ 770 + static bool trans_STZ(DisasContext *ctx, arg_STZ *a) 771 + { 772 + stcond(TCG_COND_EQ, a->rd, a->imm); 773 + return true; 774 + } 775 + 776 + /* stnz #imm,rd */ 777 + static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a) 778 + { 779 + stcond(TCG_COND_NE, a->rd, a->imm); 780 + return true; 781 + } 782 + 783 + /* sccnd.<bwl> rd */ 784 + /* sccnd.<bwl> dsp:[rd] */ 785 + static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a) 786 + { 787 + DisasCompare dc; 788 + TCGv val, mem, addr; 789 + dc.temp = tcg_temp_new(); 790 + psw_cond(&dc, a->cd); 791 + if (a->ld < 3) { 792 + val = tcg_temp_new(); 793 + mem = tcg_temp_new(); 794 + tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0); 795 + addr = rx_index_addr(ctx, mem, 
a->sz, a->ld, a->rd); 796 + rx_gen_st(a->sz, val, addr); 797 + tcg_temp_free(val); 798 + tcg_temp_free(mem); 799 + } else { 800 + tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0); 801 + } 802 + tcg_temp_free(dc.temp); 803 + return true; 804 + } 805 + 806 + /* rtsd #imm */ 807 + static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a) 808 + { 809 + tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2); 810 + pop(cpu_pc); 811 + ctx->base.is_jmp = DISAS_JUMP; 812 + return true; 813 + } 814 + 815 + /* rtsd #imm, rd-rd2 */ 816 + static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a) 817 + { 818 + int dst; 819 + int adj; 820 + 821 + if (a->rd2 >= a->rd) { 822 + adj = a->imm - (a->rd2 - a->rd + 1); 823 + } else { 824 + adj = a->imm - (15 - a->rd + 1); 825 + } 826 + 827 + tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2); 828 + dst = a->rd; 829 + while (dst <= a->rd2 && dst < 16) { 830 + pop(cpu_regs[dst++]); 831 + } 832 + pop(cpu_pc); 833 + ctx->base.is_jmp = DISAS_JUMP; 834 + return true; 835 + } 836 + 837 + typedef void (*op2fn)(TCGv ret, TCGv arg1); 838 + typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2); 839 + 840 + static inline void rx_gen_op_rr(op2fn opr, int dst, int src) 841 + { 842 + opr(cpu_regs[dst], cpu_regs[src]); 843 + } 844 + 845 + static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2) 846 + { 847 + opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]); 848 + } 849 + 850 + static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2) 851 + { 852 + TCGv imm = tcg_const_i32(src2); 853 + opr(cpu_regs[dst], cpu_regs[src], imm); 854 + tcg_temp_free(imm); 855 + } 856 + 857 + static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx, 858 + int dst, int src, int ld, int mi) 859 + { 860 + TCGv val, mem; 861 + mem = tcg_temp_new(); 862 + val = rx_load_source(ctx, mem, ld, mi, src); 863 + opr(cpu_regs[dst], cpu_regs[dst], val); 864 + tcg_temp_free(mem); 865 + } 866 + 867 + static void rx_and(TCGv ret, TCGv arg1, TCGv arg2) 868 
{
    /* The result value itself is kept as the S/Z flag state. */
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* and #uimm:4, rd */
/* and #imm, rd */
static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
{
    rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
    return true;
}

/* and dsp[rs], rd */
/* and rs,rd */
static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
{
    rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* and rs,rs2,rd */
static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
{
    rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
    return true;
}

/* Bitwise OR; result mirrored into psw_s/psw_z. */
static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* or #uimm:4, rd */
/* or #imm, rd */
static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
{
    rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
    return true;
}

/* or dsp[rs], rd */
/* or rs,rd */
static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
{
    rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* or rs,rs2,rd */
static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
{
    rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
    return true;
}

/* Bitwise XOR; result mirrored into psw_s/psw_z. */
static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* xor #imm, rd */
static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
{
    rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
    return true;
}

/* xor dsp[rs], rd */
/* xor rs,rd */
static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
{
    rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* Like AND, but only the flags are updated; ret is unused. */
static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
}

/* tst #imm, rd */
static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
{
    rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
    return true;
}

/* tst dsp[rs], rd */
/* tst rs, rd */
static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
{
    rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* Bitwise NOT; result mirrored into psw_s/psw_z. */
static void rx_not(TCGv ret, TCGv arg1)
{
    tcg_gen_not_i32(ret, arg1);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}

/* not rd */
/* not rs, rd */
static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
{
    rx_gen_op_rr(rx_not, a->rd, a->rs);
    return true;
}

/* Negate: O is set when arg1 == INT_MIN (the only overflowing case),
   C when the result is zero (i.e. arg1 was zero). */
static void rx_neg(TCGv ret, TCGv arg1)
{
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
    tcg_gen_neg_i32(ret, arg1);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}


/* neg rd */
/* neg rs, rd */
static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
{
    rx_gen_op_rr(rx_neg, a->rd, a->rs);
    return true;
}

/* ret = arg1 + arg2 + psw_c */
static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z;
    z = tcg_const_i32(0);
    /* psw_s:psw_c = arg1 + psw_c; then += arg2, accumulating the carry */
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* O = (res ^ arg1) & ~(arg1 ^ arg2): set (in bit 31) when both
       operands have the same sign but the result's sign differs. */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
    tcg_gen_mov_i32(ret, cpu_psw_s);
    tcg_temp_free(z);
}

/* adc #imm, rd */
static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
{
    rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
    return true;
}

/* adc rs, rd */
static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
{
    rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
    return true;
}

/* adc dsp[rs], rd */
static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
{
    /* mi only 2 (longword memory operand) */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* ret = arg1 + arg2 */
static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z;
    z = tcg_const_i32(0);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* Signed-overflow computation, same scheme as rx_adc. */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
    tcg_gen_mov_i32(ret, cpu_psw_s);
    tcg_temp_free(z);
}

/* add #uimm4, rd */
/* add #imm, rs, rd */
static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
{
    rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
    return true;
}

/* add rs, rd */
/* add dsp[rs], rd */
static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
{
    rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* add rs, rs2, rd */
static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
{
    rx_gen_op_rrr(rx_add, a->rd, a->rs, a->rs2);
    return true;
}

/* ret = arg1 - arg2 */
static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv temp;
    tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* C is "no borrow": set when arg1 >= arg2 (unsigned). */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
    /* O = (res ^ arg1) & (arg1 ^ arg2), kept in bit 31. */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    temp = tcg_temp_new_i32();
    tcg_gen_xor_i32(temp, arg1, arg2);
    tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp);
    tcg_temp_free_i32(temp);
    /* CMP does not need the result; callers pass ret == NULL. */
    if (ret) {
        tcg_gen_mov_i32(ret, cpu_psw_s);
    }
}

/* Compare: subtract for flags only, discarding the result. */
static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
{
    rx_sub(NULL, arg1, arg2);
}

/* ret = arg1 - arg2 - !psw_c */
/* -> ret = arg1 + ~arg2 + psw_c */
static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv temp;
    temp = tcg_temp_new();
    tcg_gen_not_i32(temp, arg2);
    rx_adc(ret, arg1, temp);
    tcg_temp_free(temp);
}

/* cmp #imm4, rs2 */
/* cmp #imm8, rs2 */
/* cmp #imm, rs2 */
static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
{
    /* dst index 0 is a dummy; rx_cmp ignores its ret operand */
    rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
    return true;
}

/* cmp rs, rs2 */
/* cmp dsp[rs], rs2 */
static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
{
    rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* sub #imm4, rd */
static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
{
    rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
    return true;
}

/* sub rs, rd */
/* sub dsp[rs], rd */
static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
{
    rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* sub rs2, rs, rd */
static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
{ 1147 + rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs); 1148 + return true; 1149 + } 1150 + 1151 + /* sbb rs, rd */ 1152 + static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a) 1153 + { 1154 + rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs); 1155 + return true; 1156 + } 1157 + 1158 + /* sbb dsp[rs], rd */ 1159 + static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a) 1160 + { 1161 + /* mi only 2 */ 1162 + if (a->mi != 2) { 1163 + return false; 1164 + } 1165 + rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi); 1166 + return true; 1167 + } 1168 + 1169 + static void rx_abs(TCGv ret, TCGv arg1) 1170 + { 1171 + TCGv neg; 1172 + TCGv zero; 1173 + neg = tcg_temp_new(); 1174 + zero = tcg_const_i32(0); 1175 + tcg_gen_neg_i32(neg, arg1); 1176 + tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1); 1177 + tcg_temp_free(neg); 1178 + tcg_temp_free(zero); 1179 + } 1180 + 1181 + /* abs rd */ 1182 + /* abs rs, rd */ 1183 + static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a) 1184 + { 1185 + rx_gen_op_rr(rx_abs, a->rd, a->rs); 1186 + return true; 1187 + } 1188 + 1189 + /* max #imm, rd */ 1190 + static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a) 1191 + { 1192 + rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm); 1193 + return true; 1194 + } 1195 + 1196 + /* max rs, rd */ 1197 + /* max dsp[rs], rd */ 1198 + static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a) 1199 + { 1200 + rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi); 1201 + return true; 1202 + } 1203 + 1204 + /* min #imm, rd */ 1205 + static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a) 1206 + { 1207 + rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm); 1208 + return true; 1209 + } 1210 + 1211 + /* min rs, rd */ 1212 + /* min dsp[rs], rd */ 1213 + static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a) 1214 + { 1215 + rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi); 1216 + return true; 1217 + } 1218 + 1219 + /* mul #uimm4, rd */ 1220 + /* mul #imm, 
rd */ 1221 + static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a) 1222 + { 1223 + rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm); 1224 + return true; 1225 + } 1226 + 1227 + /* mul rs, rd */ 1228 + /* mul dsp[rs], rd */ 1229 + static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a) 1230 + { 1231 + rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi); 1232 + return true; 1233 + } 1234 + 1235 + /* mul rs, rs2, rd */ 1236 + static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a) 1237 + { 1238 + rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2); 1239 + return true; 1240 + } 1241 + 1242 + /* emul #imm, rd */ 1243 + static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a) 1244 + { 1245 + TCGv imm = tcg_const_i32(a->imm); 1246 + if (a->rd > 14) { 1247 + qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); 1248 + } 1249 + tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], 1250 + cpu_regs[a->rd], imm); 1251 + tcg_temp_free(imm); 1252 + return true; 1253 + } 1254 + 1255 + /* emul rs, rd */ 1256 + /* emul dsp[rs], rd */ 1257 + static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a) 1258 + { 1259 + TCGv val, mem; 1260 + if (a->rd > 14) { 1261 + qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); 1262 + } 1263 + mem = tcg_temp_new(); 1264 + val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); 1265 + tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], 1266 + cpu_regs[a->rd], val); 1267 + tcg_temp_free(mem); 1268 + return true; 1269 + } 1270 + 1271 + /* emulu #imm, rd */ 1272 + static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a) 1273 + { 1274 + TCGv imm = tcg_const_i32(a->imm); 1275 + if (a->rd > 14) { 1276 + qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); 1277 + } 1278 + tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], 1279 + cpu_regs[a->rd], imm); 1280 + tcg_temp_free(imm); 1281 + return true; 1282 + } 1283 + 1284 + /* emulu rs, rd */ 1285 + /* emulu dsp[rs], rd */ 1286 
+ static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a) 1287 + { 1288 + TCGv val, mem; 1289 + if (a->rd > 14) { 1290 + qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); 1291 + } 1292 + mem = tcg_temp_new(); 1293 + val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); 1294 + tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], 1295 + cpu_regs[a->rd], val); 1296 + tcg_temp_free(mem); 1297 + return true; 1298 + } 1299 + 1300 + static void rx_div(TCGv ret, TCGv arg1, TCGv arg2) 1301 + { 1302 + gen_helper_div(ret, cpu_env, arg1, arg2); 1303 + } 1304 + 1305 + static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2) 1306 + { 1307 + gen_helper_divu(ret, cpu_env, arg1, arg2); 1308 + } 1309 + 1310 + /* div #imm, rd */ 1311 + static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a) 1312 + { 1313 + rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm); 1314 + return true; 1315 + } 1316 + 1317 + /* div rs, rd */ 1318 + /* div dsp[rs], rd */ 1319 + static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a) 1320 + { 1321 + rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi); 1322 + return true; 1323 + } 1324 + 1325 + /* divu #imm, rd */ 1326 + static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a) 1327 + { 1328 + rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm); 1329 + return true; 1330 + } 1331 + 1332 + /* divu rs, rd */ 1333 + /* divu dsp[rs], rd */ 1334 + static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a) 1335 + { 1336 + rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi); 1337 + return true; 1338 + } 1339 + 1340 + 1341 + /* shll #imm:5, rd */ 1342 + /* shll #imm:5, rs2, rd */ 1343 + static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a) 1344 + { 1345 + TCGv tmp; 1346 + tmp = tcg_temp_new(); 1347 + if (a->imm) { 1348 + tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm); 1349 + tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm); 1350 + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0); 1351 + 
tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff); 1352 + tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp); 1353 + tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0); 1354 + } else { 1355 + tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]); 1356 + tcg_gen_movi_i32(cpu_psw_c, 0); 1357 + tcg_gen_movi_i32(cpu_psw_o, 0); 1358 + } 1359 + tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]); 1360 + tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]); 1361 + return true; 1362 + } 1363 + 1364 + /* shll rs, rd */ 1365 + static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a) 1366 + { 1367 + TCGLabel *noshift, *done; 1368 + TCGv count, tmp; 1369 + 1370 + noshift = gen_new_label(); 1371 + done = gen_new_label(); 1372 + /* if (cpu_regs[a->rs]) { */ 1373 + tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift); 1374 + count = tcg_const_i32(32); 1375 + tmp = tcg_temp_new(); 1376 + tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31); 1377 + tcg_gen_sub_i32(count, count, tmp); 1378 + tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count); 1379 + tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp); 1380 + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0); 1381 + tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff); 1382 + tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp); 1383 + tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0); 1384 + tcg_gen_br(done); 1385 + /* } else { */ 1386 + gen_set_label(noshift); 1387 + tcg_gen_movi_i32(cpu_psw_c, 0); 1388 + tcg_gen_movi_i32(cpu_psw_o, 0); 1389 + /* } */ 1390 + gen_set_label(done); 1391 + tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]); 1392 + tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]); 1393 + tcg_temp_free(count); 1394 + tcg_temp_free(tmp); 1395 + return true; 1396 + } 1397 + 1398 + static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm, 1399 + unsigned int alith) 1400 + { 1401 + static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = { 1402 + tcg_gen_shri_i32, tcg_gen_sari_i32, 1403 + }; 1404 + 
tcg_debug_assert(alith < 2); 1405 + if (imm) { 1406 + gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1); 1407 + tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001); 1408 + gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1); 1409 + } else { 1410 + tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]); 1411 + tcg_gen_movi_i32(cpu_psw_c, 0); 1412 + } 1413 + tcg_gen_movi_i32(cpu_psw_o, 0); 1414 + tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]); 1415 + tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]); 1416 + } 1417 + 1418 + static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith) 1419 + { 1420 + TCGLabel *noshift, *done; 1421 + TCGv count; 1422 + static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = { 1423 + tcg_gen_shri_i32, tcg_gen_sari_i32, 1424 + }; 1425 + static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = { 1426 + tcg_gen_shr_i32, tcg_gen_sar_i32, 1427 + }; 1428 + tcg_debug_assert(alith < 2); 1429 + noshift = gen_new_label(); 1430 + done = gen_new_label(); 1431 + count = tcg_temp_new(); 1432 + /* if (cpu_regs[rs]) { */ 1433 + tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift); 1434 + tcg_gen_andi_i32(count, cpu_regs[rs], 31); 1435 + tcg_gen_subi_i32(count, count, 1); 1436 + gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count); 1437 + tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001); 1438 + gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1); 1439 + tcg_gen_br(done); 1440 + /* } else { */ 1441 + gen_set_label(noshift); 1442 + tcg_gen_movi_i32(cpu_psw_c, 0); 1443 + /* } */ 1444 + gen_set_label(done); 1445 + tcg_gen_movi_i32(cpu_psw_o, 0); 1446 + tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]); 1447 + tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]); 1448 + tcg_temp_free(count); 1449 + } 1450 + 1451 + /* shar #imm:5, rd */ 1452 + /* shar #imm:5, rs2, rd */ 1453 + static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a) 1454 + { 1455 + shiftr_imm(a->rd, a->rs2, a->imm, 1); 1456 + return true; 1457 + } 1458 + 1459 + /* shar rs, rd */ 1460 + static bool 
trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a) 1461 + { 1462 + shiftr_reg(a->rd, a->rs, 1); 1463 + return true; 1464 + } 1465 + 1466 + /* shlr #imm:5, rd */ 1467 + /* shlr #imm:5, rs2, rd */ 1468 + static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a) 1469 + { 1470 + shiftr_imm(a->rd, a->rs2, a->imm, 0); 1471 + return true; 1472 + } 1473 + 1474 + /* shlr rs, rd */ 1475 + static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a) 1476 + { 1477 + shiftr_reg(a->rd, a->rs, 0); 1478 + return true; 1479 + } 1480 + 1481 + /* rolc rd */ 1482 + static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a) 1483 + { 1484 + TCGv tmp; 1485 + tmp = tcg_temp_new(); 1486 + tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31); 1487 + tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1); 1488 + tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c); 1489 + tcg_gen_mov_i32(cpu_psw_c, tmp); 1490 + tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]); 1491 + tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]); 1492 + tcg_temp_free(tmp); 1493 + return true; 1494 + } 1495 + 1496 + /* rorc rd */ 1497 + static bool trans_RORC(DisasContext *ctx, arg_RORC *a) 1498 + { 1499 + TCGv tmp; 1500 + tmp = tcg_temp_new(); 1501 + tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001); 1502 + tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1); 1503 + tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31); 1504 + tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c); 1505 + tcg_gen_mov_i32(cpu_psw_c, tmp); 1506 + tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]); 1507 + tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]); 1508 + return true; 1509 + } 1510 + 1511 + enum {ROTR = 0, ROTL = 1}; 1512 + enum {ROT_IMM = 0, ROT_REG = 1}; 1513 + static inline void rx_rot(int ir, int dir, int rd, int src) 1514 + { 1515 + switch (dir) { 1516 + case ROTL: 1517 + if (ir == ROT_IMM) { 1518 + tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src); 1519 + } else { 1520 + tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]); 1521 + } 1522 + 
        /* C = the bit just rotated into position 0 */
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        /* C = the bit just rotated into position 31 */
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

/* rotl #imm, rd */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}

/* rotl rs, rd */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}

/* rotr #imm, rd */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}

/* rotr rs, rd */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}

/* revl rs, rd */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    /* Byte-swap the whole longword. */
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}

/* revw rs, rd */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
    /* Swap the two bytes inside each 16-bit half. */
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
    tcg_gen_shli_i32(tmp, tmp, 8);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
    tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    tcg_temp_free(tmp);
    return true;
}

/* conditional branch helper */
static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
{
    DisasCompare dc;
    TCGLabel *t, *done;

    switch (cd) {
    case 0 ... 13:
        /* Two-way: fall through to pc_next, or take pc + dst. */
        dc.temp = tcg_temp_new();
        psw_cond(&dc, cd);
        t = gen_new_label();
        done = gen_new_label();
        tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        tcg_gen_br(done);
        gen_set_label(t);
        gen_goto_tb(ctx, 1, ctx->pc + dst);
        gen_set_label(done);
        tcg_temp_free(dc.temp);
        break;
    case 14:
        /* always true case */
        gen_goto_tb(ctx, 0, ctx->pc + dst);
        break;
    case 15:
        /* always false case */
        /* Nothing to do */
        break;
    }
}

/* beq dsp:3 / bne dsp:3 */
/* beq dsp:8 / bne dsp:8 */
/* bc dsp:8 / bnc dsp:8 */
/* bgtu dsp:8 / bleu dsp:8 */
/* bpz dsp:8 / bn dsp:8 */
/* bge dsp:8 / blt dsp:8 */
/* bgt dsp:8 / ble dsp:8 */
/* bo dsp:8 / bno dsp:8 */
/* beq dsp:16 / bne dsp:16 */
static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
{
    rx_bcnd_main(ctx, a->cd, a->dsp);
    return true;
}

/* bra dsp:3 */
/* bra dsp:8 */
/* bra dsp:16 */
/* bra dsp:24 */
static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
{
    /* cd == 14 is the "always taken" condition */
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bra rs */
static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
{
    /* register-relative: pc = pc + rd */
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* Push the address of the next insn as the return address. */
static inline void rx_save_pc(DisasContext *ctx)
{
    TCGv pc = tcg_const_i32(ctx->base.pc_next);
    push(pc);
    tcg_temp_free(pc);
}

/* jmp rs */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
1663 + } 1664 + 1665 + /* jsr rs */ 1666 + static bool trans_JSR(DisasContext *ctx, arg_JSR *a) 1667 + { 1668 + rx_save_pc(ctx); 1669 + tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]); 1670 + ctx->base.is_jmp = DISAS_JUMP; 1671 + return true; 1672 + } 1673 + 1674 + /* bsr dsp:16 */ 1675 + /* bsr dsp:24 */ 1676 + static bool trans_BSR(DisasContext *ctx, arg_BSR *a) 1677 + { 1678 + rx_save_pc(ctx); 1679 + rx_bcnd_main(ctx, 14, a->dsp); 1680 + return true; 1681 + } 1682 + 1683 + /* bsr rs */ 1684 + static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a) 1685 + { 1686 + rx_save_pc(ctx); 1687 + tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc); 1688 + ctx->base.is_jmp = DISAS_JUMP; 1689 + return true; 1690 + } 1691 + 1692 + /* rts */ 1693 + static bool trans_RTS(DisasContext *ctx, arg_RTS *a) 1694 + { 1695 + pop(cpu_pc); 1696 + ctx->base.is_jmp = DISAS_JUMP; 1697 + return true; 1698 + } 1699 + 1700 + /* nop */ 1701 + static bool trans_NOP(DisasContext *ctx, arg_NOP *a) 1702 + { 1703 + return true; 1704 + } 1705 + 1706 + /* scmpu */ 1707 + static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a) 1708 + { 1709 + gen_helper_scmpu(cpu_env); 1710 + return true; 1711 + } 1712 + 1713 + /* smovu */ 1714 + static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a) 1715 + { 1716 + gen_helper_smovu(cpu_env); 1717 + return true; 1718 + } 1719 + 1720 + /* smovf */ 1721 + static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a) 1722 + { 1723 + gen_helper_smovf(cpu_env); 1724 + return true; 1725 + } 1726 + 1727 + /* smovb */ 1728 + static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a) 1729 + { 1730 + gen_helper_smovb(cpu_env); 1731 + return true; 1732 + } 1733 + 1734 + #define STRING(op) \ 1735 + do { \ 1736 + TCGv size = tcg_const_i32(a->sz); \ 1737 + gen_helper_##op(cpu_env, size); \ 1738 + tcg_temp_free(size); \ 1739 + } while (0) 1740 + 1741 + /* suntile.<bwl> */ 1742 + static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a) 1743 + { 1744 + STRING(suntil); 1745 + return true; 1746 
+ } 1747 + 1748 + /* swhile.<bwl> */ 1749 + static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a) 1750 + { 1751 + STRING(swhile); 1752 + return true; 1753 + } 1754 + /* sstr.<bwl> */ 1755 + static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a) 1756 + { 1757 + STRING(sstr); 1758 + return true; 1759 + } 1760 + 1761 + /* rmpa.<bwl> */ 1762 + static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a) 1763 + { 1764 + STRING(rmpa); 1765 + return true; 1766 + } 1767 + 1768 + static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2) 1769 + { 1770 + TCGv_i64 tmp0, tmp1; 1771 + tmp0 = tcg_temp_new_i64(); 1772 + tmp1 = tcg_temp_new_i64(); 1773 + tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]); 1774 + tcg_gen_sari_i64(tmp0, tmp0, 16); 1775 + tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]); 1776 + tcg_gen_sari_i64(tmp1, tmp1, 16); 1777 + tcg_gen_mul_i64(ret, tmp0, tmp1); 1778 + tcg_gen_shli_i64(ret, ret, 16); 1779 + tcg_temp_free_i64(tmp0); 1780 + tcg_temp_free_i64(tmp1); 1781 + } 1782 + 1783 + static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2) 1784 + { 1785 + TCGv_i64 tmp0, tmp1; 1786 + tmp0 = tcg_temp_new_i64(); 1787 + tmp1 = tcg_temp_new_i64(); 1788 + tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]); 1789 + tcg_gen_ext16s_i64(tmp0, tmp0); 1790 + tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]); 1791 + tcg_gen_ext16s_i64(tmp1, tmp1); 1792 + tcg_gen_mul_i64(ret, tmp0, tmp1); 1793 + tcg_gen_shli_i64(ret, ret, 16); 1794 + tcg_temp_free_i64(tmp0); 1795 + tcg_temp_free_i64(tmp1); 1796 + } 1797 + 1798 + /* mulhi rs,rs2 */ 1799 + static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a) 1800 + { 1801 + rx_mul64hi(cpu_acc, a->rs, a->rs2); 1802 + return true; 1803 + } 1804 + 1805 + /* mullo rs,rs2 */ 1806 + static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a) 1807 + { 1808 + rx_mul64lo(cpu_acc, a->rs, a->rs2); 1809 + return true; 1810 + } 1811 + 1812 + /* machi rs,rs2 */ 1813 + static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a) 1814 + { 1815 + TCGv_i64 tmp; 1816 + tmp = tcg_temp_new_i64(); 1817 + 
rx_mul64hi(tmp, a->rs, a->rs2); 1818 + tcg_gen_add_i64(cpu_acc, cpu_acc, tmp); 1819 + tcg_temp_free_i64(tmp); 1820 + return true; 1821 + } 1822 + 1823 + /* maclo rs,rs2 */ 1824 + static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a) 1825 + { 1826 + TCGv_i64 tmp; 1827 + tmp = tcg_temp_new_i64(); 1828 + rx_mul64lo(tmp, a->rs, a->rs2); 1829 + tcg_gen_add_i64(cpu_acc, cpu_acc, tmp); 1830 + tcg_temp_free_i64(tmp); 1831 + return true; 1832 + } 1833 + 1834 + /* mvfachi rd */ 1835 + static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a) 1836 + { 1837 + tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc); 1838 + return true; 1839 + } 1840 + 1841 + /* mvfacmi rd */ 1842 + static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a) 1843 + { 1844 + TCGv_i64 rd64; 1845 + rd64 = tcg_temp_new_i64(); 1846 + tcg_gen_extract_i64(rd64, cpu_acc, 16, 32); 1847 + tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64); 1848 + tcg_temp_free_i64(rd64); 1849 + return true; 1850 + } 1851 + 1852 + /* mvtachi rs */ 1853 + static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a) 1854 + { 1855 + TCGv_i64 rs64; 1856 + rs64 = tcg_temp_new_i64(); 1857 + tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]); 1858 + tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32); 1859 + tcg_temp_free_i64(rs64); 1860 + return true; 1861 + } 1862 + 1863 + /* mvtaclo rs */ 1864 + static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a) 1865 + { 1866 + TCGv_i64 rs64; 1867 + rs64 = tcg_temp_new_i64(); 1868 + tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]); 1869 + tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32); 1870 + tcg_temp_free_i64(rs64); 1871 + return true; 1872 + } 1873 + 1874 + /* racw #imm */ 1875 + static bool trans_RACW(DisasContext *ctx, arg_RACW *a) 1876 + { 1877 + TCGv imm = tcg_const_i32(a->imm + 1); 1878 + gen_helper_racw(cpu_env, imm); 1879 + tcg_temp_free(imm); 1880 + return true; 1881 + } 1882 + 1883 + /* sat rd */ 1884 + static bool trans_SAT(DisasContext *ctx, arg_SAT *a) 1885 + { 1886 + TCGv tmp, z; 
1887 + tmp = tcg_temp_new(); 1888 + z = tcg_const_i32(0); 1889 + /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */ 1890 + tcg_gen_sari_i32(tmp, cpu_psw_s, 31); 1891 + /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */ 1892 + tcg_gen_xori_i32(tmp, tmp, 0x80000000); 1893 + tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd], 1894 + cpu_psw_o, z, tmp, cpu_regs[a->rd]); 1895 + tcg_temp_free(tmp); 1896 + tcg_temp_free(z); 1897 + return true; 1898 + } 1899 + 1900 + /* satr */ 1901 + static bool trans_SATR(DisasContext *ctx, arg_SATR *a) 1902 + { 1903 + gen_helper_satr(cpu_env); 1904 + return true; 1905 + } 1906 + 1907 + #define cat3(a, b, c) a##b##c 1908 + #define FOP(name, op) \ 1909 + static bool cat3(trans_, name, _ir)(DisasContext *ctx, \ 1910 + cat3(arg_, name, _ir) * a) \ 1911 + { \ 1912 + TCGv imm = tcg_const_i32(li(ctx, 0)); \ 1913 + gen_helper_##op(cpu_regs[a->rd], cpu_env, \ 1914 + cpu_regs[a->rd], imm); \ 1915 + tcg_temp_free(imm); \ 1916 + return true; \ 1917 + } \ 1918 + static bool cat3(trans_, name, _mr)(DisasContext *ctx, \ 1919 + cat3(arg_, name, _mr) * a) \ 1920 + { \ 1921 + TCGv val, mem; \ 1922 + mem = tcg_temp_new(); \ 1923 + val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \ 1924 + gen_helper_##op(cpu_regs[a->rd], cpu_env, \ 1925 + cpu_regs[a->rd], val); \ 1926 + tcg_temp_free(mem); \ 1927 + return true; \ 1928 + } 1929 + 1930 + #define FCONVOP(name, op) \ 1931 + static bool trans_##name(DisasContext *ctx, arg_##name * a) \ 1932 + { \ 1933 + TCGv val, mem; \ 1934 + mem = tcg_temp_new(); \ 1935 + val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \ 1936 + gen_helper_##op(cpu_regs[a->rd], cpu_env, val); \ 1937 + tcg_temp_free(mem); \ 1938 + return true; \ 1939 + } 1940 + 1941 + FOP(FADD, fadd) 1942 + FOP(FSUB, fsub) 1943 + FOP(FMUL, fmul) 1944 + FOP(FDIV, fdiv) 1945 + 1946 + /* fcmp #imm, rd */ 1947 + static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a) 1948 + { 1949 + TCGv imm = tcg_const_i32(li(ctx, 0)); 1950 + gen_helper_fcmp(cpu_env, 
cpu_regs[a->rd], imm); 1951 + tcg_temp_free(imm); 1952 + return true; 1953 + } 1954 + 1955 + /* fcmp dsp[rs], rd */ 1956 + /* fcmp rs, rd */ 1957 + static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a) 1958 + { 1959 + TCGv val, mem; 1960 + mem = tcg_temp_new(); 1961 + val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); 1962 + gen_helper_fcmp(cpu_env, cpu_regs[a->rd], val); 1963 + tcg_temp_free(mem); 1964 + return true; 1965 + } 1966 + 1967 + FCONVOP(FTOI, ftoi) 1968 + FCONVOP(ROUND, round) 1969 + 1970 + /* itof rs, rd */ 1971 + /* itof dsp[rs], rd */ 1972 + static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a) 1973 + { 1974 + TCGv val, mem; 1975 + mem = tcg_temp_new(); 1976 + val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); 1977 + gen_helper_itof(cpu_regs[a->rd], cpu_env, val); 1978 + tcg_temp_free(mem); 1979 + return true; 1980 + } 1981 + 1982 + static void rx_bsetm(TCGv mem, TCGv mask) 1983 + { 1984 + TCGv val; 1985 + val = tcg_temp_new(); 1986 + rx_gen_ld(MO_8, val, mem); 1987 + tcg_gen_or_i32(val, val, mask); 1988 + rx_gen_st(MO_8, val, mem); 1989 + tcg_temp_free(val); 1990 + } 1991 + 1992 + static void rx_bclrm(TCGv mem, TCGv mask) 1993 + { 1994 + TCGv val; 1995 + val = tcg_temp_new(); 1996 + rx_gen_ld(MO_8, val, mem); 1997 + tcg_gen_andc_i32(val, val, mask); 1998 + rx_gen_st(MO_8, val, mem); 1999 + tcg_temp_free(val); 2000 + } 2001 + 2002 + static void rx_btstm(TCGv mem, TCGv mask) 2003 + { 2004 + TCGv val; 2005 + val = tcg_temp_new(); 2006 + rx_gen_ld(MO_8, val, mem); 2007 + tcg_gen_and_i32(val, val, mask); 2008 + tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0); 2009 + tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c); 2010 + tcg_temp_free(val); 2011 + } 2012 + 2013 + static void rx_bnotm(TCGv mem, TCGv mask) 2014 + { 2015 + TCGv val; 2016 + val = tcg_temp_new(); 2017 + rx_gen_ld(MO_8, val, mem); 2018 + tcg_gen_xor_i32(val, val, mask); 2019 + rx_gen_st(MO_8, val, mem); 2020 + tcg_temp_free(val); 2021 + } 2022 + 2023 + static void rx_bsetr(TCGv reg, 
TCGv mask) 2024 + { 2025 + tcg_gen_or_i32(reg, reg, mask); 2026 + } 2027 + 2028 + static void rx_bclrr(TCGv reg, TCGv mask) 2029 + { 2030 + tcg_gen_andc_i32(reg, reg, mask); 2031 + } 2032 + 2033 + static inline void rx_btstr(TCGv reg, TCGv mask) 2034 + { 2035 + TCGv t0; 2036 + t0 = tcg_temp_new(); 2037 + tcg_gen_and_i32(t0, reg, mask); 2038 + tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0); 2039 + tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c); 2040 + tcg_temp_free(t0); 2041 + } 2042 + 2043 + static inline void rx_bnotr(TCGv reg, TCGv mask) 2044 + { 2045 + tcg_gen_xor_i32(reg, reg, mask); 2046 + } 2047 + 2048 + #define BITOP(name, op) \ 2049 + static bool cat3(trans_, name, _im)(DisasContext *ctx, \ 2050 + cat3(arg_, name, _im) * a) \ 2051 + { \ 2052 + TCGv mask, mem, addr; \ 2053 + mem = tcg_temp_new(); \ 2054 + mask = tcg_const_i32(1 << a->imm); \ 2055 + addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \ 2056 + cat3(rx_, op, m)(addr, mask); \ 2057 + tcg_temp_free(mask); \ 2058 + tcg_temp_free(mem); \ 2059 + return true; \ 2060 + } \ 2061 + static bool cat3(trans_, name, _ir)(DisasContext *ctx, \ 2062 + cat3(arg_, name, _ir) * a) \ 2063 + { \ 2064 + TCGv mask; \ 2065 + mask = tcg_const_i32(1 << a->imm); \ 2066 + cat3(rx_, op, r)(cpu_regs[a->rd], mask); \ 2067 + tcg_temp_free(mask); \ 2068 + return true; \ 2069 + } \ 2070 + static bool cat3(trans_, name, _rr)(DisasContext *ctx, \ 2071 + cat3(arg_, name, _rr) * a) \ 2072 + { \ 2073 + TCGv mask, b; \ 2074 + mask = tcg_const_i32(1); \ 2075 + b = tcg_temp_new(); \ 2076 + tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \ 2077 + tcg_gen_shl_i32(mask, mask, b); \ 2078 + cat3(rx_, op, r)(cpu_regs[a->rd], mask); \ 2079 + tcg_temp_free(mask); \ 2080 + tcg_temp_free(b); \ 2081 + return true; \ 2082 + } \ 2083 + static bool cat3(trans_, name, _rm)(DisasContext *ctx, \ 2084 + cat3(arg_, name, _rm) * a) \ 2085 + { \ 2086 + TCGv mask, mem, addr, b; \ 2087 + mask = tcg_const_i32(1); \ 2088 + b = tcg_temp_new(); \ 2089 + 
tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \ 2090 + tcg_gen_shl_i32(mask, mask, b); \ 2091 + mem = tcg_temp_new(); \ 2092 + addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \ 2093 + cat3(rx_, op, m)(addr, mask); \ 2094 + tcg_temp_free(mem); \ 2095 + tcg_temp_free(mask); \ 2096 + tcg_temp_free(b); \ 2097 + return true; \ 2098 + } 2099 + 2100 + BITOP(BSET, bset) 2101 + BITOP(BCLR, bclr) 2102 + BITOP(BTST, btst) 2103 + BITOP(BNOT, bnot) 2104 + 2105 + static inline void bmcnd_op(TCGv val, TCGCond cond, int pos) 2106 + { 2107 + TCGv bit; 2108 + DisasCompare dc; 2109 + dc.temp = tcg_temp_new(); 2110 + bit = tcg_temp_new(); 2111 + psw_cond(&dc, cond); 2112 + tcg_gen_andi_i32(val, val, ~(1 << pos)); 2113 + tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0); 2114 + tcg_gen_deposit_i32(val, val, bit, pos, 1); 2115 + tcg_temp_free(bit); 2116 + tcg_temp_free(dc.temp); 2117 + } 2118 + 2119 + /* bmcnd #imm, dsp[rd] */ 2120 + static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a) 2121 + { 2122 + TCGv val, mem, addr; 2123 + val = tcg_temp_new(); 2124 + mem = tcg_temp_new(); 2125 + addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd); 2126 + rx_gen_ld(MO_8, val, addr); 2127 + bmcnd_op(val, a->cd, a->imm); 2128 + rx_gen_st(MO_8, val, addr); 2129 + tcg_temp_free(val); 2130 + tcg_temp_free(mem); 2131 + return true; 2132 + } 2133 + 2134 + /* bmcond #imm, rd */ 2135 + static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a) 2136 + { 2137 + bmcnd_op(cpu_regs[a->rd], a->cd, a->imm); 2138 + return true; 2139 + } 2140 + 2141 + enum { 2142 + PSW_C = 0, 2143 + PSW_Z = 1, 2144 + PSW_S = 2, 2145 + PSW_O = 3, 2146 + PSW_I = 8, 2147 + PSW_U = 9, 2148 + }; 2149 + 2150 + static inline void clrsetpsw(DisasContext *ctx, int cb, int val) 2151 + { 2152 + if (cb < 8) { 2153 + switch (cb) { 2154 + case PSW_C: 2155 + tcg_gen_movi_i32(cpu_psw_c, val); 2156 + break; 2157 + case PSW_Z: 2158 + tcg_gen_movi_i32(cpu_psw_z, val == 0); 2159 + break; 2160 + case PSW_S: 2161 + tcg_gen_movi_i32(cpu_psw_s, 
val ? -1 : 0); 2162 + break; 2163 + case PSW_O: 2164 + tcg_gen_movi_i32(cpu_psw_o, val << 31); 2165 + break; 2166 + default: 2167 + qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb); 2168 + break; 2169 + } 2170 + } else if (is_privileged(ctx, 0)) { 2171 + switch (cb) { 2172 + case PSW_I: 2173 + tcg_gen_movi_i32(cpu_psw_i, val); 2174 + ctx->base.is_jmp = DISAS_UPDATE; 2175 + break; 2176 + case PSW_U: 2177 + tcg_gen_movi_i32(cpu_psw_u, val); 2178 + break; 2179 + default: 2180 + qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb); 2181 + break; 2182 + } 2183 + } 2184 + } 2185 + 2186 + /* clrpsw psw */ 2187 + static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a) 2188 + { 2189 + clrsetpsw(ctx, a->cb, 0); 2190 + return true; 2191 + } 2192 + 2193 + /* setpsw psw */ 2194 + static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a) 2195 + { 2196 + clrsetpsw(ctx, a->cb, 1); 2197 + return true; 2198 + } 2199 + 2200 + /* mvtipl #imm */ 2201 + static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a) 2202 + { 2203 + if (is_privileged(ctx, 1)) { 2204 + tcg_gen_movi_i32(cpu_psw_ipl, a->imm); 2205 + ctx->base.is_jmp = DISAS_UPDATE; 2206 + } 2207 + return true; 2208 + } 2209 + 2210 + /* mvtc #imm, rd */ 2211 + static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a) 2212 + { 2213 + TCGv imm; 2214 + 2215 + imm = tcg_const_i32(a->imm); 2216 + move_to_cr(ctx, imm, a->cr); 2217 + if (a->cr == 0 && is_privileged(ctx, 0)) { 2218 + ctx->base.is_jmp = DISAS_UPDATE; 2219 + } 2220 + tcg_temp_free(imm); 2221 + return true; 2222 + } 2223 + 2224 + /* mvtc rs, rd */ 2225 + static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a) 2226 + { 2227 + move_to_cr(ctx, cpu_regs[a->rs], a->cr); 2228 + if (a->cr == 0 && is_privileged(ctx, 0)) { 2229 + ctx->base.is_jmp = DISAS_UPDATE; 2230 + } 2231 + return true; 2232 + } 2233 + 2234 + /* mvfc rs, rd */ 2235 + static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a) 2236 + { 2237 + move_from_cr(cpu_regs[a->rd], a->cr, ctx->pc); 
2238 + return true; 2239 + } 2240 + 2241 + /* rtfi */ 2242 + static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a) 2243 + { 2244 + TCGv psw; 2245 + if (is_privileged(ctx, 1)) { 2246 + psw = tcg_temp_new(); 2247 + tcg_gen_mov_i32(cpu_pc, cpu_bpc); 2248 + tcg_gen_mov_i32(psw, cpu_bpsw); 2249 + gen_helper_set_psw_rte(cpu_env, psw); 2250 + ctx->base.is_jmp = DISAS_EXIT; 2251 + tcg_temp_free(psw); 2252 + } 2253 + return true; 2254 + } 2255 + 2256 + /* rte */ 2257 + static bool trans_RTE(DisasContext *ctx, arg_RTE *a) 2258 + { 2259 + TCGv psw; 2260 + if (is_privileged(ctx, 1)) { 2261 + psw = tcg_temp_new(); 2262 + pop(cpu_pc); 2263 + pop(psw); 2264 + gen_helper_set_psw_rte(cpu_env, psw); 2265 + ctx->base.is_jmp = DISAS_EXIT; 2266 + tcg_temp_free(psw); 2267 + } 2268 + return true; 2269 + } 2270 + 2271 + /* brk */ 2272 + static bool trans_BRK(DisasContext *ctx, arg_BRK *a) 2273 + { 2274 + tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next); 2275 + gen_helper_rxbrk(cpu_env); 2276 + ctx->base.is_jmp = DISAS_NORETURN; 2277 + return true; 2278 + } 2279 + 2280 + /* int #imm */ 2281 + static bool trans_INT(DisasContext *ctx, arg_INT *a) 2282 + { 2283 + TCGv vec; 2284 + 2285 + tcg_debug_assert(a->imm < 0x100); 2286 + vec = tcg_const_i32(a->imm); 2287 + tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next); 2288 + gen_helper_rxint(cpu_env, vec); 2289 + tcg_temp_free(vec); 2290 + ctx->base.is_jmp = DISAS_NORETURN; 2291 + return true; 2292 + } 2293 + 2294 + /* wait */ 2295 + static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a) 2296 + { 2297 + if (is_privileged(ctx, 1)) { 2298 + tcg_gen_addi_i32(cpu_pc, cpu_pc, 2); 2299 + gen_helper_wait(cpu_env); 2300 + } 2301 + return true; 2302 + } 2303 + 2304 + static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) 2305 + { 2306 + CPURXState *env = cs->env_ptr; 2307 + DisasContext *ctx = container_of(dcbase, DisasContext, base); 2308 + ctx->env = env; 2309 + } 2310 + 2311 + static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs) 
2312 + { 2313 + } 2314 + 2315 + static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) 2316 + { 2317 + DisasContext *ctx = container_of(dcbase, DisasContext, base); 2318 + 2319 + tcg_gen_insn_start(ctx->base.pc_next); 2320 + } 2321 + 2322 + static bool rx_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs, 2323 + const CPUBreakpoint *bp) 2324 + { 2325 + DisasContext *ctx = container_of(dcbase, DisasContext, base); 2326 + 2327 + /* We have hit a breakpoint - make sure PC is up-to-date */ 2328 + tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next); 2329 + gen_helper_debug(cpu_env); 2330 + ctx->base.is_jmp = DISAS_NORETURN; 2331 + ctx->base.pc_next += 1; 2332 + return true; 2333 + } 2334 + 2335 + static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) 2336 + { 2337 + DisasContext *ctx = container_of(dcbase, DisasContext, base); 2338 + uint32_t insn; 2339 + 2340 + ctx->pc = ctx->base.pc_next; 2341 + insn = decode_load(ctx); 2342 + if (!decode(ctx, insn)) { 2343 + gen_helper_raise_illegal_instruction(cpu_env); 2344 + } 2345 + } 2346 + 2347 + static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) 2348 + { 2349 + DisasContext *ctx = container_of(dcbase, DisasContext, base); 2350 + 2351 + switch (ctx->base.is_jmp) { 2352 + case DISAS_NEXT: 2353 + case DISAS_TOO_MANY: 2354 + gen_goto_tb(ctx, 0, dcbase->pc_next); 2355 + break; 2356 + case DISAS_JUMP: 2357 + if (ctx->base.singlestep_enabled) { 2358 + gen_helper_debug(cpu_env); 2359 + } else { 2360 + tcg_gen_lookup_and_goto_ptr(); 2361 + } 2362 + break; 2363 + case DISAS_UPDATE: 2364 + tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next); 2365 + case DISAS_EXIT: 2366 + tcg_gen_exit_tb(NULL, 0); 2367 + break; 2368 + case DISAS_NORETURN: 2369 + break; 2370 + default: 2371 + g_assert_not_reached(); 2372 + } 2373 + } 2374 + 2375 + static void rx_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) 2376 + { 2377 + qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */ 2378 + 
log_target_disas(cs, dcbase->pc_first, dcbase->tb->size); 2379 + } 2380 + 2381 + static const TranslatorOps rx_tr_ops = { 2382 + .init_disas_context = rx_tr_init_disas_context, 2383 + .tb_start = rx_tr_tb_start, 2384 + .insn_start = rx_tr_insn_start, 2385 + .breakpoint_check = rx_tr_breakpoint_check, 2386 + .translate_insn = rx_tr_translate_insn, 2387 + .tb_stop = rx_tr_tb_stop, 2388 + .disas_log = rx_tr_disas_log, 2389 + }; 2390 + 2391 + void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) 2392 + { 2393 + DisasContext dc; 2394 + 2395 + translator_loop(&rx_tr_ops, &dc.base, cs, tb, max_insns); 2396 + } 2397 + 2398 + void restore_state_to_opc(CPURXState *env, TranslationBlock *tb, 2399 + target_ulong *data) 2400 + { 2401 + env->pc = data[0]; 2402 + } 2403 + 2404 + #define ALLOC_REGISTER(sym, name) \ 2405 + cpu_##sym = tcg_global_mem_new_i32(cpu_env, \ 2406 + offsetof(CPURXState, sym), name) 2407 + 2408 + void rx_translate_init(void) 2409 + { 2410 + static const char * const regnames[NUM_REGS] = { 2411 + "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", 2412 + "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15" 2413 + }; 2414 + int i; 2415 + 2416 + for (i = 0; i < NUM_REGS; i++) { 2417 + cpu_regs[i] = tcg_global_mem_new_i32(cpu_env, 2418 + offsetof(CPURXState, regs[i]), 2419 + regnames[i]); 2420 + } 2421 + ALLOC_REGISTER(pc, "PC"); 2422 + ALLOC_REGISTER(psw_o, "PSW(O)"); 2423 + ALLOC_REGISTER(psw_s, "PSW(S)"); 2424 + ALLOC_REGISTER(psw_z, "PSW(Z)"); 2425 + ALLOC_REGISTER(psw_c, "PSW(C)"); 2426 + ALLOC_REGISTER(psw_u, "PSW(U)"); 2427 + ALLOC_REGISTER(psw_i, "PSW(I)"); 2428 + ALLOC_REGISTER(psw_pm, "PSW(PM)"); 2429 + ALLOC_REGISTER(psw_ipl, "PSW(IPL)"); 2430 + ALLOC_REGISTER(usp, "USP"); 2431 + ALLOC_REGISTER(fpsw, "FPSW"); 2432 + ALLOC_REGISTER(bpsw, "BPSW"); 2433 + ALLOC_REGISTER(bpc, "BPC"); 2434 + ALLOC_REGISTER(isp, "ISP"); 2435 + ALLOC_REGISTER(fintv, "FINTV"); 2436 + ALLOC_REGISTER(intb, "INTB"); 2437 + cpu_acc = 
tcg_global_mem_new_i64(cpu_env, 2438 + offsetof(CPURXState, acc), "ACC"); 2439 + }
+1
tests/qtest/machine-none-test.c
··· 56 56 { "hppa", "hppa" }, 57 57 { "riscv64", "rv64gcsu-v1.10.0" }, 58 58 { "riscv32", "rv32gcsu-v1.9.1" }, 59 + { "rx", "rx62n" }, 59 60 }; 60 61 61 62 static const char *get_cpu_model_by_arch(const char *arch)