author     Timothy Pearson <tpearson@raptorengineering.com>   2019-05-11 15:12:49 -0500
committer  Timothy Pearson <tpearson@raptorengineering.com>   2019-05-11 15:12:49 -0500
commit     9e80202352dd49bdd9e67b8b906d86f058431505 (patch)
tree       5673c17aad6e3833da8c4ff21b5a11f666ec9fbe /src/target-tricore
Diffstat (limited to 'src/target-tricore')
-rw-r--r--   src/target-tricore/Makefile.objs          1
-rw-r--r--   src/target-tricore/cpu-qom.h             70
-rw-r--r--   src/target-tricore/cpu.c                218
-rw-r--r--   src/target-tricore/cpu.h                397
-rw-r--r--   src/target-tricore/csfr.def             124
-rw-r--r--   src/target-tricore/helper.c             139
-rw-r--r--   src/target-tricore/helper.h              141
-rw-r--r--   src/target-tricore/op_helper.c         2695
-rw-r--r--   src/target-tricore/translate.c         8397
-rw-r--r--   src/target-tricore/tricore-defs.h        28
-rw-r--r--   src/target-tricore/tricore-opcodes.h   1445
11 files changed, 13655 insertions, 0 deletions
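
In the new cpu.h below, the five PSW user status bits (C, V, SV, AV, SAV) live outside the architectural PSW word: each one is cached in its own 32-bit field where only bit 31 is significant, and psw_read()/psw_write() in helper.c fold the cache back into, or spread it out of, PSW[31:27]. A minimal stand-alone sketch of that flag-cache representation (illustrative only; the struct, helper names and test value here are not from the commit):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Reduced model of the flag cache in cpu.h: a flag is taken from bit 31 of
   its cache word (C is tested as "any non-zero value", matching psw_read). */
typedef struct {
    uint32_t PSW;                              /* PSW without the USB bits */
    uint32_t USB_C, USB_V, USB_SV, USB_AV, USB_SAV;
} FlagCache;

/* Mirrors psw_read(): clear PSW[31:24], then fold the cached flags back in. */
static uint32_t flags_read(FlagCache *f)
{
    f->PSW &= 0xffffff;
    f->PSW |= (uint32_t)(f->USB_C != 0) << 31;
    f->PSW |= (f->USB_V   & (1u << 31)) >> 1;
    f->PSW |= (f->USB_SV  & (1u << 31)) >> 2;
    f->PSW |= (f->USB_AV  & (1u << 31)) >> 3;
    f->PSW |= (f->USB_SAV & (1u << 31)) >> 4;
    return f->PSW;
}

/* Inverse direction: move PSW[31..27] into bit 31 of each cache word. */
static void flags_write(FlagCache *f, uint32_t val)
{
    f->USB_C   =  val & (1u << 31);
    f->USB_V   = (val & (1u << 30)) << 1;
    f->USB_SV  = (val & (1u << 29)) << 2;
    f->USB_AV  = (val & (1u << 28)) << 3;
    f->USB_SAV = (val & (1u << 27)) << 4;
    f->PSW     = val;
}

int main(void)
{
    FlagCache f = { 0 };

    flags_write(&f, 0xa8000b80u);              /* C=1 V=0 SV=1 AV=0 SAV=1 */
    assert(flags_read(&f) == 0xa8000b80u);     /* the packing round-trips */
    printf("PSW = 0x%08x, C=%d, V=%d\n", f.PSW, f.USB_C != 0, f.USB_V != 0);
    return 0;
}

Keeping each flag in its own MSB-only word is the usual QEMU flag-cache trick the cpu.h comment alludes to: updating a single flag is one 32-bit store instead of a read-modify-write of the whole PSW.
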
diff --git a/src/target-tricore/Makefile.objs b/src/target-tricore/Makefile.objs new file mode 100644 index 0000000..21e820d --- /dev/null +++ b/src/target-tricore/Makefile.objs @@ -0,0 +1 @@ +obj-y += translate.o helper.o cpu.o op_helper.o diff --git a/src/target-tricore/cpu-qom.h b/src/target-tricore/cpu-qom.h new file mode 100644 index 0000000..66c9664 --- /dev/null +++ b/src/target-tricore/cpu-qom.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef QEMU_TRICORE_CPU_QOM_H +#define QEMU_TRICORE_CPU_QOM_H + +#include "qom/cpu.h" + + +#define TYPE_TRICORE_CPU "tricore-cpu" + +#define TRICORE_CPU_CLASS(klass) \ + OBJECT_CLASS_CHECK(TriCoreCPUClass, (klass), TYPE_TRICORE_CPU) +#define TRICORE_CPU(obj) \ + OBJECT_CHECK(TriCoreCPU, (obj), TYPE_TRICORE_CPU) +#define TRICORE_CPU_GET_CLASS(obj) \ + OBJECT_GET_CLASS(TriCoreCPUClass, (obj), TYPE_TRICORE_CPU) + +typedef struct TriCoreCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + DeviceRealize parent_realize; + void (*parent_reset)(CPUState *cpu); +} TriCoreCPUClass; + +/** + * TriCoreCPU: + * @env: #CPUTriCoreState + * + * A TriCore CPU. + */ +typedef struct TriCoreCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUTriCoreState env; +} TriCoreCPU; + +static inline TriCoreCPU *tricore_env_get_cpu(CPUTriCoreState *env) +{ + return TRICORE_CPU(container_of(env, TriCoreCPU, env)); +} + +#define ENV_GET_CPU(e) CPU(tricore_env_get_cpu(e)) + +#define ENV_OFFSET offsetof(TriCoreCPU, env) + +hwaddr tricore_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +void tricore_cpu_dump_state(CPUState *cpu, FILE *f, + fprintf_function cpu_fprintf, int flags); + + +#endif /*QEMU_TRICORE_CPU_QOM_H */ diff --git a/src/target-tricore/cpu.c b/src/target-tricore/cpu.c new file mode 100644 index 0000000..ed8b030 --- /dev/null +++ b/src/target-tricore/cpu.c @@ -0,0 +1,218 @@ +/* + * TriCore emulation for qemu: main translation routines. + * + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "cpu.h" +#include "qemu-common.h" + +static inline void set_feature(CPUTriCoreState *env, int feature) +{ + env->features |= 1ULL << feature; +} + +static void tricore_cpu_set_pc(CPUState *cs, vaddr value) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + + env->PC = value & ~(target_ulong)1; +} + +static void tricore_cpu_synchronize_from_tb(CPUState *cs, + TranslationBlock *tb) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + + env->PC = tb->pc; +} + +static void tricore_cpu_reset(CPUState *s) +{ + TriCoreCPU *cpu = TRICORE_CPU(s); + TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(cpu); + CPUTriCoreState *env = &cpu->env; + + tcc->parent_reset(s); + + tlb_flush(s, 1); + + cpu_state_reset(env); +} + +static bool tricore_cpu_has_work(CPUState *cs) +{ + return true; +} + +static void tricore_cpu_realizefn(DeviceState *dev, Error **errp) +{ + CPUState *cs = CPU(dev); + TriCoreCPU *cpu = TRICORE_CPU(dev); + TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(dev); + CPUTriCoreState *env = &cpu->env; + + /* Some features automatically imply others */ + if (tricore_feature(env, TRICORE_FEATURE_161)) { + set_feature(env, TRICORE_FEATURE_16); + } + + if (tricore_feature(env, TRICORE_FEATURE_16)) { + set_feature(env, TRICORE_FEATURE_131); + } + if (tricore_feature(env, TRICORE_FEATURE_131)) { + set_feature(env, TRICORE_FEATURE_13); + } + cpu_reset(cs); + qemu_init_vcpu(cs); + + tcc->parent_realize(dev, errp); +} + + +static void tricore_cpu_initfn(Object *obj) +{ + CPUState *cs = CPU(obj); + TriCoreCPU *cpu = TRICORE_CPU(obj); + CPUTriCoreState *env = &cpu->env; + + cs->env_ptr = env; + cpu_exec_init(cs, &error_abort); + + if (tcg_enabled()) { + tricore_tcg_init(); + } +} + +static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model) +{ + ObjectClass *oc; + char *typename; + + if (!cpu_model) { + return NULL; + } + + typename = g_strdup_printf("%s-" TYPE_TRICORE_CPU, cpu_model); + oc = object_class_by_name(typename); + g_free(typename); + if (!oc || !object_class_dynamic_cast(oc, TYPE_TRICORE_CPU) || + object_class_is_abstract(oc)) { + return NULL; + } + return oc; +} + +static void tc1796_initfn(Object *obj) +{ + TriCoreCPU *cpu = TRICORE_CPU(obj); + + set_feature(&cpu->env, TRICORE_FEATURE_13); +} + +static void tc1797_initfn(Object *obj) +{ + TriCoreCPU *cpu = TRICORE_CPU(obj); + + set_feature(&cpu->env, TRICORE_FEATURE_131); +} + +static void tc27x_initfn(Object *obj) +{ + TriCoreCPU *cpu = TRICORE_CPU(obj); + + set_feature(&cpu->env, TRICORE_FEATURE_161); +} + +typedef struct TriCoreCPUInfo { + const char *name; + void (*initfn)(Object *obj); + void (*class_init)(ObjectClass *oc, void *data); +} TriCoreCPUInfo; + +static const TriCoreCPUInfo tricore_cpus[] = { + { .name = "tc1796", .initfn = tc1796_initfn }, + { .name = "tc1797", .initfn = tc1797_initfn }, + { .name = "tc27x", .initfn = tc27x_initfn }, + { .name = NULL } +}; + +static void tricore_cpu_class_init(ObjectClass *c, void *data) +{ + TriCoreCPUClass *mcc = TRICORE_CPU_CLASS(c); + CPUClass *cc = CPU_CLASS(c); + DeviceClass *dc = DEVICE_CLASS(c); + + mcc->parent_realize = dc->realize; + dc->realize = tricore_cpu_realizefn; + + mcc->parent_reset = cc->reset; + cc->reset = tricore_cpu_reset; + cc->class_by_name = tricore_cpu_class_by_name; + cc->has_work = tricore_cpu_has_work; + + cc->dump_state = tricore_cpu_dump_state; + cc->set_pc = tricore_cpu_set_pc; + cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb; + + /* + * Reason: tricore_cpu_initfn() calls 
cpu_exec_init(), which saves + * the object in cpus -> dangling pointer after final + * object_unref(). + */ + dc->cannot_destroy_with_object_finalize_yet = true; +} + +static void cpu_register(const TriCoreCPUInfo *info) +{ + TypeInfo type_info = { + .parent = TYPE_TRICORE_CPU, + .instance_size = sizeof(TriCoreCPU), + .instance_init = info->initfn, + .class_size = sizeof(TriCoreCPUClass), + .class_init = info->class_init, + }; + + type_info.name = g_strdup_printf("%s-" TYPE_TRICORE_CPU, info->name); + type_register(&type_info); + g_free((void *)type_info.name); +} + +static const TypeInfo tricore_cpu_type_info = { + .name = TYPE_TRICORE_CPU, + .parent = TYPE_CPU, + .instance_size = sizeof(TriCoreCPU), + .instance_init = tricore_cpu_initfn, + .abstract = true, + .class_size = sizeof(TriCoreCPUClass), + .class_init = tricore_cpu_class_init, +}; + +static void tricore_cpu_register_types(void) +{ + const TriCoreCPUInfo *info = tricore_cpus; + + type_register_static(&tricore_cpu_type_info); + + while (info->name) { + cpu_register(info); + info++; + } +} + +type_init(tricore_cpu_register_types) diff --git a/src/target-tricore/cpu.h b/src/target-tricore/cpu.h new file mode 100644 index 0000000..20a12f3 --- /dev/null +++ b/src/target-tricore/cpu.h @@ -0,0 +1,397 @@ +/* + * TriCore emulation for qemu: main CPU struct. + * + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#if !defined(__TRICORE_CPU_H__) +#define __TRICORE_CPU_H__ + +#include "tricore-defs.h" +#include "config.h" +#include "qemu-common.h" +#include "exec/cpu-defs.h" +#include "fpu/softfloat.h" + +#define CPUArchState struct CPUTriCoreState + +struct CPUTriCoreState; + +struct tricore_boot_info; + +#define NB_MMU_MODES 3 + +typedef struct tricore_def_t tricore_def_t; + +typedef struct CPUTriCoreState CPUTriCoreState; +struct CPUTriCoreState { + /* GPR Register */ + uint32_t gpr_a[16]; + uint32_t gpr_d[16]; + /* CSFR Register */ + uint32_t PCXI; +/* Frequently accessed PSW_USB bits are stored separately for efficiency. + This contains all the other bits. Use psw_{read,write} to access + the whole PSW. */ + uint32_t PSW; + + /* PSW flag cache for faster execution + */ + uint32_t PSW_USB_C; + uint32_t PSW_USB_V; /* Only if bit 31 set, then flag is set */ + uint32_t PSW_USB_SV; /* Only if bit 31 set, then flag is set */ + uint32_t PSW_USB_AV; /* Only if bit 31 set, then flag is set. */ + uint32_t PSW_USB_SAV; /* Only if bit 31 set, then flag is set. 
*/ + + uint32_t PC; + uint32_t SYSCON; + uint32_t CPU_ID; + uint32_t BIV; + uint32_t BTV; + uint32_t ISP; + uint32_t ICR; + uint32_t FCX; + uint32_t LCX; + uint32_t COMPAT; + + /* Mem Protection Register */ + uint32_t DPR0_0L; + uint32_t DPR0_0U; + uint32_t DPR0_1L; + uint32_t DPR0_1U; + uint32_t DPR0_2L; + uint32_t DPR0_2U; + uint32_t DPR0_3L; + uint32_t DPR0_3U; + + uint32_t DPR1_0L; + uint32_t DPR1_0U; + uint32_t DPR1_1L; + uint32_t DPR1_1U; + uint32_t DPR1_2L; + uint32_t DPR1_2U; + uint32_t DPR1_3L; + uint32_t DPR1_3U; + + uint32_t DPR2_0L; + uint32_t DPR2_0U; + uint32_t DPR2_1L; + uint32_t DPR2_1U; + uint32_t DPR2_2L; + uint32_t DPR2_2U; + uint32_t DPR2_3L; + uint32_t DPR2_3U; + + uint32_t DPR3_0L; + uint32_t DPR3_0U; + uint32_t DPR3_1L; + uint32_t DPR3_1U; + uint32_t DPR3_2L; + uint32_t DPR3_2U; + uint32_t DPR3_3L; + uint32_t DPR3_3U; + + uint32_t CPR0_0L; + uint32_t CPR0_0U; + uint32_t CPR0_1L; + uint32_t CPR0_1U; + uint32_t CPR0_2L; + uint32_t CPR0_2U; + uint32_t CPR0_3L; + uint32_t CPR0_3U; + + uint32_t CPR1_0L; + uint32_t CPR1_0U; + uint32_t CPR1_1L; + uint32_t CPR1_1U; + uint32_t CPR1_2L; + uint32_t CPR1_2U; + uint32_t CPR1_3L; + uint32_t CPR1_3U; + + uint32_t CPR2_0L; + uint32_t CPR2_0U; + uint32_t CPR2_1L; + uint32_t CPR2_1U; + uint32_t CPR2_2L; + uint32_t CPR2_2U; + uint32_t CPR2_3L; + uint32_t CPR2_3U; + + uint32_t CPR3_0L; + uint32_t CPR3_0U; + uint32_t CPR3_1L; + uint32_t CPR3_1U; + uint32_t CPR3_2L; + uint32_t CPR3_2U; + uint32_t CPR3_3L; + uint32_t CPR3_3U; + + uint32_t DPM0; + uint32_t DPM1; + uint32_t DPM2; + uint32_t DPM3; + + uint32_t CPM0; + uint32_t CPM1; + uint32_t CPM2; + uint32_t CPM3; + + /* Memory Management Registers */ + uint32_t MMU_CON; + uint32_t MMU_ASI; + uint32_t MMU_TVA; + uint32_t MMU_TPA; + uint32_t MMU_TPX; + uint32_t MMU_TFA; + /* {1.3.1 only */ + uint32_t BMACON; + uint32_t SMACON; + uint32_t DIEAR; + uint32_t DIETR; + uint32_t CCDIER; + uint32_t MIECON; + uint32_t PIEAR; + uint32_t PIETR; + uint32_t CCPIER; + /*} */ + /* Debug Registers */ + uint32_t DBGSR; + uint32_t EXEVT; + uint32_t CREVT; + uint32_t SWEVT; + uint32_t TR0EVT; + uint32_t TR1EVT; + uint32_t DMS; + uint32_t DCX; + uint32_t DBGTCR; + uint32_t CCTRL; + uint32_t CCNT; + uint32_t ICNT; + uint32_t M1CNT; + uint32_t M2CNT; + uint32_t M3CNT; + /* Floating Point Registers */ + /* XXX: */ + + /* QEMU */ + int error_code; + uint32_t hflags; /* CPU State */ + + CPU_COMMON + + /* Internal CPU feature flags. 
*/ + uint64_t features; + + const tricore_def_t *cpu_model; + void *irq[8]; + struct QEMUTimer *timer; /* Internal timer */ +}; + +#define MASK_PCXI_PCPN 0xff000000 +#define MASK_PCXI_PIE 0x00800000 +#define MASK_PCXI_UL 0x00400000 +#define MASK_PCXI_PCXS 0x000f0000 +#define MASK_PCXI_PCXO 0x0000ffff + +#define MASK_PSW_USB 0xff000000 +#define MASK_USB_C 0x80000000 +#define MASK_USB_V 0x40000000 +#define MASK_USB_SV 0x20000000 +#define MASK_USB_AV 0x10000000 +#define MASK_USB_SAV 0x08000000 +#define MASK_PSW_PRS 0x00003000 +#define MASK_PSW_IO 0x00000c00 +#define MASK_PSW_IS 0x00000200 +#define MASK_PSW_GW 0x00000100 +#define MASK_PSW_CDE 0x00000080 +#define MASK_PSW_CDC 0x0000007f + +#define MASK_SYSCON_PRO_TEN 0x2 +#define MASK_SYSCON_FCD_SF 0x1 + +#define MASK_CPUID_MOD 0xffff0000 +#define MASK_CPUID_MOD_32B 0x0000ff00 +#define MASK_CPUID_REV 0x000000ff + +#define MASK_ICR_PIPN 0x00ff0000 +#define MASK_ICR_IE 0x00000100 +#define MASK_ICR_CCPN 0x000000ff + +#define MASK_FCX_FCXS 0x000f0000 +#define MASK_FCX_FCXO 0x0000ffff + +#define MASK_LCX_LCXS 0x000f0000 +#define MASK_LCX_LCX0 0x0000ffff + +#define MASK_DBGSR_DE 0x1 +#define MASK_DBGSR_HALT 0x6 +#define MASK_DBGSR_SUSP 0x10 +#define MASK_DBGSR_PREVSUSP 0x20 +#define MASK_DBGSR_PEVT 0x40 +#define MASK_DBGSR_EVTSRC 0x1f00 + +#define TRICORE_HFLAG_KUU 0x3 +#define TRICORE_HFLAG_UM0 0x00002 /* user mode-0 flag */ +#define TRICORE_HFLAG_UM1 0x00001 /* user mode-1 flag */ +#define TRICORE_HFLAG_SM 0x00000 /* kernel mode flag */ + +enum tricore_features { + TRICORE_FEATURE_13, + TRICORE_FEATURE_131, + TRICORE_FEATURE_16, + TRICORE_FEATURE_161, +}; + +static inline int tricore_feature(CPUTriCoreState *env, int feature) +{ + return (env->features & (1ULL << feature)) != 0; +} + +/* TriCore Traps Classes*/ +enum { + TRAPC_NONE = -1, + TRAPC_MMU = 0, + TRAPC_PROT = 1, + TRAPC_INSN_ERR = 2, + TRAPC_CTX_MNG = 3, + TRAPC_SYSBUS = 4, + TRAPC_ASSERT = 5, + TRAPC_SYSCALL = 6, + TRAPC_NMI = 7, +}; + +/* Class 0 TIN */ +enum { + TIN0_VAF = 0, + TIN0_VAP = 1, +}; + +/* Class 1 TIN */ +enum { + TIN1_PRIV = 1, + TIN1_MPR = 2, + TIN1_MPW = 3, + TIN1_MPX = 4, + TIN1_MPP = 5, + TIN1_MPN = 6, + TIN1_GRWP = 7, +}; + +/* Class 2 TIN */ +enum { + TIN2_IOPC = 1, + TIN2_UOPC = 2, + TIN2_OPD = 3, + TIN2_ALN = 4, + TIN2_MEM = 5, +}; + +/* Class 3 TIN */ +enum { + TIN3_FCD = 1, + TIN3_CDO = 2, + TIN3_CDU = 3, + TIN3_FCU = 4, + TIN3_CSU = 5, + TIN3_CTYP = 6, + TIN3_NEST = 7, +}; + +/* Class 4 TIN */ +enum { + TIN4_PSE = 1, + TIN4_DSE = 2, + TIN4_DAE = 3, + TIN4_CAE = 4, + TIN4_PIE = 5, + TIN4_DIE = 6, +}; + +/* Class 5 TIN */ +enum { + TIN5_OVF = 1, + TIN5_SOVF = 1, +}; + +/* Class 6 TIN + * + * Is always TIN6_SYS + */ + +/* Class 7 TIN */ +enum { + TIN7_NMI = 0, +}; + +uint32_t psw_read(CPUTriCoreState *env); +void psw_write(CPUTriCoreState *env, uint32_t val); + +#include "cpu-qom.h" +#define MMU_USER_IDX 2 + +void tricore_cpu_list(FILE *f, fprintf_function cpu_fprintf); + +#define cpu_exec cpu_tricore_exec +#define cpu_signal_handler cpu_tricore_signal_handler +#define cpu_list tricore_cpu_list + +static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch) +{ + return 0; +} + + + +#include "exec/cpu-all.h" + +enum { + /* 1 bit to define user level / supervisor access */ + ACCESS_USER = 0x00, + ACCESS_SUPER = 0x01, + /* 1 bit to indicate direction */ + ACCESS_STORE = 0x02, + /* Type of instruction that generated the access */ + ACCESS_CODE = 0x10, /* Code fetch access */ + ACCESS_INT = 0x20, /* Integer load/store access */ + ACCESS_FLOAT = 0x30, /* floating 
point load/store access */ +}; + +void cpu_state_reset(CPUTriCoreState *s); +int cpu_tricore_exec(CPUState *cpu); +void tricore_tcg_init(void); +int cpu_tricore_signal_handler(int host_signum, void *pinfo, void *puc); + +static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc, + target_ulong *cs_base, int *flags) +{ + *pc = env->PC; + *cs_base = 0; + *flags = 0; +} + +TriCoreCPU *cpu_tricore_init(const char *cpu_model); + +#define cpu_init(cpu_model) CPU(cpu_tricore_init(cpu_model)) + + +/* helpers.c */ +int cpu_tricore_handle_mmu_fault(CPUState *cpu, target_ulong address, + int rw, int mmu_idx); +#define cpu_handle_mmu_fault cpu_tricore_handle_mmu_fault + +#include "exec/exec-all.h" + +#endif /*__TRICORE_CPU_H__ */ diff --git a/src/target-tricore/csfr.def b/src/target-tricore/csfr.def new file mode 100644 index 0000000..05c45dd --- /dev/null +++ b/src/target-tricore/csfr.def @@ -0,0 +1,124 @@ +/* A(ll) access permited + R(ead only) access + E(nd init protected) access + + A|R|E(offset, register, feature introducing reg) + + NOTE: PSW is handled as a special case in gen_mtcr/mfcr */ + +A(0xfe00, PCXI, TRICORE_FEATURE_13) +A(0xfe08, PC, TRICORE_FEATURE_13) +A(0xfe14, SYSCON, TRICORE_FEATURE_13) +R(0xfe18, CPU_ID, TRICORE_FEATURE_13) +E(0xfe20, BIV, TRICORE_FEATURE_13) +E(0xfe24, BTV, TRICORE_FEATURE_13) +E(0xfe28, ISP, TRICORE_FEATURE_13) +A(0xfe2c, ICR, TRICORE_FEATURE_13) +A(0xfe38, FCX, TRICORE_FEATURE_13) +A(0xfe3c, LCX, TRICORE_FEATURE_13) +E(0x9400, COMPAT, TRICORE_FEATURE_131) +/* memory protection register */ +A(0xC000, DPR0_0L, TRICORE_FEATURE_13) +A(0xC004, DPR0_0U, TRICORE_FEATURE_13) +A(0xC008, DPR0_1L, TRICORE_FEATURE_13) +A(0xC00C, DPR0_1U, TRICORE_FEATURE_13) +A(0xC010, DPR0_2L, TRICORE_FEATURE_13) +A(0xC014, DPR0_2U, TRICORE_FEATURE_13) +A(0xC018, DPR0_3L, TRICORE_FEATURE_13) +A(0xC01C, DPR0_3U, TRICORE_FEATURE_13) +A(0xC400, DPR1_0L, TRICORE_FEATURE_13) +A(0xC404, DPR1_0U, TRICORE_FEATURE_13) +A(0xC408, DPR1_1L, TRICORE_FEATURE_13) +A(0xC40C, DPR1_1U, TRICORE_FEATURE_13) +A(0xC410, DPR1_2L, TRICORE_FEATURE_13) +A(0xC414, DPR1_2U, TRICORE_FEATURE_13) +A(0xC418, DPR1_3L, TRICORE_FEATURE_13) +A(0xC41C, DPR1_3U, TRICORE_FEATURE_13) +A(0xC800, DPR2_0L, TRICORE_FEATURE_13) +A(0xC804, DPR2_0U, TRICORE_FEATURE_13) +A(0xC808, DPR2_1L, TRICORE_FEATURE_13) +A(0xC80C, DPR2_1U, TRICORE_FEATURE_13) +A(0xC810, DPR2_2L, TRICORE_FEATURE_13) +A(0xC814, DPR2_2U, TRICORE_FEATURE_13) +A(0xC818, DPR2_3L, TRICORE_FEATURE_13) +A(0xC81C, DPR2_3U, TRICORE_FEATURE_13) +A(0xCC00, DPR3_0L, TRICORE_FEATURE_13) +A(0xCC04, DPR3_0U, TRICORE_FEATURE_13) +A(0xCC08, DPR3_1L, TRICORE_FEATURE_13) +A(0xCC0C, DPR3_1U, TRICORE_FEATURE_13) +A(0xCC10, DPR3_2L, TRICORE_FEATURE_13) +A(0xCC14, DPR3_2U, TRICORE_FEATURE_13) +A(0xCC18, DPR3_3L, TRICORE_FEATURE_13) +A(0xCC1C, DPR3_3U, TRICORE_FEATURE_13) +A(0xD000, CPR0_0L, TRICORE_FEATURE_13) +A(0xD004, CPR0_0U, TRICORE_FEATURE_13) +A(0xD008, CPR0_1L, TRICORE_FEATURE_13) +A(0xD00C, CPR0_1U, TRICORE_FEATURE_13) +A(0xD010, CPR0_2L, TRICORE_FEATURE_13) +A(0xD014, CPR0_2U, TRICORE_FEATURE_13) +A(0xD018, CPR0_3L, TRICORE_FEATURE_13) +A(0xD01C, CPR0_3U, TRICORE_FEATURE_13) +A(0xD400, CPR1_0L, TRICORE_FEATURE_13) +A(0xD404, CPR1_0U, TRICORE_FEATURE_13) +A(0xD408, CPR1_1L, TRICORE_FEATURE_13) +A(0xD40C, CPR1_1U, TRICORE_FEATURE_13) +A(0xD410, CPR1_2L, TRICORE_FEATURE_13) +A(0xD414, CPR1_2U, TRICORE_FEATURE_13) +A(0xD418, CPR1_3L, TRICORE_FEATURE_13) +A(0xD41C, CPR1_3U, TRICORE_FEATURE_13) +A(0xD800, CPR2_0L, TRICORE_FEATURE_13) +A(0xD804, CPR2_0U, 
TRICORE_FEATURE_13) +A(0xD808, CPR2_1L, TRICORE_FEATURE_13) +A(0xD80C, CPR2_1U, TRICORE_FEATURE_13) +A(0xD810, CPR2_2L, TRICORE_FEATURE_13) +A(0xD814, CPR2_2U, TRICORE_FEATURE_13) +A(0xD818, CPR2_3L, TRICORE_FEATURE_13) +A(0xD81C, CPR2_3U, TRICORE_FEATURE_13) +A(0xDC00, CPR3_0L, TRICORE_FEATURE_13) +A(0xDC04, CPR3_0U, TRICORE_FEATURE_13) +A(0xDC08, CPR3_1L, TRICORE_FEATURE_13) +A(0xDC0C, CPR3_1U, TRICORE_FEATURE_13) +A(0xDC10, CPR3_2L, TRICORE_FEATURE_13) +A(0xDC14, CPR3_2U, TRICORE_FEATURE_13) +A(0xDC18, CPR3_3L, TRICORE_FEATURE_13) +A(0xDC1C, CPR3_3U, TRICORE_FEATURE_13) +A(0xE000, DPM0, TRICORE_FEATURE_13) +A(0xE080, DPM1, TRICORE_FEATURE_13) +A(0xE100, DPM2, TRICORE_FEATURE_13) +A(0xE180, DPM3, TRICORE_FEATURE_13) +A(0xE200, CPM0, TRICORE_FEATURE_13) +A(0xE280, CPM1, TRICORE_FEATURE_13) +A(0xE300, CPM2, TRICORE_FEATURE_13) +A(0xE380, CPM3, TRICORE_FEATURE_13) +/* memory management registers */ +A(0x8000, MMU_CON, TRICORE_FEATURE_13) +A(0x8004, MMU_ASI, TRICORE_FEATURE_13) +A(0x800C, MMU_TVA, TRICORE_FEATURE_13) +A(0x8010, MMU_TPA, TRICORE_FEATURE_13) +A(0x8014, MMU_TPX, TRICORE_FEATURE_13) +A(0x8018, MMU_TFA, TRICORE_FEATURE_13) +E(0x9004, BMACON, TRICORE_FEATURE_131) +E(0x900C, SMACON, TRICORE_FEATURE_131) +A(0x9020, DIEAR, TRICORE_FEATURE_131) +A(0x9024, DIETR, TRICORE_FEATURE_131) +A(0x9028, CCDIER, TRICORE_FEATURE_131) +E(0x9044, MIECON, TRICORE_FEATURE_131) +A(0x9210, PIEAR, TRICORE_FEATURE_131) +A(0x9214, PIETR, TRICORE_FEATURE_131) +A(0x9218, CCPIER, TRICORE_FEATURE_131) +/* debug registers */ +A(0xFD00, DBGSR, TRICORE_FEATURE_13) +A(0xFD08, EXEVT, TRICORE_FEATURE_13) +A(0xFD0C, CREVT, TRICORE_FEATURE_13) +A(0xFD10, SWEVT, TRICORE_FEATURE_13) +A(0xFD20, TR0EVT, TRICORE_FEATURE_13) +A(0xFD24, TR1EVT, TRICORE_FEATURE_13) +A(0xFD40, DMS, TRICORE_FEATURE_13) +A(0xFD44, DCX, TRICORE_FEATURE_13) +A(0xFD48, DBGTCR, TRICORE_FEATURE_131) +A(0xFC00, CCTRL, TRICORE_FEATURE_131) +A(0xFC04, CCNT, TRICORE_FEATURE_131) +A(0xFC08, ICNT, TRICORE_FEATURE_131) +A(0xFC0C, M1CNT, TRICORE_FEATURE_131) +A(0xFC10, M2CNT, TRICORE_FEATURE_131) +A(0xFC14, M3CNT, TRICORE_FEATURE_131) diff --git a/src/target-tricore/helper.c b/src/target-tricore/helper.c new file mode 100644 index 0000000..1808b28 --- /dev/null +++ b/src/target-tricore/helper.c @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <stdarg.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <inttypes.h> + +#include "cpu.h" + +enum { + TLBRET_DIRTY = -4, + TLBRET_INVALID = -3, + TLBRET_NOMATCH = -2, + TLBRET_BADADDR = -1, + TLBRET_MATCH = 0 +}; + +#if defined(CONFIG_SOFTMMU) +static int get_physical_address(CPUTriCoreState *env, hwaddr *physical, + int *prot, target_ulong address, + int rw, int access_type) +{ + int ret = TLBRET_MATCH; + + *physical = address & 0xFFFFFFFF; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + + return ret; +} +#endif + +/* TODO: Add exeption support*/ +static void raise_mmu_exception(CPUTriCoreState *env, target_ulong address, + int rw, int tlb_error) +{ +} + +int cpu_tricore_handle_mmu_fault(CPUState *cs, target_ulong address, + int rw, int mmu_idx) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + hwaddr physical; + int prot; + int access_type; + int ret = 0; + + rw &= 1; + access_type = ACCESS_INT; + ret = get_physical_address(env, &physical, &prot, + address, rw, access_type); + qemu_log("%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx + " prot %d\n", __func__, address, ret, physical, prot); + + if (ret == TLBRET_MATCH) { + tlb_set_page(cs, address & TARGET_PAGE_MASK, + physical & TARGET_PAGE_MASK, prot | PAGE_EXEC, + mmu_idx, TARGET_PAGE_SIZE); + ret = 0; + } else if (ret < 0) { + raise_mmu_exception(env, address, rw, ret); + ret = 1; + } + + return ret; +} + +TriCoreCPU *cpu_tricore_init(const char *cpu_model) +{ + return TRICORE_CPU(cpu_generic_init(TYPE_TRICORE_CPU, cpu_model)); +} + +static void tricore_cpu_list_entry(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + CPUListState *s = user_data; + const char *typename; + char *name; + + typename = object_class_get_name(oc); + name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_TRICORE_CPU)); + (*s->cpu_fprintf)(s->file, " %s\n", + name); + g_free(name); +} + +void tricore_cpu_list(FILE *f, fprintf_function cpu_fprintf) +{ + CPUListState s = { + .file = f, + .cpu_fprintf = cpu_fprintf, + }; + GSList *list; + + list = object_class_get_list(TYPE_TRICORE_CPU, false); + (*cpu_fprintf)(f, "Available CPUs:\n"); + g_slist_foreach(list, tricore_cpu_list_entry, &s); + g_slist_free(list); +} + +uint32_t psw_read(CPUTriCoreState *env) +{ + /* clear all USB bits */ + env->PSW &= 0xffffff; + /* now set them from the cache */ + env->PSW |= ((env->PSW_USB_C != 0) << 31); + env->PSW |= ((env->PSW_USB_V & (1 << 31)) >> 1); + env->PSW |= ((env->PSW_USB_SV & (1 << 31)) >> 2); + env->PSW |= ((env->PSW_USB_AV & (1 << 31)) >> 3); + env->PSW |= ((env->PSW_USB_SAV & (1 << 31)) >> 4); + + return env->PSW; +} + +void psw_write(CPUTriCoreState *env, uint32_t val) +{ + env->PSW_USB_C = (val & MASK_USB_C); + env->PSW_USB_V = (val & MASK_USB_V << 1); + env->PSW_USB_SV = (val & MASK_USB_SV << 2); + env->PSW_USB_AV = ((val & MASK_USB_AV) << 3); + env->PSW_USB_SAV = ((val & MASK_USB_SAV) << 4); + env->PSW = val; +} diff --git a/src/target-tricore/helper.h b/src/target-tricore/helper.h new file mode 100644 index 0000000..cc221f1 --- /dev/null +++ b/src/target-tricore/helper.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* Arithmetic */ +DEF_HELPER_3(add_ssov, i32, env, i32, i32) +DEF_HELPER_3(add64_ssov, i64, env, i64, i64) +DEF_HELPER_3(add_suov, i32, env, i32, i32) +DEF_HELPER_3(add_h_ssov, i32, env, i32, i32) +DEF_HELPER_3(add_h_suov, i32, env, i32, i32) +DEF_HELPER_4(addr_h_ssov, i32, env, i64, i32, i32) +DEF_HELPER_4(addsur_h_ssov, i32, env, i64, i32, i32) +DEF_HELPER_3(sub_ssov, i32, env, i32, i32) +DEF_HELPER_3(sub64_ssov, i64, env, i64, i64) +DEF_HELPER_3(sub_suov, i32, env, i32, i32) +DEF_HELPER_3(sub_h_ssov, i32, env, i32, i32) +DEF_HELPER_3(sub_h_suov, i32, env, i32, i32) +DEF_HELPER_4(subr_h_ssov, i32, env, i64, i32, i32) +DEF_HELPER_4(subadr_h_ssov, i32, env, i64, i32, i32) +DEF_HELPER_3(mul_ssov, i32, env, i32, i32) +DEF_HELPER_3(mul_suov, i32, env, i32, i32) +DEF_HELPER_3(sha_ssov, i32, env, i32, i32) +DEF_HELPER_3(absdif_ssov, i32, env, i32, i32) +DEF_HELPER_4(madd32_ssov, i32, env, i32, i32, i32) +DEF_HELPER_4(madd32_suov, i32, env, i32, i32, i32) +DEF_HELPER_4(madd64_ssov, i64, env, i32, i64, i32) +DEF_HELPER_5(madd64_q_ssov, i64, env, i64, i32, i32, i32) +DEF_HELPER_3(madd32_q_add_ssov, i32, env, i64, i64) +DEF_HELPER_5(maddr_q_ssov, i32, env, i32, i32, i32, i32) +DEF_HELPER_4(madd64_suov, i64, env, i32, i64, i32) +DEF_HELPER_4(msub32_ssov, i32, env, i32, i32, i32) +DEF_HELPER_4(msub32_suov, i32, env, i32, i32, i32) +DEF_HELPER_4(msub64_ssov, i64, env, i32, i64, i32) +DEF_HELPER_5(msub64_q_ssov, i64, env, i64, i32, i32, i32) +DEF_HELPER_3(msub32_q_sub_ssov, i32, env, i64, i64) +DEF_HELPER_5(msubr_q_ssov, i32, env, i32, i32, i32, i32) +DEF_HELPER_4(msub64_suov, i64, env, i32, i64, i32) +DEF_HELPER_3(absdif_h_ssov, i32, env, i32, i32) +DEF_HELPER_2(abs_ssov, i32, env, i32) +DEF_HELPER_2(abs_h_ssov, i32, env, i32) +/* hword/byte arithmetic */ +DEF_HELPER_2(abs_b, i32, env, i32) +DEF_HELPER_2(abs_h, i32, env, i32) +DEF_HELPER_3(absdif_b, i32, env, i32, i32) +DEF_HELPER_3(absdif_h, i32, env, i32, i32) +DEF_HELPER_4(addr_h, i32, env, i64, i32, i32) +DEF_HELPER_4(addsur_h, i32, env, i64, i32, i32) +DEF_HELPER_5(maddr_q, i32, env, i32, i32, i32, i32) +DEF_HELPER_3(add_b, i32, env, i32, i32) +DEF_HELPER_3(add_h, i32, env, i32, i32) +DEF_HELPER_3(sub_b, i32, env, i32, i32) +DEF_HELPER_3(sub_h, i32, env, i32, i32) +DEF_HELPER_4(subr_h, i32, env, i64, i32, i32) +DEF_HELPER_4(subadr_h, i32, env, i64, i32, i32) +DEF_HELPER_5(msubr_q, i32, env, i32, i32, i32, i32) +DEF_HELPER_FLAGS_2(eq_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(eq_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(eqany_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(eqany_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(lt_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(lt_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(lt_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(lt_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(max_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(max_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(max_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(max_hu, TCG_CALL_NO_RWG_SE, i32, 
i32, i32) +DEF_HELPER_FLAGS_2(ixmax, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_FLAGS_2(ixmax_u, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_FLAGS_2(min_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(min_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(min_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(min_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(ixmin, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_FLAGS_2(ixmin_u, TCG_CALL_NO_RWG_SE, i64, i64, i32) +/* count leading ... */ +DEF_HELPER_FLAGS_1(clo, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(clo_h, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(clz_h, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(cls, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(cls_h, TCG_CALL_NO_RWG_SE, i32, i32) +/* sh */ +DEF_HELPER_FLAGS_2(sh, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(sh_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_3(sha, i32, env, i32, i32) +DEF_HELPER_2(sha_h, i32, i32, i32) +/* merge/split/parity */ +DEF_HELPER_FLAGS_2(bmerge, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_1(bsplit, TCG_CALL_NO_RWG_SE, i64, i32) +DEF_HELPER_FLAGS_1(parity, TCG_CALL_NO_RWG_SE, i32, i32) +/* float */ +DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32, i32) +DEF_HELPER_1(unpack, i64, i32) +/* dvinit */ +DEF_HELPER_3(dvinit_b_13, i64, env, i32, i32) +DEF_HELPER_3(dvinit_b_131, i64, env, i32, i32) +DEF_HELPER_3(dvinit_h_13, i64, env, i32, i32) +DEF_HELPER_3(dvinit_h_131, i64, env, i32, i32) +DEF_HELPER_FLAGS_2(dvadj, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_FLAGS_2(dvstep, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_FLAGS_2(dvstep_u, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_3(divide, i64, env, i32, i32) +DEF_HELPER_3(divide_u, i64, env, i32, i32) +/* mulh */ +DEF_HELPER_FLAGS_5(mul_h, TCG_CALL_NO_RWG_SE, i64, i32, i32, i32, i32, i32) +DEF_HELPER_FLAGS_5(mulm_h, TCG_CALL_NO_RWG_SE, i64, i32, i32, i32, i32, i32) +DEF_HELPER_FLAGS_5(mulr_h, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32, i32, i32) +/* crc32 */ +DEF_HELPER_FLAGS_2(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +/* CSA */ +DEF_HELPER_2(call, void, env, i32) +DEF_HELPER_1(ret, void, env) +DEF_HELPER_2(bisr, void, env, i32) +DEF_HELPER_1(rfe, void, env) +DEF_HELPER_1(rfm, void, env) +DEF_HELPER_2(ldlcx, void, env, i32) +DEF_HELPER_2(lducx, void, env, i32) +DEF_HELPER_2(stlcx, void, env, i32) +DEF_HELPER_2(stucx, void, env, i32) +DEF_HELPER_1(svlcx, void, env) +DEF_HELPER_1(rslcx, void, env) +/* Address mode helper */ +DEF_HELPER_1(br_update, i32, i32) +DEF_HELPER_2(circ_update, i32, i32, i32) +/* PSW cache helper */ +DEF_HELPER_2(psw_write, void, env, i32) +DEF_HELPER_1(psw_read, i32, env) diff --git a/src/target-tricore/op_helper.c b/src/target-tricore/op_helper.c new file mode 100644 index 0000000..53edbda --- /dev/null +++ b/src/target-tricore/op_helper.c @@ -0,0 +1,2695 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#include <stdlib.h> +#include "cpu.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include <zlib.h> /* for crc32 */ + +/* Addressing mode helper */ + +static uint16_t reverse16(uint16_t val) +{ + uint8_t high = (uint8_t)(val >> 8); + uint8_t low = (uint8_t)(val & 0xff); + + uint16_t rh, rl; + + rl = (uint16_t)((high * 0x0202020202ULL & 0x010884422010ULL) % 1023); + rh = (uint16_t)((low * 0x0202020202ULL & 0x010884422010ULL) % 1023); + + return (rh << 8) | rl; +} + +uint32_t helper_br_update(uint32_t reg) +{ + uint32_t index = reg & 0xffff; + uint32_t incr = reg >> 16; + uint32_t new_index = reverse16(reverse16(index) + reverse16(incr)); + return reg - index + new_index; +} + +uint32_t helper_circ_update(uint32_t reg, uint32_t off) +{ + uint32_t index = reg & 0xffff; + uint32_t length = reg >> 16; + int32_t new_index = index + off; + if (new_index < 0) { + new_index += length; + } else { + new_index %= length; + } + return reg - index + new_index; +} + +static uint32_t ssov32(CPUTriCoreState *env, int64_t arg) +{ + uint32_t ret; + int64_t max_pos = INT32_MAX; + int64_t max_neg = INT32_MIN; + if (arg > max_pos) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + ret = (target_ulong)max_pos; + } else { + if (arg < max_neg) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + ret = (target_ulong)max_neg; + } else { + env->PSW_USB_V = 0; + ret = (target_ulong)arg; + } + } + env->PSW_USB_AV = arg ^ arg * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return ret; +} + +static uint32_t suov32_pos(CPUTriCoreState *env, uint64_t arg) +{ + uint32_t ret; + uint64_t max_pos = UINT32_MAX; + if (arg > max_pos) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + ret = (target_ulong)max_pos; + } else { + env->PSW_USB_V = 0; + ret = (target_ulong)arg; + } + env->PSW_USB_AV = arg ^ arg * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return ret; +} + +static uint32_t suov32_neg(CPUTriCoreState *env, int64_t arg) +{ + uint32_t ret; + + if (arg < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + ret = 0; + } else { + env->PSW_USB_V = 0; + ret = (target_ulong)arg; + } + env->PSW_USB_AV = arg ^ arg * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return ret; +} + +static uint32_t ssov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1) +{ + int32_t max_pos = INT16_MAX; + int32_t max_neg = INT16_MIN; + int32_t av0, av1; + + env->PSW_USB_V = 0; + av0 = hw0 ^ hw0 * 2u; + if (hw0 > max_pos) { + env->PSW_USB_V = (1 << 31); + hw0 = max_pos; + } else if (hw0 < max_neg) { + env->PSW_USB_V = (1 << 31); + hw0 = max_neg; + } + + av1 = hw1 ^ hw1 * 2u; + if (hw1 > max_pos) { + env->PSW_USB_V = (1 << 31); + hw1 = max_pos; + } else if (hw1 < max_neg) { + env->PSW_USB_V = (1 << 31); + hw1 = max_neg; + } + + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = (av0 | av1) << 16; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return (hw0 & 0xffff) | (hw1 << 16); +} + +static uint32_t suov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1) +{ + int32_t max_pos = UINT16_MAX; + int32_t av0, av1; + + env->PSW_USB_V = 0; + av0 = hw0 ^ hw0 * 2u; + if (hw0 > max_pos) { + env->PSW_USB_V = (1 << 31); + hw0 = max_pos; + } else if (hw0 < 0) { + env->PSW_USB_V = (1 << 31); + hw0 = 0; + } + + av1 = hw1 ^ hw1 * 2u; + if (hw1 > max_pos) 
{ + env->PSW_USB_V = (1 << 31); + hw1 = max_pos; + } else if (hw1 < 0) { + env->PSW_USB_V = (1 << 31); + hw1 = 0; + } + + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = (av0 | av1) << 16; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return (hw0 & 0xffff) | (hw1 << 16); +} + +target_ulong helper_add_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t result = t1 + t2; + return ssov32(env, result); +} + +uint64_t helper_add64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) +{ + uint64_t result; + int64_t ovf; + + result = r1 + r2; + ovf = (result ^ r1) & ~(r1 ^ r2); + env->PSW_USB_AV = (result ^ result * 2u) >> 32; + env->PSW_USB_SAV |= env->PSW_USB_AV; + if (ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if ((int64_t)r1 >= 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + return result; +} + +target_ulong helper_add_h_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int32_t ret_hw0, ret_hw1; + + ret_hw0 = sextract32(r1, 0, 16) + sextract32(r2, 0, 16); + ret_hw1 = sextract32(r1, 16, 16) + sextract32(r2, 16, 16); + return ssov16(env, ret_hw0, ret_hw1); +} + +uint32_t helper_addr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low + mul_res0 + 0x8000; + result1 = r2_high + mul_res1 + 0x8000; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + if (result0 > INT32_MAX) { + ovf0 = (1 << 31); + result0 = INT32_MAX; + } else if (result0 < INT32_MIN) { + ovf0 = (1 << 31); + result0 = INT32_MIN; + } + + if (result1 > INT32_MAX) { + ovf1 = (1 << 31); + result1 = INT32_MAX; + } else if (result1 < INT32_MIN) { + ovf1 = (1 << 31); + result1 = INT32_MIN; + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_addsur_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low - mul_res0 + 0x8000; + result1 = r2_high + mul_res1 + 0x8000; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + if (result0 > INT32_MAX) { + ovf0 = (1 << 31); + result0 = INT32_MAX; + } else if (result0 < INT32_MIN) { + ovf0 = (1 << 31); + result0 = INT32_MIN; + } + + if (result1 > INT32_MAX) { + ovf1 = (1 << 31); + result1 = INT32_MAX; + } else if (result1 < INT32_MIN) { + ovf1 = (1 << 31); + result1 = INT32_MIN; + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + + +target_ulong helper_add_suov(CPUTriCoreState 
*env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = extract64(r1, 0, 32); + int64_t t2 = extract64(r2, 0, 32); + int64_t result = t1 + t2; + return suov32_pos(env, result); +} + +target_ulong helper_add_h_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int32_t ret_hw0, ret_hw1; + + ret_hw0 = extract32(r1, 0, 16) + extract32(r2, 0, 16); + ret_hw1 = extract32(r1, 16, 16) + extract32(r2, 16, 16); + return suov16(env, ret_hw0, ret_hw1); +} + +target_ulong helper_sub_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t result = t1 - t2; + return ssov32(env, result); +} + +uint64_t helper_sub64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) +{ + uint64_t result; + int64_t ovf; + + result = r1 - r2; + ovf = (result ^ r1) & (r1 ^ r2); + env->PSW_USB_AV = (result ^ result * 2u) >> 32; + env->PSW_USB_SAV |= env->PSW_USB_AV; + if (ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if ((int64_t)r1 >= 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + return result; +} + +target_ulong helper_sub_h_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int32_t ret_hw0, ret_hw1; + + ret_hw0 = sextract32(r1, 0, 16) - sextract32(r2, 0, 16); + ret_hw1 = sextract32(r1, 16, 16) - sextract32(r2, 16, 16); + return ssov16(env, ret_hw0, ret_hw1); +} + +uint32_t helper_subr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low - mul_res0 + 0x8000; + result1 = r2_high - mul_res1 + 0x8000; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + if (result0 > INT32_MAX) { + ovf0 = (1 << 31); + result0 = INT32_MAX; + } else if (result0 < INT32_MIN) { + ovf0 = (1 << 31); + result0 = INT32_MIN; + } + + if (result1 > INT32_MAX) { + ovf1 = (1 << 31); + result1 = INT32_MAX; + } else if (result1 < INT32_MIN) { + ovf1 = (1 << 31); + result1 = INT32_MIN; + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_subadr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low + mul_res0 + 0x8000; + result1 = r2_high - mul_res1 + 0x8000; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + if (result0 > INT32_MAX) { + ovf0 = (1 << 31); + result0 = INT32_MAX; + } else if (result0 < INT32_MIN) { + ovf0 = (1 << 31); + result0 = INT32_MIN; + } + + if (result1 > INT32_MAX) { + ovf1 = (1 << 31); + result1 = INT32_MAX; + } else if (result1 < INT32_MIN) { + ovf1 = (1 << 31); + result1 = INT32_MIN; + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + 
env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = extract64(r1, 0, 32); + int64_t t2 = extract64(r2, 0, 32); + int64_t result = t1 - t2; + return suov32_neg(env, result); +} + +target_ulong helper_sub_h_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int32_t ret_hw0, ret_hw1; + + ret_hw0 = extract32(r1, 0, 16) - extract32(r2, 0, 16); + ret_hw1 = extract32(r1, 16, 16) - extract32(r2, 16, 16); + return suov16(env, ret_hw0, ret_hw1); +} + +target_ulong helper_mul_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t result = t1 * t2; + return ssov32(env, result); +} + +target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = extract64(r1, 0, 32); + int64_t t2 = extract64(r2, 0, 32); + int64_t result = t1 * t2; + + return suov32_pos(env, result); +} + +target_ulong helper_sha_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = sextract64(r1, 0, 32); + int32_t t2 = sextract64(r2, 0, 6); + int64_t result; + if (t2 == 0) { + result = t1; + } else if (t2 > 0) { + result = t1 << t2; + } else { + result = t1 >> -t2; + } + return ssov32(env, result); +} + +uint32_t helper_abs_ssov(CPUTriCoreState *env, target_ulong r1) +{ + target_ulong result; + result = ((int32_t)r1 >= 0) ? r1 : (0 - r1); + return ssov32(env, result); +} + +uint32_t helper_abs_h_ssov(CPUTriCoreState *env, target_ulong r1) +{ + int32_t ret_h0, ret_h1; + + ret_h0 = sextract32(r1, 0, 16); + ret_h0 = (ret_h0 >= 0) ? ret_h0 : (0 - ret_h0); + + ret_h1 = sextract32(r1, 16, 16); + ret_h1 = (ret_h1 >= 0) ? 
ret_h1 : (0 - ret_h1); + + return ssov16(env, ret_h0, ret_h1); +} + +target_ulong helper_absdif_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t result; + + if (t1 > t2) { + result = t1 - t2; + } else { + result = t2 - t1; + } + return ssov32(env, result); +} + +uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int32_t t1, t2; + int32_t ret_h0, ret_h1; + + t1 = sextract32(r1, 0, 16); + t2 = sextract32(r2, 0, 16); + if (t1 > t2) { + ret_h0 = t1 - t2; + } else { + ret_h0 = t2 - t1; + } + + t1 = sextract32(r1, 16, 16); + t2 = sextract32(r2, 16, 16); + if (t1 > t2) { + ret_h1 = t1 - t2; + } else { + ret_h1 = t2 - t1; + } + + return ssov16(env, ret_h0, ret_h1); +} + +target_ulong helper_madd32_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2, target_ulong r3) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t result; + + result = t2 + (t1 * t3); + return ssov32(env, result); +} + +target_ulong helper_madd32_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2, target_ulong r3) +{ + uint64_t t1 = extract64(r1, 0, 32); + uint64_t t2 = extract64(r2, 0, 32); + uint64_t t3 = extract64(r3, 0, 32); + int64_t result; + + result = t2 + (t1 * t3); + return suov32_pos(env, result); +} + +uint64_t helper_madd64_ssov(CPUTriCoreState *env, target_ulong r1, + uint64_t r2, target_ulong r3) +{ + uint64_t ret, ovf; + int64_t t1 = sextract64(r1, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul; + + mul = t1 * t3; + ret = mul + r2; + ovf = (ret ^ mul) & ~(mul ^ r2); + + t1 = ret >> 32; + env->PSW_USB_AV = t1 ^ t1 * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if ((int64_t)ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul >= 0) { + ret = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + ret = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + + return ret; +} + +uint32_t +helper_madd32_q_add_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) +{ + int64_t result; + + result = (r1 + r2); + + env->PSW_USB_AV = (result ^ result * 2u); + env->PSW_USB_SAV |= env->PSW_USB_AV; + + /* we do the saturation by hand, since we produce an overflow on the host + if the mul before was (0x80000000 * 0x80000000) << 1). If this is the + case, we flip the saturated value. 
*/ + if (r2 == 0x8000000000000000LL) { + if (result > 0x7fffffffLL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MIN; + } else if (result < -0x80000000LL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MAX; + } else { + env->PSW_USB_V = 0; + } + } else { + if (result > 0x7fffffffLL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MAX; + } else if (result < -0x80000000LL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MIN; + } else { + env->PSW_USB_V = 0; + } + } + return (uint32_t)result; +} + +uint64_t helper_madd64_q_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = (int64_t)r1; + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t result, mul; + int64_t ovf; + + mul = (t2 * t3) << n; + result = mul + t1; + + env->PSW_USB_AV = (result ^ result * 2u) >> 32; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + ovf = (result ^ mul) & ~(mul ^ t1); + /* we do the saturation by hand, since we produce an overflow on the host + if the mul was (0x80000000 * 0x80000000) << 1). If this is the + case, we flip the saturated value. */ + if ((r2 == 0x80000000) && (r3 == 0x80000000) && (n == 1)) { + if (ovf >= 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul < 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + } else { + if (ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul >= 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + } + return (uint64_t)result; +} + +uint32_t helper_maddr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul, ret; + + if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { + mul = 0x7fffffff; + } else { + mul = (t2 * t3) << n; + } + + ret = t1 + mul + 0x8000; + + env->PSW_USB_AV = ret ^ ret * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if (ret > 0x7fffffffll) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + ret = INT32_MAX; + } else if (ret < -0x80000000ll) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + ret = INT32_MIN; + } else { + env->PSW_USB_V = 0; + } + return ret & 0xffff0000ll; +} + +uint64_t helper_madd64_suov(CPUTriCoreState *env, target_ulong r1, + uint64_t r2, target_ulong r3) +{ + uint64_t ret, mul; + uint64_t t1 = extract64(r1, 0, 32); + uint64_t t3 = extract64(r3, 0, 32); + + mul = t1 * t3; + ret = mul + r2; + + t1 = ret >> 32; + env->PSW_USB_AV = t1 ^ t1 * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if (ret < r2) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* saturate */ + ret = UINT64_MAX; + } else { + env->PSW_USB_V = 0; + } + return ret; +} + +target_ulong helper_msub32_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2, target_ulong r3) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t result; + + result = t2 - (t1 * t3); + return ssov32(env, result); +} + +target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2, target_ulong r3) +{ + uint64_t t1 = 
extract64(r1, 0, 32); + uint64_t t2 = extract64(r2, 0, 32); + uint64_t t3 = extract64(r3, 0, 32); + uint64_t result; + uint64_t mul; + + mul = (t1 * t3); + result = t2 - mul; + + env->PSW_USB_AV = result ^ result * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + /* we calculate ovf by hand here, because the multiplication can overflow on + the host, which would give false results if we compare to less than + zero */ + if (mul > t2) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = 0; + } else { + env->PSW_USB_V = 0; + } + return result; +} + +uint64_t helper_msub64_ssov(CPUTriCoreState *env, target_ulong r1, + uint64_t r2, target_ulong r3) +{ + uint64_t ret, ovf; + int64_t t1 = sextract64(r1, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul; + + mul = t1 * t3; + ret = r2 - mul; + ovf = (ret ^ r2) & (mul ^ r2); + + t1 = ret >> 32; + env->PSW_USB_AV = t1 ^ t1 * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if ((int64_t)ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul < 0) { + ret = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + ret = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + return ret; +} + +uint64_t helper_msub64_suov(CPUTriCoreState *env, target_ulong r1, + uint64_t r2, target_ulong r3) +{ + uint64_t ret, mul; + uint64_t t1 = extract64(r1, 0, 32); + uint64_t t3 = extract64(r3, 0, 32); + + mul = t1 * t3; + ret = r2 - mul; + + t1 = ret >> 32; + env->PSW_USB_AV = t1 ^ t1 * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if (ret > r2) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* saturate */ + ret = 0; + } else { + env->PSW_USB_V = 0; + } + return ret; +} + +uint32_t +helper_msub32_q_sub_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) +{ + int64_t result; + int64_t t1 = (int64_t)r1; + int64_t t2 = (int64_t)r2; + + result = t1 - t2; + + env->PSW_USB_AV = (result ^ result * 2u); + env->PSW_USB_SAV |= env->PSW_USB_AV; + + /* we do the saturation by hand, since we produce an overflow on the host + if the mul before was (0x80000000 * 0x80000000) << 1). If this is the + case, we flip the saturated value. */ + if (r2 == 0x8000000000000000LL) { + if (result > 0x7fffffffLL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MIN; + } else if (result < -0x80000000LL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MAX; + } else { + env->PSW_USB_V = 0; + } + } else { + if (result > 0x7fffffffLL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MAX; + } else if (result < -0x80000000LL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MIN; + } else { + env->PSW_USB_V = 0; + } + } + return (uint32_t)result; +} + +uint64_t helper_msub64_q_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = (int64_t)r1; + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t result, mul; + int64_t ovf; + + mul = (t2 * t3) << n; + result = t1 - mul; + + env->PSW_USB_AV = (result ^ result * 2u) >> 32; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + ovf = (result ^ t1) & (t1 ^ mul); + /* we do the saturation by hand, since we produce an overflow on the host + if the mul before was (0x80000000 * 0x80000000) << 1). If this is the + case, we flip the saturated value. 
*/ + if (mul == 0x8000000000000000LL) { + if (ovf >= 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul >= 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } + } else { + if (ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul < 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + } + + return (uint64_t)result; +} + +uint32_t helper_msubr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul, ret; + + if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { + mul = 0x7fffffff; + } else { + mul = (t2 * t3) << n; + } + + ret = t1 - mul + 0x8000; + + env->PSW_USB_AV = ret ^ ret * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if (ret > 0x7fffffffll) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + ret = INT32_MAX; + } else if (ret < -0x80000000ll) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + ret = INT32_MIN; + } else { + env->PSW_USB_V = 0; + } + return ret & 0xffff0000ll; +} + +uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg) +{ + int32_t b, i; + int32_t ovf = 0; + int32_t avf = 0; + int32_t ret = 0; + + for (i = 0; i < 4; i++) { + b = sextract32(arg, i * 8, 8); + b = (b >= 0) ? b : (0 - b); + ovf |= (b > 0x7F) || (b < -0x80); + avf |= b ^ b * 2u; + ret |= (b & 0xff) << (i * 8); + } + + env->PSW_USB_V = ovf << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 24; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_abs_h(CPUTriCoreState *env, target_ulong arg) +{ + int32_t h, i; + int32_t ovf = 0; + int32_t avf = 0; + int32_t ret = 0; + + for (i = 0; i < 2; i++) { + h = sextract32(arg, i * 16, 16); + h = (h >= 0) ? h : (0 - h); + ovf |= (h > 0x7FFF) || (h < -0x8000); + avf |= h ^ h * 2u; + ret |= (h & 0xffff) << (i * 16); + } + + env->PSW_USB_V = ovf << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 16; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_absdif_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t b, i; + int32_t extr_r2; + int32_t ovf = 0; + int32_t avf = 0; + int32_t ret = 0; + + for (i = 0; i < 4; i++) { + extr_r2 = sextract32(r2, i * 8, 8); + b = sextract32(r1, i * 8, 8); + b = (b > extr_r2) ? (b - extr_r2) : (extr_r2 - b); + ovf |= (b > 0x7F) || (b < -0x80); + avf |= b ^ b * 2u; + ret |= (b & 0xff) << (i * 8); + } + + env->PSW_USB_V = ovf << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 24; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return ret; +} + +uint32_t helper_absdif_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t h, i; + int32_t extr_r2; + int32_t ovf = 0; + int32_t avf = 0; + int32_t ret = 0; + + for (i = 0; i < 2; i++) { + extr_r2 = sextract32(r2, i * 16, 16); + h = sextract32(r1, i * 16, 16); + h = (h > extr_r2) ? 
(h - extr_r2) : (extr_r2 - h); + ovf |= (h > 0x7FFF) || (h < -0x8000); + avf |= h ^ h * 2u; + ret |= (h & 0xffff) << (i * 16); + } + + env->PSW_USB_V = ovf << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 16; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_addr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low + mul_res0 + 0x8000; + result1 = r2_high + mul_res1 + 0x8000; + + if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { + ovf0 = (1 << 31); + } + + if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { + ovf1 = (1 << 31); + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_addsur_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low - mul_res0 + 0x8000; + result1 = r2_high + mul_res1 + 0x8000; + + if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { + ovf0 = (1 << 31); + } + + if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { + ovf1 = (1 << 31); + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_maddr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul, ret; + + if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { + mul = 0x7fffffff; + } else { + mul = (t2 * t3) << n; + } + + ret = t1 + mul + 0x8000; + + if ((ret > 0x7fffffffll) || (ret < -0x80000000ll)) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + } else { + env->PSW_USB_V = 0; + } + env->PSW_USB_AV = ret ^ ret * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret & 0xffff0000ll; +} + +uint32_t helper_add_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t b, i; + int32_t extr_r1, extr_r2; + int32_t ovf = 0; + int32_t avf = 0; + uint32_t ret = 0; + + for (i = 0; i < 4; i++) { + extr_r1 = sextract32(r1, i * 8, 8); + extr_r2 = sextract32(r2, i * 8, 8); + + b = extr_r1 + extr_r2; + ovf |= ((b > 0x7f) || (b < -0x80)); + avf |= b ^ b * 2u; + ret |= ((b & 0xff) << (i*8)); + } + + env->PSW_USB_V = (ovf << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 24; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_add_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t h, i; + int32_t extr_r1, extr_r2; + int32_t ovf = 0; + int32_t avf = 0; 
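+    /* ovf and avf accumulate the per-halfword overflow and advance bits of
+       both lanes before they are shifted into the PSW below */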
+ int32_t ret = 0; + + for (i = 0; i < 2; i++) { + extr_r1 = sextract32(r1, i * 16, 16); + extr_r2 = sextract32(r2, i * 16, 16); + h = extr_r1 + extr_r2; + ovf |= ((h > 0x7fff) || (h < -0x8000)); + avf |= h ^ h * 2u; + ret |= (h & 0xffff) << (i * 16); + } + + env->PSW_USB_V = (ovf << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = (avf << 16); + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_subr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low - mul_res0 + 0x8000; + result1 = r2_high - mul_res1 + 0x8000; + + if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { + ovf0 = (1 << 31); + } + + if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { + ovf1 = (1 << 31); + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_subadr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low + mul_res0 + 0x8000; + result1 = r2_high - mul_res1 + 0x8000; + + if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { + ovf0 = (1 << 31); + } + + if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { + ovf1 = (1 << 31); + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_msubr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul, ret; + + if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { + mul = 0x7fffffff; + } else { + mul = (t2 * t3) << n; + } + + ret = t1 - mul + 0x8000; + + if ((ret > 0x7fffffffll) || (ret < -0x80000000ll)) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + } else { + env->PSW_USB_V = 0; + } + env->PSW_USB_AV = ret ^ ret * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret & 0xffff0000ll; +} + +uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t b, i; + int32_t extr_r1, extr_r2; + int32_t ovf = 0; + int32_t avf = 0; + uint32_t ret = 0; + + for (i = 0; i < 4; i++) { + extr_r1 = sextract32(r1, i * 8, 8); + extr_r2 = sextract32(r2, i * 8, 8); + + b = extr_r1 - extr_r2; + ovf |= ((b > 0x7f) || (b < -0x80)); + avf |= b ^ b * 2u; + ret |= ((b & 0xff) << (i*8)); + } + + env->PSW_USB_V = (ovf << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 24; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t 
helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t h, i; + int32_t extr_r1, extr_r2; + int32_t ovf = 0; + int32_t avf = 0; + int32_t ret = 0; + + for (i = 0; i < 2; i++) { + extr_r1 = sextract32(r1, i * 16, 16); + extr_r2 = sextract32(r2, i * 16, 16); + h = extr_r1 - extr_r2; + ovf |= ((h > 0x7fff) || (h < -0x8000)); + avf |= h ^ h * 2u; + ret |= (h & 0xffff) << (i * 16); + } + + env->PSW_USB_V = (ovf << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 16; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_eq_b(target_ulong r1, target_ulong r2) +{ + int32_t ret; + int32_t i, msk; + + ret = 0; + msk = 0xff; + for (i = 0; i < 4; i++) { + if ((r1 & msk) == (r2 & msk)) { + ret |= msk; + } + msk = msk << 8; + } + + return ret; +} + +uint32_t helper_eq_h(target_ulong r1, target_ulong r2) +{ + int32_t ret = 0; + + if ((r1 & 0xffff) == (r2 & 0xffff)) { + ret = 0xffff; + } + + if ((r1 & 0xffff0000) == (r2 & 0xffff0000)) { + ret |= 0xffff0000; + } + + return ret; +} + +uint32_t helper_eqany_b(target_ulong r1, target_ulong r2) +{ + int32_t i; + uint32_t ret = 0; + + for (i = 0; i < 4; i++) { + ret |= (sextract32(r1, i * 8, 8) == sextract32(r2, i * 8, 8)); + } + + return ret; +} + +uint32_t helper_eqany_h(target_ulong r1, target_ulong r2) +{ + uint32_t ret; + + ret = (sextract32(r1, 0, 16) == sextract32(r2, 0, 16)); + ret |= (sextract32(r1, 16, 16) == sextract32(r2, 16, 16)); + + return ret; +} + +uint32_t helper_lt_b(target_ulong r1, target_ulong r2) +{ + int32_t i; + uint32_t ret = 0; + + for (i = 0; i < 4; i++) { + if (sextract32(r1, i * 8, 8) < sextract32(r2, i * 8, 8)) { + ret |= (0xff << (i * 8)); + } + } + + return ret; +} + +uint32_t helper_lt_bu(target_ulong r1, target_ulong r2) +{ + int32_t i; + uint32_t ret = 0; + + for (i = 0; i < 4; i++) { + if (extract32(r1, i * 8, 8) < extract32(r2, i * 8, 8)) { + ret |= (0xff << (i * 8)); + } + } + + return ret; +} + +uint32_t helper_lt_h(target_ulong r1, target_ulong r2) +{ + uint32_t ret = 0; + + if (sextract32(r1, 0, 16) < sextract32(r2, 0, 16)) { + ret |= 0xffff; + } + + if (sextract32(r1, 16, 16) < sextract32(r2, 16, 16)) { + ret |= 0xffff0000; + } + + return ret; +} + +uint32_t helper_lt_hu(target_ulong r1, target_ulong r2) +{ + uint32_t ret = 0; + + if (extract32(r1, 0, 16) < extract32(r2, 0, 16)) { + ret |= 0xffff; + } + + if (extract32(r1, 16, 16) < extract32(r2, 16, 16)) { + ret |= 0xffff0000; + } + + return ret; +} + +#define EXTREMA_H_B(name, op) \ +uint32_t helper_##name ##_b(target_ulong r1, target_ulong r2) \ +{ \ + int32_t i, extr_r1, extr_r2; \ + uint32_t ret = 0; \ + \ + for (i = 0; i < 4; i++) { \ + extr_r1 = sextract32(r1, i * 8, 8); \ + extr_r2 = sextract32(r2, i * 8, 8); \ + extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ + ret |= (extr_r1 & 0xff) << (i * 8); \ + } \ + return ret; \ +} \ + \ +uint32_t helper_##name ##_bu(target_ulong r1, target_ulong r2)\ +{ \ + int32_t i; \ + uint32_t extr_r1, extr_r2; \ + uint32_t ret = 0; \ + \ + for (i = 0; i < 4; i++) { \ + extr_r1 = extract32(r1, i * 8, 8); \ + extr_r2 = extract32(r2, i * 8, 8); \ + extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ + ret |= (extr_r1 & 0xff) << (i * 8); \ + } \ + return ret; \ +} \ + \ +uint32_t helper_##name ##_h(target_ulong r1, target_ulong r2) \ +{ \ + int32_t extr_r1, extr_r2; \ + uint32_t ret = 0; \ + \ + extr_r1 = sextract32(r1, 0, 16); \ + extr_r2 = sextract32(r2, 0, 16); \ + ret = (extr_r1 op extr_r2) ? 
extr_r1 : extr_r2; \ + ret = ret & 0xffff; \ + \ + extr_r1 = sextract32(r1, 16, 16); \ + extr_r2 = sextract32(r2, 16, 16); \ + extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ + ret |= extr_r1 << 16; \ + \ + return ret; \ +} \ + \ +uint32_t helper_##name ##_hu(target_ulong r1, target_ulong r2)\ +{ \ + uint32_t extr_r1, extr_r2; \ + uint32_t ret = 0; \ + \ + extr_r1 = extract32(r1, 0, 16); \ + extr_r2 = extract32(r2, 0, 16); \ + ret = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ + ret = ret & 0xffff; \ + \ + extr_r1 = extract32(r1, 16, 16); \ + extr_r2 = extract32(r2, 16, 16); \ + extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ + ret |= extr_r1 << (16); \ + \ + return ret; \ +} \ + \ +uint64_t helper_ix##name(uint64_t r1, uint32_t r2) \ +{ \ + int64_t r2l, r2h, r1hl; \ + uint64_t ret = 0; \ + \ + ret = ((r1 + 2) & 0xffff); \ + r2l = sextract64(r2, 0, 16); \ + r2h = sextract64(r2, 16, 16); \ + r1hl = sextract64(r1, 32, 16); \ + \ + if ((r2l op ## = r2h) && (r2l op r1hl)) { \ + ret |= (r2l & 0xffff) << 32; \ + ret |= extract64(r1, 0, 16) << 16; \ + } else if ((r2h op r2l) && (r2h op r1hl)) { \ + ret |= extract64(r2, 16, 16) << 32; \ + ret |= extract64(r1 + 1, 0, 16) << 16; \ + } else { \ + ret |= r1 & 0xffffffff0000ull; \ + } \ + return ret; \ +} \ + \ +uint64_t helper_ix##name ##_u(uint64_t r1, uint32_t r2) \ +{ \ + int64_t r2l, r2h, r1hl; \ + uint64_t ret = 0; \ + \ + ret = ((r1 + 2) & 0xffff); \ + r2l = extract64(r2, 0, 16); \ + r2h = extract64(r2, 16, 16); \ + r1hl = extract64(r1, 32, 16); \ + \ + if ((r2l op ## = r2h) && (r2l op r1hl)) { \ + ret |= (r2l & 0xffff) << 32; \ + ret |= extract64(r1, 0, 16) << 16; \ + } else if ((r2h op r2l) && (r2h op r1hl)) { \ + ret |= extract64(r2, 16, 16) << 32; \ + ret |= extract64(r1 + 1, 0, 16) << 16; \ + } else { \ + ret |= r1 & 0xffffffff0000ull; \ + } \ + return ret; \ +} + +EXTREMA_H_B(max, >) +EXTREMA_H_B(min, <) + +#undef EXTREMA_H_B + +uint32_t helper_clo(target_ulong r1) +{ + return clo32(r1); +} + +uint32_t helper_clo_h(target_ulong r1) +{ + uint32_t ret_hw0 = extract32(r1, 0, 16); + uint32_t ret_hw1 = extract32(r1, 16, 16); + + ret_hw0 = clo32(ret_hw0 << 16); + ret_hw1 = clo32(ret_hw1 << 16); + + if (ret_hw0 > 16) { + ret_hw0 = 16; + } + if (ret_hw1 > 16) { + ret_hw1 = 16; + } + + return ret_hw0 | (ret_hw1 << 16); +} + +uint32_t helper_clz(target_ulong r1) +{ + return clz32(r1); +} + +uint32_t helper_clz_h(target_ulong r1) +{ + uint32_t ret_hw0 = extract32(r1, 0, 16); + uint32_t ret_hw1 = extract32(r1, 16, 16); + + ret_hw0 = clz32(ret_hw0 << 16); + ret_hw1 = clz32(ret_hw1 << 16); + + if (ret_hw0 > 16) { + ret_hw0 = 16; + } + if (ret_hw1 > 16) { + ret_hw1 = 16; + } + + return ret_hw0 | (ret_hw1 << 16); +} + +uint32_t helper_cls(target_ulong r1) +{ + return clrsb32(r1); +} + +uint32_t helper_cls_h(target_ulong r1) +{ + uint32_t ret_hw0 = extract32(r1, 0, 16); + uint32_t ret_hw1 = extract32(r1, 16, 16); + + ret_hw0 = clrsb32(ret_hw0 << 16); + ret_hw1 = clrsb32(ret_hw1 << 16); + + if (ret_hw0 > 15) { + ret_hw0 = 15; + } + if (ret_hw1 > 15) { + ret_hw1 = 15; + } + + return ret_hw0 | (ret_hw1 << 16); +} + +uint32_t helper_sh(target_ulong r1, target_ulong r2) +{ + int32_t shift_count = sextract32(r2, 0, 6); + + if (shift_count == -32) { + return 0; + } else if (shift_count < 0) { + return r1 >> -shift_count; + } else { + return r1 << shift_count; + } +} + +uint32_t helper_sh_h(target_ulong r1, target_ulong r2) +{ + int32_t ret_hw0, ret_hw1; + int32_t shift_count; + + shift_count = sextract32(r2, 0, 5); + + if (shift_count == -16) { + 
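+        /* a shift count of -16 moves every bit out of each halfword, so the
+           result is zero */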
return 0; + } else if (shift_count < 0) { + ret_hw0 = extract32(r1, 0, 16) >> -shift_count; + ret_hw1 = extract32(r1, 16, 16) >> -shift_count; + return (ret_hw0 & 0xffff) | (ret_hw1 << 16); + } else { + ret_hw0 = extract32(r1, 0, 16) << shift_count; + ret_hw1 = extract32(r1, 16, 16) << shift_count; + return (ret_hw0 & 0xffff) | (ret_hw1 << 16); + } +} + +uint32_t helper_sha(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t shift_count; + int64_t result, t1; + uint32_t ret; + + shift_count = sextract32(r2, 0, 6); + t1 = sextract32(r1, 0, 32); + + if (shift_count == 0) { + env->PSW_USB_C = env->PSW_USB_V = 0; + ret = r1; + } else if (shift_count == -32) { + env->PSW_USB_C = r1; + env->PSW_USB_V = 0; + ret = t1 >> 31; + } else if (shift_count > 0) { + result = t1 << shift_count; + /* calc carry */ + env->PSW_USB_C = ((result & 0xffffffff00000000ULL) != 0); + /* calc v */ + env->PSW_USB_V = (((result > 0x7fffffffLL) || + (result < -0x80000000LL)) << 31); + /* calc sv */ + env->PSW_USB_SV |= env->PSW_USB_V; + ret = (uint32_t)result; + } else { + env->PSW_USB_V = 0; + env->PSW_USB_C = (r1 & ((1 << -shift_count) - 1)); + ret = t1 >> -shift_count; + } + + env->PSW_USB_AV = ret ^ ret * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_sha_h(target_ulong r1, target_ulong r2) +{ + int32_t shift_count; + int32_t ret_hw0, ret_hw1; + + shift_count = sextract32(r2, 0, 5); + + if (shift_count == 0) { + return r1; + } else if (shift_count < 0) { + ret_hw0 = sextract32(r1, 0, 16) >> -shift_count; + ret_hw1 = sextract32(r1, 16, 16) >> -shift_count; + return (ret_hw0 & 0xffff) | (ret_hw1 << 16); + } else { + ret_hw0 = sextract32(r1, 0, 16) << shift_count; + ret_hw1 = sextract32(r1, 16, 16) << shift_count; + return (ret_hw0 & 0xffff) | (ret_hw1 << 16); + } +} + +uint32_t helper_bmerge(target_ulong r1, target_ulong r2) +{ + uint32_t i, ret; + + ret = 0; + for (i = 0; i < 16; i++) { + ret |= (r1 & 1) << (2 * i + 1); + ret |= (r2 & 1) << (2 * i); + r1 = r1 >> 1; + r2 = r2 >> 1; + } + return ret; +} + +uint64_t helper_bsplit(uint32_t r1) +{ + int32_t i; + uint64_t ret; + + ret = 0; + for (i = 0; i < 32; i = i + 2) { + /* even */ + ret |= (r1 & 1) << (i/2); + r1 = r1 >> 1; + /* odd */ + ret |= (uint64_t)(r1 & 1) << (i/2 + 32); + r1 = r1 >> 1; + } + return ret; +} + +uint32_t helper_parity(target_ulong r1) +{ + uint32_t ret; + uint32_t nOnes, i; + + ret = 0; + nOnes = 0; + for (i = 0; i < 8; i++) { + ret ^= (r1 & 1); + r1 = r1 >> 1; + } + /* second byte */ + nOnes = 0; + for (i = 0; i < 8; i++) { + nOnes ^= (r1 & 1); + r1 = r1 >> 1; + } + ret |= nOnes << 8; + /* third byte */ + nOnes = 0; + for (i = 0; i < 8; i++) { + nOnes ^= (r1 & 1); + r1 = r1 >> 1; + } + ret |= nOnes << 16; + /* fourth byte */ + nOnes = 0; + for (i = 0; i < 8; i++) { + nOnes ^= (r1 & 1); + r1 = r1 >> 1; + } + ret |= nOnes << 24; + + return ret; +} + +uint32_t helper_pack(uint32_t carry, uint32_t r1_low, uint32_t r1_high, + target_ulong r2) +{ + uint32_t ret; + int32_t fp_exp, fp_frac, temp_exp, fp_exp_frac; + int32_t int_exp = r1_high; + int32_t int_mant = r1_low; + uint32_t flag_rnd = (int_mant & (1 << 7)) && ( + (int_mant & (1 << 8)) || + (int_mant & 0x7f) || + (carry != 0)); + if (((int_mant & (1<<31)) == 0) && (int_exp == 255)) { + fp_exp = 255; + fp_frac = extract32(int_mant, 8, 23); + } else if ((int_mant & (1<<31)) && (int_exp >= 127)) { + fp_exp = 255; + fp_frac = 0; + } else if ((int_mant & (1<<31)) && (int_exp <= -128)) { + fp_exp = 0; + fp_frac = 0; + } else if (int_mant == 0) { + 
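+        /* a zero mantissa packs to (signed) zero; the sign bit is taken from
+           r2 below */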
fp_exp = 0; + fp_frac = 0; + } else { + if (((int_mant & (1 << 31)) == 0)) { + temp_exp = 0; + } else { + temp_exp = int_exp + 128; + } + fp_exp_frac = (((temp_exp & 0xff) << 23) | + extract32(int_mant, 8, 23)) + + flag_rnd; + fp_exp = extract32(fp_exp_frac, 23, 8); + fp_frac = extract32(fp_exp_frac, 0, 23); + } + ret = r2 & (1 << 31); + ret = ret + (fp_exp << 23); + ret = ret + (fp_frac & 0x7fffff); + + return ret; +} + +uint64_t helper_unpack(target_ulong arg1) +{ + int32_t fp_exp = extract32(arg1, 23, 8); + int32_t fp_frac = extract32(arg1, 0, 23); + uint64_t ret; + int32_t int_exp, int_mant; + + if (fp_exp == 255) { + int_exp = 255; + int_mant = (fp_frac << 7); + } else if ((fp_exp == 0) && (fp_frac == 0)) { + int_exp = -127; + int_mant = 0; + } else if ((fp_exp == 0) && (fp_frac != 0)) { + int_exp = -126; + int_mant = (fp_frac << 7); + } else { + int_exp = fp_exp - 127; + int_mant = (fp_frac << 7); + int_mant |= (1 << 30); + } + ret = int_exp; + ret = ret << 32; + ret |= int_mant; + + return ret; +} + +uint64_t helper_dvinit_b_13(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint64_t ret; + int32_t abs_sig_dividend, abs_divisor; + + ret = sextract32(r1, 0, 32); + ret = ret << 24; + if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { + ret |= 0xffffff; + } + + abs_sig_dividend = abs((int32_t)r1) >> 8; + abs_divisor = abs((int32_t)r2); + /* calc overflow + ofv if (a/b >= 255) <=> (a/255 >= b) */ + env->PSW_USB_V = (abs_sig_dividend >= abs_divisor) << 31; + env->PSW_USB_V = env->PSW_USB_V << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + + return ret; +} + +uint64_t helper_dvinit_b_131(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint64_t ret = sextract32(r1, 0, 32); + + ret = ret << 24; + if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { + ret |= 0xffffff; + } + /* calc overflow */ + env->PSW_USB_V = ((r2 == 0) || ((r2 == 0xffffffff) && (r1 == 0xffffff80))); + env->PSW_USB_V = env->PSW_USB_V << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + + return ret; +} + +uint64_t helper_dvinit_h_13(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint64_t ret; + int32_t abs_sig_dividend, abs_divisor; + + ret = sextract32(r1, 0, 32); + ret = ret << 16; + if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { + ret |= 0xffff; + } + + abs_sig_dividend = abs((int32_t)r1) >> 16; + abs_divisor = abs((int32_t)r2); + /* calc overflow + ofv if (a/b >= 0xffff) <=> (a/0xffff >= b) */ + env->PSW_USB_V = (abs_sig_dividend >= abs_divisor) << 31; + env->PSW_USB_V = env->PSW_USB_V << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + + return ret; +} + +uint64_t helper_dvinit_h_131(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint64_t ret = sextract32(r1, 0, 32); + + ret = ret << 16; + if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { + ret |= 0xffff; + } + /* calc overflow */ + env->PSW_USB_V = ((r2 == 0) || ((r2 == 0xffffffff) && (r1 == 0xffff8000))); + env->PSW_USB_V = env->PSW_USB_V << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + + return ret; +} + +uint64_t helper_dvadj(uint64_t r1, uint32_t r2) +{ + int32_t x_sign = (r1 >> 63); + int32_t q_sign = x_sign ^ (r2 >> 31); + int32_t eq_pos = x_sign & ((r1 >> 32) == r2); + int32_t eq_neg = x_sign & ((r1 >> 32) == -r2); + uint32_t quotient; + uint64_t ret, remainder; + + if ((q_sign & ~eq_neg) | eq_pos) { + quotient = (r1 + 1) & 0xffffffff; + } else { + quotient = r1 & 0xffffffff; + } + + if (eq_pos | eq_neg) { + remainder = 0; + } else { + remainder = (r1 & 
0xffffffff00000000ull); + } + ret = remainder|quotient; + return ret; +} + +uint64_t helper_dvstep(uint64_t r1, uint32_t r2) +{ + int32_t dividend_sign = extract64(r1, 63, 1); + int32_t divisor_sign = extract32(r2, 31, 1); + int32_t quotient_sign = (dividend_sign != divisor_sign); + int32_t addend, dividend_quotient, remainder; + int32_t i, temp; + + if (quotient_sign) { + addend = r2; + } else { + addend = -r2; + } + dividend_quotient = (int32_t)r1; + remainder = (int32_t)(r1 >> 32); + + for (i = 0; i < 8; i++) { + remainder = (remainder << 1) | extract32(dividend_quotient, 31, 1); + dividend_quotient <<= 1; + temp = remainder + addend; + if ((temp < 0) == dividend_sign) { + remainder = temp; + } + if (((temp < 0) == dividend_sign)) { + dividend_quotient = dividend_quotient | !quotient_sign; + } else { + dividend_quotient = dividend_quotient | quotient_sign; + } + } + return ((uint64_t)remainder << 32) | (uint32_t)dividend_quotient; +} + +uint64_t helper_dvstep_u(uint64_t r1, uint32_t r2) +{ + int32_t dividend_quotient = extract64(r1, 0, 32); + int64_t remainder = extract64(r1, 32, 32); + int32_t i; + int64_t temp; + for (i = 0; i < 8; i++) { + remainder = (remainder << 1) | extract32(dividend_quotient, 31, 1); + dividend_quotient <<= 1; + temp = (remainder & 0xffffffff) - r2; + if (temp >= 0) { + remainder = temp; + } + dividend_quotient = dividend_quotient | !(temp < 0); + } + return ((uint64_t)remainder << 32) | (uint32_t)dividend_quotient; +} + +uint64_t helper_divide(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + int32_t quotient, remainder; + int32_t dividend = (int32_t)r1; + int32_t divisor = (int32_t)r2; + + if (divisor == 0) { + if (dividend >= 0) { + quotient = 0x7fffffff; + remainder = 0; + } else { + quotient = 0x80000000; + remainder = 0; + } + env->PSW_USB_V = (1 << 31); + } else if ((divisor == 0xffffffff) && (dividend == 0x80000000)) { + quotient = 0x7fffffff; + remainder = 0; + env->PSW_USB_V = (1 << 31); + } else { + remainder = dividend % divisor; + quotient = (dividend - remainder)/divisor; + env->PSW_USB_V = 0; + } + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + return ((uint64_t)remainder << 32) | (uint32_t)quotient; +} + +uint64_t helper_divide_u(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint32_t quotient, remainder; + uint32_t dividend = r1; + uint32_t divisor = r2; + + if (divisor == 0) { + quotient = 0xffffffff; + remainder = 0; + env->PSW_USB_V = (1 << 31); + } else { + remainder = dividend % divisor; + quotient = (dividend - remainder)/divisor; + env->PSW_USB_V = 0; + } + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + return ((uint64_t)remainder << 32) | quotient; +} + +uint64_t helper_mul_h(uint32_t arg00, uint32_t arg01, + uint32_t arg10, uint32_t arg11, uint32_t n) +{ + uint64_t ret; + uint32_t result0, result1; + + int32_t sc1 = ((arg00 & 0xffff) == 0x8000) && + ((arg10 & 0xffff) == 0x8000) && (n == 1); + int32_t sc0 = ((arg01 & 0xffff) == 0x8000) && + ((arg11 & 0xffff) == 0x8000) && (n == 1); + if (sc1) { + result1 = 0x7fffffff; + } else { + result1 = (((uint32_t)(arg00 * arg10)) << n); + } + if (sc0) { + result0 = 0x7fffffff; + } else { + result0 = (((uint32_t)(arg01 * arg11)) << n); + } + ret = (((uint64_t)result1 << 32)) | result0; + return ret; +} + +uint64_t helper_mulm_h(uint32_t arg00, uint32_t arg01, + uint32_t arg10, uint32_t arg11, uint32_t n) +{ + uint64_t ret; + int64_t result0, result1; + + int32_t sc1 = ((arg00 & 0xffff) == 0x8000) && + ((arg10 & 0xffff) == 0x8000) && (n == 1); + int32_t sc0 = 
((arg01 & 0xffff) == 0x8000) && + ((arg11 & 0xffff) == 0x8000) && (n == 1); + + if (sc1) { + result1 = 0x7fffffff; + } else { + result1 = (((int32_t)arg00 * (int32_t)arg10) << n); + } + if (sc0) { + result0 = 0x7fffffff; + } else { + result0 = (((int32_t)arg01 * (int32_t)arg11) << n); + } + ret = (result1 + result0); + ret = ret << 16; + return ret; +} +uint32_t helper_mulr_h(uint32_t arg00, uint32_t arg01, + uint32_t arg10, uint32_t arg11, uint32_t n) +{ + uint32_t result0, result1; + + int32_t sc1 = ((arg00 & 0xffff) == 0x8000) && + ((arg10 & 0xffff) == 0x8000) && (n == 1); + int32_t sc0 = ((arg01 & 0xffff) == 0x8000) && + ((arg11 & 0xffff) == 0x8000) && (n == 1); + + if (sc1) { + result1 = 0x7fffffff; + } else { + result1 = ((arg00 * arg10) << n) + 0x8000; + } + if (sc0) { + result0 = 0x7fffffff; + } else { + result0 = ((arg01 * arg11) << n) + 0x8000; + } + return (result1 & 0xffff0000) | (result0 >> 16); +} + +uint32_t helper_crc32(uint32_t arg0, uint32_t arg1) +{ + uint8_t buf[4]; + uint32_t ret; + stl_be_p(buf, arg0); + + ret = crc32(arg1, buf, 4); + return ret; +} + +/* context save area (CSA) related helpers */ + +static int cdc_increment(target_ulong *psw) +{ + if ((*psw & MASK_PSW_CDC) == 0x7f) { + return 0; + } + + (*psw)++; + /* check for overflow */ + int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); + int mask = (1u << (7 - lo)) - 1; + int count = *psw & mask; + if (count == 0) { + (*psw)--; + return 1; + } + return 0; +} + +static int cdc_decrement(target_ulong *psw) +{ + if ((*psw & MASK_PSW_CDC) == 0x7f) { + return 0; + } + /* check for underflow */ + int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); + int mask = (1u << (7 - lo)) - 1; + int count = *psw & mask; + if (count == 0) { + return 1; + } + (*psw)--; + return 0; +} + +static bool cdc_zero(target_ulong *psw) +{ + int cdc = *psw & MASK_PSW_CDC; + /* Returns TRUE if PSW.CDC.COUNT == 0 or if PSW.CDC == + 7'b1111111, otherwise returns FALSE. 
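+       A CDC value of 7'b1111111 disables call depth counting (the
+       increment/decrement helpers above also treat it as a no-op), which is
+       why it counts as a zero count here.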
*/ + if (cdc == 0x7f) { + return true; + } + /* find CDC.COUNT */ + int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); + int mask = (1u << (7 - lo)) - 1; + int count = *psw & mask; + return count == 0; +} + +static void save_context_upper(CPUTriCoreState *env, int ea) +{ + cpu_stl_data(env, ea, env->PCXI); + cpu_stl_data(env, ea+4, env->PSW); + cpu_stl_data(env, ea+8, env->gpr_a[10]); + cpu_stl_data(env, ea+12, env->gpr_a[11]); + cpu_stl_data(env, ea+16, env->gpr_d[8]); + cpu_stl_data(env, ea+20, env->gpr_d[9]); + cpu_stl_data(env, ea+24, env->gpr_d[10]); + cpu_stl_data(env, ea+28, env->gpr_d[11]); + cpu_stl_data(env, ea+32, env->gpr_a[12]); + cpu_stl_data(env, ea+36, env->gpr_a[13]); + cpu_stl_data(env, ea+40, env->gpr_a[14]); + cpu_stl_data(env, ea+44, env->gpr_a[15]); + cpu_stl_data(env, ea+48, env->gpr_d[12]); + cpu_stl_data(env, ea+52, env->gpr_d[13]); + cpu_stl_data(env, ea+56, env->gpr_d[14]); + cpu_stl_data(env, ea+60, env->gpr_d[15]); +} + +static void save_context_lower(CPUTriCoreState *env, int ea) +{ + cpu_stl_data(env, ea, env->PCXI); + cpu_stl_data(env, ea+4, env->gpr_a[11]); + cpu_stl_data(env, ea+8, env->gpr_a[2]); + cpu_stl_data(env, ea+12, env->gpr_a[3]); + cpu_stl_data(env, ea+16, env->gpr_d[0]); + cpu_stl_data(env, ea+20, env->gpr_d[1]); + cpu_stl_data(env, ea+24, env->gpr_d[2]); + cpu_stl_data(env, ea+28, env->gpr_d[3]); + cpu_stl_data(env, ea+32, env->gpr_a[4]); + cpu_stl_data(env, ea+36, env->gpr_a[5]); + cpu_stl_data(env, ea+40, env->gpr_a[6]); + cpu_stl_data(env, ea+44, env->gpr_a[7]); + cpu_stl_data(env, ea+48, env->gpr_d[4]); + cpu_stl_data(env, ea+52, env->gpr_d[5]); + cpu_stl_data(env, ea+56, env->gpr_d[6]); + cpu_stl_data(env, ea+60, env->gpr_d[7]); +} + +static void restore_context_upper(CPUTriCoreState *env, int ea, + target_ulong *new_PCXI, target_ulong *new_PSW) +{ + *new_PCXI = cpu_ldl_data(env, ea); + *new_PSW = cpu_ldl_data(env, ea+4); + env->gpr_a[10] = cpu_ldl_data(env, ea+8); + env->gpr_a[11] = cpu_ldl_data(env, ea+12); + env->gpr_d[8] = cpu_ldl_data(env, ea+16); + env->gpr_d[9] = cpu_ldl_data(env, ea+20); + env->gpr_d[10] = cpu_ldl_data(env, ea+24); + env->gpr_d[11] = cpu_ldl_data(env, ea+28); + env->gpr_a[12] = cpu_ldl_data(env, ea+32); + env->gpr_a[13] = cpu_ldl_data(env, ea+36); + env->gpr_a[14] = cpu_ldl_data(env, ea+40); + env->gpr_a[15] = cpu_ldl_data(env, ea+44); + env->gpr_d[12] = cpu_ldl_data(env, ea+48); + env->gpr_d[13] = cpu_ldl_data(env, ea+52); + env->gpr_d[14] = cpu_ldl_data(env, ea+56); + env->gpr_d[15] = cpu_ldl_data(env, ea+60); +} + +static void restore_context_lower(CPUTriCoreState *env, int ea, + target_ulong *ra, target_ulong *pcxi) +{ + *pcxi = cpu_ldl_data(env, ea); + *ra = cpu_ldl_data(env, ea+4); + env->gpr_a[2] = cpu_ldl_data(env, ea+8); + env->gpr_a[3] = cpu_ldl_data(env, ea+12); + env->gpr_d[0] = cpu_ldl_data(env, ea+16); + env->gpr_d[1] = cpu_ldl_data(env, ea+20); + env->gpr_d[2] = cpu_ldl_data(env, ea+24); + env->gpr_d[3] = cpu_ldl_data(env, ea+28); + env->gpr_a[4] = cpu_ldl_data(env, ea+32); + env->gpr_a[5] = cpu_ldl_data(env, ea+36); + env->gpr_a[6] = cpu_ldl_data(env, ea+40); + env->gpr_a[7] = cpu_ldl_data(env, ea+44); + env->gpr_d[4] = cpu_ldl_data(env, ea+48); + env->gpr_d[5] = cpu_ldl_data(env, ea+52); + env->gpr_d[6] = cpu_ldl_data(env, ea+56); + env->gpr_d[7] = cpu_ldl_data(env, ea+60); +} + +void helper_call(CPUTriCoreState *env, uint32_t next_pc) +{ + target_ulong tmp_FCX; + target_ulong ea; + target_ulong new_FCX; + target_ulong psw; + + psw = psw_read(env); + /* if (FCX == 0) trap(FCU); */ + if (env->FCX == 
0) { + /* FCU trap */ + } + /* if (PSW.CDE) then if (cdc_increment()) then trap(CDO); */ + if (psw & MASK_PSW_CDE) { + if (cdc_increment(&psw)) { + /* CDO trap */ + } + } + /* PSW.CDE = 1;*/ + psw |= MASK_PSW_CDE; + /* tmp_FCX = FCX; */ + tmp_FCX = env->FCX; + /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */ + ea = ((env->FCX & MASK_FCX_FCXS) << 12) + + ((env->FCX & MASK_FCX_FCXO) << 6); + /* new_FCX = M(EA, word); */ + new_FCX = cpu_ldl_data(env, ea); + /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], + A[12], A[13], A[14], A[15], D[12], D[13], D[14], + D[15]}; */ + save_context_upper(env, ea); + + /* PCXI.PCPN = ICR.CCPN; */ + env->PCXI = (env->PCXI & 0xffffff) + + ((env->ICR & MASK_ICR_CCPN) << 24); + /* PCXI.PIE = ICR.IE; */ + env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE) + + ((env->ICR & MASK_ICR_IE) << 15)); + /* PCXI.UL = 1; */ + env->PCXI |= MASK_PCXI_UL; + + /* PCXI[19: 0] = FCX[19: 0]; */ + env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); + /* FCX[19: 0] = new_FCX[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff); + /* A[11] = next_pc[31: 0]; */ + env->gpr_a[11] = next_pc; + + /* if (tmp_FCX == LCX) trap(FCD);*/ + if (tmp_FCX == env->LCX) { + /* FCD trap */ + } + psw_write(env, psw); +} + +void helper_ret(CPUTriCoreState *env) +{ + target_ulong ea; + target_ulong new_PCXI; + target_ulong new_PSW, psw; + + psw = psw_read(env); + /* if (PSW.CDE) then if (cdc_decrement()) then trap(CDU);*/ + if (env->PSW & MASK_PSW_CDE) { + if (cdc_decrement(&(env->PSW))) { + /* CDU trap */ + } + } + /* if (PCXI[19: 0] == 0) then trap(CSU); */ + if ((env->PCXI & 0xfffff) == 0) { + /* CSU trap */ + } + /* if (PCXI.UL == 0) then trap(CTYP); */ + if ((env->PCXI & MASK_PCXI_UL) == 0) { + /* CTYP trap */ + } + /* PC = {A11 [31: 1], 1’b0}; */ + env->PC = env->gpr_a[11] & 0xfffffffe; + + /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; */ + ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) + + ((env->PCXI & MASK_PCXI_PCXO) << 6); + /* {new_PCXI, new_PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12], + A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */ + restore_context_upper(env, ea, &new_PCXI, &new_PSW); + /* M(EA, word) = FCX; */ + cpu_stl_data(env, ea, env->FCX); + /* FCX[19: 0] = PCXI[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff); + /* PCXI = new_PCXI; */ + env->PCXI = new_PCXI; + + if (tricore_feature(env, TRICORE_FEATURE_13)) { + /* PSW = new_PSW */ + psw_write(env, new_PSW); + } else { + /* PSW = {new_PSW[31:26], PSW[25:24], new_PSW[23:0]}; */ + psw_write(env, (new_PSW & ~(0x3000000)) + (psw & (0x3000000))); + } +} + +void helper_bisr(CPUTriCoreState *env, uint32_t const9) +{ + target_ulong tmp_FCX; + target_ulong ea; + target_ulong new_FCX; + + if (env->FCX == 0) { + /* FCU trap */ + } + + tmp_FCX = env->FCX; + ea = ((env->FCX & 0xf0000) << 12) + ((env->FCX & 0xffff) << 6); + + /* new_FCX = M(EA, word); */ + new_FCX = cpu_ldl_data(env, ea); + /* M(EA, 16 * word) = {PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3], A[4] + , A[5], A[6], A[7], D[4], D[5], D[6], D[7]}; */ + save_context_lower(env, ea); + + + /* PCXI.PCPN = ICR.CCPN */ + env->PCXI = (env->PCXI & 0xffffff) + + ((env->ICR & MASK_ICR_CCPN) << 24); + /* PCXI.PIE = ICR.IE */ + env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE) + + ((env->ICR & MASK_ICR_IE) << 15)); + /* PCXI.UL = 0 */ + env->PCXI &= ~(MASK_PCXI_UL); + /* PCXI[19: 0] = FCX[19: 0] */ + env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); + /* FXC[19: 0] = new_FCX[19: 0] */ + env->FCX = 
(env->FCX & 0xfff00000) + (new_FCX & 0xfffff); + /* ICR.IE = 1 */ + env->ICR |= MASK_ICR_IE; + + env->ICR |= const9; /* ICR.CCPN = const9[7: 0];*/ + + if (tmp_FCX == env->LCX) { + /* FCD trap */ + } +} + +void helper_rfe(CPUTriCoreState *env) +{ + target_ulong ea; + target_ulong new_PCXI; + target_ulong new_PSW; + /* if (PCXI[19: 0] == 0) then trap(CSU); */ + if ((env->PCXI & 0xfffff) == 0) { + /* raise csu trap */ + } + /* if (PCXI.UL == 0) then trap(CTYP); */ + if ((env->PCXI & MASK_PCXI_UL) == 0) { + /* raise CTYP trap */ + } + /* if (!cdc_zero() AND PSW.CDE) then trap(NEST); */ + if (!cdc_zero(&(env->PSW)) && (env->PSW & MASK_PSW_CDE)) { + /* raise MNG trap */ + } + env->PC = env->gpr_a[11] & ~0x1; + /* ICR.IE = PCXI.PIE; */ + env->ICR = (env->ICR & ~MASK_ICR_IE) + ((env->PCXI & MASK_PCXI_PIE) >> 15); + /* ICR.CCPN = PCXI.PCPN; */ + env->ICR = (env->ICR & ~MASK_ICR_CCPN) + + ((env->PCXI & MASK_PCXI_PCPN) >> 24); + /*EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0};*/ + ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) + + ((env->PCXI & MASK_PCXI_PCXO) << 6); + /*{new_PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12], + A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */ + restore_context_upper(env, ea, &new_PCXI, &new_PSW); + /* M(EA, word) = FCX;*/ + cpu_stl_data(env, ea, env->FCX); + /* FCX[19: 0] = PCXI[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff); + /* PCXI = new_PCXI; */ + env->PCXI = new_PCXI; + /* write psw */ + psw_write(env, new_PSW); +} + +void helper_rfm(CPUTriCoreState *env) +{ + env->PC = (env->gpr_a[11] & ~0x1); + /* ICR.IE = PCXI.PIE; */ + env->ICR = (env->ICR & ~MASK_ICR_IE) | + ((env->PCXI & MASK_PCXI_PIE) >> 15); + /* ICR.CCPN = PCXI.PCPN; */ + env->ICR = (env->ICR & ~MASK_ICR_CCPN) | + ((env->PCXI & MASK_PCXI_PCPN) >> 24); + /* {PCXI, PSW, A[10], A[11]} = M(DCX, 4 * word); */ + env->PCXI = cpu_ldl_data(env, env->DCX); + psw_write(env, cpu_ldl_data(env, env->DCX+4)); + env->gpr_a[10] = cpu_ldl_data(env, env->DCX+8); + env->gpr_a[11] = cpu_ldl_data(env, env->DCX+12); + + if (tricore_feature(env, TRICORE_FEATURE_131)) { + env->DBGTCR = 0; + } +} + +void helper_ldlcx(CPUTriCoreState *env, uint32_t ea) +{ + uint32_t dummy; + /* insn doesn't load PCXI and RA */ + restore_context_lower(env, ea, &dummy, &dummy); +} + +void helper_lducx(CPUTriCoreState *env, uint32_t ea) +{ + uint32_t dummy; + /* insn doesn't load PCXI and PSW */ + restore_context_upper(env, ea, &dummy, &dummy); +} + +void helper_stlcx(CPUTriCoreState *env, uint32_t ea) +{ + save_context_lower(env, ea); +} + +void helper_stucx(CPUTriCoreState *env, uint32_t ea) +{ + save_context_upper(env, ea); +} + +void helper_svlcx(CPUTriCoreState *env) +{ + target_ulong tmp_FCX; + target_ulong ea; + target_ulong new_FCX; + + if (env->FCX == 0) { + /* FCU trap */ + } + /* tmp_FCX = FCX; */ + tmp_FCX = env->FCX; + /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */ + ea = ((env->FCX & MASK_FCX_FCXS) << 12) + + ((env->FCX & MASK_FCX_FCXO) << 6); + /* new_FCX = M(EA, word); */ + new_FCX = cpu_ldl_data(env, ea); + /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], + A[12], A[13], A[14], A[15], D[12], D[13], D[14], + D[15]}; */ + save_context_lower(env, ea); + + /* PCXI.PCPN = ICR.CCPN; */ + env->PCXI = (env->PCXI & 0xffffff) + + ((env->ICR & MASK_ICR_CCPN) << 24); + /* PCXI.PIE = ICR.IE; */ + env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE) + + ((env->ICR & MASK_ICR_IE) << 15)); + /* PCXI.UL = 0; */ + env->PCXI &= ~MASK_PCXI_UL; + + /* PCXI[19: 0] = FCX[19: 0]; */ + env->PCXI = 
(env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); + /* FCX[19: 0] = new_FCX[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff); + + /* if (tmp_FCX == LCX) trap(FCD);*/ + if (tmp_FCX == env->LCX) { + /* FCD trap */ + } +} + +void helper_rslcx(CPUTriCoreState *env) +{ + target_ulong ea; + target_ulong new_PCXI; + /* if (PCXI[19: 0] == 0) then trap(CSU); */ + if ((env->PCXI & 0xfffff) == 0) { + /* CSU trap */ + } + /* if (PCXI.UL == 1) then trap(CTYP); */ + if ((env->PCXI & MASK_PCXI_UL) != 0) { + /* CTYP trap */ + } + /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; */ + ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) + + ((env->PCXI & MASK_PCXI_PCXO) << 6); + /* {new_PCXI, A[11], A[10], A[11], D[8], D[9], D[10], D[11], A[12], + A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */ + restore_context_lower(env, ea, &env->gpr_a[11], &new_PCXI); + /* M(EA, word) = FCX; */ + cpu_stl_data(env, ea, env->FCX); + /* M(EA, word) = FCX; */ + cpu_stl_data(env, ea, env->FCX); + /* FCX[19: 0] = PCXI[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff); + /* PCXI = new_PCXI; */ + env->PCXI = new_PCXI; +} + +void helper_psw_write(CPUTriCoreState *env, uint32_t arg) +{ + psw_write(env, arg); +} + +uint32_t helper_psw_read(CPUTriCoreState *env) +{ + return psw_read(env); +} + + +static inline void QEMU_NORETURN do_raise_exception_err(CPUTriCoreState *env, + uint32_t exception, + int error_code, + uintptr_t pc) +{ + CPUState *cs = CPU(tricore_env_get_cpu(env)); + cs->exception_index = exception; + env->error_code = error_code; + + if (pc) { + /* now we have a real cpu fault */ + cpu_restore_state(cs, pc); + } + + cpu_loop_exit(cs); +} + +void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, + uintptr_t retaddr) +{ + int ret; + ret = cpu_tricore_handle_mmu_fault(cs, addr, is_write, mmu_idx); + if (ret) { + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + do_raise_exception_err(env, cs->exception_index, + env->error_code, retaddr); + } +} diff --git a/src/target-tricore/translate.c b/src/target-tricore/translate.c new file mode 100644 index 0000000..135c583 --- /dev/null +++ b/src/target-tricore/translate.c @@ -0,0 +1,8397 @@ +/* + * TriCore emulation for qemu: main translation routines. + * + * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + + +#include "cpu.h" +#include "disas/disas.h" +#include "tcg-op.h" +#include "exec/cpu_ldst.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "tricore-opcodes.h" + +/* + * TCG registers + */ +static TCGv cpu_PC; +static TCGv cpu_PCXI; +static TCGv cpu_PSW; +static TCGv cpu_ICR; +/* GPR registers */ +static TCGv cpu_gpr_a[16]; +static TCGv cpu_gpr_d[16]; +/* PSW Flag cache */ +static TCGv cpu_PSW_C; +static TCGv cpu_PSW_V; +static TCGv cpu_PSW_SV; +static TCGv cpu_PSW_AV; +static TCGv cpu_PSW_SAV; +/* CPU env */ +static TCGv_ptr cpu_env; + +#include "exec/gen-icount.h" + +static const char *regnames_a[] = { + "a0" , "a1" , "a2" , "a3" , "a4" , "a5" , + "a6" , "a7" , "a8" , "a9" , "sp" , "a11" , + "a12" , "a13" , "a14" , "a15", + }; + +static const char *regnames_d[] = { + "d0" , "d1" , "d2" , "d3" , "d4" , "d5" , + "d6" , "d7" , "d8" , "d9" , "d10" , "d11" , + "d12" , "d13" , "d14" , "d15", + }; + +typedef struct DisasContext { + struct TranslationBlock *tb; + target_ulong pc, saved_pc, next_pc; + uint32_t opcode; + int singlestep_enabled; + /* Routine used to access memory */ + int mem_idx; + uint32_t hflags, saved_hflags; + int bstate; +} DisasContext; + +enum { + + BS_NONE = 0, + BS_STOP = 1, + BS_BRANCH = 2, + BS_EXCP = 3, +}; + +enum { + MODE_LL = 0, + MODE_LU = 1, + MODE_UL = 2, + MODE_UU = 3, +}; + +void tricore_cpu_dump_state(CPUState *cs, FILE *f, + fprintf_function cpu_fprintf, int flags) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + uint32_t psw; + int i; + + psw = psw_read(env); + + cpu_fprintf(f, "PC: " TARGET_FMT_lx, env->PC); + cpu_fprintf(f, " PSW: " TARGET_FMT_lx, psw); + cpu_fprintf(f, " ICR: " TARGET_FMT_lx, env->ICR); + cpu_fprintf(f, "\nPCXI: " TARGET_FMT_lx, env->PCXI); + cpu_fprintf(f, " FCX: " TARGET_FMT_lx, env->FCX); + cpu_fprintf(f, " LCX: " TARGET_FMT_lx, env->LCX); + + for (i = 0; i < 16; ++i) { + if ((i & 3) == 0) { + cpu_fprintf(f, "\nGPR A%02d:", i); + } + cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_a[i]); + } + for (i = 0; i < 16; ++i) { + if ((i & 3) == 0) { + cpu_fprintf(f, "\nGPR D%02d:", i); + } + cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_d[i]); + } + cpu_fprintf(f, "\n"); +} + +/* + * Functions to generate micro-ops + */ + +/* Makros for generating helpers */ + +#define gen_helper_1arg(name, arg) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(arg); \ + gen_helper_##name(cpu_env, helper_tmp); \ + tcg_temp_free_i32(helper_tmp); \ + } while (0) + +#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \ + TCGv arg00 = tcg_temp_new(); \ + TCGv arg01 = tcg_temp_new(); \ + TCGv arg11 = tcg_temp_new(); \ + tcg_gen_sari_tl(arg00, arg0, 16); \ + tcg_gen_ext16s_tl(arg01, arg0); \ + tcg_gen_ext16s_tl(arg11, arg1); \ + gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \ + tcg_temp_free(arg00); \ + tcg_temp_free(arg01); \ + tcg_temp_free(arg11); \ +} while (0) + +#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \ + TCGv arg00 = tcg_temp_new(); \ + TCGv arg01 = tcg_temp_new(); \ + TCGv arg10 = tcg_temp_new(); \ + TCGv arg11 = tcg_temp_new(); \ + tcg_gen_sari_tl(arg00, arg0, 16); \ + tcg_gen_ext16s_tl(arg01, arg0); \ + tcg_gen_sari_tl(arg11, arg1, 16); \ + tcg_gen_ext16s_tl(arg10, arg1); \ + gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \ + tcg_temp_free(arg00); \ + tcg_temp_free(arg01); \ + tcg_temp_free(arg10); \ + tcg_temp_free(arg11); \ +} while (0) + +#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \ + TCGv arg00 = tcg_temp_new(); \ + TCGv arg01 = tcg_temp_new(); \ + TCGv arg10 
= tcg_temp_new(); \ + TCGv arg11 = tcg_temp_new(); \ + tcg_gen_sari_tl(arg00, arg0, 16); \ + tcg_gen_ext16s_tl(arg01, arg0); \ + tcg_gen_sari_tl(arg10, arg1, 16); \ + tcg_gen_ext16s_tl(arg11, arg1); \ + gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \ + tcg_temp_free(arg00); \ + tcg_temp_free(arg01); \ + tcg_temp_free(arg10); \ + tcg_temp_free(arg11); \ +} while (0) + +#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \ + TCGv arg00 = tcg_temp_new(); \ + TCGv arg01 = tcg_temp_new(); \ + TCGv arg11 = tcg_temp_new(); \ + tcg_gen_sari_tl(arg01, arg0, 16); \ + tcg_gen_ext16s_tl(arg00, arg0); \ + tcg_gen_sari_tl(arg11, arg1, 16); \ + gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \ + tcg_temp_free(arg00); \ + tcg_temp_free(arg01); \ + tcg_temp_free(arg11); \ +} while (0) + +#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do { \ + TCGv_i64 ret = tcg_temp_new_i64(); \ + TCGv_i64 arg1 = tcg_temp_new_i64(); \ + \ + tcg_gen_concat_i32_i64(arg1, al1, ah1); \ + gen_helper_##name(ret, arg1, arg2); \ + tcg_gen_extr_i64_i32(rl, rh, ret); \ + \ + tcg_temp_free_i64(ret); \ + tcg_temp_free_i64(arg1); \ +} while (0) + +#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do { \ + TCGv_i64 ret = tcg_temp_new_i64(); \ + \ + gen_helper_##name(ret, cpu_env, arg1, arg2); \ + tcg_gen_extr_i64_i32(rl, rh, ret); \ + \ + tcg_temp_free_i64(ret); \ +} while (0) + +#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF)) +#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \ + ((offset & 0x0fffff) << 1)) + +/* Functions for load/save to/from memory */ + +static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, + int16_t con, TCGMemOp mop) +{ + TCGv temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, r2, con); + tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop); + tcg_temp_free(temp); +} + +static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2, + int16_t con, TCGMemOp mop) +{ + TCGv temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, r2, con); + tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop); + tcg_temp_free(temp); +} + +static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) +{ + TCGv_i64 temp = tcg_temp_new_i64(); + + tcg_gen_concat_i32_i64(temp, rl, rh); + tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ); + + tcg_temp_free_i64(temp); +} + +static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, + DisasContext *ctx) +{ + TCGv temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, base, con); + gen_st_2regs_64(rh, rl, temp, ctx); + tcg_temp_free(temp); +} + +static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) +{ + TCGv_i64 temp = tcg_temp_new_i64(); + + tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ); + /* write back to two 32 bit regs */ + tcg_gen_extr_i64_i32(rl, rh, temp); + + tcg_temp_free_i64(temp); +} + +static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, + DisasContext *ctx) +{ + TCGv temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, base, con); + gen_ld_2regs_64(rh, rl, temp, ctx); + tcg_temp_free(temp); +} + +static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, + TCGMemOp mop) +{ + TCGv temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, r2, off); + tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop); + tcg_gen_mov_tl(r2, temp); + tcg_temp_free(temp); +} + +static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, + TCGMemOp mop) +{ + TCGv temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, r2, off); + tcg_gen_qemu_ld_tl(r1, temp, 
ctx->mem_idx, mop); + tcg_gen_mov_tl(r2, temp); + tcg_temp_free(temp); +} + +/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */ +static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + + /* temp = (M(EA, word) */ + tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL); + /* temp = temp & ~E[a][63:32]) */ + tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]); + /* temp2 = (E[a][31:0] & E[a][63:32]); */ + tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]); + /* temp = temp | temp2; */ + tcg_gen_or_tl(temp, temp, temp2); + /* M(EA, word) = temp; */ + tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +/* tmp = M(EA, word); + M(EA, word) = D[a]; + D[a] = tmp[31:0];*/ +static void gen_swap(DisasContext *ctx, int reg, TCGv ea) +{ + TCGv temp = tcg_temp_new(); + + tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL); + tcg_gen_mov_tl(cpu_gpr_d[reg], temp); + + tcg_temp_free(temp); +} + +static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_movcond_tl(TCG_COND_EQ, temp2, cpu_gpr_d[reg+1], temp, + cpu_gpr_d[reg], temp); + tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_mov_tl(cpu_gpr_d[reg], temp); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv temp3 = tcg_temp_new(); + + tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_and_tl(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg+1]); + tcg_gen_andc_tl(temp3, temp, cpu_gpr_d[reg+1]); + tcg_gen_or_tl(temp2, temp2, temp3); + tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_mov_tl(cpu_gpr_d[reg], temp); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); +} + + +/* We generate loads and store to core special function register (csfr) through + the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3 + makros R, A and E, which allow read-only, all and endinit protected access. + These makros also specify in which ISA version the csfr was introduced. */ +#define R(ADDRESS, REG, FEATURE) \ + case ADDRESS: \ + if (tricore_feature(env, FEATURE)) { \ + tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \ + } \ + break; +#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) +#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) +static inline void gen_mfcr(CPUTriCoreState *env, TCGv ret, int32_t offset) +{ + /* since we're caching PSW make this a special case */ + if (offset == 0xfe04) { + gen_helper_psw_read(ret, cpu_env); + } else { + switch (offset) { +#include "csfr.def" + } + } +} +#undef R +#undef A +#undef E + +#define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg, + since no execption occurs */ +#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \ + case ADDRESS: \ + if (tricore_feature(env, FEATURE)) { \ + tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \ + } \ + break; +/* Endinit protected registers + TODO: Since the endinit bit is in a register of a not yet implemented + watchdog device, we handle endinit protected registers like + all-access registers for now. 
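+   On real hardware these registers are normally writable only while the
+   ENDINIT bit in the watchdog control register is cleared.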
*/ +#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE) +static inline void gen_mtcr(CPUTriCoreState *env, DisasContext *ctx, TCGv r1, + int32_t offset) +{ + if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) { + /* since we're caching PSW make this a special case */ + if (offset == 0xfe04) { + gen_helper_psw_write(cpu_env, r1); + } else { + switch (offset) { +#include "csfr.def" + } + } + } else { + /* generate privilege trap */ + } +} + +/* Functions for arithmetic instructions */ + +static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv t0 = tcg_temp_new_i32(); + TCGv result = tcg_temp_new_i32(); + /* Addition and set V/SV bits */ + tcg_gen_add_tl(result, r1, r2); + /* calc V bit */ + tcg_gen_xor_tl(cpu_PSW_V, result, r1); + tcg_gen_xor_tl(t0, r1, r2); + tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0); + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, result, result); + tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(ret, result); + + tcg_temp_free(result); + tcg_temp_free(t0); +} + +static inline void +gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) +{ + TCGv temp = tcg_temp_new(); + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 result = tcg_temp_new_i64(); + + tcg_gen_add_i64(result, r1, r2); + /* calc v bit */ + tcg_gen_xor_i64(t1, result, r1); + tcg_gen_xor_i64(t0, r1, r2); + tcg_gen_andc_i64(t1, t1, t0); + tcg_gen_extrh_i64_i32(cpu_PSW_V, t1); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* calc AV/SAV bits */ + tcg_gen_extrh_i64_i32(temp, result); + tcg_gen_add_tl(cpu_PSW_AV, temp, temp); + tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_i64(ret, result); + + tcg_temp_free(temp); + tcg_temp_free_i64(result); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); +} + +static inline void +gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, void(*op1)(TCGv, TCGv, TCGv), + void(*op2)(TCGv, TCGv, TCGv)) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv temp3 = tcg_temp_new(); + TCGv temp4 = tcg_temp_new(); + + (*op1)(temp, r1_low, r2); + /* calc V0 bit */ + tcg_gen_xor_tl(temp2, temp, r1_low); + tcg_gen_xor_tl(temp3, r1_low, r2); + if (op1 == tcg_gen_add_tl) { + tcg_gen_andc_tl(temp2, temp2, temp3); + } else { + tcg_gen_and_tl(temp2, temp2, temp3); + } + + (*op2)(temp3, r1_high, r3); + /* calc V1 bit */ + tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high); + tcg_gen_xor_tl(temp4, r1_high, r3); + if (op2 == tcg_gen_add_tl) { + tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4); + } else { + tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4); + } + /* combine V0/V1 bits */ + tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2); + /* calc sv bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* write result */ + tcg_gen_mov_tl(ret_low, temp); + tcg_gen_mov_tl(ret_high, temp3); + /* calc AV bit */ + tcg_gen_add_tl(temp, ret_low, ret_low); + tcg_gen_xor_tl(temp, temp, ret_low); + tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high); + tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); + 
tcg_temp_free(temp4); +} + +/* ret = r2 + (r1 * r3); */ +static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) +{ + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t3 = tcg_temp_new_i64(); + + tcg_gen_ext_i32_i64(t1, r1); + tcg_gen_ext_i32_i64(t2, r2); + tcg_gen_ext_i32_i64(t3, r3); + + tcg_gen_mul_i64(t1, t1, t3); + tcg_gen_add_i64(t1, t2, t1); + + tcg_gen_extrl_i64_i32(ret, t1); + /* calc V + t1 > 0x7fffffff */ + tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL); + /* t1 < -0x80000000 */ + tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL); + tcg_gen_or_i64(t2, t2, t3); + tcg_gen_extrl_i64_i32(cpu_PSW_V, t2); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + tcg_temp_free_i64(t3); +} + +static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_madd32_d(ret, r1, r2, temp); + tcg_temp_free(temp); +} + +static inline void +gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + TCGv t3 = tcg_temp_new(); + TCGv t4 = tcg_temp_new(); + + tcg_gen_muls2_tl(t1, t2, r1, r3); + /* only the add can overflow */ + tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2); + /* calc V bit */ + tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high); + tcg_gen_xor_tl(t1, r2_high, t2); + tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1); + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, t4, t4); + tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back the result */ + tcg_gen_mov_tl(ret_low, t3); + tcg_gen_mov_tl(ret_high, t4); + + tcg_temp_free(t1); + tcg_temp_free(t2); + tcg_temp_free(t3); + tcg_temp_free(t4); +} + +static inline void +gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t3 = tcg_temp_new_i64(); + + tcg_gen_extu_i32_i64(t1, r1); + tcg_gen_concat_i32_i64(t2, r2_low, r2_high); + tcg_gen_extu_i32_i64(t3, r3); + + tcg_gen_mul_i64(t1, t1, t3); + tcg_gen_add_i64(t2, t2, t1); + /* write back result */ + tcg_gen_extr_i64_i32(ret_low, ret_high, t2); + /* only the add overflows, if t2 < t1 + calc V bit */ + tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1); + tcg_gen_extrl_i64_i32(cpu_PSW_V, t2); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + tcg_temp_free_i64(t3); +} + +static inline void +gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(temp); +} + +static inline void +gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGv temp = 
tcg_const_i32(con); + gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(temp); +} + +static inline void +gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(temp, temp2, temp64); + gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, + tcg_gen_add_tl, tcg_gen_add_tl); + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(temp, temp2, temp64); + gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, + tcg_gen_sub_tl, tcg_gen_add_tl); + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + TCGv_i64 temp64_2 = tcg_temp_new_i64(); + TCGv_i64 temp64_3 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high); + tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */ + tcg_gen_ext32s_i64(temp64, temp64); /* low */ + tcg_gen_sub_i64(temp64, temp64_2, temp64); + tcg_gen_shli_i64(temp64, temp64, 16); + + gen_add64_d(temp64_2, temp64_3, temp64); + /* write back result */ + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); + tcg_temp_free_i64(temp64_2); + tcg_temp_free_i64(temp64_3); +} + +static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2); + +static inline void +gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv temp3 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(temp, temp2, temp64); + gen_adds(ret_low, r1_low, temp); + tcg_gen_mov_tl(temp, cpu_PSW_V); + tcg_gen_mov_tl(temp3, cpu_PSW_AV); + 
gen_adds(ret_high, r1_high, temp2); + /* combine v bits */ + tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); + /* combine av bits */ + tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); + tcg_temp_free_i64(temp64); + +} + +static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2); + +static inline void +gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv temp3 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(temp, temp2, temp64); + gen_subs(ret_low, r1_low, temp); + tcg_gen_mov_tl(temp, cpu_PSW_V); + tcg_gen_mov_tl(temp3, cpu_PSW_AV); + gen_adds(ret_high, r1_high, temp2); + /* combine v bits */ + tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); + /* combine av bits */ + tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); + tcg_temp_free_i64(temp64); + +} + +static inline void +gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + TCGv_i64 temp64_2 = tcg_temp_new_i64(); + + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */ + tcg_gen_ext32s_i64(temp64, temp64); /* low */ + tcg_gen_sub_i64(temp64, temp64_2, temp64); + tcg_gen_shli_i64(temp64, temp64, 16); + tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); + + gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64); + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); + tcg_temp_free_i64(temp64_2); +} + + +static inline void +gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + TCGv_i64 temp64_2 = tcg_temp_new_i64(); + TCGv_i64 temp64_3 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); + gen_add64_d(temp64_3, temp64_2, temp64); + /* write back result */ + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); + tcg_temp_free_i64(temp64_2); + tcg_temp_free_i64(temp64_3); +} + +static inline void +gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + TCGv_i64 temp64_2 = tcg_temp_new_i64(); + switch (mode) { + case 
MODE_LL: + GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); + gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64); + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); + tcg_temp_free_i64(temp64_2); +} + +static inline void +gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, + uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + + tcg_gen_andi_tl(temp2, r1, 0xffff0000); + tcg_gen_shli_tl(temp, r1, 16); + gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void +gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_andi_tl(temp2, r1, 0xffff0000); + tcg_gen_shli_tl(temp, r1, 16); + gen_helper_addsur_h(ret, cpu_env, temp64, temp, temp2); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free_i64(temp64); +} + + +static inline void +gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, + uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + + tcg_gen_andi_tl(temp2, r1, 0xffff0000); + tcg_gen_shli_tl(temp, r1, 16); + gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void +gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, 
r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_andi_tl(temp2, r1, 0xffff0000); + tcg_gen_shli_tl(temp, r1, 16); + gen_helper_addsur_h_ssov(ret, cpu_env, temp64, temp, temp2); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +{ + TCGv temp = tcg_const_i32(n); + gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, temp); + tcg_temp_free(temp); +} + +static inline void +gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +{ + TCGv temp = tcg_const_i32(n); + gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, temp); + tcg_temp_free(temp); +} + +static inline void +gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, + uint32_t up_shift, CPUTriCoreState *env) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv temp3 = tcg_temp_new(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t3 = tcg_temp_new_i64(); + + tcg_gen_ext_i32_i64(t2, arg2); + tcg_gen_ext_i32_i64(t3, arg3); + + tcg_gen_mul_i64(t2, t2, t3); + tcg_gen_shli_i64(t2, t2, n); + + tcg_gen_ext_i32_i64(t1, arg1); + tcg_gen_sari_i64(t2, t2, up_shift); + + tcg_gen_add_i64(t3, t1, t2); + tcg_gen_extrl_i64_i32(temp3, t3); + /* calc v bit */ + tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL); + tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL); + tcg_gen_or_i64(t1, t1, t2); + tcg_gen_extrl_i64_i32(cpu_PSW_V, t1); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* We produce an overflow on the host if the mul before was + (0x80000000 * 0x80000000) << 1). If this is the + case, we negate the ovf. 
*/ + if (n == 1) { + tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000); + tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3); + tcg_gen_and_tl(temp, temp, temp2); + tcg_gen_shli_tl(temp, temp, 31); + /* negate v bit, if special condition */ + tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp); + } + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3); + tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(ret, temp3); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + tcg_temp_free_i64(t3); +} + +static inline void +gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + if (n == 0) { + tcg_gen_mul_tl(temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_shli_tl(temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(temp, temp, temp2); + } + gen_add_d(ret, arg1, temp); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void +gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + if (n == 0) { + tcg_gen_mul_tl(temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_shli_tl(temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(temp, temp, temp2); + } + gen_adds(ret, arg1, temp); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void +gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t3 = tcg_temp_new_i64(); + + if (n == 0) { + tcg_gen_mul_tl(temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_shli_tl(temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(temp, temp, temp2); + } + tcg_gen_ext_i32_i64(t2, temp); + tcg_gen_shli_i64(t2, t2, 16); + tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high); + gen_add64_d(t3, t1, t2); + /* write back result */ + tcg_gen_extr_i64_i32(rl, rh, t3); + + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + tcg_temp_free_i64(t3); + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void +gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + + if (n == 0) { + tcg_gen_mul_tl(temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_shli_tl(temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(temp, temp, temp2); + } + tcg_gen_ext_i32_i64(t2, temp); + tcg_gen_shli_i64(t2, t2, 16); + tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high); + + gen_helper_add64_ssov(t1, cpu_env, t1, t2); + tcg_gen_extr_i64_i32(rl, 
rh, t1);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+}
+
+static inline void
+gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+             TCGv arg3, uint32_t n, CPUTriCoreState *env)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t3 = tcg_temp_new_i64();
+    TCGv_i64 t4 = tcg_temp_new_i64();
+    TCGv temp, temp2;
+
+    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+    tcg_gen_ext_i32_i64(t2, arg2);
+    tcg_gen_ext_i32_i64(t3, arg3);
+
+    tcg_gen_mul_i64(t2, t2, t3);
+    if (n != 0) {
+        tcg_gen_shli_i64(t2, t2, 1);
+    }
+    tcg_gen_add_i64(t4, t1, t2);
+    /* calc v bit */
+    tcg_gen_xor_i64(t3, t4, t1);
+    tcg_gen_xor_i64(t2, t1, t2);
+    tcg_gen_andc_i64(t3, t3, t2);
+    tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
+    /* We produce an overflow on the host if the mul before was
+       (0x80000000 * 0x80000000) << 1. If this is the
+       case, we negate the ovf. */
+    if (n == 1) {
+        temp = tcg_temp_new();
+        temp2 = tcg_temp_new();
+        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
+        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
+        tcg_gen_and_tl(temp, temp, temp2);
+        tcg_gen_shli_tl(temp, temp, 31);
+        /* negate v bit, if special condition */
+        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+
+        tcg_temp_free(temp);
+        tcg_temp_free(temp2);
+    }
+    /* write back result */
+    tcg_gen_extr_i64_i32(rl, rh, t4);
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
+    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
+    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    tcg_temp_free_i64(t3);
+    tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
+              uint32_t up_shift)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t3 = tcg_temp_new_i64();
+
+    tcg_gen_ext_i32_i64(t1, arg1);
+    tcg_gen_ext_i32_i64(t2, arg2);
+    tcg_gen_ext_i32_i64(t3, arg3);
+
+    tcg_gen_mul_i64(t2, t2, t3);
+    tcg_gen_sari_i64(t2, t2, up_shift - n);
+
+    gen_helper_madd32_q_add_ssov(ret, cpu_env, t1, t2);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    tcg_temp_free_i64(t3);
+}
+
+static inline void
+gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+              TCGv arg3, uint32_t n)
+{
+    TCGv_i64 r1 = tcg_temp_new_i64();
+    TCGv temp = tcg_const_i32(n);
+
+    tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
+    gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
+    tcg_gen_extr_i64_i32(rl, rh, r1);
+
+    tcg_temp_free_i64(r1);
+    tcg_temp_free(temp);
+}
+/* ret = r2 - (r1 * r3); */
+static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t3 = tcg_temp_new_i64();
+
+    tcg_gen_ext_i32_i64(t1, r1);
+    tcg_gen_ext_i32_i64(t2, r2);
+    tcg_gen_ext_i32_i64(t3, r3);
+
+    tcg_gen_mul_i64(t1, t1, t3);
+    tcg_gen_sub_i64(t1, t2, t1);
+
+    tcg_gen_extrl_i64_i32(ret, t1);
+    /* calc V
+       result > 0x7fffffff */
+    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
+    /* result < -0x80000000 */
+    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
+    tcg_gen_or_i64(t2, t2, t3);
+    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
+    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
+    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+
tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + tcg_temp_free_i64(t3); +} + +static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_msub32_d(ret, r1, r2, temp); + tcg_temp_free(temp); +} + +static inline void +gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + TCGv t3 = tcg_temp_new(); + TCGv t4 = tcg_temp_new(); + + tcg_gen_muls2_tl(t1, t2, r1, r3); + /* only the sub can overflow */ + tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2); + /* calc V bit */ + tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high); + tcg_gen_xor_tl(t1, r2_high, t2); + tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1); + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, t4, t4); + tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back the result */ + tcg_gen_mov_tl(ret_low, t3); + tcg_gen_mov_tl(ret_high, t4); + + tcg_temp_free(t1); + tcg_temp_free(t2); + tcg_temp_free(t3); + tcg_temp_free(t4); +} + +static inline void +gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(temp); +} + +static inline void +gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t3 = tcg_temp_new_i64(); + + tcg_gen_extu_i32_i64(t1, r1); + tcg_gen_concat_i32_i64(t2, r2_low, r2_high); + tcg_gen_extu_i32_i64(t3, r3); + + tcg_gen_mul_i64(t1, t1, t3); + tcg_gen_sub_i64(t3, t2, t1); + tcg_gen_extr_i64_i32(ret_low, ret_high, t3); + /* calc V bit, only the sub can overflow, if t1 > t2 */ + tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2); + tcg_gen_extrl_i64_i32(cpu_PSW_V, t1); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + tcg_temp_free_i64(t3); +} + +static inline void +gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(temp); +} + +static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2) +{ + TCGv temp = tcg_const_i32(r2); + gen_add_d(ret, r1, temp); + tcg_temp_free(temp); +} +/* calculate the carry bit too */ +static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv t0 = tcg_temp_new_i32(); + TCGv result = tcg_temp_new_i32(); + + tcg_gen_movi_tl(t0, 0); + /* Addition and set C/V/SV bits */ + tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0); + /* calc V bit */ + tcg_gen_xor_tl(cpu_PSW_V, result, r1); + tcg_gen_xor_tl(t0, r1, r2); + tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0); + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, result, result); + tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + /* 
calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(ret, result); + + tcg_temp_free(result); + tcg_temp_free(t0); +} + +static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_add_CC(ret, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv carry = tcg_temp_new_i32(); + TCGv t0 = tcg_temp_new_i32(); + TCGv result = tcg_temp_new_i32(); + + tcg_gen_movi_tl(t0, 0); + tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0); + /* Addition, carry and set C/V/SV bits */ + tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0); + tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0); + /* calc V bit */ + tcg_gen_xor_tl(cpu_PSW_V, result, r1); + tcg_gen_xor_tl(t0, r1, r2); + tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0); + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, result, result); + tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(ret, result); + + tcg_temp_free(result); + tcg_temp_free(t0); + tcg_temp_free(carry); +} + +static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_addc_CC(ret, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, + TCGv r4) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv result = tcg_temp_new(); + TCGv mask = tcg_temp_new(); + TCGv t0 = tcg_const_i32(0); + + /* create mask for sticky bits */ + tcg_gen_setcond_tl(cond, mask, r4, t0); + tcg_gen_shli_tl(mask, mask, 31); + + tcg_gen_add_tl(result, r1, r2); + /* Calc PSW_V */ + tcg_gen_xor_tl(temp, result, r1); + tcg_gen_xor_tl(temp2, r1, r2); + tcg_gen_andc_tl(temp, temp, temp2); + tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V); + /* Set PSW_SV */ + tcg_gen_and_tl(temp, temp, mask); + tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV); + /* calc AV bit */ + tcg_gen_add_tl(temp, result, result); + tcg_gen_xor_tl(temp, temp, result); + tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_and_tl(temp, temp, mask); + tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV); + /* write back result */ + tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1); + + tcg_temp_free(t0); + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(result); + tcg_temp_free(mask); +} + +static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2, + TCGv r3, TCGv r4) +{ + TCGv temp = tcg_const_i32(r2); + gen_cond_add(cond, r1, temp, r3, r4); + tcg_temp_free(temp); +} + +static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv temp = tcg_temp_new_i32(); + TCGv result = tcg_temp_new_i32(); + + tcg_gen_sub_tl(result, r1, r2); + /* calc V bit */ + tcg_gen_xor_tl(cpu_PSW_V, result, r1); + tcg_gen_xor_tl(temp, r1, r2); + tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(cpu_PSW_AV, result, result); + tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(ret, result); + + tcg_temp_free(temp); + tcg_temp_free(result); +} + +static inline void +gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) +{ + 
TCGv temp = tcg_temp_new(); + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 result = tcg_temp_new_i64(); + + tcg_gen_sub_i64(result, r1, r2); + /* calc v bit */ + tcg_gen_xor_i64(t1, result, r1); + tcg_gen_xor_i64(t0, r1, r2); + tcg_gen_and_i64(t1, t1, t0); + tcg_gen_extrh_i64_i32(cpu_PSW_V, t1); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* calc AV/SAV bits */ + tcg_gen_extrh_i64_i32(temp, result); + tcg_gen_add_tl(cpu_PSW_AV, temp, temp); + tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_i64(ret, result); + + tcg_temp_free(temp); + tcg_temp_free_i64(result); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); +} + +static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv result = tcg_temp_new(); + TCGv temp = tcg_temp_new(); + + tcg_gen_sub_tl(result, r1, r2); + /* calc C bit */ + tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2); + /* calc V bit */ + tcg_gen_xor_tl(cpu_PSW_V, result, r1); + tcg_gen_xor_tl(temp, r1, r2); + tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(cpu_PSW_AV, result, result); + tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(ret, result); + + tcg_temp_free(result); + tcg_temp_free(temp); +} + +static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv temp = tcg_temp_new(); + tcg_gen_not_tl(temp, r2); + gen_addc_CC(ret, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, + TCGv r4) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv result = tcg_temp_new(); + TCGv mask = tcg_temp_new(); + TCGv t0 = tcg_const_i32(0); + + /* create mask for sticky bits */ + tcg_gen_setcond_tl(cond, mask, r4, t0); + tcg_gen_shli_tl(mask, mask, 31); + + tcg_gen_sub_tl(result, r1, r2); + /* Calc PSW_V */ + tcg_gen_xor_tl(temp, result, r1); + tcg_gen_xor_tl(temp2, r1, r2); + tcg_gen_and_tl(temp, temp, temp2); + tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V); + /* Set PSW_SV */ + tcg_gen_and_tl(temp, temp, mask); + tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV); + /* calc AV bit */ + tcg_gen_add_tl(temp, result, result); + tcg_gen_xor_tl(temp, temp, result); + tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_and_tl(temp, temp, mask); + tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV); + /* write back result */ + tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1); + + tcg_temp_free(t0); + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(result); + tcg_temp_free(mask); +} + +static inline void +gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(temp, temp2, temp64); + gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, + tcg_gen_sub_tl, 
tcg_gen_sub_tl); + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv temp3 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(temp, temp2, temp64); + gen_subs(ret_low, r1_low, temp); + tcg_gen_mov_tl(temp, cpu_PSW_V); + tcg_gen_mov_tl(temp3, cpu_PSW_AV); + gen_subs(ret_high, r1_high, temp2); + /* combine v bits */ + tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); + /* combine av bits */ + tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + TCGv_i64 temp64_2 = tcg_temp_new_i64(); + TCGv_i64 temp64_3 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); + gen_sub64_d(temp64_3, temp64_2, temp64); + /* write back result */ + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); + tcg_temp_free_i64(temp64_2); + tcg_temp_free_i64(temp64_3); +} + +static inline void +gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + TCGv_i64 temp64_2 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); + gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64); + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); + tcg_temp_free_i64(temp64_2); +} + +static inline void +gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, + uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) 
+{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + + tcg_gen_andi_tl(temp2, r1, 0xffff0000); + tcg_gen_shli_tl(temp, r1, 16); + gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void +gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, + uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + + tcg_gen_andi_tl(temp2, r1, 0xffff0000); + tcg_gen_shli_tl(temp, r1, 16); + gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void +gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +{ + TCGv temp = tcg_const_i32(n); + gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp); + tcg_temp_free(temp); +} + +static inline void +gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +{ + TCGv temp = tcg_const_i32(n); + gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp); + tcg_temp_free(temp); +} + +static inline void +gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, + uint32_t up_shift, CPUTriCoreState *env) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv temp3 = tcg_temp_new(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t3 = tcg_temp_new_i64(); + TCGv_i64 t4 = tcg_temp_new_i64(); + + tcg_gen_ext_i32_i64(t2, arg2); + tcg_gen_ext_i32_i64(t3, arg3); + + tcg_gen_mul_i64(t2, t2, t3); + + tcg_gen_ext_i32_i64(t1, arg1); + /* if we shift part of the fraction out, we need to round up */ + tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1); + tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0); + tcg_gen_sari_i64(t2, t2, up_shift - n); + tcg_gen_add_i64(t2, t2, t4); + + tcg_gen_sub_i64(t3, t1, t2); + tcg_gen_extrl_i64_i32(temp3, t3); + /* calc v bit */ + tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL); + tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL); + tcg_gen_or_i64(t1, t1, t2); + tcg_gen_extrl_i64_i32(cpu_PSW_V, t1); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3); + tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(ret, temp3); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + tcg_temp_free_i64(t3); + tcg_temp_free_i64(t4); +} + +static inline void +gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + if (n == 0) { + tcg_gen_mul_tl(temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_shli_tl(temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + 
tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(temp, temp, temp2); + } + gen_sub_d(ret, arg1, temp); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void +gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + if (n == 0) { + tcg_gen_mul_tl(temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_shli_tl(temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(temp, temp, temp2); + } + gen_subs(ret, arg1, temp); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void +gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t3 = tcg_temp_new_i64(); + + if (n == 0) { + tcg_gen_mul_tl(temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_shli_tl(temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(temp, temp, temp2); + } + tcg_gen_ext_i32_i64(t2, temp); + tcg_gen_shli_i64(t2, t2, 16); + tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high); + gen_sub64_d(t3, t1, t2); + /* write back result */ + tcg_gen_extr_i64_i32(rl, rh, t3); + + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + tcg_temp_free_i64(t3); + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void +gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + + if (n == 0) { + tcg_gen_mul_tl(temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_shli_tl(temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(temp, temp, temp2); + } + tcg_gen_ext_i32_i64(t2, temp); + tcg_gen_shli_i64(t2, t2, 16); + tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high); + + gen_helper_sub64_ssov(t1, cpu_env, t1, t2); + tcg_gen_extr_i64_i32(rl, rh, t1); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); +} + +static inline void +gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n, CPUTriCoreState *env) +{ + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t3 = tcg_temp_new_i64(); + TCGv_i64 t4 = tcg_temp_new_i64(); + TCGv temp, temp2; + + tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high); + tcg_gen_ext_i32_i64(t2, arg2); + tcg_gen_ext_i32_i64(t3, arg3); + + tcg_gen_mul_i64(t2, t2, t3); + if (n != 0) { + tcg_gen_shli_i64(t2, t2, 1); + } + tcg_gen_sub_i64(t4, t1, t2); + /* calc v bit */ + tcg_gen_xor_i64(t3, t4, t1); + tcg_gen_xor_i64(t2, t1, t2); + tcg_gen_and_i64(t3, t3, t2); + tcg_gen_extrh_i64_i32(cpu_PSW_V, t3); + /* We produce an overflow on the host if the mul before was + (0x80000000 * 0x80000000) << 1). If this is the + case, we negate the ovf. 
*/ + if (n == 1) { + temp = tcg_temp_new(); + temp2 = tcg_temp_new(); + tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000); + tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3); + tcg_gen_and_tl(temp, temp, temp2); + tcg_gen_shli_tl(temp, temp, 31); + /* negate v bit, if special condition */ + tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + } + /* write back result */ + tcg_gen_extr_i64_i32(rl, rh, t4); + /* Calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(cpu_PSW_AV, rh, rh); + tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + tcg_temp_free_i64(t3); + tcg_temp_free_i64(t4); +} + +static inline void +gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, + uint32_t up_shift) +{ + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + TCGv_i64 t3 = tcg_temp_new_i64(); + TCGv_i64 t4 = tcg_temp_new_i64(); + + tcg_gen_ext_i32_i64(t1, arg1); + tcg_gen_ext_i32_i64(t2, arg2); + tcg_gen_ext_i32_i64(t3, arg3); + + tcg_gen_mul_i64(t2, t2, t3); + /* if we shift part of the fraction out, we need to round up */ + tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1); + tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0); + tcg_gen_sari_i64(t3, t2, up_shift - n); + tcg_gen_add_i64(t3, t3, t4); + + gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3); + + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + tcg_temp_free_i64(t3); + tcg_temp_free_i64(t4); +} + +static inline void +gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGv_i64 r1 = tcg_temp_new_i64(); + TCGv temp = tcg_const_i32(n); + + tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high); + gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp); + tcg_gen_extr_i64_i32(rl, rh, r1); + + tcg_temp_free_i64(r1); + tcg_temp_free(temp); +} + +static inline void +gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(temp, temp2, temp64); + gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, + tcg_gen_add_tl, tcg_gen_sub_tl); + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + TCGv_i64 temp64_2 = tcg_temp_new_i64(); + TCGv_i64 temp64_3 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high); + tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */ + tcg_gen_ext32s_i64(temp64, temp64); /* low */ + 
tcg_gen_sub_i64(temp64, temp64_2, temp64); + tcg_gen_shli_i64(temp64, temp64, 16); + + gen_sub64_d(temp64_2, temp64_3, temp64); + /* write back result */ + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); + tcg_temp_free_i64(temp64_2); + tcg_temp_free_i64(temp64_3); +} + +static inline void +gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_andi_tl(temp2, r1, 0xffff0000); + tcg_gen_shli_tl(temp, r1, 16); + gen_helper_subadr_h(ret, cpu_env, temp64, temp, temp2); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv temp3 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(temp, temp2, temp64); + gen_adds(ret_low, r1_low, temp); + tcg_gen_mov_tl(temp, cpu_PSW_V); + tcg_gen_mov_tl(temp3, cpu_PSW_AV); + gen_subs(ret_high, r1_high, temp2); + /* combine v bits */ + tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); + /* combine av bits */ + tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv_i64 temp64 = tcg_temp_new_i64(); + TCGv_i64 temp64_2 = tcg_temp_new_i64(); + + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */ + tcg_gen_ext32s_i64(temp64, temp64); /* low */ + tcg_gen_sub_i64(temp64, temp64_2, temp64); + tcg_gen_shli_i64(temp64, temp64, 16); + tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); + + gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64); + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); + + tcg_temp_free(temp); + tcg_temp_free_i64(temp64); + tcg_temp_free_i64(temp64_2); +} + +static inline void +gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGv temp = tcg_const_i32(n); + TCGv temp2 = tcg_temp_new(); + TCGv_i64 temp64 = tcg_temp_new_i64(); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + 
GEN_HELPER_UU(mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_andi_tl(temp2, r1, 0xffff0000); + tcg_gen_shli_tl(temp, r1, 16); + gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free_i64(temp64); +} + +static inline void gen_abs(TCGv ret, TCGv r1) +{ + TCGv temp = tcg_temp_new(); + TCGv t0 = tcg_const_i32(0); + + tcg_gen_neg_tl(temp, r1); + tcg_gen_movcond_tl(TCG_COND_GE, ret, r1, t0, r1, temp); + /* overflow can only happen, if r1 = 0x80000000 */ + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free(temp); + tcg_temp_free(t0); +} + +static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv temp = tcg_temp_new_i32(); + TCGv result = tcg_temp_new_i32(); + + tcg_gen_sub_tl(result, r1, r2); + tcg_gen_sub_tl(temp, r2, r1); + tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp); + + /* calc V bit */ + tcg_gen_xor_tl(cpu_PSW_V, result, r1); + tcg_gen_xor_tl(temp, result, r2); + tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp); + tcg_gen_xor_tl(temp, r1, r2); + tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(cpu_PSW_AV, result, result); + tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(ret, result); + + tcg_temp_free(temp); + tcg_temp_free(result); +} + +static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_absdif(ret, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_helper_absdif_ssov(ret, cpu_env, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2) +{ + TCGv high = tcg_temp_new(); + TCGv low = tcg_temp_new(); + + tcg_gen_muls2_tl(low, high, r1, r2); + tcg_gen_mov_tl(ret, low); + /* calc V bit */ + tcg_gen_sari_tl(low, low, 31); + tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free(high); + tcg_temp_free(low); +} + +static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_mul_i32s(ret, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) +{ + tcg_gen_muls2_tl(ret_low, ret_high, r1, r2); + /* clear V bit */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); +} + +static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, + int32_t con) +{ + TCGv temp = 
tcg_const_i32(con); + gen_mul_i64s(ret_low, ret_high, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) +{ + tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2); + /* clear V bit */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); +} + +static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, + int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_mul_i64u(ret_low, ret_high, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_helper_mul_ssov(ret, cpu_env, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_helper_mul_suov(ret, cpu_env, r1, temp); + tcg_temp_free(temp); +} +/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */ +static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp); + tcg_temp_free(temp); +} + +static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp); + tcg_temp_free(temp); +} + +static void +gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift) +{ + TCGv temp = tcg_temp_new(); + TCGv_i64 temp_64 = tcg_temp_new_i64(); + TCGv_i64 temp2_64 = tcg_temp_new_i64(); + + if (n == 0) { + if (up_shift == 32) { + tcg_gen_muls2_tl(rh, rl, arg1, arg2); + } else if (up_shift == 16) { + tcg_gen_ext_i32_i64(temp_64, arg1); + tcg_gen_ext_i32_i64(temp2_64, arg2); + + tcg_gen_mul_i64(temp_64, temp_64, temp2_64); + tcg_gen_shri_i64(temp_64, temp_64, up_shift); + tcg_gen_extr_i64_i32(rl, rh, temp_64); + } else { + tcg_gen_muls2_tl(rl, rh, arg1, arg2); + } + /* reset v bit */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + } else { /* n is expected to be 1 */ + tcg_gen_ext_i32_i64(temp_64, arg1); + tcg_gen_ext_i32_i64(temp2_64, arg2); + + tcg_gen_mul_i64(temp_64, temp_64, temp2_64); + + if (up_shift == 0) { + tcg_gen_shli_i64(temp_64, temp_64, 1); + } else { + tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1); + } + tcg_gen_extr_i64_i32(rl, rh, temp_64); + /* overflow only occurs if r1 = r2 = 0x8000 */ + if (up_shift == 0) {/* result is 64 bit */ + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh, + 0x80000000); + } else { /* result is 32 bit */ + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl, + 0x80000000); + } + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* calc sv overflow bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + } + /* calc av overflow bit */ + if (up_shift == 0) { + tcg_gen_add_tl(cpu_PSW_AV, rh, rh); + tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV); + } else { + tcg_gen_add_tl(cpu_PSW_AV, rl, rl); + tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV); + } + /* calc sav overflow bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_temp_free(temp); + tcg_temp_free_i64(temp_64); + tcg_temp_free_i64(temp2_64); +} + +static void +gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) +{ + TCGv temp = tcg_temp_new(); + if (n == 0) { + tcg_gen_mul_tl(ret, arg1, arg2); + } else { /* n is expected to be 1 */ + 
tcg_gen_mul_tl(ret, arg1, arg2); + tcg_gen_shli_tl(ret, ret, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000); + tcg_gen_sub_tl(ret, ret, temp); + } + /* reset v bit */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + /* calc av overflow bit */ + tcg_gen_add_tl(cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + /* calc sav overflow bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free(temp); +} + +static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) +{ + TCGv temp = tcg_temp_new(); + if (n == 0) { + tcg_gen_mul_tl(ret, arg1, arg2); + tcg_gen_addi_tl(ret, ret, 0x8000); + } else { + tcg_gen_mul_tl(ret, arg1, arg2); + tcg_gen_shli_tl(ret, ret, 1); + tcg_gen_addi_tl(ret, ret, 0x8000); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000); + tcg_gen_muli_tl(temp, temp, 0x8001); + tcg_gen_sub_tl(ret, ret, temp); + } + /* reset v bit */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + /* calc av overflow bit */ + tcg_gen_add_tl(cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + /* calc sav overflow bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* cut halfword off */ + tcg_gen_andi_tl(ret, ret, 0xffff0000); + + tcg_temp_free(temp); +} + +static inline void +gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGv_i64 temp64 = tcg_temp_new_i64(); + tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); + gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, r3); + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(temp); +} + +static inline void +gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGv_i64 temp64 = tcg_temp_new_i64(); + tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); + gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, r3); + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(temp); +} + +static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp); + tcg_temp_free(temp); +} + +static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp); + tcg_temp_free(temp); +} + +static inline void +gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGv_i64 temp64 = tcg_temp_new_i64(); + tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); + gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, r3); + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(temp); +} + +static inline void +gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv 
r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGv_i64 temp64 = tcg_temp_new_i64(); + tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); + gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, r3); + tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); + tcg_temp_free_i64(temp64); +} + +static inline void +gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(temp); +} + +static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low) +{ + TCGv sat_neg = tcg_const_i32(low); + TCGv temp = tcg_const_i32(up); + + /* sat_neg = (arg < low ) ? low : arg; */ + tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg); + + /* ret = (sat_neg > up ) ? up : sat_neg; */ + tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg); + + tcg_temp_free(sat_neg); + tcg_temp_free(temp); +} + +static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up) +{ + TCGv temp = tcg_const_i32(up); + /* sat_neg = (arg > up ) ? up : arg; */ + tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg); + tcg_temp_free(temp); +} + +static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count) +{ + if (shift_count == -32) { + tcg_gen_movi_tl(ret, 0); + } else if (shift_count >= 0) { + tcg_gen_shli_tl(ret, r1, shift_count); + } else { + tcg_gen_shri_tl(ret, r1, -shift_count); + } +} + +static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount) +{ + TCGv temp_low, temp_high; + + if (shiftcount == -16) { + tcg_gen_movi_tl(ret, 0); + } else { + temp_high = tcg_temp_new(); + temp_low = tcg_temp_new(); + + tcg_gen_andi_tl(temp_low, r1, 0xffff); + tcg_gen_andi_tl(temp_high, r1, 0xffff0000); + gen_shi(temp_low, temp_low, shiftcount); + gen_shi(ret, temp_high, shiftcount); + tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16); + + tcg_temp_free(temp_low); + tcg_temp_free(temp_high); + } +} + +static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) +{ + uint32_t msk, msk_start; + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + TCGv t_0 = tcg_const_i32(0); + + if (shift_count == 0) { + /* Clear PSW.C and PSW.V */ + tcg_gen_movi_tl(cpu_PSW_C, 0); + tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C); + tcg_gen_mov_tl(ret, r1); + } else if (shift_count == -32) { + /* set PSW.C */ + tcg_gen_mov_tl(cpu_PSW_C, r1); + /* fill ret completly with sign bit */ + tcg_gen_sari_tl(ret, r1, 31); + /* clear PSW.V */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + } else if (shift_count > 0) { + TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count); + TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count); + + /* calc carry */ + msk_start = 32 - shift_count; + msk = ((1 << shift_count) - 1) << msk_start; + tcg_gen_andi_tl(cpu_PSW_C, r1, msk); + /* calc v/sv bits */ + tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max); + tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min); + tcg_gen_or_tl(cpu_PSW_V, temp, temp2); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* calc sv */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV); + /* do shift */ + tcg_gen_shli_tl(ret, r1, shift_count); + + tcg_temp_free(t_max); + tcg_temp_free(t_min); + } else { + /* clear PSW.V */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + /* calc carry */ + msk = (1 << -shift_count) - 1; + tcg_gen_andi_tl(cpu_PSW_C, r1, msk); + /* do shift */ + tcg_gen_sari_tl(ret, r1, -shift_count); + } + /* calc av overflow bit */ + tcg_gen_add_tl(cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + /* calc sav overflow bit 
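+       (sticky: SAV is simply ORed with AV just below, and AV's bit 31 is
+        ret[31] ^ ret[30], so SAV latches any advanced overflow seen so far)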
*/ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(t_0); +} + +static void gen_shas(TCGv ret, TCGv r1, TCGv r2) +{ + gen_helper_sha_ssov(ret, cpu_env, r1, r2); +} + +static void gen_shasi(TCGv ret, TCGv r1, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_shas(ret, r1, temp); + tcg_temp_free(temp); +} + +static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count) +{ + TCGv low, high; + + if (shift_count == 0) { + tcg_gen_mov_tl(ret, r1); + } else if (shift_count > 0) { + low = tcg_temp_new(); + high = tcg_temp_new(); + + tcg_gen_andi_tl(high, r1, 0xffff0000); + tcg_gen_shli_tl(low, r1, shift_count); + tcg_gen_shli_tl(ret, high, shift_count); + tcg_gen_deposit_tl(ret, ret, low, 0, 16); + + tcg_temp_free(low); + tcg_temp_free(high); + } else { + low = tcg_temp_new(); + high = tcg_temp_new(); + + tcg_gen_ext16s_tl(low, r1); + tcg_gen_sari_tl(low, low, -shift_count); + tcg_gen_sari_tl(ret, r1, -shift_count); + tcg_gen_deposit_tl(ret, ret, low, 0, 16); + + tcg_temp_free(low); + tcg_temp_free(high); + } + +} + +/* ret = {ret[30:0], (r1 cond r2)}; */ +static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + + tcg_gen_shli_tl(temp, ret, 1); + tcg_gen_setcond_tl(cond, temp2, r1, r2); + tcg_gen_or_tl(ret, temp, temp2); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_sh_cond(cond, ret, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2) +{ + gen_helper_add_ssov(ret, cpu_env, r1, r2); +} + +static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_helper_add_ssov(ret, cpu_env, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con) +{ + TCGv temp = tcg_const_i32(con); + gen_helper_add_suov(ret, cpu_env, r1, temp); + tcg_temp_free(temp); +} + +static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2) +{ + gen_helper_sub_ssov(ret, cpu_env, r1, r2); +} + +static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2) +{ + gen_helper_sub_suov(ret, cpu_env, r1, r2); +} + +static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2, + int pos1, int pos2, + void(*op1)(TCGv, TCGv, TCGv), + void(*op2)(TCGv, TCGv, TCGv)) +{ + TCGv temp1, temp2; + + temp1 = tcg_temp_new(); + temp2 = tcg_temp_new(); + + tcg_gen_shri_tl(temp2, r2, pos2); + tcg_gen_shri_tl(temp1, r1, pos1); + + (*op1)(temp1, temp1, temp2); + (*op2)(temp1 , ret, temp1); + + tcg_gen_deposit_tl(ret, ret, temp1, 0, 1); + + tcg_temp_free(temp1); + tcg_temp_free(temp2); +} + +/* ret = r1[pos1] op1 r2[pos2]; */ +static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2, + int pos1, int pos2, + void(*op1)(TCGv, TCGv, TCGv)) +{ + TCGv temp1, temp2; + + temp1 = tcg_temp_new(); + temp2 = tcg_temp_new(); + + tcg_gen_shri_tl(temp2, r2, pos2); + tcg_gen_shri_tl(temp1, r1, pos1); + + (*op1)(ret, temp1, temp2); + + tcg_gen_andi_tl(ret, ret, 0x1); + + tcg_temp_free(temp1); + tcg_temp_free(temp2); +} + +static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2, + void(*op)(TCGv, TCGv, TCGv)) +{ + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + /* temp = (arg1 cond arg2 )*/ + tcg_gen_setcond_tl(cond, temp, r1, r2); + /* temp2 = ret[0]*/ + tcg_gen_andi_tl(temp2, ret, 0x1); + /* temp = temp insn temp2 */ + (*op)(temp, temp, temp2); + /* ret = 
{ret[31:1], temp} */ + tcg_gen_deposit_tl(ret, ret, temp, 0, 1); + + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void +gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con, + void(*op)(TCGv, TCGv, TCGv)) +{ + TCGv temp = tcg_const_i32(con); + gen_accumulating_cond(cond, ret, r1, temp, op); + tcg_temp_free(temp); +} + +/* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/ +static inline void gen_cond_w(TCGCond cond, TCGv ret, TCGv r1, TCGv r2) +{ + tcg_gen_setcond_tl(cond, ret, r1, r2); + tcg_gen_neg_tl(ret, ret); +} + +static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con) +{ + TCGv b0 = tcg_temp_new(); + TCGv b1 = tcg_temp_new(); + TCGv b2 = tcg_temp_new(); + TCGv b3 = tcg_temp_new(); + + /* byte 0 */ + tcg_gen_andi_tl(b0, r1, 0xff); + tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff); + + /* byte 1 */ + tcg_gen_andi_tl(b1, r1, 0xff00); + tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00); + + /* byte 2 */ + tcg_gen_andi_tl(b2, r1, 0xff0000); + tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000); + + /* byte 3 */ + tcg_gen_andi_tl(b3, r1, 0xff000000); + tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000); + + /* combine them */ + tcg_gen_or_tl(ret, b0, b1); + tcg_gen_or_tl(ret, ret, b2); + tcg_gen_or_tl(ret, ret, b3); + + tcg_temp_free(b0); + tcg_temp_free(b1); + tcg_temp_free(b2); + tcg_temp_free(b3); +} + +static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con) +{ + TCGv h0 = tcg_temp_new(); + TCGv h1 = tcg_temp_new(); + + /* halfword 0 */ + tcg_gen_andi_tl(h0, r1, 0xffff); + tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff); + + /* halfword 1 */ + tcg_gen_andi_tl(h1, r1, 0xffff0000); + tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000); + + /* combine them */ + tcg_gen_or_tl(ret, h0, h1); + + tcg_temp_free(h0); + tcg_temp_free(h1); +} +/* mask = ((1 << width) -1) << pos; + ret = (r1 & ~mask) | (r2 << pos) & mask); */ +static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos) +{ + TCGv mask = tcg_temp_new(); + TCGv temp = tcg_temp_new(); + TCGv temp2 = tcg_temp_new(); + + tcg_gen_movi_tl(mask, 1); + tcg_gen_shl_tl(mask, mask, width); + tcg_gen_subi_tl(mask, mask, 1); + tcg_gen_shl_tl(mask, mask, pos); + + tcg_gen_shl_tl(temp, r2, pos); + tcg_gen_and_tl(temp, temp, mask); + tcg_gen_andc_tl(temp2, r1, mask); + tcg_gen_or_tl(ret, temp, temp2); + + tcg_temp_free(mask); + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1) +{ + TCGv_i64 temp = tcg_temp_new_i64(); + + gen_helper_bsplit(temp, r1); + tcg_gen_extr_i64_i32(rl, rh, temp); + + tcg_temp_free_i64(temp); +} + +static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1) +{ + TCGv_i64 temp = tcg_temp_new_i64(); + + gen_helper_unpack(temp, r1); + tcg_gen_extr_i64_i32(rl, rh, temp); + + tcg_temp_free_i64(temp); +} + +static inline void +gen_dvinit_b(CPUTriCoreState *env, TCGv rl, TCGv rh, TCGv r1, TCGv r2) +{ + TCGv_i64 ret = tcg_temp_new_i64(); + + if (!tricore_feature(env, TRICORE_FEATURE_131)) { + gen_helper_dvinit_b_13(ret, cpu_env, r1, r2); + } else { + gen_helper_dvinit_b_131(ret, cpu_env, r1, r2); + } + tcg_gen_extr_i64_i32(rl, rh, ret); + + tcg_temp_free_i64(ret); +} + +static inline void +gen_dvinit_h(CPUTriCoreState *env, TCGv rl, TCGv rh, TCGv r1, TCGv r2) +{ + TCGv_i64 ret = tcg_temp_new_i64(); + + if (!tricore_feature(env, TRICORE_FEATURE_131)) { + gen_helper_dvinit_h_13(ret, cpu_env, r1, r2); + } else { + gen_helper_dvinit_h_131(ret, cpu_env, r1, r2); + } + 
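+    /* both helpers return the 64-bit initial divide state; v1.3 and v1.3.1
+       specify DVINIT.H slightly differently (see the two helpers in
+       op_helper.c), hence the feature check above */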
tcg_gen_extr_i64_i32(rl, rh, ret); + + tcg_temp_free_i64(ret); +} + +static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high) +{ + TCGv temp = tcg_temp_new(); + /* calc AV bit */ + tcg_gen_add_tl(temp, arg_low, arg_low); + tcg_gen_xor_tl(temp, temp, arg_low); + tcg_gen_add_tl(cpu_PSW_AV, arg_high, arg_high); + tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, arg_high); + tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_temp_free(temp); +} + +static void gen_calc_usb_mulr_h(TCGv arg) +{ + TCGv temp = tcg_temp_new(); + /* calc AV bit */ + tcg_gen_add_tl(temp, arg, arg); + tcg_gen_xor_tl(temp, temp, arg); + tcg_gen_shli_tl(cpu_PSW_AV, temp, 16); + tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp); + /* calc SAV bit */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + /* clear V bit */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_temp_free(temp); +} + +/* helpers for generating program flow micro-ops */ + +static inline void gen_save_pc(target_ulong pc) +{ + tcg_gen_movi_tl(cpu_PC, pc); +} + +static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +{ + TranslationBlock *tb; + tb = ctx->tb; + if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) && + likely(!ctx->singlestep_enabled)) { + tcg_gen_goto_tb(n); + gen_save_pc(dest); + tcg_gen_exit_tb((uintptr_t)tb + n); + } else { + gen_save_pc(dest); + if (ctx->singlestep_enabled) { + /* raise exception debug */ + } + tcg_gen_exit_tb(0); + } +} + +static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1, + TCGv r2, int16_t address) +{ + TCGLabel *jumpLabel = gen_new_label(); + tcg_gen_brcond_tl(cond, r1, r2, jumpLabel); + + gen_goto_tb(ctx, 1, ctx->next_pc); + + gen_set_label(jumpLabel); + gen_goto_tb(ctx, 0, ctx->pc + address * 2); +} + +static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1, + int r2, int16_t address) +{ + TCGv temp = tcg_const_i32(r2); + gen_branch_cond(ctx, cond, r1, temp, address); + tcg_temp_free(temp); +} + +static void gen_loop(DisasContext *ctx, int r1, int32_t offset) +{ + TCGLabel *l1 = gen_new_label(); + + tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1); + gen_goto_tb(ctx, 1, ctx->pc + offset); + gen_set_label(l1); + gen_goto_tb(ctx, 0, ctx->next_pc); +} + +static void gen_fcall_save_ctx(DisasContext *ctx) +{ + TCGv temp = tcg_temp_new(); + + tcg_gen_addi_tl(temp, cpu_gpr_a[10], -4); + tcg_gen_qemu_st_tl(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL); + tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc); + tcg_gen_mov_tl(cpu_gpr_a[10], temp); + + tcg_temp_free(temp); +} + +static void gen_fret(DisasContext *ctx) +{ + TCGv temp = tcg_temp_new(); + + tcg_gen_andi_tl(temp, cpu_gpr_a[11], ~0x1); + tcg_gen_qemu_ld_tl(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL); + tcg_gen_addi_tl(cpu_gpr_a[10], cpu_gpr_a[10], 4); + tcg_gen_mov_tl(cpu_PC, temp); + tcg_gen_exit_tb(0); + ctx->bstate = BS_BRANCH; + + tcg_temp_free(temp); +} + +static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, + int r2 , int32_t constant , int32_t offset) +{ + TCGv temp, temp2; + int n; + + switch (opc) { +/* SB-format jumps */ + case OPC1_16_SB_J: + case OPC1_32_B_J: + gen_goto_tb(ctx, 0, ctx->pc + offset * 2); + break; + case OPC1_32_B_CALL: + case OPC1_16_SB_CALL: + gen_helper_1arg(call, ctx->next_pc); + gen_goto_tb(ctx, 0, ctx->pc + offset * 2); + break; + case OPC1_16_SB_JZ: + gen_branch_condi(ctx, TCG_COND_EQ, 
cpu_gpr_d[15], 0, offset); + break; + case OPC1_16_SB_JNZ: + gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], 0, offset); + break; +/* SBC-format jumps */ + case OPC1_16_SBC_JEQ: + gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant, offset); + break; + case OPC1_16_SBC_JNE: + gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], constant, offset); + break; +/* SBRN-format jumps */ + case OPC1_16_SBRN_JZ_T: + temp = tcg_temp_new(); + tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); + gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); + tcg_temp_free(temp); + break; + case OPC1_16_SBRN_JNZ_T: + temp = tcg_temp_new(); + tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); + gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); + tcg_temp_free(temp); + break; +/* SBR-format jumps */ + case OPC1_16_SBR_JEQ: + gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], + offset); + break; + case OPC1_16_SBR_JNE: + gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], + offset); + break; + case OPC1_16_SBR_JNZ: + gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JNZ_A: + gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset); + break; + case OPC1_16_SBR_JGEZ: + gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JGTZ: + gen_branch_condi(ctx, TCG_COND_GT, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JLEZ: + gen_branch_condi(ctx, TCG_COND_LE, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JLTZ: + gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JZ: + gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JZ_A: + gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset); + break; + case OPC1_16_SBR_LOOP: + gen_loop(ctx, r1, offset * 2 - 32); + break; +/* SR-format jumps */ + case OPC1_16_SR_JI: + tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe); + tcg_gen_exit_tb(0); + break; + case OPC2_32_SYS_RET: + case OPC2_16_SR_RET: + gen_helper_ret(cpu_env); + tcg_gen_exit_tb(0); + break; +/* B-format */ + case OPC1_32_B_CALLA: + gen_helper_1arg(call, ctx->next_pc); + gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); + break; + case OPC1_32_B_FCALL: + gen_fcall_save_ctx(ctx); + gen_goto_tb(ctx, 0, ctx->pc + offset * 2); + break; + case OPC1_32_B_FCALLA: + gen_fcall_save_ctx(ctx); + gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); + break; + case OPC1_32_B_JLA: + tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc); + /* fall through */ + case OPC1_32_B_JA: + gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); + break; + case OPC1_32_B_JL: + tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc); + gen_goto_tb(ctx, 0, ctx->pc + offset * 2); + break; +/* BOL format */ + case OPCM_32_BRC_EQ_NEQ: + if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JEQ) { + gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], constant, offset); + } else { + gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], constant, offset); + } + break; + case OPCM_32_BRC_GE: + if (MASK_OP_BRC_OP2(ctx->opcode) == OP2_32_BRC_JGE) { + gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], constant, offset); + } else { + constant = MASK_OP_BRC_CONST4(ctx->opcode); + gen_branch_condi(ctx, TCG_COND_GEU, cpu_gpr_d[r1], constant, + offset); + } + break; + case OPCM_32_BRC_JLT: + if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JLT) { + gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], constant, offset); + } else { + constant = MASK_OP_BRC_CONST4(ctx->opcode); + gen_branch_condi(ctx, 
TCG_COND_LTU, cpu_gpr_d[r1], constant, + offset); + } + break; + case OPCM_32_BRC_JNE: + temp = tcg_temp_new(); + if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) { + tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); + /* subi is unconditional */ + tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); + gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); + } else { + tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); + /* addi is unconditional */ + tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); + gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); + } + tcg_temp_free(temp); + break; +/* BRN format */ + case OPCM_32_BRN_JTT: + n = MASK_OP_BRN_N(ctx->opcode); + + temp = tcg_temp_new(); + tcg_gen_andi_tl(temp, cpu_gpr_d[r1], (1 << n)); + + if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) { + gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); + } else { + gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); + } + tcg_temp_free(temp); + break; +/* BRR Format */ + case OPCM_32_BRR_EQ_NEQ: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ) { + gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], + offset); + } else { + gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2], + offset); + } + break; + case OPCM_32_BRR_ADDR_EQ_NEQ: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ_A) { + gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_a[r1], cpu_gpr_a[r2], + offset); + } else { + gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_a[r1], cpu_gpr_a[r2], + offset); + } + break; + case OPCM_32_BRR_GE: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JGE) { + gen_branch_cond(ctx, TCG_COND_GE, cpu_gpr_d[r1], cpu_gpr_d[r2], + offset); + } else { + gen_branch_cond(ctx, TCG_COND_GEU, cpu_gpr_d[r1], cpu_gpr_d[r2], + offset); + } + break; + case OPCM_32_BRR_JLT: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JLT) { + gen_branch_cond(ctx, TCG_COND_LT, cpu_gpr_d[r1], cpu_gpr_d[r2], + offset); + } else { + gen_branch_cond(ctx, TCG_COND_LTU, cpu_gpr_d[r1], cpu_gpr_d[r2], + offset); + } + break; + case OPCM_32_BRR_LOOP: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_LOOP) { + gen_loop(ctx, r2, offset * 2); + } else { + /* OPC2_32_BRR_LOOPU */ + gen_goto_tb(ctx, 0, ctx->pc + offset * 2); + } + break; + case OPCM_32_BRR_JNE: + temp = tcg_temp_new(); + temp2 = tcg_temp_new(); + if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) { + tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); + /* also save r2, in case of r1 == r2, so r2 is not decremented */ + tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]); + /* subi is unconditional */ + tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); + gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); + } else { + tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); + /* also save r2, in case of r1 == r2, so r2 is not decremented */ + tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]); + /* addi is unconditional */ + tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); + gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); + } + tcg_temp_free(temp); + tcg_temp_free(temp2); + break; + case OPCM_32_BRR_JNZ: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNZ_A) { + gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset); + } else { + gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset); + } + break; + default: + printf("Branch Error at %x\n", ctx->pc); + } + ctx->bstate = BS_BRANCH; +} + + +/* + * Functions for decoding instructions + */ + +static void decode_src_opc(CPUTriCoreState *env, DisasContext *ctx, int op1) +{ + int r1; + int32_t const4; + TCGv temp, temp2; + + r1 = 
MASK_OP_SRC_S1D(ctx->opcode); + const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode); + + switch (op1) { + case OPC1_16_SRC_ADD: + gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_ADD_A15: + gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[15], const4); + break; + case OPC1_16_SRC_ADD_15A: + gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_ADD_A: + tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4); + break; + case OPC1_16_SRC_CADD: + gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1], + cpu_gpr_d[15]); + break; + case OPC1_16_SRC_CADDN: + gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const4, cpu_gpr_d[r1], + cpu_gpr_d[15]); + break; + case OPC1_16_SRC_CMOV: + temp = tcg_const_tl(0); + temp2 = tcg_const_tl(const4); + tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + temp2, cpu_gpr_d[r1]); + tcg_temp_free(temp); + tcg_temp_free(temp2); + break; + case OPC1_16_SRC_CMOVN: + temp = tcg_const_tl(0); + temp2 = tcg_const_tl(const4); + tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + temp2, cpu_gpr_d[r1]); + tcg_temp_free(temp); + tcg_temp_free(temp2); + break; + case OPC1_16_SRC_EQ: + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], + const4); + break; + case OPC1_16_SRC_LT: + tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1], + const4); + break; + case OPC1_16_SRC_MOV: + tcg_gen_movi_tl(cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_MOV_A: + const4 = MASK_OP_SRC_CONST4(ctx->opcode); + tcg_gen_movi_tl(cpu_gpr_a[r1], const4); + break; + case OPC1_16_SRC_MOV_E: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + tcg_gen_movi_tl(cpu_gpr_d[r1], const4); + tcg_gen_sari_tl(cpu_gpr_d[r1+1], cpu_gpr_d[r1], 31); + } /* TODO: else raise illegal opcode trap */ + break; + case OPC1_16_SRC_SH: + gen_shi(cpu_gpr_d[r1], cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_SHA: + gen_shaci(cpu_gpr_d[r1], cpu_gpr_d[r1], const4); + break; + } +} + +static void decode_srr_opc(DisasContext *ctx, int op1) +{ + int r1, r2; + TCGv temp; + + r1 = MASK_OP_SRR_S1D(ctx->opcode); + r2 = MASK_OP_SRR_S2(ctx->opcode); + + switch (op1) { + case OPC1_16_SRR_ADD: + gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_A15: + gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_15A: + gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_A: + tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_ADDS: + gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_AND: + tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_CMOV: + temp = tcg_const_tl(0); + tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + cpu_gpr_d[r2], cpu_gpr_d[r1]); + tcg_temp_free(temp); + break; + case OPC1_16_SRR_CMOVN: + temp = tcg_const_tl(0); + tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + cpu_gpr_d[r2], cpu_gpr_d[r1]); + tcg_temp_free(temp); + break; + case OPC1_16_SRR_EQ: + tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_LT: + tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV: + tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV_A: + tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV_AA: + tcg_gen_mov_tl(cpu_gpr_a[r1], 
cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_MOV_D: + tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_MUL: + gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_OR: + tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB: + gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB_A15B: + gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB_15AB: + gen_sub_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUBS: + gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_XOR: + tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + } +} + +static void decode_ssr_opc(DisasContext *ctx, int op1) +{ + int r1, r2; + + r1 = MASK_OP_SSR_S1(ctx->opcode); + r2 = MASK_OP_SSR_S2(ctx->opcode); + + switch (op1) { + case OPC1_16_SSR_ST_A: + tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + break; + case OPC1_16_SSR_ST_A_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); + break; + case OPC1_16_SSR_ST_B: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + break; + case OPC1_16_SSR_ST_B_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1); + break; + case OPC1_16_SSR_ST_H: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); + break; + case OPC1_16_SSR_ST_H_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2); + break; + case OPC1_16_SSR_ST_W: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + break; + case OPC1_16_SSR_ST_W_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); + break; + } +} + +static void decode_sc_opc(DisasContext *ctx, int op1) +{ + int32_t const16; + + const16 = MASK_OP_SC_CONST8(ctx->opcode); + + switch (op1) { + case OPC1_16_SC_AND: + tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16); + break; + case OPC1_16_SC_BISR: + gen_helper_1arg(bisr, const16 & 0xff); + break; + case OPC1_16_SC_LD_A: + gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_LD_W: + gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_MOV: + tcg_gen_movi_tl(cpu_gpr_d[15], const16); + break; + case OPC1_16_SC_OR: + tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16); + break; + case OPC1_16_SC_ST_A: + gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_ST_W: + gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_SUB_A: + tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16); + break; + } +} + +static void decode_slr_opc(DisasContext *ctx, int op1) +{ + int r1, r2; + + r1 = MASK_OP_SLR_D(ctx->opcode); + r2 = MASK_OP_SLR_S2(ctx->opcode); + + switch (op1) { +/* SLR-format */ + case OPC1_16_SLR_LD_A: + tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + break; + case OPC1_16_SLR_LD_A_POSTINC: + tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); + break; + case OPC1_16_SLR_LD_BU: + 
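+        /* zero-extending byte load (MO_UB); the POSTINC variant below also
+           advances A[b] by the access size, here 1 byte */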
tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + break; + case OPC1_16_SLR_LD_BU_POSTINC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1); + break; + case OPC1_16_SLR_LD_H: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); + break; + case OPC1_16_SLR_LD_H_POSTINC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2); + break; + case OPC1_16_SLR_LD_W: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + break; + case OPC1_16_SLR_LD_W_POSTINC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); + break; + } +} + +static void decode_sro_opc(DisasContext *ctx, int op1) +{ + int r2; + int32_t address; + + r2 = MASK_OP_SRO_S2(ctx->opcode); + address = MASK_OP_SRO_OFF4(ctx->opcode); + +/* SRO-format */ + switch (op1) { + case OPC1_16_SRO_LD_A: + gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL); + break; + case OPC1_16_SRO_LD_BU: + gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB); + break; + case OPC1_16_SRO_LD_H: + gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_LESW); + break; + case OPC1_16_SRO_LD_W: + gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL); + break; + case OPC1_16_SRO_ST_A: + gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL); + break; + case OPC1_16_SRO_ST_B: + gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB); + break; + case OPC1_16_SRO_ST_H: + gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW); + break; + case OPC1_16_SRO_ST_W: + gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL); + break; + } +} + +static void decode_sr_system(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + op2 = MASK_OP_SR_OP2(ctx->opcode); + + switch (op2) { + case OPC2_16_SR_NOP: + break; + case OPC2_16_SR_RET: + gen_compute_branch(ctx, op2, 0, 0, 0, 0); + break; + case OPC2_16_SR_RFE: + gen_helper_rfe(cpu_env); + tcg_gen_exit_tb(0); + ctx->bstate = BS_BRANCH; + break; + case OPC2_16_SR_DEBUG: + /* raise EXCP_DEBUG */ + break; + case OPC2_16_SR_FRET: + gen_fret(ctx); + } +} + +static void decode_sr_accu(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t r1; + TCGv temp; + + r1 = MASK_OP_SR_S1D(ctx->opcode); + op2 = MASK_OP_SR_OP2(ctx->opcode); + + switch (op2) { + case OPC2_16_SR_RSUB: + /* overflow only if r1 = -0x80000000 */ + temp = tcg_const_i32(-0x80000000); + /* calc V bit */ + tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* calc SV bit */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* sub */ + tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]); + /* calc av */ + tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]); + tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV); + /* calc sav */ + tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_temp_free(temp); + break; + case OPC2_16_SR_SAT_B: + gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80); + break; + case OPC2_16_SR_SAT_BU: + gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xff); + break; + case OPC2_16_SR_SAT_H: + gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7fff, -0x8000); + break; + case OPC2_16_SR_SAT_HU: + gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xffff); + break; + } +} + +static void 
decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx) +{ + int op1; + int r1, r2; + int32_t const16; + int32_t address; + TCGv temp; + + op1 = MASK_OP_MAJOR(ctx->opcode); + + /* handle ADDSC.A opcode only being 6 bit long */ + if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) { + op1 = OPC1_16_SRRS_ADDSC_A; + } + + switch (op1) { + case OPC1_16_SRC_ADD: + case OPC1_16_SRC_ADD_A15: + case OPC1_16_SRC_ADD_15A: + case OPC1_16_SRC_ADD_A: + case OPC1_16_SRC_CADD: + case OPC1_16_SRC_CADDN: + case OPC1_16_SRC_CMOV: + case OPC1_16_SRC_CMOVN: + case OPC1_16_SRC_EQ: + case OPC1_16_SRC_LT: + case OPC1_16_SRC_MOV: + case OPC1_16_SRC_MOV_A: + case OPC1_16_SRC_MOV_E: + case OPC1_16_SRC_SH: + case OPC1_16_SRC_SHA: + decode_src_opc(env, ctx, op1); + break; +/* SRR-format */ + case OPC1_16_SRR_ADD: + case OPC1_16_SRR_ADD_A15: + case OPC1_16_SRR_ADD_15A: + case OPC1_16_SRR_ADD_A: + case OPC1_16_SRR_ADDS: + case OPC1_16_SRR_AND: + case OPC1_16_SRR_CMOV: + case OPC1_16_SRR_CMOVN: + case OPC1_16_SRR_EQ: + case OPC1_16_SRR_LT: + case OPC1_16_SRR_MOV: + case OPC1_16_SRR_MOV_A: + case OPC1_16_SRR_MOV_AA: + case OPC1_16_SRR_MOV_D: + case OPC1_16_SRR_MUL: + case OPC1_16_SRR_OR: + case OPC1_16_SRR_SUB: + case OPC1_16_SRR_SUB_A15B: + case OPC1_16_SRR_SUB_15AB: + case OPC1_16_SRR_SUBS: + case OPC1_16_SRR_XOR: + decode_srr_opc(ctx, op1); + break; +/* SSR-format */ + case OPC1_16_SSR_ST_A: + case OPC1_16_SSR_ST_A_POSTINC: + case OPC1_16_SSR_ST_B: + case OPC1_16_SSR_ST_B_POSTINC: + case OPC1_16_SSR_ST_H: + case OPC1_16_SSR_ST_H_POSTINC: + case OPC1_16_SSR_ST_W: + case OPC1_16_SSR_ST_W_POSTINC: + decode_ssr_opc(ctx, op1); + break; +/* SRRS-format */ + case OPC1_16_SRRS_ADDSC_A: + r2 = MASK_OP_SRRS_S2(ctx->opcode); + r1 = MASK_OP_SRRS_S1D(ctx->opcode); + const16 = MASK_OP_SRRS_N(ctx->opcode); + temp = tcg_temp_new(); + tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16); + tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp); + tcg_temp_free(temp); + break; +/* SLRO-format */ + case OPC1_16_SLRO_LD_A: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL); + break; + case OPC1_16_SLRO_LD_BU: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB); + break; + case OPC1_16_SLRO_LD_H: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW); + break; + case OPC1_16_SLRO_LD_W: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL); + break; +/* SB-format */ + case OPC1_16_SB_CALL: + case OPC1_16_SB_J: + case OPC1_16_SB_JNZ: + case OPC1_16_SB_JZ: + address = MASK_OP_SB_DISP8_SEXT(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, 0, address); + break; +/* SBC-format */ + case OPC1_16_SBC_JEQ: + case OPC1_16_SBC_JNE: + address = MASK_OP_SBC_DISP4(ctx->opcode); + const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, const16, address); + break; +/* SBRN-format */ + case OPC1_16_SBRN_JNZ_T: + case OPC1_16_SBRN_JZ_T: + address = MASK_OP_SBRN_DISP4(ctx->opcode); + const16 = MASK_OP_SBRN_N(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, const16, address); + break; +/* SBR-format */ + case OPC1_16_SBR_JEQ: + case OPC1_16_SBR_JGEZ: + case OPC1_16_SBR_JGTZ: + case OPC1_16_SBR_JLEZ: + case OPC1_16_SBR_JLTZ: + case OPC1_16_SBR_JNE: 
+ case OPC1_16_SBR_JNZ: + case OPC1_16_SBR_JNZ_A: + case OPC1_16_SBR_JZ: + case OPC1_16_SBR_JZ_A: + case OPC1_16_SBR_LOOP: + r1 = MASK_OP_SBR_S2(ctx->opcode); + address = MASK_OP_SBR_DISP4(ctx->opcode); + gen_compute_branch(ctx, op1, r1, 0, 0, address); + break; +/* SC-format */ + case OPC1_16_SC_AND: + case OPC1_16_SC_BISR: + case OPC1_16_SC_LD_A: + case OPC1_16_SC_LD_W: + case OPC1_16_SC_MOV: + case OPC1_16_SC_OR: + case OPC1_16_SC_ST_A: + case OPC1_16_SC_ST_W: + case OPC1_16_SC_SUB_A: + decode_sc_opc(ctx, op1); + break; +/* SLR-format */ + case OPC1_16_SLR_LD_A: + case OPC1_16_SLR_LD_A_POSTINC: + case OPC1_16_SLR_LD_BU: + case OPC1_16_SLR_LD_BU_POSTINC: + case OPC1_16_SLR_LD_H: + case OPC1_16_SLR_LD_H_POSTINC: + case OPC1_16_SLR_LD_W: + case OPC1_16_SLR_LD_W_POSTINC: + decode_slr_opc(ctx, op1); + break; +/* SRO-format */ + case OPC1_16_SRO_LD_A: + case OPC1_16_SRO_LD_BU: + case OPC1_16_SRO_LD_H: + case OPC1_16_SRO_LD_W: + case OPC1_16_SRO_ST_A: + case OPC1_16_SRO_ST_B: + case OPC1_16_SRO_ST_H: + case OPC1_16_SRO_ST_W: + decode_sro_opc(ctx, op1); + break; +/* SSRO-format */ + case OPC1_16_SSRO_ST_A: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL); + break; + case OPC1_16_SSRO_ST_B: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB); + break; + case OPC1_16_SSRO_ST_H: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW); + break; + case OPC1_16_SSRO_ST_W: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL); + break; +/* SR-format */ + case OPCM_16_SR_SYSTEM: + decode_sr_system(env, ctx); + break; + case OPCM_16_SR_ACCU: + decode_sr_accu(env, ctx); + break; + case OPC1_16_SR_JI: + r1 = MASK_OP_SR_S1D(ctx->opcode); + gen_compute_branch(ctx, op1, r1, 0, 0, 0); + break; + case OPC1_16_SR_NOT: + r1 = MASK_OP_SR_S1D(ctx->opcode); + tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]); + break; + } +} + +/* + * 32 bit instructions + */ + +/* ABS-format */ +static void decode_abs_ldw(CPUTriCoreState *env, DisasContext *ctx) +{ + int32_t op2; + int32_t r1; + uint32_t address; + TCGv temp; + + r1 = MASK_OP_ABS_S1D(ctx->opcode); + address = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + temp = tcg_const_i32(EA_ABS_FORMAT(address)); + + switch (op2) { + case OPC2_32_ABS_LD_A: + tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL); + break; + case OPC2_32_ABS_LD_D: + gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); + break; + case OPC2_32_ABS_LD_DA: + gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); + break; + case OPC2_32_ABS_LD_W: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL); + break; + } + + tcg_temp_free(temp); +} + +static void decode_abs_ldb(CPUTriCoreState *env, DisasContext *ctx) +{ + int32_t op2; + int32_t r1; + uint32_t address; + TCGv temp; + + r1 = MASK_OP_ABS_S1D(ctx->opcode); + address = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + temp = tcg_const_i32(EA_ABS_FORMAT(address)); + + switch (op2) { + case OPC2_32_ABS_LD_B: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB); + break; + case OPC2_32_ABS_LD_BU: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB); + break; + 
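+    /* as with all ABS-format accesses, temp holds EA_ABS_FORMAT(off18):
+       the 18-bit constant expanded to a 32-bit absolute address, with its
+       top four bits forming address bits [31:28] */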
case OPC2_32_ABS_LD_H: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW); + break; + case OPC2_32_ABS_LD_HU: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); + break; + } + + tcg_temp_free(temp); +} + +static void decode_abs_ldst_swap(CPUTriCoreState *env, DisasContext *ctx) +{ + int32_t op2; + int32_t r1; + uint32_t address; + TCGv temp; + + r1 = MASK_OP_ABS_S1D(ctx->opcode); + address = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + temp = tcg_const_i32(EA_ABS_FORMAT(address)); + + switch (op2) { + case OPC2_32_ABS_LDMST: + gen_ldmst(ctx, r1, temp); + break; + case OPC2_32_ABS_SWAP_W: + gen_swap(ctx, r1, temp); + break; + } + + tcg_temp_free(temp); +} + +static void decode_abs_ldst_context(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int32_t off18; + + off18 = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_ABS_LDLCX: + gen_helper_1arg(ldlcx, EA_ABS_FORMAT(off18)); + break; + case OPC2_32_ABS_LDUCX: + gen_helper_1arg(lducx, EA_ABS_FORMAT(off18)); + break; + case OPC2_32_ABS_STLCX: + gen_helper_1arg(stlcx, EA_ABS_FORMAT(off18)); + break; + case OPC2_32_ABS_STUCX: + gen_helper_1arg(stucx, EA_ABS_FORMAT(off18)); + break; + } +} + +static void decode_abs_store(CPUTriCoreState *env, DisasContext *ctx) +{ + int32_t op2; + int32_t r1; + uint32_t address; + TCGv temp; + + r1 = MASK_OP_ABS_S1D(ctx->opcode); + address = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + temp = tcg_const_i32(EA_ABS_FORMAT(address)); + + switch (op2) { + case OPC2_32_ABS_ST_A: + tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL); + break; + case OPC2_32_ABS_ST_D: + gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); + break; + case OPC2_32_ABS_ST_DA: + gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); + break; + case OPC2_32_ABS_ST_W: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL); + break; + + } + tcg_temp_free(temp); +} + +static void decode_abs_storeb_h(CPUTriCoreState *env, DisasContext *ctx) +{ + int32_t op2; + int32_t r1; + uint32_t address; + TCGv temp; + + r1 = MASK_OP_ABS_S1D(ctx->opcode); + address = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + temp = tcg_const_i32(EA_ABS_FORMAT(address)); + + switch (op2) { + case OPC2_32_ABS_ST_B: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB); + break; + case OPC2_32_ABS_ST_H: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); + break; + } + tcg_temp_free(temp); +} + +/* Bit-format */ + +static void decode_bit_andacc(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3; + int pos1, pos2; + + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + op2 = MASK_OP_BIT_OP2(ctx->opcode); + + + switch (op2) { + case OPC2_32_BIT_AND_AND_T: + gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_and_tl, &tcg_gen_and_tl); + break; + case OPC2_32_BIT_AND_ANDN_T: + gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl); + break; + case OPC2_32_BIT_AND_NOR_T: + if (TCG_TARGET_HAS_andc_i32) { + gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl); + } else { + gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_nor_tl, 
&tcg_gen_and_tl); + } + break; + case OPC2_32_BIT_AND_OR_T: + gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl, &tcg_gen_and_tl); + break; + } +} + +static void decode_bit_logical_t(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3; + int pos1, pos2; + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + op2 = MASK_OP_BIT_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_BIT_AND_T: + gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_and_tl); + break; + case OPC2_32_BIT_ANDN_T: + gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_andc_tl); + break; + case OPC2_32_BIT_NOR_T: + gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_nor_tl); + break; + case OPC2_32_BIT_OR_T: + gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl); + break; + } +} + +static void decode_bit_insert(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3; + int pos1, pos2; + TCGv temp; + op2 = MASK_OP_BIT_OP2(ctx->opcode); + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + + temp = tcg_temp_new(); + + tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2); + if (op2 == OPC2_32_BIT_INSN_T) { + tcg_gen_not_tl(temp, temp); + } + tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1); + tcg_temp_free(temp); +} + +static void decode_bit_logical_t2(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + + int r1, r2, r3; + int pos1, pos2; + + op2 = MASK_OP_BIT_OP2(ctx->opcode); + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + + switch (op2) { + case OPC2_32_BIT_NAND_T: + gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_nand_tl); + break; + case OPC2_32_BIT_ORN_T: + gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_orc_tl); + break; + case OPC2_32_BIT_XNOR_T: + gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_eqv_tl); + break; + case OPC2_32_BIT_XOR_T: + gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_xor_tl); + break; + } +} + +static void decode_bit_orand(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + + int r1, r2, r3; + int pos1, pos2; + + op2 = MASK_OP_BIT_OP2(ctx->opcode); + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + + switch (op2) { + case OPC2_32_BIT_OR_AND_T: + gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl); + break; + case OPC2_32_BIT_OR_ANDN_T: + gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl); + break; + case OPC2_32_BIT_OR_NOR_T: + if (TCG_TARGET_HAS_orc_i32) { + gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl); + } else { + gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl); + } + break; + case 
OPC2_32_BIT_OR_OR_T: + gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl); + break; + } +} + +static void decode_bit_sh_logic1(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3; + int pos1, pos2; + TCGv temp; + + op2 = MASK_OP_BIT_OP2(ctx->opcode); + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + + temp = tcg_temp_new(); + + switch (op2) { + case OPC2_32_BIT_SH_AND_T: + gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_and_tl); + break; + case OPC2_32_BIT_SH_ANDN_T: + gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_andc_tl); + break; + case OPC2_32_BIT_SH_NOR_T: + gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_nor_tl); + break; + case OPC2_32_BIT_SH_OR_T: + gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl); + break; + } + tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1); + tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp); + tcg_temp_free(temp); +} + +static void decode_bit_sh_logic2(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3; + int pos1, pos2; + TCGv temp; + + op2 = MASK_OP_BIT_OP2(ctx->opcode); + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + + temp = tcg_temp_new(); + + switch (op2) { + case OPC2_32_BIT_SH_NAND_T: + gen_bit_1op(temp, cpu_gpr_d[r1] , cpu_gpr_d[r2] , + pos1, pos2, &tcg_gen_nand_tl); + break; + case OPC2_32_BIT_SH_ORN_T: + gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_orc_tl); + break; + case OPC2_32_BIT_SH_XNOR_T: + gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_eqv_tl); + break; + case OPC2_32_BIT_SH_XOR_T: + gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_xor_tl); + break; + } + tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1); + tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp); + tcg_temp_free(temp); +} + +/* BO-format */ + + +static void decode_bo_addrmode_post_pre_base(CPUTriCoreState *env, + DisasContext *ctx) +{ + uint32_t op2; + uint32_t off10; + int32_t r1, r2; + TCGv temp; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_BO_CACHEA_WI_SHORTOFF: + case OPC2_32_BO_CACHEA_W_SHORTOFF: + case OPC2_32_BO_CACHEA_I_SHORTOFF: + /* instruction to access the cache */ + break; + case OPC2_32_BO_CACHEA_WI_POSTINC: + case OPC2_32_BO_CACHEA_W_POSTINC: + case OPC2_32_BO_CACHEA_I_POSTINC: + /* instruction to access the cache, but we still need to handle + the addressing mode */ + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_CACHEA_WI_PREINC: + case OPC2_32_BO_CACHEA_W_PREINC: + case OPC2_32_BO_CACHEA_I_PREINC: + /* instruction to access the cache, but we still need to handle + the addressing mode */ + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_CACHEI_WI_SHORTOFF: + case OPC2_32_BO_CACHEI_W_SHORTOFF: + /* TODO: Raise illegal opcode trap, + if !tricore_feature(TRICORE_FEATURE_131) */ + break; + case OPC2_32_BO_CACHEI_W_POSTINC: + case OPC2_32_BO_CACHEI_WI_POSTINC: + if (tricore_feature(env, TRICORE_FEATURE_131)) { + 
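+            /* the cache itself is not modelled; only the post-increment of
+               A[b] is emulated, and only on 1.3.1, where CACHEI exists */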
tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + } /* TODO: else raise illegal opcode trap */ + break; + case OPC2_32_BO_CACHEI_W_PREINC: + case OPC2_32_BO_CACHEI_WI_PREINC: + if (tricore_feature(env, TRICORE_FEATURE_131)) { + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + } /* TODO: else raise illegal opcode trap */ + break; + case OPC2_32_BO_ST_A_SHORTOFF: + gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL); + break; + case OPC2_32_BO_ST_A_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, + MO_LESL); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_ST_A_PREINC: + gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL); + break; + case OPC2_32_BO_ST_B_SHORTOFF: + gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB); + break; + case OPC2_32_BO_ST_B_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + MO_UB); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_ST_B_PREINC: + gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB); + break; + case OPC2_32_BO_ST_D_SHORTOFF: + gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], + off10, ctx); + break; + case OPC2_32_BO_ST_D_POSTINC: + gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_ST_D_PREINC: + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); + tcg_gen_mov_tl(cpu_gpr_a[r2], temp); + tcg_temp_free(temp); + break; + case OPC2_32_BO_ST_DA_SHORTOFF: + gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], + off10, ctx); + break; + case OPC2_32_BO_ST_DA_POSTINC: + gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_ST_DA_PREINC: + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); + tcg_gen_mov_tl(cpu_gpr_a[r2], temp); + tcg_temp_free(temp); + break; + case OPC2_32_BO_ST_H_SHORTOFF: + gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); + break; + case OPC2_32_BO_ST_H_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUW); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_ST_H_PREINC: + gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); + break; + case OPC2_32_BO_ST_Q_SHORTOFF: + temp = tcg_temp_new(); + tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); + gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW); + tcg_temp_free(temp); + break; + case OPC2_32_BO_ST_Q_POSTINC: + temp = tcg_temp_new(); + tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUW); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_temp_free(temp); + break; + case OPC2_32_BO_ST_Q_PREINC: + temp = tcg_temp_new(); + tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); + gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW); + tcg_temp_free(temp); + break; + case OPC2_32_BO_ST_W_SHORTOFF: + gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); + break; + case OPC2_32_BO_ST_W_POSTINC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUL); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_ST_W_PREINC: + 
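+        /* pre-increment: store to A[b] + off10 and write that address back
+           to A[b], in contrast to POSTINC, which stores to the old A[b] */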
gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); + break; + } +} + +static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState *env, + DisasContext *ctx) +{ + uint32_t op2; + uint32_t off10; + int32_t r1, r2; + TCGv temp, temp2, temp3; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + temp = tcg_temp_new(); + temp2 = tcg_temp_new(); + temp3 = tcg_const_i32(off10); + + tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); + tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); + + switch (op2) { + case OPC2_32_BO_CACHEA_WI_BR: + case OPC2_32_BO_CACHEA_W_BR: + case OPC2_32_BO_CACHEA_I_BR: + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_CACHEA_WI_CIRC: + case OPC2_32_BO_CACHEA_W_CIRC: + case OPC2_32_BO_CACHEA_I_CIRC: + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_A_BR: + tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_A_CIRC: + tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_B_BR: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_B_CIRC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_D_BR: + gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_D_CIRC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16); + tcg_gen_addi_tl(temp, temp, 4); + tcg_gen_rem_tl(temp, temp, temp2); + tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); + tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_DA_BR: + gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_DA_CIRC: + tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16); + tcg_gen_addi_tl(temp, temp, 4); + tcg_gen_rem_tl(temp, temp, temp2); + tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); + tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_H_BR: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_H_CIRC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_Q_BR: + tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_Q_CIRC: + tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_W_BR: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, 
ctx->mem_idx, MO_LEUL); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_W_CIRC: + tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + } + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); +} + +static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState *env, + DisasContext *ctx) +{ + uint32_t op2; + uint32_t off10; + int32_t r1, r2; + TCGv temp; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_BO_LD_A_SHORTOFF: + gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL); + break; + case OPC2_32_BO_LD_A_POSTINC: + tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUL); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_A_PREINC: + gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL); + break; + case OPC2_32_BO_LD_B_SHORTOFF: + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB); + break; + case OPC2_32_BO_LD_B_POSTINC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + MO_SB); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_B_PREINC: + gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB); + break; + case OPC2_32_BO_LD_BU_SHORTOFF: + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB); + break; + case OPC2_32_BO_LD_BU_POSTINC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + MO_UB); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_BU_PREINC: + gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB); + break; + case OPC2_32_BO_LD_D_SHORTOFF: + gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], + off10, ctx); + break; + case OPC2_32_BO_LD_D_POSTINC: + gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_D_PREINC: + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); + tcg_gen_mov_tl(cpu_gpr_a[r2], temp); + tcg_temp_free(temp); + break; + case OPC2_32_BO_LD_DA_SHORTOFF: + gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], + off10, ctx); + break; + case OPC2_32_BO_LD_DA_POSTINC: + gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_DA_PREINC: + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); + tcg_gen_mov_tl(cpu_gpr_a[r2], temp); + tcg_temp_free(temp); + break; + case OPC2_32_BO_LD_H_SHORTOFF: + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW); + break; + case OPC2_32_BO_LD_H_POSTINC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + MO_LESW); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_H_PREINC: + gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW); + break; + case OPC2_32_BO_LD_HU_SHORTOFF: + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); + break; + case OPC2_32_BO_LD_HU_POSTINC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUW); + 
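+            /* post-increment: the load above used the original A[b]; the
+               offset is only added afterwards */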
tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_HU_PREINC: + gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); + break; + case OPC2_32_BO_LD_Q_SHORTOFF: + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); + tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + break; + case OPC2_32_BO_LD_Q_POSTINC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUW); + tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_Q_PREINC: + gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); + tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + break; + case OPC2_32_BO_LD_W_SHORTOFF: + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); + break; + case OPC2_32_BO_LD_W_POSTINC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUL); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_W_PREINC: + gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); + break; + } +} + +static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState *env, + DisasContext *ctx) +{ + uint32_t op2; + uint32_t off10; + int r1, r2; + + TCGv temp, temp2, temp3; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + temp = tcg_temp_new(); + temp2 = tcg_temp_new(); + temp3 = tcg_const_i32(off10); + + tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); + tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); + + + switch (op2) { + case OPC2_32_BO_LD_A_BR: + tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_A_CIRC: + tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_B_BR: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_B_CIRC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_BU_BR: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_BU_CIRC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_D_BR: + gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_D_CIRC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16); + tcg_gen_addi_tl(temp, temp, 4); + tcg_gen_rem_tl(temp, temp, temp2); + tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_DA_BR: + gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_DA_CIRC: + tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16); + tcg_gen_addi_tl(temp, temp, 4); + 
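The bit-reverse (BR) and circular (CIRC) decoders keep their bookkeeping in A[b+1]: the low half-word is the current index, the high half-word is the increment (BR) or the buffer length (CIRC), and the effective address is A[b] + index; the rem that follows here wraps index + 4 back into the buffer for the second word of a double-word access. A rough standalone sketch of the two index updates, based on the TriCore addressing-mode description rather than on this patch's helpers (reverse16 and the other names are illustrative):

    #include <stdint.h>

    /* Reverse the bit order of a 16-bit value. */
    static uint16_t reverse16(uint16_t v)
    {
        uint16_t r = 0;
        for (int i = 0; i < 16; i++) {
            r = (uint16_t)(r | (((v >> i) & 1u) << (15 - i)));
        }
        return r;
    }

    /* Bit-reverse mode: new_index = reverse(reverse(index) + reverse(incr)). */
    static uint32_t br_index_update(uint32_t a_b1)
    {
        uint16_t index = (uint16_t)a_b1;
        uint16_t incr = (uint16_t)(a_b1 >> 16);
        uint16_t new_index =
            reverse16((uint16_t)(reverse16(index) + reverse16(incr)));
        return (a_b1 & 0xffff0000u) | new_index;
    }

    /* Circular mode: new_index = (index + off) wrapped into [0, length). */
    static uint32_t circ_index_update(uint32_t a_b1, int32_t off)
    {
        uint16_t length = (uint16_t)(a_b1 >> 16);
        int32_t new_index = (int32_t)(a_b1 & 0xffff) + off;
        if (length != 0) {
            if (new_index < 0) {
                new_index += length;    /* one wrap suffices for a 10-bit offset */
            } else {
                new_index %= length;
            }
        }
        return (a_b1 & 0xffff0000u) | ((uint32_t)new_index & 0xffffu);
    }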
tcg_gen_rem_tl(temp, temp, temp2); + tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); + tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_H_BR: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_H_CIRC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_HU_BR: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_HU_CIRC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_Q_BR: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_Q_CIRC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_W_BR: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_W_CIRC: + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + } + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); +} + +static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState *env, + DisasContext *ctx) +{ + uint32_t op2; + uint32_t off10; + int r1, r2; + + TCGv temp, temp2; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + + temp = tcg_temp_new(); + temp2 = tcg_temp_new(); + + switch (op2) { + case OPC2_32_BO_LDLCX_SHORTOFF: + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_helper_ldlcx(cpu_env, temp); + break; + case OPC2_32_BO_LDMST_SHORTOFF: + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_ldmst(ctx, r1, temp); + break; + case OPC2_32_BO_LDMST_POSTINC: + gen_ldmst(ctx, r1, cpu_gpr_a[r2]); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LDMST_PREINC: + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + gen_ldmst(ctx, r1, cpu_gpr_a[r2]); + break; + case OPC2_32_BO_LDUCX_SHORTOFF: + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_helper_lducx(cpu_env, temp); + break; + case OPC2_32_BO_LEA_SHORTOFF: + tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_STLCX_SHORTOFF: + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_helper_stlcx(cpu_env, temp); + break; + case OPC2_32_BO_STUCX_SHORTOFF: + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_helper_stucx(cpu_env, temp); + break; + case OPC2_32_BO_SWAP_W_SHORTOFF: + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_swap(ctx, r1, temp); + break; + case OPC2_32_BO_SWAP_W_POSTINC: + gen_swap(ctx, r1, cpu_gpr_a[r2]); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_SWAP_W_PREINC: + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + gen_swap(ctx, r1, cpu_gpr_a[r2]); + break; + case OPC2_32_BO_CMPSWAP_W_SHORTOFF: + 
tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_cmpswap(ctx, r1, temp); + break; + case OPC2_32_BO_CMPSWAP_W_POSTINC: + gen_cmpswap(ctx, r1, cpu_gpr_a[r2]); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_CMPSWAP_W_PREINC: + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + gen_cmpswap(ctx, r1, cpu_gpr_a[r2]); + break; + case OPC2_32_BO_SWAPMSK_W_SHORTOFF: + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + gen_swapmsk(ctx, r1, temp); + break; + case OPC2_32_BO_SWAPMSK_W_POSTINC: + gen_swapmsk(ctx, r1, cpu_gpr_a[r2]); + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_SWAPMSK_W_PREINC: + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + gen_swapmsk(ctx, r1, cpu_gpr_a[r2]); + break; + } + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState *env, + DisasContext *ctx) +{ + uint32_t op2; + uint32_t off10; + int r1, r2; + + TCGv temp, temp2, temp3; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + temp = tcg_temp_new(); + temp2 = tcg_temp_new(); + temp3 = tcg_const_i32(off10); + + tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); + tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); + + switch (op2) { + case OPC2_32_BO_LDMST_BR: + gen_ldmst(ctx, r1, temp2); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LDMST_CIRC: + gen_ldmst(ctx, r1, temp2); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_SWAP_W_BR: + gen_swap(ctx, r1, temp2); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_SWAP_W_CIRC: + gen_swap(ctx, r1, temp2); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_CMPSWAP_W_BR: + gen_cmpswap(ctx, r1, temp2); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_CMPSWAP_W_CIRC: + gen_cmpswap(ctx, r1, temp2); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_SWAPMSK_W_BR: + gen_swapmsk(ctx, r1, temp2); + gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_SWAPMSK_W_CIRC: + gen_swapmsk(ctx, r1, temp2); + gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3); + break; + } + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); +} + +static void decode_bol_opc(CPUTriCoreState *env, DisasContext *ctx, int32_t op1) +{ + int r1, r2; + int32_t address; + TCGv temp; + + r1 = MASK_OP_BOL_S1D(ctx->opcode); + r2 = MASK_OP_BOL_S2(ctx->opcode); + address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode); + + switch (op1) { + case OPC1_32_BOL_LD_A_LONGOFF: + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address); + tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL); + tcg_temp_free(temp); + break; + case OPC1_32_BOL_LD_W_LONGOFF: + temp = tcg_temp_new(); + tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address); + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL); + tcg_temp_free(temp); + break; + case OPC1_32_BOL_LEA_LONGOFF: + tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address); + break; + case OPC1_32_BOL_ST_A_LONGOFF: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], address, MO_LEUL); + } else { + /* raise illegal opcode trap */ + } + break; + case OPC1_32_BOL_ST_W_LONGOFF: + gen_offset_st(ctx, 
cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUL); + break; + case OPC1_32_BOL_LD_B_LONGOFF: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB); + } else { + /* raise illegal opcode trap */ + } + break; + case OPC1_32_BOL_LD_BU_LONGOFF: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_UB); + } else { + /* raise illegal opcode trap */ + } + break; + case OPC1_32_BOL_LD_H_LONGOFF: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW); + } else { + /* raise illegal opcode trap */ + } + break; + case OPC1_32_BOL_LD_HU_LONGOFF: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUW); + } else { + /* raise illegal opcode trap */ + } + break; + case OPC1_32_BOL_ST_B_LONGOFF: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB); + } else { + /* raise illegal opcode trap */ + } + break; + case OPC1_32_BOL_ST_H_LONGOFF: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW); + } else { + /* raise illegal opcode trap */ + } + break; + } +} + +/* RC format */ +static void decode_rc_logical_shift(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2; + int32_t const9; + TCGv temp; + + r2 = MASK_OP_RC_D(ctx->opcode); + r1 = MASK_OP_RC_S1(ctx->opcode); + const9 = MASK_OP_RC_CONST9(ctx->opcode); + op2 = MASK_OP_RC_OP2(ctx->opcode); + + temp = tcg_temp_new(); + + switch (op2) { + case OPC2_32_RC_AND: + tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ANDN: + tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9); + break; + case OPC2_32_RC_NAND: + tcg_gen_movi_tl(temp, const9); + tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_NOR: + tcg_gen_movi_tl(temp, const9); + tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_OR: + tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ORN: + tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9); + break; + case OPC2_32_RC_SH: + const9 = sextract32(const9, 0, 6); + gen_shi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_H: + const9 = sextract32(const9, 0, 5); + gen_sh_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SHA: + const9 = sextract32(const9, 0, 6); + gen_shaci(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SHA_H: + const9 = sextract32(const9, 0, 5); + gen_sha_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SHAS: + gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_XNOR: + tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]); + break; + case OPC2_32_RC_XOR: + tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + } + tcg_temp_free(temp); +} + +static void decode_rc_accumulator(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2; + int16_t const9; + + TCGv temp; + + r2 = MASK_OP_RC_D(ctx->opcode); + r1 = MASK_OP_RC_S1(ctx->opcode); + const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode); + + op2 = MASK_OP_RC_OP2(ctx->opcode); + + temp = tcg_temp_new(); + + switch (op2) { + case OPC2_32_RC_ABSDIF: + gen_absdifi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case 
OPC2_32_RC_ABSDIFS: + gen_absdifsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ADD: + gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ADDC: + gen_addci_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ADDS: + gen_addsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ADDS_U: + gen_addsui(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ADDX: + gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_AND_EQ: + gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case OPC2_32_RC_AND_GE: + gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case OPC2_32_RC_AND_GE_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case OPC2_32_RC_AND_LT: + gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case OPC2_32_RC_AND_LT_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case OPC2_32_RC_AND_NE: + gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case OPC2_32_RC_EQ: + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_EQANY_B: + gen_eqany_bi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_EQANY_H: + gen_eqany_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_GE: + tcg_gen_setcondi_tl(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_GE_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_LT: + tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_LT_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_MAX: + tcg_gen_movi_tl(temp, const9); + tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, + cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_MAX_U: + tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode)); + tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, + cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_MIN: + tcg_gen_movi_tl(temp, const9); + tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, + cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_MIN_U: + tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode)); + tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, + cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_NE: + tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_OR_EQ: + gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case OPC2_32_RC_OR_GE: + gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case OPC2_32_RC_OR_GE_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case OPC2_32_RC_OR_LT: + gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case 
OPC2_32_RC_OR_LT_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case OPC2_32_RC_OR_NE: + gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case OPC2_32_RC_RSUB: + tcg_gen_movi_tl(temp, const9); + gen_sub_d(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]); + break; + case OPC2_32_RC_RSUBS: + tcg_gen_movi_tl(temp, const9); + gen_subs(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]); + break; + case OPC2_32_RC_RSUBS_U: + tcg_gen_movi_tl(temp, const9); + gen_subsu(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]); + break; + case OPC2_32_RC_SH_EQ: + gen_sh_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_GE: + gen_sh_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_GE_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_sh_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_LT: + gen_sh_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_LT_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_sh_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_NE: + gen_sh_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_XOR_EQ: + gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + case OPC2_32_RC_XOR_GE: + gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + case OPC2_32_RC_XOR_GE_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + case OPC2_32_RC_XOR_LT: + gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + case OPC2_32_RC_XOR_LT_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + case OPC2_32_RC_XOR_NE: + gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + } + tcg_temp_free(temp); +} + +static void decode_rc_serviceroutine(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t const9; + + op2 = MASK_OP_RC_OP2(ctx->opcode); + const9 = MASK_OP_RC_CONST9(ctx->opcode); + + switch (op2) { + case OPC2_32_RC_BISR: + gen_helper_1arg(bisr, const9); + break; + case OPC2_32_RC_SYSCALL: + /* TODO: Add exception generation */ + break; + } +} + +static void decode_rc_mul(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2; + int16_t const9; + + r2 = MASK_OP_RC_D(ctx->opcode); + r1 = MASK_OP_RC_S1(ctx->opcode); + const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode); + + op2 = MASK_OP_RC_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_RC_MUL_32: + gen_muli_i32s(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_MUL_64: + gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_MULS_32: + gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_MUL_U_64: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_MULS_U_32: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_mulsui_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + break; + } +} + +/* RCPW format */ +static void 
decode_rcpw_insert(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2; + int32_t pos, width, const4; + + TCGv temp; + + op2 = MASK_OP_RCPW_OP2(ctx->opcode); + r1 = MASK_OP_RCPW_S1(ctx->opcode); + r2 = MASK_OP_RCPW_D(ctx->opcode); + const4 = MASK_OP_RCPW_CONST4(ctx->opcode); + width = MASK_OP_RCPW_WIDTH(ctx->opcode); + pos = MASK_OP_RCPW_POS(ctx->opcode); + + switch (op2) { + case OPC2_32_RCPW_IMASK: + /* if pos + width > 31 undefined result */ + if (pos + width <= 31) { + tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos); + tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos)); + } + break; + case OPC2_32_RCPW_INSERT: + /* if pos + width > 32 undefined result */ + if (pos + width <= 32) { + temp = tcg_const_i32(const4); + tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width); + tcg_temp_free(temp); + } + break; + } +} + +/* RCRW format */ + +static void decode_rcrw_insert(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r3, r4; + int32_t width, const4; + + TCGv temp, temp2, temp3; + + op2 = MASK_OP_RCRW_OP2(ctx->opcode); + r1 = MASK_OP_RCRW_S1(ctx->opcode); + r3 = MASK_OP_RCRW_S3(ctx->opcode); + r4 = MASK_OP_RCRW_D(ctx->opcode); + width = MASK_OP_RCRW_WIDTH(ctx->opcode); + const4 = MASK_OP_RCRW_CONST4(ctx->opcode); + + temp = tcg_temp_new(); + temp2 = tcg_temp_new(); + + switch (op2) { + case OPC2_32_RCRW_IMASK: + tcg_gen_andi_tl(temp, cpu_gpr_d[r4], 0x1f); + tcg_gen_movi_tl(temp2, (1 << width) - 1); + tcg_gen_shl_tl(cpu_gpr_d[r3 + 1], temp2, temp); + tcg_gen_movi_tl(temp2, const4); + tcg_gen_shl_tl(cpu_gpr_d[r3], temp2, temp); + break; + case OPC2_32_RCRW_INSERT: + temp3 = tcg_temp_new(); + + tcg_gen_movi_tl(temp, width); + tcg_gen_movi_tl(temp2, const4); + tcg_gen_andi_tl(temp3, cpu_gpr_d[r4], 0x1f); + gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp2, temp, temp3); + + tcg_temp_free(temp3); + break; + } + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +/* RCR format */ + +static void decode_rcr_cond_select(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r3, r4; + int32_t const9; + + TCGv temp, temp2; + + op2 = MASK_OP_RCR_OP2(ctx->opcode); + r1 = MASK_OP_RCR_S1(ctx->opcode); + const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode); + r3 = MASK_OP_RCR_S3(ctx->opcode); + r4 = MASK_OP_RCR_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RCR_CADD: + gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const9, cpu_gpr_d[r3], + cpu_gpr_d[r4]); + break; + case OPC2_32_RCR_CADDN: + gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const9, cpu_gpr_d[r3], + cpu_gpr_d[r4]); + break; + case OPC2_32_RCR_SEL: + temp = tcg_const_i32(0); + temp2 = tcg_const_i32(const9); + tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, + cpu_gpr_d[r1], temp2); + tcg_temp_free(temp); + tcg_temp_free(temp2); + break; + case OPC2_32_RCR_SELN: + temp = tcg_const_i32(0); + temp2 = tcg_const_i32(const9); + tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, + cpu_gpr_d[r1], temp2); + tcg_temp_free(temp); + tcg_temp_free(temp2); + break; + } +} + +static void decode_rcr_madd(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r3, r4; + int32_t const9; + + + op2 = MASK_OP_RCR_OP2(ctx->opcode); + r1 = MASK_OP_RCR_S1(ctx->opcode); + const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode); + r3 = MASK_OP_RCR_S3(ctx->opcode); + r4 = MASK_OP_RCR_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RCR_MADD_32: + gen_maddi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MADD_64: + 
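The IMASK and INSERT decoders above build a (value, mask) register pair that LDMST later applies as a read-modify-write of a bit-field in memory. A small sketch of that pairing and of the merge LDMST performs, assuming width < 32 (struct and function names are illustrative, not from this patch):

    #include <stdint.h>

    struct imask_pair {
        uint32_t value;   /* field value, already shifted to pos */
        uint32_t mask;    /* set bits mark the field being replaced */
    };

    /* IMASK: mask = ((1 << width) - 1) << pos, value = const4 << pos. */
    static struct imask_pair make_imask(uint32_t const4, int pos, int width)
    {
        struct imask_pair p;
        p.mask = ((1u << width) - 1u) << pos;
        p.value = const4 << pos;
        return p;
    }

    /* The merge LDMST performs on the loaded word. */
    static uint32_t ldmst_merge(uint32_t mem, struct imask_pair p)
    {
        return (mem & ~p.mask) | (p.value & p.mask);
    }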
gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MADDS_32: + gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MADDS_64: + gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MADD_U_64: + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MADDS_U_32: + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_maddsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MADDS_U_64: + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + break; + } +} + +static void decode_rcr_msub(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r3, r4; + int32_t const9; + + + op2 = MASK_OP_RCR_OP2(ctx->opcode); + r1 = MASK_OP_RCR_S1(ctx->opcode); + const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode); + r3 = MASK_OP_RCR_S3(ctx->opcode); + r4 = MASK_OP_RCR_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RCR_MSUB_32: + gen_msubi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MSUB_64: + gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MSUBS_32: + gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MSUBS_64: + gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MSUB_U_64: + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MSUBS_U_32: + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_msubsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MSUBS_U_64: + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + break; + } +} + +/* RLC format */ + +static void decode_rlc_opc(CPUTriCoreState *env, DisasContext *ctx, + uint32_t op1) +{ + int32_t const16; + int r1, r2; + + const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode); + r1 = MASK_OP_RLC_S1(ctx->opcode); + r2 = MASK_OP_RLC_D(ctx->opcode); + + switch (op1) { + case OPC1_32_RLC_ADDI: + gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16); + break; + case OPC1_32_RLC_ADDIH: + gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16); + break; + case OPC1_32_RLC_ADDIH_A: + tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16); + break; + case OPC1_32_RLC_MFCR: + const16 = MASK_OP_RLC_CONST16(ctx->opcode); + gen_mfcr(env, cpu_gpr_d[r2], const16); + break; + case OPC1_32_RLC_MOV: + tcg_gen_movi_tl(cpu_gpr_d[r2], const16); + break; + case OPC1_32_RLC_MOV_64: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + if ((r2 & 0x1) != 0) { + /* TODO: raise OPD trap */ + } + tcg_gen_movi_tl(cpu_gpr_d[r2], const16); + tcg_gen_movi_tl(cpu_gpr_d[r2+1], const16 >> 15); + } else { + /* TODO: raise illegal opcode trap */ + } + break; + case OPC1_32_RLC_MOV_U: + const16 = MASK_OP_RLC_CONST16(ctx->opcode); + tcg_gen_movi_tl(cpu_gpr_d[r2], const16); + break; + case OPC1_32_RLC_MOV_H: + tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16); 
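MOVH, ADDIH and ADDIH.A in the RLC decoder place const16 in the upper half-word of the destination; toolchains typically pair such an instruction with a sign-extended 16-bit addend to materialize a full 32-bit constant or address. A sketch of that split (this is how the instructions are commonly used, not something the decoder computes itself):

    #include <stdint.h>

    /* Split value so that ((uint32_t)hi << 16) + (int32_t)lo == value,
       compensating for the sign extension of the low half. */
    static void split_movh_addi(uint32_t value, uint16_t *hi, int16_t *lo)
    {
        *lo = (int16_t)(value & 0xffffu);
        *hi = (uint16_t)((value - (uint32_t)(int32_t)*lo) >> 16);
    }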
+ break; + case OPC1_32_RLC_MOVH_A: + tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16); + break; + case OPC1_32_RLC_MTCR: + const16 = MASK_OP_RLC_CONST16(ctx->opcode); + gen_mtcr(env, ctx, cpu_gpr_d[r1], const16); + break; + } +} + +/* RR format */ +static void decode_rr_accumulator(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r3, r2, r1; + + r3 = MASK_OP_RR_D(ctx->opcode); + r2 = MASK_OP_RR_S2(ctx->opcode); + r1 = MASK_OP_RR_S1(ctx->opcode); + op2 = MASK_OP_RR_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_RR_ABS: + gen_abs(cpu_gpr_d[r3], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABS_B: + gen_helper_abs_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABS_H: + gen_helper_abs_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSDIF: + gen_absdif(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSDIF_B: + gen_helper_absdif_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSDIF_H: + gen_helper_absdif_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSDIFS: + gen_helper_absdif_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSDIFS_H: + gen_helper_absdif_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSS: + gen_helper_abs_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSS_H: + gen_helper_abs_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADD: + gen_add_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADD_B: + gen_helper_add_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADD_H: + gen_helper_add_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDC: + gen_addc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDS: + gen_adds(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDS_H: + gen_helper_add_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDS_HU: + gen_helper_add_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDS_U: + gen_helper_add_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDX: + gen_add_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_AND_EQ: + gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_AND_GE: + gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_AND_GE_U: + gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_AND_LT: + gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_AND_LT_U: + gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_AND_NE: + gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_EQ: + tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_EQ_B: + gen_helper_eq_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_EQ_H: + 
gen_helper_eq_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_EQ_W: + gen_cond_w(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_EQANY_B: + gen_helper_eqany_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_EQANY_H: + gen_helper_eqany_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_GE: + tcg_gen_setcond_tl(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_GE_U: + tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT: + tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_U: + tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_B: + gen_helper_lt_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_BU: + gen_helper_lt_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_H: + gen_helper_lt_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_HU: + gen_helper_lt_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_W: + gen_cond_w(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_WU: + gen_cond_w(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX: + tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX_U: + tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX_B: + gen_helper_max_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX_BU: + gen_helper_max_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX_H: + gen_helper_max_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX_HU: + gen_helper_max_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN: + tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN_U: + tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN_B: + gen_helper_min_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN_BU: + gen_helper_min_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN_H: + gen_helper_min_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN_HU: + gen_helper_min_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MOV: + tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_NE: + tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_OR_EQ: + gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_or_tl); + break; + case OPC2_32_RR_OR_GE: + gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_or_tl); + break; + case OPC2_32_RR_OR_GE_U: + gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_or_tl); + break; + case OPC2_32_RR_OR_LT: + gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_or_tl); + 
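The AND.cc/OR.cc/XOR.cc cases in this decoder are accumulating comparisons: only bit 0 of the destination changes, combining its previous value with the fresh comparison result while bits 31..1 are preserved. A minimal sketch for the OR/EQ flavour (function name is illustrative):

    #include <stdint.h>

    /* OR.EQ-style accumulation: D[c] = { D[c][31:1], D[c][0] | (a == b) }. */
    static uint32_t or_eq_accumulate(uint32_t dest, uint32_t a, uint32_t b)
    {
        uint32_t cond = (a == b) ? 1u : 0u;
        return (dest & ~1u) | ((dest & 1u) | cond);
    }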
break; + case OPC2_32_RR_OR_LT_U: + gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_or_tl); + break; + case OPC2_32_RR_OR_NE: + gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_or_tl); + break; + case OPC2_32_RR_SAT_B: + gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7f, -0x80); + break; + case OPC2_32_RR_SAT_BU: + gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xff); + break; + case OPC2_32_RR_SAT_H: + gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7fff, -0x8000); + break; + case OPC2_32_RR_SAT_HU: + gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xffff); + break; + case OPC2_32_RR_SH_EQ: + gen_sh_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_GE: + gen_sh_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_GE_U: + gen_sh_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_LT: + gen_sh_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_LT_U: + gen_sh_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_NE: + gen_sh_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUB: + gen_sub_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUB_B: + gen_helper_sub_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUB_H: + gen_helper_sub_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBC: + gen_subc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBS: + gen_subs(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBS_U: + gen_subsu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBS_H: + gen_helper_sub_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBS_HU: + gen_helper_sub_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBX: + gen_sub_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_XOR_EQ: + gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + case OPC2_32_RR_XOR_GE: + gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + case OPC2_32_RR_XOR_GE_U: + gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + case OPC2_32_RR_XOR_LT: + gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + case OPC2_32_RR_XOR_LT_U: + gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + case OPC2_32_RR_XOR_NE: + gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + } +} + +static void decode_rr_logical_shift(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r3, r2, r1; + TCGv temp; + + r3 = MASK_OP_RR_D(ctx->opcode); + r2 = MASK_OP_RR_S2(ctx->opcode); + r1 = MASK_OP_RR_S1(ctx->opcode); + + temp = tcg_temp_new(); + op2 = MASK_OP_RR_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_RR_AND: + tcg_gen_and_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ANDN: + tcg_gen_andc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); 
+ break; + case OPC2_32_RR_CLO: + gen_helper_clo(cpu_gpr_d[r3], cpu_gpr_d[r1]); + break; + case OPC2_32_RR_CLO_H: + gen_helper_clo_h(cpu_gpr_d[r3], cpu_gpr_d[r1]); + break; + case OPC2_32_RR_CLS: + gen_helper_cls(cpu_gpr_d[r3], cpu_gpr_d[r1]); + break; + case OPC2_32_RR_CLS_H: + gen_helper_cls_h(cpu_gpr_d[r3], cpu_gpr_d[r1]); + break; + case OPC2_32_RR_CLZ: + gen_helper_clz(cpu_gpr_d[r3], cpu_gpr_d[r1]); + break; + case OPC2_32_RR_CLZ_H: + gen_helper_clz_h(cpu_gpr_d[r3], cpu_gpr_d[r1]); + break; + case OPC2_32_RR_NAND: + tcg_gen_nand_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_NOR: + tcg_gen_nor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_OR: + tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ORN: + tcg_gen_orc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH: + gen_helper_sh(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_H: + gen_helper_sh_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SHA: + gen_helper_sha(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SHA_H: + gen_helper_sha_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SHAS: + gen_shas(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_XNOR: + tcg_gen_eqv_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_XOR: + tcg_gen_xor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + } + tcg_temp_free(temp); +} + +static void decode_rr_address(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2, n; + int r1, r2, r3; + TCGv temp; + + op2 = MASK_OP_RR_OP2(ctx->opcode); + r3 = MASK_OP_RR_D(ctx->opcode); + r2 = MASK_OP_RR_S2(ctx->opcode); + r1 = MASK_OP_RR_S1(ctx->opcode); + n = MASK_OP_RR_N(ctx->opcode); + + switch (op2) { + case OPC2_32_RR_ADD_A: + tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]); + break; + case OPC2_32_RR_ADDSC_A: + temp = tcg_temp_new(); + tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n); + tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp); + tcg_temp_free(temp); + break; + case OPC2_32_RR_ADDSC_AT: + temp = tcg_temp_new(); + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3); + tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp); + tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC); + tcg_temp_free(temp); + break; + case OPC2_32_RR_EQ_A: + tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], + cpu_gpr_a[r2]); + break; + case OPC2_32_RR_EQZ: + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0); + break; + case OPC2_32_RR_GE_A: + tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1], + cpu_gpr_a[r2]); + break; + case OPC2_32_RR_LT_A: + tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1], + cpu_gpr_a[r2]); + break; + case OPC2_32_RR_MOV_A: + tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MOV_AA: + tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_a[r2]); + break; + case OPC2_32_RR_MOV_D: + tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_a[r2]); + break; + case OPC2_32_RR_NE_A: + tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], + cpu_gpr_a[r2]); + break; + case OPC2_32_RR_NEZ_A: + tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0); + break; + case OPC2_32_RR_SUB_A: + tcg_gen_sub_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]); + break; + } +} + +static void decode_rr_idirect(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1; + + op2 = 
MASK_OP_RR_OP2(ctx->opcode); + r1 = MASK_OP_RR_S1(ctx->opcode); + + switch (op2) { + case OPC2_32_RR_JI: + tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1); + break; + case OPC2_32_RR_JLI: + tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc); + tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1); + break; + case OPC2_32_RR_CALLI: + gen_helper_1arg(call, ctx->next_pc); + tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1); + break; + case OPC2_32_RR_FCALLI: + gen_fcall_save_ctx(ctx); + tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1); + break; + } + tcg_gen_exit_tb(0); + ctx->bstate = BS_BRANCH; +} + +static void decode_rr_divide(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3; + + TCGv temp, temp2, temp3; + + op2 = MASK_OP_RR_OP2(ctx->opcode); + r3 = MASK_OP_RR_D(ctx->opcode); + r2 = MASK_OP_RR_S2(ctx->opcode); + r1 = MASK_OP_RR_S1(ctx->opcode); + + switch (op2) { + case OPC2_32_RR_BMERGE: + gen_helper_bmerge(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR_BSPLIT: + gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]); + break; + case OPC2_32_RR_DVINIT_B: + gen_dvinit_b(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_DVINIT_BU: + temp = tcg_temp_new(); + temp2 = tcg_temp_new(); + temp3 = tcg_temp_new(); + + tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 8); + /* reset av */ + tcg_gen_movi_tl(cpu_PSW_AV, 0); + if (!tricore_feature(env, TRICORE_FEATURE_131)) { + /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */ + tcg_gen_neg_tl(temp, temp3); + /* use cpu_PSW_AV to compare against 0 */ + tcg_gen_movcond_tl(TCG_COND_LT, temp, temp3, cpu_PSW_AV, + temp, temp3); + tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV, + temp2, cpu_gpr_d[r2]); + tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2); + } else { + /* overflow = (D[b] == 0) */ + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); + } + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* sv */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* write result */ + tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24); + tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); + break; + case OPC2_32_RR_DVINIT_H: + gen_dvinit_h(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR_DVINIT_HU: + temp = tcg_temp_new(); + temp2 = tcg_temp_new(); + temp3 = tcg_temp_new(); + + tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 16); + /* reset av */ + tcg_gen_movi_tl(cpu_PSW_AV, 0); + if (!tricore_feature(env, TRICORE_FEATURE_131)) { + /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */ + tcg_gen_neg_tl(temp, temp3); + /* use cpu_PSW_AV to compare against 0 */ + tcg_gen_movcond_tl(TCG_COND_LT, temp, temp3, cpu_PSW_AV, + temp, temp3); + tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV, + temp2, cpu_gpr_d[r2]); + tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2); + } else { + /* overflow = (D[b] == 0) */ + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); + } + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* sv */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* write result */ + tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16); + tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3); + tcg_temp_free(temp); + tcg_temp_free(temp2); + tcg_temp_free(temp3); + break; + case OPC2_32_RR_DVINIT: + temp = tcg_temp_new(); + temp2 = tcg_temp_new(); + /* overflow = ((D[b] == 0) || + 
((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */ + tcg_gen_setcondi_tl(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff); + tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000); + tcg_gen_and_tl(temp, temp, temp2); + tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0); + tcg_gen_or_tl(cpu_PSW_V, temp, temp2); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* sv */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* reset av */ + tcg_gen_movi_tl(cpu_PSW_AV, 0); + /* write result */ + tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); + /* sign extend to high reg */ + tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31); + tcg_temp_free(temp); + tcg_temp_free(temp2); + break; + case OPC2_32_RR_DVINIT_U: + /* overflow = (D[b] == 0) */ + tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); + tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + /* sv */ + tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + /* reset av */ + tcg_gen_movi_tl(cpu_PSW_AV, 0); + /* write result */ + tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); + /* zero extend to high reg*/ + tcg_gen_movi_tl(cpu_gpr_d[r3+1], 0); + break; + case OPC2_32_RR_PARITY: + gen_helper_parity(cpu_gpr_d[r3], cpu_gpr_d[r1]); + break; + case OPC2_32_RR_UNPACK: + gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]); + break; + case OPC2_32_RR_CRC32: + if (tricore_feature(env, TRICORE_FEATURE_161)) { + gen_helper_crc32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + } /* TODO: else raise illegal opcode trap */ + break; + case OPC2_32_RR_DIV: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + } /* TODO: else raise illegal opcode trap */ + break; + case OPC2_32_RR_DIV_U: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3+1], + cpu_gpr_d[r1], cpu_gpr_d[r2]); + } /* TODO: else raise illegal opcode trap */ + break; + } +} + +/* RR1 Format */ +static void decode_rr1_mul(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + + int r1, r2, r3; + TCGv n; + TCGv_i64 temp64; + + r1 = MASK_OP_RR1_S1(ctx->opcode); + r2 = MASK_OP_RR1_S2(ctx->opcode); + r3 = MASK_OP_RR1_D(ctx->opcode); + n = tcg_const_i32(MASK_OP_RR1_N(ctx->opcode)); + op2 = MASK_OP_RR1_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_RR1_MUL_H_32_LL: + temp64 = tcg_temp_new_i64(); + GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); + tcg_temp_free_i64(temp64); + break; + case OPC2_32_RR1_MUL_H_32_LU: + temp64 = tcg_temp_new_i64(); + GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); + tcg_temp_free_i64(temp64); + break; + case OPC2_32_RR1_MUL_H_32_UL: + temp64 = tcg_temp_new_i64(); + GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); + tcg_temp_free_i64(temp64); + break; + case OPC2_32_RR1_MUL_H_32_UU: + temp64 = tcg_temp_new_i64(); + GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); + tcg_temp_free_i64(temp64); + break; + case OPC2_32_RR1_MULM_H_64_LL: + temp64 = tcg_temp_new_i64(); + GEN_HELPER_LL(mulm_h, 
temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + /* reset V bit */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + /* reset AV bit */ + tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); + tcg_temp_free_i64(temp64); + break; + case OPC2_32_RR1_MULM_H_64_LU: + temp64 = tcg_temp_new_i64(); + GEN_HELPER_LU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + /* reset V bit */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + /* reset AV bit */ + tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); + tcg_temp_free_i64(temp64); + break; + case OPC2_32_RR1_MULM_H_64_UL: + temp64 = tcg_temp_new_i64(); + GEN_HELPER_UL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + /* reset V bit */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + /* reset AV bit */ + tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); + tcg_temp_free_i64(temp64); + break; + case OPC2_32_RR1_MULM_H_64_UU: + temp64 = tcg_temp_new_i64(); + GEN_HELPER_UU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + /* reset V bit */ + tcg_gen_movi_tl(cpu_PSW_V, 0); + /* reset AV bit */ + tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); + tcg_temp_free_i64(temp64); + + break; + case OPC2_32_RR1_MULR_H_16_LL: + GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n); + gen_calc_usb_mulr_h(cpu_gpr_d[r3]); + break; + case OPC2_32_RR1_MULR_H_16_LU: + GEN_HELPER_LU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n); + gen_calc_usb_mulr_h(cpu_gpr_d[r3]); + break; + case OPC2_32_RR1_MULR_H_16_UL: + GEN_HELPER_UL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n); + gen_calc_usb_mulr_h(cpu_gpr_d[r3]); + break; + case OPC2_32_RR1_MULR_H_16_UU: + GEN_HELPER_UU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n); + gen_calc_usb_mulr_h(cpu_gpr_d[r3]); + break; + } + tcg_temp_free(n); +} + +static void decode_rr1_mulq(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3; + uint32_t n; + + TCGv temp, temp2; + + r1 = MASK_OP_RR1_S1(ctx->opcode); + r2 = MASK_OP_RR1_S2(ctx->opcode); + r3 = MASK_OP_RR1_D(ctx->opcode); + n = MASK_OP_RR1_N(ctx->opcode); + op2 = MASK_OP_RR1_OP2(ctx->opcode); + + temp = tcg_temp_new(); + temp2 = tcg_temp_new(); + + switch (op2) { + case OPC2_32_RR1_MUL_Q_32: + gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2], n, 32); + break; + case OPC2_32_RR1_MUL_Q_64: + gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, 0); + break; + case OPC2_32_RR1_MUL_Q_32_L: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16); + break; + case OPC2_32_RR1_MUL_Q_64_L: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0); + break; + case OPC2_32_RR1_MUL_Q_32_U: + tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16); + break; + case OPC2_32_RR1_MUL_Q_64_U: + tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0); + break; + case OPC2_32_RR1_MUL_Q_32_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RR1_MUL_Q_32_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RR1_MULR_Q_32_L: + 
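The MULR.Q cases here are rounded Q-format (fractional) multiplies. As a generic illustration of that arithmetic, and not of the exact TriCore PSW/saturation behaviour, a Q15 x Q15 -> Q15 multiply with fractional doubling and round-to-nearest looks like this:

    #include <stdint.h>

    /* Q15 fractional multiply: double the raw product (Q30 -> Q31),
       round to nearest, and saturate the -1.0 * -1.0 corner case. */
    static int16_t q15_mul_round(int16_t a, int16_t b)
    {
        int64_t p = (int64_t)a * b * 2;   /* Q31 product */
        p += 0x8000;                      /* round to nearest */
        int32_t r = (int32_t)(p >> 16);   /* back to Q15 */
        if (r > INT16_MAX) {
            r = INT16_MAX;
        }
        return (int16_t)r;
    }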
tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RR1_MULR_Q_32_U: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n); + break; + } + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +/* RR2 format */ +static void decode_rr2_mul(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3; + + op2 = MASK_OP_RR2_OP2(ctx->opcode); + r1 = MASK_OP_RR2_S1(ctx->opcode); + r2 = MASK_OP_RR2_S2(ctx->opcode); + r3 = MASK_OP_RR2_D(ctx->opcode); + switch (op2) { + case OPC2_32_RR2_MUL_32: + gen_mul_i32s(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + break; + case OPC2_32_RR2_MUL_64: + gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR2_MULS_32: + gen_helper_mul_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR2_MUL_U_64: + gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + case OPC2_32_RR2_MULS_U_32: + gen_helper_mul_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r2]); + break; + } +} + +/* RRPW format */ +static void decode_rrpw_extract_insert(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3; + int32_t pos, width; + + op2 = MASK_OP_RRPW_OP2(ctx->opcode); + r1 = MASK_OP_RRPW_S1(ctx->opcode); + r2 = MASK_OP_RRPW_S2(ctx->opcode); + r3 = MASK_OP_RRPW_D(ctx->opcode); + pos = MASK_OP_RRPW_POS(ctx->opcode); + width = MASK_OP_RRPW_WIDTH(ctx->opcode); + + switch (op2) { + case OPC2_32_RRPW_EXTR: + if (pos + width <= 31) { + /* optimize special cases */ + if ((pos == 0) && (width == 8)) { + tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); + } else if ((pos == 0) && (width == 16)) { + tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); + } else { + tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 32 - pos - width); + tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width); + } + } + break; + case OPC2_32_RRPW_EXTR_U: + if (width == 0) { + tcg_gen_movi_tl(cpu_gpr_d[r3], 0); + } else { + tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos); + tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32-width)); + } + break; + case OPC2_32_RRPW_IMASK: + if (pos + width <= 31) { + tcg_gen_movi_tl(cpu_gpr_d[r3+1], ((1u << width) - 1) << pos); + tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos); + } + break; + case OPC2_32_RRPW_INSERT: + if (pos + width <= 31) { + tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + width, pos); + } + break; + } +} + +/* RRR format */ +static void decode_rrr_cond_select(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3, r4; + TCGv temp; + + op2 = MASK_OP_RRR_OP2(ctx->opcode); + r1 = MASK_OP_RRR_S1(ctx->opcode); + r2 = MASK_OP_RRR_S2(ctx->opcode); + r3 = MASK_OP_RRR_S3(ctx->opcode); + r4 = MASK_OP_RRR_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RRR_CADD: + gen_cond_add(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2], + cpu_gpr_d[r4], cpu_gpr_d[r3]); + break; + case OPC2_32_RRR_CADDN: + gen_cond_add(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4], + cpu_gpr_d[r3]); + break; + case OPC2_32_RRR_CSUB: + gen_cond_sub(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4], + cpu_gpr_d[r3]); + break; + case OPC2_32_RRR_CSUBN: + gen_cond_sub(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4], + cpu_gpr_d[r3]); + break; + case OPC2_32_RRR_SEL: + temp = tcg_const_i32(0); + 
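The EXTR and EXTR.U cases above extract a 'width'-bit field starting at 'pos': the signed form shifts the field up to the top of the word and arithmetic-shifts it back down so the field's MSB becomes the sign, while the unsigned form shifts down and masks. The same idiom in plain C, assuming 0 < width, pos + width <= 32, and the usual arithmetic right shift for signed values:

    #include <stdint.h>

    static int32_t extr_signed(uint32_t x, unsigned pos, unsigned width)
    {
        return (int32_t)(x << (32 - pos - width)) >> (32 - width);
    }

    static uint32_t extr_unsigned(uint32_t x, unsigned pos, unsigned width)
    {
        return (x >> pos) & (~0u >> (32 - width));
    }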
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, + cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_temp_free(temp); + break; + case OPC2_32_RRR_SELN: + temp = tcg_const_i32(0); + tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, + cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_temp_free(temp); + break; + } +} + +static void decode_rrr_divide(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + + int r1, r2, r3, r4; + + op2 = MASK_OP_RRR_OP2(ctx->opcode); + r1 = MASK_OP_RRR_S1(ctx->opcode); + r2 = MASK_OP_RRR_S2(ctx->opcode); + r3 = MASK_OP_RRR_S3(ctx->opcode); + r4 = MASK_OP_RRR_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RRR_DVADJ: + GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_DVSTEP: + GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_DVSTEP_U: + GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_IXMAX: + GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_IXMAX_U: + GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_IXMIN: + GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_IXMIN_U: + GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_PACK: + gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1]); + break; + } +} + +/* RRR2 format */ +static void decode_rrr2_madd(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t r1, r2, r3, r4; + + op2 = MASK_OP_RRR2_OP2(ctx->opcode); + r1 = MASK_OP_RRR2_S1(ctx->opcode); + r2 = MASK_OP_RRR2_S2(ctx->opcode); + r3 = MASK_OP_RRR2_S3(ctx->opcode); + r4 = MASK_OP_RRR2_D(ctx->opcode); + switch (op2) { + case OPC2_32_RRR2_MADD_32: + gen_madd32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], + cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADD_64: + gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADDS_32: + gen_helper_madd32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADDS_64: + gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADD_U_64: + gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADDS_U_32: + gen_helper_madd32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADDS_U_64: + gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + } +} + +static void decode_rrr2_msub(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t r1, r2, r3, r4; + + op2 = MASK_OP_RRR2_OP2(ctx->opcode); + r1 = MASK_OP_RRR2_S1(ctx->opcode); + r2 = MASK_OP_RRR2_S2(ctx->opcode); + r3 = MASK_OP_RRR2_S3(ctx->opcode); + r4 = MASK_OP_RRR2_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RRR2_MSUB_32: + gen_msub32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], + 
cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUB_64: + gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUBS_32: + gen_helper_msub32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUBS_64: + gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUB_U_64: + gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUBS_U_32: + gen_helper_msub32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUBS_U_64: + gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + break; + } +} + +/* RRR1 format */ +static void decode_rrr1_madd(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t r1, r2, r3, r4, n; + + op2 = MASK_OP_RRR1_OP2(ctx->opcode); + r1 = MASK_OP_RRR1_S1(ctx->opcode); + r2 = MASK_OP_RRR1_S2(ctx->opcode); + r3 = MASK_OP_RRR1_S3(ctx->opcode); + r4 = MASK_OP_RRR1_D(ctx->opcode); + n = MASK_OP_RRR1_N(ctx->opcode); + + switch (op2) { + case OPC2_32_RRR1_MADD_H_LL: + gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADD_H_LU: + gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADD_H_UL: + gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADD_H_UU: + gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDS_H_LL: + gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDS_H_LU: + gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDS_H_UL: + gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDS_H_UU: + gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDM_H_LL: + gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDM_H_LU: + gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDM_H_UL: + gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDM_H_UU: + gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDMS_H_LL: + gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDMS_H_LU: + gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], 
cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDMS_H_UL: + gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDMS_H_UU: + gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDR_H_LL: + gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDR_H_LU: + gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDR_H_UL: + gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDR_H_UU: + gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDRS_H_LL: + gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDRS_H_LU: + gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDRS_H_UL: + gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDRS_H_UU: + gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UU); + break; + } +} + +static void decode_rrr1_maddq_h(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t r1, r2, r3, r4, n; + TCGv temp, temp2; + + op2 = MASK_OP_RRR1_OP2(ctx->opcode); + r1 = MASK_OP_RRR1_S1(ctx->opcode); + r2 = MASK_OP_RRR1_S2(ctx->opcode); + r3 = MASK_OP_RRR1_S3(ctx->opcode); + r4 = MASK_OP_RRR1_D(ctx->opcode); + n = MASK_OP_RRR1_N(ctx->opcode); + + temp = tcg_const_i32(n); + temp2 = tcg_temp_new(); + + switch (op2) { + case OPC2_32_RRR1_MADD_Q_32: + gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, 32, env); + break; + case OPC2_32_RRR1_MADD_Q_64: + gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, env); + break; + case OPC2_32_RRR1_MADD_Q_32_L: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + temp, n, 16, env); + break; + case OPC2_32_RRR1_MADD_Q_64_L: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + n, env); + break; + case OPC2_32_RRR1_MADD_Q_32_U: + tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + temp, n, 16, env); + break; + case OPC2_32_RRR1_MADD_Q_64_U: + tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + n, env); + break; + case OPC2_32_RRR1_MADD_Q_32_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MADD_Q_64_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], temp, temp2, n); + break; + case OPC2_32_RRR1_MADD_Q_32_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MADD_Q_64_UU: + 
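+        /* Q-format halfword selection: the _L/_U variants use the low/high
+           halfword of d[r2], the _LL/_UU variants the low/high halfwords of
+           both d[r1] and d[r2]; the ext16s/sari sequences in these cases
+           sign-extend the selected halfword before the multiply-accumulate. */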
tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDS_Q_32: + gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, 32); + break; + case OPC2_32_RRR1_MADDS_Q_64: + gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n); + break; + case OPC2_32_RRR1_MADDS_Q_32_L: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + temp, n, 16); + break; + case OPC2_32_RRR1_MADDS_Q_64_L: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + n); + break; + case OPC2_32_RRR1_MADDS_Q_32_U: + tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + temp, n, 16); + break; + case OPC2_32_RRR1_MADDS_Q_64_U: + tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + n); + break; + case OPC2_32_RRR1_MADDS_Q_32_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDS_Q_64_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDS_Q_32_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDS_Q_64_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDR_H_64_UL: + gen_maddr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1], + cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2); + break; + case OPC2_32_RRR1_MADDRS_H_64_UL: + gen_maddr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1], + cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2); + break; + case OPC2_32_RRR1_MADDR_Q_32_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDR_Q_32_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDRS_Q_32_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDRS_Q_32_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + } + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static void decode_rrr1_maddsu_h(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t r1, r2, r3, r4, n; + + op2 = MASK_OP_RRR1_OP2(ctx->opcode); + r1 = MASK_OP_RRR1_S1(ctx->opcode); + r2 = MASK_OP_RRR1_S2(ctx->opcode); + r3 = MASK_OP_RRR1_S3(ctx->opcode); + r4 = MASK_OP_RRR1_D(ctx->opcode); + n = MASK_OP_RRR1_N(ctx->opcode); + + switch (op2) { + case 
OPC2_32_RRR1_MADDSU_H_32_LL: + gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDSU_H_32_LU: + gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDSU_H_32_UL: + gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDSU_H_32_UU: + gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDSUS_H_32_LL: + gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LL); + break; + case OPC2_32_RRR1_MADDSUS_H_32_LU: + gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LU); + break; + case OPC2_32_RRR1_MADDSUS_H_32_UL: + gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UL); + break; + case OPC2_32_RRR1_MADDSUS_H_32_UU: + gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UU); + break; + case OPC2_32_RRR1_MADDSUM_H_64_LL: + gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LL); + break; + case OPC2_32_RRR1_MADDSUM_H_64_LU: + gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LU); + break; + case OPC2_32_RRR1_MADDSUM_H_64_UL: + gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UL); + break; + case OPC2_32_RRR1_MADDSUM_H_64_UU: + gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UU); + break; + case OPC2_32_RRR1_MADDSUMS_H_64_LL: + gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LL); + break; + case OPC2_32_RRR1_MADDSUMS_H_64_LU: + gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LU); + break; + case OPC2_32_RRR1_MADDSUMS_H_64_UL: + gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UL); + break; + case OPC2_32_RRR1_MADDSUMS_H_64_UU: + gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UU); + break; + case OPC2_32_RRR1_MADDSUR_H_16_LL: + gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDSUR_H_16_LU: + gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDSUR_H_16_UL: + gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDSUR_H_16_UU: + gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDSURS_H_16_LL: + gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDSURS_H_16_LU: + gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + 
cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDSURS_H_16_UL: + gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDSURS_H_16_UU: + gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UU); + break; + } +} + +static void decode_rrr1_msub(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t r1, r2, r3, r4, n; + + op2 = MASK_OP_RRR1_OP2(ctx->opcode); + r1 = MASK_OP_RRR1_S1(ctx->opcode); + r2 = MASK_OP_RRR1_S2(ctx->opcode); + r3 = MASK_OP_RRR1_S3(ctx->opcode); + r4 = MASK_OP_RRR1_D(ctx->opcode); + n = MASK_OP_RRR1_N(ctx->opcode); + + switch (op2) { + case OPC2_32_RRR1_MSUB_H_LL: + gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MSUB_H_LU: + gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MSUB_H_UL: + gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MSUB_H_UU: + gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MSUBS_H_LL: + gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MSUBS_H_LU: + gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MSUBS_H_UL: + gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MSUBS_H_UU: + gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MSUBM_H_LL: + gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MSUBM_H_LU: + gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MSUBM_H_UL: + gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MSUBM_H_UU: + gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MSUBMS_H_LL: + gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MSUBMS_H_LU: + gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MSUBMS_H_UL: + gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MSUBMS_H_UU: + gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MSUBR_H_LL: + gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MSUBR_H_LU: + gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + 
cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MSUBR_H_UL: + gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MSUBR_H_UU: + gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MSUBRS_H_LL: + gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MSUBRS_H_LU: + gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MSUBRS_H_UL: + gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MSUBRS_H_UU: + gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UU); + break; + } +} + +static void decode_rrr1_msubq_h(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t r1, r2, r3, r4, n; + TCGv temp, temp2; + + op2 = MASK_OP_RRR1_OP2(ctx->opcode); + r1 = MASK_OP_RRR1_S1(ctx->opcode); + r2 = MASK_OP_RRR1_S2(ctx->opcode); + r3 = MASK_OP_RRR1_S3(ctx->opcode); + r4 = MASK_OP_RRR1_D(ctx->opcode); + n = MASK_OP_RRR1_N(ctx->opcode); + + temp = tcg_const_i32(n); + temp2 = tcg_temp_new(); + + switch (op2) { + case OPC2_32_RRR1_MSUB_Q_32: + gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, 32, env); + break; + case OPC2_32_RRR1_MSUB_Q_64: + gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, env); + break; + case OPC2_32_RRR1_MSUB_Q_32_L: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + temp, n, 16, env); + break; + case OPC2_32_RRR1_MSUB_Q_64_L: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + n, env); + break; + case OPC2_32_RRR1_MSUB_Q_32_U: + tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + temp, n, 16, env); + break; + case OPC2_32_RRR1_MSUB_Q_64_U: + tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + n, env); + break; + case OPC2_32_RRR1_MSUB_Q_32_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MSUB_Q_64_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], temp, temp2, n); + break; + case OPC2_32_RRR1_MSUB_Q_32_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MSUB_Q_64_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], temp, temp2, n); + break; + case OPC2_32_RRR1_MSUBS_Q_32: + gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, 32); + break; + case OPC2_32_RRR1_MSUBS_Q_64: + gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n); + break; + case OPC2_32_RRR1_MSUBS_Q_32_L: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + gen_msubs32_q(cpu_gpr_d[r4], 
cpu_gpr_d[r3], cpu_gpr_d[r1], + temp, n, 16); + break; + case OPC2_32_RRR1_MSUBS_Q_64_L: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + n); + break; + case OPC2_32_RRR1_MSUBS_Q_32_U: + tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + temp, n, 16); + break; + case OPC2_32_RRR1_MSUBS_Q_64_U: + tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + n); + break; + case OPC2_32_RRR1_MSUBS_Q_32_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MSUBS_Q_64_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], temp, temp2, n); + break; + case OPC2_32_RRR1_MSUBS_Q_32_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MSUBS_Q_64_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], temp, temp2, n); + break; + case OPC2_32_RRR1_MSUBR_H_64_UL: + gen_msubr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1], + cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2); + break; + case OPC2_32_RRR1_MSUBRS_H_64_UL: + gen_msubr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1], + cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2); + break; + case OPC2_32_RRR1_MSUBR_Q_32_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MSUBR_Q_32_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MSUBRS_Q_32_LL: + tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MSUBRS_Q_32_UU: + tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); + break; + } + tcg_temp_free(temp); + tcg_temp_free(temp2); +} + +static void decode_rrr1_msubad_h(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t r1, r2, r3, r4, n; + + op2 = MASK_OP_RRR1_OP2(ctx->opcode); + r1 = MASK_OP_RRR1_S1(ctx->opcode); + r2 = MASK_OP_RRR1_S2(ctx->opcode); + r3 = MASK_OP_RRR1_S3(ctx->opcode); + r4 = MASK_OP_RRR1_D(ctx->opcode); + n = MASK_OP_RRR1_N(ctx->opcode); + + switch (op2) { + case OPC2_32_RRR1_MSUBAD_H_32_LL: + gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MSUBAD_H_32_LU: + gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MSUBAD_H_32_UL: + gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MSUBAD_H_32_UU: + gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], 
cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MSUBADS_H_32_LL: + gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LL); + break; + case OPC2_32_RRR1_MSUBADS_H_32_LU: + gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LU); + break; + case OPC2_32_RRR1_MSUBADS_H_32_UL: + gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UL); + break; + case OPC2_32_RRR1_MSUBADS_H_32_UU: + gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UU); + break; + case OPC2_32_RRR1_MSUBADM_H_64_LL: + gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LL); + break; + case OPC2_32_RRR1_MSUBADM_H_64_LU: + gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LU); + break; + case OPC2_32_RRR1_MSUBADM_H_64_UL: + gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UL); + break; + case OPC2_32_RRR1_MSUBADM_H_64_UU: + gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UU); + break; + case OPC2_32_RRR1_MSUBADMS_H_64_LL: + gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LL); + break; + case OPC2_32_RRR1_MSUBADMS_H_64_LU: + gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_LU); + break; + case OPC2_32_RRR1_MSUBADMS_H_64_UL: + gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UL); + break; + case OPC2_32_RRR1_MSUBADMS_H_64_UU: + gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], + cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + n, MODE_UU); + break; + case OPC2_32_RRR1_MSUBADR_H_16_LL: + gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MSUBADR_H_16_LU: + gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MSUBADR_H_16_UL: + gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MSUBADR_H_16_UU: + gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MSUBADRS_H_16_LL: + gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MSUBADRS_H_16_LU: + gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MSUBADRS_H_16_UL: + gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MSUBADRS_H_16_UU: + gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], + cpu_gpr_d[r2], n, MODE_UU); + break; + } +} + +/* RRRR format */ +static void decode_rrrr_extract_insert(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3, r4; + TCGv tmp_width, tmp_pos; + + r1 = MASK_OP_RRRR_S1(ctx->opcode); + r2 = MASK_OP_RRRR_S2(ctx->opcode); + r3 = 
MASK_OP_RRRR_S3(ctx->opcode); + r4 = MASK_OP_RRRR_D(ctx->opcode); + op2 = MASK_OP_RRRR_OP2(ctx->opcode); + + tmp_pos = tcg_temp_new(); + tmp_width = tcg_temp_new(); + + switch (op2) { + case OPC2_32_RRRR_DEXTR: + tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f); + if (r1 == r2) { + tcg_gen_rotl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos); + } else { + tcg_gen_shl_tl(tmp_width, cpu_gpr_d[r1], tmp_pos); + tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos); + tcg_gen_shr_tl(tmp_pos, cpu_gpr_d[r2], tmp_pos); + tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, tmp_pos); + } + break; + case OPC2_32_RRRR_EXTR: + case OPC2_32_RRRR_EXTR_U: + tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f); + tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f); + tcg_gen_add_tl(tmp_pos, tmp_pos, tmp_width); + tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos); + tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos); + tcg_gen_subfi_tl(tmp_width, 32, tmp_width); + if (op2 == OPC2_32_RRRR_EXTR) { + tcg_gen_sar_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width); + } else { + tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width); + } + break; + case OPC2_32_RRRR_INSERT: + tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f); + tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f); + gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], tmp_width, + tmp_pos); + break; + } + tcg_temp_free(tmp_pos); + tcg_temp_free(tmp_width); +} + +/* RRRW format */ +static void decode_rrrw_extract_insert(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + int r1, r2, r3, r4; + int32_t width; + + TCGv temp, temp2; + + op2 = MASK_OP_RRRW_OP2(ctx->opcode); + r1 = MASK_OP_RRRW_S1(ctx->opcode); + r2 = MASK_OP_RRRW_S2(ctx->opcode); + r3 = MASK_OP_RRRW_S3(ctx->opcode); + r4 = MASK_OP_RRRW_D(ctx->opcode); + width = MASK_OP_RRRW_WIDTH(ctx->opcode); + + temp = tcg_temp_new(); + + switch (op2) { + case OPC2_32_RRRW_EXTR: + tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f); + tcg_gen_addi_tl(temp, temp, width); + tcg_gen_subfi_tl(temp, 32, temp); + tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp); + tcg_gen_sari_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width); + break; + case OPC2_32_RRRW_EXTR_U: + if (width == 0) { + tcg_gen_movi_tl(cpu_gpr_d[r4], 0); + } else { + tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f); + tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp); + tcg_gen_andi_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32-width)); + } + break; + case OPC2_32_RRRW_IMASK: + temp2 = tcg_temp_new(); + + tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f); + tcg_gen_movi_tl(temp2, (1 << width) - 1); + tcg_gen_shl_tl(temp2, temp2, temp); + tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp); + tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2); + + tcg_temp_free(temp2); + break; + case OPC2_32_RRRW_INSERT: + temp2 = tcg_temp_new(); + + tcg_gen_movi_tl(temp, width); + tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f); + gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2); + + tcg_temp_free(temp2); + break; + } + tcg_temp_free(temp); +} + +/* SYS Format*/ +static void decode_sys_interrupts(CPUTriCoreState *env, DisasContext *ctx) +{ + uint32_t op2; + uint32_t r1; + TCGLabel *l1; + TCGv tmp; + + op2 = MASK_OP_SYS_OP2(ctx->opcode); + r1 = MASK_OP_SYS_S1D(ctx->opcode); + + switch (op2) { + case OPC2_32_SYS_DEBUG: + /* raise EXCP_DEBUG */ + break; + case OPC2_32_SYS_DISABLE: + tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~MASK_ICR_IE); + break; + case OPC2_32_SYS_DSYNC: + break; + case OPC2_32_SYS_ENABLE: + tcg_gen_ori_tl(cpu_ICR, cpu_ICR, MASK_ICR_IE); + break; + case OPC2_32_SYS_ISYNC: + break; + case OPC2_32_SYS_NOP: + break; + case 
OPC2_32_SYS_RET: + gen_compute_branch(ctx, op2, 0, 0, 0, 0); + break; + case OPC2_32_SYS_FRET: + gen_fret(ctx); + break; + case OPC2_32_SYS_RFE: + gen_helper_rfe(cpu_env); + tcg_gen_exit_tb(0); + ctx->bstate = BS_BRANCH; + break; + case OPC2_32_SYS_RFM: + if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) { + tmp = tcg_temp_new(); + l1 = gen_new_label(); + + tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR)); + tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE); + tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1); + gen_helper_rfm(cpu_env); + gen_set_label(l1); + tcg_gen_exit_tb(0); + ctx->bstate = BS_BRANCH; + tcg_temp_free(tmp); + } else { + /* generate privilege trap */ + } + break; + case OPC2_32_SYS_RSLCX: + gen_helper_rslcx(cpu_env); + break; + case OPC2_32_SYS_SVLCX: + gen_helper_svlcx(cpu_env); + break; + case OPC2_32_SYS_RESTORE: + if (tricore_feature(env, TRICORE_FEATURE_16)) { + if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM || + (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) { + tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], 8, 1); + } /* else raise privilege trap */ + } /* else raise illegal opcode trap */ + break; + case OPC2_32_SYS_TRAPSV: + /* TODO: raise sticky overflow trap */ + break; + case OPC2_32_SYS_TRAPV: + /* TODO: raise overflow trap */ + break; + } +} + +static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx) +{ + int op1; + int32_t r1, r2, r3; + int32_t address, const16; + int8_t b, const4; + int32_t bpos; + TCGv temp, temp2, temp3; + + op1 = MASK_OP_MAJOR(ctx->opcode); + + /* handle JNZ.T opcode only being 7 bit long */ + if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) { + op1 = OPCM_32_BRN_JTT; + } + + switch (op1) { +/* ABS-format */ + case OPCM_32_ABS_LDW: + decode_abs_ldw(env, ctx); + break; + case OPCM_32_ABS_LDB: + decode_abs_ldb(env, ctx); + break; + case OPCM_32_ABS_LDMST_SWAP: + decode_abs_ldst_swap(env, ctx); + break; + case OPCM_32_ABS_LDST_CONTEXT: + decode_abs_ldst_context(env, ctx); + break; + case OPCM_32_ABS_STORE: + decode_abs_store(env, ctx); + break; + case OPCM_32_ABS_STOREB_H: + decode_abs_storeb_h(env, ctx); + break; + case OPC1_32_ABS_STOREQ: + address = MASK_OP_ABS_OFF18(ctx->opcode); + r1 = MASK_OP_ABS_S1D(ctx->opcode); + temp = tcg_const_i32(EA_ABS_FORMAT(address)); + temp2 = tcg_temp_new(); + + tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16); + tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW); + + tcg_temp_free(temp2); + tcg_temp_free(temp); + break; + case OPC1_32_ABS_LD_Q: + address = MASK_OP_ABS_OFF18(ctx->opcode); + r1 = MASK_OP_ABS_S1D(ctx->opcode); + temp = tcg_const_i32(EA_ABS_FORMAT(address)); + + tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); + tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + + tcg_temp_free(temp); + break; + case OPC1_32_ABS_LEA: + address = MASK_OP_ABS_OFF18(ctx->opcode); + r1 = MASK_OP_ABS_S1D(ctx->opcode); + tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address)); + break; +/* ABSB-format */ + case OPC1_32_ABSB_ST_T: + address = MASK_OP_ABS_OFF18(ctx->opcode); + b = MASK_OP_ABSB_B(ctx->opcode); + bpos = MASK_OP_ABSB_BPOS(ctx->opcode); + + temp = tcg_const_i32(EA_ABS_FORMAT(address)); + temp2 = tcg_temp_new(); + + tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB); + tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos)); + tcg_gen_ori_tl(temp2, temp2, (b << bpos)); + tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB); + + tcg_temp_free(temp); + tcg_temp_free(temp2); + break; +/* B-format */ + case OPC1_32_B_CALL: + case OPC1_32_B_CALLA: + case 
OPC1_32_B_FCALL: + case OPC1_32_B_FCALLA: + case OPC1_32_B_J: + case OPC1_32_B_JA: + case OPC1_32_B_JL: + case OPC1_32_B_JLA: + address = MASK_OP_B_DISP24_SEXT(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, 0, address); + break; +/* Bit-format */ + case OPCM_32_BIT_ANDACC: + decode_bit_andacc(env, ctx); + break; + case OPCM_32_BIT_LOGICAL_T1: + decode_bit_logical_t(env, ctx); + break; + case OPCM_32_BIT_INSERT: + decode_bit_insert(env, ctx); + break; + case OPCM_32_BIT_LOGICAL_T2: + decode_bit_logical_t2(env, ctx); + break; + case OPCM_32_BIT_ORAND: + decode_bit_orand(env, ctx); + break; + case OPCM_32_BIT_SH_LOGIC1: + decode_bit_sh_logic1(env, ctx); + break; + case OPCM_32_BIT_SH_LOGIC2: + decode_bit_sh_logic2(env, ctx); + break; + /* BO Format */ + case OPCM_32_BO_ADDRMODE_POST_PRE_BASE: + decode_bo_addrmode_post_pre_base(env, ctx); + break; + case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR: + decode_bo_addrmode_bitreverse_circular(env, ctx); + break; + case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE: + decode_bo_addrmode_ld_post_pre_base(env, ctx); + break; + case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR: + decode_bo_addrmode_ld_bitreverse_circular(env, ctx); + break; + case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE: + decode_bo_addrmode_stctx_post_pre_base(env, ctx); + break; + case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR: + decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx); + break; +/* BOL-format */ + case OPC1_32_BOL_LD_A_LONGOFF: + case OPC1_32_BOL_LD_W_LONGOFF: + case OPC1_32_BOL_LEA_LONGOFF: + case OPC1_32_BOL_ST_W_LONGOFF: + case OPC1_32_BOL_ST_A_LONGOFF: + case OPC1_32_BOL_LD_B_LONGOFF: + case OPC1_32_BOL_LD_BU_LONGOFF: + case OPC1_32_BOL_LD_H_LONGOFF: + case OPC1_32_BOL_LD_HU_LONGOFF: + case OPC1_32_BOL_ST_B_LONGOFF: + case OPC1_32_BOL_ST_H_LONGOFF: + decode_bol_opc(env, ctx, op1); + break; +/* BRC Format */ + case OPCM_32_BRC_EQ_NEQ: + case OPCM_32_BRC_GE: + case OPCM_32_BRC_JLT: + case OPCM_32_BRC_JNE: + const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode); + address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode); + r1 = MASK_OP_BRC_S1(ctx->opcode); + gen_compute_branch(ctx, op1, r1, 0, const4, address); + break; +/* BRN Format */ + case OPCM_32_BRN_JTT: + address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode); + r1 = MASK_OP_BRN_S1(ctx->opcode); + gen_compute_branch(ctx, op1, r1, 0, 0, address); + break; +/* BRR Format */ + case OPCM_32_BRR_EQ_NEQ: + case OPCM_32_BRR_ADDR_EQ_NEQ: + case OPCM_32_BRR_GE: + case OPCM_32_BRR_JLT: + case OPCM_32_BRR_JNE: + case OPCM_32_BRR_JNZ: + case OPCM_32_BRR_LOOP: + address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode); + r2 = MASK_OP_BRR_S2(ctx->opcode); + r1 = MASK_OP_BRR_S1(ctx->opcode); + gen_compute_branch(ctx, op1, r1, r2, 0, address); + break; +/* RC Format */ + case OPCM_32_RC_LOGICAL_SHIFT: + decode_rc_logical_shift(env, ctx); + break; + case OPCM_32_RC_ACCUMULATOR: + decode_rc_accumulator(env, ctx); + break; + case OPCM_32_RC_SERVICEROUTINE: + decode_rc_serviceroutine(env, ctx); + break; + case OPCM_32_RC_MUL: + decode_rc_mul(env, ctx); + break; +/* RCPW Format */ + case OPCM_32_RCPW_MASK_INSERT: + decode_rcpw_insert(env, ctx); + break; +/* RCRR Format */ + case OPC1_32_RCRR_INSERT: + r1 = MASK_OP_RCRR_S1(ctx->opcode); + r2 = MASK_OP_RCRR_S3(ctx->opcode); + r3 = MASK_OP_RCRR_D(ctx->opcode); + const16 = MASK_OP_RCRR_CONST4(ctx->opcode); + temp = tcg_const_i32(const16); + temp2 = tcg_temp_new(); /* width*/ + temp3 = tcg_temp_new(); /* pos */ + + tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f); + tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f); + + 
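+        /* temp holds the 4-bit constant, temp2/temp3 the run-time width and
+           position (both taken modulo 32 above); gen_insert() below deposits
+           the constant into d[r1] at that bit field and writes the result
+           to d[r2]. */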
gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);
+
+        tcg_temp_free(temp);
+        tcg_temp_free(temp2);
+        tcg_temp_free(temp3);
+        break;
+/* RCRW Format */
+    case OPCM_32_RCRW_MASK_INSERT:
+        decode_rcrw_insert(env, ctx);
+        break;
+/* RCR Format */
+    case OPCM_32_RCR_COND_SELECT:
+        decode_rcr_cond_select(env, ctx);
+        break;
+    case OPCM_32_RCR_MADD:
+        decode_rcr_madd(env, ctx);
+        break;
+    case OPCM_32_RCR_MSUB:
+        decode_rcr_msub(env, ctx);
+        break;
+/* RLC Format */
+    case OPC1_32_RLC_ADDI:
+    case OPC1_32_RLC_ADDIH:
+    case OPC1_32_RLC_ADDIH_A:
+    case OPC1_32_RLC_MFCR:
+    case OPC1_32_RLC_MOV:
+    case OPC1_32_RLC_MOV_64:
+    case OPC1_32_RLC_MOV_U:
+    case OPC1_32_RLC_MOV_H:
+    case OPC1_32_RLC_MOVH_A:
+    case OPC1_32_RLC_MTCR:
+        decode_rlc_opc(env, ctx, op1);
+        break;
+/* RR Format */
+    case OPCM_32_RR_ACCUMULATOR:
+        decode_rr_accumulator(env, ctx);
+        break;
+    case OPCM_32_RR_LOGICAL_SHIFT:
+        decode_rr_logical_shift(env, ctx);
+        break;
+    case OPCM_32_RR_ADDRESS:
+        decode_rr_address(env, ctx);
+        break;
+    case OPCM_32_RR_IDIRECT:
+        decode_rr_idirect(env, ctx);
+        break;
+    case OPCM_32_RR_DIVIDE:
+        decode_rr_divide(env, ctx);
+        break;
+/* RR1 Format */
+    case OPCM_32_RR1_MUL:
+        decode_rr1_mul(env, ctx);
+        break;
+    case OPCM_32_RR1_MULQ:
+        decode_rr1_mulq(env, ctx);
+        break;
+/* RR2 format */
+    case OPCM_32_RR2_MUL:
+        decode_rr2_mul(env, ctx);
+        break;
+/* RRPW format */
+    case OPCM_32_RRPW_EXTRACT_INSERT:
+        decode_rrpw_extract_insert(env, ctx);
+        break;
+    case OPC1_32_RRPW_DEXTR:
+        /* DEXTR: d[r3] = upper 32 bits of ({d[r1]:d[r2]} << pos) */
+        r1 = MASK_OP_RRPW_S1(ctx->opcode);
+        r2 = MASK_OP_RRPW_S2(ctx->opcode);
+        r3 = MASK_OP_RRPW_D(ctx->opcode);
+        const16 = MASK_OP_RRPW_POS(ctx->opcode);
+        if (r1 == r2) {
+            tcg_gen_rotli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], const16);
+        } else {
+            temp = tcg_temp_new();
+            tcg_gen_shli_tl(temp, cpu_gpr_d[r1], const16);
+            tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], 32 - const16);
+            tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+            tcg_temp_free(temp);
+        }
+        break;
+/* RRR Format */
+    case OPCM_32_RRR_COND_SELECT:
+        decode_rrr_cond_select(env, ctx);
+        break;
+    case OPCM_32_RRR_DIVIDE:
+        decode_rrr_divide(env, ctx);
+        break;
+/* RRR2 Format */
+    case OPCM_32_RRR2_MADD:
+        decode_rrr2_madd(env, ctx);
+        break;
+    case OPCM_32_RRR2_MSUB:
+        decode_rrr2_msub(env, ctx);
+        break;
+/* RRR1 format */
+    case OPCM_32_RRR1_MADD:
+        decode_rrr1_madd(env, ctx);
+        break;
+    case OPCM_32_RRR1_MADDQ_H:
+        decode_rrr1_maddq_h(env, ctx);
+        break;
+    case OPCM_32_RRR1_MADDSU_H:
+        decode_rrr1_maddsu_h(env, ctx);
+        break;
+    case OPCM_32_RRR1_MSUB_H:
+        decode_rrr1_msub(env, ctx);
+        break;
+    case OPCM_32_RRR1_MSUB_Q:
+        decode_rrr1_msubq_h(env, ctx);
+        break;
+    case OPCM_32_RRR1_MSUBAD_H:
+        decode_rrr1_msubad_h(env, ctx);
+        break;
+/* RRRR format */
+    case OPCM_32_RRRR_EXTRACT_INSERT:
+        decode_rrrr_extract_insert(env, ctx);
+        break;
+/* RRRW format */
+    case OPCM_32_RRRW_EXTRACT_INSERT:
+        decode_rrrw_extract_insert(env, ctx);
+        break;
+/* SYS format */
+    case OPCM_32_SYS_INTERRUPTS:
+        decode_sys_interrupts(env, ctx);
+        break;
+    case OPC1_32_SYS_RSTV:
+        tcg_gen_movi_tl(cpu_PSW_V, 0);
+        tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
+        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+        tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
+        break;
+    }
+}
+
+static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
+{
+    /* 16-Bit Instruction */
+    if ((ctx->opcode & 0x1) == 0) {
+        ctx->next_pc = ctx->pc + 2;
+        decode_16Bit_opc(env, ctx);
+    /* 32-Bit Instruction */
+    } else {
+        ctx->next_pc = ctx->pc + 4;
+        decode_32Bit_opc(env, ctx);
+    }
+}
+
+void gen_intermediate_code(CPUTriCoreState
*env, struct TranslationBlock *tb) +{ + TriCoreCPU *cpu = tricore_env_get_cpu(env); + CPUState *cs = CPU(cpu); + DisasContext ctx; + target_ulong pc_start; + int num_insns, max_insns; + + num_insns = 0; + max_insns = tb->cflags & CF_COUNT_MASK; + if (max_insns == 0) { + max_insns = CF_COUNT_MASK; + } + if (singlestep) { + max_insns = 1; + } + if (max_insns > TCG_MAX_INSNS) { + max_insns = TCG_MAX_INSNS; + } + + pc_start = tb->pc; + ctx.pc = pc_start; + ctx.saved_pc = -1; + ctx.tb = tb; + ctx.singlestep_enabled = cs->singlestep_enabled; + ctx.bstate = BS_NONE; + ctx.mem_idx = cpu_mmu_index(env, false); + + tcg_clear_temp_count(); + gen_tb_start(tb); + while (ctx.bstate == BS_NONE) { + tcg_gen_insn_start(ctx.pc); + num_insns++; + + ctx.opcode = cpu_ldl_code(env, ctx.pc); + decode_opc(env, &ctx, 0); + + if (num_insns >= max_insns || tcg_op_buf_full()) { + gen_save_pc(ctx.next_pc); + tcg_gen_exit_tb(0); + break; + } + ctx.pc = ctx.next_pc; + } + + gen_tb_end(tb, num_insns); + tb->size = ctx.pc - pc_start; + tb->icount = num_insns; + + if (tcg_check_temp_count()) { + printf("LEAK at %08x\n", env->PC); + } + +#ifdef DEBUG_DISAS + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + qemu_log("IN: %s\n", lookup_symbol(pc_start)); + log_target_disas(cs, pc_start, ctx.pc - pc_start, 0); + qemu_log("\n"); + } +#endif +} + +void +restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, + target_ulong *data) +{ + env->PC = data[0]; +} +/* + * + * Initialization + * + */ + +void cpu_state_reset(CPUTriCoreState *env) +{ + /* Reset Regs to Default Value */ + env->PSW = 0xb80; +} + +static void tricore_tcg_init_csfr(void) +{ + cpu_PCXI = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PCXI), "PCXI"); + cpu_PSW = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW), "PSW"); + cpu_PC = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PC), "PC"); + cpu_ICR = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, ICR), "ICR"); +} + +void tricore_tcg_init(void) +{ + int i; + static int inited; + if (inited) { + return; + } + cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); + /* reg init */ + for (i = 0 ; i < 16 ; i++) { + cpu_gpr_a[i] = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, gpr_a[i]), + regnames_a[i]); + } + for (i = 0 ; i < 16 ; i++) { + cpu_gpr_d[i] = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, gpr_d[i]), + regnames_d[i]); + } + tricore_tcg_init_csfr(); + /* init PSW flag cache */ + cpu_PSW_C = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW_USB_C), + "PSW_C"); + cpu_PSW_V = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW_USB_V), + "PSW_V"); + cpu_PSW_SV = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW_USB_SV), + "PSW_SV"); + cpu_PSW_AV = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW_USB_AV), + "PSW_AV"); + cpu_PSW_SAV = tcg_global_mem_new(TCG_AREG0, + offsetof(CPUTriCoreState, PSW_USB_SAV), + "PSW_SAV"); +} diff --git a/src/target-tricore/tricore-defs.h b/src/target-tricore/tricore-defs.h new file mode 100644 index 0000000..4350b03 --- /dev/null +++ b/src/target-tricore/tricore-defs.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#if !defined(__QEMU_TRICORE_DEFS_H__) +#define __QEMU_TRICORE_DEFS_H__ + +#define TARGET_PAGE_BITS 14 +#define TARGET_LONG_BITS 32 +#define TARGET_PHYS_ADDR_SPACE_BITS 32 +#define TARGET_VIRT_ADDR_SPACE_BITS 32 + +#define TRICORE_TLB_MAX 128 + +#endif /* __QEMU_TRICORE_DEFS_H__ */ diff --git a/src/target-tricore/tricore-opcodes.h b/src/target-tricore/tricore-opcodes.h new file mode 100644 index 0000000..1bfed0c --- /dev/null +++ b/src/target-tricore/tricore-opcodes.h @@ -0,0 +1,1445 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * Opcode Masks for Tricore + * Format MASK_OP_InstrFormatName_Field + */ + +/* This creates a mask with bits start .. 
end set to 1 and applies it to op */ +#define MASK_BITS_SHIFT(op, start, end) (extract32(op, (start), \ + (end) - (start) + 1)) +#define MASK_BITS_SHIFT_SEXT(op, start, end) (sextract32(op, (start),\ + (end) - (start) + 1)) + +/* new opcode masks */ + +#define MASK_OP_MAJOR(op) MASK_BITS_SHIFT(op, 0, 7) + +/* 16-Bit Formats */ +#define MASK_OP_SB_DISP8(op) MASK_BITS_SHIFT(op, 8, 15) +#define MASK_OP_SB_DISP8_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 8, 15) + +#define MASK_OP_SBC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SBC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15) +#define MASK_OP_SBC_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SBR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SBR_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SBRN_N(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SBRN_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SC_CONST8(op) MASK_BITS_SHIFT(op, 8, 15) + +#define MASK_OP_SLR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SLR_D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SLRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SLRO_D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SR_OP2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SR_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15) +#define MASK_OP_SRC_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRO_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRO_OFF4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRR_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRRS_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRRS_S1D(op) MASK_BITS_SHIFT(op, 8, 11) +#define MASK_OP_SRRS_N(op) MASK_BITS_SHIFT(op, 6, 7) + +#define MASK_OP_SSR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SSR_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SSRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SSRO_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* 32-Bit Formats */ + +/* ABS Format */ +#define MASK_OP_ABS_OFF18(op) (MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT(op, 28, 31) << 6) + \ + (MASK_BITS_SHIFT(op, 22, 25) << 10) +\ + (MASK_BITS_SHIFT(op, 12, 15) << 14)) +#define MASK_OP_ABS_OP2(op) MASK_BITS_SHIFT(op, 26, 27) +#define MASK_OP_ABS_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +/* ABSB Format */ +#define MASK_OP_ABSB_OFF18(op) MASK_OP_ABS_OFF18(op) +#define MASK_OP_ABSB_OP2(op) MASK_BITS_SHIFT(op, 26, 27) +#define MASK_OP_ABSB_B(op) MASK_BITS_SHIFT(op, 11, 11) +#define MASK_OP_ABSB_BPOS(op) MASK_BITS_SHIFT(op, 8, 10) + +/* B Format */ +#define MASK_OP_B_DISP24(op) (MASK_BITS_SHIFT(op, 16, 31) + \ + (MASK_BITS_SHIFT(op, 8, 15) << 16)) +#define MASK_OP_B_DISP24_SEXT(op) (MASK_BITS_SHIFT(op, 16, 31) + \ + (MASK_BITS_SHIFT_SEXT(op, 8, 15) << 16)) +/* BIT Format */ +#define MASK_OP_BIT_D(op) MASK_BITS_SHIFT(op, 28, 31) +#define MASK_OP_BIT_POS2(op) MASK_BITS_SHIFT(op, 23, 27) +#define MASK_OP_BIT_OP2(op) MASK_BITS_SHIFT(op, 21, 22) +#define MASK_OP_BIT_POS1(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_BIT_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BIT_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BO Format */ +#define MASK_OP_BO_OFF10(op) (MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT(op, 28, 31) << 6)) +#define MASK_OP_BO_OFF10_SEXT(op) (MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT_SEXT(op, 28, 31) << 6)) +#define MASK_OP_BO_OP2(op) MASK_BITS_SHIFT(op, 22, 
27) +#define MASK_OP_BO_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BO_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BOL Format */ +#define MASK_OP_BOL_OFF16(op) ((MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT(op, 28, 31) << 6)) + \ + (MASK_BITS_SHIFT(op, 22, 27) << 10)) +#define MASK_OP_BOL_OFF16_SEXT(op) ((MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT(op, 28, 31) << 6)) + \ + (MASK_BITS_SHIFT_SEXT(op, 22, 27) << 10)) +#define MASK_OP_BOL_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BOL_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BRC Format */ +#define MASK_OP_BRC_OP2(op) MASK_BITS_SHIFT(op, 31, 31) +#define MASK_OP_BRC_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) +#define MASK_OP_BRC_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30) +#define MASK_OP_BRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BRC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15) +#define MASK_OP_BRC_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BRN Format */ +#define MASK_OP_BRN_OP2(op) MASK_BITS_SHIFT(op, 31, 31) +#define MASK_OP_BRN_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) +#define MASK_OP_BRN_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30) +#define MASK_OP_BRN_N(op) (MASK_BITS_SHIFT(op, 12, 15) + \ + (MASK_BITS_SHIFT(op, 7, 7) << 4)) +#define MASK_OP_BRN_S1(op) MASK_BITS_SHIFT(op, 8, 11) +/* BRR Format */ +#define MASK_OP_BRR_OP2(op) MASK_BITS_SHIFT(op, 31, 31) +#define MASK_OP_BRR_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) +#define MASK_OP_BRR_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30) +#define MASK_OP_BRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BRR_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* META MASK for similar instr Formats */ +#define MASK_OP_META_D(op) MASK_BITS_SHIFT(op, 28, 31) +#define MASK_OP_META_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* RC Format */ +#define MASK_OP_RC_D(op) MASK_OP_META_D(op) +#define MASK_OP_RC_OP2(op) MASK_BITS_SHIFT(op, 21, 27) +#define MASK_OP_RC_CONST9(op) MASK_BITS_SHIFT(op, 12, 20) +#define MASK_OP_RC_CONST9_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 20) +#define MASK_OP_RC_S1(op) MASK_OP_META_S1(op) + +/* RCPW Format */ + +#define MASK_OP_RCPW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCPW_POS(op) MASK_BITS_SHIFT(op, 23, 27) +#define MASK_OP_RCPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22) +#define MASK_OP_RCPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RCPW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RCPW_S1(op) MASK_OP_META_S1(op) + +/* RCR Format */ + +#define MASK_OP_RCR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RCR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RCR_CONST9(op) MASK_BITS_SHIFT(op, 12, 20) +#define MASK_OP_RCR_CONST9_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 20) +#define MASK_OP_RCR_S1(op) MASK_OP_META_S1(op) + +/* RCRR Format */ + +#define MASK_OP_RCRR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RCRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RCRR_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RCRR_S1(op) MASK_OP_META_S1(op) + +/* RCRW Format */ + +#define MASK_OP_RCRW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCRW_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RCRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RCRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RCRW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RCRW_S1(op) MASK_OP_META_S1(op) + +/* RLC Format */ + +#define MASK_OP_RLC_D(op) MASK_OP_META_D(op) +#define MASK_OP_RLC_CONST16(op) MASK_BITS_SHIFT(op, 
12, 27) +#define MASK_OP_RLC_CONST16_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 27) +#define MASK_OP_RLC_S1(op) MASK_OP_META_S1(op) + +/* RR Format */ +#define MASK_OP_RR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RR_OP2(op) MASK_BITS_SHIFT(op, 20, 27) +#define MASK_OP_RR_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RR_S1(op) MASK_OP_META_S1(op) + +/* RR1 Format */ +#define MASK_OP_RR1_D(op) MASK_OP_META_D(op) +#define MASK_OP_RR1_OP2(op) MASK_BITS_SHIFT(op, 18, 27) +#define MASK_OP_RR1_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RR1_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RR1_S1(op) MASK_OP_META_S1(op) + +/* RR2 Format */ +#define MASK_OP_RR2_D(op) MASK_OP_META_D(op) +#define MASK_OP_RR2_OP2(op) MASK_BITS_SHIFT(op, 16, 27) +#define MASK_OP_RR2_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RR2_S1(op) MASK_OP_META_S1(op) + +/* RRPW Format */ +#define MASK_OP_RRPW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRPW_POS(op) MASK_BITS_SHIFT(op, 23, 27) +#define MASK_OP_RRPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22) +#define MASK_OP_RRPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RRPW_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRPW_S1(op) MASK_OP_META_S1(op) + +/* RRR Format */ +#define MASK_OP_RRR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRR_OP2(op) MASK_BITS_SHIFT(op, 20, 23) +#define MASK_OP_RRR_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRR_S1(op) MASK_OP_META_S1(op) + +/* RRR1 Format */ +#define MASK_OP_RRR1_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRR1_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRR1_OP2(op) MASK_BITS_SHIFT(op, 18, 23) +#define MASK_OP_RRR1_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RRR1_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRR1_S1(op) MASK_OP_META_S1(op) + +/* RRR2 Format */ +#define MASK_OP_RRR2_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRR2_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRR2_OP2(op) MASK_BITS_SHIFT(op, 16, 23) +#define MASK_OP_RRR2_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRR2_S1(op) MASK_OP_META_S1(op) + +/* RRRR Format */ +#define MASK_OP_RRRR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RRRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRRR_S1(op) MASK_OP_META_S1(op) + +/* RRRW Format */ +#define MASK_OP_RRRW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRRW_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RRRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RRRW_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRRW_S1(op) MASK_OP_META_S1(op) + +/* SYS Format */ +#define MASK_OP_SYS_OP2(op) MASK_BITS_SHIFT(op, 22, 27) +#define MASK_OP_SYS_S1D(op) MASK_OP_META_S1(op) + + + +/* + * Tricore Opcodes Enums + * + * Format: OPC(1|2|M)_InstrLen_Name + * OPC1 = only op1 field is used + * OPC2 = op1 and op2 field used part of OPCM + * OPCM = op1 field used to group Instr + * InstrLen = 16|32 + * Name = Name of Instr + */ + +/* 16-Bit */ +enum { + + OPCM_16_SR_SYSTEM = 0x00, + OPCM_16_SR_ACCU = 0x32, + + OPC1_16_SRC_ADD = 0xc2, + OPC1_16_SRC_ADD_A15 = 0x92, + OPC1_16_SRC_ADD_15A = 0x9a, + OPC1_16_SRR_ADD = 0x42, + OPC1_16_SRR_ADD_A15 = 0x12, + OPC1_16_SRR_ADD_15A = 0x1a, + OPC1_16_SRC_ADD_A = 0xb0, + OPC1_16_SRR_ADD_A = 
0x30, + OPC1_16_SRR_ADDS = 0x22, + OPC1_16_SRRS_ADDSC_A = 0x10, + OPC1_16_SC_AND = 0x16, + OPC1_16_SRR_AND = 0x26, + OPC1_16_SC_BISR = 0xe0, + OPC1_16_SRC_CADD = 0x8a, + OPC1_16_SRC_CADDN = 0xca, + OPC1_16_SB_CALL = 0x5c, + OPC1_16_SRC_CMOV = 0xaa, + OPC1_16_SRR_CMOV = 0x2a, + OPC1_16_SRC_CMOVN = 0xea, + OPC1_16_SRR_CMOVN = 0x6a, + OPC1_16_SRC_EQ = 0xba, + OPC1_16_SRR_EQ = 0x3a, + OPC1_16_SB_J = 0x3c, + OPC1_16_SBC_JEQ = 0x1e, + OPC1_16_SBR_JEQ = 0x3e, + OPC1_16_SBR_JGEZ = 0xce, + OPC1_16_SBR_JGTZ = 0x4e, + OPC1_16_SR_JI = 0xdc, + OPC1_16_SBR_JLEZ = 0x8e, + OPC1_16_SBR_JLTZ = 0x0e, + OPC1_16_SBC_JNE = 0x5e, + OPC1_16_SBR_JNE = 0x7e, + OPC1_16_SB_JNZ = 0xee, + OPC1_16_SBR_JNZ = 0xf6, + OPC1_16_SBR_JNZ_A = 0x7c, + OPC1_16_SBRN_JNZ_T = 0xae, + OPC1_16_SB_JZ = 0x6e, + OPC1_16_SBR_JZ = 0x76, + OPC1_16_SBR_JZ_A = 0xbc, + OPC1_16_SBRN_JZ_T = 0x2e, + OPC1_16_SC_LD_A = 0xd8, + OPC1_16_SLR_LD_A = 0xd4, + OPC1_16_SLR_LD_A_POSTINC = 0xc4, + OPC1_16_SLRO_LD_A = 0xc8, + OPC1_16_SRO_LD_A = 0xcc, + OPC1_16_SLR_LD_BU = 0x14, + OPC1_16_SLR_LD_BU_POSTINC = 0x04, + OPC1_16_SLRO_LD_BU = 0x08, + OPC1_16_SRO_LD_BU = 0x0c, + OPC1_16_SLR_LD_H = 0x94, + OPC1_16_SLR_LD_H_POSTINC = 0x84, + OPC1_16_SLRO_LD_H = 0x88, + OPC1_16_SRO_LD_H = 0x8c, + OPC1_16_SC_LD_W = 0x58, + OPC1_16_SLR_LD_W = 0x54, + OPC1_16_SLR_LD_W_POSTINC = 0x44, + OPC1_16_SLRO_LD_W = 0x48, + OPC1_16_SRO_LD_W = 0x4c, + OPC1_16_SBR_LOOP = 0xfc, + OPC1_16_SRC_LT = 0xfa, + OPC1_16_SRR_LT = 0x7a, + OPC1_16_SC_MOV = 0xda, + OPC1_16_SRC_MOV = 0x82, + OPC1_16_SRR_MOV = 0x02, + OPC1_16_SRC_MOV_E = 0xd2,/* 1.6 only */ + OPC1_16_SRC_MOV_A = 0xa0, + OPC1_16_SRR_MOV_A = 0x60, + OPC1_16_SRR_MOV_AA = 0x40, + OPC1_16_SRR_MOV_D = 0x80, + OPC1_16_SRR_MUL = 0xe2, + OPC1_16_SR_NOT = 0x46, + OPC1_16_SC_OR = 0x96, + OPC1_16_SRR_OR = 0xa6, + OPC1_16_SRC_SH = 0x06, + OPC1_16_SRC_SHA = 0x86, + OPC1_16_SC_ST_A = 0xf8, + OPC1_16_SRO_ST_A = 0xec, + OPC1_16_SSR_ST_A = 0xf4, + OPC1_16_SSR_ST_A_POSTINC = 0xe4, + OPC1_16_SSRO_ST_A = 0xe8, + OPC1_16_SRO_ST_B = 0x2c, + OPC1_16_SSR_ST_B = 0x34, + OPC1_16_SSR_ST_B_POSTINC = 0x24, + OPC1_16_SSRO_ST_B = 0x28, + OPC1_16_SRO_ST_H = 0xac, + OPC1_16_SSR_ST_H = 0xb4, + OPC1_16_SSR_ST_H_POSTINC = 0xa4, + OPC1_16_SSRO_ST_H = 0xa8, + OPC1_16_SC_ST_W = 0x78, + OPC1_16_SRO_ST_W = 0x6c, + OPC1_16_SSR_ST_W = 0x74, + OPC1_16_SSR_ST_W_POSTINC = 0x64, + OPC1_16_SSRO_ST_W = 0x68, + OPC1_16_SRR_SUB = 0xa2, + OPC1_16_SRR_SUB_A15B = 0x52, + OPC1_16_SRR_SUB_15AB = 0x5a, + OPC1_16_SC_SUB_A = 0x20, + OPC1_16_SRR_SUBS = 0x62, + OPC1_16_SRR_XOR = 0xc6, + +}; + +/* + * SR Format + */ +/* OPCM_16_SR_SYSTEM */ +enum { + + OPC2_16_SR_NOP = 0x00, + OPC2_16_SR_RET = 0x09, + OPC2_16_SR_RFE = 0x08, + OPC2_16_SR_DEBUG = 0x0a, + OPC2_16_SR_FRET = 0x07, +}; +/* OPCM_16_SR_ACCU */ +enum { + OPC2_16_SR_RSUB = 0x05, + OPC2_16_SR_SAT_B = 0x00, + OPC2_16_SR_SAT_BU = 0x01, + OPC2_16_SR_SAT_H = 0x02, + OPC2_16_SR_SAT_HU = 0x03, + +}; + +/* 32-Bit */ + +enum { +/* ABS Format 1, M */ + OPCM_32_ABS_LDW = 0x85, + OPCM_32_ABS_LDB = 0x05, + OPCM_32_ABS_LDMST_SWAP = 0xe5, + OPCM_32_ABS_LDST_CONTEXT = 0x15, + OPCM_32_ABS_STORE = 0xa5, + OPCM_32_ABS_STOREB_H = 0x25, + OPC1_32_ABS_STOREQ = 0x65, + OPC1_32_ABS_LD_Q = 0x45, + OPC1_32_ABS_LEA = 0xc5, +/* ABSB Format */ + OPC1_32_ABSB_ST_T = 0xd5, +/* B Format */ + OPC1_32_B_CALL = 0x6d, + OPC1_32_B_CALLA = 0xed, + OPC1_32_B_FCALL = 0x61, + OPC1_32_B_FCALLA = 0xe1, + OPC1_32_B_J = 0x1d, + OPC1_32_B_JA = 0x9d, + OPC1_32_B_JL = 0x5d, + OPC1_32_B_JLA = 0xdd, +/* Bit Format */ + OPCM_32_BIT_ANDACC = 0x47, + OPCM_32_BIT_LOGICAL_T1 = 0x87, + 
OPCM_32_BIT_INSERT = 0x67, + OPCM_32_BIT_LOGICAL_T2 = 0x07, + OPCM_32_BIT_ORAND = 0xc7, + OPCM_32_BIT_SH_LOGIC1 = 0x27, + OPCM_32_BIT_SH_LOGIC2 = 0xa7, +/* BO Format */ + OPCM_32_BO_ADDRMODE_POST_PRE_BASE = 0x89, + OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR = 0xa9, + OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE = 0x09, + OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR = 0x29, + OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE = 0x49, + OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR = 0x69, +/* BOL Format */ + OPC1_32_BOL_LD_A_LONGOFF = 0x99, + OPC1_32_BOL_LD_W_LONGOFF = 0x19, + OPC1_32_BOL_LEA_LONGOFF = 0xd9, + OPC1_32_BOL_ST_W_LONGOFF = 0x59, + OPC1_32_BOL_ST_A_LONGOFF = 0xb5, /* 1.6 only */ + OPC1_32_BOL_LD_B_LONGOFF = 0x79, /* 1.6 only */ + OPC1_32_BOL_LD_BU_LONGOFF = 0x39, /* 1.6 only */ + OPC1_32_BOL_LD_H_LONGOFF = 0xc9, /* 1.6 only */ + OPC1_32_BOL_LD_HU_LONGOFF = 0xb9, /* 1.6 only */ + OPC1_32_BOL_ST_B_LONGOFF = 0xe9, /* 1.6 only */ + OPC1_32_BOL_ST_H_LONGOFF = 0xf9, /* 1.6 only */ +/* BRC Format */ + OPCM_32_BRC_EQ_NEQ = 0xdf, + OPCM_32_BRC_GE = 0xff, + OPCM_32_BRC_JLT = 0xbf, + OPCM_32_BRC_JNE = 0x9f, +/* BRN Format */ + OPCM_32_BRN_JTT = 0x6f, +/* BRR Format */ + OPCM_32_BRR_EQ_NEQ = 0x5f, + OPCM_32_BRR_ADDR_EQ_NEQ = 0x7d, + OPCM_32_BRR_GE = 0x7f, + OPCM_32_BRR_JLT = 0x3f, + OPCM_32_BRR_JNE = 0x1f, + OPCM_32_BRR_JNZ = 0xbd, + OPCM_32_BRR_LOOP = 0xfd, +/* RC Format */ + OPCM_32_RC_LOGICAL_SHIFT = 0x8f, + OPCM_32_RC_ACCUMULATOR = 0x8b, + OPCM_32_RC_SERVICEROUTINE = 0xad, + OPCM_32_RC_MUL = 0x53, +/* RCPW Format */ + OPCM_32_RCPW_MASK_INSERT = 0xb7, +/* RCR Format */ + OPCM_32_RCR_COND_SELECT = 0xab, + OPCM_32_RCR_MADD = 0x13, + OPCM_32_RCR_MSUB = 0x33, +/* RCRR Format */ + OPC1_32_RCRR_INSERT = 0x97, +/* RCRW Format */ + OPCM_32_RCRW_MASK_INSERT = 0xd7, +/* RLC Format */ + OPC1_32_RLC_ADDI = 0x1b, + OPC1_32_RLC_ADDIH = 0x9b, + OPC1_32_RLC_ADDIH_A = 0x11, + OPC1_32_RLC_MFCR = 0x4d, + OPC1_32_RLC_MOV = 0x3b, + OPC1_32_RLC_MOV_64 = 0xfb, /* 1.6 only */ + OPC1_32_RLC_MOV_U = 0xbb, + OPC1_32_RLC_MOV_H = 0x7b, + OPC1_32_RLC_MOVH_A = 0x91, + OPC1_32_RLC_MTCR = 0xcd, +/* RR Format */ + OPCM_32_RR_LOGICAL_SHIFT = 0x0f, + OPCM_32_RR_ACCUMULATOR = 0x0b, + OPCM_32_RR_ADDRESS = 0x01, + OPCM_32_RR_DIVIDE = 0x4b, + OPCM_32_RR_IDIRECT = 0x2d, +/* RR1 Format */ + OPCM_32_RR1_MUL = 0xb3, + OPCM_32_RR1_MULQ = 0x93, +/* RR2 Format */ + OPCM_32_RR2_MUL = 0x73, +/* RRPW Format */ + OPCM_32_RRPW_EXTRACT_INSERT = 0x37, + OPC1_32_RRPW_DEXTR = 0x77, +/* RRR Format */ + OPCM_32_RRR_COND_SELECT = 0x2b, + OPCM_32_RRR_DIVIDE = 0x6b, +/* RRR1 Format */ + OPCM_32_RRR1_MADD = 0x83, + OPCM_32_RRR1_MADDQ_H = 0x43, + OPCM_32_RRR1_MADDSU_H = 0xc3, + OPCM_32_RRR1_MSUB_H = 0xa3, + OPCM_32_RRR1_MSUB_Q = 0x63, + OPCM_32_RRR1_MSUBAD_H = 0xe3, +/* RRR2 Format */ + OPCM_32_RRR2_MADD = 0x03, + OPCM_32_RRR2_MSUB = 0x23, +/* RRRR Format */ + OPCM_32_RRRR_EXTRACT_INSERT = 0x17, +/* RRRW Format */ + OPCM_32_RRRW_EXTRACT_INSERT = 0x57, +/* SYS Format */ + OPCM_32_SYS_INTERRUPTS = 0x0d, + OPC1_32_SYS_RSTV = 0x2f, +}; + + + +/* + * ABS Format + */ + +/* OPCM_32_ABS_LDW */ +enum { + + OPC2_32_ABS_LD_A = 0x02, + OPC2_32_ABS_LD_D = 0x01, + OPC2_32_ABS_LD_DA = 0x03, + OPC2_32_ABS_LD_W = 0x00, +}; + +/* OPCM_32_ABS_LDB */ +enum { + OPC2_32_ABS_LD_B = 0x00, + OPC2_32_ABS_LD_BU = 0x01, + OPC2_32_ABS_LD_H = 0x02, + OPC2_32_ABS_LD_HU = 0x03, +}; +/* OPCM_32_ABS_LDMST_SWAP */ +enum { + OPC2_32_ABS_LDMST = 0x01, + OPC2_32_ABS_SWAP_W = 0x00, +}; +/* OPCM_32_ABS_LDST_CONTEXT */ +enum { + OPC2_32_ABS_LDLCX = 0x02, + OPC2_32_ABS_LDUCX = 0x03, + OPC2_32_ABS_STLCX = 0x00, + 
OPC2_32_ABS_STUCX = 0x01, +}; +/* OPCM_32_ABS_STORE */ +enum { + OPC2_32_ABS_ST_A = 0x02, + OPC2_32_ABS_ST_D = 0x01, + OPC2_32_ABS_ST_DA = 0x03, + OPC2_32_ABS_ST_W = 0x00, +}; +/* OPCM_32_ABS_STOREB_H */ +enum { + OPC2_32_ABS_ST_B = 0x00, + OPC2_32_ABS_ST_H = 0x02, +}; +/* + * Bit Format + */ +/* OPCM_32_BIT_ANDACC */ +enum { + OPC2_32_BIT_AND_AND_T = 0x00, + OPC2_32_BIT_AND_ANDN_T = 0x03, + OPC2_32_BIT_AND_NOR_T = 0x02, + OPC2_32_BIT_AND_OR_T = 0x01, +}; +/* OPCM_32_BIT_LOGICAL_T */ +enum { + OPC2_32_BIT_AND_T = 0x00, + OPC2_32_BIT_ANDN_T = 0x03, + OPC2_32_BIT_NOR_T = 0x02, + OPC2_32_BIT_OR_T = 0x01, +}; +/* OPCM_32_BIT_INSERT */ +enum { + OPC2_32_BIT_INS_T = 0x00, + OPC2_32_BIT_INSN_T = 0x01, +}; +/* OPCM_32_BIT_LOGICAL_T2 */ +enum { + OPC2_32_BIT_NAND_T = 0x00, + OPC2_32_BIT_ORN_T = 0x01, + OPC2_32_BIT_XNOR_T = 0x02, + OPC2_32_BIT_XOR_T = 0x03, +}; +/* OPCM_32_BIT_ORAND */ +enum { + OPC2_32_BIT_OR_AND_T = 0x00, + OPC2_32_BIT_OR_ANDN_T = 0x03, + OPC2_32_BIT_OR_NOR_T = 0x02, + OPC2_32_BIT_OR_OR_T = 0x01, +}; +/*OPCM_32_BIT_SH_LOGIC1 */ +enum { + OPC2_32_BIT_SH_AND_T = 0x00, + OPC2_32_BIT_SH_ANDN_T = 0x03, + OPC2_32_BIT_SH_NOR_T = 0x02, + OPC2_32_BIT_SH_OR_T = 0x01, +}; +/* OPCM_32_BIT_SH_LOGIC2 */ +enum { + OPC2_32_BIT_SH_NAND_T = 0x00, + OPC2_32_BIT_SH_ORN_T = 0x01, + OPC2_32_BIT_SH_XNOR_T = 0x02, + OPC2_32_BIT_SH_XOR_T = 0x03, +}; +/* + * BO Format + */ +/* OPCM_32_BO_ADDRMODE_POST_PRE_BASE */ +enum { + OPC2_32_BO_CACHEA_I_SHORTOFF = 0x2e, + OPC2_32_BO_CACHEA_I_POSTINC = 0x0e, + OPC2_32_BO_CACHEA_I_PREINC = 0x1e, + OPC2_32_BO_CACHEA_W_SHORTOFF = 0x2c, + OPC2_32_BO_CACHEA_W_POSTINC = 0x0c, + OPC2_32_BO_CACHEA_W_PREINC = 0x1c, + OPC2_32_BO_CACHEA_WI_SHORTOFF = 0x2d, + OPC2_32_BO_CACHEA_WI_POSTINC = 0x0d, + OPC2_32_BO_CACHEA_WI_PREINC = 0x1d, + /* 1.3.1 only */ + OPC2_32_BO_CACHEI_W_SHORTOFF = 0x2b, + OPC2_32_BO_CACHEI_W_POSTINC = 0x0b, + OPC2_32_BO_CACHEI_W_PREINC = 0x1b, + OPC2_32_BO_CACHEI_WI_SHORTOFF = 0x2f, + OPC2_32_BO_CACHEI_WI_POSTINC = 0x0f, + OPC2_32_BO_CACHEI_WI_PREINC = 0x1f, + /* end 1.3.1 only */ + OPC2_32_BO_ST_A_SHORTOFF = 0x26, + OPC2_32_BO_ST_A_POSTINC = 0x06, + OPC2_32_BO_ST_A_PREINC = 0x16, + OPC2_32_BO_ST_B_SHORTOFF = 0x20, + OPC2_32_BO_ST_B_POSTINC = 0x00, + OPC2_32_BO_ST_B_PREINC = 0x10, + OPC2_32_BO_ST_D_SHORTOFF = 0x25, + OPC2_32_BO_ST_D_POSTINC = 0x05, + OPC2_32_BO_ST_D_PREINC = 0x15, + OPC2_32_BO_ST_DA_SHORTOFF = 0x27, + OPC2_32_BO_ST_DA_POSTINC = 0x07, + OPC2_32_BO_ST_DA_PREINC = 0x17, + OPC2_32_BO_ST_H_SHORTOFF = 0x22, + OPC2_32_BO_ST_H_POSTINC = 0x02, + OPC2_32_BO_ST_H_PREINC = 0x12, + OPC2_32_BO_ST_Q_SHORTOFF = 0x28, + OPC2_32_BO_ST_Q_POSTINC = 0x08, + OPC2_32_BO_ST_Q_PREINC = 0x18, + OPC2_32_BO_ST_W_SHORTOFF = 0x24, + OPC2_32_BO_ST_W_POSTINC = 0x04, + OPC2_32_BO_ST_W_PREINC = 0x14, +}; +/* OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR */ +enum { + OPC2_32_BO_CACHEA_I_BR = 0x0e, + OPC2_32_BO_CACHEA_I_CIRC = 0x1e, + OPC2_32_BO_CACHEA_W_BR = 0x0c, + OPC2_32_BO_CACHEA_W_CIRC = 0x1c, + OPC2_32_BO_CACHEA_WI_BR = 0x0d, + OPC2_32_BO_CACHEA_WI_CIRC = 0x1d, + OPC2_32_BO_ST_A_BR = 0x06, + OPC2_32_BO_ST_A_CIRC = 0x16, + OPC2_32_BO_ST_B_BR = 0x00, + OPC2_32_BO_ST_B_CIRC = 0x10, + OPC2_32_BO_ST_D_BR = 0x05, + OPC2_32_BO_ST_D_CIRC = 0x15, + OPC2_32_BO_ST_DA_BR = 0x07, + OPC2_32_BO_ST_DA_CIRC = 0x17, + OPC2_32_BO_ST_H_BR = 0x02, + OPC2_32_BO_ST_H_CIRC = 0x12, + OPC2_32_BO_ST_Q_BR = 0x08, + OPC2_32_BO_ST_Q_CIRC = 0x18, + OPC2_32_BO_ST_W_BR = 0x04, + OPC2_32_BO_ST_W_CIRC = 0x14, +}; +/* OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE */ +enum { + OPC2_32_BO_LD_A_SHORTOFF = 0x26, + 
OPC2_32_BO_LD_A_POSTINC = 0x06, + OPC2_32_BO_LD_A_PREINC = 0x16, + OPC2_32_BO_LD_B_SHORTOFF = 0x20, + OPC2_32_BO_LD_B_POSTINC = 0x00, + OPC2_32_BO_LD_B_PREINC = 0x10, + OPC2_32_BO_LD_BU_SHORTOFF = 0x21, + OPC2_32_BO_LD_BU_POSTINC = 0x01, + OPC2_32_BO_LD_BU_PREINC = 0x11, + OPC2_32_BO_LD_D_SHORTOFF = 0x25, + OPC2_32_BO_LD_D_POSTINC = 0x05, + OPC2_32_BO_LD_D_PREINC = 0x15, + OPC2_32_BO_LD_DA_SHORTOFF = 0x27, + OPC2_32_BO_LD_DA_POSTINC = 0x07, + OPC2_32_BO_LD_DA_PREINC = 0x17, + OPC2_32_BO_LD_H_SHORTOFF = 0x22, + OPC2_32_BO_LD_H_POSTINC = 0x02, + OPC2_32_BO_LD_H_PREINC = 0x12, + OPC2_32_BO_LD_HU_SHORTOFF = 0x23, + OPC2_32_BO_LD_HU_POSTINC = 0x03, + OPC2_32_BO_LD_HU_PREINC = 0x13, + OPC2_32_BO_LD_Q_SHORTOFF = 0x28, + OPC2_32_BO_LD_Q_POSTINC = 0x08, + OPC2_32_BO_LD_Q_PREINC = 0x18, + OPC2_32_BO_LD_W_SHORTOFF = 0x24, + OPC2_32_BO_LD_W_POSTINC = 0x04, + OPC2_32_BO_LD_W_PREINC = 0x14, +}; +/* OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR */ +enum { + OPC2_32_BO_LD_A_BR = 0x06, + OPC2_32_BO_LD_A_CIRC = 0x16, + OPC2_32_BO_LD_B_BR = 0x00, + OPC2_32_BO_LD_B_CIRC = 0x10, + OPC2_32_BO_LD_BU_BR = 0x01, + OPC2_32_BO_LD_BU_CIRC = 0x11, + OPC2_32_BO_LD_D_BR = 0x05, + OPC2_32_BO_LD_D_CIRC = 0x15, + OPC2_32_BO_LD_DA_BR = 0x07, + OPC2_32_BO_LD_DA_CIRC = 0x17, + OPC2_32_BO_LD_H_BR = 0x02, + OPC2_32_BO_LD_H_CIRC = 0x12, + OPC2_32_BO_LD_HU_BR = 0x03, + OPC2_32_BO_LD_HU_CIRC = 0x13, + OPC2_32_BO_LD_Q_BR = 0x08, + OPC2_32_BO_LD_Q_CIRC = 0x18, + OPC2_32_BO_LD_W_BR = 0x04, + OPC2_32_BO_LD_W_CIRC = 0x14, +}; +/* OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE */ +enum { + OPC2_32_BO_LDLCX_SHORTOFF = 0x24, + OPC2_32_BO_LDMST_SHORTOFF = 0x21, + OPC2_32_BO_LDMST_POSTINC = 0x01, + OPC2_32_BO_LDMST_PREINC = 0x11, + OPC2_32_BO_LDUCX_SHORTOFF = 0x25, + OPC2_32_BO_LEA_SHORTOFF = 0x28, + OPC2_32_BO_STLCX_SHORTOFF = 0x26, + OPC2_32_BO_STUCX_SHORTOFF = 0x27, + OPC2_32_BO_SWAP_W_SHORTOFF = 0x20, + OPC2_32_BO_SWAP_W_POSTINC = 0x00, + OPC2_32_BO_SWAP_W_PREINC = 0x10, + OPC2_32_BO_CMPSWAP_W_SHORTOFF = 0x23, + OPC2_32_BO_CMPSWAP_W_POSTINC = 0x03, + OPC2_32_BO_CMPSWAP_W_PREINC = 0x13, + OPC2_32_BO_SWAPMSK_W_SHORTOFF = 0x22, + OPC2_32_BO_SWAPMSK_W_POSTINC = 0x02, + OPC2_32_BO_SWAPMSK_W_PREINC = 0x12, +}; +/*OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR */ +enum { + OPC2_32_BO_LDMST_BR = 0x01, + OPC2_32_BO_LDMST_CIRC = 0x11, + OPC2_32_BO_SWAP_W_BR = 0x00, + OPC2_32_BO_SWAP_W_CIRC = 0x10, + OPC2_32_BO_CMPSWAP_W_BR = 0x03, + OPC2_32_BO_CMPSWAP_W_CIRC = 0x13, + OPC2_32_BO_SWAPMSK_W_BR = 0x02, + OPC2_32_BO_SWAPMSK_W_CIRC = 0x12, +}; +/* + * BRC Format + */ +/*OPCM_32_BRC_EQ_NEQ */ +enum { + OPC2_32_BRC_JEQ = 0x00, + OPC2_32_BRC_JNE = 0x01, +}; +/* OPCM_32_BRC_GE */ +enum { + OP2_32_BRC_JGE = 0x00, + OPC_32_BRC_JGE_U = 0x01, +}; +/* OPCM_32_BRC_JLT */ +enum { + OPC2_32_BRC_JLT = 0x00, + OPC2_32_BRC_JLT_U = 0x01, +}; +/* OPCM_32_BRC_JNE */ +enum { + OPC2_32_BRC_JNED = 0x01, + OPC2_32_BRC_JNEI = 0x00, +}; +/* + * BRN Format + */ +/* OPCM_32_BRN_JTT */ +enum { + OPC2_32_BRN_JNZ_T = 0x01, + OPC2_32_BRN_JZ_T = 0x00, +}; +/* + * BRR Format + */ +/* OPCM_32_BRR_EQ_NEQ */ +enum { + OPC2_32_BRR_JEQ = 0x00, + OPC2_32_BRR_JNE = 0x01, +}; +/* OPCM_32_BRR_ADDR_EQ_NEQ */ +enum { + OPC2_32_BRR_JEQ_A = 0x00, + OPC2_32_BRR_JNE_A = 0x01, +}; +/*OPCM_32_BRR_GE */ +enum { + OPC2_32_BRR_JGE = 0x00, + OPC2_32_BRR_JGE_U = 0x01, +}; +/* OPCM_32_BRR_JLT */ +enum { + OPC2_32_BRR_JLT = 0x00, + OPC2_32_BRR_JLT_U = 0x01, +}; +/* OPCM_32_BRR_JNE */ +enum { + OPC2_32_BRR_JNED = 0x01, + OPC2_32_BRR_JNEI = 0x00, +}; +/* OPCM_32_BRR_JNZ */ +enum { + OPC2_32_BRR_JNZ_A = 0x01, + 
OPC2_32_BRR_JZ_A = 0x00, +}; +/* OPCM_32_BRR_LOOP */ +enum { + OPC2_32_BRR_LOOP = 0x00, + OPC2_32_BRR_LOOPU = 0x01, +}; +/* + * RC Format + */ +/* OPCM_32_RC_LOGICAL_SHIFT */ +enum { + OPC2_32_RC_AND = 0x08, + OPC2_32_RC_ANDN = 0x0e, + OPC2_32_RC_NAND = 0x09, + OPC2_32_RC_NOR = 0x0b, + OPC2_32_RC_OR = 0x0a, + OPC2_32_RC_ORN = 0x0f, + OPC2_32_RC_SH = 0x00, + OPC2_32_RC_SH_H = 0x40, + OPC2_32_RC_SHA = 0x01, + OPC2_32_RC_SHA_H = 0x41, + OPC2_32_RC_SHAS = 0x02, + OPC2_32_RC_XNOR = 0x0d, + OPC2_32_RC_XOR = 0x0c, +}; +/* OPCM_32_RC_ACCUMULATOR */ +enum { + OPC2_32_RC_ABSDIF = 0x0e, + OPC2_32_RC_ABSDIFS = 0x0f, + OPC2_32_RC_ADD = 0x00, + OPC2_32_RC_ADDC = 0x05, + OPC2_32_RC_ADDS = 0x02, + OPC2_32_RC_ADDS_U = 0x03, + OPC2_32_RC_ADDX = 0x04, + OPC2_32_RC_AND_EQ = 0x20, + OPC2_32_RC_AND_GE = 0x24, + OPC2_32_RC_AND_GE_U = 0x25, + OPC2_32_RC_AND_LT = 0x22, + OPC2_32_RC_AND_LT_U = 0x23, + OPC2_32_RC_AND_NE = 0x21, + OPC2_32_RC_EQ = 0x10, + OPC2_32_RC_EQANY_B = 0x56, + OPC2_32_RC_EQANY_H = 0x76, + OPC2_32_RC_GE = 0x14, + OPC2_32_RC_GE_U = 0x15, + OPC2_32_RC_LT = 0x12, + OPC2_32_RC_LT_U = 0x13, + OPC2_32_RC_MAX = 0x1a, + OPC2_32_RC_MAX_U = 0x1b, + OPC2_32_RC_MIN = 0x18, + OPC2_32_RC_MIN_U = 0x19, + OPC2_32_RC_NE = 0x11, + OPC2_32_RC_OR_EQ = 0x27, + OPC2_32_RC_OR_GE = 0x2b, + OPC2_32_RC_OR_GE_U = 0x2c, + OPC2_32_RC_OR_LT = 0x29, + OPC2_32_RC_OR_LT_U = 0x2a, + OPC2_32_RC_OR_NE = 0x28, + OPC2_32_RC_RSUB = 0x08, + OPC2_32_RC_RSUBS = 0x0a, + OPC2_32_RC_RSUBS_U = 0x0b, + OPC2_32_RC_SH_EQ = 0x37, + OPC2_32_RC_SH_GE = 0x3b, + OPC2_32_RC_SH_GE_U = 0x3c, + OPC2_32_RC_SH_LT = 0x39, + OPC2_32_RC_SH_LT_U = 0x3a, + OPC2_32_RC_SH_NE = 0x38, + OPC2_32_RC_XOR_EQ = 0x2f, + OPC2_32_RC_XOR_GE = 0x33, + OPC2_32_RC_XOR_GE_U = 0x34, + OPC2_32_RC_XOR_LT = 0x31, + OPC2_32_RC_XOR_LT_U = 0x32, + OPC2_32_RC_XOR_NE = 0x30, +}; +/* OPCM_32_RC_SERVICEROUTINE */ +enum { + OPC2_32_RC_BISR = 0x00, + OPC2_32_RC_SYSCALL = 0x04, +}; +/* OPCM_32_RC_MUL */ +enum { + OPC2_32_RC_MUL_32 = 0x01, + OPC2_32_RC_MUL_64 = 0x03, + OPC2_32_RC_MULS_32 = 0x05, + OPC2_32_RC_MUL_U_64 = 0x02, + OPC2_32_RC_MULS_U_32 = 0x04, +}; +/* + * RCPW Format + */ +/* OPCM_32_RCPW_MASK_INSERT */ +enum { + OPC2_32_RCPW_IMASK = 0x01, + OPC2_32_RCPW_INSERT = 0x00, +}; +/* + * RCR Format + */ +/* OPCM_32_RCR_COND_SELECT */ +enum { + OPC2_32_RCR_CADD = 0x00, + OPC2_32_RCR_CADDN = 0x01, + OPC2_32_RCR_SEL = 0x04, + OPC2_32_RCR_SELN = 0x05, +}; +/* OPCM_32_RCR_MADD */ +enum { + OPC2_32_RCR_MADD_32 = 0x01, + OPC2_32_RCR_MADD_64 = 0x03, + OPC2_32_RCR_MADDS_32 = 0x05, + OPC2_32_RCR_MADDS_64 = 0x07, + OPC2_32_RCR_MADD_U_64 = 0x02, + OPC2_32_RCR_MADDS_U_32 = 0x04, + OPC2_32_RCR_MADDS_U_64 = 0x06, +}; +/* OPCM_32_RCR_MSUB */ +enum { + OPC2_32_RCR_MSUB_32 = 0x01, + OPC2_32_RCR_MSUB_64 = 0x03, + OPC2_32_RCR_MSUBS_32 = 0x05, + OPC2_32_RCR_MSUBS_64 = 0x07, + OPC2_32_RCR_MSUB_U_64 = 0x02, + OPC2_32_RCR_MSUBS_U_32 = 0x04, + OPC2_32_RCR_MSUBS_U_64 = 0x06, +}; +/* + * RCRW Format + */ +/* OPCM_32_RCRW_MASK_INSERT */ +enum { + OPC2_32_RCRW_IMASK = 0x01, + OPC2_32_RCRW_INSERT = 0x00, +}; + +/* + * RR Format + */ +/* OPCM_32_RR_LOGICAL_SHIFT */ +enum { + OPC2_32_RR_AND = 0x08, + OPC2_32_RR_ANDN = 0x0e, + OPC2_32_RR_CLO = 0x1c, + OPC2_32_RR_CLO_H = 0x7d, + OPC2_32_RR_CLS = 0x1d, + OPC2_32_RR_CLS_H = 0x7e, + OPC2_32_RR_CLZ = 0x1b, + OPC2_32_RR_CLZ_H = 0x7c, + OPC2_32_RR_NAND = 0x09, + OPC2_32_RR_NOR = 0x0b, + OPC2_32_RR_OR = 0x0a, + OPC2_32_RR_ORN = 0x0f, + OPC2_32_RR_SH = 0x00, + OPC2_32_RR_SH_H = 0x40, + OPC2_32_RR_SHA = 0x01, + OPC2_32_RR_SHA_H = 0x41, + OPC2_32_RR_SHAS = 0x02, + 
OPC2_32_RR_XNOR = 0x0d, + OPC2_32_RR_XOR = 0x0c, +}; +/* OPCM_32_RR_ACCUMULATOR */ +enum { + OPC2_32_RR_ABS = 0x1c, + OPC2_32_RR_ABS_B = 0x5c, + OPC2_32_RR_ABS_H = 0x7c, + OPC2_32_RR_ABSDIF = 0x0e, + OPC2_32_RR_ABSDIF_B = 0x4e, + OPC2_32_RR_ABSDIF_H = 0x6e, + OPC2_32_RR_ABSDIFS = 0x0f, + OPC2_32_RR_ABSDIFS_H = 0x6f, + OPC2_32_RR_ABSS = 0x1d, + OPC2_32_RR_ABSS_H = 0x7d, + OPC2_32_RR_ADD = 0x00, + OPC2_32_RR_ADD_B = 0x40, + OPC2_32_RR_ADD_H = 0x60, + OPC2_32_RR_ADDC = 0x05, + OPC2_32_RR_ADDS = 0x02, + OPC2_32_RR_ADDS_H = 0x62, + OPC2_32_RR_ADDS_HU = 0x63, + OPC2_32_RR_ADDS_U = 0x03, + OPC2_32_RR_ADDX = 0x04, + OPC2_32_RR_AND_EQ = 0x20, + OPC2_32_RR_AND_GE = 0x24, + OPC2_32_RR_AND_GE_U = 0x25, + OPC2_32_RR_AND_LT = 0x22, + OPC2_32_RR_AND_LT_U = 0x23, + OPC2_32_RR_AND_NE = 0x21, + OPC2_32_RR_EQ = 0x10, + OPC2_32_RR_EQ_B = 0x50, + OPC2_32_RR_EQ_H = 0x70, + OPC2_32_RR_EQ_W = 0x90, + OPC2_32_RR_EQANY_B = 0x56, + OPC2_32_RR_EQANY_H = 0x76, + OPC2_32_RR_GE = 0x14, + OPC2_32_RR_GE_U = 0x15, + OPC2_32_RR_LT = 0x12, + OPC2_32_RR_LT_U = 0x13, + OPC2_32_RR_LT_B = 0x52, + OPC2_32_RR_LT_BU = 0x53, + OPC2_32_RR_LT_H = 0x72, + OPC2_32_RR_LT_HU = 0x73, + OPC2_32_RR_LT_W = 0x92, + OPC2_32_RR_LT_WU = 0x93, + OPC2_32_RR_MAX = 0x1a, + OPC2_32_RR_MAX_U = 0x1b, + OPC2_32_RR_MAX_B = 0x5a, + OPC2_32_RR_MAX_BU = 0x5b, + OPC2_32_RR_MAX_H = 0x7a, + OPC2_32_RR_MAX_HU = 0x7b, + OPC2_32_RR_MIN = 0x18, + OPC2_32_RR_MIN_U = 0x19, + OPC2_32_RR_MIN_B = 0x58, + OPC2_32_RR_MIN_BU = 0x59, + OPC2_32_RR_MIN_H = 0x78, + OPC2_32_RR_MIN_HU = 0x79, + OPC2_32_RR_MOV = 0x1f, + OPC2_32_RR_NE = 0x11, + OPC2_32_RR_OR_EQ = 0x27, + OPC2_32_RR_OR_GE = 0x2b, + OPC2_32_RR_OR_GE_U = 0x2c, + OPC2_32_RR_OR_LT = 0x29, + OPC2_32_RR_OR_LT_U = 0x2a, + OPC2_32_RR_OR_NE = 0x28, + OPC2_32_RR_SAT_B = 0x5e, + OPC2_32_RR_SAT_BU = 0x5f, + OPC2_32_RR_SAT_H = 0x7e, + OPC2_32_RR_SAT_HU = 0x7f, + OPC2_32_RR_SH_EQ = 0x37, + OPC2_32_RR_SH_GE = 0x3b, + OPC2_32_RR_SH_GE_U = 0x3c, + OPC2_32_RR_SH_LT = 0x39, + OPC2_32_RR_SH_LT_U = 0x3a, + OPC2_32_RR_SH_NE = 0x38, + OPC2_32_RR_SUB = 0x08, + OPC2_32_RR_SUB_B = 0x48, + OPC2_32_RR_SUB_H = 0x68, + OPC2_32_RR_SUBC = 0x0d, + OPC2_32_RR_SUBS = 0x0a, + OPC2_32_RR_SUBS_U = 0x0b, + OPC2_32_RR_SUBS_H = 0x6a, + OPC2_32_RR_SUBS_HU = 0x6b, + OPC2_32_RR_SUBX = 0x0c, + OPC2_32_RR_XOR_EQ = 0x2f, + OPC2_32_RR_XOR_GE = 0x33, + OPC2_32_RR_XOR_GE_U = 0x34, + OPC2_32_RR_XOR_LT = 0x31, + OPC2_32_RR_XOR_LT_U = 0x32, + OPC2_32_RR_XOR_NE = 0x30, +}; +/* OPCM_32_RR_ADDRESS */ +enum { + OPC2_32_RR_ADD_A = 0x01, + OPC2_32_RR_ADDSC_A = 0x60, + OPC2_32_RR_ADDSC_AT = 0x62, + OPC2_32_RR_EQ_A = 0x40, + OPC2_32_RR_EQZ = 0x48, + OPC2_32_RR_GE_A = 0x43, + OPC2_32_RR_LT_A = 0x42, + OPC2_32_RR_MOV_A = 0x63, + OPC2_32_RR_MOV_AA = 0x00, + OPC2_32_RR_MOV_D = 0x4c, + OPC2_32_RR_NE_A = 0x41, + OPC2_32_RR_NEZ_A = 0x49, + OPC2_32_RR_SUB_A = 0x02, +}; +/* OPCM_32_RR_FLOAT */ +enum { + OPC2_32_RR_BMERGE = 0x01, + OPC2_32_RR_BSPLIT = 0x09, + OPC2_32_RR_DVINIT_B = 0x5a, + OPC2_32_RR_DVINIT_BU = 0x4a, + OPC2_32_RR_DVINIT_H = 0x3a, + OPC2_32_RR_DVINIT_HU = 0x2a, + OPC2_32_RR_DVINIT = 0x1a, + OPC2_32_RR_DVINIT_U = 0x0a, + OPC2_32_RR_PARITY = 0x02, + OPC2_32_RR_UNPACK = 0x08, + OPC2_32_RR_CRC32 = 0x03, + OPC2_32_RR_DIV = 0x20, + OPC2_32_RR_DIV_U = 0x21, +}; +/* OPCM_32_RR_IDIRECT */ +enum { + OPC2_32_RR_JI = 0x03, + OPC2_32_RR_JLI = 0x02, + OPC2_32_RR_CALLI = 0x00, + OPC2_32_RR_FCALLI = 0x01, +}; +/* + * RR1 Format + */ +/* OPCM_32_RR1_MUL */ +enum { + OPC2_32_RR1_MUL_H_32_LL = 0x1a, + OPC2_32_RR1_MUL_H_32_LU = 0x19, + OPC2_32_RR1_MUL_H_32_UL = 0x18, + 
OPC2_32_RR1_MUL_H_32_UU = 0x1b, + OPC2_32_RR1_MULM_H_64_LL = 0x1e, + OPC2_32_RR1_MULM_H_64_LU = 0x1d, + OPC2_32_RR1_MULM_H_64_UL = 0x1c, + OPC2_32_RR1_MULM_H_64_UU = 0x1f, + OPC2_32_RR1_MULR_H_16_LL = 0x0e, + OPC2_32_RR1_MULR_H_16_LU = 0x0d, + OPC2_32_RR1_MULR_H_16_UL = 0x0c, + OPC2_32_RR1_MULR_H_16_UU = 0x0f, +}; +/* OPCM_32_RR1_MULQ */ +enum { + OPC2_32_RR1_MUL_Q_32 = 0x02, + OPC2_32_RR1_MUL_Q_64 = 0x1b, + OPC2_32_RR1_MUL_Q_32_L = 0x01, + OPC2_32_RR1_MUL_Q_64_L = 0x19, + OPC2_32_RR1_MUL_Q_32_U = 0x00, + OPC2_32_RR1_MUL_Q_64_U = 0x18, + OPC2_32_RR1_MUL_Q_32_LL = 0x05, + OPC2_32_RR1_MUL_Q_32_UU = 0x04, + OPC2_32_RR1_MULR_Q_32_L = 0x07, + OPC2_32_RR1_MULR_Q_32_U = 0x06, +}; +/* + * RR2 Format + */ +/* OPCM_32_RR2_MUL */ +enum { + OPC2_32_RR2_MUL_32 = 0x0a, + OPC2_32_RR2_MUL_64 = 0x6a, + OPC2_32_RR2_MULS_32 = 0x8a, + OPC2_32_RR2_MUL_U_64 = 0x68, + OPC2_32_RR2_MULS_U_32 = 0x88, +}; +/* + * RRPW Format + */ +/* OPCM_32_RRPW_EXTRACT_INSERT */ +enum { + + OPC2_32_RRPW_EXTR = 0x02, + OPC2_32_RRPW_EXTR_U = 0x03, + OPC2_32_RRPW_IMASK = 0x01, + OPC2_32_RRPW_INSERT = 0x00, +}; +/* + * RRR Format + */ +/* OPCM_32_RRR_COND_SELECT */ +enum { + OPC2_32_RRR_CADD = 0x00, + OPC2_32_RRR_CADDN = 0x01, + OPC2_32_RRR_CSUB = 0x02, + OPC2_32_RRR_CSUBN = 0x03, + OPC2_32_RRR_SEL = 0x04, + OPC2_32_RRR_SELN = 0x05, +}; +/* OPCM_32_RRR_FLOAT */ +enum { + OPC2_32_RRR_DVADJ = 0x0d, + OPC2_32_RRR_DVSTEP = 0x0f, + OPC2_32_RRR_DVSTEP_U = 0x0e, + OPC2_32_RRR_IXMAX = 0x0a, + OPC2_32_RRR_IXMAX_U = 0x0b, + OPC2_32_RRR_IXMIN = 0x08, + OPC2_32_RRR_IXMIN_U = 0x09, + OPC2_32_RRR_PACK = 0x00, +}; +/* + * RRR1 Format + */ +/* OPCM_32_RRR1_MADD */ +enum { + OPC2_32_RRR1_MADD_H_LL = 0x1a, + OPC2_32_RRR1_MADD_H_LU = 0x19, + OPC2_32_RRR1_MADD_H_UL = 0x18, + OPC2_32_RRR1_MADD_H_UU = 0x1b, + OPC2_32_RRR1_MADDS_H_LL = 0x3a, + OPC2_32_RRR1_MADDS_H_LU = 0x39, + OPC2_32_RRR1_MADDS_H_UL = 0x38, + OPC2_32_RRR1_MADDS_H_UU = 0x3b, + OPC2_32_RRR1_MADDM_H_LL = 0x1e, + OPC2_32_RRR1_MADDM_H_LU = 0x1d, + OPC2_32_RRR1_MADDM_H_UL = 0x1c, + OPC2_32_RRR1_MADDM_H_UU = 0x1f, + OPC2_32_RRR1_MADDMS_H_LL = 0x3e, + OPC2_32_RRR1_MADDMS_H_LU = 0x3d, + OPC2_32_RRR1_MADDMS_H_UL = 0x3c, + OPC2_32_RRR1_MADDMS_H_UU = 0x3f, + OPC2_32_RRR1_MADDR_H_LL = 0x0e, + OPC2_32_RRR1_MADDR_H_LU = 0x0d, + OPC2_32_RRR1_MADDR_H_UL = 0x0c, + OPC2_32_RRR1_MADDR_H_UU = 0x0f, + OPC2_32_RRR1_MADDRS_H_LL = 0x2e, + OPC2_32_RRR1_MADDRS_H_LU = 0x2d, + OPC2_32_RRR1_MADDRS_H_UL = 0x2c, + OPC2_32_RRR1_MADDRS_H_UU = 0x2f, +}; +/* OPCM_32_RRR1_MADDQ_H */ +enum { + OPC2_32_RRR1_MADD_Q_32 = 0x02, + OPC2_32_RRR1_MADD_Q_64 = 0x1b, + OPC2_32_RRR1_MADD_Q_32_L = 0x01, + OPC2_32_RRR1_MADD_Q_64_L = 0x19, + OPC2_32_RRR1_MADD_Q_32_U = 0x00, + OPC2_32_RRR1_MADD_Q_64_U = 0x18, + OPC2_32_RRR1_MADD_Q_32_LL = 0x05, + OPC2_32_RRR1_MADD_Q_64_LL = 0x1d, + OPC2_32_RRR1_MADD_Q_32_UU = 0x04, + OPC2_32_RRR1_MADD_Q_64_UU = 0x1c, + OPC2_32_RRR1_MADDS_Q_32 = 0x22, + OPC2_32_RRR1_MADDS_Q_64 = 0x3b, + OPC2_32_RRR1_MADDS_Q_32_L = 0x21, + OPC2_32_RRR1_MADDS_Q_64_L = 0x39, + OPC2_32_RRR1_MADDS_Q_32_U = 0x20, + OPC2_32_RRR1_MADDS_Q_64_U = 0x38, + OPC2_32_RRR1_MADDS_Q_32_LL = 0x25, + OPC2_32_RRR1_MADDS_Q_64_LL = 0x3d, + OPC2_32_RRR1_MADDS_Q_32_UU = 0x24, + OPC2_32_RRR1_MADDS_Q_64_UU = 0x3c, + OPC2_32_RRR1_MADDR_H_64_UL = 0x1e, + OPC2_32_RRR1_MADDRS_H_64_UL = 0x3e, + OPC2_32_RRR1_MADDR_Q_32_LL = 0x07, + OPC2_32_RRR1_MADDR_Q_32_UU = 0x06, + OPC2_32_RRR1_MADDRS_Q_32_LL = 0x27, + OPC2_32_RRR1_MADDRS_Q_32_UU = 0x26, +}; +/* OPCM_32_RRR1_MADDSU_H */ +enum { + OPC2_32_RRR1_MADDSU_H_32_LL = 0x1a, + OPC2_32_RRR1_MADDSU_H_32_LU = 0x19, + 
OPC2_32_RRR1_MADDSU_H_32_UL = 0x18, + OPC2_32_RRR1_MADDSU_H_32_UU = 0x1b, + OPC2_32_RRR1_MADDSUS_H_32_LL = 0x3a, + OPC2_32_RRR1_MADDSUS_H_32_LU = 0x39, + OPC2_32_RRR1_MADDSUS_H_32_UL = 0x38, + OPC2_32_RRR1_MADDSUS_H_32_UU = 0x3b, + OPC2_32_RRR1_MADDSUM_H_64_LL = 0x1e, + OPC2_32_RRR1_MADDSUM_H_64_LU = 0x1d, + OPC2_32_RRR1_MADDSUM_H_64_UL = 0x1c, + OPC2_32_RRR1_MADDSUM_H_64_UU = 0x1f, + OPC2_32_RRR1_MADDSUMS_H_64_LL = 0x3e, + OPC2_32_RRR1_MADDSUMS_H_64_LU = 0x3d, + OPC2_32_RRR1_MADDSUMS_H_64_UL = 0x3c, + OPC2_32_RRR1_MADDSUMS_H_64_UU = 0x3f, + OPC2_32_RRR1_MADDSUR_H_16_LL = 0x0e, + OPC2_32_RRR1_MADDSUR_H_16_LU = 0x0d, + OPC2_32_RRR1_MADDSUR_H_16_UL = 0x0c, + OPC2_32_RRR1_MADDSUR_H_16_UU = 0x0f, + OPC2_32_RRR1_MADDSURS_H_16_LL = 0x2e, + OPC2_32_RRR1_MADDSURS_H_16_LU = 0x2d, + OPC2_32_RRR1_MADDSURS_H_16_UL = 0x2c, + OPC2_32_RRR1_MADDSURS_H_16_UU = 0x2f, +}; +/* OPCM_32_RRR1_MSUB_H */ +enum { + OPC2_32_RRR1_MSUB_H_LL = 0x1a, + OPC2_32_RRR1_MSUB_H_LU = 0x19, + OPC2_32_RRR1_MSUB_H_UL = 0x18, + OPC2_32_RRR1_MSUB_H_UU = 0x1b, + OPC2_32_RRR1_MSUBS_H_LL = 0x3a, + OPC2_32_RRR1_MSUBS_H_LU = 0x39, + OPC2_32_RRR1_MSUBS_H_UL = 0x38, + OPC2_32_RRR1_MSUBS_H_UU = 0x3b, + OPC2_32_RRR1_MSUBM_H_LL = 0x1e, + OPC2_32_RRR1_MSUBM_H_LU = 0x1d, + OPC2_32_RRR1_MSUBM_H_UL = 0x1c, + OPC2_32_RRR1_MSUBM_H_UU = 0x1f, + OPC2_32_RRR1_MSUBMS_H_LL = 0x3e, + OPC2_32_RRR1_MSUBMS_H_LU = 0x3d, + OPC2_32_RRR1_MSUBMS_H_UL = 0x3c, + OPC2_32_RRR1_MSUBMS_H_UU = 0x3f, + OPC2_32_RRR1_MSUBR_H_LL = 0x0e, + OPC2_32_RRR1_MSUBR_H_LU = 0x0d, + OPC2_32_RRR1_MSUBR_H_UL = 0x0c, + OPC2_32_RRR1_MSUBR_H_UU = 0x0f, + OPC2_32_RRR1_MSUBRS_H_LL = 0x2e, + OPC2_32_RRR1_MSUBRS_H_LU = 0x2d, + OPC2_32_RRR1_MSUBRS_H_UL = 0x2c, + OPC2_32_RRR1_MSUBRS_H_UU = 0x2f, +}; +/* OPCM_32_RRR1_MSUB_Q */ +enum { + OPC2_32_RRR1_MSUB_Q_32 = 0x02, + OPC2_32_RRR1_MSUB_Q_64 = 0x1b, + OPC2_32_RRR1_MSUB_Q_32_L = 0x01, + OPC2_32_RRR1_MSUB_Q_64_L = 0x19, + OPC2_32_RRR1_MSUB_Q_32_U = 0x00, + OPC2_32_RRR1_MSUB_Q_64_U = 0x18, + OPC2_32_RRR1_MSUB_Q_32_LL = 0x05, + OPC2_32_RRR1_MSUB_Q_64_LL = 0x1d, + OPC2_32_RRR1_MSUB_Q_32_UU = 0x04, + OPC2_32_RRR1_MSUB_Q_64_UU = 0x1c, + OPC2_32_RRR1_MSUBS_Q_32 = 0x22, + OPC2_32_RRR1_MSUBS_Q_64 = 0x3b, + OPC2_32_RRR1_MSUBS_Q_32_L = 0x21, + OPC2_32_RRR1_MSUBS_Q_64_L = 0x39, + OPC2_32_RRR1_MSUBS_Q_32_U = 0x20, + OPC2_32_RRR1_MSUBS_Q_64_U = 0x38, + OPC2_32_RRR1_MSUBS_Q_32_LL = 0x25, + OPC2_32_RRR1_MSUBS_Q_64_LL = 0x3d, + OPC2_32_RRR1_MSUBS_Q_32_UU = 0x24, + OPC2_32_RRR1_MSUBS_Q_64_UU = 0x3c, + OPC2_32_RRR1_MSUBR_H_64_UL = 0x1e, + OPC2_32_RRR1_MSUBRS_H_64_UL = 0x3e, + OPC2_32_RRR1_MSUBR_Q_32_LL = 0x07, + OPC2_32_RRR1_MSUBR_Q_32_UU = 0x06, + OPC2_32_RRR1_MSUBRS_Q_32_LL = 0x27, + OPC2_32_RRR1_MSUBRS_Q_32_UU = 0x26, +}; +/* OPCM_32_RRR1_MSUBADS_H */ +enum { + OPC2_32_RRR1_MSUBAD_H_32_LL = 0x1a, + OPC2_32_RRR1_MSUBAD_H_32_LU = 0x19, + OPC2_32_RRR1_MSUBAD_H_32_UL = 0x18, + OPC2_32_RRR1_MSUBAD_H_32_UU = 0x1b, + OPC2_32_RRR1_MSUBADS_H_32_LL = 0x3a, + OPC2_32_RRR1_MSUBADS_H_32_LU = 0x39, + OPC2_32_RRR1_MSUBADS_H_32_UL = 0x38, + OPC2_32_RRR1_MSUBADS_H_32_UU = 0x3b, + OPC2_32_RRR1_MSUBADM_H_64_LL = 0x1e, + OPC2_32_RRR1_MSUBADM_H_64_LU = 0x1d, + OPC2_32_RRR1_MSUBADM_H_64_UL = 0x1c, + OPC2_32_RRR1_MSUBADM_H_64_UU = 0x1f, + OPC2_32_RRR1_MSUBADMS_H_64_LL = 0x3e, + OPC2_32_RRR1_MSUBADMS_H_64_LU = 0x3d, + OPC2_32_RRR1_MSUBADMS_H_64_UL = 0x3c, + OPC2_32_RRR1_MSUBADMS_H_64_UU = 0x3f, + OPC2_32_RRR1_MSUBADR_H_16_LL = 0x0e, + OPC2_32_RRR1_MSUBADR_H_16_LU = 0x0d, + OPC2_32_RRR1_MSUBADR_H_16_UL = 0x0c, + OPC2_32_RRR1_MSUBADR_H_16_UU = 0x0f, + OPC2_32_RRR1_MSUBADRS_H_16_LL = 
0x2e, + OPC2_32_RRR1_MSUBADRS_H_16_LU = 0x2d, + OPC2_32_RRR1_MSUBADRS_H_16_UL = 0x2c, + OPC2_32_RRR1_MSUBADRS_H_16_UU = 0x2f, +}; +/* + * RRR2 Format + */ +/* OPCM_32_RRR2_MADD */ +enum { + OPC2_32_RRR2_MADD_32 = 0x0a, + OPC2_32_RRR2_MADD_64 = 0x6a, + OPC2_32_RRR2_MADDS_32 = 0x8a, + OPC2_32_RRR2_MADDS_64 = 0xea, + OPC2_32_RRR2_MADD_U_64 = 0x68, + OPC2_32_RRR2_MADDS_U_32 = 0x88, + OPC2_32_RRR2_MADDS_U_64 = 0xe8, +}; +/* OPCM_32_RRR2_MSUB */ +enum { + OPC2_32_RRR2_MSUB_32 = 0x0a, + OPC2_32_RRR2_MSUB_64 = 0x6a, + OPC2_32_RRR2_MSUBS_32 = 0x8a, + OPC2_32_RRR2_MSUBS_64 = 0xea, + OPC2_32_RRR2_MSUB_U_64 = 0x68, + OPC2_32_RRR2_MSUBS_U_32 = 0x88, + OPC2_32_RRR2_MSUBS_U_64 = 0xe8, +}; +/* + * RRRR Format + */ +/* OPCM_32_RRRR_EXTRACT_INSERT */ +enum { + OPC2_32_RRRR_DEXTR = 0x04, + OPC2_32_RRRR_EXTR = 0x02, + OPC2_32_RRRR_EXTR_U = 0x03, + OPC2_32_RRRR_INSERT = 0x00, +}; +/* + * RRRW Format + */ +/* OPCM_32_RRRW_EXTRACT_INSERT */ +enum { + OPC2_32_RRRW_EXTR = 0x02, + OPC2_32_RRRW_EXTR_U = 0x03, + OPC2_32_RRRW_IMASK = 0x01, + OPC2_32_RRRW_INSERT = 0x00, +}; +/* + * SYS Format + */ +/* OPCM_32_SYS_INTERRUPTS */ +enum { + OPC2_32_SYS_DEBUG = 0x04, + OPC2_32_SYS_DISABLE = 0x0d, + OPC2_32_SYS_DSYNC = 0x12, + OPC2_32_SYS_ENABLE = 0x0c, + OPC2_32_SYS_ISYNC = 0x13, + OPC2_32_SYS_NOP = 0x00, + OPC2_32_SYS_RET = 0x06, + OPC2_32_SYS_RFE = 0x07, + OPC2_32_SYS_RFM = 0x05, + OPC2_32_SYS_RSLCX = 0x09, + OPC2_32_SYS_SVLCX = 0x08, + OPC2_32_SYS_TRAPSV = 0x15, + OPC2_32_SYS_TRAPV = 0x14, + OPC2_32_SYS_RESTORE = 0x0e, + OPC2_32_SYS_FRET = 0x03, +}; |
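
Note on the field-extraction macros above: each MASK_OP_<FORMAT>_<FIELD> macro pulls one named field of an instruction word out of its bit positions, the *_SEXT variants sign-extend it, and long immediates such as the BOL off16 are scattered over several bit ranges and reassembled by adding the pieces back at their logical weights. The snippet below is a standalone sketch of that behaviour, not part of the patch: mask_bits_shift()/mask_bits_shift_sext() are stand-ins for the MASK_BITS_SHIFT/MASK_BITS_SHIFT_SEXT helpers defined earlier in tricore-opcodes.h (their exact definitions are outside this hunk, so the semantics assumed here are an interpretation), bol_off16_sext() mirrors MASK_OP_BOL_OFF16_SEXT, and bol_encode_off16() is a made-up helper used only to build a test word.

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics of MASK_BITS_SHIFT: bits [hi:lo] of op, shifted down. */
static uint32_t mask_bits_shift(uint32_t op, int lo, int hi)
{
    return (op >> lo) & ((1u << (hi - lo + 1)) - 1u);
}

/* Assumed semantics of MASK_BITS_SHIFT_SEXT: same field, sign-extended. */
static int32_t mask_bits_shift_sext(uint32_t op, int lo, int hi)
{
    uint32_t v = mask_bits_shift(op, lo, hi);
    uint32_t sign = 1u << (hi - lo);              /* top bit of the field */
    return (int32_t)((v ^ sign) - sign);
}

/* Mirror of MASK_OP_BOL_OFF16_SEXT: off16 lives in insn[21:16] (bits 5:0),
 * insn[31:28] (bits 9:6) and insn[27:22] (bits 15:10, sign-extended). */
static int32_t bol_off16_sext(uint32_t op)
{
    return (int32_t)(mask_bits_shift(op, 16, 21)
                     + (mask_bits_shift(op, 28, 31) << 6)
                     + ((uint32_t)mask_bits_shift_sext(op, 22, 27) << 10));
}

/* Hypothetical encoder, only here to build a test word for the demo. */
static uint32_t bol_encode_off16(uint32_t insn, int32_t off16)
{
    uint32_t u = (uint32_t)off16 & 0xffffu;
    insn |= (u & 0x3fu) << 16;                    /* off16[5:0]   -> insn[21:16] */
    insn |= ((u >> 6) & 0xfu) << 28;              /* off16[9:6]   -> insn[31:28] */
    insn |= ((u >> 10) & 0x3fu) << 22;            /* off16[15:10] -> insn[27:22] */
    return insn;
}

int main(void)
{
    /* Hand-built BOL word: op1 = OPC1_32_BOL_LD_W_LONGOFF (0x19),
     * S1D = d1 (insn[11:8]), S2 = a2 (insn[15:12]), off16 = -8. */
    uint32_t insn = bol_encode_off16(0x19u | (1u << 8) | (2u << 12), -8);

    printf("op1=0x%02x s1d=d%u s2=a%u off16=%d\n",
           mask_bits_shift(insn, 0, 7),
           mask_bits_shift(insn, 8, 11),
           mask_bits_shift(insn, 12, 15),
           bol_off16_sext(insn));                 /* op1=0x19 s1d=d1 s2=a2 off16=-8 */
    return 0;
}

The offset is split up like this presumably because the register fields keep fixed positions across formats (S1D at insn[11:8], S2 at insn[15:12], as the masks above show), so the immediate has to occupy whatever bit ranges remain.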
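
Note on the opcode enums: per the OPC(1|2|M) naming comment above, the low byte of an instruction word (op1) either identifies the instruction completely (OPC1_*) or only selects a group (OPCM_*) whose members are then distinguished by a secondary op2 field whose position depends on the format (for RR-format instructions, MASK_OP_RR_OP2 is insn[27:20]). The listed values also show that every 16-bit primary opcode is even and every 32-bit one is odd, i.e. bit 0 of op1 separates the two instruction lengths. The sketch below illustrates that two-level dispatch for a single RR group; it is an illustration only, not the decoder added by this patch (that lives in translate.c), and it re-declares the few constants it needs with values copied from the enums above.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the OPCM/OPC2 enums in this header. */
enum {
    OPCM_32_RR_ACCUMULATOR = 0x0b,   /* op1 byte grouping the RR accumulator ops */
    OPC2_32_RR_ADD         = 0x00,
    OPC2_32_RR_SUB         = 0x08,
};

static void decode(uint32_t insn)
{
    uint32_t op1 = insn & 0xffu;                 /* primary opcode byte */

    /* Bit 0 of op1: all 16-bit OPC1/OPCM values above are even and all
     * 32-bit ones odd, so an even op1 means a 16-bit encoding. */
    if (!(op1 & 1u)) {
        printf("16-bit insn, op1=0x%02x (not handled in this sketch)\n", op1);
        return;
    }

    switch (op1) {
    case OPCM_32_RR_ACCUMULATOR: {
        uint32_t op2 = (insn >> 20) & 0xffu;     /* MASK_OP_RR_OP2, insn[27:20] */
        uint32_t s1  = (insn >> 8)  & 0xfu;      /* MASK_OP_RR_S1,  insn[11:8]  */
        uint32_t s2  = (insn >> 12) & 0xfu;      /* MASK_OP_RR_S2,  insn[15:12] */
        uint32_t d   = (insn >> 28) & 0xfu;      /* MASK_OP_RR_D,   insn[31:28] */

        if (op2 == OPC2_32_RR_ADD) {
            printf("add  d%u, d%u, d%u\n", d, s1, s2);
        } else if (op2 == OPC2_32_RR_SUB) {
            printf("sub  d%u, d%u, d%u\n", d, s1, s2);
        } else {
            printf("rr-accumulator op2=0x%02x not handled in this sketch\n", op2);
        }
        break;
    }
    default:
        printf("op1=0x%02x not handled in this sketch\n", op1);
        break;
    }
}

int main(void)
{
    /* Hand-built "add d3, d1, d2": op1 = OPCM_32_RR_ACCUMULATOR,
     * s1 = d1, s2 = d2, op2 = OPC2_32_RR_ADD, d = d3. */
    decode(0x0bu | (1u << 8) | (2u << 12)
                 | ((uint32_t)OPC2_32_RR_ADD << 20) | (3u << 28));
    return 0;
}

Compiled and run, the example prints "add  d3, d1, d2"; swapping OPC2_32_RR_ADD for OPC2_32_RR_SUB in main() exercises the other branch of the same group.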