authornotaz2011-09-02 17:44:41 +0300
committernotaz2011-09-02 17:44:41 +0300
commitd5e0983c10e0ca717603dd1a406ff0d6e450e905 (patch)
tree99632bed2f11849a5709fb28c4d360e8efa71b09 /arm
parentc3c98c2f7c4d6cb73d3f038e2730ebe791d4492e (diff)
downloadpicogpsp-d5e0983c10e0ca717603dd1a406ff0d6e450e905.tar.gz
picogpsp-d5e0983c10e0ca717603dd1a406ff0d6e450e905.tar.bz2
picogpsp-d5e0983c10e0ca717603dd1a406ff0d6e450e905.zip
move platform-independent stuff out of /gp2x.
Diffstat (limited to 'arm')
-rw-r--r--arm/arm_codegen.h1392
-rw-r--r--arm/arm_dpimacros.h1661
-rw-r--r--arm/arm_emit.h1966
-rw-r--r--arm/arm_stub.S1004
-rw-r--r--arm/video_blend.S181
5 files changed, 6204 insertions, 0 deletions
diff --git a/arm/arm_codegen.h b/arm/arm_codegen.h
new file mode 100644
index 0000000..42b8795
--- /dev/null
+++ b/arm/arm_codegen.h
@@ -0,0 +1,1392 @@
+/*
+ * arm-codegen.h
+ *
+ * Copyright (c) 2002 Wild West Software
+ * Copyright (c) 2001, 2002 Sergey Chaban
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#ifndef ARM_CG_H
+#define ARM_CG_H
+
+typedef unsigned long arminstr_t;
+typedef unsigned long armword_t;
+
+/* Helper functions */
+/*void arm_emit_std_prologue(cg_segment_t * segment, unsigned int local_size);
+void arm_emit_std_epilogue(cg_segment_t * segment, unsigned int local_size, int pop_regs);
+void arm_emit_lean_prologue(cg_segment_t * segment, unsigned int local_size, int push_regs);
+int arm_is_power_of_2(armword_t val);
+int calc_arm_mov_const_shift(armword_t val);
+int is_arm_const(armword_t val);
+int arm_bsf(armword_t val);
+void arm_mov_reg_imm32_cond(cg_segment_t * segment, int reg, armword_t imm32, int cond);
+void arm_mov_reg_imm32(cg_segment_t * segment, int reg, armword_t imm32);*/
+
+
+//*** check for correctness ***
+//extern u32* x86Ptr;
+
+void write_to_file(u32 val);
+
+//#define write32(val) { *(u32 *)translation_ptr = val; write_to_file(*(u32 *)translation_ptr); translation_ptr += 4; }
+
+//#define write32(val) { if( g_PcWatch.IsReset == RECRESET_OFF ) { *(u32*)pCurPage->pCodeCurrent = val; pCurPage->pCodeCurrent +=4; if( (u32)pCurPage->pCodeCurrent >= (u32)pCurPage->pCodeEnd ) { g_PcWatch.IsReset = RECRESET_START; recResize(); g_PcWatch.IsReset = RECRESET_END; return; } }else{ if( g_PcWatch.IsReset == RECRESET_END ){ g_PcWatch.IsReset = RECRESET_OFF; return; } } }
+//#define write32_ret(val) { if( g_PcWatch.IsReset == RECRESET_OFF ) { *(u32*)pCurPage->pCodeCurrent = val; pCurPage->pCodeCurrent +=4; if( (u32)pCurPage->pCodeCurrent >= (u32)pCurPage->pCodeEnd ) { g_PcWatch.IsReset = RECRESET_START; recResize(); g_PcWatch.IsReset = RECRESET_END; return 0; } }else{ if( g_PcWatch.IsReset == RECRESET_END ){ g_PcWatch.IsReset = RECRESET_OFF; return 0; } } }
+//#define write32(val) { *(u32*)pCurPage->pCodeCurrent = val; pCurPage->pCodeCurrent +=4; }
+
+#define ARM_EMIT(p, i) write32(i);
+//write32(i);
+/*{ *(u32*)translation_ptr = (i); translation_ptr += 4; } */
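+/* Note: ARM_EMIT ignores its 'p' argument and relies on a write32() macro or
+ * function supplied by the including file; the commented-out variants above
+ * show the forms this codebase has used.  A minimal sketch of the expected
+ * contract (assuming a local 'translation_ptr' code cursor) would be:
+ *   #define write32(val) { *(u32 *)translation_ptr = (val); translation_ptr += 4; }
+ */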
+
+#if defined(GIZMONDO) || defined(POCKETPC) /* Implemented but not working right yet for PPC */
+
+// --------------------------------------------------------------------------
+// These declarations for coredll are extracted from platform builder
+// source code
+// --------------------------------------------------------------------------
+
+/* Flags for CacheSync/CacheRangeFlush */
+#define CACHE_SYNC_DISCARD 0x001 /* write back & discard all cached data */
+#define CACHE_SYNC_INSTRUCTIONS 0x002 /* discard all cached instructions */
+#define CACHE_SYNC_WRITEBACK 0x004 /* write back but don't discard data cache*/
+#define CACHE_SYNC_FLUSH_I_TLB 0x008 /* flush I-TLB */
+#define CACHE_SYNC_FLUSH_D_TLB 0x010 /* flush D-TLB */
+#define CACHE_SYNC_FLUSH_TLB (CACHE_SYNC_FLUSH_I_TLB|CACHE_SYNC_FLUSH_D_TLB) /* flush all TLB */
+#define CACHE_SYNC_L2_WRITEBACK 0x020 /* write-back L2 Cache */
+#define CACHE_SYNC_L2_DISCARD 0x040 /* discard L2 Cache */
+
+#define CACHE_SYNC_ALL 0x07F /* sync and discard everything in Cache/TLB */
+
+extern "C" {
+ void CacheSync(int flags);
+}
+#define CLEAR_INSN_CACHE(BEG, END) CacheSync(CACHE_SYNC_INSTRUCTIONS | CACHE_SYNC_WRITEBACK);
+
+#else
+
+#if 0
+#define CLEAR_INSN_CACHE(BEG, END) \
+{ \
+ register unsigned long _beg __asm ("a1") = (unsigned long) (BEG); \
+ register unsigned long _end __asm ("a2") = (unsigned long) (END); \
+ register unsigned long _flg __asm ("a3") = 0; \
+ register unsigned long _scno __asm ("r7") = 0xf0002; \
+ __asm __volatile ("swi 0x9f0002 @ sys_cacheflush" \
+ : "=r" (_beg) \
+ : "0" (_beg), "r" (_end), "r" (_flg), "r" (_scno)); \
+}
+
+#endif
+
+#endif
+
+#if defined(_MSC_VER) && !defined(ARM_NOIASM)
+# define ARM_IASM(_expr) __emit (_expr)
+#else
+# define ARM_IASM(_expr)
+#endif
+
+/* even_scale = rot << 1 */
+#define ARM_SCALE(imm8, even_scale) ( ((imm8) >> (even_scale)) | ((imm8) << (32 - (even_scale))) )
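+/* Worked example: ARM_SCALE computes imm8 ROR even_scale, i.e. it decodes a
+ * DPI rotated immediate.  ARM_SCALE(0xFF, 8) = (0xFF >> 8) | (0xFF << 24)
+ * = 0xFF000000, the constant described by imm8 = 0xFF with a 4-bit rotate
+ * field of 4 (rot field = even_scale >> 1). */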
+
+
+
+typedef enum {
+ ARMREG_R0 = 0,
+ ARMREG_R1,
+ ARMREG_R2,
+ ARMREG_R3,
+ ARMREG_R4,
+ ARMREG_R5,
+ ARMREG_R6,
+ ARMREG_R7,
+ ARMREG_R8,
+ ARMREG_R9,
+ ARMREG_R10,
+ ARMREG_R11,
+ ARMREG_R12,
+ ARMREG_R13,
+ ARMREG_R14,
+ ARMREG_R15,
+
+
+ /* aliases */
+ /* args */
+ ARMREG_A1 = ARMREG_R0,
+ ARMREG_A2 = ARMREG_R1,
+ ARMREG_A3 = ARMREG_R2,
+ ARMREG_A4 = ARMREG_R3,
+
+ /* local vars */
+ ARMREG_V1 = ARMREG_R4,
+ ARMREG_V2 = ARMREG_R5,
+ ARMREG_V3 = ARMREG_R6,
+ ARMREG_V4 = ARMREG_R7,
+ ARMREG_V5 = ARMREG_R8,
+ ARMREG_V6 = ARMREG_R9,
+ ARMREG_V7 = ARMREG_R10,
+
+ ARMREG_FP = ARMREG_R11,
+ ARMREG_IP = ARMREG_R12,
+ ARMREG_SP = ARMREG_R13,
+ ARMREG_LR = ARMREG_R14,
+ ARMREG_PC = ARMREG_R15,
+
+ /* FPU */
+ ARMREG_F0 = 0,
+ ARMREG_F1,
+ ARMREG_F2,
+ ARMREG_F3,
+ ARMREG_F4,
+ ARMREG_F5,
+ ARMREG_F6,
+ ARMREG_F7,
+
+ /* co-processor */
+ ARMREG_CR0 = 0,
+ ARMREG_CR1,
+ ARMREG_CR2,
+ ARMREG_CR3,
+ ARMREG_CR4,
+ ARMREG_CR5,
+ ARMREG_CR6,
+ ARMREG_CR7,
+ ARMREG_CR8,
+ ARMREG_CR9,
+ ARMREG_CR10,
+ ARMREG_CR11,
+ ARMREG_CR12,
+ ARMREG_CR13,
+ ARMREG_CR14,
+ ARMREG_CR15,
+
+ /* XScale: acc0 on CP0 */
+ ARMREG_ACC0 = ARMREG_CR0,
+
+ ARMREG_MAX = ARMREG_R15,
+
+ /* flags */
+ ARMREG_CPSR = 0,
+ ARMREG_SPSR = 1
+} ARMReg;
+
+typedef enum {
+ ARM_FCONST_0_0 = 8,
+ ARM_FCONST_1_0,
+ ARM_FCONST_2_0,
+ ARM_FCONST_3_0,
+ ARM_FCONST_4_0,
+ ARM_FCONST_5_0,
+ ARM_FCONST_0_5,
+ ARM_FCONST_10_0
+} ARMFPUConst;
+
+/* number of argument registers */
+#define ARM_NUM_ARG_REGS 4
+
+/* number of non-argument registers */
+#define ARM_NUM_VARIABLE_REGS 7
+
+/* number of global registers */
+#define ARM_NUM_GLOBAL_REGS 5
+
+/* bitvector for all argument regs (A1-A4) */
+#define ARM_ALL_ARG_REGS \
+ (1 << ARMREG_A1) | (1 << ARMREG_A2) | (1 << ARMREG_A3) | (1 << ARMREG_A4)
+
+
+typedef enum {
+ ARMCOND_EQ = 0x0, /* Equal; Z = 1 */
+ ARMCOND_NE = 0x1, /* Not equal, or unordered; Z = 0 */
+ ARMCOND_CS = 0x2, /* Carry set; C = 1 */
+	ARMCOND_HS = ARMCOND_CS,	/* Unsigned higher or same; C = 1 */
+	ARMCOND_CC = 0x3,		/* Carry clear; C = 0 */
+	ARMCOND_LO = ARMCOND_CC,	/* Unsigned lower; C = 0 */
+ ARMCOND_MI = 0x4, /* Negative; N = 1 */
+ ARMCOND_PL = 0x5, /* Positive or zero; N = 0 */
+ ARMCOND_VS = 0x6, /* Overflow; V = 1 */
+ ARMCOND_VC = 0x7, /* No overflow; V = 0 */
+ ARMCOND_HI = 0x8, /* Unsigned higher; C = 1 && Z = 0 */
+ ARMCOND_LS = 0x9, /* Unsigned lower or same; C = 0 || Z = 1 */
+ ARMCOND_GE = 0xA, /* Signed greater than or equal; N = V */
+ ARMCOND_LT = 0xB, /* Signed less than; N != V */
+ ARMCOND_GT = 0xC, /* Signed greater than; Z = 0 && N = V */
+ ARMCOND_LE = 0xD, /* Signed less than or equal; Z = 1 && N != V */
+ ARMCOND_AL = 0xE, /* Always */
+ ARMCOND_NV = 0xF, /* Never */
+
+ ARMCOND_SHIFT = 28
+} ARMCond;
+
+#define ARMCOND_MASK (ARMCOND_NV << ARMCOND_SHIFT)
+
+#define ARM_DEF_COND(cond) (((cond) & 0xF) << ARMCOND_SHIFT)
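+/* Example: ARM_DEF_COND places the condition in bits 31..28 of the word, so
+ * ARM_DEF_COND(ARMCOND_AL) == 0xE0000000 and ARM_DEF_COND(ARMCOND_EQ) == 0. */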
+
+
+
+typedef enum {
+ ARMSHIFT_LSL = 0,
+ ARMSHIFT_LSR = 1,
+ ARMSHIFT_ASR = 2,
+ ARMSHIFT_ROR = 3,
+
+ ARMSHIFT_ASL = ARMSHIFT_LSL
+	/* rrx = (ror, 0): ROR with a zero immediate shift amount encodes RRX */
+} ARMShiftType;
+
+
+typedef struct {
+ armword_t PSR_c : 8;
+ armword_t PSR_x : 8;
+ armword_t PSR_s : 8;
+ armword_t PSR_f : 8;
+} ARMPSR;
+
+typedef enum {
+ ARMOP_AND = 0x0,
+ ARMOP_EOR = 0x1,
+ ARMOP_SUB = 0x2,
+ ARMOP_RSB = 0x3,
+ ARMOP_ADD = 0x4,
+ ARMOP_ADC = 0x5,
+ ARMOP_SBC = 0x6,
+ ARMOP_RSC = 0x7,
+ ARMOP_TST = 0x8,
+ ARMOP_TEQ = 0x9,
+ ARMOP_CMP = 0xa,
+ ARMOP_CMN = 0xb,
+ ARMOP_ORR = 0xc,
+ ARMOP_MOV = 0xd,
+ ARMOP_BIC = 0xe,
+ ARMOP_MVN = 0xf,
+
+
+ /* not really opcodes */
+
+ ARMOP_STR = 0x0,
+ ARMOP_LDR = 0x1,
+
+ /* ARM2+ */
+ ARMOP_MUL = 0x0, /* Rd := Rm*Rs */
+ ARMOP_MLA = 0x1, /* Rd := (Rm*Rs)+Rn */
+
+ /* ARM3M+ */
+ ARMOP_UMULL = 0x4,
+ ARMOP_UMLAL = 0x5,
+ ARMOP_SMULL = 0x6,
+ ARMOP_SMLAL = 0x7,
+
+ /* for data transfers with register offset */
+ ARM_UP = 1,
+ ARM_DOWN = 0
+} ARMOpcode;
+
+typedef enum {
+ THUMBOP_AND = 0,
+ THUMBOP_EOR = 1,
+ THUMBOP_LSL = 2,
+ THUMBOP_LSR = 3,
+ THUMBOP_ASR = 4,
+ THUMBOP_ADC = 5,
+ THUMBOP_SBC = 6,
+ THUMBOP_ROR = 7,
+ THUMBOP_TST = 8,
+ THUMBOP_NEG = 9,
+ THUMBOP_CMP = 10,
+ THUMBOP_CMN = 11,
+ THUMBOP_ORR = 12,
+ THUMBOP_MUL = 13,
+ THUMBOP_BIC = 14,
+ THUMBOP_MVN = 15,
+ THUMBOP_MOV = 16,
+ THUMBOP_CMPI = 17,
+ THUMBOP_ADD = 18,
+ THUMBOP_SUB = 19,
+ THUMBOP_CMPH = 19,
+ THUMBOP_MOVH = 20
+} ThumbOpcode;
+
+
+/* Generic form - all ARM instructions are conditional. */
+typedef struct {
+ arminstr_t icode : 28;
+ arminstr_t cond : 4;
+} ARMInstrGeneric;
+
+
+
+/* Branch or Branch with Link instructions. */
+typedef struct {
+ arminstr_t offset : 24;
+ arminstr_t link : 1;
+ arminstr_t tag : 3; /* 1 0 1 */
+ arminstr_t cond : 4;
+} ARMInstrBR;
+
+#define ARM_BR_ID 5
+#define ARM_BR_MASK 7 << 25
+#define ARM_BR_TAG ARM_BR_ID << 25
+
+#define ARM_DEF_BR(offs, l, cond) ((offs) | ((l) << 24) | (ARM_BR_TAG) | (cond << ARMCOND_SHIFT))
+
+/* branch */
+#define ARM_B_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 0, cond))
+#define ARM_B(p, offs) ARM_B_COND((p), ARMCOND_AL, (offs))
+/* branch with link */
+#define ARM_BL_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 1, cond))
+#define ARM_BL(p, offs) ARM_BL_COND((p), ARMCOND_AL, (offs))
+
+/* branch to register and exchange */
+#define ARM_BX_COND(p, cond, reg) ARM_EMIT(p, ((cond << ARMCOND_SHIFT) | (reg) | 0x12FFF10))
+#define ARM_BX(p, reg) ARM_BX_COND((p), ARMCOND_AL, (reg))
+
+/* branch to register with link */
+#define ARM_BLX_COND(p, cond, reg) ARM_EMIT(p, ((cond << ARMCOND_SHIFT) | (reg) | 0x12FFF30))
+#define ARM_BLX(p, reg) ARM_BLX_COND((p), ARMCOND_AL, (reg))
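+/* Usage note (not part of the original header): ARM_DEF_BR simply ORs the raw
+ * 24-bit offset field into the word, so callers are expected to pass
+ * offs = ((target - (branch_address + 8)) >> 2) & 0x00FFFFFF, i.e. the signed
+ * word offset relative to PC+8, pre-masked so a negative offset cannot clobber
+ * the link/tag/condition bits. */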
+
+
+/* Data Processing Instructions - there are 3 types. */
+
+typedef struct {
+ arminstr_t imm : 8;
+ arminstr_t rot : 4;
+} ARMDPI_op2_imm;
+
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag : 1; /* 0 - immediate shift, 1 - reg shift */
+ arminstr_t type : 2; /* shift type - logical, arithmetic, rotate */
+} ARMDPI_op2_reg_shift;
+
+
+/* op2 is reg shift by imm */
+typedef union {
+ ARMDPI_op2_reg_shift r2;
+ struct {
+ arminstr_t _dummy_r2 : 7;
+ arminstr_t shift : 5;
+ } imm;
+} ARMDPI_op2_reg_imm;
+
+/* op2 is reg shift by reg */
+typedef union {
+ ARMDPI_op2_reg_shift r2;
+ struct {
+ arminstr_t _dummy_r2 : 7;
+ arminstr_t pad : 1; /* always 0, to differentiate from HXFER etc. */
+ arminstr_t rs : 4;
+ } reg;
+} ARMDPI_op2_reg_reg;
+
+/* Data processing instrs */
+typedef union {
+ ARMDPI_op2_imm op2_imm;
+
+ ARMDPI_op2_reg_shift op2_reg;
+ ARMDPI_op2_reg_imm op2_reg_imm;
+ ARMDPI_op2_reg_reg op2_reg_reg;
+
+ struct {
+ arminstr_t op2 : 12; /* raw operand 2 */
+ arminstr_t rd : 4; /* destination reg */
+ arminstr_t rn : 4; /* first operand reg */
+ arminstr_t s : 1; /* S-bit controls PSR update */
+ arminstr_t opcode : 4; /* arithmetic/logic operation */
+ arminstr_t type : 1; /* type of op2, 0 = register, 1 = immediate */
+ arminstr_t tag : 2; /* 0 0 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrDPI;
+
+#define ARM_DPI_ID 0
+#define ARM_DPI_MASK 3 << 26
+#define ARM_DPI_TAG ARM_DPI_ID << 26
+
+#define ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, cond) \
+ ((imm8) & 0xFF) | \
+ (((rot) & 0xF) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (1 << 25) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+
+#define ARM_DEF_DPI_IMM(imm8, rot, rd, rn, s, op) \
+ ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, ARMCOND_AL)
+
+/* codegen */
+#define ARM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond))
+#define ARM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond))
+#define ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond))
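+/* Worked example: ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_MOV, ARMREG_R0, 0, 1, 0,
+ * ARMCOND_AL) builds 0x1 | (0xD << 21) | (1 << 25) | (0xE << 28) = 0xE3A00001,
+ * the encoding of "mov r0, #1". */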
+
+
+
+#define ARM_DEF_DPI_REG_IMMSHIFT_COND(rm, shift_type, imm_shift, rd, rn, s, op, cond) \
+ (rm) | \
+ ((shift_type & 3) << 5) | \
+ (((imm_shift) & 0x1F) << 7) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+/* codegen */
+#define ARM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_type, imm_shift, (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_type, imm_shift, (rd), (rn), 1, (op), cond))
+
+#define ARM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_type, imm_shift, (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_type, imm_shift, (rd), (rn), 1, (op), cond))
+
+#define ARM_IASM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_IASM_EMIT(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond))
+
+
+/* Rd := Rn op (Rm shift_type Rs) */
+#define ARM_DEF_DPI_REG_REGSHIFT_COND(rm, shift_type, rs, rd, rn, s, op, cond) \
+ (rm) | \
+ (1 << 4) | \
+ ((shift_type & 3) << 5) | \
+ ((rs) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+/* codegen */
+#define ARM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_type, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_type, (rs), (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_type, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_type, (rs), (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_type, (rs), (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_type, (rs), (rd), (rn), 1, (op), cond))
+
+
+
+/* Multiple register transfer. */
+typedef struct {
+ arminstr_t reg_list : 16; /* bitfield */
+ arminstr_t rn : 4; /* base reg */
+ arminstr_t ls : 1; /* load(1)/store(0) */
+ arminstr_t wb : 1; /* write-back "!" */
+ arminstr_t s : 1; /* restore PSR, force user bit */
+ arminstr_t u : 1; /* up/down */
+ arminstr_t p : 1; /* pre(1)/post(0) index */
+ arminstr_t tag : 3; /* 1 0 0 */
+ arminstr_t cond : 4;
+} ARMInstrMRT;
+
+#define ARM_MRT_ID 4
+#define ARM_MRT_MASK 7 << 25
+#define ARM_MRT_TAG ARM_MRT_ID << 25
+
+#define ARM_DEF_MRT(regs, rn, l, w, s, u, p, cond) \
+ (regs) | \
+ (rn << 16) | \
+ (l << 20) | \
+ (w << 21) | \
+ (s << 22) | \
+ (u << 23) | \
+ (p << 24) | \
+ (ARM_MRT_TAG) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_STMDB(p, rbase, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, rbase, 0, 0, 0, 0, 1, ARMCOND_AL))
+#define ARM_LDMDB(p, rbase, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, rbase, 1, 0, 0, 0, 1, ARMCOND_AL))
+#define ARM_STMDB_WB(p, rbase, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, rbase, 0, 1, 0, 0, 1, ARMCOND_AL))
+#define ARM_LDMIA_WB(p, rbase, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, rbase, 1, 1, 0, 1, 0, ARMCOND_AL))
+#define ARM_LDMIA(p, rbase, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, rbase, 1, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_STMIA(p, rbase, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, rbase, 0, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_STMIA_WB(p, rbase, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, rbase, 0, 1, 0, 1, 0, ARMCOND_AL))
+
+#define ARM_LDMIA_WB_PC_S(p, rbase, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, rbase, 1, 1, 1, 1, 0, ARMCOND_AL))
+
+/* THUMB
+#define ARM_POP_OP(p) ARM_EMIT(p, 0xFF01BD17)
+#define ARM_PUSH_OP(p) ARM_EMIT(p, 0xFF02B497)
+*/
+
+/* stmdb sp!, {regs} */
+#define ARM_PUSH(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL))
+#define ARM_IASM_PUSH(regs) ARM_IASM(ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL))
+
+/* ldmia sp!, {regs} */
+#define ARM_POP(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL))
+#define ARM_IASM_POP(regs) ARM_IASM_EMIT(ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL))
+
+/* ldmia sp, {regs} ; (no write-back) */
+#define ARM_POP_NWB(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_IASM_POP_NWB(regs) ARM_IASM_EMIT(ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL))
+
+#define ARM_PUSH1(p, r1) ARM_PUSH(p, (1 << r1))
+#define ARM_PUSH2(p, r1, r2) ARM_PUSH(p, (1 << r1) | (1 << r2))
+#define ARM_PUSH3(p, r1, r2, r3) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3))
+#define ARM_PUSH4(p, r1, r2, r3, r4) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4))
+#define ARM_PUSH5(p, r1, r2, r3, r4, r5) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5))
+#define ARM_PUSH6(p, r1, r2, r3, r4, r5, r6) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6))
+#define ARM_PUSH7(p, r1, r2, r3, r4, r5, r6, r7) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7))
+#define ARM_PUSH8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8))
+#define ARM_PUSH9(p, r1, r2, r3, r4, r5, r6, r7, r8, r9) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8) | (1 << r9))
+
+#define ARM_POP9(p, r1, r2, r3, r4, r5, r6, r7, r8, r9) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8) | (1 << r9))
+#define ARM_POP8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8))
+#define ARM_POP7(p, r1, r2, r3, r4, r5, r6, r7) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7))
+#define ARM_POP6(p, r1, r2, r3, r4, r5, r6) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6))
+#define ARM_POP5(p, r1, r2, r3, r4, r5) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5))
+#define ARM_POP4(p, r1, r2, r3, r4) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4))
+#define ARM_POP3(p, r1, r2, r3) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3))
+#define ARM_POP2(p, r1, r2) ARM_POP(p, (1 << r1) | (1 << r2))
+#define ARM_POP1(p, r1) ARM_POP(p, (1 << r1))
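+/* Illustrative sketch (not from the original source): a typical prologue and
+ * epilogue emitted with these helpers, assuming 'p' is the code cursor that
+ * ARM_EMIT/write32 advance in this codebase:
+ *
+ *   ARM_PUSH2(p, ARMREG_V1, ARMREG_LR);   emits stmdb sp!, {r4, lr} = 0xE92D4010
+ *   ...function body...
+ *   ARM_POP2(p, ARMREG_V1, ARMREG_PC);    emits ldmia sp!, {r4, pc} = 0xE8BD8010
+ */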
+
+
+/* Multiply instructions */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag2 : 4; /* 9 */
+ arminstr_t rs : 4;
+ arminstr_t rn : 4;
+ arminstr_t rd : 4;
+ arminstr_t s : 1;
+ arminstr_t opcode : 3;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrMul;
+
+#define ARM_MUL_ID 0
+#define ARM_MUL_ID2 9
+#define ARM_MUL_MASK ((0xF << 24) | (0xF << 4))
+#define ARM_MUL_TAG ((ARM_MUL_ID << 24) | (ARM_MUL_ID2 << 4))
+
+#define ARM_DEF_MUL_COND(op, rd, rm, rs, rn, s, cond) \
+ (rm) | \
+ ((rs) << 8) | \
+ ((rn) << 12) | \
+ ((rd) << 16) | \
+ (((s) & 1) << 20) | \
+ (((op) & 7) << 21) | \
+ ARM_MUL_TAG | \
+ ARM_DEF_COND(cond)
+
+/* Rd := (Rm * Rs)[31:0]; 32 x 32 -> 32 */
+#define ARM_MUL_COND(p, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond))
+#define ARM_MUL(p, rd, rm, rs) \
+ ARM_MUL_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_MULS_COND(p, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond))
+#define ARM_MULS(p, rd, rm, rs) \
+ ARM_MULS_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_MUL_REG_REG(p, rd, rm, rs) ARM_MUL(p, rd, rm, rs)
+#define ARM_MULS_REG_REG(p, rd, rm, rs) ARM_MULS(p, rd, rm, rs)
+
+/* inline */
+#define ARM_IASM_MUL_COND(rd, rm, rs, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond))
+#define ARM_IASM_MUL(rd, rm, rs) \
+ ARM_IASM_MUL_COND(rd, rm, rs, ARMCOND_AL)
+#define ARM_IASM_MULS_COND(rd, rm, rs, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond))
+#define ARM_IASM_MULS(rd, rm, rs) \
+ ARM_IASM_MULS_COND(rd, rm, rs, ARMCOND_AL)
+
+
+/* Rd := (Rm * Rs) + Rn; 32x32+32->32 */
+#define ARM_MLA_COND(p, rd, rm, rs, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond))
+#define ARM_MLA(p, rd, rm, rs, rn) \
+ ARM_MLA_COND(p, rd, rm, rs, rn, ARMCOND_AL)
+#define ARM_MLAS_COND(p, rd, rm, rs, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond))
+#define ARM_MLAS(p, rd, rm, rs, rn) \
+ ARM_MLAS_COND(p, rd, rm, rs, rn, ARMCOND_AL)
+
+/* inline */
+#define ARM_IASM_MLA_COND(rd, rm, rs, rn, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond))
+#define ARM_IASM_MLA(rd, rm, rs, rn) \
+ ARM_IASM_MLA_COND(rd, rm, rs, rn, ARMCOND_AL)
+#define ARM_IASM_MLAS_COND(rd, rm, rs, rn, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond))
+#define ARM_IASM_MLAS(rd, rm, rs, rn) \
+ ARM_IASM_MLAS_COND(rd, rm, rs, rn, ARMCOND_AL)
+
+
+#define ARM_SMULL_COND(p, rn, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_SMULL, rd, rm, rs, rn, 0, cond))
+#define ARM_SMULL(p, rn, rd, rm, rs) \
+ ARM_SMULL_COND(p, rn, rd, rm, rs, ARMCOND_AL)
+
+#define ARM_SMLAL_COND(p, rn, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_SMLAL, rd, rm, rs, rn, 0, cond))
+#define ARM_SMLAL(p, rn, rd, rm, rs) \
+ ARM_SMLAL_COND(p, rn, rd, rm, rs, ARMCOND_AL)
+
+#define ARM_UMULL_COND(p, rn, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_UMULL, rd, rm, rs, rn, 0, cond))
+#define ARM_UMULL(p, rn, rd, rm, rs) \
+ ARM_UMULL_COND(p, rn, rd, rm, rs, ARMCOND_AL)
+
+#define ARM_UMLAL_COND(p, rn, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_UMLAL, rd, rm, rs, rn, 0, cond))
+#define ARM_UMLAL(p, rn, rd, rm, rs) \
+ ARM_UMLAL_COND(p, rn, rd, rm, rs, ARMCOND_AL)
+
+
+#define ARM_SMULLS_COND(p, rn, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_SMULL, rd, rm, rs, rn, 1, cond))
+#define ARM_SMULLS(p, rn, rd, rm, rs) \
+ ARM_SMULLS_COND(p, rn, rd, rm, rs, ARMCOND_AL)
+
+#define ARM_SMLALS_COND(p, rn, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_SMLAL, rd, rm, rs, rn, 1, cond))
+#define ARM_SMLALS(p, rn, rd, rm, rs) \
+ ARM_SMLALS_COND(p, rn, rd, rm, rs, ARMCOND_AL)
+
+#define ARM_UMULLS_COND(p, rn, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_UMULL, rd, rm, rs, rn, 1, cond))
+#define ARM_UMULLS(p, rn, rd, rm, rs) \
+ ARM_UMULLS_COND(p, rn, rd, rm, rs, ARMCOND_AL)
+
+#define ARM_UMLALS_COND(p, rn, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_UMLAL, rd, rm, rs, rn, 1, cond))
+#define ARM_UMLALS(p, rn, rd, rm, rs) \
+ ARM_UMLALS_COND(p, rn, rd, rm, rs, ARMCOND_AL)
+
+
+
+/* Word/byte transfer */
+typedef union {
+ ARMDPI_op2_reg_imm op2_reg_imm;
+ struct {
+ arminstr_t op2_imm : 12;
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t b : 1;
+ arminstr_t u : 1; /* down(0) / up(1) */
+ arminstr_t p : 1; /* post-index(0) / pre-index(1) */
+ arminstr_t type : 1; /* imm(0) / register(1) */
+ arminstr_t tag : 2; /* 0 1 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrWXfer;
+
+#define ARM_WXFER_ID 1
+#define ARM_WXFER_MASK 3 << 26
+#define ARM_WXFER_TAG ARM_WXFER_ID << 26
+
+
+/*
+ * ls : opcode, ARMOP_STR(0)/ARMOP_LDR(1)
+ * imm12 : immediate offset
+ * wb : write-back
+ * p : index mode, post-index (0, automatic write-back)
+ * or pre-index (1, calc effective address before memory access)
+ */
+#define ARM_DEF_WXFER_IMM(imm12, rd, rn, ls, wb, b, p, cond) \
+ ((((int)(imm12)) < 0) ? -((int)(imm12)) : (imm12)) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ ((b) << 22) | \
+ (((int)(imm12) >= 0) << 23) | \
+ ((p) << 24) | \
+ ARM_WXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_WXFER_MAX_OFFS 0xFFF
+
+/* this macro checks for imm12 bounds */
+#define ARM_EMIT_WXFER_IMM(ptr, imm12, rd, rn, ls, wb, b, p, cond) \
+ do { \
+ int _imm12 = (int)(imm12) < -ARM_WXFER_MAX_OFFS \
+ ? -ARM_WXFER_MAX_OFFS \
+ : (int)(imm12) > ARM_WXFER_MAX_OFFS \
+ ? ARM_WXFER_MAX_OFFS \
+ : (int)(imm12); \
+ ARM_EMIT((ptr), \
+ ARM_DEF_WXFER_IMM(_imm12, (rd), (rn), (ls), (wb), (b), (p), (cond))); \
+ } while (0)
+
+
+/* LDRx */
+/* immediate offset, post-index */
+#define ARM_LDR_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 0, cond))
+
+#define ARM_LDR_IMM_POST(p, rd, rn, imm) ARM_LDR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 0, cond))
+
+#define ARM_LDRB_IMM_POST(p, rd, rn, imm) ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* immediate offset, pre-index */
+#define ARM_LDR_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 1, cond))
+
+#define ARM_LDR_IMM(p, rd, rn, imm) ARM_LDR_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_LDRB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 1, cond))
+
+#define ARM_LDRB_IMM(p, rd, rn, imm) ARM_LDRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+/* STRx */
+/* immediate offset, post-index */
+#define ARM_STR_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 0, cond))
+
+#define ARM_STR_IMM_POST(p, rd, rn, imm) ARM_STR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_STRB_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 0, cond))
+
+#define ARM_STRB_IMM_POST(p, rd, rn, imm) ARM_STRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* immediate offset, pre-index */
+#define ARM_STR_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 0, 0, 1, cond)
+/* ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 1, cond))*/
+/* ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 0, 0, 1, cond) */
+/* ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 1, cond)) */
+
+#define ARM_STR_IMM(p, rd, rn, imm) ARM_STR_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_STRB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 1, cond))
+
+#define ARM_STRB_IMM(p, rd, rn, imm) ARM_STRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* write-back */
+#define ARM_STR_IMM_WB_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 1, 0, 1, cond)
+#define ARM_STR_IMM_WB(p, rd, rn, imm) ARM_STR_IMM_WB_COND(p, rd, rn, imm, ARMCOND_AL)
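+/* Worked example: ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R1, 4) emits 0xE5910004,
+ * i.e. "ldr r0, [r1, #4]".  Note that only the ARM_EMIT_WXFER_IMM path (used
+ * by ARM_STR_IMM and ARM_STR_IMM_WB above) clamps the offset to
+ * +/-ARM_WXFER_MAX_OFFS; the plain ARM_DEF_WXFER_IMM macros assume the caller
+ * keeps the offset within +/-0xFFF. */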
+
+
+/*
+ * wb : write-back
+ * u : down(0) / up(1)
+ * p : index mode, post-index (0, automatic write-back) or pre-index (1)
+ */
+#define ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, u, p, cond) \
+ (rm) | \
+ ((shift_type) << 5) | \
+ ((shift) << 7) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ ((b) << 22) | \
+ ((u) << 23) | \
+ ((p) << 24) | \
+ (1 << 25) | \
+ ARM_WXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \
+ ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, ARM_UP, p, cond)
+#define ARM_DEF_WXFER_REG_MINUS_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \
+ ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, ARM_DOWN, p, cond)
+
+
+#define ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 0, 1, cond))
+#define ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_LDR_REG_REG(p, rd, rn, rm) \
+ ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+#define ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 1, 1, cond))
+#define ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_LDRB_REG_REG(p, rd, rn, rm) \
+ ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+#define ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 0, 1, cond))
+#define ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_STR_REG_REG(p, rd, rn, rm) \
+ ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+/* post-index */
+#define ARM_STR_REG_REG_SHIFT_POST_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 0, 0, cond))
+#define ARM_STR_REG_REG_SHIFT_POST(p, rd, rn, rm, shift_type, shift) \
+ ARM_STR_REG_REG_SHIFT_POST_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_STR_REG_REG_POST(p, rd, rn, rm) \
+ ARM_STR_REG_REG_SHIFT_POST(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+/* zero-extend */
+#define ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 1, 1, cond))
+#define ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_STRB_REG_REG(p, rd, rn, rm) \
+ ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+
+/* ARMv4+ */
+/* Half-word or byte (signed) transfer. */
+typedef struct {
+ arminstr_t rm : 4; /* imm_lo */
+ arminstr_t tag3 : 1; /* 1 */
+ arminstr_t h : 1; /* half-word or byte */
+ arminstr_t s : 1; /* sign-extend or zero-extend */
+ arminstr_t tag2 : 1; /* 1 */
+ arminstr_t imm_hi : 4;
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t type : 1; /* imm(1) / reg(0) */
+ arminstr_t u : 1; /* +- */
+ arminstr_t p : 1; /* pre/post-index */
+ arminstr_t tag : 3;
+ arminstr_t cond : 4;
+} ARMInstrHXfer;
+
+#define ARM_HXFER_ID 0
+#define ARM_HXFER_ID2 1
+#define ARM_HXFER_ID3 1
+#define ARM_HXFER_MASK ((0x7 << 25) | (0x9 << 4))
+#define ARM_HXFER_TAG ((ARM_HXFER_ID << 25) | (ARM_HXFER_ID2 << 7) | (ARM_HXFER_ID3 << 4))
+
+#define ARM_DEF_HXFER_IMM_COND(imm, h, s, rd, rn, ls, wb, p, cond) \
+ (((int)(imm) >= 0 ? (imm) : -(int)(imm)) & 0xF) | \
+ ((h) << 5) | \
+ ((s) << 6) | \
+ ((((int)(imm) >= 0 ? (imm) : -(int)(imm)) << 4) & (0xF << 8)) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ (1 << 22) | \
+ (((int)(imm) >= 0) << 23) | \
+ ((p) << 24) | \
+ ARM_HXFER_TAG | \
+ ARM_DEF_COND(cond)
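+/* Worked example: the halfword/signed transfers split the 8-bit offset into
+ * two nibbles (imm_hi:imm_lo).  ARM_LDRH_IMM(p, ARMREG_R0, ARMREG_R1, 20)
+ * encodes offset 20 (0x14) as imm_lo = 4, imm_hi = 1, producing 0xE1D101B4,
+ * i.e. "ldrh r0, [r1, #20]". */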
+
+#define ARM_LDRH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRH_IMM(p, rd, rn, imm) \
+ ARM_LDRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+#define ARM_LDRSH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSH_IMM(p, rd, rn, imm) \
+ ARM_LDRSH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+#define ARM_LDRSB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSB_IMM(p, rd, rn, imm) \
+ ARM_LDRSB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_STRH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond))
+#define ARM_STRH_IMM(p, rd, rn, imm) \
+ ARM_STRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_STRH_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_STR, 0, 0, cond))
+#define ARM_STRH_IMM_POST(p, rd, rn, imm) \
+ ARM_STRH_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, u, p, cond) \
+ ((rm) & 0xF) | \
+ ((h) << 5) | \
+ ((s) << 6) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ (0 << 22) | \
+ ((u) << 23) | \
+ ((p) << 24) | \
+ ARM_HXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_HXFER_REG_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \
+ ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_UP, p, cond)
+#define ARM_DEF_HXFER_REG_MINUS_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \
+ ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_DOWN, p, cond)
+
+#define ARM_LDRH_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRH_REG_REG(p, rd, rn, rm) \
+ ARM_LDRH_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_LDRSH_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSH_REG_REG(p, rd, rn, rm) \
+ ARM_LDRSH_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_LDRSB_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSB_REG_REG(p, rd, rn, rm) ARM_LDRSB_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#define ARM_STRH_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond))
+#define ARM_STRH_REG_REG(p, rd, rn, rm) \
+ ARM_STRH_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#define ARM_STRH_REG_REG_POST_COND(p, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_STR, 0, 0, cond))
+#define ARM_STRH_REG_REG_POST(p, rd, rn, rm) \
+ ARM_STRH_REG_REG_POST_COND(p, rd, rn, rm, ARMCOND_AL)
+
+
+
+/* Swap */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag3 : 8; /* 0x9 */
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t tag2 : 2;
+ arminstr_t b : 1;
+ arminstr_t tag : 5; /* 0x2 */
+ arminstr_t cond : 4;
+} ARMInstrSwap;
+
+#define ARM_SWP_ID 2
+#define ARM_SWP_ID2 9
+#define ARM_SWP_MASK ((0x1F << 23) | (3 << 20) | (0xFF << 4))
+#define ARM_SWP_TAG ((ARM_SWP_ID << 23) | (ARM_SWP_ID2 << 4))
+
+
+
+/* Software interrupt */
+typedef struct {
+ arminstr_t num : 24;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrSWI;
+
+#define ARM_SWI_ID 0xF
+#define ARM_SWI_MASK (0xF << 24)
+#define ARM_SWI_TAG (ARM_SWI_ID << 24)
+
+
+
+/* Co-processor Data Processing */
+typedef struct {
+ arminstr_t crm : 4;
+ arminstr_t tag2 : 1; /* 0 */
+ arminstr_t op2 : 3;
+ arminstr_t cpn : 4; /* CP number */
+ arminstr_t crd : 4;
+ arminstr_t crn : 4;
+ arminstr_t op : 4;
+ arminstr_t tag : 4; /* 0xE */
+ arminstr_t cond : 4;
+} ARMInstrCDP;
+
+#define ARM_CDP_ID 0xE
+#define ARM_CDP_ID2 0
+#define ARM_CDP_MASK ((0xF << 24) | (1 << 4))
+#define ARM_CDP_TAG ((ARM_CDP_ID << 24) | (ARM_CDP_ID2 << 4))
+
+
+/* Co-processor Data Transfer (ldc/stc) */
+typedef struct {
+ arminstr_t offs : 8;
+ arminstr_t cpn : 4;
+ arminstr_t crd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t n : 1;
+ arminstr_t u : 1;
+ arminstr_t p : 1;
+ arminstr_t tag : 3;
+ arminstr_t cond : 4;
+} ARMInstrCDT;
+
+#define ARM_CDT_ID 6
+#define ARM_CDT_MASK (7 << 25)
+#define ARM_CDT_TAG (ARM_CDT_ID << 25)
+
+
+/* Co-processor Register Transfer (mcr/mrc) */
+typedef struct {
+ arminstr_t crm : 4;
+ arminstr_t tag2 : 1;
+ arminstr_t op2 : 3;
+ arminstr_t cpn : 4;
+ arminstr_t rd : 4;
+ arminstr_t crn : 4;
+ arminstr_t ls : 1;
+ arminstr_t op1 : 3;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrCRT;
+
+#define ARM_CRT_ID 0xE
+#define ARM_CRT_ID2 0x1
+#define ARM_CRT_MASK ((0xF << 24) | (1 << 4))
+#define ARM_CRT_TAG ((ARM_CRT_ID << 24) | (ARM_CRT_ID2 << 4))
+
+/*
+ * Move from co-processor register to CPU register
+ * Rd := cRn {<op>cRm}
+ * op{condition} CP#,CPOp,Rd,CRn,CRm{,CPOp2}
+ */
+#define ARM_DEF_MRC_COND(cpn, cpop, rd, crn, crm, cpop2, cond) \
+ ((crm) & 0xF) |\
+ ((cpop2) << 5) |\
+ ((cpn) << 8) |\
+ ((rd) << 12) |\
+ ((crn) << 16) |\
+ ((ARMOP_LDR) << 20) |\
+ ((cpop) << 21) |\
+ ARM_CRT_TAG |\
+ ARM_DEF_COND(cond)
+
+#define ARM_MRC_COND(p, cpn, cpop, rd, crn, crm, cpop2, cond) \
+ ARM_EMIT(p, ARM_DEF_MRC_COND(cpn, cpop, rd, crn, crm, cpop2, cond))
+#define ARM_MRC(p, cpn, cpop, rd, crn, crm, cpop2) \
+ ARM_MRC_COND(p, cpn, cpop, rd, crn, crm, cpop2, ARMCOND_AL)
+
+
+
+/* Move register to PSR. */
+typedef union {
+ ARMDPI_op2_imm op2_imm;
+ struct {
+ arminstr_t rm : 4;
+ arminstr_t pad : 8; /* 0 */
+ arminstr_t tag4 : 4; /* 0xF */
+ arminstr_t fld : 4;
+ arminstr_t tag3 : 2; /* 0x2 */
+ arminstr_t sel : 1;
+ arminstr_t tag2 : 2; /* 0x2 */
+ arminstr_t type : 1;
+ arminstr_t tag : 2; /* 0 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrMSR;
+
+#define ARM_MSR_ID 0
+#define ARM_MSR_ID2 2
+#define ARM_MSR_ID3 2
+#define ARM_MSR_ID4 0xF
+#define ARM_MSR_MASK ((3 << 26) | \
+ (3 << 23) | \
+ (3 << 20) | \
+ (0xF << 12))
+#define ARM_MSR_TAG ((ARM_MSR_ID << 26) | \
+ (ARM_MSR_ID2 << 23) | \
+ (ARM_MSR_ID3 << 20) | \
+ (ARM_MSR_ID4 << 12))
+
+#define ARM_DEF_MSR_REG_COND(mask, rm, r, cond) \
+ ARM_MSR_TAG | \
+ ARM_DEF_COND(cond) | \
+ ((rm) & 0xf) | \
+ (((r) & 1) << 22) | \
+ (((mask) & 0xf) << 16)
+
+#define ARM_MSR_REG_COND(p, mask, rm, r, cond) \
+ ARM_EMIT(p, ARM_DEF_MSR_REG_COND(mask, rm, r, cond))
+
+#define ARM_MSR_REG(p, mask, rm, r) \
+ ARM_MSR_REG_COND(p, mask, rm, r, ARMCOND_AL)
+
+#define ARM_PSR_C 1
+#define ARM_PSR_X 2
+#define ARM_PSR_S 4
+#define ARM_PSR_F 8
+
+#define ARM_CPSR 0
+#define ARM_SPSR 1
+
+/* Move PSR to register. */
+typedef struct {
+ arminstr_t tag3 : 12;
+ arminstr_t rd : 4;
+ arminstr_t tag2 : 6;
+ arminstr_t sel : 1; /* CPSR | SPSR */
+ arminstr_t tag : 5;
+ arminstr_t cond : 4;
+} ARMInstrMRS;
+
+#define ARM_MRS_ID 2
+#define ARM_MRS_ID2 0xF
+#define ARM_MRS_ID3 0
+#define ARM_MRS_MASK ((0x1F << 23) | (0x3F << 16) | 0xFFF)
+#define ARM_MRS_TAG ((ARM_MRS_ID << 23) | (ARM_MRS_ID2 << 16) | ARM_MRS_ID3)
+
+#define ARM_DEF_MRS_COND(rd, r, cond) \
+ ARM_MRS_TAG | \
+ ARM_DEF_COND(cond) | \
+ (((r) & 1) << 22) | \
+ ((rd)& 0xf) << 12
+
+#define ARM_MRS_COND(p, rd, r, cond) \
+ ARM_EMIT(p, ARM_DEF_MRS_COND(rd, r, cond))
+
+#define ARM_MRS_CPSR_COND(p, rd, cond) \
+ ARM_MRS_COND(p, rd, ARM_CPSR, cond)
+
+#define ARM_MRS_CPSR(p, rd) \
+ ARM_MRS_CPSR_COND(p, rd, ARMCOND_AL)
+
+#define ARM_MRS_SPSR_COND(p, rd, cond) \
+ ARM_MRS_COND(p, rd, ARM_SPSR, cond)
+
+#define ARM_MRS_SPSR(p, rd) \
+ ARM_MRS_SPSR_COND(p, rd, ARMCOND_AL)
+
+
+#include "arm_dpimacros.h"
+
+#define ARM_NOP(p) ARM_MOV_REG_REG(p, ARMREG_R0, ARMREG_R0)
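+/* ARM_NOP expands to "mov r0, r0", i.e. the word 0xE1A00000. */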
+
+
+#define ARM_SHL_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond)
+#define ARM_SHL_IMM(p, rd, rm, imm) \
+ ARM_SHL_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SHLS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond)
+#define ARM_SHLS_IMM(p, rd, rm, imm) \
+ ARM_SHLS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SHR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond)
+#define ARM_SHR_IMM(p, rd, rm, imm) \
+ ARM_SHR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SHRS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond)
+#define ARM_SHRS_IMM(p, rd, rm, imm) \
+ ARM_SHRS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SAR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond)
+#define ARM_SAR_IMM(p, rd, rm, imm) \
+ ARM_SAR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SARS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond)
+#define ARM_SARS_IMM(p, rd, rm, imm) \
+ ARM_SARS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_ROR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond)
+#define ARM_ROR_IMM(p, rd, rm, imm) \
+ ARM_ROR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_RORS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond)
+#define ARM_RORS_IMM(p, rd, rm, imm) \
+ ARM_RORS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SHL_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond)
+#define ARM_SHL_REG(p, rd, rm, rs) \
+ ARM_SHL_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHLS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond)
+#define ARM_SHLS_REG(p, rd, rm, rs) \
+ ARM_SHLS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHLS_REG_REG(p, rd, rm, rs) ARM_SHLS_REG(p, rd, rm, rs)
+
+#define ARM_SHR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond)
+#define ARM_SHR_REG(p, rd, rm, rs) \
+ ARM_SHR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHRS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond)
+#define ARM_SHRS_REG(p, rd, rm, rs) \
+ ARM_SHRS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHRS_REG_REG(p, rd, rm, rs) ARM_SHRS_REG(p, rd, rm, rs)
+
+#define ARM_SAR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond)
+#define ARM_SAR_REG(p, rd, rm, rs) \
+ ARM_SAR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SARS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond)
+#define ARM_SARS_REG(p, rd, rm, rs) \
+ ARM_SARS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SARS_REG_REG(p, rd, rm, rs) ARM_SARS_REG(p, rd, rm, rs)
+
+#define ARM_ROR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond)
+#define ARM_ROR_REG(p, rd, rm, rs) \
+ ARM_ROR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_RORS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond)
+#define ARM_RORS_REG(p, rd, rm, rs) \
+ ARM_RORS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_RORS_REG_REG(p, rd, rm, rs) ARM_RORS_REG(p, rd, rm, rs)
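+/* Worked example: ARM_SHL_IMM(p, ARMREG_R0, ARMREG_R1, 4) is a MOV with an
+ * immediate LSL shift and emits 0xE1A00201, i.e. "mov r0, r1, lsl #4". */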
+
+#define ARM_DBRK(p) ARM_EMIT(p, 0xE6000010)
+#define ARM_IASM_DBRK() ARM_IASM_EMIT(0xE6000010)
+
+#define ARM_INC(p, reg) ARM_ADD_REG_IMM8(p, reg, reg, 1)
+#define ARM_DEC(p, reg) ARM_SUB_REG_IMM8(p, reg, reg, 1)
+
+
+/* ARM V5 */
+
+/* Count leading zeros, CLZ{cond} Rd, Rm */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag2 : 8;
+ arminstr_t rd : 4;
+ arminstr_t tag : 12;
+ arminstr_t cond : 4;
+} ARMInstrCLZ;
+
+#define ARM_CLZ_ID 0x16F
+#define ARM_CLZ_ID2 0xF1
+#define ARM_CLZ_MASK ((0xFFF << 16) | (0xFF << 4))
+#define ARM_CLZ_TAG ((ARM_CLZ_ID << 16) | (ARM_CLZ_ID2 << 4))
+
+#define ARM_DEF_CLZ_COND(rd, rm, cond) \
+ ARM_CLZ_TAG | \
+ ARM_DEF_COND(cond) | \
+ (((rm) & 0xf)) | \
+ ((rd) & 0xf) << 12
+
+#define ARM_CLZ_COND(p, rd, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_CLZ_COND(rd, rm, cond))
+
+#define ARM_CLZ(p, rd, rm) \
+ ARM_EMIT(p, ARM_DEF_CLZ_COND(rd, rm, ARMCOND_AL))
+
+/*
+ * TAG p b wb ls
+ * ARMCOND_NV | 0-1-0 | 0 | +/- | 1 | 0 | 1 | rn -|- 0xF | imm12
+ */
+#define ARM_PLD_ID 0xF45
+#define ARM_PLD_ID2 0xF /* rd */
+#define ARM_PLD_MASK ((0xFC7 << 20) | (0xF << 12))
+#define ARM_PLD_TAG ((ARM_PLD_ID << 20) | (ARM_PLD_ID2 << 12))
+#define ARM_DEF_PLD_IMM(imm12, rn) \
+ ((((int)imm12) < 0) ? -(int)(imm12) : (imm12)) | \
+ ((0xF) << 12) | \
+ ((rn) << 16) | \
+ ((1) << 20) /* ls = load(1) */ | \
+ ((0) << 21) /* wb = 0 */ | \
+ ((1) << 22) /* b = 1 */ | \
+ (((int)(imm12) >= 0) << 23) | \
+ ((1) << 24) /* pre/post = pre(1) */ | \
+ ((2) << 25) /* tag */ | \
+ ARM_DEF_COND(ARMCOND_NV)
+
+#define ARM_PLD_IMM(p, rn, imm12) ARM_EMIT(p, ARM_DEF_PLD_IMM(imm12, rn))
+
+#define ARM_DEF_PLD_REG_REG_UPDOWN_SHIFT(rn, shift_type, shift, rm, u) \
+ (rm) | \
+ ((shift_type) << 5) | \
+ ((shift) << 7) | \
+ (0xF << 12) /* rd = 0xF */ | \
+ ((rn) << 16) | \
+ (1 << 20) /* ls = load(1) */ | \
+ (0 << 21) /* wb = 0 */ | \
+ (1 << 22) /* b = 1 */ | \
+ ((u) << 23) | \
+ (1 << 24) /* pre(1) */ | \
+ (3 << 25) | \
+ ARM_DEF_COND(ARMCOND_NV)
+
+#define ARM_PLD_REG_REG_UPDOWN_SHIFT(p, rm, rn, u, shift_type, shift) \
+ ARM_EMIT(p, ARM_DEF_PLD_REG_REG_UPDOWN_SHIFT(rm, shift_type, shift, rn, u))
+
+#define ARM_PLD_REG_PLUS_REG(p, rm, rn) \
+ ARM_PLD_REG_REG_UPDOWN_SHIFT(p, rm, rn, ARM_UP, ARMSHIFT_LSL, 0)
+
+#define ARM_PLD_REG_MINUS_REG(p, rm, rn) \
+ ARM_PLD_REG_REG_UPDOWN_SHIFT(p, rm, rn, ARM_DOWN, ARMSHIFT_LSL, 0)
+
+
+#define ARM_DEF_STF_IMM_COND(p, prec, freg_const, rd, imm8, rot, cond) \
+ ((imm8) & 0xFF) | \
+ (((rot) & 0xF) << 8) | \
+ ((freg_const) << 12) | \
+ (1 << 25) | \
+ ARM_DEF_COND(cond)
+
+
+typedef union {
+ ARMInstrBR br;
+ ARMInstrDPI dpi;
+ ARMInstrMRT mrt;
+ ARMInstrMul mul;
+ ARMInstrWXfer wxfer;
+ ARMInstrHXfer hxfer;
+ ARMInstrSwap swp;
+ ARMInstrCDP cdp;
+ ARMInstrCDT cdt;
+ ARMInstrCRT crt;
+ ARMInstrSWI swi;
+ ARMInstrMSR msr;
+ ARMInstrMRS mrs;
+ ARMInstrCLZ clz;
+
+ ARMInstrGeneric generic;
+ arminstr_t raw;
+} ARMInstr;
+
+#endif /* ARM_CG_H */
+
diff --git a/arm/arm_dpimacros.h b/arm/arm_dpimacros.h
new file mode 100644
index 0000000..743d5a5
--- /dev/null
+++ b/arm/arm_dpimacros.h
@@ -0,0 +1,1661 @@
+/* Macros for DPI ops, auto-generated from template
+ *
+ * Copyright (c) 2002 Wild West Software
+ * Copyright (c) 2001, 2002 Sergey Chaban
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+/* mov/mvn */
+
+/* Rd := imm8 ROR rot */
+#define ARM_MOV_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_MOV, reg, 0, imm8, rot, cond)
+#define ARM_MOV_REG_IMM(p, reg, imm8, rot) \
+ ARM_MOV_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define ARM_MOVS_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_MOV, reg, 0, imm8, rot, cond)
+#define ARM_MOVS_REG_IMM(p, reg, imm8, rot) \
+ ARM_MOVS_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _MOV_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_MOV, reg, 0, imm8, rot, cond)
+#define _MOV_REG_IMM(reg, imm8, rot) \
+ _MOV_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define _MOVS_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_MOV, reg, 0, imm8, rot, cond)
+#define _MOVS_REG_IMM(reg, imm8, rot) \
+ _MOVS_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := imm8 */
+#define ARM_MOV_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_MOV, reg, 0, imm8, 0, cond)
+#define ARM_MOV_REG_IMM8(p, reg, imm8) \
+ ARM_MOV_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+/* S */
+#define ARM_MOVS_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_MOV, reg, 0, imm8, 0, cond)
+#define ARM_MOVS_REG_IMM8(p, reg, imm8) \
+ ARM_MOVS_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _MOV_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_MOV, reg, 0, imm8, 0, cond)
+#define _MOV_REG_IMM8(reg, imm8) \
+ _MOV_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+/* S */
+#define _MOVS_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_MOV, reg, 0, imm8, 0, cond)
+#define _MOVS_REG_IMM8(reg, imm8) \
+ _MOVS_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm */
+#define ARM_MOV_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_MOV, rd, 0, rm, cond)
+#define ARM_MOV_REG_REG(p, rd, rm) \
+ ARM_MOV_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+/* S */
+#define ARM_MOVS_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_MOV, rd, 0, rm, cond)
+#define ARM_MOVS_REG_REG(p, rd, rm) \
+ ARM_MOVS_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _MOV_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_MOV, rd, 0, rm, cond)
+#define _MOV_REG_REG(rd, rm) \
+ _MOV_REG_REG_COND(rd, rm, ARMCOND_AL)
+/* S */
+#define _MOVS_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_MOV, rd, 0, rm, cond)
+#define _MOVS_REG_REG(rd, rm) \
+ _MOVS_REG_REG_COND(rd, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm <shift_type> imm_shift */
+#define ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_MOV, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_MOV_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_MOV, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_MOVS_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _MOV_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_MOV, rd, 0, rm, shift_type, imm_shift, cond)
+#define _MOV_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _MOV_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define _MOVS_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_MOV, rd, 0, rm, shift_type, imm_shift, cond)
+#define _MOVS_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _MOVS_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+
+/* Rd := (Rm <shift_type> Rs) */
+#define ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_MOV, rd, 0, rm, shift_type, rs, cond)
+#define ARM_MOV_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_MOV, rd, 0, rm, shift_type, rs, cond)
+#define ARM_MOVS_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _MOV_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_MOV, rd, 0, rm, shift_type, rs, cond)
+#define _MOV_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _MOV_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define _MOVS_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_MOV, rd, 0, rm, shift_type, rs, cond)
+#define _MOVS_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _MOVS_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
+/* Rd := imm8 ROR rot */
+#define ARM_MVN_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_MVN, reg, 0, imm8, rot, cond)
+#define ARM_MVN_REG_IMM(p, reg, imm8, rot) \
+ ARM_MVN_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define ARM_MVNS_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_MVN, reg, 0, imm8, rot, cond)
+#define ARM_MVNS_REG_IMM(p, reg, imm8, rot) \
+ ARM_MVNS_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _MVN_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_MVN, reg, 0, imm8, rot, cond)
+#define _MVN_REG_IMM(reg, imm8, rot) \
+ _MVN_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define _MVNS_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_MVN, reg, 0, imm8, rot, cond)
+#define _MVNS_REG_IMM(reg, imm8, rot) \
+ _MVNS_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := imm8 */
+#define ARM_MVN_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_MVN, reg, 0, imm8, 0, cond)
+#define ARM_MVN_REG_IMM8(p, reg, imm8) \
+ ARM_MVN_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+/* S */
+#define ARM_MVNS_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_MVN, reg, 0, imm8, 0, cond)
+#define ARM_MVNS_REG_IMM8(p, reg, imm8) \
+ ARM_MVNS_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _MVN_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_MVN, reg, 0, imm8, 0, cond)
+#define _MVN_REG_IMM8(reg, imm8) \
+ _MVN_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+/* S */
+#define _MVNS_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_MVN, reg, 0, imm8, 0, cond)
+#define _MVNS_REG_IMM8(reg, imm8) \
+ _MVNS_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm */
+#define ARM_MVN_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_MVN, rd, 0, rm, cond)
+#define ARM_MVN_REG_REG(p, rd, rm) \
+ ARM_MVN_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+/* S */
+#define ARM_MVNS_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_MVN, rd, 0, rm, cond)
+#define ARM_MVNS_REG_REG(p, rd, rm) \
+ ARM_MVNS_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _MVN_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_MVN, rd, 0, rm, cond)
+#define _MVN_REG_REG(rd, rm) \
+ _MVN_REG_REG_COND(rd, rm, ARMCOND_AL)
+/* S */
+#define _MVNS_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_MVN, rd, 0, rm, cond)
+#define _MVNS_REG_REG(rd, rm) \
+ _MVNS_REG_REG_COND(rd, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm <shift_type> imm_shift */
+#define ARM_MVN_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_MVN, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_MVN_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_MVN_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define ARM_MVNS_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_MVN, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_MVNS_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_MVNS_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _MVN_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_MVN, rd, 0, rm, shift_type, imm_shift, cond)
+#define _MVN_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _MVN_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define _MVNS_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_MVN, rd, 0, rm, shift_type, imm_shift, cond)
+#define _MVNS_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _MVNS_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+
+/* Rd := NOT (Rm <shift_type> Rs) */
+#define ARM_MVN_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_MVN, rd, 0, rm, shift_type, rs, cond)
+#define ARM_MVN_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_MVN_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define ARM_MVNS_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_MVN, rd, 0, rm, shift_type, rs, cond)
+#define ARM_MVNS_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_MVNS_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _MVN_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_MVN, rd, 0, rm, shift_type, rs, cond)
+#define _MVN_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _MVN_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define _MVNS_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_MVN, rd, 0, rm, shift_type, rs, cond)
+#define _MVNS_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _MVNS_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
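+/*
+ * Usage sketch: each macro above emits one 32-bit instruction through the
+ * `p` argument, which is forwarded to the underlying emit/write mechanism
+ * defined earlier in this header.  The ARMREG_*, ARMSHIFT_* and ARMCOND_*
+ * constants are likewise assumed to come from the earlier part of this file.
+ *
+ *   ARM_MVN_REG_REG(p, ARMREG_R0, ARMREG_R1);              // mvn  r0, r1
+ *   ARM_MOVS_REG_REGSHIFT(p, ARMREG_R2, ARMREG_R1,
+ *                         ARMSHIFT_LSL, ARMREG_R3);        // movs r2, r1, lsl r3
+ */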
+
+/* DPIs, arithmetic and logical */
+
+/* -- AND -- */
+
+/* Rd := Rn AND (imm8 ROR 2*rot) */
+#define ARM_AND_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_AND, rd, rn, imm8, rot, cond)
+#define ARM_AND_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_AND_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_ANDS_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_AND, rd, rn, imm8, rot, cond)
+#define ARM_ANDS_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_ANDS_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _AND_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_AND, rd, rn, imm8, rot, cond)
+#define _AND_REG_IMM(rd, rn, imm8, rot) \
+ _AND_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _ANDS_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_AND, rd, rn, imm8, rot, cond)
+#define _ANDS_REG_IMM(rd, rn, imm8, rot) \
+ _ANDS_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn AND imm8 */
+#define ARM_AND_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_AND_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_AND_REG_IMM8(p, rd, rn, imm8) \
+ ARM_AND_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_ANDS_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_ANDS_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_ANDS_REG_IMM8(p, rd, rn, imm8) \
+ ARM_ANDS_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _AND_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _AND_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _AND_REG_IMM8(rd, rn, imm8) \
+ _AND_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _ANDS_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _ANDS_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _ANDS_REG_IMM8(rd, rn, imm8) \
+ _ANDS_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn AND Rm */
+#define ARM_AND_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_AND, rd, rn, rm, cond)
+#define ARM_AND_REG_REG(p, rd, rn, rm) \
+ ARM_AND_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_ANDS_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_AND, rd, rn, rm, cond)
+#define ARM_ANDS_REG_REG(p, rd, rn, rm) \
+ ARM_ANDS_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _AND_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_AND, rd, rn, rm, cond)
+#define _AND_REG_REG(rd, rn, rm) \
+ _AND_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _ANDS_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_AND, rd, rn, rm, cond)
+#define _ANDS_REG_REG(rd, rn, rm) \
+ _ANDS_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn AND (Rm <shift_type> imm_shift) */
+#define ARM_AND_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_AND, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_AND_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_AND_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_ANDS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_AND, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_ANDS_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_ANDS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _AND_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_AND, rd, rn, rm, shift_type, imm_shift, cond)
+#define _AND_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _AND_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _ANDS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_AND, rd, rn, rm, shift_type, imm_shift, cond)
+#define _ANDS_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _ANDS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn AND (Rm <shift_type> Rs) */
+#define ARM_AND_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_AND, rd, rn, rm, shift_type, rs, cond)
+#define ARM_AND_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_AND_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_ANDS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_AND, rd, rn, rm, shift_type, rs, cond)
+#define ARM_ANDS_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_ANDS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _AND_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_AND, rd, rn, rm, shift_type, rs, cond)
+#define _AND_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _AND_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _ANDS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_AND, rd, rn, rm, shift_type, rs, cond)
+#define _ANDS_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _ANDS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
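+/*
+ * Example sketch: the *_REG_IMM forms take an 8-bit value plus the 4-bit
+ * rotate field, so the operand is imm8 rotated right by 2*rot (see the
+ * TST/TEQ/CMP comments further down; the second line below assumes that
+ * encoding).  ARMREG_* constants are assumed from earlier in this header:
+ *
+ *   ARM_AND_REG_IMM8(p, ARMREG_R0, ARMREG_R1, 0xFF);       // and r0, r1, #0xff
+ *   ARM_AND_REG_IMM(p, ARMREG_R0, ARMREG_R1, 0xFF, 12);    // and r0, r1, #0xff00
+ */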
+
+/* -- EOR -- */
+
+/* Rd := Rn EOR (imm8 ROR 2*rot) */
+#define ARM_EOR_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_EOR, rd, rn, imm8, rot, cond)
+#define ARM_EOR_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_EOR_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_EORS_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_EOR, rd, rn, imm8, rot, cond)
+#define ARM_EORS_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_EORS_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _EOR_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_EOR, rd, rn, imm8, rot, cond)
+#define _EOR_REG_IMM(rd, rn, imm8, rot) \
+ _EOR_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _EORS_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_EOR, rd, rn, imm8, rot, cond)
+#define _EORS_REG_IMM(rd, rn, imm8, rot) \
+ _EORS_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn EOR imm8 */
+#define ARM_EOR_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_EOR_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_EOR_REG_IMM8(p, rd, rn, imm8) \
+ ARM_EOR_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_EORS_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_EORS_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_EORS_REG_IMM8(p, rd, rn, imm8) \
+ ARM_EORS_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _EOR_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _EOR_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _EOR_REG_IMM8(rd, rn, imm8) \
+ _EOR_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _EORS_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _EORS_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _EORS_REG_IMM8(rd, rn, imm8) \
+ _EORS_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn EOR Rm */
+#define ARM_EOR_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_EOR, rd, rn, rm, cond)
+#define ARM_EOR_REG_REG(p, rd, rn, rm) \
+ ARM_EOR_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_EORS_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_EOR, rd, rn, rm, cond)
+#define ARM_EORS_REG_REG(p, rd, rn, rm) \
+ ARM_EORS_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _EOR_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_EOR, rd, rn, rm, cond)
+#define _EOR_REG_REG(rd, rn, rm) \
+ _EOR_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _EORS_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_EOR, rd, rn, rm, cond)
+#define _EORS_REG_REG(rd, rn, rm) \
+ _EORS_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn EOR (Rm <shift_type> imm_shift) */
+#define ARM_EOR_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_EOR, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_EOR_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_EOR_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_EORS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_EOR, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_EORS_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_EORS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _EOR_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_EOR, rd, rn, rm, shift_type, imm_shift, cond)
+#define _EOR_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _EOR_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _EORS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_EOR, rd, rn, rm, shift_type, imm_shift, cond)
+#define _EORS_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _EORS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn EOR (Rm <shift_type> Rs) */
+#define ARM_EOR_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_EOR, rd, rn, rm, shift_type, rs, cond)
+#define ARM_EOR_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_EOR_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_EORS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_EOR, rd, rn, rm, shift_type, rs, cond)
+#define ARM_EORS_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_EORS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _EOR_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_EOR, rd, rn, rm, shift_type, rs, cond)
+#define _EOR_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _EOR_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _EORS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_EOR, rd, rn, rm, shift_type, rs, cond)
+#define _EORS_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _EORS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
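+/*
+ * Example sketch: EOR toggles bits in place, e.g. flipping a flag bit
+ * (ARMREG_* assumed from earlier in this header):
+ *
+ *   ARM_EOR_REG_IMM8(p, ARMREG_R0, ARMREG_R0, 0x01);       // eor r0, r0, #1
+ */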
+
+/* -- SUB -- */
+
+/* Rd := Rn SUB (imm8 ROR 2*rot) */
+#define ARM_SUB_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_SUB, rd, rn, imm8, rot, cond)
+#define ARM_SUB_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_SUB_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_SUBS_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_SUB, rd, rn, imm8, rot, cond)
+#define ARM_SUBS_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_SUBS_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _SUB_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_SUB, rd, rn, imm8, rot, cond)
+#define _SUB_REG_IMM(rd, rn, imm8, rot) \
+ _SUB_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _SUBS_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_SUB, rd, rn, imm8, rot, cond)
+#define _SUBS_REG_IMM(rd, rn, imm8, rot) \
+ _SUBS_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn SUB imm8 */
+#define ARM_SUB_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_SUB_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_SUB_REG_IMM8(p, rd, rn, imm8) \
+ ARM_SUB_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_SUBS_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_SUBS_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_SUBS_REG_IMM8(p, rd, rn, imm8) \
+ ARM_SUBS_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _SUB_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _SUB_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _SUB_REG_IMM8(rd, rn, imm8) \
+ _SUB_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _SUBS_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _SUBS_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _SUBS_REG_IMM8(rd, rn, imm8) \
+ _SUBS_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn SUB Rm */
+#define ARM_SUB_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_SUB, rd, rn, rm, cond)
+#define ARM_SUB_REG_REG(p, rd, rn, rm) \
+ ARM_SUB_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_SUBS_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_SUB, rd, rn, rm, cond)
+#define ARM_SUBS_REG_REG(p, rd, rn, rm) \
+ ARM_SUBS_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _SUB_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_SUB, rd, rn, rm, cond)
+#define _SUB_REG_REG(rd, rn, rm) \
+ _SUB_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _SUBS_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_SUB, rd, rn, rm, cond)
+#define _SUBS_REG_REG(rd, rn, rm) \
+ _SUBS_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn SUB (Rm <shift_type> imm_shift) */
+#define ARM_SUB_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_SUB, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_SUB_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_SUB_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_SUBS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_SUB, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_SUBS_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_SUBS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _SUB_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_SUB, rd, rn, rm, shift_type, imm_shift, cond)
+#define _SUB_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _SUB_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _SUBS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_SUB, rd, rn, rm, shift_type, imm_shift, cond)
+#define _SUBS_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _SUBS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn SUB (Rm <shift_type> Rs) */
+#define ARM_SUB_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_SUB, rd, rn, rm, shift_type, rs, cond)
+#define ARM_SUB_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_SUB_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_SUBS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_SUB, rd, rn, rm, shift_type, rs, cond)
+#define ARM_SUBS_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_SUBS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _SUB_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_SUB, rd, rn, rm, shift_type, rs, cond)
+#define _SUB_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _SUB_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _SUBS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_SUB, rd, rn, rm, shift_type, rs, cond)
+#define _SUBS_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _SUBS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
+/* -- RSB (reverse subtract: Rd := shifter_operand - Rn) -- */
+
+/* Rd := Rn RSB (imm8 ROR 2*rot) */
+#define ARM_RSB_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_RSB, rd, rn, imm8, rot, cond)
+#define ARM_RSB_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_RSB_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_RSBS_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_RSB, rd, rn, imm8, rot, cond)
+#define ARM_RSBS_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_RSBS_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _RSB_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_RSB, rd, rn, imm8, rot, cond)
+#define _RSB_REG_IMM(rd, rn, imm8, rot) \
+ _RSB_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _RSBS_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_RSB, rd, rn, imm8, rot, cond)
+#define _RSBS_REG_IMM(rd, rn, imm8, rot) \
+ _RSBS_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn RSB imm8 */
+#define ARM_RSB_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_RSB_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_RSB_REG_IMM8(p, rd, rn, imm8) \
+ ARM_RSB_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_RSBS_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_RSBS_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_RSBS_REG_IMM8(p, rd, rn, imm8) \
+ ARM_RSBS_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _RSB_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _RSB_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _RSB_REG_IMM8(rd, rn, imm8) \
+ _RSB_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _RSBS_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _RSBS_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _RSBS_REG_IMM8(rd, rn, imm8) \
+ _RSBS_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn RSB Rm */
+#define ARM_RSB_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_RSB, rd, rn, rm, cond)
+#define ARM_RSB_REG_REG(p, rd, rn, rm) \
+ ARM_RSB_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_RSBS_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_RSB, rd, rn, rm, cond)
+#define ARM_RSBS_REG_REG(p, rd, rn, rm) \
+ ARM_RSBS_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _RSB_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_RSB, rd, rn, rm, cond)
+#define _RSB_REG_REG(rd, rn, rm) \
+ _RSB_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _RSBS_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_RSB, rd, rn, rm, cond)
+#define _RSBS_REG_REG(rd, rn, rm) \
+ _RSBS_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn RSB (Rm <shift_type> imm_shift) */
+#define ARM_RSB_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_RSB, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_RSB_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_RSB_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_RSBS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_RSB, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_RSBS_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_RSBS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _RSB_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_RSB, rd, rn, rm, shift_type, imm_shift, cond)
+#define _RSB_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _RSB_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _RSBS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_RSB, rd, rn, rm, shift_type, imm_shift, cond)
+#define _RSBS_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _RSBS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn RSB (Rm <shift_type> Rs) */
+#define ARM_RSB_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_RSB, rd, rn, rm, shift_type, rs, cond)
+#define ARM_RSB_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_RSB_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_RSBS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_RSB, rd, rn, rm, shift_type, rs, cond)
+#define ARM_RSBS_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_RSBS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _RSB_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_RSB, rd, rn, rm, shift_type, rs, cond)
+#define _RSB_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _RSB_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _RSBS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_RSB, rd, rn, rm, shift_type, rs, cond)
+#define _RSBS_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _RSBS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
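+/*
+ * Example sketch: because RSB subtracts Rn from the shifter operand, negating
+ * a register is a reverse subtract from zero (ARMREG_* assumed as above):
+ *
+ *   ARM_RSB_REG_IMM8(p, ARMREG_R0, ARMREG_R0, 0);          // rsb r0, r0, #0   (r0 = -r0)
+ *   ARM_SUB_REG_IMM8(p, ARMREG_R1, ARMREG_R1, 1);          // sub r1, r1, #1   (r1 -= 1)
+ */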
+
+/* -- ADD -- */
+
+/* Rd := Rn ADD (imm8 ROR 2*rot) */
+#define ARM_ADD_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_ADD, rd, rn, imm8, rot, cond)
+#define ARM_ADD_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_ADD_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_ADDS_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_ADD, rd, rn, imm8, rot, cond)
+#define ARM_ADDS_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_ADDS_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ADD_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_ADD, rd, rn, imm8, rot, cond)
+#define _ADD_REG_IMM(rd, rn, imm8, rot) \
+ _ADD_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _ADDS_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_ADD, rd, rn, imm8, rot, cond)
+#define _ADDS_REG_IMM(rd, rn, imm8, rot) \
+ _ADDS_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ADD imm8 */
+#define ARM_ADD_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_ADD_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_ADD_REG_IMM8(p, rd, rn, imm8) \
+ ARM_ADD_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_ADDS_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_ADDS_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_ADDS_REG_IMM8(p, rd, rn, imm8) \
+ ARM_ADDS_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ADD_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _ADD_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _ADD_REG_IMM8(rd, rn, imm8) \
+ _ADD_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _ADDS_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _ADDS_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _ADDS_REG_IMM8(rd, rn, imm8) \
+ _ADDS_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ADD Rm */
+#define ARM_ADD_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_ADD, rd, rn, rm, cond)
+#define ARM_ADD_REG_REG(p, rd, rn, rm) \
+ ARM_ADD_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_ADDS_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_ADD, rd, rn, rm, cond)
+#define ARM_ADDS_REG_REG(p, rd, rn, rm) \
+ ARM_ADDS_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ADD_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_ADD, rd, rn, rm, cond)
+#define _ADD_REG_REG(rd, rn, rm) \
+ _ADD_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _ADDS_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_ADD, rd, rn, rm, cond)
+#define _ADDS_REG_REG(rd, rn, rm) \
+ _ADDS_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ADD (Rm <shift_type> imm_shift) */
+#define ARM_ADD_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_ADD, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_ADD_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_ADD_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_ADDS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_ADD, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_ADDS_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_ADDS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ADD_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_ADD, rd, rn, rm, shift_type, imm_shift, cond)
+#define _ADD_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _ADD_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _ADDS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_ADD, rd, rn, rm, shift_type, imm_shift, cond)
+#define _ADDS_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _ADDS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ADD (Rm <shift_type> Rs) */
+#define ARM_ADD_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_ADD, rd, rn, rm, shift_type, rs, cond)
+#define ARM_ADD_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_ADD_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_ADDS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_ADD, rd, rn, rm, shift_type, rs, cond)
+#define ARM_ADDS_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_ADDS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ADD_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_ADD, rd, rn, rm, shift_type, rs, cond)
+#define _ADD_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _ADD_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _ADDS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_ADD, rd, rn, rm, shift_type, rs, cond)
+#define _ADDS_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _ADDS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
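+/*
+ * Example sketch: the shifted-register form gives scaled-index arithmetic in
+ * a single instruction, e.g. the address of a 32-bit array element (ARMREG_*
+ * and ARMSHIFT_LSL assumed from earlier in this header):
+ *
+ *   // r0 = r1 + (r2 << 2)
+ *   ARM_ADD_REG_IMMSHIFT(p, ARMREG_R0, ARMREG_R1, ARMREG_R2, ARMSHIFT_LSL, 2);
+ */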
+
+/* -- ADC -- */
+
+/* Rd := Rn ADC (imm8 ROR 2*rot) */
+#define ARM_ADC_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_ADC, rd, rn, imm8, rot, cond)
+#define ARM_ADC_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_ADC_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_ADCS_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_ADC, rd, rn, imm8, rot, cond)
+#define ARM_ADCS_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_ADCS_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ADC_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_ADC, rd, rn, imm8, rot, cond)
+#define _ADC_REG_IMM(rd, rn, imm8, rot) \
+ _ADC_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _ADCS_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_ADC, rd, rn, imm8, rot, cond)
+#define _ADCS_REG_IMM(rd, rn, imm8, rot) \
+ _ADCS_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ADC imm8 */
+#define ARM_ADC_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_ADC_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_ADC_REG_IMM8(p, rd, rn, imm8) \
+ ARM_ADC_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_ADCS_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_ADCS_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_ADCS_REG_IMM8(p, rd, rn, imm8) \
+ ARM_ADCS_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ADC_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _ADC_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _ADC_REG_IMM8(rd, rn, imm8) \
+ _ADC_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _ADCS_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _ADCS_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _ADCS_REG_IMM8(rd, rn, imm8) \
+ _ADCS_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ADC Rm */
+#define ARM_ADC_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_ADC, rd, rn, rm, cond)
+#define ARM_ADC_REG_REG(p, rd, rn, rm) \
+ ARM_ADC_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_ADCS_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_ADC, rd, rn, rm, cond)
+#define ARM_ADCS_REG_REG(p, rd, rn, rm) \
+ ARM_ADCS_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ADC_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_ADC, rd, rn, rm, cond)
+#define _ADC_REG_REG(rd, rn, rm) \
+ _ADC_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _ADCS_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_ADC, rd, rn, rm, cond)
+#define _ADCS_REG_REG(rd, rn, rm) \
+ _ADCS_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ADC (Rm <shift_type> imm_shift) */
+#define ARM_ADC_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_ADC, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_ADC_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_ADC_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_ADCS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_ADC, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_ADCS_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_ADCS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ADC_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_ADC, rd, rn, rm, shift_type, imm_shift, cond)
+#define _ADC_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _ADC_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _ADCS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_ADC, rd, rn, rm, shift_type, imm_shift, cond)
+#define _ADCS_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _ADCS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ADC (Rm <shift_type> Rs) */
+#define ARM_ADC_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_ADC, rd, rn, rm, shift_type, rs, cond)
+#define ARM_ADC_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_ADC_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_ADCS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_ADC, rd, rn, rm, shift_type, rs, cond)
+#define ARM_ADCS_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_ADCS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ADC_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_ADC, rd, rn, rm, shift_type, rs, cond)
+#define _ADC_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _ADC_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _ADCS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_ADC, rd, rn, rm, shift_type, rs, cond)
+#define _ADCS_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _ADCS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
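+/*
+ * Example sketch: a 64-bit add over a register pair sets the carry with ADDS
+ * on the low words and consumes it with ADC on the high words; SUBS/SBC
+ * mirror this for 64-bit subtraction (ARMREG_* assumed as above):
+ *
+ *   ARM_ADDS_REG_REG(p, ARMREG_R0, ARMREG_R0, ARMREG_R2);  // adds r0, r0, r2
+ *   ARM_ADC_REG_REG(p, ARMREG_R1, ARMREG_R1, ARMREG_R3);   // adc  r1, r1, r3
+ */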
+
+/* -- SBC (subtract with carry: Rd := Rn - shifter_operand - NOT C) -- */
+
+/* Rd := Rn SBC (imm8 ROR 2*rot) */
+#define ARM_SBC_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_SBC, rd, rn, imm8, rot, cond)
+#define ARM_SBC_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_SBC_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_SBCS_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_SBC, rd, rn, imm8, rot, cond)
+#define ARM_SBCS_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_SBCS_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _SBC_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_SBC, rd, rn, imm8, rot, cond)
+#define _SBC_REG_IMM(rd, rn, imm8, rot) \
+ _SBC_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _SBCS_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_SBC, rd, rn, imm8, rot, cond)
+#define _SBCS_REG_IMM(rd, rn, imm8, rot) \
+ _SBCS_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn SBC imm8 */
+#define ARM_SBC_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_SBC_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_SBC_REG_IMM8(p, rd, rn, imm8) \
+ ARM_SBC_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_SBCS_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_SBCS_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_SBCS_REG_IMM8(p, rd, rn, imm8) \
+ ARM_SBCS_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _SBC_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _SBC_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _SBC_REG_IMM8(rd, rn, imm8) \
+ _SBC_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _SBCS_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _SBCS_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _SBCS_REG_IMM8(rd, rn, imm8) \
+ _SBCS_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn SBC Rm */
+#define ARM_SBC_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_SBC, rd, rn, rm, cond)
+#define ARM_SBC_REG_REG(p, rd, rn, rm) \
+ ARM_SBC_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_SBCS_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_SBC, rd, rn, rm, cond)
+#define ARM_SBCS_REG_REG(p, rd, rn, rm) \
+ ARM_SBCS_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _SBC_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_SBC, rd, rn, rm, cond)
+#define _SBC_REG_REG(rd, rn, rm) \
+ _SBC_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _SBCS_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_SBC, rd, rn, rm, cond)
+#define _SBCS_REG_REG(rd, rn, rm) \
+ _SBCS_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn SBC (Rm <shift_type> imm_shift) */
+#define ARM_SBC_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_SBC, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_SBC_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_SBC_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_SBCS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_SBC, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_SBCS_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_SBCS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _SBC_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_SBC, rd, rn, rm, shift_type, imm_shift, cond)
+#define _SBC_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _SBC_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _SBCS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_SBC, rd, rn, rm, shift_type, imm_shift, cond)
+#define _SBCS_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _SBCS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn SBC (Rm <shift_type> Rs) */
+#define ARM_SBC_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_SBC, rd, rn, rm, shift_type, rs, cond)
+#define ARM_SBC_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_SBC_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_SBCS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_SBC, rd, rn, rm, shift_type, rs, cond)
+#define ARM_SBCS_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_SBCS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _SBC_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_SBC, rd, rn, rm, shift_type, rs, cond)
+#define _SBC_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _SBC_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _SBCS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_SBC, rd, rn, rm, shift_type, rs, cond)
+#define _SBCS_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _SBCS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
+/* -- RSC (reverse subtract with carry: Rd := shifter_operand - Rn - NOT C) -- */
+
+/* Rd := Rn RSC (imm8 ROR 2*rot) */
+#define ARM_RSC_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_RSC, rd, rn, imm8, rot, cond)
+#define ARM_RSC_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_RSC_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_RSCS_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_RSC, rd, rn, imm8, rot, cond)
+#define ARM_RSCS_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_RSCS_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _RSC_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_RSC, rd, rn, imm8, rot, cond)
+#define _RSC_REG_IMM(rd, rn, imm8, rot) \
+ _RSC_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _RSCS_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_RSC, rd, rn, imm8, rot, cond)
+#define _RSCS_REG_IMM(rd, rn, imm8, rot) \
+ _RSCS_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn RSC imm8 */
+#define ARM_RSC_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_RSC_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_RSC_REG_IMM8(p, rd, rn, imm8) \
+ ARM_RSC_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_RSCS_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_RSCS_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_RSCS_REG_IMM8(p, rd, rn, imm8) \
+ ARM_RSCS_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _RSC_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _RSC_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _RSC_REG_IMM8(rd, rn, imm8) \
+ _RSC_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _RSCS_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _RSCS_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _RSCS_REG_IMM8(rd, rn, imm8) \
+ _RSCS_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn RSC Rm */
+#define ARM_RSC_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_RSC, rd, rn, rm, cond)
+#define ARM_RSC_REG_REG(p, rd, rn, rm) \
+ ARM_RSC_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_RSCS_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_RSC, rd, rn, rm, cond)
+#define ARM_RSCS_REG_REG(p, rd, rn, rm) \
+ ARM_RSCS_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _RSC_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_RSC, rd, rn, rm, cond)
+#define _RSC_REG_REG(rd, rn, rm) \
+ _RSC_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _RSCS_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_RSC, rd, rn, rm, cond)
+#define _RSCS_REG_REG(rd, rn, rm) \
+ _RSCS_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn RSC (Rm <shift_type> imm_shift) */
+#define ARM_RSC_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_RSC, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_RSC_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_RSC_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_RSCS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_RSC, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_RSCS_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_RSCS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _RSC_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_RSC, rd, rn, rm, shift_type, imm_shift, cond)
+#define _RSC_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _RSC_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _RSCS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_RSC, rd, rn, rm, shift_type, imm_shift, cond)
+#define _RSCS_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _RSCS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn RSC (Rm <shift_type> Rs) */
+#define ARM_RSC_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_RSC, rd, rn, rm, shift_type, rs, cond)
+#define ARM_RSC_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_RSC_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_RSCS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_RSC, rd, rn, rm, shift_type, rs, cond)
+#define ARM_RSCS_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_RSCS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _RSC_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_RSC, rd, rn, rm, shift_type, rs, cond)
+#define _RSC_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _RSC_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _RSCS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_RSC, rd, rn, rm, shift_type, rs, cond)
+#define _RSCS_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _RSCS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
+/* -- ORR -- */
+
+/* Rd := Rn ORR (imm8 ROR 2*rot) */
+#define ARM_ORR_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_ORR, rd, rn, imm8, rot, cond)
+#define ARM_ORR_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_ORR_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_ORRS_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_ORR, rd, rn, imm8, rot, cond)
+#define ARM_ORRS_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_ORRS_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ORR_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_ORR, rd, rn, imm8, rot, cond)
+#define _ORR_REG_IMM(rd, rn, imm8, rot) \
+ _ORR_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _ORRS_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_ORR, rd, rn, imm8, rot, cond)
+#define _ORRS_REG_IMM(rd, rn, imm8, rot) \
+ _ORRS_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ORR imm8 */
+#define ARM_ORR_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_ORR_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_ORR_REG_IMM8(p, rd, rn, imm8) \
+ ARM_ORR_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_ORRS_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_ORRS_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_ORRS_REG_IMM8(p, rd, rn, imm8) \
+ ARM_ORRS_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ORR_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _ORR_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _ORR_REG_IMM8(rd, rn, imm8) \
+ _ORR_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _ORRS_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _ORRS_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _ORRS_REG_IMM8(rd, rn, imm8) \
+ _ORRS_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ORR Rm */
+#define ARM_ORR_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_ORR, rd, rn, rm, cond)
+#define ARM_ORR_REG_REG(p, rd, rn, rm) \
+ ARM_ORR_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_ORRS_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_ORR, rd, rn, rm, cond)
+#define ARM_ORRS_REG_REG(p, rd, rn, rm) \
+ ARM_ORRS_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ORR_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_ORR, rd, rn, rm, cond)
+#define _ORR_REG_REG(rd, rn, rm) \
+ _ORR_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _ORRS_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_ORR, rd, rn, rm, cond)
+#define _ORRS_REG_REG(rd, rn, rm) \
+ _ORRS_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ORR (Rm <shift_type> imm_shift) */
+#define ARM_ORR_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_ORR, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_ORR_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_ORR_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_ORRS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_ORR, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_ORRS_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_ORRS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ORR_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_ORR, rd, rn, rm, shift_type, imm_shift, cond)
+#define _ORR_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _ORR_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _ORRS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_ORR, rd, rn, rm, shift_type, imm_shift, cond)
+#define _ORRS_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _ORRS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn ORR (Rm <shift_type> Rs) */
+#define ARM_ORR_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_ORR, rd, rn, rm, shift_type, rs, cond)
+#define ARM_ORR_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_ORR_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_ORRS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_ORR, rd, rn, rm, shift_type, rs, cond)
+#define ARM_ORRS_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_ORRS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _ORR_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_ORR, rd, rn, rm, shift_type, rs, cond)
+#define _ORR_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _ORR_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _ORRS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_ORR, rd, rn, rm, shift_type, rs, cond)
+#define _ORRS_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _ORRS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
+/* -- BIC -- */
+
+/* Rd := Rn BIC (imm8 ROR 2*rot) */
+#define ARM_BIC_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_BIC, rd, rn, imm8, rot, cond)
+#define ARM_BIC_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_BIC_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_BICS_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_BIC, rd, rn, imm8, rot, cond)
+#define ARM_BICS_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_BICS_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _BIC_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_BIC, rd, rn, imm8, rot, cond)
+#define _BIC_REG_IMM(rd, rn, imm8, rot) \
+ _BIC_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _BICS_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_BIC, rd, rn, imm8, rot, cond)
+#define _BICS_REG_IMM(rd, rn, imm8, rot) \
+ _BICS_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn BIC imm8 */
+#define ARM_BIC_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_BIC_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_BIC_REG_IMM8(p, rd, rn, imm8) \
+ ARM_BIC_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_BICS_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_BICS_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_BICS_REG_IMM8(p, rd, rn, imm8) \
+ ARM_BICS_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _BIC_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _BIC_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _BIC_REG_IMM8(rd, rn, imm8) \
+ _BIC_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _BICS_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _BICS_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _BICS_REG_IMM8(rd, rn, imm8) \
+ _BICS_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn BIC Rm */
+#define ARM_BIC_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_BIC, rd, rn, rm, cond)
+#define ARM_BIC_REG_REG(p, rd, rn, rm) \
+ ARM_BIC_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_BICS_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_BIC, rd, rn, rm, cond)
+#define ARM_BICS_REG_REG(p, rd, rn, rm) \
+ ARM_BICS_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _BIC_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_BIC, rd, rn, rm, cond)
+#define _BIC_REG_REG(rd, rn, rm) \
+ _BIC_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _BICS_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_BIC, rd, rn, rm, cond)
+#define _BICS_REG_REG(rd, rn, rm) \
+ _BICS_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn BIC (Rm <shift_type> imm_shift) */
+#define ARM_BIC_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_BIC, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_BIC_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_BIC_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_BICS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_BIC, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_BICS_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_BICS_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _BIC_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_BIC, rd, rn, rm, shift_type, imm_shift, cond)
+#define _BIC_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _BIC_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _BICS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_BIC, rd, rn, rm, shift_type, imm_shift, cond)
+#define _BICS_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _BICS_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn BIC (Rm <shift_type> Rs) */
+#define ARM_BIC_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_BIC, rd, rn, rm, shift_type, rs, cond)
+#define ARM_BIC_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_BIC_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_BICS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_BIC, rd, rn, rm, shift_type, rs, cond)
+#define ARM_BICS_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_BICS_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _BIC_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_BIC, rd, rn, rm, shift_type, rs, cond)
+#define _BIC_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _BIC_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _BICS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_BIC, rd, rn, rm, shift_type, rs, cond)
+#define _BICS_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _BICS_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
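+/*
+ * Example sketch: ORR sets bits and BIC clears them (Rd := Rn AND NOT
+ * operand), the usual pattern for updating flag bits in a status word
+ * (ARMREG_* assumed as above):
+ *
+ *   ARM_ORR_REG_IMM8(p, ARMREG_R0, ARMREG_R0, 0x20);       // orr r0, r0, #0x20
+ *   ARM_BIC_REG_IMM8(p, ARMREG_R0, ARMREG_R0, 0x80);       // bic r0, r0, #0x80
+ */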
+
+
+
+
+
+/* DPIs, comparison */
+
+/* PSR := TST Rn, (imm8 ROR 2*rot) */
+#define ARM_TST_REG_IMM_COND(p, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_TST, 0, rn, imm8, rot, cond)
+#define ARM_TST_REG_IMM(p, rn, imm8, rot) \
+ ARM_TST_REG_IMM_COND(p, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _TST_REG_IMM_COND(rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_TST, 0, rn, imm8, rot, cond)
+#define _TST_REG_IMM(rn, imm8, rot) \
+ _TST_REG_IMM_COND(rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* PSR := TST Rn, imm8 */
+#define ARM_TST_REG_IMM8_COND(p, rn, imm8, cond) \
+ ARM_TST_REG_IMM_COND(p, rn, imm8, 0, cond)
+#define ARM_TST_REG_IMM8(p, rn, imm8) \
+ ARM_TST_REG_IMM8_COND(p, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _TST_REG_IMM8_COND(rn, imm8, cond) \
+ _TST_REG_IMM_COND(rn, imm8, 0, cond)
+#define _TST_REG_IMM8(rn, imm8) \
+ _TST_REG_IMM8_COND(rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* PSR := TST Rn, Rm */
+#define ARM_TST_REG_REG_COND(p, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_TST, 0, rn, rm, cond)
+#define ARM_TST_REG_REG(p, rn, rm) \
+ ARM_TST_REG_REG_COND(p, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _TST_REG_REG_COND(rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_TST, 0, rn, rm, cond)
+#define _TST_REG_REG(rn, rm) \
+ _TST_REG_REG_COND(rn, rm, ARMCOND_AL)
+#endif
+
+
+/* PSR := TST Rn, (Rm <shift_type> imm_shift) */
+#define ARM_TST_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_TST, 0, rn, rm, shift_type, imm_shift, cond)
+#define ARM_TST_REG_IMMSHIFT(p, rn, rm, shift_type, imm_shift) \
+ ARM_TST_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _TST_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_TST, 0, rn, rm, shift_type, imm_shift, cond)
+#define _TST_REG_IMMSHIFT(rn, rm, shift_type, imm_shift) \
+ _TST_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
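+/*
+ * Example sketch: TST performs an AND purely for its effect on the flags, so
+ * a bit can be tested and acted on without a scratch register (ARMCOND_NE is
+ * assumed to be defined with the other condition codes earlier in this file):
+ *
+ *   ARM_TST_REG_IMM8(p, ARMREG_R0, 0x04);                           // tst   r0, #4
+ *   ARM_ORR_REG_IMM8_COND(p, ARMREG_R1, ARMREG_R1, 1, ARMCOND_NE);  // orrne r1, r1, #1
+ */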
+
+/* PSR := TEQ Rn, (imm8 ROR 2*rot) */
+#define ARM_TEQ_REG_IMM_COND(p, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_TEQ, 0, rn, imm8, rot, cond)
+#define ARM_TEQ_REG_IMM(p, rn, imm8, rot) \
+ ARM_TEQ_REG_IMM_COND(p, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _TEQ_REG_IMM_COND(rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_TEQ, 0, rn, imm8, rot, cond)
+#define _TEQ_REG_IMM(rn, imm8, rot) \
+ _TEQ_REG_IMM_COND(rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* PSR := TEQ Rn, imm8 */
+#define ARM_TEQ_REG_IMM8_COND(p, rn, imm8, cond) \
+ ARM_TEQ_REG_IMM_COND(p, rn, imm8, 0, cond)
+#define ARM_TEQ_REG_IMM8(p, rn, imm8) \
+ ARM_TEQ_REG_IMM8_COND(p, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _TEQ_REG_IMM8_COND(rn, imm8, cond) \
+ _TEQ_REG_IMM_COND(rn, imm8, 0, cond)
+#define _TEQ_REG_IMM8(rn, imm8) \
+ _TEQ_REG_IMM8_COND(rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* PSR := TEQ Rn, Rm */
+#define ARM_TEQ_REG_REG_COND(p, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_TEQ, 0, rn, rm, cond)
+#define ARM_TEQ_REG_REG(p, rn, rm) \
+ ARM_TEQ_REG_REG_COND(p, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _TEQ_REG_REG_COND(rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_TEQ, 0, rn, rm, cond)
+#define _TEQ_REG_REG(rn, rm) \
+ _TEQ_REG_REG_COND(rn, rm, ARMCOND_AL)
+#endif
+
+
+/* PSR := TEQ Rn, (Rm <shift_type> imm8) */
+#define ARM_TEQ_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_TEQ, 0, rn, rm, shift_type, imm_shift, cond)
+#define ARM_TEQ_REG_IMMSHIFT(p, rn, rm, shift_type, imm_shift) \
+ ARM_TEQ_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _TEQ_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_TEQ, 0, rn, rm, shift_type, imm_shift, cond)
+#define _TEQ_REG_IMMSHIFT(rn, rm, shift_type, imm_shift) \
+ _TEQ_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* PSR := CMP Rn, (imm8 ROR 2*rot) */
+#define ARM_CMP_REG_IMM_COND(p, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_CMP, 0, rn, imm8, rot, cond)
+#define ARM_CMP_REG_IMM(p, rn, imm8, rot) \
+ ARM_CMP_REG_IMM_COND(p, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _CMP_REG_IMM_COND(rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_CMP, 0, rn, imm8, rot, cond)
+#define _CMP_REG_IMM(rn, imm8, rot) \
+ _CMP_REG_IMM_COND(rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* PSR := CMP Rn, imm8 */
+#define ARM_CMP_REG_IMM8_COND(p, rn, imm8, cond) \
+ ARM_CMP_REG_IMM_COND(p, rn, imm8, 0, cond)
+#define ARM_CMP_REG_IMM8(p, rn, imm8) \
+ ARM_CMP_REG_IMM8_COND(p, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _CMP_REG_IMM8_COND(rn, imm8, cond) \
+ _CMP_REG_IMM_COND(rn, imm8, 0, cond)
+#define _CMP_REG_IMM8(rn, imm8) \
+ _CMP_REG_IMM8_COND(rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* PSR := CMP Rn, Rm */
+#define ARM_CMP_REG_REG_COND(p, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_CMP, 0, rn, rm, cond)
+#define ARM_CMP_REG_REG(p, rn, rm) \
+ ARM_CMP_REG_REG_COND(p, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _CMP_REG_REG_COND(rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_CMP, 0, rn, rm, cond)
+#define _CMP_REG_REG(rn, rm) \
+ _CMP_REG_REG_COND(rn, rm, ARMCOND_AL)
+#endif
+
+
+/* PSR := CMP Rn, (Rm <shift_type> imm8) */
+#define ARM_CMP_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_CMP, 0, rn, rm, shift_type, imm_shift, cond)
+#define ARM_CMP_REG_IMMSHIFT(p, rn, rm, shift_type, imm_shift) \
+ ARM_CMP_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _CMP_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_CMP, 0, rn, rm, shift_type, imm_shift, cond)
+#define _CMP_REG_IMMSHIFT(rn, rm, shift_type, imm_shift) \
+ _CMP_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* PSR := CMP Rn, (Rm <shift_type> Rs) */
+#define ARM_CMP_REG_REGSHIFT_COND(p, rn, rm, shift_type, rs, cond) \
+ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_CMP, 0, rn, rm, shift_type, rs, cond)
+#define ARM_CMP_REG_REGSHIFT(p, rn, rm, shift_type, rs) \
+ARM_CMP_REG_REGSHIFT_COND(p, rn, rm, shift_type, rs, ARMCOND_AL)
+
+/* PSR := CMN Rn, (Rm <shift_type> Rs) */
+#define ARM_CMN_REG_REGSHIFT_COND(p, rn, rm, shift_type, rs, cond) \
+ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_CMN, 0, rn, rm, shift_type, rs, cond)
+#define ARM_CMN_REG_REGSHIFT(p, rn, rm, shift_type, rs) \
+ARM_CMN_REG_REGSHIFT_COND(p, rn, rm, shift_type, rs, ARMCOND_AL)
+
+/* PSR := TST Rn, (Rm <shift_type> Rs) */
+#define ARM_TST_REG_REGSHIFT_COND(p, rn, rm, shift_type, rs, cond) \
+ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_TST, 0, rn, rm, shift_type, rs, cond)
+#define ARM_TST_REG_REGSHIFT(p, rn, rm, shift_type, rs) \
+ARM_TST_REG_REGSHIFT_COND(p, rn, rm, shift_type, rs, ARMCOND_AL)
+
+/* PSR := TEQ Rn, (Rm <shift_type> Rs) */
+#define ARM_TEQ_REG_REGSHIFT_COND(p, rn, rm, shift_type, rs, cond) \
+ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_TEQ, 0, rn, rm, shift_type, rs, cond)
+#define ARM_TEQ_REG_REGSHIFT(p, rn, rm, shift_type, rs) \
+ARM_TEQ_REG_REGSHIFT_COND(p, rn, rm, shift_type, rs, ARMCOND_AL)
+
+
+
+#ifndef ARM_NOIASM
+#define _CMP_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_CMP, rd, rn, rm, shift_type, rs, cond)
+#define _CMP_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+_CMP_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
+/* PSR := CMN Rn, (imm8 ROR 2*rot) */
+#define ARM_CMN_REG_IMM_COND(p, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_CMN, 0, rn, imm8, rot, cond)
+#define ARM_CMN_REG_IMM(p, rn, imm8, rot) \
+ ARM_CMN_REG_IMM_COND(p, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _CMN_REG_IMM_COND(rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_CMN, 0, rn, imm8, rot, cond)
+#define _CMN_REG_IMM(rn, imm8, rot) \
+ _CMN_REG_IMM_COND(rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* PSR := CMN Rn, imm8 */
+#define ARM_CMN_REG_IMM8_COND(p, rn, imm8, cond) \
+ ARM_CMN_REG_IMM_COND(p, rn, imm8, 0, cond)
+#define ARM_CMN_REG_IMM8(p, rn, imm8) \
+ ARM_CMN_REG_IMM8_COND(p, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _CMN_REG_IMM8_COND(rn, imm8, cond) \
+ _CMN_REG_IMM_COND(rn, imm8, 0, cond)
+#define _CMN_REG_IMM8(rn, imm8) \
+ _CMN_REG_IMM8_COND(rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* PSR := CMN Rn, Rm */
+#define ARM_CMN_REG_REG_COND(p, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_CMN, 0, rn, rm, cond)
+#define ARM_CMN_REG_REG(p, rn, rm) \
+ ARM_CMN_REG_REG_COND(p, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _CMN_REG_REG_COND(rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_CMN, 0, rn, rm, cond)
+#define _CMN_REG_REG(rn, rm) \
+ _CMN_REG_REG_COND(rn, rm, ARMCOND_AL)
+#endif
+
+
+/* PSR := CMN Rn, (Rm <shift_type> imm8) */
+#define ARM_CMN_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_CMN, 0, rn, rm, shift_type, imm_shift, cond)
+#define ARM_CMN_REG_IMMSHIFT(p, rn, rm, shift_type, imm_shift) \
+ ARM_CMN_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _CMN_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_CMN, 0, rn, rm, shift_type, imm_shift, cond)
+#define _CMN_REG_IMMSHIFT(rn, rm, shift_type, imm_shift) \
+ _CMN_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+
+/* end generated */
+
diff --git a/arm/arm_emit.h b/arm/arm_emit.h
new file mode 100644
index 0000000..5d2eee0
--- /dev/null
+++ b/arm/arm_emit.h
@@ -0,0 +1,1966 @@
+/* gameplaySP
+ *
+ * Copyright (C) 2006 Exophase <exophase@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef ARM_EMIT_H
+#define ARM_EMIT_H
+
+#include "arm_codegen.h"
+
+u32 arm_update_gba_arm(u32 pc);
+u32 arm_update_gba_thumb(u32 pc);
+u32 arm_update_gba_idle_arm(u32 pc);
+u32 arm_update_gba_idle_thumb(u32 pc);
+
+// Although these are declared as functions, don't call them as
+// such (jump to them instead)
+void arm_indirect_branch_arm(u32 address);
+void arm_indirect_branch_thumb(u32 address);
+void arm_indirect_branch_dual_arm(u32 address);
+void arm_indirect_branch_dual_thumb(u32 address);
+
+void execute_store_cpsr(u32 new_cpsr, u32 store_mask, u32 address);
+u32 execute_store_cpsr_body(u32 _cpsr, u32 store_mask, u32 address);
+void execute_store_spsr(u32 new_cpsr, u32 store_mask);
+u32 execute_read_spsr();
+u32 execute_spsr_restore(u32 address);
+
+void execute_swi_arm(u32 pc);
+void execute_swi_thumb(u32 pc);
+
+void function_cc execute_store_u32_safe(u32 address, u32 source);
+
+void step_debug_arm(u32 pc);
+
+
+#define write32(value) \
+ *((u32 *)translation_ptr) = value; \
+ translation_ptr += 4 \
+
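+// write32() emits both instruction words built by the codegen macros and
+// literal data (such as the current GBA PC) dropped into the stream right
+// after a call; the stubs in arm_stub.S presumably pick such literals up
+// through their return address.
+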
+#define arm_relative_offset(source, offset) \
+ (((((u32)offset - (u32)source) - 8) >> 2) & 0xFFFFFF) \
+
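+// For example, a branch emitted at translation address 0x1000 that must
+// reach 0x1010 gets an offset field of ((0x1010 - 0x1000) - 8) >> 2 = 2,
+// since ARM reads PC as the instruction address + 8 and encodes branch
+// offsets in words.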
+
+// reg_base_offset is the number of bytes past reg_base at which the
+// registers actually begin.
+
+#define reg_base_offset 1024
+
+
+#define reg_a0 ARMREG_R0
+#define reg_a1 ARMREG_R1
+#define reg_a2 ARMREG_R2
+
+#define reg_s0 ARMREG_R9
+#define reg_base ARMREG_SP
+#define reg_flags ARMREG_R11
+
+#define reg_cycles ARMREG_R12
+
+#define reg_rv ARMREG_R0
+
+#define reg_rm ARMREG_R0
+#define reg_rn ARMREG_R1
+#define reg_rs ARMREG_R14
+#define reg_rd ARMREG_R0
+
+
+// Register allocation layout for ARM and Thumb:
+// Map from a GBA register to a host ARM register. -1 means load it
+// from memory into one of the temp registers.
+
+// The following registers are chosen based on statistical analysis
+// of a few games (see below), but might not be the best ones. Results
+// vary tremendously between ARM and Thumb (for obvious reasons), so
+// two sets are used. Take care to not call any function which can
+// overwrite any of these registers from the dynarec - only call
+// trusted functions in arm_stub.S which know how to save/restore
+// them and know how to transfer them to the C functions it calls
+// if necessary.
+
+// The following define the actual registers available for allocation.
+// As registers are freed up, add them to this list.
+
+// Note that r15 is linked to the a0 temp reg - this register will
+// be preloaded with a constant upon read, and used to link to
+// indirect branch functions upon write.
+
+#define reg_x0 ARMREG_R3
+#define reg_x1 ARMREG_R4
+#define reg_x2 ARMREG_R5
+#define reg_x3 ARMREG_R6
+#define reg_x4 ARMREG_R7
+#define reg_x5 ARMREG_R8
+
+#define mem_reg -1
+
+/*
+
+ARM register usage (38.775138% ARM instructions):
+r00: 18.263814% (-- 18.263814%)
+r12: 11.531477% (-- 29.795291%)
+r09: 11.500162% (-- 41.295453%)
+r14: 9.063440% (-- 50.358893%)
+r06: 7.837682% (-- 58.196574%)
+r01: 7.401049% (-- 65.597623%)
+r07: 6.778340% (-- 72.375963%)
+r05: 5.445009% (-- 77.820973%)
+r02: 5.427288% (-- 83.248260%)
+r03: 5.293743% (-- 88.542003%)
+r04: 3.601103% (-- 92.143106%)
+r11: 3.207311% (-- 95.350417%)
+r10: 2.334864% (-- 97.685281%)
+r08: 1.708207% (-- 99.393488%)
+r15: 0.311270% (-- 99.704757%)
+r13: 0.295243% (-- 100.000000%)
+
+Thumb register usage (61.224862% Thumb instructions):
+r00: 34.788858% (-- 34.788858%)
+r01: 26.564083% (-- 61.352941%)
+r03: 10.983500% (-- 72.336441%)
+r02: 8.303127% (-- 80.639567%)
+r04: 4.900381% (-- 85.539948%)
+r05: 3.941292% (-- 89.481240%)
+r06: 3.257582% (-- 92.738822%)
+r07: 2.644851% (-- 95.383673%)
+r13: 1.408824% (-- 96.792497%)
+r08: 0.906433% (-- 97.698930%)
+r09: 0.679693% (-- 98.378623%)
+r10: 0.656446% (-- 99.035069%)
+r12: 0.453668% (-- 99.488737%)
+r14: 0.248909% (-- 99.737646%)
+r11: 0.171066% (-- 99.908713%)
+r15: 0.091287% (-- 100.000000%)
+
+*/
+
+s32 arm_register_allocation[] =
+{
+ reg_x0, // GBA r0
+ reg_x1, // GBA r1
+ mem_reg, // GBA r2
+ mem_reg, // GBA r3
+ mem_reg, // GBA r4
+ mem_reg, // GBA r5
+ reg_x2, // GBA r6
+ mem_reg, // GBA r7
+ mem_reg, // GBA r8
+ reg_x3, // GBA r9
+ mem_reg, // GBA r10
+ mem_reg, // GBA r11
+ reg_x4, // GBA r12
+ mem_reg, // GBA r13
+ reg_x5, // GBA r14
+ reg_a0, // GBA r15
+
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+};
+
+s32 thumb_register_allocation[] =
+{
+ reg_x0, // GBA r0
+ reg_x1, // GBA r1
+ reg_x2, // GBA r2
+ reg_x3, // GBA r3
+ reg_x4, // GBA r4
+ reg_x5, // GBA r5
+ mem_reg, // GBA r6
+ mem_reg, // GBA r7
+ mem_reg, // GBA r8
+ mem_reg, // GBA r9
+ mem_reg, // GBA r10
+ mem_reg, // GBA r11
+ mem_reg, // GBA r12
+ mem_reg, // GBA r13
+ mem_reg, // GBA r14
+ reg_a0, // GBA r15
+
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+ mem_reg,
+};
+
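+// With these maps, GBA r0 lives permanently in host r3 (reg_x0) under both
+// instruction sets, while anything mapped to mem_reg is loaded from and
+// stored back to reg_base + reg_base_offset + (index * 4) by the
+// prepare_load_reg()/complete_store_reg() helpers defined below.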
+
+
+#define arm_imm_lsl_to_rot(value) \
+ (32 - value) \
+
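+// Rotating an 8-bit immediate right by (32 - n) is the same as shifting it
+// left by n; e.g. passing arm_imm_lsl_to_rot(8) as the rotate argument makes
+// an immediate of 0x12 contribute 0x1200.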
+
+u32 arm_disect_imm_32bit(u32 imm, u32 *stores, u32 *rotations)
+{
+ u32 store_count = 0;
+ u32 left_shift = 0;
+ u32 i;
+
+ // Otherwise it'll return 0 things to store because it'll never
+ // find anything.
+ if(imm == 0)
+ {
+ rotations[0] = 0;
+ stores[0] = 0;
+ return 1;
+ }
+
+ // Find chunks of non-zero data at 2 bit alignments.
+ while(1)
+ {
+ for(; left_shift < 32; left_shift += 2)
+ {
+ if((imm >> left_shift) & 0x03)
+ break;
+ }
+
+ if(left_shift == 32)
+ {
+ // We've hit the end of the useful data.
+ return store_count;
+ }
+
+ // Hit the end, it might wrap back around to the beginning.
+ if(left_shift >= 24)
+ {
+ // Make a mask for the residual bits. IE, if we have
+ // 5 bits of data at the end we can wrap around to 3
+ // bits of data in the beginning. Thus the first
+ // thing, after being shifted left, has to be less
+ // than 111b, 0x7, or (1 << 3) - 1.
+ u32 top_bits = 32 - left_shift;
+ u32 residual_bits = 8 - top_bits;
+ u32 residual_mask = (1 << residual_bits) - 1;
+
+ if((store_count > 1) && (left_shift > 24) &&
+ ((stores[0] << ((32 - rotations[0]) & 0x1F)) < residual_mask))
+ {
+ // Then we can throw out the last bit and tack it on
+ // to the first bit.
+ u32 initial_bits = rotations[0];
+ stores[0] =
+ (stores[0] << ((top_bits + (32 - rotations[0])) & 0x1F)) |
+ ((imm >> left_shift) & 0xFF);
+ rotations[0] = top_bits;
+
+ return store_count;
+ }
+ else
+ {
+ // There's nothing to wrap over to in the beginning
+ stores[store_count] = (imm >> left_shift) & 0xFF;
+ rotations[store_count] = (32 - left_shift) & 0x1F;
+ return store_count + 1;
+ }
+ break;
+ }
+
+ stores[store_count] = (imm >> left_shift) & 0xFF;
+ rotations[store_count] = (32 - left_shift) & 0x1F;
+
+ store_count++;
+ left_shift += 8;
+ }
+}
+
+#define arm_load_imm_32bit(ireg, imm) \
+{ \
+ u32 stores[4]; \
+ u32 rotations[4]; \
+ u32 store_count = arm_disect_imm_32bit(imm, stores, rotations); \
+ u32 i; \
+ \
+ ARM_MOV_REG_IMM(0, ireg, stores[0], rotations[0]); \
+ \
+ for(i = 1; i < store_count; i++) \
+ { \
+ ARM_ORR_REG_IMM(0, ireg, ireg, stores[i], rotations[i]); \
+ } \
+} \
+
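+// For example, loading 0x00FF00FF splits into two 8-bit chunks and emits
+// MOV ireg, #0xFF followed by ORR ireg, ireg, #0xFF ROR 16 (= 0x00FF0000);
+// any 32-bit constant takes at most four such instructions.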
+
+#define generate_load_pc(ireg, new_pc) \
+ arm_load_imm_32bit(ireg, new_pc) \
+
+#define generate_load_imm(ireg, imm, imm_ror) \
+ ARM_MOV_REG_IMM(0, ireg, imm, imm_ror) \
+
+
+
+#define generate_shift_left(ireg, imm) \
+ ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_LSL, imm) \
+
+#define generate_shift_right(ireg, imm) \
+ ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_LSR, imm) \
+
+#define generate_shift_right_arithmetic(ireg, imm) \
+ ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_ASR, imm) \
+
+#define generate_rotate_right(ireg, imm) \
+ ARM_MOV_REG_IMMSHIFT(0, ireg, ireg, ARMSHIFT_ROR, imm) \
+
+#define generate_add(ireg_dest, ireg_src) \
+ ARM_ADD_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \
+
+#define generate_sub(ireg_dest, ireg_src) \
+ ARM_SUB_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \
+
+#define generate_or(ireg_dest, ireg_src) \
+ ARM_ORR_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \
+
+#define generate_xor(ireg_dest, ireg_src) \
+ ARM_EOR_REG_REG(0, ireg_dest, ireg_dest, ireg_src) \
+
+#define generate_add_imm(ireg, imm, imm_ror) \
+ ARM_ADD_REG_IMM(0, ireg, ireg, imm, imm_ror) \
+
+#define generate_sub_imm(ireg, imm, imm_ror) \
+ ARM_SUB_REG_IMM(0, ireg, ireg, imm, imm_ror) \
+
+#define generate_xor_imm(ireg, imm, imm_ror) \
+ ARM_EOR_REG_IMM(0, ireg, ireg, imm, imm_ror) \
+
+#define generate_add_reg_reg_imm(ireg_dest, ireg_src, imm, imm_ror) \
+ ARM_ADD_REG_IMM(0, ireg_dest, ireg_src, imm, imm_ror) \
+
+#define generate_and_imm(ireg, imm, imm_ror) \
+ ARM_AND_REG_IMM(0, ireg, ireg, imm, imm_ror) \
+
+#define generate_mov(ireg_dest, ireg_src) \
+ if(ireg_dest != ireg_src) \
+ { \
+ ARM_MOV_REG_REG(0, ireg_dest, ireg_src); \
+ } \
+
+#define generate_function_call(function_location) \
+ ARM_BL(0, arm_relative_offset(translation_ptr, function_location)) \
+
+#define generate_exit_block() \
+ ARM_BX(0, ARMREG_LR) \
+
+// The branch target is to be filled in later (thus a 0 for now)
+
+#define generate_branch_filler(condition_code, writeback_location) \
+ (writeback_location) = translation_ptr; \
+ ARM_B_COND(0, condition_code, 0) \
+
+#define generate_update_pc(new_pc) \
+ generate_load_pc(reg_a0, new_pc) \
+
+#define generate_cycle_update() \
+ if(cycle_count) \
+ { \
+ if(cycle_count >> 8) \
+ { \
+ ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count >> 8) & 0xFF, \
+ arm_imm_lsl_to_rot(8)); \
+ } \
+ ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count & 0xFF), 0); \
+ cycle_count = 0; \
+ } \
+
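+// The pending cycle count is applied as two 8-bit adds because an ARM
+// data-processing immediate is only 8 bits wide: e.g. a count of 0x123
+// becomes ADD reg_cycles, #0x100 followed by ADD reg_cycles, #0x23.
+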
+#define generate_cycle_update_flag_set() \
+ if(cycle_count >> 8) \
+ { \
+ ARM_ADD_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count >> 8) & 0xFF, \
+ arm_imm_lsl_to_rot(8)); \
+ } \
+ generate_save_flags(); \
+ ARM_ADDS_REG_IMM(0, reg_cycles, reg_cycles, (cycle_count & 0xFF), 0); \
+ cycle_count = 0 \
+
+#define generate_branch_patch_conditional(dest, offset) \
+ *((u32 *)(dest)) = (*((u32 *)dest) & 0xFF000000) | \
+ arm_relative_offset(dest, offset) \
+
+#define generate_branch_patch_unconditional(dest, offset) \
+ *((u32 *)(dest)) = (*((u32 *)dest) & 0xFF000000) | \
+ arm_relative_offset(dest, offset) \
+
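+// Both patch macros keep the top byte of the branch already emitted at dest
+// (condition and opcode bits) and rewrite only its 24-bit word offset, which
+// is how a filler emitted by generate_branch_filler() later receives its
+// real target.
+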
+// A different function is called for idle updates because of the relative
+// location of the embedded PC. However, the idle version could also be
+// optimized to put the CPU into halt mode.
+
+#define generate_branch_idle_eliminate(writeback_location, new_pc, mode) \
+ generate_function_call(arm_update_gba_idle_##mode); \
+ write32(new_pc); \
+ generate_branch_filler(ARMCOND_AL, writeback_location) \
+
+#define generate_branch_update(writeback_location, new_pc, mode) \
+ ARM_MOV_REG_IMMSHIFT(0, reg_a0, reg_cycles, ARMSHIFT_LSR, 31); \
+ ARM_ADD_REG_IMMSHIFT(0, ARMREG_PC, ARMREG_PC, reg_a0, ARMSHIFT_LSL, 2); \
+ write32(new_pc); \
+ generate_function_call(arm_update_gba_##mode); \
+ generate_branch_filler(ARMCOND_AL, writeback_location) \
+
+
+#define generate_branch_no_cycle_update(writeback_location, new_pc, mode) \
+ if(pc == idle_loop_target_pc) \
+ { \
+ generate_branch_idle_eliminate(writeback_location, new_pc, mode); \
+ } \
+ else \
+ { \
+ generate_branch_update(writeback_location, new_pc, mode); \
+ } \
+
+#define generate_branch_cycle_update(writeback_location, new_pc, mode) \
+ generate_cycle_update(); \
+ generate_branch_no_cycle_update(writeback_location, new_pc, mode) \
+
+// a0 holds the destination
+
+#define generate_indirect_branch_no_cycle_update(type) \
+ ARM_B(0, arm_relative_offset(translation_ptr, arm_indirect_branch_##type)) \
+
+#define generate_indirect_branch_cycle_update(type) \
+ generate_cycle_update(); \
+ generate_indirect_branch_no_cycle_update(type) \
+
+#define generate_block_prologue() \
+
+#define generate_block_extra_vars_arm() \
+ void generate_indirect_branch_arm() \
+ { \
+ if(condition == 0x0E) \
+ { \
+ generate_cycle_update(); \
+ } \
+ generate_indirect_branch_no_cycle_update(arm); \
+ } \
+ \
+ void generate_indirect_branch_dual() \
+ { \
+ if(condition == 0x0E) \
+ { \
+ generate_cycle_update(); \
+ } \
+ generate_indirect_branch_no_cycle_update(dual_arm); \
+ } \
+ \
+ u32 prepare_load_reg(u32 scratch_reg, u32 reg_index) \
+ { \
+ u32 reg_use = arm_register_allocation[reg_index]; \
+ if(reg_use == mem_reg) \
+ { \
+ ARM_LDR_IMM(0, scratch_reg, reg_base, \
+ (reg_base_offset + (reg_index * 4))); \
+ return scratch_reg; \
+ } \
+ \
+ return reg_use; \
+ } \
+ \
+ u32 prepare_load_reg_pc(u32 scratch_reg, u32 reg_index, u32 pc_offset) \
+ { \
+ if(reg_index == 15) \
+ { \
+ generate_load_pc(scratch_reg, pc + pc_offset); \
+ return scratch_reg; \
+ } \
+ return prepare_load_reg(scratch_reg, reg_index); \
+ } \
+ \
+ u32 prepare_store_reg(u32 scratch_reg, u32 reg_index) \
+ { \
+ u32 reg_use = arm_register_allocation[reg_index]; \
+ if(reg_use == mem_reg) \
+ return scratch_reg; \
+ \
+ return reg_use; \
+ } \
+ \
+ void complete_store_reg(u32 scratch_reg, u32 reg_index) \
+ { \
+ if(arm_register_allocation[reg_index] == mem_reg) \
+ { \
+ ARM_STR_IMM(0, scratch_reg, reg_base, \
+ (reg_base_offset + (reg_index * 4))); \
+ } \
+ } \
+ \
+ void complete_store_reg_pc_no_flags(u32 scratch_reg, u32 reg_index) \
+ { \
+ if(reg_index == 15) \
+ { \
+ generate_indirect_branch_arm(); \
+ } \
+ else \
+ { \
+ complete_store_reg(scratch_reg, reg_index); \
+ } \
+ } \
+ \
+ void complete_store_reg_pc_flags(u32 scratch_reg, u32 reg_index) \
+ { \
+ if(reg_index == 15) \
+ { \
+ if(condition == 0x0E) \
+ { \
+ generate_cycle_update(); \
+ } \
+ generate_function_call(execute_spsr_restore); \
+ } \
+ else \
+ { \
+ complete_store_reg(scratch_reg, reg_index); \
+ } \
+ } \
+ \
+ void generate_load_reg(u32 ireg, u32 reg_index) \
+ { \
+ s32 load_src = arm_register_allocation[reg_index]; \
+ if(load_src != mem_reg) \
+ { \
+ ARM_MOV_REG_REG(0, ireg, load_src); \
+ } \
+ else \
+ { \
+ ARM_LDR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
+ } \
+ } \
+ \
+ void generate_store_reg(u32 ireg, u32 reg_index) \
+ { \
+ s32 store_dest = arm_register_allocation[reg_index]; \
+ if(store_dest != mem_reg) \
+ { \
+ ARM_MOV_REG_REG(0, store_dest, ireg); \
+ } \
+ else \
+ { \
+ ARM_STR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
+ } \
+ } \
+
+
+#define generate_block_extra_vars_thumb() \
+ u32 prepare_load_reg(u32 scratch_reg, u32 reg_index) \
+ { \
+ u32 reg_use = thumb_register_allocation[reg_index]; \
+ if(reg_use == mem_reg) \
+ { \
+ ARM_LDR_IMM(0, scratch_reg, reg_base, \
+ (reg_base_offset + (reg_index * 4))); \
+ return scratch_reg; \
+ } \
+ \
+ return reg_use; \
+ } \
+ \
+ u32 prepare_load_reg_pc(u32 scratch_reg, u32 reg_index, u32 pc_offset) \
+ { \
+ if(reg_index == 15) \
+ { \
+ generate_load_pc(scratch_reg, pc + pc_offset); \
+ return scratch_reg; \
+ } \
+ return prepare_load_reg(scratch_reg, reg_index); \
+ } \
+ \
+ u32 prepare_store_reg(u32 scratch_reg, u32 reg_index) \
+ { \
+ u32 reg_use = thumb_register_allocation[reg_index]; \
+ if(reg_use == mem_reg) \
+ return scratch_reg; \
+ \
+ return reg_use; \
+ } \
+ \
+ void complete_store_reg(u32 scratch_reg, u32 reg_index) \
+ { \
+ if(thumb_register_allocation[reg_index] == mem_reg) \
+ { \
+ ARM_STR_IMM(0, scratch_reg, reg_base, \
+ (reg_base_offset + (reg_index * 4))); \
+ } \
+ } \
+ \
+ void generate_load_reg(u32 ireg, u32 reg_index) \
+ { \
+ s32 load_src = thumb_register_allocation[reg_index]; \
+ if(load_src != mem_reg) \
+ { \
+ ARM_MOV_REG_REG(0, ireg, load_src); \
+ } \
+ else \
+ { \
+ ARM_LDR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
+ } \
+ } \
+ \
+ void generate_store_reg(u32 ireg, u32 reg_index) \
+ { \
+ s32 store_dest = thumb_register_allocation[reg_index]; \
+ if(store_dest != mem_reg) \
+ { \
+ ARM_MOV_REG_REG(0, store_dest, ireg); \
+ } \
+ else \
+ { \
+ ARM_STR_IMM(0, ireg, reg_base, (reg_base_offset + (reg_index * 4))); \
+ } \
+ } \
+
+u8 *last_rom_translation_ptr = rom_translation_cache;
+u8 *last_ram_translation_ptr = ram_translation_cache;
+u8 *last_bios_translation_ptr = bios_translation_cache;
+
+#define translate_invalidate_dcache_one(which) \
+ if (which##_translation_ptr > last_##which##_translation_ptr) \
+ { \
+ warm_cache_op_range(WOP_D_CLEAN, last_##which##_translation_ptr, \
+ which##_translation_ptr - last_##which##_translation_ptr); \
+ warm_cache_op_range(WOP_I_INVALIDATE, last_##which##_translation_ptr, 32);\
+ last_##which##_translation_ptr = which##_translation_ptr; \
+ }
+
+#define translate_invalidate_dcache() \
+{ \
+ translate_invalidate_dcache_one(rom) \
+ translate_invalidate_dcache_one(ram) \
+ translate_invalidate_dcache_one(bios) \
+}
+
+#define invalidate_icache_region(addr, size) \
+ warm_cache_op_range(WOP_I_INVALIDATE, addr, size)
+
+
+#define block_prologue_size 0
+
+
+// It should be okay to still generate result flags; the SPSR restore will
+// overwrite them. This is pretty infrequent (returning from interrupt
+// handlers and the like), so it's probably not worth optimizing for.
+
+#define check_for_interrupts() \
+ if((io_registers[REG_IE] & io_registers[REG_IF]) && \
+ io_registers[REG_IME] && ((reg[REG_CPSR] & 0x80) == 0)) \
+ { \
+ reg_mode[MODE_IRQ][6] = pc + 4; \
+ spsr[MODE_IRQ] = reg[REG_CPSR]; \
+ reg[REG_CPSR] = 0xD2; \
+ pc = 0x00000018; \
+ set_cpu_mode(MODE_IRQ); \
+ } \
+
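+// (In check_for_interrupts, 0xD2 selects IRQ mode, 0x12, with the I and F
+// bits set, and 0x00000018 is the ARM IRQ exception vector.)
+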
+#define generate_load_reg_pc(ireg, reg_index, pc_offset) \
+ if(reg_index == 15) \
+ { \
+ generate_load_pc(ireg, pc + pc_offset); \
+ } \
+ else \
+ { \
+ generate_load_reg(ireg, reg_index); \
+ } \
+
+#define generate_store_reg_pc_no_flags(ireg, reg_index) \
+ generate_store_reg(ireg, reg_index); \
+ if(reg_index == 15) \
+ { \
+ generate_indirect_branch_arm(); \
+ } \
+
+
+u32 function_cc execute_spsr_restore_body(u32 pc)
+{
+ set_cpu_mode(cpu_modes[reg[REG_CPSR] & 0x1F]);
+ check_for_interrupts();
+
+ return pc;
+}
+
+
+#define generate_store_reg_pc_flags(ireg, reg_index) \
+ generate_store_reg(ireg, reg_index); \
+ if(reg_index == 15) \
+ { \
+ if(condition == 0x0E) \
+ { \
+ generate_cycle_update(); \
+ } \
+ generate_function_call(execute_spsr_restore); \
+ } \
+
+
+#define generate_load_flags() \
+/* ARM_MSR_REG(0, ARM_PSR_F, reg_flags, ARM_CPSR) */ \
+
+#define generate_store_flags() \
+/* ARM_MRS_CPSR(0, reg_flags) */ \
+
+#define generate_save_flags() \
+ ARM_MRS_CPSR(0, reg_flags) \
+
+#define generate_restore_flags() \
+ ARM_MSR_REG(0, ARM_PSR_F, reg_flags, ARM_CPSR) \
+
+
+#define condition_opposite_eq ARMCOND_NE
+#define condition_opposite_ne ARMCOND_EQ
+#define condition_opposite_cs ARMCOND_CC
+#define condition_opposite_cc ARMCOND_CS
+#define condition_opposite_mi ARMCOND_PL
+#define condition_opposite_pl ARMCOND_MI
+#define condition_opposite_vs ARMCOND_VC
+#define condition_opposite_vc ARMCOND_VS
+#define condition_opposite_hi ARMCOND_LS
+#define condition_opposite_ls ARMCOND_HI
+#define condition_opposite_ge ARMCOND_LT
+#define condition_opposite_lt ARMCOND_GE
+#define condition_opposite_gt ARMCOND_LE
+#define condition_opposite_le ARMCOND_GT
+#define condition_opposite_al ARMCOND_NV
+#define condition_opposite_nv ARMCOND_AL
+
+#define generate_branch(mode) \
+{ \
+ generate_branch_cycle_update( \
+ block_exits[block_exit_position].branch_source, \
+ block_exits[block_exit_position].branch_target, mode); \
+ block_exit_position++; \
+} \
+
+
+#define generate_op_and_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_AND_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_orr_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_ORR_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_eor_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_EOR_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_bic_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_BIC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_sub_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_SUB_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_rsb_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_RSB_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_sbc_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_SBC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_rsc_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_RSC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_add_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_ADD_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_adc_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_ADC_REG_IMMSHIFT(0, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_mov_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_MOV_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift) \
+
+#define generate_op_mvn_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ ARM_MVN_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift) \
+
+
+#define generate_op_and_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_AND_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_orr_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_ORR_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_eor_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_EOR_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_bic_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_BIC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_sub_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_SUB_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_rsb_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_RSB_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_sbc_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_SBC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_rsc_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_RSC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_add_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_ADD_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_adc_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_ADC_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_mov_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_MOV_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs) \
+
+#define generate_op_mvn_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ ARM_MVN_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs) \
+
+
+#define generate_op_and_imm(_rd, _rn) \
+ ARM_AND_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_orr_imm(_rd, _rn) \
+ ARM_ORR_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_eor_imm(_rd, _rn) \
+ ARM_EOR_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_bic_imm(_rd, _rn) \
+ ARM_BIC_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_sub_imm(_rd, _rn) \
+ ARM_SUB_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_rsb_imm(_rd, _rn) \
+ ARM_RSB_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_sbc_imm(_rd, _rn) \
+ ARM_SBC_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_rsc_imm(_rd, _rn) \
+ ARM_RSC_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_add_imm(_rd, _rn) \
+ ARM_ADD_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_adc_imm(_rd, _rn) \
+ ARM_ADC_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_mov_imm(_rd, _rn) \
+ ARM_MOV_REG_IMM(0, _rd, imm, imm_ror) \
+
+#define generate_op_mvn_imm(_rd, _rn) \
+ ARM_MVN_REG_IMM(0, _rd, imm, imm_ror) \
+
+
+#define generate_op_reg_immshift_lflags(name, _rd, _rn, _rm, st, shift) \
+ ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, shift) \
+
+#define generate_op_reg_immshift_aflags(name, _rd, _rn, _rm, st, shift) \
+ ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, shift) \
+
+#define generate_op_reg_immshift_aflags_load_c(name, _rd, _rn, _rm, st, sh) \
+ ARM_##name##_REG_IMMSHIFT(0, _rd, _rn, _rm, st, sh) \
+
+#define generate_op_reg_immshift_uflags(name, _rd, _rm, shift_type, shift) \
+ ARM_##name##_REG_IMMSHIFT(0, _rd, _rm, shift_type, shift) \
+
+#define generate_op_reg_immshift_tflags(name, _rn, _rm, shift_type, shift) \
+ ARM_##name##_REG_IMMSHIFT(0, _rn, _rm, shift_type, shift) \
+
+
+#define generate_op_reg_regshift_lflags(name, _rd, _rn, _rm, shift_type, _rs) \
+ ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_reg_regshift_aflags(name, _rd, _rn, _rm, st, _rs) \
+ ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, st, _rs) \
+
+#define generate_op_reg_regshift_aflags_load_c(name, _rd, _rn, _rm, st, _rs) \
+ ARM_##name##_REG_REGSHIFT(0, _rd, _rn, _rm, st, _rs) \
+
+#define generate_op_reg_regshift_uflags(name, _rd, _rm, shift_type, _rs) \
+ ARM_##name##_REG_REGSHIFT(0, _rd, _rm, shift_type, _rs) \
+
+#define generate_op_reg_regshift_tflags(name, _rn, _rm, shift_type, _rs) \
+ ARM_##name##_REG_REGSHIFT(0, _rn, _rm, shift_type, _rs) \
+
+
+#define generate_op_imm_lflags(name, _rd, _rn) \
+ ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_imm_aflags(name, _rd, _rn) \
+ ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_imm_aflags_load_c(name, _rd, _rn) \
+ ARM_##name##_REG_IMM(0, _rd, _rn, imm, imm_ror) \
+
+#define generate_op_imm_uflags(name, _rd) \
+ ARM_##name##_REG_IMM(0, _rd, imm, imm_ror) \
+
+#define generate_op_imm_tflags(name, _rn) \
+ ARM_##name##_REG_IMM(0, _rn, imm, imm_ror) \
+
+
+#define generate_op_ands_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_lflags(ANDS, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_orrs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_lflags(ORRS, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_eors_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_lflags(EORS, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_bics_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_lflags(BICS, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_subs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_aflags(SUBS, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_rsbs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_aflags(RSBS, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_sbcs_reg_immshift(_rd, _rn, _rm, st, shift) \
+ generate_op_reg_immshift_aflags_load_c(SBCS, _rd, _rn, _rm, st, shift) \
+
+#define generate_op_rscs_reg_immshift(_rd, _rn, _rm, st, shift) \
+ generate_op_reg_immshift_aflags_load_c(RSCS, _rd, _rn, _rm, st, shift) \
+
+#define generate_op_adds_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_aflags(ADDS, _rd, _rn, _rm, shift_type, shift) \
+
+#define generate_op_adcs_reg_immshift(_rd, _rn, _rm, st, shift) \
+ generate_op_reg_immshift_aflags_load_c(ADCS, _rd, _rn, _rm, st, shift) \
+
+#define generate_op_movs_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_uflags(MOVS, _rd, _rm, shift_type, shift) \
+
+#define generate_op_mvns_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_uflags(MVNS, _rd, _rm, shift_type, shift) \
+
+// The reg operand is in reg_rm, not reg_rn as expected, so rsbs isn't
+// being used here. When rsbs is fully inlined it can be used with the
+// appropriate operands.
+
+#define generate_op_neg_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+{ \
+ generate_load_imm(reg_rn, 0, 0); \
+ generate_op_subs_reg_immshift(_rd, reg_rn, _rm, ARMSHIFT_LSL, 0); \
+} \
+
+#define generate_op_muls_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_load_flags(); \
+ ARM_MULS(0, _rd, _rn, _rm); \
+ generate_store_flags() \
+
+#define generate_op_cmp_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_tflags(CMP, _rn, _rm, shift_type, shift) \
+
+#define generate_op_cmn_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_tflags(CMN, _rn, _rm, shift_type, shift) \
+
+#define generate_op_tst_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_tflags(TST, _rn, _rm, shift_type, shift) \
+
+#define generate_op_teq_reg_immshift(_rd, _rn, _rm, shift_type, shift) \
+ generate_op_reg_immshift_tflags(TEQ, _rn, _rm, shift_type, shift) \
+
+
+#define generate_op_ands_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_lflags(ANDS, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_orrs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_lflags(ORRS, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_eors_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_lflags(EORS, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_bics_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_lflags(BICS, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_subs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_aflags(SUBS, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_rsbs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_aflags(RSBS, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_sbcs_reg_regshift(_rd, _rn, _rm, st, _rs) \
+ generate_op_reg_regshift_aflags_load_c(SBCS, _rd, _rn, _rm, st, _rs) \
+
+#define generate_op_rscs_reg_regshift(_rd, _rn, _rm, st, _rs) \
+ generate_op_reg_regshift_aflags_load_c(RSCS, _rd, _rn, _rm, st, _rs) \
+
+#define generate_op_adds_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_aflags(ADDS, _rd, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_adcs_reg_regshift(_rd, _rn, _rm, st, _rs) \
+ generate_op_reg_regshift_aflags_load_c(ADCS, _rd, _rn, _rm, st, _rs) \
+
+#define generate_op_movs_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_uflags(MOVS, _rd, _rm, shift_type, _rs) \
+
+#define generate_op_mvns_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_uflags(MVNS, _rd, _rm, shift_type, _rs) \
+
+#define generate_op_cmp_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_tflags(CMP, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_cmn_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_tflags(CMN, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_tst_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_tflags(TST, _rn, _rm, shift_type, _rs) \
+
+#define generate_op_teq_reg_regshift(_rd, _rn, _rm, shift_type, _rs) \
+ generate_op_reg_regshift_tflags(TEQ, _rn, _rm, shift_type, _rs) \
+
+
+#define generate_op_ands_imm(_rd, _rn) \
+ generate_op_imm_lflags(ANDS, _rd, _rn) \
+
+#define generate_op_orrs_imm(_rd, _rn) \
+ generate_op_imm_lflags(ORRS, _rd, _rn) \
+
+#define generate_op_eors_imm(_rd, _rn) \
+ generate_op_imm_lflags(EORS, _rd, _rn) \
+
+#define generate_op_bics_imm(_rd, _rn) \
+ generate_op_imm_lflags(BICS, _rd, _rn) \
+
+#define generate_op_subs_imm(_rd, _rn) \
+ generate_op_imm_aflags(SUBS, _rd, _rn) \
+
+#define generate_op_rsbs_imm(_rd, _rn) \
+ generate_op_imm_aflags(RSBS, _rd, _rn) \
+
+#define generate_op_sbcs_imm(_rd, _rn) \
+ generate_op_imm_aflags_load_c(SBCS, _rd, _rn) \
+
+#define generate_op_rscs_imm(_rd, _rn) \
+ generate_op_imm_aflags_load_c(RSCS, _rd, _rn) \
+
+#define generate_op_adds_imm(_rd, _rn) \
+ generate_op_imm_aflags(ADDS, _rd, _rn) \
+
+#define generate_op_adcs_imm(_rd, _rn) \
+ generate_op_imm_aflags_load_c(ADCS, _rd, _rn) \
+
+#define generate_op_movs_imm(_rd, _rn) \
+ generate_op_imm_uflags(MOVS, _rd) \
+
+#define generate_op_mvns_imm(_rd, _rn) \
+ generate_op_imm_uflags(MVNS, _rd) \
+
+#define generate_op_cmp_imm(_rd, _rn) \
+ generate_op_imm_tflags(CMP, _rn) \
+
+#define generate_op_cmn_imm(_rd, _rn) \
+ generate_op_imm_tflags(CMN, _rn) \
+
+#define generate_op_tst_imm(_rd, _rn) \
+ generate_op_imm_tflags(TST, _rn) \
+
+#define generate_op_teq_imm(_rd, _rn) \
+ generate_op_imm_tflags(TEQ, _rn) \
+
+
+#define prepare_load_rn_yes() \
+ u32 _rn = prepare_load_reg_pc(reg_rn, rn, 8) \
+
+#define prepare_load_rn_no() \
+
+#define prepare_store_rd_yes() \
+ u32 _rd = prepare_store_reg(reg_rd, rd) \
+
+#define prepare_store_rd_no() \
+
+#define complete_store_rd_yes(flags_op) \
+ complete_store_reg_pc_##flags_op(_rd, rd) \
+
+#define complete_store_rd_no(flags_op) \
+
+#define arm_generate_op_reg(name, load_op, store_op, flags_op) \
+ u32 shift_type = (opcode >> 5) & 0x03; \
+ arm_decode_data_proc_reg(); \
+ prepare_load_rn_##load_op(); \
+ prepare_store_rd_##store_op(); \
+ \
+ if((opcode >> 4) & 0x01) \
+ { \
+ u32 rs = ((opcode >> 8) & 0x0F); \
+ u32 _rs = prepare_load_reg(reg_rs, rs); \
+ u32 _rm = prepare_load_reg_pc(reg_rm, rm, 12); \
+ generate_op_##name##_reg_regshift(_rd, _rn, _rm, shift_type, _rs); \
+ } \
+ else \
+ { \
+ u32 shift_imm = ((opcode >> 7) & 0x1F); \
+ u32 _rm = prepare_load_reg_pc(reg_rm, rm, 8); \
+ generate_op_##name##_reg_immshift(_rd, _rn, _rm, shift_type, shift_imm); \
+ } \
+ complete_store_rd_##store_op(flags_op) \
+
+#define arm_generate_op_reg_flags(name, load_op, store_op, flags_op) \
+ arm_generate_op_reg(name, load_op, store_op, flags_op) \
+
+// imm will be loaded by the called function if necessary.
+
+#define arm_generate_op_imm(name, load_op, store_op, flags_op) \
+ arm_decode_data_proc_imm(); \
+ prepare_load_rn_##load_op(); \
+ prepare_store_rd_##store_op(); \
+ generate_op_##name##_imm(_rd, _rn); \
+ complete_store_rd_##store_op(flags_op) \
+
+#define arm_generate_op_imm_flags(name, load_op, store_op, flags_op) \
+ arm_generate_op_imm(name, load_op, store_op, flags_op) \
+
+#define arm_data_proc(name, type, flags_op) \
+{ \
+ arm_generate_op_##type(name, yes, yes, flags_op); \
+} \
+
+#define arm_data_proc_test(name, type) \
+{ \
+ arm_generate_op_##type(name, yes, no, no); \
+} \
+
+#define arm_data_proc_unary(name, type, flags_op) \
+{ \
+ arm_generate_op_##type(name, no, yes, flags_op); \
+} \
+
+
+#define arm_multiply_add_no_flags_no() \
+ ARM_MUL(0, _rd, _rm, _rs) \
+
+#define arm_multiply_add_yes_flags_no() \
+ u32 _rn = prepare_load_reg(reg_a2, rn); \
+ ARM_MLA(0, _rd, _rm, _rs, _rn) \
+
+#define arm_multiply_add_no_flags_yes() \
+ generate_load_flags(); \
+ ARM_MULS(0, reg_a0, reg_a0, reg_a1) \
+ generate_store_flags() \
+
+#define arm_multiply_add_yes_flags_yes() \
+ u32 _rn = prepare_load_reg(reg_a2, rn); \
+ generate_load_flags(); \
+ ARM_MLAS(0, _rd, _rm, _rs, _rn); \
+ generate_store_flags()
+
+
+#define arm_multiply(add_op, flags) \
+{ \
+ arm_decode_multiply(); \
+ u32 _rm = prepare_load_reg(reg_a0, rm); \
+ u32 _rs = prepare_load_reg(reg_a1, rs); \
+ u32 _rd = prepare_store_reg(reg_a0, rd); \
+ arm_multiply_add_##add_op##_flags_##flags(); \
+ complete_store_reg(_rd, rd); \
+} \
+
+
+#define arm_multiply_long_name_s64 SMULL
+#define arm_multiply_long_name_u64 UMULL
+#define arm_multiply_long_name_s64_add SMLAL
+#define arm_multiply_long_name_u64_add UMLAL
+
+
+#define arm_multiply_long_flags_no(name) \
+ ARM_##name(0, _rdlo, _rdhi, _rm, _rs) \
+
+#define arm_multiply_long_flags_yes(name) \
+ generate_load_flags(); \
+ ARM_##name##S(0, _rdlo, _rdhi, _rm, _rs); \
+ generate_store_flags() \
+
+
+#define arm_multiply_long_add_no(name) \
+
+#define arm_multiply_long_add_yes(name) \
+ prepare_load_reg(reg_a0, rdlo); \
+ prepare_load_reg(reg_a1, rdhi) \
+
+
+#define arm_multiply_long_op(flags, name) \
+ arm_multiply_long_flags_##flags(name) \
+
+#define arm_multiply_long(name, add_op, flags) \
+{ \
+ arm_decode_multiply_long(); \
+ u32 _rm = prepare_load_reg(reg_a2, rm); \
+ u32 _rs = prepare_load_reg(reg_rs, rs); \
+ u32 _rdlo = prepare_store_reg(reg_a0, rdlo); \
+ u32 _rdhi = prepare_store_reg(reg_a1, rdhi); \
+ arm_multiply_long_add_##add_op(name); \
+ arm_multiply_long_op(flags, arm_multiply_long_name_##name); \
+ complete_store_reg(_rdlo, rdlo); \
+ complete_store_reg(_rdhi, rdhi); \
+} \
+
+#define arm_psr_read_cpsr() \
+ u32 _rd = prepare_store_reg(reg_a0, rd); \
+ generate_load_reg(_rd, REG_CPSR); \
+ ARM_BIC_REG_IMM(0, _rd, _rd, 0xF0, arm_imm_lsl_to_rot(24)); \
+ ARM_AND_REG_IMM(0, reg_flags, reg_flags, 0xF0, arm_imm_lsl_to_rot(24)); \
+ ARM_ORR_REG_REG(0, _rd, _rd, reg_flags); \
+ complete_store_reg(_rd, rd) \
+
+#define arm_psr_read_spsr() \
+ generate_function_call(execute_read_spsr) \
+ generate_store_reg(reg_a0, rd) \
+
+#define arm_psr_read(op_type, psr_reg) \
+ arm_psr_read_##psr_reg() \
+
+// This function's okay because it's called from an ASM function that can
+// wrap it correctly.
+
+u32 execute_store_cpsr_body(u32 _cpsr, u32 store_mask, u32 address)
+{
+ reg[REG_CPSR] = _cpsr;
+ if(store_mask & 0xFF)
+ {
+ set_cpu_mode(cpu_modes[_cpsr & 0x1F]);
+ if((io_registers[REG_IE] & io_registers[REG_IF]) &&
+ io_registers[REG_IME] && ((_cpsr & 0x80) == 0))
+ {
+ reg_mode[MODE_IRQ][6] = address + 4;
+ spsr[MODE_IRQ] = _cpsr;
+ reg[REG_CPSR] = 0xD2;
+ set_cpu_mode(MODE_IRQ);
+ return 0x00000018;
+ }
+ }
+
+ return 0;
+}
+
+#define arm_psr_load_new_reg() \
+ generate_load_reg(reg_a0, rm) \
+
+#define arm_psr_load_new_imm() \
+ generate_load_imm(reg_a0, imm, imm_ror) \
+
+#define arm_psr_store_cpsr() \
+ arm_load_imm_32bit(reg_a1, psr_masks[psr_field]); \
+ generate_function_call(execute_store_cpsr); \
+ write32(pc) \
+
+#define arm_psr_store_spsr() \
+ generate_function_call(execute_store_spsr) \
+
+#define arm_psr_store(op_type, psr_reg) \
+ arm_psr_load_new_##op_type(); \
+ arm_psr_store_##psr_reg() \
+
+
+#define arm_psr(op_type, transfer_type, psr_reg) \
+{ \
+ arm_decode_psr_##op_type(); \
+ arm_psr_##transfer_type(op_type, psr_reg); \
+} \
+
+// TODO: loads will need the PC passed as well for open addresses; however,
+// this can eventually be rectified with a hash table on the memory accesses
+// (same for the stores).
+
+#define arm_access_memory_load(mem_type) \
+ cycle_count += 2; \
+ generate_function_call(execute_load_##mem_type); \
+ write32((pc + 8)); \
+ generate_store_reg_pc_no_flags(reg_rv, rd) \
+
+#define arm_access_memory_store(mem_type) \
+ cycle_count++; \
+ generate_load_reg_pc(reg_a1, rd, 12); \
+ generate_function_call(execute_store_##mem_type); \
+ write32((pc + 4)) \
+
+// Calculate the address into a0 from _rn, _rm
+
+#define arm_access_memory_adjust_reg_sh_up(ireg) \
+ ARM_ADD_REG_IMMSHIFT(0, ireg, _rn, _rm, ((opcode >> 5) & 0x03), \
+ ((opcode >> 7) & 0x1F)) \
+
+#define arm_access_memory_adjust_reg_sh_down(ireg) \
+ ARM_SUB_REG_IMMSHIFT(0, ireg, _rn, _rm, ((opcode >> 5) & 0x03), \
+ ((opcode >> 7) & 0x1F)) \
+
+#define arm_access_memory_adjust_reg_up(ireg) \
+ ARM_ADD_REG_REG(0, ireg, _rn, _rm) \
+
+#define arm_access_memory_adjust_reg_down(ireg) \
+ ARM_SUB_REG_REG(0, ireg, _rn, _rm) \
+
+#define arm_access_memory_adjust_imm(op, ireg) \
+{ \
+ u32 stores[4]; \
+ u32 rotations[4]; \
+ u32 store_count = arm_disect_imm_32bit(offset, stores, rotations); \
+ \
+ if(store_count > 1) \
+ { \
+ ARM_##op##_REG_IMM(0, ireg, _rn, stores[0], rotations[0]); \
+ ARM_##op##_REG_IMM(0, ireg, ireg, stores[1], rotations[1]); \
+ } \
+ else \
+ { \
+ ARM_##op##_REG_IMM(0, ireg, _rn, stores[0], rotations[0]); \
+ } \
+} \
+
+#define arm_access_memory_adjust_imm_up(ireg) \
+ arm_access_memory_adjust_imm(ADD, ireg) \
+
+#define arm_access_memory_adjust_imm_down(ireg) \
+ arm_access_memory_adjust_imm(SUB, ireg) \
+
+
+#define arm_access_memory_pre(type, direction) \
+ arm_access_memory_adjust_##type##_##direction(reg_a0) \
+
+#define arm_access_memory_pre_wb(type, direction) \
+ arm_access_memory_adjust_##type##_##direction(reg_a0); \
+ generate_store_reg(reg_a0, rn) \
+
+#define arm_access_memory_post(type, direction) \
+ u32 _rn_dest = prepare_store_reg(reg_a1, rn); \
+ if(_rn != reg_a0) \
+ { \
+ generate_load_reg(reg_a0, rn); \
+ } \
+ arm_access_memory_adjust_##type##_##direction(_rn_dest); \
+ complete_store_reg(_rn_dest, rn) \
+
+
+#define arm_data_trans_reg(adjust_op, direction) \
+ arm_decode_data_trans_reg(); \
+ u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
+ u32 _rm = prepare_load_reg(reg_a1, rm); \
+ arm_access_memory_##adjust_op(reg_sh, direction) \
+
+#define arm_data_trans_imm(adjust_op, direction) \
+ arm_decode_data_trans_imm(); \
+ u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
+ arm_access_memory_##adjust_op(imm, direction) \
+
+
+#define arm_data_trans_half_reg(adjust_op, direction) \
+ arm_decode_half_trans_r(); \
+ u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
+ u32 _rm = prepare_load_reg(reg_a1, rm); \
+ arm_access_memory_##adjust_op(reg, direction) \
+
+#define arm_data_trans_half_imm(adjust_op, direction) \
+ arm_decode_half_trans_of(); \
+ u32 _rn = prepare_load_reg_pc(reg_a0, rn, 8); \
+ arm_access_memory_##adjust_op(imm, direction) \
+
+
+#define arm_access_memory(access_type, direction, adjust_op, mem_type, \
+ offset_type) \
+{ \
+ arm_data_trans_##offset_type(adjust_op, direction); \
+ arm_access_memory_##access_type(mem_type); \
+} \
+
+
+#define word_bit_count(word) \
+ (bit_count[word >> 8] + bit_count[word & 0xFF]) \
+
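+// bit_count[] is a per-byte population count table: e.g. a register list of
+// 0x4806 (r1, r2, r11, r14) gives bit_count[0x48] + bit_count[0x06] = 4, so
+// the block transfers below advance the base by 4 * 4 = 16 bytes.
+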
+#define sprint_no(access_type, pre_op, post_op, wb) \
+
+#define sprint_yes(access_type, pre_op, post_op, wb) \
+ printf("sbit on %s %s %s %s\n", #access_type, #pre_op, #post_op, #wb) \
+
+
+// TODO: Make these use cached registers. Implement iwram_stack_optimize.
+
+#define arm_block_memory_load() \
+ generate_function_call(execute_load_u32); \
+ write32((pc + 8)); \
+ generate_store_reg(reg_rv, i) \
+
+#define arm_block_memory_store() \
+ generate_load_reg_pc(reg_a1, i, 8); \
+ generate_function_call(execute_store_u32_safe) \
+
+#define arm_block_memory_final_load() \
+ arm_block_memory_load() \
+
+#define arm_block_memory_final_store() \
+ generate_load_reg_pc(reg_a1, i, 12); \
+ generate_function_call(execute_store_u32); \
+ write32((pc + 4)) \
+
+#define arm_block_memory_adjust_pc_store() \
+
+#define arm_block_memory_adjust_pc_load() \
+ if(reg_list & 0x8000) \
+ { \
+ generate_mov(reg_a0, reg_rv); \
+ generate_indirect_branch_arm(); \
+ } \
+
+#define arm_block_memory_offset_down_a() \
+ generate_sub_imm(reg_s0, ((word_bit_count(reg_list) * 4) - 4), 0) \
+
+#define arm_block_memory_offset_down_b() \
+ generate_sub_imm(reg_s0, (word_bit_count(reg_list) * 4), 0) \
+
+#define arm_block_memory_offset_no() \
+
+#define arm_block_memory_offset_up() \
+ generate_add_imm(reg_s0, 4, 0) \
+
+#define arm_block_memory_writeback_down() \
+ generate_load_reg(reg_a0, rn); \
+ generate_sub_imm(reg_a0, (word_bit_count(reg_list) * 4), 0); \
+ generate_store_reg(reg_a0, rn) \
+
+#define arm_block_memory_writeback_up() \
+ generate_load_reg(reg_a0, rn); \
+ generate_add_imm(reg_a0, (word_bit_count(reg_list) * 4), 0); \
+ generate_store_reg(reg_a0, rn) \
+
+#define arm_block_memory_writeback_no()
+
+// Only emit writeback if the register is not in the list
+
+#define arm_block_memory_writeback_load(writeback_type) \
+ if(!((reg_list >> rn) & 0x01)) \
+ { \
+ arm_block_memory_writeback_##writeback_type(); \
+ } \
+
+#define arm_block_memory_writeback_store(writeback_type) \
+ arm_block_memory_writeback_##writeback_type() \
+
+#define arm_block_memory(access_type, offset_type, writeback_type, s_bit) \
+{ \
+ arm_decode_block_trans(); \
+ u32 offset = 0; \
+ u32 i; \
+ \
+ generate_load_reg(reg_s0, rn); \
+ arm_block_memory_offset_##offset_type(); \
+ arm_block_memory_writeback_##access_type(writeback_type); \
+ ARM_BIC_REG_IMM(0, reg_s0, reg_s0, 0x03, 0); \
+ \
+ for(i = 0; i < 16; i++) \
+ { \
+ if((reg_list >> i) & 0x01) \
+ { \
+ cycle_count++; \
+ generate_add_reg_reg_imm(reg_a0, reg_s0, offset, 0); \
+ if(reg_list & ~((2 << i) - 1)) \
+ { \
+ arm_block_memory_##access_type(); \
+ offset += 4; \
+ } \
+ else \
+ { \
+ arm_block_memory_final_##access_type(); \
+ break; \
+ } \
+ } \
+ } \
+ \
+ arm_block_memory_adjust_pc_##access_type(); \
+} \
+
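+// In the loop above, reg_list & ~((2 << i) - 1) is the set of listed
+// registers above bit i, so the "final" access variant runs exactly once, on
+// the highest register in the list; for stores that switches from
+// execute_store_u32_safe to execute_store_u32 with an embedded return PC.
+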
+#define arm_swap(type) \
+{ \
+ arm_decode_swap(); \
+ cycle_count += 3; \
+ generate_load_reg(reg_a0, rn); \
+ generate_function_call(execute_load_##type); \
+ write32((pc + 8)); \
+ generate_mov(reg_s0, reg_rv); \
+ generate_load_reg(reg_a0, rn); \
+ generate_load_reg(reg_a1, rm); \
+ generate_function_call(execute_store_##type); \
+ write32((pc + 4)); \
+ generate_store_reg(reg_s0, rd); \
+} \
+
+
+#define thumb_generate_op_reg(name, _rd, _rs, _rn) \
+ u32 __rm = prepare_load_reg(reg_rm, _rn); \
+ generate_op_##name##_reg_immshift(__rd, __rn, __rm, ARMSHIFT_LSL, 0) \
+
+#define thumb_generate_op_imm(name, _rd, _rs, imm_) \
+{ \
+ u32 imm_ror = 0; \
+ generate_op_##name##_imm(__rd, __rn); \
+} \
+
+
+#define thumb_data_proc(type, name, op_type, _rd, _rs, _rn) \
+{ \
+ thumb_decode_##type(); \
+ u32 __rn = prepare_load_reg(reg_rn, _rs); \
+ u32 __rd = prepare_store_reg(reg_rd, _rd); \
+ generate_load_reg(reg_rn, _rs); \
+ thumb_generate_op_##op_type(name, _rd, _rs, _rn); \
+ complete_store_reg(__rd, _rd); \
+} \
+
+#define thumb_data_proc_test(type, name, op_type, _rd, _rs) \
+{ \
+ thumb_decode_##type(); \
+ u32 __rn = prepare_load_reg(reg_rn, _rd); \
+ thumb_generate_op_##op_type(name, 0, _rd, _rs); \
+} \
+
+#define thumb_data_proc_unary(type, name, op_type, _rd, _rs) \
+{ \
+ thumb_decode_##type(); \
+ u32 __rd = prepare_store_reg(reg_rd, _rd); \
+ thumb_generate_op_##op_type(name, _rd, 0, _rs); \
+ complete_store_reg(__rd, _rd); \
+} \
+
+
+#define complete_store_reg_pc_thumb() \
+ if(rd == 15) \
+ { \
+ generate_indirect_branch_cycle_update(thumb); \
+ } \
+ else \
+ { \
+ complete_store_reg(_rd, rd); \
+ } \
+
+#define thumb_data_proc_hi(name) \
+{ \
+ thumb_decode_hireg_op(); \
+ u32 _rd = prepare_load_reg_pc(reg_rd, rd, 4); \
+ u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
+ generate_op_##name##_reg_immshift(_rd, _rd, _rs, ARMSHIFT_LSL, 0); \
+ complete_store_reg_pc_thumb(); \
+} \
+
+#define thumb_data_proc_test_hi(name) \
+{ \
+ thumb_decode_hireg_op(); \
+ u32 _rd = prepare_load_reg_pc(reg_rd, rd, 4); \
+ u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
+ generate_op_##name##_reg_immshift(0, _rd, _rs, ARMSHIFT_LSL, 0); \
+} \
+
+#define thumb_data_proc_mov_hi() \
+{ \
+ thumb_decode_hireg_op(); \
+ u32 _rs = prepare_load_reg_pc(reg_rn, rs, 4); \
+ u32 _rd = prepare_store_reg(reg_rd, rd); \
+ ARM_MOV_REG_REG(0, _rd, _rs); \
+ complete_store_reg_pc_thumb(); \
+} \
+
+
+
+#define thumb_load_pc(_rd) \
+{ \
+ thumb_decode_imm(); \
+ u32 __rd = prepare_store_reg(reg_rd, _rd); \
+ generate_load_pc(__rd, (((pc & ~2) + 4) + (imm * 4))); \
+ complete_store_reg(__rd, _rd); \
+} \
+
+#define thumb_load_sp(_rd) \
+{ \
+ thumb_decode_imm(); \
+ u32 __sp = prepare_load_reg(reg_a0, REG_SP); \
+ u32 __rd = prepare_store_reg(reg_a0, _rd); \
+ ARM_ADD_REG_IMM(0, __rd, __sp, imm, arm_imm_lsl_to_rot(2)); \
+ complete_store_reg(__rd, _rd); \
+} \
+
+#define thumb_adjust_sp_up() \
+ ARM_ADD_REG_IMM(0, _sp, _sp, imm, arm_imm_lsl_to_rot(2)) \
+
+#define thumb_adjust_sp_down() \
+ ARM_SUB_REG_IMM(0, _sp, _sp, imm, arm_imm_lsl_to_rot(2)) \
+
+#define thumb_adjust_sp(direction) \
+{ \
+ thumb_decode_add_sp(); \
+ u32 _sp = prepare_load_reg(reg_a0, REG_SP); \
+ thumb_adjust_sp_##direction(); \
+ complete_store_reg(_sp, REG_SP); \
+} \
+
+#define generate_op_lsl_reg(_rd, _rm, _rs) \
+ generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_LSL, _rs) \
+
+#define generate_op_lsr_reg(_rd, _rm, _rs) \
+ generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_LSR, _rs) \
+
+#define generate_op_asr_reg(_rd, _rm, _rs) \
+ generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_ASR, _rs) \
+
+#define generate_op_ror_reg(_rd, _rm, _rs) \
+ generate_op_movs_reg_regshift(_rd, 0, _rm, ARMSHIFT_ROR, _rs) \
+
+
+#define generate_op_lsl_imm(_rd, _rm) \
+ generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_LSL, imm) \
+
+#define generate_op_lsr_imm(_rd, _rm) \
+ generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_LSR, imm) \
+
+#define generate_op_asr_imm(_rd, _rm) \
+ generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_ASR, imm) \
+
+#define generate_op_ror_imm(_rd, _rm) \
+ generate_op_movs_reg_immshift(_rd, 0, _rm, ARMSHIFT_ROR, imm) \
+
+
+#define generate_shift_reg(op_type) \
+ u32 __rm = prepare_load_reg(reg_rd, rd); \
+ u32 __rs = prepare_load_reg(reg_rs, rs); \
+ generate_op_##op_type##_reg(__rd, __rm, __rs) \
+
+#define generate_shift_imm(op_type) \
+ u32 __rs = prepare_load_reg(reg_rs, rs); \
+ generate_op_##op_type##_imm(__rd, __rs) \
+
+
+#define thumb_shift(decode_type, op_type, value_type) \
+{ \
+ thumb_decode_##decode_type(); \
+ u32 __rd = prepare_store_reg(reg_rd, rd); \
+ generate_shift_##value_type(op_type); \
+ complete_store_reg(__rd, rd); \
+} \
+
+// Operation types: imm, mem_reg, mem_imm
+
+#define thumb_access_memory_load(mem_type, _rd) \
+ cycle_count += 2; \
+ generate_function_call(execute_load_##mem_type); \
+ write32((pc + 4)); \
+ generate_store_reg(reg_rv, _rd) \
+
+#define thumb_access_memory_store(mem_type, _rd) \
+ cycle_count++; \
+ generate_load_reg(reg_a1, _rd); \
+ generate_function_call(execute_store_##mem_type); \
+ write32((pc + 2)) \
+
+#define thumb_access_memory_generate_address_pc_relative(offset, _rb, _ro) \
+ generate_load_pc(reg_a0, (offset)) \
+
+#define thumb_access_memory_generate_address_reg_imm(offset, _rb, _ro) \
+ u32 __rb = prepare_load_reg(reg_a0, _rb); \
+ ARM_ADD_REG_IMM(0, reg_a0, __rb, offset, 0) \
+
+#define thumb_access_memory_generate_address_reg_imm_sp(offset, _rb, _ro) \
+ u32 __rb = prepare_load_reg(reg_a0, _rb); \
+ ARM_ADD_REG_IMM(0, reg_a0, __rb, offset, arm_imm_lsl_to_rot(2)) \
+
+#define thumb_access_memory_generate_address_reg_reg(offset, _rb, _ro) \
+ u32 __rb = prepare_load_reg(reg_a0, _rb); \
+ u32 __ro = prepare_load_reg(reg_a1, _ro); \
+ ARM_ADD_REG_REG(0, reg_a0, __rb, __ro) \
+
+#define thumb_access_memory(access_type, op_type, _rd, _rb, _ro, \
+ address_type, offset, mem_type) \
+{ \
+ thumb_decode_##op_type(); \
+ thumb_access_memory_generate_address_##address_type(offset, _rb, _ro); \
+ thumb_access_memory_##access_type(mem_type, _rd); \
+} \
+
+// TODO: Make these use cached registers. Implement iwram_stack_optimize.
+
+#define thumb_block_address_preadjust_up() \
+ generate_add_imm(reg_s0, (bit_count[reg_list] * 4), 0) \
+
+#define thumb_block_address_preadjust_down() \
+ generate_sub_imm(reg_s0, (bit_count[reg_list] * 4), 0) \
+
+#define thumb_block_address_preadjust_push_lr() \
+ generate_sub_imm(reg_s0, ((bit_count[reg_list] + 1) * 4), 0) \
+
+#define thumb_block_address_preadjust_no() \
+
+#define thumb_block_address_postadjust_no(base_reg) \
+ generate_store_reg(reg_s0, base_reg) \
+
+#define thumb_block_address_postadjust_up(base_reg) \
+ generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0); \
+ generate_store_reg(reg_a0, base_reg) \
+
+#define thumb_block_address_postadjust_down(base_reg) \
+ generate_mov(reg_a0, reg_s0); \
+ generate_sub_imm(reg_a0, (bit_count[reg_list] * 4), 0); \
+ generate_store_reg(reg_a0, base_reg) \
+
+#define thumb_block_address_postadjust_pop_pc(base_reg) \
+ generate_add_reg_reg_imm(reg_a0, reg_s0, \
+ ((bit_count[reg_list] + 1) * 4), 0); \
+ generate_store_reg(reg_a0, base_reg) \
+
+#define thumb_block_address_postadjust_push_lr(base_reg) \
+ generate_store_reg(reg_s0, base_reg) \
+
+#define thumb_block_memory_extra_no() \
+
+#define thumb_block_memory_extra_up() \
+
+#define thumb_block_memory_extra_down() \
+
+#define thumb_block_memory_extra_pop_pc() \
+ generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0); \
+ generate_function_call(execute_load_u32); \
+ write32((pc + 4)); \
+ generate_mov(reg_a0, reg_rv); \
+ generate_indirect_branch_cycle_update(thumb) \
+
+#define thumb_block_memory_extra_push_lr(base_reg) \
+ generate_add_reg_reg_imm(reg_a0, reg_s0, (bit_count[reg_list] * 4), 0); \
+ generate_load_reg(reg_a1, REG_LR); \
+ generate_function_call(execute_store_u32_safe) \
+
+#define thumb_block_memory_load() \
+ generate_function_call(execute_load_u32); \
+ write32((pc + 4)); \
+ generate_store_reg(reg_rv, i) \
+
+#define thumb_block_memory_store() \
+ generate_load_reg(reg_a1, i); \
+ generate_function_call(execute_store_u32_safe) \
+
+#define thumb_block_memory_final_load() \
+ thumb_block_memory_load() \
+
+#define thumb_block_memory_final_store() \
+ generate_load_reg(reg_a1, i); \
+ generate_function_call(execute_store_u32); \
+ write32((pc + 2)) \
+
+#define thumb_block_memory_final_no(access_type) \
+ thumb_block_memory_final_##access_type() \
+
+#define thumb_block_memory_final_up(access_type) \
+ thumb_block_memory_final_##access_type() \
+
+#define thumb_block_memory_final_down(access_type) \
+ thumb_block_memory_final_##access_type() \
+
+#define thumb_block_memory_final_push_lr(access_type) \
+ thumb_block_memory_##access_type() \
+
+#define thumb_block_memory_final_pop_pc(access_type) \
+ thumb_block_memory_##access_type() \
+
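+// Emits a Thumb LDMIA/STMIA/PUSH/POP style block transfer: the base address
+// is word-aligned, the pre/post adjustment macros handle the writeback
+// variants, and registers r0-r7 are transferred lowest-first at ascending
+// addresses, with the final transfer treated specially (writeback, POP {pc},
+// or pushing LR).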
+#define thumb_block_memory(access_type, pre_op, post_op, base_reg) \
+{ \
+ thumb_decode_rlist(); \
+ u32 i; \
+ u32 offset = 0; \
+ \
+ generate_load_reg(reg_s0, base_reg); \
+ ARM_BIC_REG_IMM(0, reg_s0, reg_s0, 0x03, 0); \
+ thumb_block_address_preadjust_##pre_op(); \
+ thumb_block_address_postadjust_##post_op(base_reg); \
+ \
+ for(i = 0; i < 8; i++) \
+ { \
+ if((reg_list >> i) & 0x01) \
+ { \
+ cycle_count++; \
+ generate_add_reg_reg_imm(reg_a0, reg_s0, offset, 0); \
+ if(reg_list & ~((2 << i) - 1)) \
+ { \
+ thumb_block_memory_##access_type(); \
+ offset += 4; \
+ } \
+ else \
+ { \
+ thumb_block_memory_final_##post_op(access_type); \
+ break; \
+ } \
+ } \
+ } \
+ \
+ thumb_block_memory_extra_##post_op(); \
+} \
+
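+// Conditional Thumb branches emit a placeholder branch on the opposite
+// condition to skip the exit code, generate the block exit, then backpatch
+// the placeholder to jump to translation_ptr (the fall-through path).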
+#define thumb_conditional_branch(condition) \
+{ \
+ generate_cycle_update(); \
+ generate_load_flags(); \
+ generate_branch_filler(condition_opposite_##condition, backpatch_address); \
+ generate_branch_no_cycle_update( \
+ block_exits[block_exit_position].branch_source, \
+ block_exits[block_exit_position].branch_target, thumb); \
+ generate_branch_patch_conditional(backpatch_address, translation_ptr); \
+ block_exit_position++; \
+} \
+
+
+#define arm_conditional_block_header() \
+ generate_cycle_update(); \
+ generate_load_flags(); \
+ /* This will choose the opposite condition */ \
+ condition ^= 0x01; \
+ generate_branch_filler(condition, backpatch_address) \
+
+#define arm_b() \
+ generate_branch(arm) \
+
+#define arm_bl() \
+ generate_update_pc((pc + 4)); \
+ generate_store_reg(reg_a0, REG_LR); \
+ generate_branch(arm) \
+
+#define arm_bx() \
+ arm_decode_branchx(); \
+ generate_load_reg(reg_a0, rn); \
+ generate_indirect_branch_dual(); \
+
+#define arm_swi() \
+ generate_swi_hle_handler((opcode >> 16) & 0xFF, arm); \
+ generate_function_call(execute_swi_arm); \
+ write32((pc + 4)); \
+ generate_branch(arm) \
+
+#define thumb_b() \
+ generate_branch(thumb) \
+
+#define thumb_bl() \
+ generate_update_pc(((pc + 2) | 0x01)); \
+ generate_store_reg(reg_a0, REG_LR); \
+ generate_branch(thumb) \
+
+#define thumb_blh() \
+{ \
+ thumb_decode_branch(); \
+ generate_update_pc(((pc + 2) | 0x01)); \
+ generate_load_reg(reg_a1, REG_LR); \
+ generate_store_reg(reg_a0, REG_LR); \
+ generate_mov(reg_a0, reg_a1); \
+ generate_add_imm(reg_a0, (offset * 2), 0); \
+ generate_indirect_branch_cycle_update(thumb); \
+} \
+
+#define thumb_bx() \
+{ \
+ thumb_decode_hireg_op(); \
+ generate_load_reg_pc(reg_a0, rs, 4); \
+ generate_indirect_branch_cycle_update(dual_thumb); \
+} \
+
+#define thumb_swi() \
+ generate_swi_hle_handler(opcode & 0xFF, thumb); \
+ generate_function_call(execute_swi_thumb); \
+ write32((pc + 2)); \
+ /* We're in ARM mode now */ \
+ generate_branch(arm) \
+
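+// Nonzero entries mark BIOS calls that are replaced with HLE handlers by
+// generate_swi_hle_handler below; currently only Div (SWI 0x06) is handled.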
+u8 swi_hle_handle[256] =
+{
+ 0x0, // SWI 0: SoftReset
+ 0x0, // SWI 1: RegisterRAMReset
+ 0x0, // SWI 2: Halt
+ 0x0, // SWI 3: Stop/Sleep
+ 0x0, // SWI 4: IntrWait
+ 0x0, // SWI 5: VBlankIntrWait
+ 0x1, // SWI 6: Div
+ 0x0, // SWI 7: DivArm
+ 0x0, // SWI 8: Sqrt
+ 0x0, // SWI 9: ArcTan
+ 0x0, // SWI A: ArcTan2
+ 0x0, // SWI B: CpuSet
+ 0x0, // SWI C: CpuFastSet
+ 0x0, // SWI D: GetBIOSCheckSum
+ 0x0, // SWI E: BgAffineSet
+ 0x0, // SWI F: ObjAffineSet
+ 0x0, // SWI 10: BitUnpack
+ 0x0, // SWI 11: LZ77UnCompWram
+ 0x0, // SWI 12: LZ77UnCompVram
+ 0x0, // SWI 13: HuffUnComp
+ 0x0, // SWI 14: RLUnCompWram
+ 0x0, // SWI 15: RLUnCompVram
+ 0x0, // SWI 16: Diff8bitUnFilterWram
+ 0x0, // SWI 17: Diff8bitUnFilterVram
+ 0x0, // SWI 18: Diff16bitUnFilter
+ 0x0, // SWI 19: SoundBias
+ 0x0, // SWI 1A: SoundDriverInit
+ 0x0, // SWI 1B: SoundDriverMode
+ 0x0, // SWI 1C: SoundDriverMain
+ 0x0, // SWI 1D: SoundDriverVSync
+ 0x0, // SWI 1E: SoundChannelClear
+ 0x0, // SWI 1F: MidiKey2Freq
+ 0x0, // SWI 20: SoundWhatever0
+ 0x0, // SWI 21: SoundWhatever1
+ 0x0, // SWI 22: SoundWhatever2
+ 0x0, // SWI 23: SoundWhatever3
+ 0x0, // SWI 24: SoundWhatever4
+ 0x0, // SWI 25: MultiBoot
+ 0x0, // SWI 26: HardReset
+ 0x0, // SWI 27: CustomHalt
+ 0x0, // SWI 28: SoundDriverVSyncOff
+ 0x0, // SWI 29: SoundDriverVSyncOn
+ 0x0 // SWI 2A: SoundGetJumpList
+};
+
+void execute_swi_hle_div_arm();
+void execute_swi_hle_div_thumb();
+
+void execute_swi_hle_div_c()
+{
+ s32 result = (s32)reg[0] / (s32)reg[1];
+ reg[1] = (s32)reg[0] % (s32)reg[1];
+ reg[0] = result;
+
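+  // The BIOS Div call also returns the absolute value of the quotient in r3;
+  // (x ^ (x >> 31)) - (x >> 31) is a branchless abs().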
+ reg[3] = (result ^ (result >> 31)) - (result >> 31);
+}
+
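+/* If the SWI is marked in swi_hle_handle, emit a call to its HLE handler and
+   skip normal BIOS entry; the bare break presumably exits the opcode switch
+   at the macro's expansion site in the translator. */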
+#define generate_swi_hle_handler(_swi_number, mode) \
+{ \
+ u32 swi_number = _swi_number; \
+ if(swi_hle_handle[swi_number]) \
+ { \
+ /* Div */ \
+ if(swi_number == 0x06) \
+ { \
+ generate_function_call(execute_swi_hle_div_##mode); \
+ } \
+ break; \
+ } \
+} \
+
+#define generate_translation_gate(type) \
+ generate_update_pc(pc); \
+ generate_indirect_branch_no_cycle_update(type) \
+
+#define generate_step_debug() \
+ generate_function_call(step_debug_arm); \
+ write32(pc) \
+
+#endif
+
diff --git a/arm/arm_stub.S b/arm/arm_stub.S
new file mode 100644
index 0000000..a7271fd
--- /dev/null
+++ b/arm/arm_stub.S
@@ -0,0 +1,1004 @@
+.align 2
+
+.global arm_update_gba_arm
+.global arm_update_gba_thumb
+.global arm_update_gba_idle_arm
+.global arm_update_gba_idle_thumb
+
+.global arm_indirect_branch_arm
+.global arm_indirect_branch_thumb
+.global arm_indirect_branch_dual_arm
+.global arm_indirect_branch_dual_thumb
+
+.global execute_arm_translate
+
+.global execute_store_u8
+.global execute_store_u16
+.global execute_store_u32
+.global execute_store_u32_safe
+
+.global execute_load_u8
+.global execute_load_s8
+.global execute_load_u16
+.global execute_load_s16
+.global execute_load_u32
+
+.global execute_store_cpsr
+.global execute_read_spsr
+.global execute_store_spsr
+.global execute_spsr_restore
+
+.global execute_swi_arm
+.global execute_swi_thumb
+
+.global execute_patch_bios_read
+.global execute_patch_bios_protect
+
+.global execute_bios_ptr_protected
+.global execute_bios_rom_ptr
+
+
+.global step_debug_arm
+
+.global invalidate_icache_region
+.global invalidate_cache_region
+
+.global memory_map_read
+.global memory_map_write
+.global reg
+
+#define REG_BASE_OFFSET 1024
+
+#define REG_R0 (REG_BASE_OFFSET + (0 * 4))
+#define REG_R1 (REG_BASE_OFFSET + (1 * 4))
+#define REG_R2 (REG_BASE_OFFSET + (2 * 4))
+#define REG_R3 (REG_BASE_OFFSET + (3 * 4))
+#define REG_R4 (REG_BASE_OFFSET + (4 * 4))
+#define REG_R5 (REG_BASE_OFFSET + (5 * 4))
+#define REG_R6 (REG_BASE_OFFSET + (6 * 4))
+#define REG_R7 (REG_BASE_OFFSET + (7 * 4))
+#define REG_R8 (REG_BASE_OFFSET + (8 * 4))
+#define REG_R9 (REG_BASE_OFFSET + (9 * 4))
+#define REG_R10 (REG_BASE_OFFSET + (10 * 4))
+#define REG_R11 (REG_BASE_OFFSET + (11 * 4))
+#define REG_R12 (REG_BASE_OFFSET + (12 * 4))
+#define REG_R13 (REG_BASE_OFFSET + (13 * 4))
+#define REG_R14 (REG_BASE_OFFSET + (14 * 4))
+#define REG_SP (REG_BASE_OFFSET + (13 * 4))
+#define REG_LR (REG_BASE_OFFSET + (14 * 4))
+#define REG_PC (REG_BASE_OFFSET + (15 * 4))
+
+#define REG_N_FLAG (REG_BASE_OFFSET + (16 * 4))
+#define REG_Z_FLAG (REG_BASE_OFFSET + (17 * 4))
+#define REG_C_FLAG (REG_BASE_OFFSET + (18 * 4))
+#define REG_V_FLAG (REG_BASE_OFFSET + (19 * 4))
+#define REG_CPSR (REG_BASE_OFFSET + (20 * 4))
+
+#define REG_SAVE (REG_BASE_OFFSET + (21 * 4))
+#define REG_SAVE2 (REG_BASE_OFFSET + (22 * 4))
+#define REG_SAVE3 (REG_BASE_OFFSET + (23 * 4))
+
+#define CPU_MODE (REG_BASE_OFFSET + (29 * 4))
+#define CPU_HALT_STATE (REG_BASE_OFFSET + (30 * 4))
+#define CHANGED_PC_STATUS (REG_BASE_OFFSET + (31 * 4))
+
+
+#define reg_a0 r0
+#define reg_a1 r1
+#define reg_a2 r2
+
+#define reg_s0 r9
+#define reg_base sp
+#define reg_flags r11
+
+#define reg_cycles r12
+
+#define reg_x0 r3
+#define reg_x1 r4
+#define reg_x2 r5
+#define reg_x3 r6
+#define reg_x4 r7
+#define reg_x5 r8
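+
+@ reg_x0-reg_x5 cache six GBA registers in host registers; which six they
+@ hold depends on the CPU mode (see load_registers_arm/thumb below).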
+
+
+#define MODE_SUPERVISOR 3
+
+
+@ Will load the register set from memory into the appropriate cached registers.
+@ See arm_emit.h for an explanation of the register mapping.
+
+#define load_registers_arm() ;\
+ ldr reg_x0, [reg_base, #REG_R0] ;\
+ ldr reg_x1, [reg_base, #REG_R1] ;\
+ ldr reg_x2, [reg_base, #REG_R6] ;\
+ ldr reg_x3, [reg_base, #REG_R9] ;\
+ ldr reg_x4, [reg_base, #REG_R12] ;\
+ ldr reg_x5, [reg_base, #REG_R14] ;\
+
+#define load_registers_thumb() ;\
+ ldr reg_x0, [reg_base, #REG_R0] ;\
+ ldr reg_x1, [reg_base, #REG_R1] ;\
+ ldr reg_x2, [reg_base, #REG_R2] ;\
+ ldr reg_x3, [reg_base, #REG_R3] ;\
+ ldr reg_x4, [reg_base, #REG_R4] ;\
+ ldr reg_x5, [reg_base, #REG_R5] ;\
+
+
+@ Will store the register set from cached registers back to memory.
+
+#define store_registers_arm() ;\
+ str reg_x0, [reg_base, #REG_R0] ;\
+ str reg_x1, [reg_base, #REG_R1] ;\
+ str reg_x2, [reg_base, #REG_R6] ;\
+ str reg_x3, [reg_base, #REG_R9] ;\
+ str reg_x4, [reg_base, #REG_R12] ;\
+ str reg_x5, [reg_base, #REG_R14] ;\
+
+#define store_registers_thumb() ;\
+ str reg_x0, [reg_base, #REG_R0] ;\
+ str reg_x1, [reg_base, #REG_R1] ;\
+ str reg_x2, [reg_base, #REG_R2] ;\
+ str reg_x3, [reg_base, #REG_R3] ;\
+ str reg_x4, [reg_base, #REG_R4] ;\
+ str reg_x5, [reg_base, #REG_R5] ;\
+
+
+@ Merges the cached flags register into the persistent cpsr.
+@ Uses reg as a temporary register and leaves the merged cpsr in it.
+
+#define collapse_flags_no_update(reg) ;\
+ ldr reg, [reg_base, #REG_CPSR] /* reg = cpsr */;\
+ bic reg, reg, #0xF0000000 /* clear ALU flags in cpsr */;\
+ and reg_flags, reg_flags, #0xF0000000 /* clear non-ALU flags */;\
+ orr reg, reg, reg_flags /* update cpsr with ALU flags */;\
+
+@ Updates cpsr using the above macro.
+
+#define collapse_flags(reg) ;\
+ collapse_flags_no_update(reg) ;\
+ str reg, [reg_base, #REG_CPSR] ;\
+
+@ Loads the saved flags register from the persistent cpsr.
+
+#define extract_flags() ;\
+ ldr reg_flags, [reg_base, #REG_CPSR] ;\
+ msr cpsr_f, reg_flags ;\
+
+
+#define save_flags() ;\
+ mrs reg_flags, cpsr ;\
+
+#define restore_flags() ;\
+ msr cpsr_f, reg_flags ;\
+
+@ Calls a C function; all caller-save registers that matter to the dynarec
+@ and to returning from this stub are preserved across the call.
+
+#define call_c_function(function) ;\
+ stmdb sp!, { r3, r12, lr } ;\
+ bl function ;\
+ ldmia sp!, { r3, r12, lr } ;\
+
+
+@ Update the GBA hardware (video, sound, input, etc)
+
+@ Input:
+@ r0: current PC
+
+#define return_straight() ;\
+ bx lr ;\
+
+#define return_add() ;\
+ add pc, lr, #4 ;\
+
+#define load_pc_straight() ;\
+ ldr r0, [lr, #-8] ;\
+
+#define load_pc_add() ;\
+ ldr r0, [lr] ;\
+
+
+#define arm_update_gba_builder(name, mode, return_op) ;\
+ ;\
+arm_update_gba_##name: ;\
+ load_pc_##return_op() ;\
+ str r0, [reg_base, #REG_PC] /* write out the PC */;\
+ ;\
+ save_flags() ;\
+ collapse_flags(r0) /* update the flags */;\
+ ;\
+ store_registers_##mode() /* save out registers */;\
+ call_c_function(update_gba) /* update GBA state */;\
+ ;\
+ mvn reg_cycles, r0 /* load new cycle count */;\
+ ;\
+ ldr r0, [reg_base, #CHANGED_PC_STATUS] /* load PC changed status */;\
+ cmp r0, #0 /* see if PC has changed */;\
+ beq 1f /* if not return */;\
+ ;\
+ ldr r0, [reg_base, #REG_PC] /* load new PC */;\
+ ldr r1, [reg_base, #REG_CPSR] /* r1 = flags */;\
+ tst r1, #0x20 /* see if Thumb bit is set */;\
+ bne 2f /* if so load Thumb PC */;\
+ ;\
+ load_registers_arm() /* load ARM regs */;\
+ call_c_function(block_lookup_address_arm) ;\
+ restore_flags() ;\
+ bx r0 /* jump to new ARM block */;\
+ ;\
+1: ;\
+ load_registers_##mode() /* reload registers */;\
+ restore_flags() ;\
+ return_##return_op() ;\
+ ;\
+2: ;\
+ load_registers_thumb() /* load Thumb regs */;\
+ call_c_function(block_lookup_address_thumb) ;\
+ restore_flags() ;\
+  bx r0                                   /* jump to new Thumb block */;\
+
+
+arm_update_gba_builder(arm, arm, straight)
+arm_update_gba_builder(thumb, thumb, straight)
+
+arm_update_gba_builder(idle_arm, arm, add)
+arm_update_gba_builder(idle_thumb, thumb, add)
+
+
+
+@ These are branch stubs for performing indirect branches. They are
+@ branched to rather than called and never return; instead they jump
+@ to the looked-up translated block.
+
+@ Input:
+@ r0: PC to branch to
+
+arm_indirect_branch_arm:
+ save_flags()
+ call_c_function(block_lookup_address_arm)
+ restore_flags()
+ bx r0
+
+arm_indirect_branch_thumb:
+ save_flags()
+ call_c_function(block_lookup_address_thumb)
+ restore_flags()
+ bx r0
+
+arm_indirect_branch_dual_arm:
+ save_flags()
+ tst r0, #0x01 @ check lower bit
+ bne 1f @ if set going to Thumb mode
+ call_c_function(block_lookup_address_arm)
+ restore_flags()
+ bx r0 @ return
+
+1:
+ bic r0, r0, #0x01
+ store_registers_arm() @ save out ARM registers
+ load_registers_thumb() @ load in Thumb registers
+ ldr r1, [reg_base, #REG_CPSR] @ load cpsr
+ orr r1, r1, #0x20 @ set Thumb mode
+ str r1, [reg_base, #REG_CPSR] @ store flags
+ call_c_function(block_lookup_address_thumb)
+ restore_flags()
+ bx r0 @ return
+
+arm_indirect_branch_dual_thumb:
+ save_flags()
+ tst r0, #0x01 @ check lower bit
+  beq 1f                                  @ if clear, going to ARM mode
+ bic r0, r0, #0x01
+ call_c_function(block_lookup_address_thumb)
+ restore_flags()
+ bx r0 @ return
+
+1:
+ store_registers_thumb() @ save out Thumb registers
+ load_registers_arm() @ load in ARM registers
+ ldr r1, [reg_base, #REG_CPSR] @ load cpsr
+ bic r1, r1, #0x20 @ clear Thumb mode
+ str r1, [reg_base, #REG_CPSR] @ store flags
+ call_c_function(block_lookup_address_arm)
+ restore_flags()
+ bx r0 @ return
+
+
+@ Update the cpsr.
+
+@ Input:
+@ r0: new cpsr value
+@ r1: bitmask of which bits in cpsr to update
+@ r2: current PC
+
+execute_store_cpsr:
+ save_flags()
+ and reg_flags, r0, r1 @ reg_flags = new_cpsr & store_mask
+ ldr r0, [reg_base, #REG_CPSR] @ r0 = cpsr
+ bic r0, r0, r1 @ r0 = cpsr & ~store_mask
+  orr reg_flags, reg_flags, r0            @ reg_flags = merged cpsr
+
+ mov r0, reg_flags @ also put new cpsr in r0
+
+ store_registers_arm() @ save ARM registers
+ ldr r2, [lr] @ r2 = pc
+ call_c_function(execute_store_cpsr_body)
+ load_registers_arm() @ restore ARM registers
+
+ cmp r0, #0 @ check new PC
+ beq 1f @ if it's zero, return
+
+ call_c_function(block_lookup_address_arm)
+
+ restore_flags()
+ bx r0 @ return to new ARM address
+
+1:
+ restore_flags()
+ add pc, lr, #4 @ return
+
+
+@ Update the current spsr.
+
+@ Input:
+@ r0: new cpsr value
+@ r1: bitmask of which bits in spsr to update
+
+execute_store_spsr:
+ ldr r1, 1f @ r1 = spsr
+ ldr r2, [reg_base, #CPU_MODE] @ r2 = CPU_MODE
+ str r0, [r1, r2, lsl #2] @ spsr[CPU_MODE] = new_spsr
+ bx lr
+
+1:
+ .word spsr
+
+@ Read the current spsr.
+
+@ Output:
+@ r0: spsr
+
+execute_read_spsr:
+ ldr r0, 1b @ r0 = spsr
+ ldr r1, [reg_base, #CPU_MODE] @ r1 = CPU_MODE
+ ldr r0, [r0, r1, lsl #2] @ r0 = spsr[CPU_MODE]
+ bx lr @ return
+
+
+@ Restore the cpsr from the mode spsr and mode shift.
+
+@ Input:
+@ r0: current pc
+
+execute_spsr_restore:
+ save_flags()
+ ldr r1, 1f @ r1 = spsr
+ ldr r2, [reg_base, #CPU_MODE] @ r2 = cpu_mode
+ ldr r1, [r1, r2, lsl #2] @ r1 = spsr[cpu_mode] (new cpsr)
+ str r1, [reg_base, #REG_CPSR] @ update cpsr
+ mov reg_flags, r1 @ also, update shadow flags
+
+ @ This function call will pass r0 (address) and return it.
+ store_registers_arm() @ save ARM registers
+ call_c_function(execute_spsr_restore_body)
+
+ ldr r1, [reg_base, #REG_CPSR] @ r1 = cpsr
+ tst r1, #0x20 @ see if Thumb mode is set
+ bne 2f @ if so handle it
+
+ load_registers_arm() @ restore ARM registers
+ call_c_function(block_lookup_address_arm)
+ restore_flags()
+ bx r0
+
+ @ This will service execute_spsr_restore and execute_swi
+1:
+ .word spsr
+
+2:
+ load_registers_thumb() @ load Thumb registers
+ call_c_function(block_lookup_address_thumb)
+ restore_flags()
+ bx r0
+
+
+
+@ Setup the mode transition work for calling an SWI.
+
+@ Input:
+@ r0: current pc
+
+#define execute_swi_builder(mode) ;\
+ ;\
+execute_swi_##mode: ;\
+ save_flags() ;\
+ ldr r1, 1f /* r1 = reg_mode */;\
+ /* reg_mode[MODE_SUPERVISOR][6] = pc */;\
+ ldr r0, [lr] /* load PC */;\
+ str r0, [r1, #((MODE_SUPERVISOR * (7 * 4)) + (6 * 4))] ;\
+ collapse_flags_no_update(r0) /* r0 = cpsr */;\
+ ldr r1, 2f /* r1 = spsr */;\
+ str r0, [r1, #(MODE_SUPERVISOR * 4)] /* spsr[MODE_SUPERVISOR] = cpsr */;\
+ bic r0, r0, #0x3F /* clear mode flag in r0 */;\
+ orr r0, r0, #0x13 /* set to supervisor mode */;\
+ str r0, [reg_base, #REG_CPSR] /* update cpsr */;\
+ ;\
+ call_c_function(bios_region_read_allow) ;\
+ ;\
+ mov r0, #MODE_SUPERVISOR ;\
+ ;\
+ store_registers_##mode() /* store regs for mode */;\
+  call_c_function(set_cpu_mode)          /* set the CPU mode to supervisor */;\
+ load_registers_arm() /* load ARM regs */;\
+ ;\
+ restore_flags() ;\
+ add pc, lr, #4 /* return */;\
+ ;\
+1: ;\
+ .word reg_mode ;\
+ ;\
+2: ;\
+ .word spsr ;\
+ ;\
+3: ;\
+ .word execute_bios_rom_ptr ;\
+
+execute_swi_builder(arm)
+execute_swi_builder(thumb)
+
+
+@ Wrapper for calling SWI functions in C (or can implement some in ASM if
+@ desired)
+
+#define execute_swi_function_builder(swi_function, mode) ;\
+ ;\
+ .global execute_swi_hle_##swi_function##_##mode ;\
+execute_swi_hle_##swi_function##_##mode: ;\
+ save_flags() ;\
+ store_registers_##mode() ;\
+ call_c_function(execute_swi_hle_##swi_function##_c) ;\
+ load_registers_##mode() ;\
+ restore_flags() ;\
+ bx lr ;\
+
+execute_swi_function_builder(div, arm)
+execute_swi_function_builder(div, thumb)
+
+
+@ Start program execution. Normally the mode will be ARM and the
+@ PC will be 0x8000000; if a save state was preloaded, both may differ.
+
+@ Input:
+@ r0: initial value for cycle counter
+
+@ Uses sp as reg_base; this assignment must hold for as long as translated
+@ code is running.
+
+execute_arm_translate:
+ sub sp, sp, #0x100 @ allocate room for register data
+
+ mvn reg_cycles, r0 @ load cycle counter
+
+ mov r0, reg_base @ load reg_base into first param
+ call_c_function(move_reg) @ make reg_base the new reg ptr
+
+ sub sp, sp, #REG_BASE_OFFSET @ allocate room for ptr table
+ bl load_ptr_read_function_table @ load read function ptr table
+
+ ldr r0, [reg_base, #REG_PC] @ r0 = current pc
+ ldr r1, [reg_base, #REG_CPSR] @ r1 = flags
+ tst r1, #0x20 @ see if Thumb bit is set
+
+ bne 1f @ if so lookup thumb
+
+ load_registers_arm() @ load ARM registers
+ call_c_function(block_lookup_address_arm)
+ extract_flags() @ load flags
+ bx r0 @ jump to first ARM block
+
+1:
+ load_registers_thumb() @ load Thumb registers
+ call_c_function(block_lookup_address_thumb)
+ extract_flags() @ load flags
+ bx r0 @ jump to first Thumb block
+
+
+@ Write out to memory.
+
+@ Input:
+@ r0: address
+@ r1: value
+@ r2: current pc
+
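+@ Fast path: memory_map_write holds one pointer per 32KB page of the 28-bit
+@ address space (0x8000 bytes = 8192 entries); a NULL entry means the page
+@ has no directly writable backing and the store falls back to the C
+@ write_memory handler.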
+#define execute_store_body(store_type, store_op) ;\
+ save_flags() ;\
+ stmdb sp!, { lr } /* save lr */;\
+ tst r0, #0xF0000000 /* make sure address is in range */;\
+ bne ext_store_u##store_type /* if not do ext store */;\
+ ;\
+ ldr r2, 1f /* r2 = memory_map_write */;\
+ mov lr, r0, lsr #15 /* lr = page index of address */;\
+ ldr r2, [r2, lr, lsl #2] /* r2 = memory page */;\
+ ;\
+ cmp r2, #0 /* see if map is ext */;\
+ beq ext_store_u##store_type /* if so do ext store */;\
+ ;\
+  mov r0, r0, lsl #17               /* shift bottom 15 bits to the top */;\
+  mov r0, r0, lsr #17               /* equivalent to AND with 0x7FFF */;\
+ store_op r1, [r2, r0] /* store result */;\
+
+
+#define store_align_8() ;\
+
+#define store_align_16() ;\
+ bic r0, r0, #0x01 ;\
+
+#define store_align_32() ;\
+ bic r0, r0, #0x03 ;\
+
+
+#define execute_store_builder(store_type, store_op, load_op) ;\
+ ;\
+execute_store_u##store_type: ;\
+ execute_store_body(store_type, store_op) ;\
+ sub r2, r2, #0x8000 /* Pointer to code status data */;\
+ load_op r0, [r2, r0] /* check code flag */;\
+ ;\
+ cmp r0, #0 /* see if it's not 0 */;\
+ bne 2f /* if so perform smc write */;\
+ ldmia sp!, { lr } /* restore lr */;\
+ restore_flags() ;\
+ add pc, lr, #4 /* return */;\
+ ;\
+2: ;\
+ ldmia sp!, { lr } /* restore lr */;\
+ ldr r0, [lr] /* load PC */;\
+ str r0, [reg_base, #REG_PC] /* write out PC */;\
+ b smc_write /* perform smc write */;\
+1: ;\
+ .word memory_map_write ;\
+ ;\
+ext_store_u##store_type: ;\
+ ldmia sp!, { lr } /* pop lr off of stack */;\
+ ldr r2, [lr] /* load PC */;\
+ str r2, [reg_base, #REG_PC] /* write out PC */;\
+ store_align_##store_type() ;\
+ call_c_function(write_memory##store_type) ;\
+ b write_epilogue /* handle additional write stuff */;\
+
+execute_store_builder(8, strb, ldrb)
+execute_store_builder(16, strh, ldrh)
+execute_store_builder(32, str, ldr)
+
+
+execute_store_u32_safe:
+ execute_store_body(32_safe, str)
+ restore_flags()
+ ldmia sp!, { pc } @ return
+
+1:
+ .word memory_map_write
+
+ext_store_u32_safe:
+ ldmia sp!, { lr } @ Restore lr
+ call_c_function(write_memory32) @ Perform 32bit store
+ restore_flags()
+ bx lr @ Return
+
+
+write_epilogue:
+  cmp r0, #0                        @ check if the write raised an alert
+ beq 4f @ if not we can exit
+
+ collapse_flags(r1) @ interrupt needs current flags
+
+ cmp r0, #2 @ see if the alert is due to SMC
+ beq smc_write @ if so, goto SMC handler
+
+ ldr r1, [reg_base, #REG_CPSR] @ r1 = cpsr
+ tst r1, #0x20 @ see if Thumb bit is set
+ bne 1f @ if so do Thumb update
+
+ store_registers_arm() @ save ARM registers
+
+3:
+ bl update_gba @ update GBA until CPU isn't halted
+
+ mvn reg_cycles, r0 @ load new cycle count
+ ldr r0, [reg_base, #REG_PC] @ load new PC
+ ldr r1, [reg_base, #REG_CPSR] @ r1 = flags
+ tst r1, #0x20 @ see if Thumb bit is set
+ bne 2f
+
+ load_registers_arm()
+ call_c_function(block_lookup_address_arm)
+ restore_flags()
+ bx r0 @ jump to new ARM block
+
+1:
+ store_registers_thumb() @ save Thumb registers
+ b 3b
+
+2:
+ load_registers_thumb()
+ call_c_function(block_lookup_address_thumb)
+ restore_flags()
+ bx r0 @ jump to new Thumb block
+
+4:
+ restore_flags()
+ add pc, lr, #4 @ return
+
+
+smc_write:
+ call_c_function(flush_translation_cache_ram)
+
+lookup_pc:
+ ldr r0, [reg_base, #REG_PC] @ r0 = new pc
+ ldr r1, [reg_base, #REG_CPSR] @ r1 = flags
+ tst r1, #0x20 @ see if Thumb bit is set
+ beq lookup_pc_arm @ if not lookup ARM
+
+lookup_pc_thumb:
+ call_c_function(block_lookup_address_thumb)
+ restore_flags()
+ bx r0 @ jump to new Thumb block
+
+lookup_pc_arm:
+ call_c_function(block_lookup_address_arm)
+ restore_flags()
+ bx r0 @ jump to new ARM block
+
+
+#define sign_extend_u8(reg)
+#define sign_extend_u16(reg)
+#define sign_extend_u32(reg)
+
+#define sign_extend_s8(reg) ;\
+ mov reg, reg, lsl #24 /* shift reg into upper 8bits */;\
+ mov reg, reg, asr #24 /* shift down, sign extending */;\
+
+#define sign_extend_s16(reg) ;\
+ mov reg, reg, lsl #16 /* shift reg into upper 16bits */;\
+ mov reg, reg, asr #16 /* shift down, sign extending */;\
+
+#define execute_load_op_u8(load_op) ;\
+ mov r0, r0, lsl #17 ;\
+ load_op r0, [r2, r0, lsr #17] ;\
+
+#define execute_load_op_s8(load_op) ;\
+ mov r0, r0, lsl #17 ;\
+ mov r0, r0, lsr #17 ;\
+ load_op r0, [r2, r0] ;\
+
+#define execute_load_op_u16(load_op) ;\
+ execute_load_op_s8(load_op) ;\
+
+#define execute_load_op_s16(load_op) ;\
+ execute_load_op_s8(load_op) ;\
+
+#define execute_load_op_u32(load_op) ;\
+ execute_load_op_u8(load_op) ;\
+
+
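+@ Loads mirror the store fast path: look the 32KB page up in memory_map_read
+@ and fall back to the C read_memory handler, sign-extending the result,
+@ when the address is out of range or the page is unmapped. The 16-bit masks
+@ also reject misaligned addresses so they take the slow path.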
+#define execute_load_builder(load_type, load_function, load_op, mask) ;\
+ ;\
+execute_load_##load_type: ;\
+ save_flags() ;\
+ tst r0, mask /* make sure address is in range */;\
+ bne ext_load_##load_type /* if not do ext load */;\
+ ;\
+ ldr r2, 1f /* r2 = memory_map_read */;\
+ mov r1, r0, lsr #15 /* r1 = page index of address */;\
+ ldr r2, [r2, r1, lsl #2] /* r2 = memory page */;\
+ ;\
+ cmp r2, #0 /* see if map is ext */;\
+ beq ext_load_##load_type /* if so do ext load */;\
+ ;\
+ execute_load_op_##load_type(load_op) ;\
+ restore_flags() ;\
+ add pc, lr, #4 /* return */;\
+ ;\
+ext_load_##load_type: ;\
+ ldr r1, [lr] /* r1 = PC */;\
+ str r1, [reg_base, #REG_PC] /* update PC */;\
+ call_c_function(read_memory##load_function) ;\
+ sign_extend_##load_type(r0) /* sign extend result */;\
+ restore_flags() ;\
+ add pc, lr, #4 /* return */;\
+ ;\
+1: ;\
+ .word memory_map_read ;\
+
+
+execute_load_builder(u8, 8, ldrneb, #0xF0000000)
+execute_load_builder(s8, 8, ldrnesb, #0xF0000000)
+execute_load_builder(u16, 16, ldrneh, #0xF0000001)
+execute_load_builder(s16, 16_signed, ldrnesh, #0xF0000001)
+execute_load_builder(u32, 32, ldrne, #0xF0000000)
+
+
+#define execute_ptr_builder(region, ptr, bits) ;\
+ ;\
+execute_##region##_ptr: ;\
+ ldr r1, 1f /* load region ptr */;\
+ mov r0, r0, lsl #(32 - bits) /* isolate bottom bits */;\
+ mov r0, r0, lsr #(32 - bits) ;\
+ bx lr /* return */;\
+ ;\
+1: ;\
+ .word (ptr) ;\
+
+
+execute_bios_ptr_protected:
+ ldr r1, 1f @ load bios read ptr
+ and r0, r0, #0x03 @ only want bottom 2 bits
+ bx lr @ return
+
+1:
+ .word bios_read_protect
+
+
+@ address = (address & 0x7FFF) + ((address & 0x38000) * 2) + 0x8000;
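+@ (the "+ 0x8000" term is folded into the base pointer loaded below, so the
+@ code only needs to compute the first two terms)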
+
+execute_ewram_ptr:
+ ldr r1, 1f @ load ewram read ptr
+ mov r2, r0, lsl #17 @ isolate bottom 15 bits
+ mov r2, r2, lsr #17
+  and r0, r0, #0x38000              @ isolate bits 15-17
+  add r0, r2, r0, lsl #1            @ add (bits 15-17) * 2 to bottom 15
+ bx lr @ return
+
+1:
+ .word (ewram + 0x8000)
+
+
+@ u32 gamepak_index = address >> 15;
+@ u8 *map = memory_map_read[gamepak_index];
+
+@ if(map == NULL)
+@ map = load_gamepak_page(gamepak_index & 0x3FF);
+
+@ value = address##type(map, address & 0x7FFF)
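+@ The "& 0x3FF" limits the index to 1024 pages of 32KB, i.e. the 32MB
+@ maximum gamepak size; pages are demand-loaded whenever the map entry
+@ is NULL.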
+
+execute_gamepak_ptr:
+ ldr r1, 1f @ load memory_map_read
+ mov r2, r0, lsr #15 @ isolate top 17 bits
+ ldr r1, [r1, r2, lsl #2] @ load memory map read ptr
+
+ save_flags()
+ cmp r1, #0 @ see if map entry is NULL
+ bne 2f @ if not resume
+
+ stmdb sp!, { r0 } @ save r0 on stack
+ mov r2, r2, lsl #20 @ isolate page index
+ mov r0, r2, lsr #20
+ call_c_function(load_gamepak_page) @ read new page into r0
+
+ mov r1, r0 @ new map = return
+ ldmia sp!, { r0 } @ restore r0
+
+2:
+ mov r0, r0, lsl #17 @ isolate bottom 15 bits
+ mov r0, r0, lsr #17
+ restore_flags()
+ bx lr @ return
+
+1:
+ .word memory_map_read
+
+
+@ These will store the result in a pointer, then pass that pointer.
+
+execute_eeprom_ptr:
+ save_flags()
+
+ call_c_function(read_eeprom) @ load EEPROM result
+ add r1, reg_base, #(REG_SAVE & 0xFF00)
+ add r1, r1, #(REG_SAVE & 0xFF)
+ strh r0, [r1] @ write result out
+ mov r0, #0 @ zero out address
+
+ restore_flags()
+ bx lr @ return
+
+
+execute_backup_ptr:
+ save_flags()
+
+  mov r0, r0, lsl #16               @ only want bottom 16 bits
+ mov r0, r0, lsr #16
+ call_c_function(read_backup) @ load backup result
+ add r1, reg_base, #(REG_SAVE & 0xFF00)
+ add r1, r1, #(REG_SAVE & 0xFF)
+ strb r0, [r1] @ write result out
+ mov r0, #0 @ zero out address
+
+ restore_flags()
+ bx lr @ return
+
+
+execute_open_ptr:
+ ldr r1, [reg_base, #REG_CPSR] @ r1 = cpsr
+ save_flags()
+
+ stmdb sp!, { r0 } @ save r0
+
+ ldr r0, [lr, #-4] @ r0 = current PC
+
+ tst r1, #0x20 @ see if Thumb bit is set
+ bne 1f @ if so load Thumb op
+
+ call_c_function(read_memory32) @ read open address
+
+ add r1, reg_base, #((REG_SAVE + 4) & 0xFF00)
+ add r1, r1, #((REG_SAVE + 4) & 0xFF)
+ str r0, [r1] @ write out
+
+ ldmia sp!, { r0 } @ restore r0
+ and r0, r0, #0x03 @ isolate bottom 2 bits
+
+ restore_flags()
+ bx lr
+
+1:
+ call_c_function(read_memory16) @ read open address
+
+ orr r0, r0, r0, lsl #16 @ duplicate opcode over halves
+ add r1, reg_base, #((REG_SAVE + 4) & 0xFF00)
+ add r1, r1, #((REG_SAVE + 4) & 0xFF)
+
+ str r0, [r1] @ write out
+
+ ldmia sp!, { r0 } @ restore r0
+ and r0, r0, #0x03 @ isolate bottom 2 bits
+
+  restore_flags()
+ bx lr
+
+
+execute_ptr_builder(bios_rom, bios_rom, 14)
+execute_ptr_builder(iwram, iwram + 0x8000, 15)
+execute_ptr_builder(vram, vram, 17)
+execute_ptr_builder(oam_ram, oam_ram, 10)
+execute_ptr_builder(io_registers, io_registers, 10)
+execute_ptr_builder(palette_ram, palette_ram, 10)
+
+ptr_read_function_table:
+ .word execute_bios_ptr_protected @ 0x00: BIOS
+ .word execute_open_ptr @ 0x01: open
+ .word execute_ewram_ptr @ 0x02: ewram
+ .word execute_iwram_ptr @ 0x03: iwram
+ .word execute_io_registers_ptr @ 0x04: I/O registers
+ .word execute_palette_ram_ptr @ 0x05: palette RAM
+ .word execute_vram_ptr @ 0x06: vram
+ .word execute_oam_ram_ptr @ 0x07: oam RAM
+ .word execute_gamepak_ptr @ 0x08: gamepak
+ .word execute_gamepak_ptr @ 0x09: gamepak
+ .word execute_gamepak_ptr @ 0x0A: gamepak
+ .word execute_gamepak_ptr @ 0x0B: gamepak
+ .word execute_gamepak_ptr @ 0x0C: gamepak
+ .word execute_eeprom_ptr @ 0x0D: EEPROM
+ .word execute_backup_ptr @ 0x0E: backup
+
+.rept (256 - 15) @ 0x0F - 0xFF: open
+ .word execute_open_ptr
+.endr
+
+
+@ Set up the read function table.
+@ Load it onto the stack; assumes r3 is free to clobber.
+
+load_ptr_read_function_table:
+ mov r0, #256 @ 256 elements
+  ldr r1, 1f                        @ r1 = ptr_read_function_table
+ mov r2, sp @ load here
+
+2:
+ ldr r3, [r1], #4 @ read pointer
+ str r3, [r2], #4 @ write pointer
+
+ subs r0, r0, #1 @ goto next iteration
+ bne 2b
+
+ bx lr
+
+1:
+ .word ptr_read_function_table
+
+
+@ Patch the read function table to allow for BIOS reads.
+
+execute_patch_bios_read:
+ ldr r0, 1f @ r0 = patch function
+ ldr r1, 2f @ r1 = reg
+ ldr r1, [r1]
+ str r0, [r1, #-REG_BASE_OFFSET]
+ bx lr
+
+1:
+ .word execute_bios_rom_ptr
+
+2:
+ .word reg
+
+
+@ Patch the read function table to protect BIOS reads again.
+
+execute_patch_bios_protect:
+ ldr r0, 1f @ r0 = patch function
+ ldr r1, 2f @ r1 = reg
+ ldr r1, [r1]
+ str r0, [r1, #-REG_BASE_OFFSET]
+ bx lr
+
+1:
+ .word execute_bios_ptr_protected
+
+2:
+ .word reg
+
+
+#define save_reg_scratch(reg) ;\
+ ldr r2, [reg_base, #(REG_BASE_OFFSET + (reg * 4))] ;\
+ str r2, [reg_base, #(REG_BASE_OFFSET + (reg * 4) + 128)] ;\
+
+#define restore_reg_scratch(reg) ;\
+ ldr r2, [reg_base, #(REG_BASE_OFFSET + (reg * 4) + 128)] ;\
+ str r2, [reg_base, #(REG_BASE_OFFSET + (reg * 4))] ;\
+
+#define scratch_regs_thumb(type) ;\
+ type##_reg_scratch(0) ;\
+ type##_reg_scratch(1) ;\
+ type##_reg_scratch(2) ;\
+ type##_reg_scratch(3) ;\
+ type##_reg_scratch(4) ;\
+ type##_reg_scratch(5) ;\
+
+#define scratch_regs_arm(type) ;\
+ type##_reg_scratch(0) ;\
+ type##_reg_scratch(1) ;\
+ type##_reg_scratch(6) ;\
+ type##_reg_scratch(9) ;\
+ type##_reg_scratch(12) ;\
+ type##_reg_scratch(14) ;\
+
+
+step_debug_arm:
+ save_flags()
+ collapse_flags(r0)
+
+  ldr r0, [reg_base, #REG_CPSR]     @ r0 = cpsr
+ tst r0, #0x20 @ see if Thumb bit is set
+
+ ldr r0, [lr] @ load PC
+ mvn r1, reg_cycles @ load cycle counter
+
+  beq 1f                            @ if clear, handle ARM mode
+
+ scratch_regs_thumb(save)
+
+ store_registers_thumb() @ write back Thumb regs
+ call_c_function(step_debug) @ call debug step
+ scratch_regs_thumb(restore)
+ restore_flags()
+ add pc, lr, #4 @ return
+
+1:
+ scratch_regs_arm(save)
+ store_registers_arm() @ write back ARM regs
+ call_c_function(step_debug) @ call debug step
+ scratch_regs_arm(restore)
+ restore_flags()
+ add pc, lr, #4 @ return, skipping PC
+
+
+.comm memory_map_read, 0x8000
+.comm memory_map_write, 0x8000
+
+
+
diff --git a/arm/video_blend.S b/arm/video_blend.S
new file mode 100644
index 0000000..63a5480
--- /dev/null
+++ b/arm/video_blend.S
@@ -0,0 +1,181 @@
+.align 2
+
+.global expand_blend
+.global expand_normal
+
+@ Input:
+@ r0 = screen_src_ptr
+@ r1 = screen_dest_ptr
+@ r2 = start
+@ r3 = end
+
+6:
+ .word io_registers
+ .word palette_ram_converted
+ .word 0x04000200 @ combine test mask
+ .word 0x07E0F81F @ clamp mask
+ .word 0x000003FE @ palette index mask
+ .word 0x08010020 @ saturation mask
+
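+@ Blending works on "dilated" pixels: each 16-bit 565 colour is duplicated
+@ into both halves of a word and masked with 0x07E0F81F so the three
+@ channels sit apart with spare bits between them. Both source pixels can
+@ then be multiplied by their 0-16 blend factors and summed without one
+@ channel carrying into the next; shifting right by 4 and re-masking yields
+@ the blended colour.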
+expand_blend:
+ stmdb sp!, { r4, r5, r6, r9, r10, r11, r14 }
+
+ add r0, r0, r2, lsl #2 @ screen_src_ptr += start
+ add r1, r1, r2, lsl #1 @ screen_dest_ptr += start
+ sub r2, r3, r2 @ r2 = end - start
+ ldr r3, 6b @ r3 = io_registers
+ ldr r3, [r3, #0x52] @ r3 = bldalpha
+ mov r4, r3, lsr #8 @ r4 = bldalpha >> 8
+ and r3, r3, #0x1F @ r3 = blend_a
+ and r4, r4, #0x1F @ r4 = blend_b
+ cmp r3, #16 @ if(blend_a > 16)
+ movgt r3, #16 @ blend_a = 16
+ cmp r4, #16 @ if(blend_b > 16)
+  movgt r4, #16                     @ blend_b = 16
+
+ ldr r14, 6b + 4 @ r14 = palette_ram_converted
+ ldr r12, 6b + 8 @ r12 = 0x04000200
+ ldr r11, 6b + 12 @ r11 = 0x07E0F81F
+ ldr r10, 6b + 16 @ r10 = 0x000003FE
+
+ add r5, r3, r4 @ r5 = blend_a + blend_b
+ cmp r5, #16 @ if((blend_a + blend_b) > 16)
+ bgt 3f @ goto loop w/saturation
+
+
+ @ loop w/o saturation
+1:
+ ldr r5, [r0], #4 @ r5 = pixel_pair, screen_src_ptr++
+ and r6, r5, r12 @ r6 = r5 & 0x04000200
+ cmp r6, r12 @ if(r6 != 0x4000200)
+ bne 2f @ goto no_blend
+
+ and r6, r10, r5, lsl #1 @ r6 = (pixel_pair & 0x1FF) << 1
+ ldrh r6, [r14, r6] @ r6 = pixel_top
+ orr r6, r6, r6, lsl #16 @ r6 = pixel_top | (pixel_top << 16)
+ and r6, r6, r11 @ r6 = pixel_top_dilated
+
+ and r5, r10, r5, lsr #15 @ r5 = ((pixel_pair >> 16) & 0x1FF) << 1
+ ldrh r5, [r14, r5] @ r5 = pixel_bottom
+ orr r5, r5, r5, lsl #16 @ r5 = pixel_bottom | (pixel_bottom << 16)
+ and r5, r5, r11 @ r5 = pixel_bottom_dilated
+
+ mul r5, r4, r5 @ r5 = pixel_bottom * blend_b = bottom_mul
+ mla r5, r3, r6, r5 @ r5 = (pixel_top * blend_a) + bottom_mul
+
+ and r5, r11, r5, lsr #4 @ r5 = (color_dilated >> 4) & 0x07E0F81F
+ orr r5, r5, r5, lsr #16 @ r5 = color_dilated | (color_dilated >> 16)
+
+ strh r5, [r1], #2 @ *screen_dest_ptr = r5, screen_dest_ptr++
+ subs r2, r2, #1 @ counter--
+ bne 1b @ go again
+
+ ldmia sp!, { r4, r5, r6, r9, r10, r11, pc }
+
+2:
+ and r5, r10, r5, lsl #1 @ r5 = (pixel_pair & 0x1FF) << 1
+ ldrh r5, [r14, r5] @ r5 = pixel_top
+ strh r5, [r1], #2 @ *screen_dest_ptr = r5, screen_dest_ptr++
+
+ subs r2, r2, #1 @ counter--
+ bne 1b @ go again
+
+ ldmia sp!, { r4, r5, r6, r9, r10, r11, pc }
+
+@ loop w/saturation
+
+3:
+ ldr r9, 6b + 20 @ r9 = 0x08010020
+
+4:
+ ldr r5, [r0], #4 @ r5 = pixel_pair, screen_src_ptr++
+ and r6, r5, r12 @ r6 = r5 & 0x04000200
+ cmp r6, r12 @ if(r6 != 0x4000200)
+ bne 5f @ goto no_blend
+
+ and r6, r10, r5, lsl #1 @ r6 = (pixel_pair & 0x1FF) << 1
+ ldrh r6, [r14, r6] @ r6 = pixel_top
+ orr r6, r6, r6, lsl #16 @ r6 = pixel_top | (pixel_top << 16)
+ and r6, r6, r11 @ r6 = pixel_top_dilated
+
+ and r5, r10, r5, lsr #15 @ r5 = ((pixel_pair >> 16) & 0x1FF) << 1
+ ldrh r5, [r14, r5] @ r5 = pixel_bottom
+ orr r5, r5, r5, lsl #16 @ r5 = pixel_bottom | (pixel_bottom << 16)
+ and r5, r5, r11 @ r5 = pixel_bottom_dilated
+
+ mul r5, r4, r5 @ r5 = pixel_bottom * blend_b = bottom_mul
+ mla r5, r3, r6, r5 @ r5 = (pixel_top * blend_a) + bottom_mul
+
+ and r6, r9, r5, lsr #4 @ r6 = saturation bits
+  orr r6, r6, r6, lsr #1            @ propagate saturation down from msb
+  orr r6, r6, r6, lsr #2            @ propagate down next two bits
+  orr r6, r6, r6, lsr #3            @ propagate down next three bits
+ orr r5, r6, r5, lsr #4 @ mask over result w/saturation
+
+ and r5, r11, r5 @ r5 = (color_dilated >> 4) & 0x07E0F81F
+ orr r5, r5, r5, lsr #16 @ r5 = color_dilated | (color_dilated >> 16)
+ strh r5, [r1], #2 @ *screen_dest_ptr = r5, screen_dest_ptr++
+
+ subs r2, r2, #1 @ counter--
+ bne 4b @ go again
+
+ ldmia sp!, { r4, r5, r6, r9, r10, r11, pc }
+
+5:
+ and r5, r10, r5, lsl #1 @ r5 = (pixel_pair & 0x1FF) << 1
+ ldrh r5, [r14, r5] @ r5 = pixel_top
+ strh r5, [r1], #2 @ *screen_dest_ptr = r5, screen_dest_ptr++
+
+ subs r2, r2, #1 @ counter--
+ bne 4b @ go again
+
+ ldmia sp!, { r4, r5, r6, r9, r10, r11, pc }
+
+
+
+@ The following function isn't complete (it only works on runs that are a
+@ multiple of 8 pixels), and unfortunately there isn't much opportunity to
+@ actually use it.
+
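+@ expand_pixel_pair converts two packed palette indices held in one word
+@ into two 16-bit pixels by looking each index up in palette_ram_converted.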
+#define expand_pixel_pair(reg, temp) ;\
+ and temp, r3, reg, lsr #15 ;\
+ ldrh temp, [r2, temp] ;\
+ ;\
+ and reg, r3, reg, lsl #1 ;\
+ ldrh reg, [r2, reg] ;\
+ ;\
+ orr reg, reg, temp, lsl #16 ;\
+
+
+@ Input:
+@ r0 = screen_ptr
+@ r1 = start
+@ r2 = end
+
+1:
+ .word palette_ram_converted
+ .word 0x3FE
+
+expand_normal:
+ stmdb sp!, { r4, r5, r6, r7, r14 }
+
+ add r0, r0, r1, lsl #1 @ screen_ptr += start
+ sub r1, r2, r1 @ r1 = end - start
+ ldr r2, 1b @ r2 = palette_ram_converted
+ ldr r3, 1b + 4 @ r3 = 0x3FE
+
+2:
+ ldmia r0, { r4, r5, r6, r7 }
+
+ expand_pixel_pair(r4, r14)
+ expand_pixel_pair(r5, r14)
+ expand_pixel_pair(r6, r14)
+ expand_pixel_pair(r7, r14)
+
+ stmia r0!, { r4, r5, r6, r7 }
+
+ subs r1, r1, #8
+ bne 2b
+
+ ldmia sp!, { r4, r5, r6, r7, pc }
+