path: root/libpcsxcore/new_dynarec/arm/assem_arm.c
Diffstat (limited to 'libpcsxcore/new_dynarec/arm/assem_arm.c')
-rw-r--r--  libpcsxcore/new_dynarec/arm/assem_arm.c  4143
1 files changed, 4143 insertions, 0 deletions
diff --git a/libpcsxcore/new_dynarec/arm/assem_arm.c b/libpcsxcore/new_dynarec/arm/assem_arm.c
new file mode 100644
index 0000000..db1d2af
--- /dev/null
+++ b/libpcsxcore/new_dynarec/arm/assem_arm.c
@@ -0,0 +1,4143 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Mupen64plus/PCSX - assem_arm.c *
+ * Copyright (C) 2009-2011 Ari64 *
+ * Copyright (C) 2010-2011 Gražvydas "notaz" Ignotas *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the *
+ * Free Software Foundation, Inc., *
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "../../gte.h"
+#define FLAGLESS
+#include "../../gte.h"
+#undef FLAGLESS
+#include "../../gte_arm.h"
+#include "../../gte_neon.h"
+#include "pcnt.h"
+#include "arm_features.h"
+
+#if defined(BASE_ADDR_FIXED)
+#elif defined(BASE_ADDR_DYNAMIC)
+char *translation_cache;
+#else
+char translation_cache[1 << TARGET_SIZE_2] __attribute__((aligned(4096)));
+#endif
+
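+// Bitmask of host registers clobbered across C calls: r0-r3 and r12 (0x100f).
+// On Darwin (__MACH__) r9 is also treated as call-clobbered, hence 0x120f.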
+#ifndef __MACH__
+#define CALLER_SAVE_REGS 0x100f
+#else
+#define CALLER_SAVE_REGS 0x120f
+#endif
+
+#define unused __attribute__((unused))
+
+extern int cycle_count;
+extern int last_count;
+extern int pcaddr;
+extern int pending_exception;
+extern int branch_target;
+extern uint64_t readmem_dword;
+extern void *dynarec_local;
+extern u_int mini_ht[32][2];
+
+void indirect_jump_indexed();
+void indirect_jump();
+void do_interrupt();
+void jump_vaddr_r0();
+void jump_vaddr_r1();
+void jump_vaddr_r2();
+void jump_vaddr_r3();
+void jump_vaddr_r4();
+void jump_vaddr_r5();
+void jump_vaddr_r6();
+void jump_vaddr_r7();
+void jump_vaddr_r8();
+void jump_vaddr_r9();
+void jump_vaddr_r10();
+void jump_vaddr_r12();
+
+const u_int jump_vaddr_reg[16] = {
+ (int)jump_vaddr_r0,
+ (int)jump_vaddr_r1,
+ (int)jump_vaddr_r2,
+ (int)jump_vaddr_r3,
+ (int)jump_vaddr_r4,
+ (int)jump_vaddr_r5,
+ (int)jump_vaddr_r6,
+ (int)jump_vaddr_r7,
+ (int)jump_vaddr_r8,
+ (int)jump_vaddr_r9,
+ (int)jump_vaddr_r10,
+ 0,
+ (int)jump_vaddr_r12,
+ 0,
+ 0,
+ 0};
+
+void invalidate_addr_r0();
+void invalidate_addr_r1();
+void invalidate_addr_r2();
+void invalidate_addr_r3();
+void invalidate_addr_r4();
+void invalidate_addr_r5();
+void invalidate_addr_r6();
+void invalidate_addr_r7();
+void invalidate_addr_r8();
+void invalidate_addr_r9();
+void invalidate_addr_r10();
+void invalidate_addr_r12();
+
+const u_int invalidate_addr_reg[16] = {
+ (int)invalidate_addr_r0,
+ (int)invalidate_addr_r1,
+ (int)invalidate_addr_r2,
+ (int)invalidate_addr_r3,
+ (int)invalidate_addr_r4,
+ (int)invalidate_addr_r5,
+ (int)invalidate_addr_r6,
+ (int)invalidate_addr_r7,
+ (int)invalidate_addr_r8,
+ (int)invalidate_addr_r9,
+ (int)invalidate_addr_r10,
+ 0,
+ (int)invalidate_addr_r12,
+ 0,
+ 0,
+ 0};
+
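+// Pages of the translation cache that still need an icache flush (judging by
+// the name): (1<<TARGET_SIZE_2)>>17 words of 32 bits, i.e. one bit per 4KB.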
+static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
+
+/* Linker */
+
+static void set_jump_target(int addr,u_int target)
+{
+ u_char *ptr=(u_char *)addr;
+ u_int *ptr2=(u_int *)ptr;
+ if(ptr[3]==0xe2) {
+ assert((target-(u_int)ptr2-8)<1024);
+ assert((addr&3)==0);
+ assert((target&3)==0);
+ *ptr2=(*ptr2&0xFFFFF000)|((target-(u_int)ptr2-8)>>2)|0xF00;
+ //printf("target=%x addr=%x insn=%x\n",target,addr,*ptr2);
+ }
+ else if(ptr[3]==0x72) {
+ // generated by emit_jno_unlikely
+ if((target-(u_int)ptr2-8)<1024) {
+ assert((addr&3)==0);
+ assert((target&3)==0);
+ *ptr2=(*ptr2&0xFFFFF000)|((target-(u_int)ptr2-8)>>2)|0xF00;
+ }
+ else if((target-(u_int)ptr2-8)<4096&&!((target-(u_int)ptr2-8)&15)) {
+ assert((addr&3)==0);
+ assert((target&3)==0);
+ *ptr2=(*ptr2&0xFFFFF000)|((target-(u_int)ptr2-8)>>4)|0xE00;
+ }
+ else *ptr2=(0x7A000000)|(((target-(u_int)ptr2-8)<<6)>>8);
+ }
+ else {
+ assert((ptr[3]&0x0e)==0xa);
+ *ptr2=(*ptr2&0xFF000000)|(((target-(u_int)ptr2-8)<<6)>>8);
+ }
+}
+
+// This optionally copies the instruction from the target of the branch into
+// the space before the branch. Works, but the difference in speed is
+// usually insignificant.
+#if 0
+static void set_jump_target_fillslot(int addr,u_int target,int copy)
+{
+ u_char *ptr=(u_char *)addr;
+ u_int *ptr2=(u_int *)ptr;
+ assert(!copy||ptr2[-1]==0xe28dd000);
+ if(ptr[3]==0xe2) {
+ assert(!copy);
+ assert((target-(u_int)ptr2-8)<4096);
+ *ptr2=(*ptr2&0xFFFFF000)|(target-(u_int)ptr2-8);
+ }
+ else {
+ assert((ptr[3]&0x0e)==0xa);
+ u_int target_insn=*(u_int *)target;
+ if((target_insn&0x0e100000)==0) { // ALU, no immediate, no flags
+ copy=0;
+ }
+ if((target_insn&0x0c100000)==0x04100000) { // Load
+ copy=0;
+ }
+ if(target_insn&0x08000000) {
+ copy=0;
+ }
+ if(copy) {
+ ptr2[-1]=target_insn;
+ target+=4;
+ }
+ *ptr2=(*ptr2&0xFF000000)|(((target-(u_int)ptr2-8)<<6)>>8);
+ }
+}
+#endif
+
+/* Literal pool */
+static void add_literal(int addr,int val)
+{
+ assert(literalcount<sizeof(literals)/sizeof(literals[0]));
+ literals[literalcount][0]=addr;
+ literals[literalcount][1]=val;
+ literalcount++;
+}
+
+// from a pointer to an external jump stub (produced by emit_extjump2),
+// find where the jumping insn is
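+// Layout relied on here (sketch): the insn at stub+4 is "ldr rX,[pc,#ofs]",
+// and the literal it loads, located at stub+4+8+ofs, holds the address of
+// the branch insn that was patched to jump to this stub.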
+static void *find_extjump_insn(void *stub)
+{
+ int *ptr=(int *)(stub+4);
+ assert((*ptr&0x0fff0000)==0x059f0000); // ldr rx, [pc, #ofs]
+ u_int offset=*ptr&0xfff;
+ void **l_ptr=(void *)ptr+offset+8;
+ return *l_ptr;
+}
+
+// find where an external branch is linked to, using the address of its stub:
+// get address that insn one after stub loads (dyna_linker arg1),
+// treat it as a pointer to branch insn,
+// return addr where that branch jumps to
+static int get_pointer(void *stub)
+{
+ //printf("get_pointer(%x)\n",(int)stub);
+ int *i_ptr=find_extjump_insn(stub);
+ assert((*i_ptr&0x0f000000)==0x0a000000);
+ return (int)i_ptr+((*i_ptr<<8)>>6)+8;
+}
+
+// Find the "clean" entry point from a "dirty" entry point
+// by skipping past the call to verify_code
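+// Layout of a dirty entry point (sketch): a few insns load source, copy and
+// len (from a literal pool pre-ARMv7, via movw/movt pairs on ARMv7), an
+// optional extra insn follows, then "bl verify_code*"; the clean entry is
+// the insn after that bl, or the target of an unconditional branch there.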
+static u_int get_clean_addr(int addr)
+{
+ int *ptr=(int *)addr;
+ #ifndef HAVE_ARMV7
+ ptr+=4;
+ #else
+ ptr+=6;
+ #endif
+ if((*ptr&0xFF000000)!=0xeb000000) ptr++;
+ assert((*ptr&0xFF000000)==0xeb000000); // bl instruction
+ ptr++;
+ if((*ptr&0xFF000000)==0xea000000) {
+ return (int)ptr+((*ptr<<8)>>6)+8; // follow jump
+ }
+ return (u_int)ptr;
+}
+
+static int verify_dirty(u_int *ptr)
+{
+ #ifndef HAVE_ARMV7
+ // get from literal pool
+ assert((*ptr&0xFFFF0000)==0xe59f0000);
+ u_int offset=*ptr&0xfff;
+ u_int *l_ptr=(void *)ptr+offset+8;
+ u_int source=l_ptr[0];
+ u_int copy=l_ptr[1];
+ u_int len=l_ptr[2];
+ ptr+=4;
+ #else
+ // ARMv7 movw/movt
+ assert((*ptr&0xFFF00000)==0xe3000000);
+ u_int source=(ptr[0]&0xFFF)+((ptr[0]>>4)&0xF000)+((ptr[2]<<16)&0xFFF0000)+((ptr[2]<<12)&0xF0000000);
+ u_int copy=(ptr[1]&0xFFF)+((ptr[1]>>4)&0xF000)+((ptr[3]<<16)&0xFFF0000)+((ptr[3]<<12)&0xF0000000);
+ u_int len=(ptr[4]&0xFFF)+((ptr[4]>>4)&0xF000);
+ ptr+=6;
+ #endif
+ if((*ptr&0xFF000000)!=0xeb000000) ptr++;
+ assert((*ptr&0xFF000000)==0xeb000000); // bl instruction
+ //printf("verify_dirty: %x %x %x\n",source,copy,len);
+ return !memcmp((void *)source,(void *)copy,len);
+}
+
+// This doesn't necessarily find all clean entry points, just
+// guarantees that it's not dirty
+static int isclean(int addr)
+{
+ #ifndef HAVE_ARMV7
+ u_int *ptr=((u_int *)addr)+4;
+ #else
+ u_int *ptr=((u_int *)addr)+6;
+ #endif
+ if((*ptr&0xFF000000)!=0xeb000000) ptr++;
+ if((*ptr&0xFF000000)!=0xeb000000) return 1; // bl instruction
+ if((int)ptr+((*ptr<<8)>>6)+8==(int)verify_code) return 0;
+ if((int)ptr+((*ptr<<8)>>6)+8==(int)verify_code_vm) return 0;
+ if((int)ptr+((*ptr<<8)>>6)+8==(int)verify_code_ds) return 0;
+ return 1;
+}
+
+// get source that block at addr was compiled from (host pointers)
+static void get_bounds(int addr,u_int *start,u_int *end)
+{
+ u_int *ptr=(u_int *)addr;
+ #ifndef HAVE_ARMV7
+ // get from literal pool
+ assert((*ptr&0xFFFF0000)==0xe59f0000);
+ u_int offset=*ptr&0xfff;
+ u_int *l_ptr=(void *)ptr+offset+8;
+ u_int source=l_ptr[0];
+ //u_int copy=l_ptr[1];
+ u_int len=l_ptr[2];
+ ptr+=4;
+ #else
+ // ARMv7 movw/movt
+ assert((*ptr&0xFFF00000)==0xe3000000);
+ u_int source=(ptr[0]&0xFFF)+((ptr[0]>>4)&0xF000)+((ptr[2]<<16)&0xFFF0000)+((ptr[2]<<12)&0xF0000000);
+ //u_int copy=(ptr[1]&0xFFF)+((ptr[1]>>4)&0xF000)+((ptr[3]<<16)&0xFFF0000)+((ptr[3]<<12)&0xF0000000);
+ u_int len=(ptr[4]&0xFFF)+((ptr[4]>>4)&0xF000);
+ ptr+=6;
+ #endif
+ if((*ptr&0xFF000000)!=0xeb000000) ptr++;
+ assert((*ptr&0xFF000000)==0xeb000000); // bl instruction
+ *start=source;
+ *end=source+len;
+}
+
+/* Register allocation */
+
+// Note: registers are allocated clean (unmodified state)
+// if you intend to modify the register, you must call dirty_reg().
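+// Typical call pattern in the per-op allocators (sketch):
+//   alloc_reg(current,i,rt1[i]); dirty_reg(current,rt1[i]);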
+static void alloc_reg(struct regstat *cur,int i,signed char reg)
+{
+ int r,hr;
+ int preferred_reg = (reg&7);
+ if(reg==CCREG) preferred_reg=HOST_CCREG;
+ if(reg==PTEMP||reg==FTEMP) preferred_reg=12;
+
+ // Don't allocate unused registers
+ if((cur->u>>reg)&1) return;
+
+ // see if it's already allocated
+ for(hr=0;hr<HOST_REGS;hr++)
+ {
+ if(cur->regmap[hr]==reg) return;
+ }
+
+ // Keep the same mapping if the register was already allocated in a loop
+ preferred_reg = loop_reg(i,reg,preferred_reg);
+
+ // Try to allocate the preferred register
+ if(cur->regmap[preferred_reg]==-1) {
+ cur->regmap[preferred_reg]=reg;
+ cur->dirty&=~(1<<preferred_reg);
+ cur->isconst&=~(1<<preferred_reg);
+ return;
+ }
+ r=cur->regmap[preferred_reg];
+ if(r<64&&((cur->u>>r)&1)) {
+ cur->regmap[preferred_reg]=reg;
+ cur->dirty&=~(1<<preferred_reg);
+ cur->isconst&=~(1<<preferred_reg);
+ return;
+ }
+ if(r>=64&&((cur->uu>>(r&63))&1)) {
+ cur->regmap[preferred_reg]=reg;
+ cur->dirty&=~(1<<preferred_reg);
+ cur->isconst&=~(1<<preferred_reg);
+ return;
+ }
+
+ // Clear any unneeded registers
+ // We try to keep the mapping consistent, if possible, because it
+ // makes branches easier (especially loops). So we try to allocate
+ // first (see above) before removing old mappings. If this is not
+ // possible then go ahead and clear out the registers that are no
+ // longer needed.
+ for(hr=0;hr<HOST_REGS;hr++)
+ {
+ r=cur->regmap[hr];
+ if(r>=0) {
+ if(r<64) {
+ if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
+ }
+ else
+ {
+ if((cur->uu>>(r&63))&1) {cur->regmap[hr]=-1;break;}
+ }
+ }
+ }
+ // Try to allocate any available register, but prefer
+ // registers that have not been used recently.
+ if(i>0) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
+ if(regs[i-1].regmap[hr]!=rs1[i-1]&&regs[i-1].regmap[hr]!=rs2[i-1]&&regs[i-1].regmap[hr]!=rt1[i-1]&&regs[i-1].regmap[hr]!=rt2[i-1]) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ }
+ // Try to allocate any available register
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+
+ // Ok, now we have to evict someone
+ // Pick a register we hopefully won't need soon
+ u_char hsn[MAXREG+1];
+ memset(hsn,10,sizeof(hsn));
+ int j;
+ lsn(hsn,i,&preferred_reg);
+ //printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",cur->regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]);
+ //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
+ if(i>0) {
+ // Don't evict the cycle count at entry points, otherwise the entry
+ // stub will have to write it.
+ if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
+ if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP)) hsn[CCREG]=2;
+ for(j=10;j>=3;j--)
+ {
+ // Alloc preferred register if available
+ if(hsn[r=cur->regmap[preferred_reg]&63]==j) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ // Evict both parts of a 64-bit register
+ if((cur->regmap[hr]&63)==r) {
+ cur->regmap[hr]=-1;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ }
+ }
+ cur->regmap[preferred_reg]=reg;
+ return;
+ }
+ for(r=1;r<=MAXREG;r++)
+ {
+ if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=HOST_CCREG||j<hsn[CCREG]) {
+ if(cur->regmap[hr]==r+64) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=HOST_CCREG||j<hsn[CCREG]) {
+ if(cur->regmap[hr]==r) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ for(j=10;j>=0;j--)
+ {
+ for(r=1;r<=MAXREG;r++)
+ {
+ if(hsn[r]==j) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(cur->regmap[hr]==r+64) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(cur->regmap[hr]==r) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ }
+ }
+ SysPrintf("This shouldn't happen (alloc_reg)");exit(1);
+}
+
+static void alloc_reg64(struct regstat *cur,int i,signed char reg)
+{
+ int preferred_reg = 8+(reg&1);
+ int r,hr;
+
+ // allocate the lower 32 bits
+ alloc_reg(cur,i,reg);
+
+ // Don't allocate unused registers
+ if((cur->uu>>reg)&1) return;
+
+ // see if the upper half is already allocated
+ for(hr=0;hr<HOST_REGS;hr++)
+ {
+ if(cur->regmap[hr]==reg+64) return;
+ }
+
+ // Keep the same mapping if the register was already allocated in a loop
+ preferred_reg = loop_reg(i,reg,preferred_reg);
+
+ // Try to allocate the preferred register
+ if(cur->regmap[preferred_reg]==-1) {
+ cur->regmap[preferred_reg]=reg|64;
+ cur->dirty&=~(1<<preferred_reg);
+ cur->isconst&=~(1<<preferred_reg);
+ return;
+ }
+ r=cur->regmap[preferred_reg];
+ if(r<64&&((cur->u>>r)&1)) {
+ cur->regmap[preferred_reg]=reg|64;
+ cur->dirty&=~(1<<preferred_reg);
+ cur->isconst&=~(1<<preferred_reg);
+ return;
+ }
+ if(r>=64&&((cur->uu>>(r&63))&1)) {
+ cur->regmap[preferred_reg]=reg|64;
+ cur->dirty&=~(1<<preferred_reg);
+ cur->isconst&=~(1<<preferred_reg);
+ return;
+ }
+
+ // Clear any unneeded registers
+ // We try to keep the mapping consistent, if possible, because it
+ // makes branches easier (especially loops). So we try to allocate
+ // first (see above) before removing old mappings. If this is not
+ // possible then go ahead and clear out the registers that are no
+ // longer needed.
+ for(hr=HOST_REGS-1;hr>=0;hr--)
+ {
+ r=cur->regmap[hr];
+ if(r>=0) {
+ if(r<64) {
+ if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
+ }
+ else
+ {
+ if((cur->uu>>(r&63))&1) {cur->regmap[hr]=-1;break;}
+ }
+ }
+ }
+ // Try to allocate any available register, but prefer
+ // registers that have not been used recently.
+ if(i>0) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
+ if(regs[i-1].regmap[hr]!=rs1[i-1]&&regs[i-1].regmap[hr]!=rs2[i-1]&&regs[i-1].regmap[hr]!=rt1[i-1]&&regs[i-1].regmap[hr]!=rt2[i-1]) {
+ cur->regmap[hr]=reg|64;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ }
+ // Try to allocate any available register
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
+ cur->regmap[hr]=reg|64;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+
+ // Ok, now we have to evict someone
+ // Pick a register we hopefully won't need soon
+ u_char hsn[MAXREG+1];
+ memset(hsn,10,sizeof(hsn));
+ int j;
+ lsn(hsn,i,&preferred_reg);
+ //printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",cur->regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]);
+ //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
+ if(i>0) {
+ // Don't evict the cycle count at entry points, otherwise the entry
+ // stub will have to write it.
+ if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
+ if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP)) hsn[CCREG]=2;
+ for(j=10;j>=3;j--)
+ {
+ // Alloc preferred register if available
+ if(hsn[r=cur->regmap[preferred_reg]&63]==j) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ // Evict both parts of a 64-bit register
+ if((cur->regmap[hr]&63)==r) {
+ cur->regmap[hr]=-1;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ }
+ }
+ cur->regmap[preferred_reg]=reg|64;
+ return;
+ }
+ for(r=1;r<=MAXREG;r++)
+ {
+ if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=HOST_CCREG||j<hsn[CCREG]) {
+ if(cur->regmap[hr]==r+64) {
+ cur->regmap[hr]=reg|64;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=HOST_CCREG||j<hsn[CCREG]) {
+ if(cur->regmap[hr]==r) {
+ cur->regmap[hr]=reg|64;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ for(j=10;j>=0;j--)
+ {
+ for(r=1;r<=MAXREG;r++)
+ {
+ if(hsn[r]==j) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(cur->regmap[hr]==r+64) {
+ cur->regmap[hr]=reg|64;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(cur->regmap[hr]==r) {
+ cur->regmap[hr]=reg|64;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ }
+ }
+ SysPrintf("This shouldn't happen");exit(1);
+}
+
+// Allocate a temporary register. This is done without regard to
+// dirty status or whether the register we request is on the unneeded list
+// Note: This will only allocate one register, even if called multiple times
+static void alloc_reg_temp(struct regstat *cur,int i,signed char reg)
+{
+ int r,hr;
+ int preferred_reg = -1;
+
+ // see if it's already allocated
+ for(hr=0;hr<HOST_REGS;hr++)
+ {
+ if(hr!=EXCLUDE_REG&&cur->regmap[hr]==reg) return;
+ }
+
+ // Try to allocate any available register
+ for(hr=HOST_REGS-1;hr>=0;hr--) {
+ if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+
+ // Find an unneeded register
+ for(hr=HOST_REGS-1;hr>=0;hr--)
+ {
+ r=cur->regmap[hr];
+ if(r>=0) {
+ if(r<64) {
+ if((cur->u>>r)&1) {
+ if(i==0||((unneeded_reg[i-1]>>r)&1)) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ else
+ {
+ if((cur->uu>>(r&63))&1) {
+ if(i==0||((unneeded_reg_upper[i-1]>>(r&63))&1)) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ // Ok, now we have to evict someone
+ // Pick a register we hopefully won't need soon
+ // TODO: we might want to follow unconditional jumps here
+ // TODO: get rid of dupe code and make this into a function
+ u_char hsn[MAXREG+1];
+ memset(hsn,10,sizeof(hsn));
+ int j;
+ lsn(hsn,i,&preferred_reg);
+ //printf("hsn: %d %d %d %d %d %d %d\n",hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
+ if(i>0) {
+ // Don't evict the cycle count at entry points, otherwise the entry
+ // stub will have to write it.
+ if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
+ if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP)) hsn[CCREG]=2;
+ for(j=10;j>=3;j--)
+ {
+ for(r=1;r<=MAXREG;r++)
+ {
+ if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=HOST_CCREG||hsn[CCREG]>2) {
+ if(cur->regmap[hr]==r+64) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=HOST_CCREG||hsn[CCREG]>2) {
+ if(cur->regmap[hr]==r) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ for(j=10;j>=0;j--)
+ {
+ for(r=1;r<=MAXREG;r++)
+ {
+ if(hsn[r]==j) {
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(cur->regmap[hr]==r+64) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(cur->regmap[hr]==r) {
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->isconst&=~(1<<hr);
+ return;
+ }
+ }
+ }
+ }
+ }
+ SysPrintf("This shouldn't happen");exit(1);
+}
+
+// Allocate a specific ARM register.
+static void alloc_arm_reg(struct regstat *cur,int i,signed char reg,int hr)
+{
+ int n;
+ int dirty=0;
+
+ // see if it's already allocated (and dealloc it)
+ for(n=0;n<HOST_REGS;n++)
+ {
+ if(n!=EXCLUDE_REG&&cur->regmap[n]==reg) {
+ dirty=(cur->dirty>>n)&1;
+ cur->regmap[n]=-1;
+ }
+ }
+
+ cur->regmap[hr]=reg;
+ cur->dirty&=~(1<<hr);
+ cur->dirty|=dirty<<hr;
+ cur->isconst&=~(1<<hr);
+}
+
+// Alloc cycle count into dedicated register
+static void alloc_cc(struct regstat *cur,int i)
+{
+ alloc_arm_reg(cur,i,CCREG,HOST_CCREG);
+}
+
+/* Special alloc */
+
+
+/* Assembler */
+
+static unused char regname[16][4] = {
+ "r0",
+ "r1",
+ "r2",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "r9",
+ "r10",
+ "fp",
+ "r12",
+ "sp",
+ "lr",
+ "pc"};
+
+static void output_w32(u_int word)
+{
+ *((u_int *)out)=word;
+ out+=4;
+}
+
+static u_int rd_rn_rm(u_int rd, u_int rn, u_int rm)
+{
+ assert(rd<16);
+ assert(rn<16);
+ assert(rm<16);
+ return((rn<<16)|(rd<<12)|rm);
+}
+
+static u_int rd_rn_imm_shift(u_int rd, u_int rn, u_int imm, u_int shift)
+{
+ assert(rd<16);
+ assert(rn<16);
+ assert(imm<256);
+ assert((shift&1)==0);
+ return((rn<<16)|(rd<<12)|(((32-shift)&30)<<7)|imm);
+}
+
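+// ARM data-processing immediates are an 8-bit value rotated right by an even
+// amount (0..30), encoded as rot<<8|imm8. Worked example: 0x3FC00 is 0xFF
+// rotated right by 22, so genimm() returns 1 with *encoded == 0xBFF.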
+static u_int genimm(u_int imm,u_int *encoded)
+{
+ *encoded=0;
+ if(imm==0) return 1;
+ int i=32;
+ while(i>0)
+ {
+ if(imm<256) {
+ *encoded=((i&30)<<7)|imm;
+ return 1;
+ }
+ imm=(imm>>2)|(imm<<30);i-=2;
+ }
+ return 0;
+}
+
+static void genimm_checked(u_int imm,u_int *encoded)
+{
+ u_int ret=genimm(imm,encoded);
+ assert(ret);
+ (void)ret;
+}
+
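+// B/BL take a signed 24-bit word offset relative to pc+8, i.e. +/-32MB.
+// Tiny addr values (<=2) appear to be placeholders and simply yield offset 0,
+// to be patched later via set_jump_target().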
+static u_int genjmp(u_int addr)
+{
+ int offset=addr-(int)out-8;
+ if(offset<-33554432||offset>=33554432) {
+ if (addr>2) {
+ SysPrintf("genjmp: out of range: %08x\n", offset);
+ exit(1);
+ }
+ return 0;
+ }
+ return ((u_int)offset>>2)&0xffffff;
+}
+
+static void emit_mov(int rs,int rt)
+{
+ assem_debug("mov %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0xe1a00000|rd_rn_rm(rt,0,rs));
+}
+
+static void emit_movs(int rs,int rt)
+{
+ assem_debug("movs %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0xe1b00000|rd_rn_rm(rt,0,rs));
+}
+
+static void emit_add(int rs1,int rs2,int rt)
+{
+ assem_debug("add %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe0800000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_adds(int rs1,int rs2,int rt)
+{
+ assem_debug("adds %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe0900000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_adcs(int rs1,int rs2,int rt)
+{
+ assem_debug("adcs %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe0b00000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_sbc(int rs1,int rs2,int rt)
+{
+ assem_debug("sbc %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe0c00000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_sbcs(int rs1,int rs2,int rt)
+{
+ assem_debug("sbcs %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe0d00000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_neg(int rs, int rt)
+{
+ assem_debug("rsb %s,%s,#0\n",regname[rt],regname[rs]);
+ output_w32(0xe2600000|rd_rn_rm(rt,rs,0));
+}
+
+static void emit_negs(int rs, int rt)
+{
+ assem_debug("rsbs %s,%s,#0\n",regname[rt],regname[rs]);
+ output_w32(0xe2700000|rd_rn_rm(rt,rs,0));
+}
+
+static void emit_sub(int rs1,int rs2,int rt)
+{
+ assem_debug("sub %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe0400000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_subs(int rs1,int rs2,int rt)
+{
+ assem_debug("subs %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe0500000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_zeroreg(int rt)
+{
+ assem_debug("mov %s,#0\n",regname[rt]);
+ output_w32(0xe3a00000|rd_rn_rm(rt,0,0));
+}
+
+static void emit_loadlp(u_int imm,u_int rt)
+{
+ add_literal((int)out,imm);
+ assem_debug("ldr %s,pc+? [=%x]\n",regname[rt],imm);
+ output_w32(0xe5900000|rd_rn_rm(rt,15,0));
+}
+
+static void emit_movw(u_int imm,u_int rt)
+{
+ assert(imm<65536);
+ assem_debug("movw %s,#%d (0x%x)\n",regname[rt],imm,imm);
+ output_w32(0xe3000000|rd_rn_rm(rt,0,0)|(imm&0xfff)|((imm<<4)&0xf0000));
+}
+
+static void emit_movt(u_int imm,u_int rt)
+{
+ assem_debug("movt %s,#%d (0x%x)\n",regname[rt],imm&0xffff0000,imm&0xffff0000);
+ output_w32(0xe3400000|rd_rn_rm(rt,0,0)|((imm>>16)&0xfff)|((imm>>12)&0xf0000));
+}
+
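+// Load an arbitrary 32-bit constant: try a single mov/mvn with a rotated
+// immediate, then movw (plus movt) on ARMv7, otherwise fall back to a
+// two-insn 8-bit split for values under 64k or a literal-pool load.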
+static void emit_movimm(u_int imm,u_int rt)
+{
+ u_int armval;
+ if(genimm(imm,&armval)) {
+ assem_debug("mov %s,#%d\n",regname[rt],imm);
+ output_w32(0xe3a00000|rd_rn_rm(rt,0,0)|armval);
+ }else if(genimm(~imm,&armval)) {
+ assem_debug("mvn %s,#%d\n",regname[rt],imm);
+ output_w32(0xe3e00000|rd_rn_rm(rt,0,0)|armval);
+ }else if(imm<65536) {
+ #ifndef HAVE_ARMV7
+ assem_debug("mov %s,#%d\n",regname[rt],imm&0xFF00);
+ output_w32(0xe3a00000|rd_rn_imm_shift(rt,0,imm>>8,8));
+ assem_debug("add %s,%s,#%d\n",regname[rt],regname[rt],imm&0xFF);
+ output_w32(0xe2800000|rd_rn_imm_shift(rt,rt,imm&0xff,0));
+ #else
+ emit_movw(imm,rt);
+ #endif
+ }else{
+ #ifndef HAVE_ARMV7
+ emit_loadlp(imm,rt);
+ #else
+ emit_movw(imm&0x0000FFFF,rt);
+ emit_movt(imm&0xFFFF0000,rt);
+ #endif
+ }
+}
+
+static void emit_pcreladdr(u_int rt)
+{
+ assem_debug("add %s,pc,#?\n",regname[rt]);
+ output_w32(0xe2800000|rd_rn_rm(rt,15,0));
+}
+
+static void emit_loadreg(int r, int hr)
+{
+ if(r&64) {
+ SysPrintf("64bit load in 32bit mode!\n");
+ assert(0);
+ return;
+ }
+ if((r&63)==0)
+ emit_zeroreg(hr);
+ else {
+ int addr=((int)reg)+((r&63)<<REG_SHIFT)+((r&64)>>4);
+ if((r&63)==HIREG) addr=(int)&hi+((r&64)>>4);
+ if((r&63)==LOREG) addr=(int)&lo+((r&64)>>4);
+ if(r==CCREG) addr=(int)&cycle_count;
+ if(r==CSREG) addr=(int)&Status;
+ if(r==FSREG) addr=(int)&FCR31;
+ if(r==INVCP) addr=(int)&invc_ptr;
+ u_int offset = addr-(u_int)&dynarec_local;
+ assert(offset<4096);
+ assem_debug("ldr %s,fp+%d\n",regname[hr],offset);
+ output_w32(0xe5900000|rd_rn_rm(hr,FP,0)|offset);
+ }
+}
+
+static void emit_storereg(int r, int hr)
+{
+ if(r&64) {
+ SysPrintf("64bit store in 32bit mode!\n");
+ assert(0);
+ return;
+ }
+ int addr=((int)reg)+((r&63)<<REG_SHIFT)+((r&64)>>4);
+ if((r&63)==HIREG) addr=(int)&hi+((r&64)>>4);
+ if((r&63)==LOREG) addr=(int)&lo+((r&64)>>4);
+ if(r==CCREG) addr=(int)&cycle_count;
+ if(r==FSREG) addr=(int)&FCR31;
+ u_int offset = addr-(u_int)&dynarec_local;
+ assert(offset<4096);
+ assem_debug("str %s,fp+%d\n",regname[hr],offset);
+ output_w32(0xe5800000|rd_rn_rm(hr,FP,0)|offset);
+}
+
+static void emit_test(int rs, int rt)
+{
+ assem_debug("tst %s,%s\n",regname[rs],regname[rt]);
+ output_w32(0xe1100000|rd_rn_rm(0,rs,rt));
+}
+
+static void emit_testimm(int rs,int imm)
+{
+ u_int armval;
+ assem_debug("tst %s,#%d\n",regname[rs],imm);
+ genimm_checked(imm,&armval);
+ output_w32(0xe3100000|rd_rn_rm(0,rs,0)|armval);
+}
+
+static void emit_testeqimm(int rs,int imm)
+{
+ u_int armval;
+ assem_debug("tsteq %s,$%d\n",regname[rs],imm);
+ genimm_checked(imm,&armval);
+ output_w32(0x03100000|rd_rn_rm(0,rs,0)|armval);
+}
+
+static void emit_not(int rs,int rt)
+{
+ assem_debug("mvn %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0xe1e00000|rd_rn_rm(rt,0,rs));
+}
+
+static void emit_mvnmi(int rs,int rt)
+{
+ assem_debug("mvnmi %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0x41e00000|rd_rn_rm(rt,0,rs));
+}
+
+static void emit_and(u_int rs1,u_int rs2,u_int rt)
+{
+ assem_debug("and %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe0000000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_or(u_int rs1,u_int rs2,u_int rt)
+{
+ assem_debug("orr %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe1800000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_or_and_set_flags(int rs1,int rs2,int rt)
+{
+ assem_debug("orrs %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe1900000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_orrshl_imm(u_int rs,u_int imm,u_int rt)
+{
+ assert(rs<16);
+ assert(rt<16);
+ assert(imm<32);
+ assem_debug("orr %s,%s,%s,lsl #%d\n",regname[rt],regname[rt],regname[rs],imm);
+ output_w32(0xe1800000|rd_rn_rm(rt,rt,rs)|(imm<<7));
+}
+
+static void emit_orrshr_imm(u_int rs,u_int imm,u_int rt)
+{
+ assert(rs<16);
+ assert(rt<16);
+ assert(imm<32);
+ assem_debug("orr %s,%s,%s,lsr #%d\n",regname[rt],regname[rt],regname[rs],imm);
+ output_w32(0xe1800020|rd_rn_rm(rt,rt,rs)|(imm<<7));
+}
+
+static void emit_xor(u_int rs1,u_int rs2,u_int rt)
+{
+ assem_debug("eor %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe0200000|rd_rn_rm(rt,rs1,rs2));
+}
+
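+// Add a 32-bit constant; if neither imm nor -imm encodes as a rotated
+// immediate, the constant is split into encodable chunks, e.g. adding
+// 0x12345 becomes add #0x45; add #0x2300; add #0x10000 (three insns).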
+static void emit_addimm(u_int rs,int imm,u_int rt)
+{
+ assert(rs<16);
+ assert(rt<16);
+ if(imm!=0) {
+ u_int armval;
+ if(genimm(imm,&armval)) {
+ assem_debug("add %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe2800000|rd_rn_rm(rt,rs,0)|armval);
+ }else if(genimm(-imm,&armval)) {
+ assem_debug("sub %s,%s,#%d\n",regname[rt],regname[rs],-imm);
+ output_w32(0xe2400000|rd_rn_rm(rt,rs,0)|armval);
+ #ifdef HAVE_ARMV7
+ }else if(rt!=rs&&(u_int)imm<65536) {
+ emit_movw(imm&0x0000ffff,rt);
+ emit_add(rs,rt,rt);
+ }else if(rt!=rs&&(u_int)-imm<65536) {
+ emit_movw(-imm&0x0000ffff,rt);
+ emit_sub(rs,rt,rt);
+ #endif
+ }else if((u_int)-imm<65536) {
+ assem_debug("sub %s,%s,#%d\n",regname[rt],regname[rs],(-imm)&0xFF00);
+ assem_debug("sub %s,%s,#%d\n",regname[rt],regname[rt],(-imm)&0xFF);
+ output_w32(0xe2400000|rd_rn_imm_shift(rt,rs,(-imm)>>8,8));
+ output_w32(0xe2400000|rd_rn_imm_shift(rt,rt,(-imm)&0xff,0));
+ }else {
+ do {
+ int shift = (ffs(imm) - 1) & ~1;
+ int imm8 = imm & (0xff << shift);
+ genimm_checked(imm8,&armval);
+ assem_debug("add %s,%s,#0x%x\n",regname[rt],regname[rs],imm8);
+ output_w32(0xe2800000|rd_rn_rm(rt,rs,0)|armval);
+ rs = rt;
+ imm &= ~imm8;
+ }
+ while (imm != 0);
+ }
+ }
+ else if(rs!=rt) emit_mov(rs,rt);
+}
+
+static void emit_addimm_and_set_flags(int imm,int rt)
+{
+ assert(imm>-65536&&imm<65536);
+ u_int armval;
+ if(genimm(imm,&armval)) {
+ assem_debug("adds %s,%s,#%d\n",regname[rt],regname[rt],imm);
+ output_w32(0xe2900000|rd_rn_rm(rt,rt,0)|armval);
+ }else if(genimm(-imm,&armval)) {
+ assem_debug("subs %s,%s,#%d\n",regname[rt],regname[rt],imm);
+ output_w32(0xe2500000|rd_rn_rm(rt,rt,0)|armval);
+ }else if(imm<0) {
+ assem_debug("sub %s,%s,#%d\n",regname[rt],regname[rt],(-imm)&0xFF00);
+ assem_debug("subs %s,%s,#%d\n",regname[rt],regname[rt],(-imm)&0xFF);
+ output_w32(0xe2400000|rd_rn_imm_shift(rt,rt,(-imm)>>8,8));
+ output_w32(0xe2500000|rd_rn_imm_shift(rt,rt,(-imm)&0xff,0));
+ }else{
+ assem_debug("add %s,%s,#%d\n",regname[rt],regname[rt],imm&0xFF00);
+ assem_debug("adds %s,%s,#%d\n",regname[rt],regname[rt],imm&0xFF);
+ output_w32(0xe2800000|rd_rn_imm_shift(rt,rt,imm>>8,8));
+ output_w32(0xe2900000|rd_rn_imm_shift(rt,rt,imm&0xff,0));
+ }
+}
+
+static void emit_addimm_no_flags(u_int imm,u_int rt)
+{
+ emit_addimm(rt,imm,rt);
+}
+
+static void emit_addnop(u_int r)
+{
+ assert(r<16);
+ assem_debug("add %s,%s,#0 (nop)\n",regname[r],regname[r]);
+ output_w32(0xe2800000|rd_rn_rm(r,r,0));
+}
+
+static void emit_adcimm(u_int rs,int imm,u_int rt)
+{
+ u_int armval;
+ genimm_checked(imm,&armval);
+ assem_debug("adc %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe2a00000|rd_rn_rm(rt,rs,0)|armval);
+}
+
+static void emit_rscimm(int rs,int imm,u_int rt)
+{
+ assert(0);
+ u_int armval;
+ genimm_checked(imm,&armval);
+ assem_debug("rsc %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe2e00000|rd_rn_rm(rt,rs,0)|armval);
+}
+
+static void emit_addimm64_32(int rsh,int rsl,int imm,int rth,int rtl)
+{
+ // TODO: if(genimm(imm,&armval)) ...
+ // else
+ emit_movimm(imm,HOST_TEMPREG);
+ emit_adds(HOST_TEMPREG,rsl,rtl);
+ emit_adcimm(rsh,0,rth);
+}
+
+static void emit_andimm(int rs,int imm,int rt)
+{
+ u_int armval;
+ if(imm==0) {
+ emit_zeroreg(rt);
+ }else if(genimm(imm,&armval)) {
+ assem_debug("and %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe2000000|rd_rn_rm(rt,rs,0)|armval);
+ }else if(genimm(~imm,&armval)) {
+ assem_debug("bic %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe3c00000|rd_rn_rm(rt,rs,0)|armval);
+ }else if(imm==65535) {
+ #ifndef HAVE_ARMV6
+ assem_debug("bic %s,%s,#FF000000\n",regname[rt],regname[rs]);
+ output_w32(0xe3c00000|rd_rn_rm(rt,rs,0)|0x4FF);
+ assem_debug("bic %s,%s,#00FF0000\n",regname[rt],regname[rt]);
+ output_w32(0xe3c00000|rd_rn_rm(rt,rt,0)|0x8FF);
+ #else
+ assem_debug("uxth %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0xe6ff0070|rd_rn_rm(rt,0,rs));
+ #endif
+ }else{
+ assert(imm>0&&imm<65535);
+ #ifndef HAVE_ARMV7
+ assem_debug("mov r14,#%d\n",imm&0xFF00);
+ output_w32(0xe3a00000|rd_rn_imm_shift(HOST_TEMPREG,0,imm>>8,8));
+ assem_debug("add r14,r14,#%d\n",imm&0xFF);
+ output_w32(0xe2800000|rd_rn_imm_shift(HOST_TEMPREG,HOST_TEMPREG,imm&0xff,0));
+ #else
+ emit_movw(imm,HOST_TEMPREG);
+ #endif
+ assem_debug("and %s,%s,r14\n",regname[rt],regname[rs]);
+ output_w32(0xe0000000|rd_rn_rm(rt,rs,HOST_TEMPREG));
+ }
+}
+
+static void emit_orimm(int rs,int imm,int rt)
+{
+ u_int armval;
+ if(imm==0) {
+ if(rs!=rt) emit_mov(rs,rt);
+ }else if(genimm(imm,&armval)) {
+ assem_debug("orr %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe3800000|rd_rn_rm(rt,rs,0)|armval);
+ }else{
+ assert(imm>0&&imm<65536);
+ assem_debug("orr %s,%s,#%d\n",regname[rt],regname[rs],imm&0xFF00);
+ assem_debug("orr %s,%s,#%d\n",regname[rt],regname[rs],imm&0xFF);
+ output_w32(0xe3800000|rd_rn_imm_shift(rt,rs,imm>>8,8));
+ output_w32(0xe3800000|rd_rn_imm_shift(rt,rt,imm&0xff,0));
+ }
+}
+
+static void emit_xorimm(int rs,int imm,int rt)
+{
+ u_int armval;
+ if(imm==0) {
+ if(rs!=rt) emit_mov(rs,rt);
+ }else if(genimm(imm,&armval)) {
+ assem_debug("eor %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe2200000|rd_rn_rm(rt,rs,0)|armval);
+ }else{
+ assert(imm>0&&imm<65536);
+ assem_debug("eor %s,%s,#%d\n",regname[rt],regname[rs],imm&0xFF00);
+ assem_debug("eor %s,%s,#%d\n",regname[rt],regname[rs],imm&0xFF);
+ output_w32(0xe2200000|rd_rn_imm_shift(rt,rs,imm>>8,8));
+ output_w32(0xe2200000|rd_rn_imm_shift(rt,rt,imm&0xff,0));
+ }
+}
+
+static void emit_shlimm(int rs,u_int imm,int rt)
+{
+ assert(imm>0);
+ assert(imm<32);
+ //if(imm==1) ...
+ assem_debug("lsl %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|(imm<<7));
+}
+
+static void emit_lsls_imm(int rs,int imm,int rt)
+{
+ assert(imm>0);
+ assert(imm<32);
+ assem_debug("lsls %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe1b00000|rd_rn_rm(rt,0,rs)|(imm<<7));
+}
+
+static unused void emit_lslpls_imm(int rs,int imm,int rt)
+{
+ assert(imm>0);
+ assert(imm<32);
+ assem_debug("lslpls %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x51b00000|rd_rn_rm(rt,0,rs)|(imm<<7));
+}
+
+static void emit_shrimm(int rs,u_int imm,int rt)
+{
+ assert(imm>0);
+ assert(imm<32);
+ assem_debug("lsr %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x20|(imm<<7));
+}
+
+static void emit_sarimm(int rs,u_int imm,int rt)
+{
+ assert(imm>0);
+ assert(imm<32);
+ assem_debug("asr %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x40|(imm<<7));
+}
+
+static void emit_rorimm(int rs,u_int imm,int rt)
+{
+ assert(imm>0);
+ assert(imm<32);
+ assem_debug("ror %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x60|(imm<<7));
+}
+
+static void emit_shldimm(int rs,int rs2,u_int imm,int rt)
+{
+ assem_debug("shld %%%s,%%%s,%d\n",regname[rt],regname[rs2],imm);
+ assert(imm>0);
+ assert(imm<32);
+ //if(imm==1) ...
+ assem_debug("lsl %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|(imm<<7));
+ assem_debug("orr %s,%s,%s,lsr #%d\n",regname[rt],regname[rt],regname[rs2],32-imm);
+ output_w32(0xe1800020|rd_rn_rm(rt,rt,rs2)|((32-imm)<<7));
+}
+
+static void emit_shrdimm(int rs,int rs2,u_int imm,int rt)
+{
+ assem_debug("shrd %%%s,%%%s,%d\n",regname[rt],regname[rs2],imm);
+ assert(imm>0);
+ assert(imm<32);
+ //if(imm==1) ...
+ assem_debug("lsr %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe1a00020|rd_rn_rm(rt,0,rs)|(imm<<7));
+ assem_debug("orr %s,%s,%s,lsl #%d\n",regname[rt],regname[rt],regname[rs2],32-imm);
+ output_w32(0xe1800000|rd_rn_rm(rt,rt,rs2)|((32-imm)<<7));
+}
+
+static void emit_signextend16(int rs,int rt)
+{
+ #ifndef HAVE_ARMV6
+ emit_shlimm(rs,16,rt);
+ emit_sarimm(rt,16,rt);
+ #else
+ assem_debug("sxth %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0xe6bf0070|rd_rn_rm(rt,0,rs));
+ #endif
+}
+
+static void emit_signextend8(int rs,int rt)
+{
+ #ifndef HAVE_ARMV6
+ emit_shlimm(rs,24,rt);
+ emit_sarimm(rt,24,rt);
+ #else
+ assem_debug("sxtb %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0xe6af0070|rd_rn_rm(rt,0,rs));
+ #endif
+}
+
+static void emit_shl(u_int rs,u_int shift,u_int rt)
+{
+ assert(rs<16);
+ assert(rt<16);
+ assert(shift<16);
+ //if(imm==1) ...
+ assem_debug("lsl %s,%s,%s\n",regname[rt],regname[rs],regname[shift]);
+ output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x10|(shift<<8));
+}
+
+static void emit_shr(u_int rs,u_int shift,u_int rt)
+{
+ assert(rs<16);
+ assert(rt<16);
+ assert(shift<16);
+ assem_debug("lsr %s,%s,%s\n",regname[rt],regname[rs],regname[shift]);
+ output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x30|(shift<<8));
+}
+
+static void emit_sar(u_int rs,u_int shift,u_int rt)
+{
+ assert(rs<16);
+ assert(rt<16);
+ assert(shift<16);
+ assem_debug("asr %s,%s,%s\n",regname[rt],regname[rs],regname[shift]);
+ output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x50|(shift<<8));
+}
+
+static void emit_orrshl(u_int rs,u_int shift,u_int rt)
+{
+ assert(rs<16);
+ assert(rt<16);
+ assert(shift<16);
+ assem_debug("orr %s,%s,%s,lsl %s\n",regname[rt],regname[rt],regname[rs],regname[shift]);
+ output_w32(0xe1800000|rd_rn_rm(rt,rt,rs)|0x10|(shift<<8));
+}
+
+static void emit_orrshr(u_int rs,u_int shift,u_int rt)
+{
+ assert(rs<16);
+ assert(rt<16);
+ assert(shift<16);
+ assem_debug("orr %s,%s,%s,lsr %s\n",regname[rt],regname[rt],regname[rs],regname[shift]);
+ output_w32(0xe1800000|rd_rn_rm(rt,rt,rs)|0x30|(shift<<8));
+}
+
+static void emit_cmpimm(int rs,int imm)
+{
+ u_int armval;
+ if(genimm(imm,&armval)) {
+ assem_debug("cmp %s,#%d\n",regname[rs],imm);
+ output_w32(0xe3500000|rd_rn_rm(0,rs,0)|armval);
+ }else if(genimm(-imm,&armval)) {
+ assem_debug("cmn %s,#%d\n",regname[rs],imm);
+ output_w32(0xe3700000|rd_rn_rm(0,rs,0)|armval);
+ }else if(imm>0) {
+ assert(imm<65536);
+ emit_movimm(imm,HOST_TEMPREG);
+ assem_debug("cmp %s,r14\n",regname[rs]);
+ output_w32(0xe1500000|rd_rn_rm(0,rs,HOST_TEMPREG));
+ }else{
+ assert(imm>-65536);
+ emit_movimm(-imm,HOST_TEMPREG);
+ assem_debug("cmn %s,r14\n",regname[rs]);
+ output_w32(0xe1700000|rd_rn_rm(0,rs,HOST_TEMPREG));
+ }
+}
+
+static void emit_cmovne_imm(int imm,int rt)
+{
+ assem_debug("movne %s,#%d\n",regname[rt],imm);
+ u_int armval;
+ genimm_checked(imm,&armval);
+ output_w32(0x13a00000|rd_rn_rm(rt,0,0)|armval);
+}
+
+static void emit_cmovl_imm(int imm,int rt)
+{
+ assem_debug("movlt %s,#%d\n",regname[rt],imm);
+ u_int armval;
+ genimm_checked(imm,&armval);
+ output_w32(0xb3a00000|rd_rn_rm(rt,0,0)|armval);
+}
+
+static void emit_cmovb_imm(int imm,int rt)
+{
+ assem_debug("movcc %s,#%d\n",regname[rt],imm);
+ u_int armval;
+ genimm_checked(imm,&armval);
+ output_w32(0x33a00000|rd_rn_rm(rt,0,0)|armval);
+}
+
+static void emit_cmovs_imm(int imm,int rt)
+{
+ assem_debug("movmi %s,#%d\n",regname[rt],imm);
+ u_int armval;
+ genimm_checked(imm,&armval);
+ output_w32(0x43a00000|rd_rn_rm(rt,0,0)|armval);
+}
+
+static void emit_cmove_reg(int rs,int rt)
+{
+ assem_debug("moveq %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0x01a00000|rd_rn_rm(rt,0,rs));
+}
+
+static void emit_cmovne_reg(int rs,int rt)
+{
+ assem_debug("movne %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0x11a00000|rd_rn_rm(rt,0,rs));
+}
+
+static void emit_cmovl_reg(int rs,int rt)
+{
+ assem_debug("movlt %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0xb1a00000|rd_rn_rm(rt,0,rs));
+}
+
+static void emit_cmovs_reg(int rs,int rt)
+{
+ assem_debug("movmi %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0x41a00000|rd_rn_rm(rt,0,rs));
+}
+
+static void emit_slti32(int rs,int imm,int rt)
+{
+ if(rs!=rt) emit_zeroreg(rt);
+ emit_cmpimm(rs,imm);
+ if(rs==rt) emit_movimm(0,rt);
+ emit_cmovl_imm(1,rt);
+}
+
+static void emit_sltiu32(int rs,int imm,int rt)
+{
+ if(rs!=rt) emit_zeroreg(rt);
+ emit_cmpimm(rs,imm);
+ if(rs==rt) emit_movimm(0,rt);
+ emit_cmovb_imm(1,rt);
+}
+
+static void emit_slti64_32(int rsh,int rsl,int imm,int rt)
+{
+ assert(rsh!=rt);
+ emit_slti32(rsl,imm,rt);
+ if(imm>=0)
+ {
+ emit_test(rsh,rsh);
+ emit_cmovne_imm(0,rt);
+ emit_cmovs_imm(1,rt);
+ }
+ else
+ {
+ emit_cmpimm(rsh,-1);
+ emit_cmovne_imm(0,rt);
+ emit_cmovl_imm(1,rt);
+ }
+}
+
+static void emit_sltiu64_32(int rsh,int rsl,int imm,int rt)
+{
+ assert(rsh!=rt);
+ emit_sltiu32(rsl,imm,rt);
+ if(imm>=0)
+ {
+ emit_test(rsh,rsh);
+ emit_cmovne_imm(0,rt);
+ }
+ else
+ {
+ emit_cmpimm(rsh,-1);
+ emit_cmovne_imm(1,rt);
+ }
+}
+
+static void emit_cmp(int rs,int rt)
+{
+ assem_debug("cmp %s,%s\n",regname[rs],regname[rt]);
+ output_w32(0xe1500000|rd_rn_rm(0,rs,rt));
+}
+
+static void emit_set_gz32(int rs, int rt)
+{
+ //assem_debug("set_gz32\n");
+ emit_cmpimm(rs,1);
+ emit_movimm(1,rt);
+ emit_cmovl_imm(0,rt);
+}
+
+static void emit_set_nz32(int rs, int rt)
+{
+ //assem_debug("set_nz32\n");
+ if(rs!=rt) emit_movs(rs,rt);
+ else emit_test(rs,rs);
+ emit_cmovne_imm(1,rt);
+}
+
+static void emit_set_gz64_32(int rsh, int rsl, int rt)
+{
+ //assem_debug("set_gz64\n");
+ emit_set_gz32(rsl,rt);
+ emit_test(rsh,rsh);
+ emit_cmovne_imm(1,rt);
+ emit_cmovs_imm(0,rt);
+}
+
+static void emit_set_nz64_32(int rsh, int rsl, int rt)
+{
+ //assem_debug("set_nz64\n");
+ emit_or_and_set_flags(rsh,rsl,rt);
+ emit_cmovne_imm(1,rt);
+}
+
+static void emit_set_if_less32(int rs1, int rs2, int rt)
+{
+ //assem_debug("set if less (%%%s,%%%s),%%%s\n",regname[rs1],regname[rs2],regname[rt]);
+ if(rs1!=rt&&rs2!=rt) emit_zeroreg(rt);
+ emit_cmp(rs1,rs2);
+ if(rs1==rt||rs2==rt) emit_movimm(0,rt);
+ emit_cmovl_imm(1,rt);
+}
+
+static void emit_set_if_carry32(int rs1, int rs2, int rt)
+{
+ //assem_debug("set if carry (%%%s,%%%s),%%%s\n",regname[rs1],regname[rs2],regname[rt]);
+ if(rs1!=rt&&rs2!=rt) emit_zeroreg(rt);
+ emit_cmp(rs1,rs2);
+ if(rs1==rt||rs2==rt) emit_movimm(0,rt);
+ emit_cmovb_imm(1,rt);
+}
+
+static void emit_set_if_less64_32(int u1, int l1, int u2, int l2, int rt)
+{
+ //assem_debug("set if less64 (%%%s,%%%s,%%%s,%%%s),%%%s\n",regname[u1],regname[l1],regname[u2],regname[l2],regname[rt]);
+ assert(u1!=rt);
+ assert(u2!=rt);
+ emit_cmp(l1,l2);
+ emit_movimm(0,rt);
+ emit_sbcs(u1,u2,HOST_TEMPREG);
+ emit_cmovl_imm(1,rt);
+}
+
+static void emit_set_if_carry64_32(int u1, int l1, int u2, int l2, int rt)
+{
+ //assem_debug("set if carry64 (%%%s,%%%s,%%%s,%%%s),%%%s\n",regname[u1],regname[l1],regname[u2],regname[l2],regname[rt]);
+ assert(u1!=rt);
+ assert(u2!=rt);
+ emit_cmp(l1,l2);
+ emit_movimm(0,rt);
+ emit_sbcs(u1,u2,HOST_TEMPREG);
+ emit_cmovb_imm(1,rt);
+}
+
+static void emit_call(int a)
+{
+ assem_debug("bl %x (%x+%x)\n",a,(int)out,a-(int)out-8);
+ u_int offset=genjmp(a);
+ output_w32(0xeb000000|offset);
+}
+
+static void emit_jmp(int a)
+{
+ assem_debug("b %x (%x+%x)\n",a,(int)out,a-(int)out-8);
+ u_int offset=genjmp(a);
+ output_w32(0xea000000|offset);
+}
+
+static void emit_jne(int a)
+{
+ assem_debug("bne %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0x1a000000|offset);
+}
+
+static void emit_jeq(int a)
+{
+ assem_debug("beq %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0x0a000000|offset);
+}
+
+static void emit_js(int a)
+{
+ assem_debug("bmi %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0x4a000000|offset);
+}
+
+static void emit_jns(int a)
+{
+ assem_debug("bpl %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0x5a000000|offset);
+}
+
+static void emit_jl(int a)
+{
+ assem_debug("blt %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0xba000000|offset);
+}
+
+static void emit_jge(int a)
+{
+ assem_debug("bge %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0xaa000000|offset);
+}
+
+static void emit_jno(int a)
+{
+ assem_debug("bvc %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0x7a000000|offset);
+}
+
+static void emit_jc(int a)
+{
+ assem_debug("bcs %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0x2a000000|offset);
+}
+
+static void emit_jcc(int a)
+{
+ assem_debug("bcc %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0x3a000000|offset);
+}
+
+static void emit_callreg(u_int r)
+{
+ assert(r<15);
+ assem_debug("blx %s\n",regname[r]);
+ output_w32(0xe12fff30|r);
+}
+
+static void emit_jmpreg(u_int r)
+{
+ assem_debug("mov pc,%s\n",regname[r]);
+ output_w32(0xe1a00000|rd_rn_rm(15,0,r));
+}
+
+static void emit_readword_indexed(int offset, int rs, int rt)
+{
+ assert(offset>-4096&&offset<4096);
+ assem_debug("ldr %s,%s+%d\n",regname[rt],regname[rs],offset);
+ if(offset>=0) {
+ output_w32(0xe5900000|rd_rn_rm(rt,rs,0)|offset);
+ }else{
+ output_w32(0xe5100000|rd_rn_rm(rt,rs,0)|(-offset));
+ }
+}
+
+static void emit_readword_dualindexedx4(int rs1, int rs2, int rt)
+{
+ assem_debug("ldr %s,%s,%s lsl #2\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe7900000|rd_rn_rm(rt,rs1,rs2)|0x100);
+}
+
+static void emit_ldrcc_dualindexed(int rs1, int rs2, int rt)
+{
+ assem_debug("ldrcc %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x37900000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_ldrccb_dualindexed(int rs1, int rs2, int rt)
+{
+ assem_debug("ldrccb %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x37d00000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_ldrccsb_dualindexed(int rs1, int rs2, int rt)
+{
+ assem_debug("ldrccsb %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x319000d0|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_ldrcch_dualindexed(int rs1, int rs2, int rt)
+{
+ assem_debug("ldrcch %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x319000b0|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_ldrccsh_dualindexed(int rs1, int rs2, int rt)
+{
+ assem_debug("ldrccsh %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x319000f0|rd_rn_rm(rt,rs1,rs2));
+}
+
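+// *_indexed_tlb helpers: map<0 means a direct, plain indexed access;
+// otherwise 'map' is a host reg holding a memory-map value and the access
+// is dual-indexed as [rs, map, lsl #2] (sketch based on the emitters below).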
+static void emit_readword_indexed_tlb(int addr, int rs, int map, int rt)
+{
+ if(map<0) emit_readword_indexed(addr, rs, rt);
+ else {
+ assert(addr==0);
+ emit_readword_dualindexedx4(rs, map, rt);
+ }
+}
+
+static void emit_readdword_indexed_tlb(int addr, int rs, int map, int rh, int rl)
+{
+ if(map<0) {
+ if(rh>=0) emit_readword_indexed(addr, rs, rh);
+ emit_readword_indexed(addr+4, rs, rl);
+ }else{
+ assert(rh!=rs);
+ if(rh>=0) emit_readword_indexed_tlb(addr, rs, map, rh);
+ emit_addimm(map,1,map);
+ emit_readword_indexed_tlb(addr, rs, map, rl);
+ }
+}
+
+static void emit_movsbl_indexed(int offset, int rs, int rt)
+{
+ assert(offset>-256&&offset<256);
+ assem_debug("ldrsb %s,%s+%d\n",regname[rt],regname[rs],offset);
+ if(offset>=0) {
+ output_w32(0xe1d000d0|rd_rn_rm(rt,rs,0)|((offset<<4)&0xf00)|(offset&0xf));
+ }else{
+ output_w32(0xe15000d0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf));
+ }
+}
+
+static void emit_movsbl_indexed_tlb(int addr, int rs, int map, int rt)
+{
+ if(map<0) emit_movsbl_indexed(addr, rs, rt);
+ else {
+ if(addr==0) {
+ emit_shlimm(map,2,map);
+ assem_debug("ldrsb %s,%s+%s\n",regname[rt],regname[rs],regname[map]);
+ output_w32(0xe19000d0|rd_rn_rm(rt,rs,map));
+ }else{
+ assert(addr>-256&&addr<256);
+ assem_debug("add %s,%s,%s,lsl #2\n",regname[rt],regname[rs],regname[map]);
+ output_w32(0xe0800000|rd_rn_rm(rt,rs,map)|(2<<7));
+ emit_movsbl_indexed(addr, rt, rt);
+ }
+ }
+}
+
+static void emit_movswl_indexed(int offset, int rs, int rt)
+{
+ assert(offset>-256&&offset<256);
+ assem_debug("ldrsh %s,%s+%d\n",regname[rt],regname[rs],offset);
+ if(offset>=0) {
+ output_w32(0xe1d000f0|rd_rn_rm(rt,rs,0)|((offset<<4)&0xf00)|(offset&0xf));
+ }else{
+ output_w32(0xe15000f0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf));
+ }
+}
+
+static void emit_movzbl_indexed(int offset, int rs, int rt)
+{
+ assert(offset>-4096&&offset<4096);
+ assem_debug("ldrb %s,%s+%d\n",regname[rt],regname[rs],offset);
+ if(offset>=0) {
+ output_w32(0xe5d00000|rd_rn_rm(rt,rs,0)|offset);
+ }else{
+ output_w32(0xe5500000|rd_rn_rm(rt,rs,0)|(-offset));
+ }
+}
+
+static void emit_movzbl_dualindexedx4(int rs1, int rs2, int rt)
+{
+ assem_debug("ldrb %s,%s,%s lsl #2\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe7d00000|rd_rn_rm(rt,rs1,rs2)|0x100);
+}
+
+static void emit_movzbl_indexed_tlb(int addr, int rs, int map, int rt)
+{
+ if(map<0) emit_movzbl_indexed(addr, rs, rt);
+ else {
+ if(addr==0) {
+ emit_movzbl_dualindexedx4(rs, map, rt);
+ }else{
+ emit_addimm(rs,addr,rt);
+ emit_movzbl_dualindexedx4(rt, map, rt);
+ }
+ }
+}
+
+static void emit_movzwl_indexed(int offset, int rs, int rt)
+{
+ assert(offset>-256&&offset<256);
+ assem_debug("ldrh %s,%s+%d\n",regname[rt],regname[rs],offset);
+ if(offset>=0) {
+ output_w32(0xe1d000b0|rd_rn_rm(rt,rs,0)|((offset<<4)&0xf00)|(offset&0xf));
+ }else{
+ output_w32(0xe15000b0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf));
+ }
+}
+
+static void emit_ldrd(int offset, int rs, int rt)
+{
+ assert(offset>-256&&offset<256);
+ assem_debug("ldrd %s,%s+%d\n",regname[rt],regname[rs],offset);
+ if(offset>=0) {
+ output_w32(0xe1c000d0|rd_rn_rm(rt,rs,0)|((offset<<4)&0xf00)|(offset&0xf));
+ }else{
+ output_w32(0xe14000d0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf));
+ }
+}
+
+static void emit_readword(int addr, int rt)
+{
+ u_int offset = addr-(u_int)&dynarec_local;
+ assert(offset<4096);
+ assem_debug("ldr %s,fp+%d\n",regname[rt],offset);
+ output_w32(0xe5900000|rd_rn_rm(rt,FP,0)|offset);
+}
+
+static unused void emit_movsbl(int addr, int rt)
+{
+ u_int offset = addr-(u_int)&dynarec_local;
+ assert(offset<256);
+ assem_debug("ldrsb %s,fp+%d\n",regname[rt],offset);
+ output_w32(0xe1d000d0|rd_rn_rm(rt,FP,0)|((offset<<4)&0xf00)|(offset&0xf));
+}
+
+static unused void emit_movswl(int addr, int rt)
+{
+ u_int offset = addr-(u_int)&dynarec_local;
+ assert(offset<256);
+ assem_debug("ldrsh %s,fp+%d\n",regname[rt],offset);
+ output_w32(0xe1d000f0|rd_rn_rm(rt,FP,0)|((offset<<4)&0xf00)|(offset&0xf));
+}
+
+static unused void emit_movzbl(int addr, int rt)
+{
+ u_int offset = addr-(u_int)&dynarec_local;
+ assert(offset<4096);
+ assem_debug("ldrb %s,fp+%d\n",regname[rt],offset);
+ output_w32(0xe5d00000|rd_rn_rm(rt,FP,0)|offset);
+}
+
+static unused void emit_movzwl(int addr, int rt)
+{
+ u_int offset = addr-(u_int)&dynarec_local;
+ assert(offset<256);
+ assem_debug("ldrh %s,fp+%d\n",regname[rt],offset);
+ output_w32(0xe1d000b0|rd_rn_rm(rt,FP,0)|((offset<<4)&0xf00)|(offset&0xf));
+}
+
+static void emit_writeword_indexed(int rt, int offset, int rs)
+{
+ assert(offset>-4096&&offset<4096);
+ assem_debug("str %s,%s+%d\n",regname[rt],regname[rs],offset);
+ if(offset>=0) {
+ output_w32(0xe5800000|rd_rn_rm(rt,rs,0)|offset);
+ }else{
+ output_w32(0xe5000000|rd_rn_rm(rt,rs,0)|(-offset));
+ }
+}
+
+static void emit_writeword_dualindexedx4(int rt, int rs1, int rs2)
+{
+ assem_debug("str %s,%s,%s lsl #2\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe7800000|rd_rn_rm(rt,rs1,rs2)|0x100);
+}
+
+static void emit_writeword_indexed_tlb(int rt, int addr, int rs, int map, int temp)
+{
+ if(map<0) emit_writeword_indexed(rt, addr, rs);
+ else {
+ assert(addr==0);
+ emit_writeword_dualindexedx4(rt, rs, map);
+ }
+}
+
+static void emit_writedword_indexed_tlb(int rh, int rl, int addr, int rs, int map, int temp)
+{
+ if(map<0) {
+ if(rh>=0) emit_writeword_indexed(rh, addr, rs);
+ emit_writeword_indexed(rl, addr+4, rs);
+ }else{
+ assert(rh>=0);
+ if(temp!=rs) emit_addimm(map,1,temp);
+ emit_writeword_indexed_tlb(rh, addr, rs, map, temp);
+ if(temp!=rs) emit_writeword_indexed_tlb(rl, addr, rs, temp, temp);
+ else {
+ emit_addimm(rs,4,rs);
+ emit_writeword_indexed_tlb(rl, addr, rs, map, temp);
+ }
+ }
+}
+
+static void emit_writehword_indexed(int rt, int offset, int rs)
+{
+ assert(offset>-256&&offset<256);
+ assem_debug("strh %s,%s+%d\n",regname[rt],regname[rs],offset);
+ if(offset>=0) {
+ output_w32(0xe1c000b0|rd_rn_rm(rt,rs,0)|((offset<<4)&0xf00)|(offset&0xf));
+ }else{
+ output_w32(0xe14000b0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf));
+ }
+}
+
+static void emit_writebyte_indexed(int rt, int offset, int rs)
+{
+ assert(offset>-4096&&offset<4096);
+ assem_debug("strb %s,%s+%d\n",regname[rt],regname[rs],offset);
+ if(offset>=0) {
+ output_w32(0xe5c00000|rd_rn_rm(rt,rs,0)|offset);
+ }else{
+ output_w32(0xe5400000|rd_rn_rm(rt,rs,0)|(-offset));
+ }
+}
+
+static void emit_writebyte_dualindexedx4(int rt, int rs1, int rs2)
+{
+ assem_debug("strb %s,%s,%s lsl #2\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0xe7c00000|rd_rn_rm(rt,rs1,rs2)|0x100);
+}
+
+static void emit_writebyte_indexed_tlb(int rt, int addr, int rs, int map, int temp)
+{
+ if(map<0) emit_writebyte_indexed(rt, addr, rs);
+ else {
+ if(addr==0) {
+ emit_writebyte_dualindexedx4(rt, rs, map);
+ }else{
+ emit_addimm(rs,addr,temp);
+ emit_writebyte_dualindexedx4(rt, temp, map);
+ }
+ }
+}
+
+static void emit_strcc_dualindexed(int rs1, int rs2, int rt)
+{
+ assem_debug("strcc %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x37800000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_strccb_dualindexed(int rs1, int rs2, int rt)
+{
+ assem_debug("strccb %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x37c00000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_strcch_dualindexed(int rs1, int rs2, int rt)
+{
+ assem_debug("strcch %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x318000b0|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_writeword(int rt, int addr)
+{
+ u_int offset = addr-(u_int)&dynarec_local;
+ assert(offset<4096);
+ assem_debug("str %s,fp+%d\n",regname[rt],offset);
+ output_w32(0xe5800000|rd_rn_rm(rt,FP,0)|offset);
+}
+
+static unused void emit_writehword(int rt, int addr)
+{
+ u_int offset = addr-(u_int)&dynarec_local;
+ assert(offset<256);
+ assem_debug("strh %s,fp+%d\n",regname[rt],offset);
+ output_w32(0xe1c000b0|rd_rn_rm(rt,FP,0)|((offset<<4)&0xf00)|(offset&0xf));
+}
+
+static unused void emit_writebyte(int rt, int addr)
+{
+ u_int offset = addr-(u_int)&dynarec_local;
+ assert(offset<4096);
+ assem_debug("strb %s,fp+%d\n",regname[rt],offset);
+ output_w32(0xe5c00000|rd_rn_rm(rt,FP,0)|offset);
+}
+
+static void emit_umull(u_int rs1, u_int rs2, u_int hi, u_int lo)
+{
+ assem_debug("umull %s, %s, %s, %s\n",regname[lo],regname[hi],regname[rs1],regname[rs2]);
+ assert(rs1<16);
+ assert(rs2<16);
+ assert(hi<16);
+ assert(lo<16);
+ output_w32(0xe0800090|(hi<<16)|(lo<<12)|(rs2<<8)|rs1);
+}
+
+static void emit_smull(u_int rs1, u_int rs2, u_int hi, u_int lo)
+{
+ assem_debug("smull %s, %s, %s, %s\n",regname[lo],regname[hi],regname[rs1],regname[rs2]);
+ assert(rs1<16);
+ assert(rs2<16);
+ assert(hi<16);
+ assert(lo<16);
+ output_w32(0xe0c00090|(hi<<16)|(lo<<12)|(rs2<<8)|rs1);
+}
+
+static void emit_clz(int rs,int rt)
+{
+ assem_debug("clz %s,%s\n",regname[rt],regname[rs]);
+ output_w32(0xe16f0f10|rd_rn_rm(rt,0,rs));
+}
+
+static void emit_subcs(int rs1,int rs2,int rt)
+{
+ assem_debug("subcs %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x20400000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_shrcc_imm(int rs,u_int imm,int rt)
+{
+ assert(imm>0);
+ assert(imm<32);
+ assem_debug("lsrcc %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x31a00000|rd_rn_rm(rt,0,rs)|0x20|(imm<<7));
+}
+
+static void emit_shrne_imm(int rs,u_int imm,int rt)
+{
+ assert(imm>0);
+ assert(imm<32);
+ assem_debug("lsrne %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x11a00000|rd_rn_rm(rt,0,rs)|0x20|(imm<<7));
+}
+
+static void emit_negmi(int rs, int rt)
+{
+ assem_debug("rsbmi %s,%s,#0\n",regname[rt],regname[rs]);
+ output_w32(0x42600000|rd_rn_rm(rt,rs,0));
+}
+
+static void emit_negsmi(int rs, int rt)
+{
+ assem_debug("rsbsmi %s,%s,#0\n",regname[rt],regname[rs]);
+ output_w32(0x42700000|rd_rn_rm(rt,rs,0));
+}
+
+static void emit_orreq(u_int rs1,u_int rs2,u_int rt)
+{
+ assem_debug("orreq %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x01800000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_orrne(u_int rs1,u_int rs2,u_int rt)
+{
+ assem_debug("orrne %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+ output_w32(0x11800000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_bic_lsl(u_int rs1,u_int rs2,u_int shift,u_int rt)
+{
+ assem_debug("bic %s,%s,%s lsl %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]);
+ output_w32(0xe1C00000|rd_rn_rm(rt,rs1,rs2)|0x10|(shift<<8));
+}
+
+static void emit_biceq_lsl(u_int rs1,u_int rs2,u_int shift,u_int rt)
+{
+ assem_debug("biceq %s,%s,%s lsl %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]);
+ output_w32(0x01C00000|rd_rn_rm(rt,rs1,rs2)|0x10|(shift<<8));
+}
+
+static void emit_bicne_lsl(u_int rs1,u_int rs2,u_int shift,u_int rt)
+{
+ assem_debug("bicne %s,%s,%s lsl %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]);
+ output_w32(0x11C00000|rd_rn_rm(rt,rs1,rs2)|0x10|(shift<<8));
+}
+
+static void emit_bic_lsr(u_int rs1,u_int rs2,u_int shift,u_int rt)
+{
+ assem_debug("bic %s,%s,%s lsr %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]);
+ output_w32(0xe1C00000|rd_rn_rm(rt,rs1,rs2)|0x30|(shift<<8));
+}
+
+static void emit_biceq_lsr(u_int rs1,u_int rs2,u_int shift,u_int rt)
+{
+ assem_debug("biceq %s,%s,%s lsr %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]);
+ output_w32(0x01C00000|rd_rn_rm(rt,rs1,rs2)|0x30|(shift<<8));
+}
+
+static void emit_bicne_lsr(u_int rs1,u_int rs2,u_int shift,u_int rt)
+{
+ assem_debug("bicne %s,%s,%s lsr %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]);
+ output_w32(0x11C00000|rd_rn_rm(rt,rs1,rs2)|0x30|(shift<<8));
+}
+
+static void emit_teq(int rs, int rt)
+{
+ assem_debug("teq %s,%s\n",regname[rs],regname[rt]);
+ output_w32(0xe1300000|rd_rn_rm(0,rs,rt));
+}
+
+static void emit_rsbimm(int rs, int imm, int rt)
+{
+ u_int armval;
+ genimm_checked(imm,&armval);
+ assem_debug("rsb %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0xe2600000|rd_rn_rm(rt,rs,0)|armval);
+}
+
+// Load 2 immediates optimizing for small code size
+static void emit_mov2imm_compact(int imm1,u_int rt1,int imm2,u_int rt2)
+{
+ emit_movimm(imm1,rt1);
+ u_int armval;
+ if(genimm(imm2-imm1,&armval)) {
+ assem_debug("add %s,%s,#%d\n",regname[rt2],regname[rt1],imm2-imm1);
+ output_w32(0xe2800000|rd_rn_rm(rt2,rt1,0)|armval);
+ }else if(genimm(imm1-imm2,&armval)) {
+ assem_debug("sub %s,%s,#%d\n",regname[rt2],regname[rt1],imm1-imm2);
+ output_w32(0xe2400000|rd_rn_rm(rt2,rt1,0)|armval);
+ }
+ else emit_movimm(imm2,rt2);
+}
+
+// Conditionally select one of two immediates, optimizing for small code size
+// This will only be called if HAVE_CMOV_IMM is defined
+static void emit_cmov2imm_e_ne_compact(int imm1,int imm2,u_int rt)
+{
+ u_int armval;
+ if(genimm(imm2-imm1,&armval)) {
+ emit_movimm(imm1,rt);
+ assem_debug("addne %s,%s,#%d\n",regname[rt],regname[rt],imm2-imm1);
+ output_w32(0x12800000|rd_rn_rm(rt,rt,0)|armval);
+ }else if(genimm(imm1-imm2,&armval)) {
+ emit_movimm(imm1,rt);
+ assem_debug("subne %s,%s,#%d\n",regname[rt],regname[rt],imm1-imm2);
+ output_w32(0x12400000|rd_rn_rm(rt,rt,0)|armval);
+ }
+ else {
+ #ifndef HAVE_ARMV7
+ emit_movimm(imm1,rt);
+ add_literal((int)out,imm2);
+ assem_debug("ldrne %s,pc+? [=%x]\n",regname[rt],imm2);
+ output_w32(0x15900000|rd_rn_rm(rt,15,0));
+ #else
+ emit_movw(imm1&0x0000FFFF,rt);
+ if((imm1&0xFFFF)!=(imm2&0xFFFF)) {
+ assem_debug("movwne %s,#%d (0x%x)\n",regname[rt],imm2&0xFFFF,imm2&0xFFFF);
+ output_w32(0x13000000|rd_rn_rm(rt,0,0)|(imm2&0xfff)|((imm2<<4)&0xf0000));
+ }
+ emit_movt(imm1&0xFFFF0000,rt);
+ if((imm1&0xFFFF0000)!=(imm2&0xFFFF0000)) {
+ assem_debug("movtne %s,#%d (0x%x)\n",regname[rt],imm2&0xffff0000,imm2&0xffff0000);
+ output_w32(0x13400000|rd_rn_rm(rt,0,0)|((imm2>>16)&0xfff)|((imm2>>12)&0xf0000));
+ }
+ #endif
+ }
+}
+
+// special case for checking invalid_code
+static void emit_cmpmem_indexedsr12_reg(int base,int r,int imm)
+{
+ assert(imm<128&&imm>=0);
+ assert(r>=0&&r<16);
+ assem_debug("ldrb lr,%s,%s lsr #12\n",regname[base],regname[r]);
+ output_w32(0xe7d00000|rd_rn_rm(HOST_TEMPREG,base,r)|0x620);
+ emit_cmpimm(HOST_TEMPREG,imm);
+}
+
+static void emit_callne(int a)
+{
+ assem_debug("blne %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0x1b000000|offset);
+}
+
+// Used to preload hash table entries
+static unused void emit_prefetchreg(int r)
+{
+ assem_debug("pld %s\n",regname[r]);
+ output_w32(0xf5d0f000|rd_rn_rm(0,r,0));
+}
+
+// Special case for mini_ht
+static void emit_ldreq_indexed(int rs, u_int offset, int rt)
+{
+ assert(offset<4096);
+ assem_debug("ldreq %s,[%s, #%d]\n",regname[rt],regname[rs],offset);
+ output_w32(0x05900000|rd_rn_rm(rt,rs,0)|offset);
+}
+
+static unused void emit_bicne_imm(int rs,int imm,int rt)
+{
+ u_int armval;
+ genimm_checked(imm,&armval);
+ assem_debug("bicne %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x13c00000|rd_rn_rm(rt,rs,0)|armval);
+}
+
+static unused void emit_biccs_imm(int rs,int imm,int rt)
+{
+ u_int armval;
+ genimm_checked(imm,&armval);
+ assem_debug("biccs %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x23c00000|rd_rn_rm(rt,rs,0)|armval);
+}
+
+static unused void emit_bicvc_imm(int rs,int imm,int rt)
+{
+ u_int armval;
+ genimm_checked(imm,&armval);
+ assem_debug("bicvc %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x73c00000|rd_rn_rm(rt,rs,0)|armval);
+}
+
+static unused void emit_bichi_imm(int rs,int imm,int rt)
+{
+ u_int armval;
+ genimm_checked(imm,&armval);
+ assem_debug("bichi %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x83c00000|rd_rn_rm(rt,rs,0)|armval);
+}
+
+static unused void emit_orrvs_imm(int rs,int imm,int rt)
+{
+ u_int armval;
+ genimm_checked(imm,&armval);
+ assem_debug("orrvs %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x63800000|rd_rn_rm(rt,rs,0)|armval);
+}
+
+static void emit_orrne_imm(int rs,int imm,int rt)
+{
+ u_int armval;
+ genimm_checked(imm,&armval);
+ assem_debug("orrne %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x13800000|rd_rn_rm(rt,rs,0)|armval);
+}
+
+static void emit_andne_imm(int rs,int imm,int rt)
+{
+ u_int armval;
+ genimm_checked(imm,&armval);
+ assem_debug("andne %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x12000000|rd_rn_rm(rt,rs,0)|armval);
+}
+
+static unused void emit_addpl_imm(int rs,int imm,int rt)
+{
+ u_int armval;
+ genimm_checked(imm,&armval);
+ assem_debug("addpl %s,%s,#%d\n",regname[rt],regname[rs],imm);
+ output_w32(0x52800000|rd_rn_rm(rt,rs,0)|armval);
+}
+
+static void emit_jno_unlikely(int a)
+{
+ //emit_jno(a);
+ assem_debug("addvc pc,pc,#? (%x)\n",/*a-(int)out-8,*/a);
+ output_w32(0x72800000|rd_rn_rm(15,15,0));
+}
+
+static void save_regs_all(u_int reglist)
+{
+ int i;
+ if(!reglist) return;
+ assem_debug("stmia fp,{");
+ for(i=0;i<16;i++)
+ if(reglist&(1<<i))
+ assem_debug("r%d,",i);
+ assem_debug("}\n");
+ output_w32(0xe88b0000|reglist);
+}
+
+static void restore_regs_all(u_int reglist)
+{
+ int i;
+ if(!reglist) return;
+ assem_debug("ldmia fp,{");
+ for(i=0;i<16;i++)
+ if(reglist&(1<<i))
+ assem_debug("r%d,",i);
+ assem_debug("}\n");
+ output_w32(0xe89b0000|reglist);
+}
+
+// Save registers before function call
+static void save_regs(u_int reglist)
+{
+ reglist&=CALLER_SAVE_REGS; // only save the caller-save registers, r0-r3, r12
+ save_regs_all(reglist);
+}
+
+// Restore registers after function call
+static void restore_regs(u_int reglist)
+{
+ reglist&=CALLER_SAVE_REGS;
+ restore_regs_all(reglist);
+}
+
+/* Stubs/epilogue */
+
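+// Emit any queued literals and patch the pc-relative loads that reference
+// them, reusing an earlier copy when the same value repeats. With a nonzero
+// n the pool is only flushed once the oldest literal is about to drift out
+// of ldr's 4KB pc-relative range.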
+static void literal_pool(int n)
+{
+ if(!literalcount) return;
+ if(n) {
+ if((int)out-literals[0][0]<4096-n) return;
+ }
+ u_int *ptr;
+ int i;
+ for(i=0;i<literalcount;i++)
+ {
+ u_int l_addr=(u_int)out;
+ int j;
+ for(j=0;j<i;j++) {
+ if(literals[j][1]==literals[i][1]) {
+ //printf("dup %08x\n",literals[i][1]);
+ l_addr=literals[j][0];
+ break;
+ }
+ }
+ ptr=(u_int *)literals[i][0];
+ u_int offset=l_addr-(u_int)ptr-8;
+ assert(offset<4096);
+ assert(!(offset&3));
+ *ptr|=offset;
+ if(l_addr==(u_int)out) {
+ literals[i][0]=l_addr; // remember for dupes
+ output_w32(literals[i][1]);
+ }
+ }
+ literalcount=0;
+}
+
+static void literal_pool_jumpover(int n)
+{
+ if(!literalcount) return;
+ if(n) {
+ if((int)out-literals[0][0]<4096-n) return;
+ }
+ int jaddr=(int)out;
+ emit_jmp(0);
+ literal_pool(0);
+ set_jump_target(jaddr,(int)out);
+}
+
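+// Emit a jump out of the translation cache: pass the target vaddr and the
+// address of this branch to the dynamic linker, which can compile the target
+// if needed and patch the branch to point straight at it.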
+static void emit_extjump2(u_int addr, int target, int linker)
+{
+ u_char *ptr=(u_char *)addr;
+ assert((ptr[3]&0x0e)==0xa);
+ (void)ptr;
+
+ emit_loadlp(target,0);
+ emit_loadlp(addr,1);
+ assert(addr>=BASE_ADDR&&addr<(BASE_ADDR+(1<<TARGET_SIZE_2)));
+ //assert((target>=0x80000000&&target<0x80800000)||(target>0xA4000000&&target<0xA4001000));
+//DEBUG >
+#ifdef DEBUG_CYCLE_COUNT
+ emit_readword((int)&last_count,ECX);
+ emit_add(HOST_CCREG,ECX,HOST_CCREG);
+ emit_readword((int)&next_interupt,ECX);
+ emit_writeword(HOST_CCREG,(int)&Count);
+ emit_sub(HOST_CCREG,ECX,HOST_CCREG);
+ emit_writeword(ECX,(int)&last_count);
+#endif
+//DEBUG <
+ emit_jmp(linker);
+}
+
+static void emit_extjump(int addr, int target)
+{
+ emit_extjump2(addr, target, (int)dyna_linker);
+}
+
+static void emit_extjump_ds(int addr, int target)
+{
+ emit_extjump2(addr, target, (int)dyna_linker_ds);
+}
+
+// put rt_val into rt, potentially making use of rs with value rs_val
+static void emit_movimm_from(u_int rs_val,int rs,u_int rt_val,int rt)
+{
+ u_int armval;
+ int diff;
+ if(genimm(rt_val,&armval)) {
+ assem_debug("mov %s,#%d\n",regname[rt],rt_val);
+ output_w32(0xe3a00000|rd_rn_rm(rt,0,0)|armval);
+ return;
+ }
+ if(genimm(~rt_val,&armval)) {
+ assem_debug("mvn %s,#%d\n",regname[rt],rt_val);
+ output_w32(0xe3e00000|rd_rn_rm(rt,0,0)|armval);
+ return;
+ }
+ diff=rt_val-rs_val;
+ if(genimm(diff,&armval)) {
+ assem_debug("add %s,%s,#%d\n",regname[rt],regname[rs],diff);
+ output_w32(0xe2800000|rd_rn_rm(rt,rs,0)|armval);
+ return;
+ }else if(genimm(-diff,&armval)) {
+ assem_debug("sub %s,%s,#%d\n",regname[rt],regname[rs],-diff);
+ output_w32(0xe2400000|rd_rn_rm(rt,rs,0)|armval);
+ return;
+ }
+ emit_movimm(rt_val,rt);
+}
+
+// return 1 if the above function can do its job cheaply
+static int is_similar_value(u_int v1,u_int v2)
+{
+ u_int xs;
+ int diff;
+ if(v1==v2) return 1;
+ diff=v2-v1;
+ for(xs=diff;xs!=0&&(xs&3)==0;xs>>=2)
+ ;
+ if(xs<0x100) return 1;
+ for(xs=-diff;xs!=0&&(xs&3)==0;xs>>=2)
+ ;
+ if(xs<0x100) return 1;
+ return 0;
+}
+
+// trashes r2
+static void pass_args(int a0, int a1)
+{
+ if(a0==1&&a1==0) {
+ // must swap
+ emit_mov(a0,2); emit_mov(a1,1); emit_mov(2,0);
+ }
+ else if(a0!=0&&a1==0) {
+ emit_mov(a1,1);
+ if (a0>=0) emit_mov(a0,0);
+ }
+ else {
+ if(a0>=0&&a0!=0) emit_mov(a0,0);
+ if(a1>=0&&a1!=1) emit_mov(a1,1);
+ }
+}
+
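+// Convert the raw 32-bit value returned by a read handler into the form the
+// load expects (sign/zero-extend byte or halfword, plain move for words).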
+static void mov_loadtype_adj(int type,int rs,int rt)
+{
+ switch(type) {
+ case LOADB_STUB: emit_signextend8(rs,rt); break;
+ case LOADBU_STUB: emit_andimm(rs,0xff,rt); break;
+ case LOADH_STUB: emit_signextend16(rs,rt); break;
+ case LOADHU_STUB: emit_andimm(rs,0xffff,rt); break;
+ case LOADW_STUB: if(rs!=rt) emit_mov(rs,rt); break;
+ default: assert(0);
+ }
+}
+
+#include "../backends/psx/pcsxmem.h"
+#include "../backends/psx/pcsxmem_inline.c"
+
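+// Out-of-line slow path for loads: retry via the mem_rtab page table using
+// conditional (cc) loads for directly mapped pages, otherwise save the
+// caller-save registers and call the C read handler, then move the result
+// into the destination register.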
+static void do_readstub(int n)
+{
+ assem_debug("do_readstub %x\n",start+stubs[n][3]*4);
+ literal_pool(256);
+ set_jump_target(stubs[n][1],(int)out);
+ int type=stubs[n][0];
+ int i=stubs[n][3];
+ int rs=stubs[n][4];
+ struct regstat *i_regs=(struct regstat *)stubs[n][5];
+ u_int reglist=stubs[n][7];
+ signed char *i_regmap=i_regs->regmap;
+ int rt;
+ if(itype[i]==C1LS||itype[i]==C2LS||itype[i]==LOADLR) {
+ rt=get_reg(i_regmap,FTEMP);
+ }else{
+ rt=get_reg(i_regmap,rt1[i]);
+ }
+ assert(rs>=0);
+ int r,temp=-1,temp2=HOST_TEMPREG,regs_saved=0,restore_jump=0;
+ reglist|=(1<<rs);
+ for(r=0;r<=12;r++) {
+ if(((1<<r)&0x13ff)&&((1<<r)&reglist)==0) {
+ temp=r; break;
+ }
+ }
+ if(rt>=0&&rt1[i]!=0)
+ reglist&=~(1<<rt);
+ if(temp==-1) {
+ save_regs(reglist);
+ regs_saved=1;
+ temp=(rs==0)?2:0;
+ }
+ if((regs_saved||(reglist&2)==0)&&temp!=1&&rs!=1)
+ temp2=1;
+ emit_readword((int)&mem_rtab,temp);
+ emit_shrimm(rs,12,temp2);
+ emit_readword_dualindexedx4(temp,temp2,temp2);
+ emit_lsls_imm(temp2,1,temp2);
+ if(itype[i]==C1LS||itype[i]==C2LS||(rt>=0&&rt1[i]!=0)) {
+ switch(type) {
+ case LOADB_STUB: emit_ldrccsb_dualindexed(temp2,rs,rt); break;
+ case LOADBU_STUB: emit_ldrccb_dualindexed(temp2,rs,rt); break;
+ case LOADH_STUB: emit_ldrccsh_dualindexed(temp2,rs,rt); break;
+ case LOADHU_STUB: emit_ldrcch_dualindexed(temp2,rs,rt); break;
+ case LOADW_STUB: emit_ldrcc_dualindexed(temp2,rs,rt); break;
+ }
+ }
+ if(regs_saved) {
+ restore_jump=(int)out;
+ emit_jcc(0); // jump to reg restore
+ }
+ else
+ emit_jcc(stubs[n][2]); // return address
+
+ if(!regs_saved)
+ save_regs(reglist);
+ int handler=0;
+ if(type==LOADB_STUB||type==LOADBU_STUB)
+ handler=(int)jump_handler_read8;
+ if(type==LOADH_STUB||type==LOADHU_STUB)
+ handler=(int)jump_handler_read16;
+ if(type==LOADW_STUB)
+ handler=(int)jump_handler_read32;
+ assert(handler!=0);
+ pass_args(rs,temp2);
+ int cc=get_reg(i_regmap,CCREG);
+ if(cc<0)
+ emit_loadreg(CCREG,2);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n][6]+1),2);
+ emit_call(handler);
+ if(itype[i]==C1LS||itype[i]==C2LS||(rt>=0&&rt1[i]!=0)) {
+ mov_loadtype_adj(type,0,rt);
+ }
+ if(restore_jump)
+ set_jump_target(restore_jump,(int)out);
+ restore_regs(reglist);
+ emit_jmp(stubs[n][2]); // return address
+}
+
+// return memhandler, or get the directly accessible address and return 0
+static u_int get_direct_memhandler(void *table,u_int addr,int type,u_int *addr_host)
+{
+ u_int l1,l2=0;
+ l1=((u_int *)table)[addr>>12];
+ if((l1&(1<<31))==0) {
+ u_int v=l1<<1;
+ *addr_host=v+addr;
+ return 0;
+ }
+ else {
+ l1<<=1;
+ if(type==LOADB_STUB||type==LOADBU_STUB||type==STOREB_STUB)
+ l2=((u_int *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)];
+ else if(type==LOADH_STUB||type==LOADHU_STUB||type==STOREH_STUB)
+ l2=((u_int *)l1)[0x1000/4 + (addr&0xfff)/2];
+ else
+ l2=((u_int *)l1)[(addr&0xfff)/4];
+ if((l2&(1<<31))==0) {
+ u_int v=l2<<1;
+ *addr_host=v+(addr&0xfff);
+ return 0;
+ }
+ return l2<<1;
+ }
+}
+
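+// Load from an address known at compile time: emit a direct load when the
+// page is directly mapped, otherwise call the memhandler inline.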
+static void inline_readstub(int type, int i, u_int addr, signed char regmap[], int target, int adj, u_int reglist)
+{
+ int rs=get_reg(regmap,target);
+ int rt=get_reg(regmap,target);
+ if(rs<0) rs=get_reg(regmap,-1);
+ assert(rs>=0);
+ u_int handler,host_addr=0,is_dynamic,far_call=0;
+ int cc=get_reg(regmap,CCREG);
+ if(pcsx_direct_read(type,addr,CLOCK_ADJUST(adj+1),cc,target?rs:-1,rt))
+ return;
+ handler=get_direct_memhandler(mem_rtab,addr,type,&host_addr);
+ if (handler==0) {
+ if(rt<0||rt1[i]==0)
+ return;
+ if(addr!=host_addr)
+ emit_movimm_from(addr,rs,host_addr,rs);
+ switch(type) {
+ case LOADB_STUB: emit_movsbl_indexed(0,rs,rt); break;
+ case LOADBU_STUB: emit_movzbl_indexed(0,rs,rt); break;
+ case LOADH_STUB: emit_movswl_indexed(0,rs,rt); break;
+ case LOADHU_STUB: emit_movzwl_indexed(0,rs,rt); break;
+ case LOADW_STUB: emit_readword_indexed(0,rs,rt); break;
+ default: assert(0);
+ }
+ return;
+ }
+ is_dynamic=pcsxmem_is_handler_dynamic(addr);
+ if(is_dynamic) {
+ if(type==LOADB_STUB||type==LOADBU_STUB)
+ handler=(int)jump_handler_read8;
+ if(type==LOADH_STUB||type==LOADHU_STUB)
+ handler=(int)jump_handler_read16;
+ if(type==LOADW_STUB)
+ handler=(int)jump_handler_read32;
+ }
+
+ // call a memhandler
+ if(rt>=0&&rt1[i]!=0)
+ reglist&=~(1<<rt);
+ save_regs(reglist);
+ if(target==0)
+ emit_movimm(addr,0);
+ else if(rs!=0)
+ emit_mov(rs,0);
+ int offset=(int)handler-(int)out-8;
+ if(offset<-33554432||offset>=33554432) {
+ // unreachable memhandler, a plugin func perhaps
+ emit_movimm(handler,12);
+ far_call=1;
+ }
+ if(cc<0)
+ emit_loadreg(CCREG,2);
+ if(is_dynamic) {
+ emit_movimm(((u_int *)mem_rtab)[addr>>12]<<1,1);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj+1),2);
+ }
+ else {
+ emit_readword((int)&last_count,3);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj+1),2);
+ emit_add(2,3,2);
+ emit_writeword(2,(int)&Count);
+ }
+
+ if(far_call)
+ emit_callreg(12);
+ else
+ emit_call(handler);
+
+ if(rt>=0&&rt1[i]!=0) {
+ switch(type) {
+ case LOADB_STUB: emit_signextend8(0,rt); break;
+ case LOADBU_STUB: emit_andimm(0,0xff,rt); break;
+ case LOADH_STUB: emit_signextend16(0,rt); break;
+ case LOADHU_STUB: emit_andimm(0,0xffff,rt); break;
+ case LOADW_STUB: if(rt!=0) emit_mov(0,rt); break;
+ default: assert(0);
+ }
+ }
+ restore_regs(reglist);
+}
+
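+// Out-of-line slow path for stores, the counterpart of do_readstub; the
+// write handlers also return the updated cycle count.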
+static void do_writestub(int n)
+{
+ assem_debug("do_writestub %x\n",start+stubs[n][3]*4);
+ literal_pool(256);
+ set_jump_target(stubs[n][1],(int)out);
+ int type=stubs[n][0];
+ int i=stubs[n][3];
+ int rs=stubs[n][4];
+ struct regstat *i_regs=(struct regstat *)stubs[n][5];
+ u_int reglist=stubs[n][7];
+ signed char *i_regmap=i_regs->regmap;
+ int rt,r;
+ if(itype[i]==C1LS||itype[i]==C2LS) {
+ rt=get_reg(i_regmap,r=FTEMP);
+ }else{
+ rt=get_reg(i_regmap,r=rs2[i]);
+ }
+ assert(rs>=0);
+ assert(rt>=0);
+ int rtmp,temp=-1,temp2=HOST_TEMPREG,regs_saved=0,restore_jump=0,ra;
+ int reglist2=reglist|(1<<rs)|(1<<rt);
+ for(rtmp=0;rtmp<=12;rtmp++) {
+ if(((1<<rtmp)&0x13ff)&&((1<<rtmp)&reglist2)==0) {
+ temp=rtmp; break;
+ }
+ }
+ if(temp==-1) {
+ save_regs(reglist);
+ regs_saved=1;
+ for(rtmp=0;rtmp<=3;rtmp++)
+ if(rtmp!=rs&&rtmp!=rt)
+ {temp=rtmp;break;}
+ }
+ if((regs_saved||(reglist2&8)==0)&&temp!=3&&rs!=3&&rt!=3)
+ temp2=3;
+ emit_readword((int)&mem_wtab,temp);
+ emit_shrimm(rs,12,temp2);
+ emit_readword_dualindexedx4(temp,temp2,temp2);
+ emit_lsls_imm(temp2,1,temp2);
+ switch(type) {
+ case STOREB_STUB: emit_strccb_dualindexed(temp2,rs,rt); break;
+ case STOREH_STUB: emit_strcch_dualindexed(temp2,rs,rt); break;
+ case STOREW_STUB: emit_strcc_dualindexed(temp2,rs,rt); break;
+ default: assert(0);
+ }
+ if(regs_saved) {
+ restore_jump=(int)out;
+ emit_jcc(0); // jump to reg restore
+ }
+ else
+ emit_jcc(stubs[n][2]); // return address (invcode check)
+
+ if(!regs_saved)
+ save_regs(reglist);
+ int handler=0;
+ switch(type) {
+ case STOREB_STUB: handler=(int)jump_handler_write8; break;
+ case STOREH_STUB: handler=(int)jump_handler_write16; break;
+ case STOREW_STUB: handler=(int)jump_handler_write32; break;
+ }
+ assert(handler!=0);
+ pass_args(rs,rt);
+ if(temp2!=3)
+ emit_mov(temp2,3);
+ int cc=get_reg(i_regmap,CCREG);
+ if(cc<0)
+ emit_loadreg(CCREG,2);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n][6]+1),2);
+ // returns new cycle_count
+ emit_call(handler);
+ emit_addimm(0,-CLOCK_ADJUST((int)stubs[n][6]+1),cc<0?2:cc);
+ if(cc<0)
+ emit_storereg(CCREG,2);
+ if(restore_jump)
+ set_jump_target(restore_jump,(int)out);
+ restore_regs(reglist);
+ ra=stubs[n][2];
+ emit_jmp(ra);
+}
+
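+// Store to an address known at compile time: emit a direct store when the
+// page is directly mapped, otherwise call the handler via
+// jump_handler_write_h.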
+static void inline_writestub(int type, int i, u_int addr, signed char regmap[], int target, int adj, u_int reglist)
+{
+ int rs=get_reg(regmap,-1);
+ int rt=get_reg(regmap,target);
+ assert(rs>=0);
+ assert(rt>=0);
+ u_int handler,host_addr=0;
+ handler=get_direct_memhandler(mem_wtab,addr,type,&host_addr);
+ if (handler==0) {
+ if(addr!=host_addr)
+ emit_movimm_from(addr,rs,host_addr,rs);
+ switch(type) {
+ case STOREB_STUB: emit_writebyte_indexed(rt,0,rs); break;
+ case STOREH_STUB: emit_writehword_indexed(rt,0,rs); break;
+ case STOREW_STUB: emit_writeword_indexed(rt,0,rs); break;
+ default: assert(0);
+ }
+ return;
+ }
+
+ // call a memhandler
+ save_regs(reglist);
+ pass_args(rs,rt);
+ int cc=get_reg(regmap,CCREG);
+ if(cc<0)
+ emit_loadreg(CCREG,2);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj+1),2);
+ emit_movimm(handler,3);
+ // returns new cycle_count
+ emit_call((int)jump_handler_write_h);
+ emit_addimm(0,-CLOCK_ADJUST(adj+1),cc<0?2:cc);
+ if(cc<0)
+ emit_storereg(CCREG,2);
+ restore_regs(reglist);
+}
+
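+// Slow path for SWL/SWR: instead of merging the bytes here, call the
+// dedicated swl/swr handlers (the disabled #else branch keeps the old
+// inline merge sequence).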
+static void do_unalignedwritestub(int n)
+{
+ assem_debug("do_unalignedwritestub %x\n",start+stubs[n][3]*4);
+ literal_pool(256);
+ set_jump_target(stubs[n][1],(int)out);
+
+ int i=stubs[n][3];
+ struct regstat *i_regs=(struct regstat *)stubs[n][4];
+ int addr=stubs[n][5];
+ u_int reglist=stubs[n][7];
+ signed char *i_regmap=i_regs->regmap;
+ int temp2=get_reg(i_regmap,FTEMP);
+ int rt;
+ rt=get_reg(i_regmap,rs2[i]);
+ assert(rt>=0);
+ assert(addr>=0);
+  assert(opcode[i]==0x2a||opcode[i]==0x2e); // only SWL/SWR are implemented
+ reglist|=(1<<addr);
+ reglist&=~(1<<temp2);
+
+#if 1
+ // don't bother with it and call write handler
+ save_regs(reglist);
+ pass_args(addr,rt);
+ int cc=get_reg(i_regmap,CCREG);
+ if(cc<0)
+ emit_loadreg(CCREG,2);
+ emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n][6]+1),2);
+ emit_call((int)(opcode[i]==0x2a?jump_handle_swl:jump_handle_swr));
+ emit_addimm(0,-CLOCK_ADJUST((int)stubs[n][6]+1),cc<0?2:cc);
+ if(cc<0)
+ emit_storereg(CCREG,2);
+ restore_regs(reglist);
+ emit_jmp(stubs[n][2]); // return address
+#else
+ emit_andimm(addr,0xfffffffc,temp2);
+ emit_writeword(temp2,(int)&address);
+
+ save_regs(reglist);
+ emit_shrimm(addr,16,1);
+ int cc=get_reg(i_regmap,CCREG);
+ if(cc<0) {
+ emit_loadreg(CCREG,2);
+ }
+ emit_movimm((u_int)readmem,0);
+ emit_addimm(cc<0?2:cc,2*stubs[n][6]+2,2);
+ emit_call((int)&indirect_jump_indexed);
+ restore_regs(reglist);
+
+ emit_readword((int)&readmem_dword,temp2);
+ int temp=addr; //hmh
+ emit_shlimm(addr,3,temp);
+ emit_andimm(temp,24,temp);
+#ifdef BIG_ENDIAN_MIPS
+ if (opcode[i]==0x2e) // SWR
+#else
+ if (opcode[i]==0x2a) // SWL
+#endif
+ emit_xorimm(temp,24,temp);
+ emit_movimm(-1,HOST_TEMPREG);
+ if (opcode[i]==0x2a) { // SWL
+ emit_bic_lsr(temp2,HOST_TEMPREG,temp,temp2);
+ emit_orrshr(rt,temp,temp2);
+ }else{
+ emit_bic_lsl(temp2,HOST_TEMPREG,temp,temp2);
+ emit_orrshl(rt,temp,temp2);
+ }
+ emit_readword((int)&address,addr);
+ emit_writeword(temp2,(int)&word);
+ //save_regs(reglist); // don't need to, no state changes
+ emit_shrimm(addr,16,1);
+ emit_movimm((u_int)writemem,0);
+ //emit_call((int)&indirect_jump_indexed);
+ emit_mov(15,14);
+ emit_readword_dualindexedx4(0,1,15);
+ emit_readword((int)&Count,HOST_TEMPREG);
+ emit_readword((int)&next_interupt,2);
+ emit_addimm(HOST_TEMPREG,-2*stubs[n][6]-2,HOST_TEMPREG);
+ emit_writeword(2,(int)&last_count);
+ emit_sub(HOST_TEMPREG,2,cc<0?HOST_TEMPREG:cc);
+ if(cc<0) {
+ emit_storereg(CCREG,HOST_TEMPREG);
+ }
+ restore_regs(reglist);
+ emit_jmp(stubs[n][2]); // return address
+#endif
+}
+
+static void do_invstub(int n)
+{
+ literal_pool(20);
+ u_int reglist=stubs[n][3];
+ set_jump_target(stubs[n][1],(int)out);
+ save_regs(reglist);
+ if(stubs[n][4]!=0) emit_mov(stubs[n][4],0);
+ emit_call((int)&invalidate_addr);
+ restore_regs(reglist);
+ emit_jmp(stubs[n][2]); // return address
+}
+
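+// Entry stub for a "dirty" block: load the source address, the saved copy
+// and the length, then call verify_code to check that the original PSX code
+// is unchanged before the block is entered.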
+int do_dirty_stub(int i)
+{
+ assem_debug("do_dirty_stub %x\n",start+i*4);
+ u_int addr=(u_int)source;
+ // Careful about the code output here, verify_dirty needs to parse it.
+ #ifndef HAVE_ARMV7
+ emit_loadlp(addr,1);
+ emit_loadlp((int)copy,2);
+ emit_loadlp(slen*4,3);
+ #else
+ emit_movw(addr&0x0000FFFF,1);
+ emit_movw(((u_int)copy)&0x0000FFFF,2);
+ emit_movt(addr&0xFFFF0000,1);
+ emit_movt(((u_int)copy)&0xFFFF0000,2);
+ emit_movw(slen*4,3);
+ #endif
+ emit_movimm(start+i*4,0);
+ emit_call((int)start<(int)0xC0000000?(int)&verify_code:(int)&verify_code_vm);
+ int entry=(int)out;
+ load_regs_entry(i);
+ if(entry==(int)out) entry=instr_addr[i];
+ emit_jmp(instr_addr[i]);
+ return entry;
+}
+
+static void do_dirty_stub_ds()
+{
+ // Careful about the code output here, verify_dirty needs to parse it.
+ #ifndef HAVE_ARMV7
+ emit_loadlp((int)start<(int)0xC0000000?(int)source:(int)start,1);
+ emit_loadlp((int)copy,2);
+ emit_loadlp(slen*4,3);
+ #else
+ emit_movw(((int)start<(int)0xC0000000?(u_int)source:(u_int)start)&0x0000FFFF,1);
+ emit_movw(((u_int)copy)&0x0000FFFF,2);
+ emit_movt(((int)start<(int)0xC0000000?(u_int)source:(u_int)start)&0xFFFF0000,1);
+ emit_movt(((u_int)copy)&0xFFFF0000,2);
+ emit_movw(slen*4,3);
+ #endif
+ emit_movimm(start+1,0);
+ emit_call((int)&verify_code_ds);
+}
+
+static void do_cop1stub(int n)
+{
+ literal_pool(256);
+ assem_debug("do_cop1stub %x\n",start+stubs[n][3]*4);
+ set_jump_target(stubs[n][1],(int)out);
+ int i=stubs[n][3];
+// int rs=stubs[n][4];
+ struct regstat *i_regs=(struct regstat *)stubs[n][5];
+ int ds=stubs[n][6];
+ if(!ds) {
+ load_all_consts(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty,i);
+ //if(i_regs!=&regs[i]) printf("oops: regs[i]=%x i_regs=%x",(int)&regs[i],(int)i_regs);
+ }
+ //else {printf("fp exception in delay slot\n");}
+ wb_dirtys(i_regs->regmap_entry,i_regs->was32,i_regs->wasdirty);
+ if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
+ emit_movimm(start+(i-ds)*4,EAX); // Get PC
+ emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
+ emit_jmp(ds?(int)fp_exception_ds:(int)fp_exception);
+}
+
+/* Special assem */
+
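+// Variable shifts. The 32-bit SLLV/SRLV/SRAV map directly onto ARM
+// register-specified shifts; the 64-bit forms are built from two 32-bit
+// halves with conditional fixups for shift amounts of 32 or more.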
+static void shift_assemble_arm(int i,struct regstat *i_regs)
+{
+ if(rt1[i]) {
+ if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
+ {
+ signed char s,t,shift;
+ t=get_reg(i_regs->regmap,rt1[i]);
+ s=get_reg(i_regs->regmap,rs1[i]);
+ shift=get_reg(i_regs->regmap,rs2[i]);
+ if(t>=0){
+ if(rs1[i]==0)
+ {
+ emit_zeroreg(t);
+ }
+ else if(rs2[i]==0)
+ {
+ assert(s>=0);
+ if(s!=t) emit_mov(s,t);
+ }
+ else
+ {
+ emit_andimm(shift,31,HOST_TEMPREG);
+ if(opcode2[i]==4) // SLLV
+ {
+ emit_shl(s,HOST_TEMPREG,t);
+ }
+ if(opcode2[i]==6) // SRLV
+ {
+ emit_shr(s,HOST_TEMPREG,t);
+ }
+ if(opcode2[i]==7) // SRAV
+ {
+ emit_sar(s,HOST_TEMPREG,t);
+ }
+ }
+ }
+ } else { // DSLLV/DSRLV/DSRAV
+ signed char sh,sl,th,tl,shift;
+ th=get_reg(i_regs->regmap,rt1[i]|64);
+ tl=get_reg(i_regs->regmap,rt1[i]);
+ sh=get_reg(i_regs->regmap,rs1[i]|64);
+ sl=get_reg(i_regs->regmap,rs1[i]);
+ shift=get_reg(i_regs->regmap,rs2[i]);
+ if(tl>=0){
+ if(rs1[i]==0)
+ {
+ emit_zeroreg(tl);
+ if(th>=0) emit_zeroreg(th);
+ }
+ else if(rs2[i]==0)
+ {
+ assert(sl>=0);
+ if(sl!=tl) emit_mov(sl,tl);
+ if(th>=0&&sh!=th) emit_mov(sh,th);
+ }
+ else
+ {
+ // FIXME: What if shift==tl ?
+ assert(shift!=tl);
+ int temp=get_reg(i_regs->regmap,-1);
+ int real_th=th;
+ if(th<0&&opcode2[i]!=0x14) {th=temp;} // DSLLV doesn't need a temporary register
+ assert(sl>=0);
+ assert(sh>=0);
+ emit_andimm(shift,31,HOST_TEMPREG);
+ if(opcode2[i]==0x14) // DSLLV
+ {
+ if(th>=0) emit_shl(sh,HOST_TEMPREG,th);
+ emit_rsbimm(HOST_TEMPREG,32,HOST_TEMPREG);
+ emit_orrshr(sl,HOST_TEMPREG,th);
+ emit_andimm(shift,31,HOST_TEMPREG);
+ emit_testimm(shift,32);
+ emit_shl(sl,HOST_TEMPREG,tl);
+ if(th>=0) emit_cmovne_reg(tl,th);
+ emit_cmovne_imm(0,tl);
+ }
+ if(opcode2[i]==0x16) // DSRLV
+ {
+ assert(th>=0);
+ emit_shr(sl,HOST_TEMPREG,tl);
+ emit_rsbimm(HOST_TEMPREG,32,HOST_TEMPREG);
+ emit_orrshl(sh,HOST_TEMPREG,tl);
+ emit_andimm(shift,31,HOST_TEMPREG);
+ emit_testimm(shift,32);
+ emit_shr(sh,HOST_TEMPREG,th);
+ emit_cmovne_reg(th,tl);
+ if(real_th>=0) emit_cmovne_imm(0,th);
+ }
+ if(opcode2[i]==0x17) // DSRAV
+ {
+ assert(th>=0);
+ emit_shr(sl,HOST_TEMPREG,tl);
+ emit_rsbimm(HOST_TEMPREG,32,HOST_TEMPREG);
+ if(real_th>=0) {
+ assert(temp>=0);
+ emit_sarimm(th,31,temp);
+ }
+ emit_orrshl(sh,HOST_TEMPREG,tl);
+ emit_andimm(shift,31,HOST_TEMPREG);
+ emit_testimm(shift,32);
+ emit_sar(sh,HOST_TEMPREG,th);
+ emit_cmovne_reg(th,tl);
+ if(real_th>=0) emit_cmovne_reg(temp,th);
+ }
+ }
+ }
+ }
+ }
+}
+
+static void speculate_mov(int rs,int rt)
+{
+ if(rt!=0) {
+ smrv_strong_next|=1<<rt;
+ smrv[rt]=smrv[rs];
+ }
+}
+
+static void speculate_mov_weak(int rs,int rt)
+{
+ if(rt!=0) {
+ smrv_weak_next|=1<<rt;
+ smrv[rt]=smrv[rs];
+ }
+}
+
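+// Speculate on the values the guest registers will hold at each instruction
+// (smrv_strong = confident, smrv_weak = guessed), so memory accesses can be
+// compiled for the region they are most likely to hit.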
+static void speculate_register_values(int i)
+{
+ if(i==0) {
+ memcpy(smrv,psxRegs.GPR.r,sizeof(smrv));
+ // gp,sp are likely to stay the same throughout the block
+ smrv_strong_next=(1<<28)|(1<<29)|(1<<30);
+ smrv_weak_next=~smrv_strong_next;
+ //printf(" llr %08x\n", smrv[4]);
+ }
+ smrv_strong=smrv_strong_next;
+ smrv_weak=smrv_weak_next;
+ switch(itype[i]) {
+ case ALU:
+ if ((smrv_strong>>rs1[i])&1) speculate_mov(rs1[i],rt1[i]);
+ else if((smrv_strong>>rs2[i])&1) speculate_mov(rs2[i],rt1[i]);
+ else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]);
+ else if((smrv_weak>>rs2[i])&1) speculate_mov_weak(rs2[i],rt1[i]);
+ else {
+ smrv_strong_next&=~(1<<rt1[i]);
+ smrv_weak_next&=~(1<<rt1[i]);
+ }
+ break;
+ case SHIFTIMM:
+ smrv_strong_next&=~(1<<rt1[i]);
+ smrv_weak_next&=~(1<<rt1[i]);
+ // fallthrough
+ case IMM16:
+ if(rt1[i]&&is_const(&regs[i],rt1[i])) {
+ int value,hr=get_reg(regs[i].regmap,rt1[i]);
+ if(hr>=0) {
+ if(get_final_value(hr,i,&value))
+ smrv[rt1[i]]=value;
+ else smrv[rt1[i]]=constmap[i][hr];
+ smrv_strong_next|=1<<rt1[i];
+ }
+ }
+ else {
+ if ((smrv_strong>>rs1[i])&1) speculate_mov(rs1[i],rt1[i]);
+ else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]);
+ }
+ break;
+ case LOAD:
+ if(start<0x2000&&(rt1[i]==26||(smrv[rt1[i]]>>24)==0xa0)) {
+ // special case for BIOS
+ smrv[rt1[i]]=0xa0000000;
+ smrv_strong_next|=1<<rt1[i];
+ break;
+ }
+ // fallthrough
+ case SHIFT:
+ case LOADLR:
+ case MOV:
+ smrv_strong_next&=~(1<<rt1[i]);
+ smrv_weak_next&=~(1<<rt1[i]);
+ break;
+ case COP0:
+ case COP2:
+ if(opcode2[i]==0||opcode2[i]==2) { // MFC/CFC
+ smrv_strong_next&=~(1<<rt1[i]);
+ smrv_weak_next&=~(1<<rt1[i]);
+ }
+ break;
+ case C2LS:
+ if (opcode[i]==0x32) { // LWC2
+ smrv_strong_next&=~(1<<rt1[i]);
+ smrv_weak_next&=~(1<<rt1[i]);
+ }
+ break;
+ }
+#if 0
+ int r=4;
+ printf("x %08x %08x %d %d c %08x %08x\n",smrv[r],start+i*4,
+ ((smrv_strong>>r)&1),(smrv_weak>>r)&1,regs[i].isconst,regs[i].wasconst);
+#endif
+}
+
+enum {
+ MTYPE_8000 = 0,
+ MTYPE_8020,
+ MTYPE_0000,
+ MTYPE_A000,
+ MTYPE_1F80,
+};
+
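+// Classify an address into one of the memory regions above so that the
+// fast path knows which mirror adjustment to apply.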
+static int get_ptr_mem_type(u_int a)
+{
+ if(a < 0x00200000) {
+ if(a<0x1000&&((start>>20)==0xbfc||(start>>24)==0xa0))
+      // deliberately return the "wrong" type: the memhandler must be used
+      // here for the BIOS self-test to pass
+      // 007 does similar accesses from the a0000000 mirror, weird stuff
+ return MTYPE_8000;
+ return MTYPE_0000;
+ }
+ if(0x1f800000 <= a && a < 0x1f801000)
+ return MTYPE_1F80;
+ if(0x80200000 <= a && a < 0x80800000)
+ return MTYPE_8020;
+ if(0xa0000000 <= a && a < 0xa0200000)
+ return MTYPE_A000;
+ return MTYPE_8000;
+}
+
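+// Emit the fast-path address check for a load/store: remap mirrored RAM
+// addresses to the canonical mapping based on the speculated value, or fall
+// back to a compare against RAM_SIZE; returns the branch address to be
+// patched to point at the slow-path stub.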
+static int emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override)
+{
+ int jaddr=0,type=0;
+ int mr=rs1[i];
+ if(((smrv_strong|smrv_weak)>>mr)&1) {
+ type=get_ptr_mem_type(smrv[mr]);
+ //printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type);
+ }
+ else {
+ // use the mirror we are running on
+ type=get_ptr_mem_type(start);
+ //printf("set nospec @%08x r%d %d\n", start+i*4, mr, type);
+ }
+
+ if(type==MTYPE_8020) { // RAM 80200000+ mirror
+ emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
+ addr=*addr_reg_override=HOST_TEMPREG;
+ type=0;
+ }
+ else if(type==MTYPE_0000) { // RAM 0 mirror
+ emit_orimm(addr,0x80000000,HOST_TEMPREG);
+ addr=*addr_reg_override=HOST_TEMPREG;
+ type=0;
+ }
+ else if(type==MTYPE_A000) { // RAM A mirror
+ emit_andimm(addr,~0x20000000,HOST_TEMPREG);
+ addr=*addr_reg_override=HOST_TEMPREG;
+ type=0;
+ }
+ else if(type==MTYPE_1F80) { // scratchpad
+ if (psxH == (void *)0x1f800000) {
+ emit_addimm(addr,-0x1f800000,HOST_TEMPREG);
+ emit_cmpimm(HOST_TEMPREG,0x1000);
+ jaddr=(int)out;
+ emit_jc(0);
+ }
+ else {
+ // do usual RAM check, jump will go to the right handler
+ type=0;
+ }
+ }
+
+ if(type==0)
+ {
+ emit_cmpimm(addr,RAM_SIZE);
+ jaddr=(int)out;
+ #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
+ // Hint to branch predictor that the branch is unlikely to be taken
+ if(rs1[i]>=28)
+ emit_jno_unlikely(0);
+ else
+ #endif
+ emit_jno(0);
+ if(ram_offset!=0) {
+ emit_addimm(addr,ram_offset,HOST_TEMPREG);
+ addr=*addr_reg_override=HOST_TEMPREG;
+ }
+ }
+
+ return jaddr;
+}
+
+#define shift_assemble shift_assemble_arm
+
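+// Unaligned loads (LWL/LWR and the 64-bit LDL/LDR): read the aligned word,
+// then merge the wanted bytes into the destination with shift/bic/orr
+// sequences keyed on the low address bits.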
+static void loadlr_assemble_arm(int i,struct regstat *i_regs)
+{
+ int s,th,tl,temp,temp2,addr,map=-1;
+ int offset;
+ int jaddr=0;
+ int memtarget=0,c=0;
+ int fastload_reg_override=0;
+ u_int hr,reglist=0;
+ th=get_reg(i_regs->regmap,rt1[i]|64);
+ tl=get_reg(i_regs->regmap,rt1[i]);
+ s=get_reg(i_regs->regmap,rs1[i]);
+ temp=get_reg(i_regs->regmap,-1);
+ temp2=get_reg(i_regs->regmap,FTEMP);
+ addr=get_reg(i_regs->regmap,AGEN1+(i&1));
+ assert(addr<0);
+ offset=imm[i];
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
+ }
+ reglist|=1<<temp;
+ if(offset||s<0||c) addr=temp2;
+ else addr=s;
+ if(s>=0) {
+ c=(i_regs->wasconst>>s)&1;
+ if(c) {
+ memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
+ }
+ }
+ if(!c) {
+ #ifdef RAM_OFFSET
+ map=get_reg(i_regs->regmap,ROREG);
+ if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
+ #endif
+ emit_shlimm(addr,3,temp);
+ if (opcode[i]==0x22||opcode[i]==0x26) {
+ emit_andimm(addr,0xFFFFFFFC,temp2); // LWL/LWR
+ }else{
+ emit_andimm(addr,0xFFFFFFF8,temp2); // LDL/LDR
+ }
+ jaddr=emit_fastpath_cmp_jump(i,temp2,&fastload_reg_override);
+ }
+ else {
+ if(ram_offset&&memtarget) {
+ emit_addimm(temp2,ram_offset,HOST_TEMPREG);
+ fastload_reg_override=HOST_TEMPREG;
+ }
+ if (opcode[i]==0x22||opcode[i]==0x26) {
+ emit_movimm(((constmap[i][s]+offset)<<3)&24,temp); // LWL/LWR
+ }else{
+ emit_movimm(((constmap[i][s]+offset)<<3)&56,temp); // LDL/LDR
+ }
+ }
+ if (opcode[i]==0x22||opcode[i]==0x26) { // LWL/LWR
+ if(!c||memtarget) {
+ int a=temp2;
+ if(fastload_reg_override) a=fastload_reg_override;
+ //emit_readword_indexed((int)rdram-0x80000000,temp2,temp2);
+ emit_readword_indexed_tlb(0,a,map,temp2);
+ if(jaddr) add_stub(LOADW_STUB,jaddr,(int)out,i,temp2,(int)i_regs,ccadj[i],reglist);
+ }
+ else
+ inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj[i],reglist);
+ if(rt1[i]) {
+ assert(tl>=0);
+ emit_andimm(temp,24,temp);
+#ifdef BIG_ENDIAN_MIPS
+ if (opcode[i]==0x26) // LWR
+#else
+ if (opcode[i]==0x22) // LWL
+#endif
+ emit_xorimm(temp,24,temp);
+ emit_movimm(-1,HOST_TEMPREG);
+ if (opcode[i]==0x26) {
+ emit_shr(temp2,temp,temp2);
+ emit_bic_lsr(tl,HOST_TEMPREG,temp,tl);
+ }else{
+ emit_shl(temp2,temp,temp2);
+ emit_bic_lsl(tl,HOST_TEMPREG,temp,tl);
+ }
+ emit_or(temp2,tl,tl);
+ }
+ //emit_storereg(rt1[i],tl); // DEBUG
+ }
+ if (opcode[i]==0x1A||opcode[i]==0x1B) { // LDL/LDR
+ // FIXME: little endian, fastload_reg_override
+ int temp2h=get_reg(i_regs->regmap,FTEMP|64);
+ if(!c||memtarget) {
+ //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,temp2,temp2h);
+ //emit_readword_indexed((int)rdram-0x7FFFFFFC,temp2,temp2);
+ emit_readdword_indexed_tlb(0,temp2,map,temp2h,temp2);
+ if(jaddr) add_stub(LOADD_STUB,jaddr,(int)out,i,temp2,(int)i_regs,ccadj[i],reglist);
+ }
+ else
+ inline_readstub(LOADD_STUB,i,(constmap[i][s]+offset)&0xFFFFFFF8,i_regs->regmap,FTEMP,ccadj[i],reglist);
+ if(rt1[i]) {
+ assert(th>=0);
+ assert(tl>=0);
+ emit_testimm(temp,32);
+ emit_andimm(temp,24,temp);
+ if (opcode[i]==0x1A) { // LDL
+ emit_rsbimm(temp,32,HOST_TEMPREG);
+ emit_shl(temp2h,temp,temp2h);
+ emit_orrshr(temp2,HOST_TEMPREG,temp2h);
+ emit_movimm(-1,HOST_TEMPREG);
+ emit_shl(temp2,temp,temp2);
+ emit_cmove_reg(temp2h,th);
+ emit_biceq_lsl(tl,HOST_TEMPREG,temp,tl);
+ emit_bicne_lsl(th,HOST_TEMPREG,temp,th);
+ emit_orreq(temp2,tl,tl);
+ emit_orrne(temp2,th,th);
+ }
+ if (opcode[i]==0x1B) { // LDR
+ emit_xorimm(temp,24,temp);
+ emit_rsbimm(temp,32,HOST_TEMPREG);
+ emit_shr(temp2,temp,temp2);
+ emit_orrshl(temp2h,HOST_TEMPREG,temp2);
+ emit_movimm(-1,HOST_TEMPREG);
+ emit_shr(temp2h,temp,temp2h);
+ emit_cmovne_reg(temp2,tl);
+ emit_bicne_lsr(th,HOST_TEMPREG,temp,th);
+ emit_biceq_lsr(tl,HOST_TEMPREG,temp,tl);
+ emit_orrne(temp2h,th,th);
+ emit_orreq(temp2h,tl,tl);
+ }
+ }
+ }
+}
+#define loadlr_assemble loadlr_assemble_arm
+
+static void cop0_assemble(int i,struct regstat *i_regs)
+{
+ if(opcode2[i]==0) // MFC0
+ {
+ signed char t=get_reg(i_regs->regmap,rt1[i]);
+ char copr=(source[i]>>11)&0x1f;
+ //assert(t>=0); // Why does this happen? OOT is weird
+ if(t>=0&&rt1[i]!=0) {
+ emit_readword((int)&reg_cop0+copr*4,t);
+ }
+ }
+ else if(opcode2[i]==4) // MTC0
+ {
+ signed char s=get_reg(i_regs->regmap,rs1[i]);
+ char copr=(source[i]>>11)&0x1f;
+ assert(s>=0);
+ wb_register(rs1[i],i_regs->regmap,i_regs->dirty,i_regs->is32);
+ if(copr==9||copr==11||copr==12||copr==13) {
+ emit_readword((int)&last_count,HOST_TEMPREG);
+ emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc
+ emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
+ emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
+ emit_writeword(HOST_CCREG,(int)&Count);
+ }
+ // What a mess. The status register (12) can enable interrupts,
+ // so needs a special case to handle a pending interrupt.
+ // The interrupt must be taken immediately, because a subsequent
+ // instruction might disable interrupts again.
+ if(copr==12||copr==13) {
+ if (is_delayslot) {
+ // burn cycles to cause cc_interrupt, which will
+ // reschedule next_interupt. Relies on CCREG from above.
+ assem_debug("MTC0 DS %d\n", copr);
+ emit_writeword(HOST_CCREG,(int)&last_count);
+ emit_movimm(0,HOST_CCREG);
+ emit_storereg(CCREG,HOST_CCREG);
+ emit_loadreg(rs1[i],1);
+ emit_movimm(copr,0);
+ emit_call((int)pcsx_mtc0_ds);
+ emit_loadreg(rs1[i],s);
+ return;
+ }
+ emit_movimm(start+i*4+4,HOST_TEMPREG);
+ emit_writeword(HOST_TEMPREG,(int)&pcaddr);
+ emit_movimm(0,HOST_TEMPREG);
+ emit_writeword(HOST_TEMPREG,(int)&pending_exception);
+ }
+ //else if(copr==12&&is_delayslot) emit_call((int)MTC0_R12);
+ //else
+ if(s==HOST_CCREG)
+ emit_loadreg(rs1[i],1);
+ else if(s!=1)
+ emit_mov(s,1);
+ emit_movimm(copr,0);
+ emit_call((int)pcsx_mtc0);
+ if(copr==9||copr==11||copr==12||copr==13) {
+ emit_readword((int)&Count,HOST_CCREG);
+ emit_readword((int)&next_interupt,HOST_TEMPREG);
+ emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
+ emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
+ emit_writeword(HOST_TEMPREG,(int)&last_count);
+ emit_storereg(CCREG,HOST_CCREG);
+ }
+ if(copr==12||copr==13) {
+ assert(!is_delayslot);
+ emit_readword((int)&pending_exception,14);
+ emit_test(14,14);
+ emit_jne((int)&do_interrupt);
+ }
+ emit_loadreg(rs1[i],s);
+ if(get_reg(i_regs->regmap,rs1[i]|64)>=0)
+ emit_loadreg(rs1[i]|64,get_reg(i_regs->regmap,rs1[i]|64));
+ cop1_usable=0;
+ }
+ else
+ {
+ assert(opcode2[i]==0x10);
+ if((source[i]&0x3f)==0x10) // RFE
+ {
+ emit_readword((int)&Status,0);
+ emit_andimm(0,0x3c,1);
+ emit_andimm(0,~0xf,0);
+ emit_orrshr_imm(1,2,0);
+ emit_writeword(0,(int)&Status);
+ }
+ }
+}
+
+static void cop2_get_dreg(u_int copr,signed char tl,signed char temp)
+{
+ switch (copr) {
+ case 1:
+ case 3:
+ case 5:
+ case 8:
+ case 9:
+ case 10:
+ case 11:
+ emit_readword((int)&reg_cop2d[copr],tl);
+ emit_signextend16(tl,tl);
+ emit_writeword(tl,(int)&reg_cop2d[copr]); // hmh
+ break;
+ case 7:
+ case 16:
+ case 17:
+ case 18:
+ case 19:
+ emit_readword((int)&reg_cop2d[copr],tl);
+ emit_andimm(tl,0xffff,tl);
+ emit_writeword(tl,(int)&reg_cop2d[copr]);
+ break;
+ case 15:
+ emit_readword((int)&reg_cop2d[14],tl); // SXY2
+ emit_writeword(tl,(int)&reg_cop2d[copr]);
+ break;
+ case 28:
+ case 29:
+ emit_readword((int)&reg_cop2d[9],temp);
+ emit_testimm(temp,0x8000); // do we need this?
+ emit_andimm(temp,0xf80,temp);
+ emit_andne_imm(temp,0,temp);
+ emit_shrimm(temp,7,tl);
+ emit_readword((int)&reg_cop2d[10],temp);
+ emit_testimm(temp,0x8000);
+ emit_andimm(temp,0xf80,temp);
+ emit_andne_imm(temp,0,temp);
+ emit_orrshr_imm(temp,2,tl);
+ emit_readword((int)&reg_cop2d[11],temp);
+ emit_testimm(temp,0x8000);
+ emit_andimm(temp,0xf80,temp);
+ emit_andne_imm(temp,0,temp);
+ emit_orrshl_imm(temp,3,tl);
+ emit_writeword(tl,(int)&reg_cop2d[copr]);
+ break;
+ default:
+ emit_readword((int)&reg_cop2d[copr],tl);
+ break;
+ }
+}
+
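+// MTC2: write a GTE data register, emulating the special cases (the SXY
+// FIFO push for reg 15, IRGB unpacking for reg 28, leading-zero/one count
+// for reg 30, and reg 31 being read-only).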
+static void cop2_put_dreg(u_int copr,signed char sl,signed char temp)
+{
+ switch (copr) {
+ case 15:
+ emit_readword((int)&reg_cop2d[13],temp); // SXY1
+ emit_writeword(sl,(int)&reg_cop2d[copr]);
+ emit_writeword(temp,(int)&reg_cop2d[12]); // SXY0
+ emit_readword((int)&reg_cop2d[14],temp); // SXY2
+ emit_writeword(sl,(int)&reg_cop2d[14]);
+ emit_writeword(temp,(int)&reg_cop2d[13]); // SXY1
+ break;
+ case 28:
+ emit_andimm(sl,0x001f,temp);
+ emit_shlimm(temp,7,temp);
+ emit_writeword(temp,(int)&reg_cop2d[9]);
+ emit_andimm(sl,0x03e0,temp);
+ emit_shlimm(temp,2,temp);
+ emit_writeword(temp,(int)&reg_cop2d[10]);
+ emit_andimm(sl,0x7c00,temp);
+ emit_shrimm(temp,3,temp);
+ emit_writeword(temp,(int)&reg_cop2d[11]);
+ emit_writeword(sl,(int)&reg_cop2d[28]);
+ break;
+ case 30:
+ emit_movs(sl,temp);
+ emit_mvnmi(temp,temp);
+#ifdef HAVE_ARMV5
+ emit_clz(temp,temp);
+#else
+ emit_movs(temp,HOST_TEMPREG);
+ emit_movimm(0,temp);
+ emit_jeq((int)out+4*4);
+ emit_addpl_imm(temp,1,temp);
+ emit_lslpls_imm(HOST_TEMPREG,1,HOST_TEMPREG);
+ emit_jns((int)out-2*4);
+#endif
+ emit_writeword(sl,(int)&reg_cop2d[30]);
+ emit_writeword(temp,(int)&reg_cop2d[31]);
+ break;
+ case 31:
+ break;
+ default:
+ emit_writeword(sl,(int)&reg_cop2d[copr]);
+ break;
+ }
+}
+
+static void cop2_assemble(int i,struct regstat *i_regs)
+{
+ u_int copr=(source[i]>>11)&0x1f;
+ signed char temp=get_reg(i_regs->regmap,-1);
+ if (opcode2[i]==0) { // MFC2
+ signed char tl=get_reg(i_regs->regmap,rt1[i]);
+ if(tl>=0&&rt1[i]!=0)
+ cop2_get_dreg(copr,tl,temp);
+ }
+ else if (opcode2[i]==4) { // MTC2
+ signed char sl=get_reg(i_regs->regmap,rs1[i]);
+ cop2_put_dreg(copr,sl,temp);
+ }
+ else if (opcode2[i]==2) // CFC2
+ {
+ signed char tl=get_reg(i_regs->regmap,rt1[i]);
+ if(tl>=0&&rt1[i]!=0)
+ emit_readword((int)&reg_cop2c[copr],tl);
+ }
+ else if (opcode2[i]==6) // CTC2
+ {
+ signed char sl=get_reg(i_regs->regmap,rs1[i]);
+ switch(copr) {
+ case 4:
+ case 12:
+ case 20:
+ case 26:
+ case 27:
+ case 29:
+ case 30:
+ emit_signextend16(sl,temp);
+ break;
+ case 31:
+ //value = value & 0x7ffff000;
+ //if (value & 0x7f87e000) value |= 0x80000000;
+ emit_shrimm(sl,12,temp);
+ emit_shlimm(temp,12,temp);
+ emit_testimm(temp,0x7f000000);
+ emit_testeqimm(temp,0x00870000);
+ emit_testeqimm(temp,0x0000e000);
+ emit_orrne_imm(temp,0x80000000,temp);
+ break;
+ default:
+ temp=sl;
+ break;
+ }
+ emit_writeword(temp,(int)&reg_cop2c[copr]);
+ assert(sl>=0);
+ }
+}
+
+static void c2op_prologue(u_int op,u_int reglist)
+{
+ save_regs_all(reglist);
+#ifdef PCNT
+ emit_movimm(op,0);
+ emit_call((int)pcnt_gte_start);
+#endif
+ emit_addimm(FP,(int)&psxRegs.CP2D.r[0]-(int)&dynarec_local,0); // cop2 regs
+}
+
+static void c2op_epilogue(u_int op,u_int reglist)
+{
+#ifdef PCNT
+ emit_movimm(op,0);
+ emit_call((int)pcnt_gte_end);
+#endif
+ restore_regs_all(reglist);
+}
+
+static void c2op_call_MACtoIR(int lm,int need_flags)
+{
+ if(need_flags)
+ emit_call((int)(lm?gteMACtoIR_lm1:gteMACtoIR_lm0));
+ else
+ emit_call((int)(lm?gteMACtoIR_lm1_nf:gteMACtoIR_lm0_nf));
+}
+
+static void c2op_call_rgb_func(void *func,int lm,int need_ir,int need_flags)
+{
+ emit_call((int)func);
+ // func is C code and trashes r0
+ emit_addimm(FP,(int)&psxRegs.CP2D.r[0]-(int)&dynarec_local,0);
+ if(need_flags||need_ir)
+ c2op_call_MACtoIR(lm,need_flags);
+ emit_call((int)(need_flags?gteMACtoRGB:gteMACtoRGB_nf));
+}
+
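+// GTE operations: save the caller-save registers, point r0 at the GTE data
+// registers and dispatch to the handlers, using the hand-optimized asm/NEON
+// paths where available and skipping flag/IR updates that liveness analysis
+// shows are not needed.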
+static void c2op_assemble(int i,struct regstat *i_regs)
+{
+ u_int c2op=source[i]&0x3f;
+ u_int hr,reglist_full=0,reglist;
+ int need_flags,need_ir;
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(i_regs->regmap[hr]>=0) reglist_full|=1<<hr;
+ }
+ reglist=reglist_full&CALLER_SAVE_REGS;
+
+ if (gte_handlers[c2op]!=NULL) {
+ need_flags=!(gte_unneeded[i+1]>>63); // +1 because of how liveness detection works
+ need_ir=(gte_unneeded[i+1]&0xe00)!=0xe00;
+ assem_debug("gte op %08x, unneeded %016llx, need_flags %d, need_ir %d\n",
+ source[i],gte_unneeded[i+1],need_flags,need_ir);
+ if(new_dynarec_hacks&NDHACK_GTE_NO_FLAGS)
+ need_flags=0;
+ int shift = (source[i] >> 19) & 1;
+ int lm = (source[i] >> 10) & 1;
+ switch(c2op) {
+#ifndef DRC_DBG
+ case GTE_MVMVA: {
+#ifdef HAVE_ARMV5
+ int v = (source[i] >> 15) & 3;
+ int cv = (source[i] >> 13) & 3;
+ int mx = (source[i] >> 17) & 3;
+ reglist=reglist_full&(CALLER_SAVE_REGS|0xf0); // +{r4-r7}
+ c2op_prologue(c2op,reglist);
+ /* r4,r5 = VXYZ(v) packed; r6 = &MX11(mx); r7 = &CV1(cv) */
+ if(v<3)
+ emit_ldrd(v*8,0,4);
+ else {
+ emit_movzwl_indexed(9*4,0,4); // gteIR
+ emit_movzwl_indexed(10*4,0,6);
+ emit_movzwl_indexed(11*4,0,5);
+ emit_orrshl_imm(6,16,4);
+ }
+ if(mx<3)
+ emit_addimm(0,32*4+mx*8*4,6);
+ else
+ emit_readword((int)&zeromem_ptr,6);
+ if(cv<3)
+ emit_addimm(0,32*4+(cv*8+5)*4,7);
+ else
+ emit_readword((int)&zeromem_ptr,7);
+#ifdef __ARM_NEON__
+ emit_movimm(source[i],1); // opcode
+ emit_call((int)gteMVMVA_part_neon);
+ if(need_flags) {
+ emit_movimm(lm,1);
+ emit_call((int)gteMACtoIR_flags_neon);
+ }
+#else
+ if(cv==3&&shift)
+ emit_call((int)gteMVMVA_part_cv3sh12_arm);
+ else {
+ emit_movimm(shift,1);
+ emit_call((int)(need_flags?gteMVMVA_part_arm:gteMVMVA_part_nf_arm));
+ }
+ if(need_flags||need_ir)
+ c2op_call_MACtoIR(lm,need_flags);
+#endif
+#else /* if not HAVE_ARMV5 */
+ c2op_prologue(c2op,reglist);
+ emit_movimm(source[i],1); // opcode
+ emit_writeword(1,(int)&psxRegs.code);
+ emit_call((int)(need_flags?gte_handlers[c2op]:gte_handlers_nf[c2op]));
+#endif
+ break;
+ }
+ case GTE_OP:
+ c2op_prologue(c2op,reglist);
+ emit_call((int)(shift?gteOP_part_shift:gteOP_part_noshift));
+ if(need_flags||need_ir) {
+ emit_addimm(FP,(int)&psxRegs.CP2D.r[0]-(int)&dynarec_local,0);
+ c2op_call_MACtoIR(lm,need_flags);
+ }
+ break;
+ case GTE_DPCS:
+ c2op_prologue(c2op,reglist);
+ c2op_call_rgb_func(shift?gteDPCS_part_shift:gteDPCS_part_noshift,lm,need_ir,need_flags);
+ break;
+ case GTE_INTPL:
+ c2op_prologue(c2op,reglist);
+ c2op_call_rgb_func(shift?gteINTPL_part_shift:gteINTPL_part_noshift,lm,need_ir,need_flags);
+ break;
+ case GTE_SQR:
+ c2op_prologue(c2op,reglist);
+ emit_call((int)(shift?gteSQR_part_shift:gteSQR_part_noshift));
+ if(need_flags||need_ir) {
+ emit_addimm(FP,(int)&psxRegs.CP2D.r[0]-(int)&dynarec_local,0);
+ c2op_call_MACtoIR(lm,need_flags);
+ }
+ break;
+ case GTE_DCPL:
+ c2op_prologue(c2op,reglist);
+ c2op_call_rgb_func(gteDCPL_part,lm,need_ir,need_flags);
+ break;
+ case GTE_GPF:
+ c2op_prologue(c2op,reglist);
+ c2op_call_rgb_func(shift?gteGPF_part_shift:gteGPF_part_noshift,lm,need_ir,need_flags);
+ break;
+ case GTE_GPL:
+ c2op_prologue(c2op,reglist);
+ c2op_call_rgb_func(shift?gteGPL_part_shift:gteGPL_part_noshift,lm,need_ir,need_flags);
+ break;
+#endif
+ default:
+ c2op_prologue(c2op,reglist);
+#ifdef DRC_DBG
+ emit_movimm(source[i],1); // opcode
+ emit_writeword(1,(int)&psxRegs.code);
+#endif
+ emit_call((int)(need_flags?gte_handlers[c2op]:gte_handlers_nf[c2op]));
+ break;
+ }
+ c2op_epilogue(c2op,reglist);
+ }
+}
+
+static void cop1_unusable(int i,struct regstat *i_regs)
+{
+  // XXX: should just do the exception instead
+ if(!cop1_usable) {
+ int jaddr=(int)out;
+ emit_jmp(0);
+ add_stub(FP_STUB,jaddr,(int)out,i,0,(int)i_regs,is_delayslot,0);
+ cop1_usable=1;
+ }
+}
+
+static void cop1_assemble(int i,struct regstat *i_regs)
+{
+ cop1_unusable(i, i_regs);
+}
+
+static void fconv_assemble_arm(int i,struct regstat *i_regs)
+{
+ cop1_unusable(i, i_regs);
+}
+#define fconv_assemble fconv_assemble_arm
+
+static void fcomp_assemble(int i,struct regstat *i_regs)
+{
+ cop1_unusable(i, i_regs);
+}
+
+static void float_assemble(int i,struct regstat *i_regs)
+{
+ cop1_unusable(i, i_regs);
+}
+
+static void multdiv_assemble_arm(int i,struct regstat *i_regs)
+{
+ // case 0x18: MULT
+ // case 0x19: MULTU
+ // case 0x1A: DIV
+ // case 0x1B: DIVU
+ // case 0x1C: DMULT
+ // case 0x1D: DMULTU
+ // case 0x1E: DDIV
+ // case 0x1F: DDIVU
+ if(rs1[i]&&rs2[i])
+ {
+ if((opcode2[i]&4)==0) // 32-bit
+ {
+ if(opcode2[i]==0x18) // MULT
+ {
+ signed char m1=get_reg(i_regs->regmap,rs1[i]);
+ signed char m2=get_reg(i_regs->regmap,rs2[i]);
+ signed char hi=get_reg(i_regs->regmap,HIREG);
+ signed char lo=get_reg(i_regs->regmap,LOREG);
+ assert(m1>=0);
+ assert(m2>=0);
+ assert(hi>=0);
+ assert(lo>=0);
+ emit_smull(m1,m2,hi,lo);
+ }
+ if(opcode2[i]==0x19) // MULTU
+ {
+ signed char m1=get_reg(i_regs->regmap,rs1[i]);
+ signed char m2=get_reg(i_regs->regmap,rs2[i]);
+ signed char hi=get_reg(i_regs->regmap,HIREG);
+ signed char lo=get_reg(i_regs->regmap,LOREG);
+ assert(m1>=0);
+ assert(m2>=0);
+ assert(hi>=0);
+ assert(lo>=0);
+ emit_umull(m1,m2,hi,lo);
+ }
+ if(opcode2[i]==0x1A) // DIV
+ {
+ signed char d1=get_reg(i_regs->regmap,rs1[i]);
+ signed char d2=get_reg(i_regs->regmap,rs2[i]);
+ assert(d1>=0);
+ assert(d2>=0);
+ signed char quotient=get_reg(i_regs->regmap,LOREG);
+ signed char remainder=get_reg(i_regs->regmap,HIREG);
+ assert(quotient>=0);
+ assert(remainder>=0);
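+        // Software signed division: take absolute values, use CLZ to align
+        // the divisor's top bit with bit 31, run a shift-and-subtract loop
+        // that accumulates quotient bits with ADCS, then restore the signs
+        // of the quotient and remainder.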
+ emit_movs(d1,remainder);
+ emit_movimm(0xffffffff,quotient);
+ emit_negmi(quotient,quotient); // .. quotient and ..
+ emit_negmi(remainder,remainder); // .. remainder for div0 case (will be negated back after jump)
+ emit_movs(d2,HOST_TEMPREG);
+ emit_jeq((int)out+52); // Division by zero
+ emit_negsmi(HOST_TEMPREG,HOST_TEMPREG);
+#ifdef HAVE_ARMV5
+ emit_clz(HOST_TEMPREG,quotient);
+ emit_shl(HOST_TEMPREG,quotient,HOST_TEMPREG);
+#else
+ emit_movimm(0,quotient);
+ emit_addpl_imm(quotient,1,quotient);
+ emit_lslpls_imm(HOST_TEMPREG,1,HOST_TEMPREG);
+ emit_jns((int)out-2*4);
+#endif
+ emit_orimm(quotient,1<<31,quotient);
+ emit_shr(quotient,quotient,quotient);
+ emit_cmp(remainder,HOST_TEMPREG);
+ emit_subcs(remainder,HOST_TEMPREG,remainder);
+ emit_adcs(quotient,quotient,quotient);
+ emit_shrimm(HOST_TEMPREG,1,HOST_TEMPREG);
+ emit_jcc((int)out-16); // -4
+ emit_teq(d1,d2);
+ emit_negmi(quotient,quotient);
+ emit_test(d1,d1);
+ emit_negmi(remainder,remainder);
+ }
+ if(opcode2[i]==0x1B) // DIVU
+ {
+ signed char d1=get_reg(i_regs->regmap,rs1[i]); // dividend
+ signed char d2=get_reg(i_regs->regmap,rs2[i]); // divisor
+ assert(d1>=0);
+ assert(d2>=0);
+ signed char quotient=get_reg(i_regs->regmap,LOREG);
+ signed char remainder=get_reg(i_regs->regmap,HIREG);
+ assert(quotient>=0);
+ assert(remainder>=0);
+ emit_mov(d1,remainder);
+ emit_movimm(0xffffffff,quotient); // div0 case
+ emit_test(d2,d2);
+ emit_jeq((int)out+40); // Division by zero
+#ifdef HAVE_ARMV5
+ emit_clz(d2,HOST_TEMPREG);
+ emit_movimm(1<<31,quotient);
+ emit_shl(d2,HOST_TEMPREG,d2);
+#else
+ emit_movimm(0,HOST_TEMPREG);
+ emit_addpl_imm(HOST_TEMPREG,1,HOST_TEMPREG);
+ emit_lslpls_imm(d2,1,d2);
+ emit_jns((int)out-2*4);
+ emit_movimm(1<<31,quotient);
+#endif
+ emit_shr(quotient,HOST_TEMPREG,quotient);
+ emit_cmp(remainder,d2);
+ emit_subcs(remainder,d2,remainder);
+ emit_adcs(quotient,quotient,quotient);
+ emit_shrcc_imm(d2,1,d2);
+ emit_jcc((int)out-16); // -4
+ }
+ }
+ else // 64-bit
+ assert(0);
+ }
+ else
+ {
+    // Multiply by zero is zero.
+    // MIPS does not have a divide-by-zero exception;
+    // the result is undefined, so we return zero.
+ signed char hr=get_reg(i_regs->regmap,HIREG);
+ signed char lr=get_reg(i_regs->regmap,LOREG);
+ if(hr>=0) emit_zeroreg(hr);
+ if(lr>=0) emit_zeroreg(lr);
+ }
+}
+#define multdiv_assemble multdiv_assemble_arm
+
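+// mini_ht is a small hash table mapping likely return addresses to compiled
+// code, indexed by bits 3-7 of the address; the helpers below compute the
+// hash, probe the table and insert entries.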
+static void do_preload_rhash(int r) {
+ // Don't need this for ARM. On x86, this puts the value 0xf8 into the
+ // register. On ARM the hash can be done with a single instruction (below)
+}
+
+static void do_preload_rhtbl(int ht) {
+ emit_addimm(FP,(int)&mini_ht-(int)&dynarec_local,ht);
+}
+
+static void do_rhash(int rs,int rh) {
+ emit_andimm(rs,0xf8,rh);
+}
+
+static void do_miniht_load(int ht,int rh) {
+ assem_debug("ldr %s,[%s,%s]!\n",regname[rh],regname[ht],regname[rh]);
+ output_w32(0xe7b00000|rd_rn_rm(rh,ht,rh));
+}
+
+static void do_miniht_jump(int rs,int rh,int ht) {
+ emit_cmp(rh,rs);
+ emit_ldreq_indexed(ht,4,15);
+ #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
+ emit_mov(rs,7);
+ emit_jmp(jump_vaddr_reg[7]);
+ #else
+ emit_jmp(jump_vaddr_reg[rs]);
+ #endif
+}
+
+static void do_miniht_insert(u_int return_address,int rt,int temp) {
+ #ifndef HAVE_ARMV7
+ emit_movimm(return_address,rt); // PC into link register
+ add_to_linker((int)out,return_address,1);
+ emit_pcreladdr(temp);
+ emit_writeword(rt,(int)&mini_ht[(return_address&0xFF)>>3][0]);
+ emit_writeword(temp,(int)&mini_ht[(return_address&0xFF)>>3][1]);
+ #else
+ emit_movw(return_address&0x0000FFFF,rt);
+ add_to_linker((int)out,return_address,1);
+ emit_pcreladdr(temp);
+ emit_writeword(temp,(int)&mini_ht[(return_address&0xFF)>>3][1]);
+ emit_movt(return_address&0xFFFF0000,rt);
+ emit_writeword(rt,(int)&mini_ht[(return_address&0xFF)>>3][0]);
+ #endif
+}
+
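+// Write back registers that are dirty in the "pre" state but not in the
+// target state (and still live), including the sign-extended upper word for
+// 32-bit values, so their contents are not lost.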
+static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t is32_pre,uint64_t u,uint64_t uu)
+{
+ //if(dirty_pre==dirty) return;
+ int hr,reg;
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=EXCLUDE_REG) {
+ reg=pre[hr];
+ if(((~u)>>(reg&63))&1) {
+ if(reg>0) {
+ if(((dirty_pre&~dirty)>>hr)&1) {
+ if(reg>0&&reg<34) {
+ emit_storereg(reg,hr);
+ if( ((is32_pre&~uu)>>reg)&1 ) {
+ emit_sarimm(hr,31,HOST_TEMPREG);
+ emit_storereg(reg|64,HOST_TEMPREG);
+ }
+ }
+ else if(reg>=64) {
+ emit_storereg(reg,hr);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+
+/* using strd could possibly help but you'd have to allocate registers in pairs
+static void wb_invalidate_arm(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,uint64_t u,uint64_t uu)
+{
+ int hr;
+ int wrote=-1;
+ for(hr=HOST_REGS-1;hr>=0;hr--) {
+ if(hr!=EXCLUDE_REG) {
+ if(pre[hr]!=entry[hr]) {
+ if(pre[hr]>=0) {
+ if((dirty>>hr)&1) {
+ if(get_reg(entry,pre[hr])<0) {
+ if(pre[hr]<64) {
+ if(!((u>>pre[hr])&1)) {
+ if(hr<10&&(~hr&1)&&(pre[hr+1]<0||wrote==hr+1)) {
+ if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
+ emit_sarimm(hr,31,hr+1);
+ emit_strdreg(pre[hr],hr);
+ }
+ else
+ emit_storereg(pre[hr],hr);
+ }else{
+ emit_storereg(pre[hr],hr);
+ if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
+ emit_sarimm(hr,31,hr);
+ emit_storereg(pre[hr]|64,hr);
+ }
+ }
+ }
+ }else{
+ if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
+ emit_storereg(pre[hr],hr);
+ }
+ }
+ wrote=hr;
+ }
+ }
+ }
+ }
+ }
+ }
+ for(hr=0;hr<HOST_REGS;hr++) {
+ if(hr!=EXCLUDE_REG) {
+ if(pre[hr]!=entry[hr]) {
+ if(pre[hr]>=0) {
+ int nr;
+ if((nr=get_reg(entry,pre[hr]))>=0) {
+ emit_mov(hr,nr);
+ }
+ }
+ }
+ }
+ }
+}
+#define wb_invalidate wb_invalidate_arm
+*/
+
+static void mark_clear_cache(void *target)
+{
+ u_long offset = (char *)target - (char *)BASE_ADDR;
+ u_int mask = 1u << ((offset >> 12) & 31);
+ if (!(needs_clear_cache[offset >> 17] & mask)) {
+ char *start = (char *)((u_long)target & ~4095ul);
+ start_tcache_write(start, start + 4096);
+ needs_clear_cache[offset >> 17] |= mask;
+ }
+}
+
+// Clearing the cache is rather slow on ARM Linux, so mark the areas
+// that need to be cleared, and then only clear these areas once.
+static void do_clear_cache()
+{
+ int i,j;
+ for (i=0;i<(1<<(TARGET_SIZE_2-17));i++)
+ {
+ u_int bitmap=needs_clear_cache[i];
+ if(bitmap) {
+ u_int start,end;
+ for(j=0;j<32;j++)
+ {
+ if(bitmap&(1<<j)) {
+ start=(u_int)BASE_ADDR+i*131072+j*4096;
+ end=start+4095;
+ j++;
+ while(j<32) {
+ if(bitmap&(1<<j)) {
+ end+=4096;
+ j++;
+ }else{
+ end_tcache_write((void *)start,(void *)end);
+ break;
+ }
+ }
+ }
+ }
+ needs_clear_cache[i]=0;
+ }
+ }
+}
+
+// CPU-architecture-specific initialization
+static void arch_init() {
+}
+
+// vim:shiftwidth=2:expandtab