From 6f173b35c963ed131293a898b156c6b51c2c0fe6 Mon Sep 17 00:00:00 2001
From: twinaphex
Date: Tue, 27 Sep 2016 02:56:42 +0200
Subject: Rearrange files for new_dynarec

---
 libpcsxcore/new_dynarec/arm/assem_arm.c       | 4143 +++++++++++++++++++++++++
 libpcsxcore/new_dynarec/arm/assem_arm.h       |   57 +
 libpcsxcore/new_dynarec/arm/linkage_arm.S     |  866 ++++++
 libpcsxcore/new_dynarec/arm/linkage_offsets.h |   41 +
 4 files changed, 5107 insertions(+)
 create mode 100644 libpcsxcore/new_dynarec/arm/assem_arm.c
 create mode 100644 libpcsxcore/new_dynarec/arm/assem_arm.h
 create mode 100644 libpcsxcore/new_dynarec/arm/linkage_arm.S
 create mode 100644 libpcsxcore/new_dynarec/arm/linkage_offsets.h

diff --git a/libpcsxcore/new_dynarec/arm/assem_arm.c b/libpcsxcore/new_dynarec/arm/assem_arm.c
new file mode 100644
index 0000000..db1d2af
--- /dev/null
+++ b/libpcsxcore/new_dynarec/arm/assem_arm.c
@@ -0,0 +1,4143 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ *   Mupen64plus/PCSX - assem_arm.c                                         *
+ *   Copyright (C) 2009-2011 Ari64                                          *
+ *   Copyright (C) 2010-2011 Gražvydas "notaz" Ignotas                      *
+ *                                                                          *
+ *   This program is free software; you can redistribute it and/or modify  *
+ *   it under the terms of the GNU General Public License as published by  *
+ *   the Free Software Foundation; either version 2 of the License, or     *
+ *   (at your option) any later version.                                   *
+ *                                                                          *
+ *   This program is distributed in the hope that it will be useful,       *
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
+ *   GNU General Public License for more details.                          *
+ *                                                                          *
+ *   You should have received a copy of the GNU General Public License     *
+ *   along with this program; if not, write to the                         *
+ *   Free Software Foundation, Inc.,                                       *
+ *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include "../../gte.h" +#define FLAGLESS +#include "../../gte.h" +#undef FLAGLESS +#include "../../gte_arm.h" +#include "../../gte_neon.h" +#include "pcnt.h" +#include "arm_features.h" + +#if defined(BASE_ADDR_FIXED) +#elif defined(BASE_ADDR_DYNAMIC) +char *translation_cache; +#else +char translation_cache[1 << TARGET_SIZE_2] __attribute__((aligned(4096))); +#endif + +#ifndef __MACH__ +#define CALLER_SAVE_REGS 0x100f +#else +#define CALLER_SAVE_REGS 0x120f +#endif + +#define unused __attribute__((unused)) + +extern int cycle_count; +extern int last_count; +extern int pcaddr; +extern int pending_exception; +extern int branch_target; +extern uint64_t readmem_dword; +extern void *dynarec_local; +extern u_int mini_ht[32][2]; + +void indirect_jump_indexed(); +void indirect_jump(); +void do_interrupt(); +void jump_vaddr_r0(); +void jump_vaddr_r1(); +void jump_vaddr_r2(); +void jump_vaddr_r3(); +void jump_vaddr_r4(); +void jump_vaddr_r5(); +void jump_vaddr_r6(); +void jump_vaddr_r7(); +void jump_vaddr_r8(); +void jump_vaddr_r9(); +void jump_vaddr_r10(); +void jump_vaddr_r12(); + +const u_int jump_vaddr_reg[16] = { + (int)jump_vaddr_r0, + (int)jump_vaddr_r1, + (int)jump_vaddr_r2, + (int)jump_vaddr_r3, + (int)jump_vaddr_r4, + (int)jump_vaddr_r5, + (int)jump_vaddr_r6, + (int)jump_vaddr_r7, + (int)jump_vaddr_r8, + (int)jump_vaddr_r9, + (int)jump_vaddr_r10, + 0, + (int)jump_vaddr_r12, + 0, + 0, + 0}; + +void invalidate_addr_r0(); +void invalidate_addr_r1(); +void invalidate_addr_r2(); +void invalidate_addr_r3(); +void invalidate_addr_r4(); +void invalidate_addr_r5(); +void invalidate_addr_r6(); +void invalidate_addr_r7(); +void invalidate_addr_r8(); +void invalidate_addr_r9(); +void invalidate_addr_r10(); +void invalidate_addr_r12(); + +const u_int invalidate_addr_reg[16] = { + (int)invalidate_addr_r0, + (int)invalidate_addr_r1, + (int)invalidate_addr_r2, + (int)invalidate_addr_r3, + (int)invalidate_addr_r4, + (int)invalidate_addr_r5, + (int)invalidate_addr_r6, + (int)invalidate_addr_r7, + (int)invalidate_addr_r8, + (int)invalidate_addr_r9, + (int)invalidate_addr_r10, + 0, + (int)invalidate_addr_r12, + 0, + 0, + 0}; + +static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)]; + +/* Linker */ + +static void set_jump_target(int addr,u_int target) +{ + u_char *ptr=(u_char *)addr; + u_int *ptr2=(u_int *)ptr; + if(ptr[3]==0xe2) { + assert((target-(u_int)ptr2-8)<1024); + assert((addr&3)==0); + assert((target&3)==0); + *ptr2=(*ptr2&0xFFFFF000)|((target-(u_int)ptr2-8)>>2)|0xF00; + //printf("target=%x addr=%x insn=%x\n",target,addr,*ptr2); + } + else if(ptr[3]==0x72) { + // generated by emit_jno_unlikely + if((target-(u_int)ptr2-8)<1024) { + assert((addr&3)==0); + assert((target&3)==0); + *ptr2=(*ptr2&0xFFFFF000)|((target-(u_int)ptr2-8)>>2)|0xF00; + } + else if((target-(u_int)ptr2-8)<4096&&!((target-(u_int)ptr2-8)&15)) { + assert((addr&3)==0); + assert((target&3)==0); + *ptr2=(*ptr2&0xFFFFF000)|((target-(u_int)ptr2-8)>>4)|0xE00; + } + else *ptr2=(0x7A000000)|(((target-(u_int)ptr2-8)<<6)>>8); + } + else { + assert((ptr[3]&0x0e)==0xa); + *ptr2=(*ptr2&0xFF000000)|(((target-(u_int)ptr2-8)<<6)>>8); + } +} + +// This optionally copies the instruction from the target of the branch into +// the space before the branch. Works, but the difference in speed is +// usually insignificant. 
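For reference, the offset arithmetic used by set_jump_target above (and by the disabled fill-slot variant that follows) boils down to the standalone sketch below. ARM's B/BL instructions carry a signed 24-bit word offset in bits 23:0, and the CPU reads PC as the instruction's own address plus 8, which is why `-8` appears throughout; the `(x<<6)>>8` on an unsigned value is the same as `(x>>2)&0x00ffffff`. patch_branch24 is a name invented here, not a function in the patch:

    #include <stdint.h>
    #include <assert.h>

    /* Sketch: rewrite the 24-bit word offset of an ARM B/BL instruction
       located at 'src' so that it branches to 'target'. */
    static uint32_t patch_branch24(uint32_t insn, uint32_t src, uint32_t target)
    {
      int32_t byte_off = (int32_t)(target - src - 8); /* PC reads 8 bytes ahead */
      assert((byte_off & 3) == 0);                    /* targets are word-aligned */
      /* keep condition/opcode bits 31:24, replace the offset in bits 23:0 */
      return (insn & 0xff000000) | (((uint32_t)byte_off << 6) >> 8);
    }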
+#if 0
+static void set_jump_target_fillslot(int addr,u_int target,int copy)
+{
+  u_char *ptr=(u_char *)addr;
+  u_int *ptr2=(u_int *)ptr;
+  assert(!copy||ptr2[-1]==0xe28dd000);
+  if(ptr[3]==0xe2) {
+    assert(!copy);
+    assert((target-(u_int)ptr2-8)<4096);
+    *ptr2=(*ptr2&0xFFFFF000)|(target-(u_int)ptr2-8);
+  }
+  else {
+    assert((ptr[3]&0x0e)==0xa);
+    u_int target_insn=*(u_int *)target;
+    if((target_insn&0x0e100000)==0) { // ALU, no immediate, no flags
+      copy=0;
+    }
+    if((target_insn&0x0c100000)==0x04100000) { // Load
+      copy=0;
+    }
+    if(target_insn&0x08000000) {
+      copy=0;
+    }
+    if(copy) {
+      ptr2[-1]=target_insn;
+      target+=4;
+    }
+    *ptr2=(*ptr2&0xFF000000)|(((target-(u_int)ptr2-8)<<6)>>8);
+  }
+}
+#endif
+
+/* Literal pool */
+static void add_literal(int addr,int val)
+{
+  assert(literalcount<sizeof(literals)/sizeof(literals[0]));
+  literals[literalcount][0]=addr;
+  literals[literalcount][1]=val;
+  literalcount++;
+}
+
+static void *kill_pointer(void *stub)
+{
+  int *ptr=(int *)(stub+4);
+  assert((*ptr&0x0ff00000)==0x05900000);
+  u_int offset=*ptr&0xfff;
+  int **l_ptr=(void *)ptr+offset+8;
+  int *i_ptr=*l_ptr;
+  set_jump_target((int)i_ptr,(int)stub);
+  return i_ptr;
+}
+
+static int get_pointer(void *stub)
+{
+  //printf("get_pointer(%x)\n",(int)stub);
+  int *ptr=(int *)(stub+4);
+  assert((*ptr&0x0ff00000)==0x05900000);
+  u_int offset=*ptr&0xfff;
+  int **l_ptr=(void *)ptr+offset+8;
+  int *i_ptr=*l_ptr;
+  assert((*i_ptr&0x0f000000)==0x0a000000);
+  return (int)i_ptr+((*i_ptr<<8)>>6)+8;
+}
+
+// Find the "clean" entry point from a "dirty" entry point
+// by skipping past the call to verify_code
+static u_int get_clean_addr(int addr)
+{
+  int *ptr=(int *)addr;
+  #ifndef HAVE_ARMV7
+  ptr+=4;
+  #else
+  ptr+=6;
+  #endif
+  if((*ptr&0xFF000000)!=0xeb000000) ptr++;
+  assert((*ptr&0xFF000000)==0xeb000000); // bl instruction
+  ptr++;
+  if((*ptr&0xFF000000)==0xea000000) {
+    return (int)ptr+((*ptr<<8)>>6)+8; // follow jump
+  }
+  return (u_int)ptr;
+}
+
+static int verify_dirty(u_int *ptr)
+{
+  #ifndef HAVE_ARMV7
+  // get from literal pool
+  assert((*ptr&0xFFFF0000)==0xe59f0000);
+  u_int offset=*ptr&0xfff;
+  u_int *l_ptr=(void *)ptr+offset+8;
+  u_int source=l_ptr[0];
+  u_int copy=l_ptr[1];
+  u_int len=l_ptr[2];
+  ptr+=4;
+  #else
+  // ARMv7 movw/movt
+  assert((*ptr&0xFFF00000)==0xe3000000);
+  u_int source=(ptr[0]&0xFFF)+((ptr[0]>>4)&0xF000)+((ptr[2]<<16)&0xFFF0000)+((ptr[2]<<12)&0xF0000000);
+  u_int copy=(ptr[1]&0xFFF)+((ptr[1]>>4)&0xF000)+((ptr[3]<<16)&0xFFF0000)+((ptr[3]<<12)&0xF0000000);
+  u_int len=(ptr[4]&0xFFF)+((ptr[4]>>4)&0xF000);
+  ptr+=6;
+  #endif
+  if((*ptr&0xFF000000)!=0xeb000000) ptr++;
+  assert((*ptr&0xFF000000)==0xeb000000); // bl instruction
+  //printf("verify_dirty: %x %x %x\n",source,copy,len);
+  return !memcmp((void *)source,(void *)copy,len);
+}
+
+// This doesn't necessarily find all clean entry points, just
+// guarantees that it's not dirty
+static int isclean(int addr)
+{
+  #ifndef HAVE_ARMV7
+  u_int *ptr=((u_int *)addr)+4;
+  #else
+  u_int *ptr=((u_int *)addr)+6;
+  #endif
+  if((*ptr&0xFF000000)!=0xeb000000) ptr++;
+  if((*ptr&0xFF000000)!=0xeb000000) return 1; // bl instruction
+  if((int)ptr+((*ptr<<8)>>6)+8==(int)verify_code) return 0;
+  if((int)ptr+((*ptr<<8)>>6)+8==(int)verify_code_vm) return 0;
+  if((int)ptr+((*ptr<<8)>>6)+8==(int)verify_code_ds) return 0;
+  return 1;
+}
+
+// get source that block at addr was compiled from (host pointers)
+static void get_bounds(int addr,u_int *start,u_int *end)
+{
+  u_int *ptr=(u_int *)addr;
+  #ifndef HAVE_ARMV7
+  // get from literal pool
+  assert((*ptr&0xFFFF0000)==0xe59f0000);
+  u_int offset=*ptr&0xfff;
+  u_int *l_ptr=(void *)ptr+offset+8;
+  u_int source=l_ptr[0];
+  //u_int copy=l_ptr[1];
+  u_int len=l_ptr[2];
+  ptr+=4;
+  #else
+  // ARMv7 movw/movt
+  assert((*ptr&0xFFF00000)==0xe3000000);
+  u_int source=(ptr[0]&0xFFF)+((ptr[0]>>4)&0xF000)+((ptr[2]<<16)&0xFFF0000)+((ptr[2]<<12)&0xF0000000);
+  //u_int copy=(ptr[1]&0xFFF)+((ptr[1]>>4)&0xF000)+((ptr[3]<<16)&0xFFF0000)+((ptr[3]<<12)&0xF0000000);
+  u_int len=(ptr[4]&0xFFF)+((ptr[4]>>4)&0xF000);
+  ptr+=6;
+  #endif
+  if((*ptr&0xFF000000)!=0xeb000000) ptr++;
+  assert((*ptr&0xFF000000)==0xeb000000); // bl instruction
+  *start=source;
+  *end=source+len;
+}
+
+/* Register allocation */
+
+// 
Note: registers are allocated clean (unmodified state) +// if you intend to modify the register, you must call dirty_reg(). +static void alloc_reg(struct regstat *cur,int i,signed char reg) +{ + int r,hr; + int preferred_reg = (reg&7); + if(reg==CCREG) preferred_reg=HOST_CCREG; + if(reg==PTEMP||reg==FTEMP) preferred_reg=12; + + // Don't allocate unused registers + if((cur->u>>reg)&1) return; + + // see if it's already allocated + for(hr=0;hrregmap[hr]==reg) return; + } + + // Keep the same mapping if the register was already allocated in a loop + preferred_reg = loop_reg(i,reg,preferred_reg); + + // Try to allocate the preferred register + if(cur->regmap[preferred_reg]==-1) { + cur->regmap[preferred_reg]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[preferred_reg]; + if(r<64&&((cur->u>>r)&1)) { + cur->regmap[preferred_reg]=reg; + cur->dirty&=~(1<isconst&=~(1<=64&&((cur->uu>>(r&63))&1)) { + cur->regmap[preferred_reg]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]; + if(r>=0) { + if(r<64) { + if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;} + } + else + { + if((cur->uu>>(r&63))&1) {cur->regmap[hr]=-1;break;} + } + } + } + // Try to allocate any available register, but prefer + // registers that have not been used recently. + if(i>0) { + for(hr=0;hrregmap[hr]==-1) { + if(regs[i-1].regmap[hr]!=rs1[i-1]&®s[i-1].regmap[hr]!=rs2[i-1]&®s[i-1].regmap[hr]!=rt1[i-1]&®s[i-1].regmap[hr]!=rt2[i-1]) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==-1) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]); + //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]); + if(i>0) { + // Don't evict the cycle count at entry points, otherwise the entry + // stub will have to write it. 
+ if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2; + if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP)) hsn[CCREG]=2; + for(j=10;j>=3;j--) + { + // Alloc preferred register if available + if(hsn[r=cur->regmap[preferred_reg]&63]==j) { + for(hr=0;hrregmap[hr]&63)==r) { + cur->regmap[hr]=-1; + cur->dirty&=~(1<isconst&=~(1<regmap[preferred_reg]=reg; + return; + } + for(r=1;r<=MAXREG;r++) + { + if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) { + for(hr=0;hrregmap[hr]==r+64) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==r) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<=0;j--) + { + for(r=1;r<=MAXREG;r++) + { + if(hsn[r]==j) { + for(hr=0;hrregmap[hr]==r+64) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==r) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<uu>>reg)&1) return; + + // see if the upper half is already allocated + for(hr=0;hrregmap[hr]==reg+64) return; + } + + // Keep the same mapping if the register was already allocated in a loop + preferred_reg = loop_reg(i,reg,preferred_reg); + + // Try to allocate the preferred register + if(cur->regmap[preferred_reg]==-1) { + cur->regmap[preferred_reg]=reg|64; + cur->dirty&=~(1<isconst&=~(1<regmap[preferred_reg]; + if(r<64&&((cur->u>>r)&1)) { + cur->regmap[preferred_reg]=reg|64; + cur->dirty&=~(1<isconst&=~(1<=64&&((cur->uu>>(r&63))&1)) { + cur->regmap[preferred_reg]=reg|64; + cur->dirty&=~(1<isconst&=~(1<=0;hr--) + { + r=cur->regmap[hr]; + if(r>=0) { + if(r<64) { + if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;} + } + else + { + if((cur->uu>>(r&63))&1) {cur->regmap[hr]=-1;break;} + } + } + } + // Try to allocate any available register, but prefer + // registers that have not been used recently. + if(i>0) { + for(hr=0;hrregmap[hr]==-1) { + if(regs[i-1].regmap[hr]!=rs1[i-1]&®s[i-1].regmap[hr]!=rs2[i-1]&®s[i-1].regmap[hr]!=rt1[i-1]&®s[i-1].regmap[hr]!=rt2[i-1]) { + cur->regmap[hr]=reg|64; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==-1) { + cur->regmap[hr]=reg|64; + cur->dirty&=~(1<isconst&=~(1<regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]); + //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]); + if(i>0) { + // Don't evict the cycle count at entry points, otherwise the entry + // stub will have to write it. 
+ if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2; + if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP)) hsn[CCREG]=2; + for(j=10;j>=3;j--) + { + // Alloc preferred register if available + if(hsn[r=cur->regmap[preferred_reg]&63]==j) { + for(hr=0;hrregmap[hr]&63)==r) { + cur->regmap[hr]=-1; + cur->dirty&=~(1<isconst&=~(1<regmap[preferred_reg]=reg|64; + return; + } + for(r=1;r<=MAXREG;r++) + { + if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) { + for(hr=0;hrregmap[hr]==r+64) { + cur->regmap[hr]=reg|64; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==r) { + cur->regmap[hr]=reg|64; + cur->dirty&=~(1<isconst&=~(1<=0;j--) + { + for(r=1;r<=MAXREG;r++) + { + if(hsn[r]==j) { + for(hr=0;hrregmap[hr]==r+64) { + cur->regmap[hr]=reg|64; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==r) { + cur->regmap[hr]=reg|64; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==reg) return; + } + + // Try to allocate any available register + for(hr=HOST_REGS-1;hr>=0;hr--) { + if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<=0;hr--) + { + r=cur->regmap[hr]; + if(r>=0) { + if(r<64) { + if((cur->u>>r)&1) { + if(i==0||((unneeded_reg[i-1]>>r)&1)) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<uu>>(r&63))&1) { + if(i==0||((unneeded_reg_upper[i-1]>>(r&63))&1)) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]); + if(i>0) { + // Don't evict the cycle count at entry points, otherwise the entry + // stub will have to write it. + if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2; + if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP)) hsn[CCREG]=2; + for(j=10;j>=3;j--) + { + for(r=1;r<=MAXREG;r++) + { + if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) { + for(hr=0;hr2) { + if(cur->regmap[hr]==r+64) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<2) { + if(cur->regmap[hr]==r) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<=0;j--) + { + for(r=1;r<=MAXREG;r++) + { + if(hsn[r]==j) { + for(hr=0;hrregmap[hr]==r+64) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[hr]==r) { + cur->regmap[hr]=reg; + cur->dirty&=~(1<isconst&=~(1<regmap[n]==reg) { + dirty=(cur->dirty>>n)&1; + cur->regmap[n]=-1; + } + } + + cur->regmap[hr]=reg; + cur->dirty&=~(1<dirty|=dirty<isconst&=~(1<0) + { + if(imm<256) { + *encoded=((i&30)<<7)|imm; + return 1; + } + imm=(imm>>2)|(imm<<30);i-=2; + } + return 0; +} + +static void genimm_checked(u_int imm,u_int *encoded) +{ + u_int ret=genimm(imm,encoded); + assert(ret); + (void)ret; +} + +static u_int genjmp(u_int addr) +{ + int offset=addr-(int)out-8; + if(offset<-33554432||offset>=33554432) { + if (addr>2) { + SysPrintf("genjmp: out of range: %08x\n", offset); + exit(1); + } + return 0; + } + return ((u_int)offset>>2)&0xffffff; +} + +static void emit_mov(int rs,int rt) +{ + assem_debug("mov %s,%s\n",regname[rt],regname[rs]); + output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)); +} + +static void emit_movs(int rs,int rt) +{ + assem_debug("movs %s,%s\n",regname[rt],regname[rs]); + output_w32(0xe1b00000|rd_rn_rm(rt,0,rs)); +} + +static void emit_add(int rs1,int rs2,int rt) +{ + assem_debug("add %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0xe0800000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_adds(int rs1,int rs2,int rt) +{ + 
assem_debug("adds %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0xe0900000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_adcs(int rs1,int rs2,int rt) +{ + assem_debug("adcs %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0xe0b00000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_sbc(int rs1,int rs2,int rt) +{ + assem_debug("sbc %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0xe0c00000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_sbcs(int rs1,int rs2,int rt) +{ + assem_debug("sbcs %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0xe0d00000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_neg(int rs, int rt) +{ + assem_debug("rsb %s,%s,#0\n",regname[rt],regname[rs]); + output_w32(0xe2600000|rd_rn_rm(rt,rs,0)); +} + +static void emit_negs(int rs, int rt) +{ + assem_debug("rsbs %s,%s,#0\n",regname[rt],regname[rs]); + output_w32(0xe2700000|rd_rn_rm(rt,rs,0)); +} + +static void emit_sub(int rs1,int rs2,int rt) +{ + assem_debug("sub %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0xe0400000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_subs(int rs1,int rs2,int rt) +{ + assem_debug("subs %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0xe0500000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_zeroreg(int rt) +{ + assem_debug("mov %s,#0\n",regname[rt]); + output_w32(0xe3a00000|rd_rn_rm(rt,0,0)); +} + +static void emit_loadlp(u_int imm,u_int rt) +{ + add_literal((int)out,imm); + assem_debug("ldr %s,pc+? [=%x]\n",regname[rt],imm); + output_w32(0xe5900000|rd_rn_rm(rt,15,0)); +} + +static void emit_movw(u_int imm,u_int rt) +{ + assert(imm<65536); + assem_debug("movw %s,#%d (0x%x)\n",regname[rt],imm,imm); + output_w32(0xe3000000|rd_rn_rm(rt,0,0)|(imm&0xfff)|((imm<<4)&0xf0000)); +} + +static void emit_movt(u_int imm,u_int rt) +{ + assem_debug("movt %s,#%d (0x%x)\n",regname[rt],imm&0xffff0000,imm&0xffff0000); + output_w32(0xe3400000|rd_rn_rm(rt,0,0)|((imm>>16)&0xfff)|((imm>>12)&0xf0000)); +} + +static void emit_movimm(u_int imm,u_int rt) +{ + u_int armval; + if(genimm(imm,&armval)) { + assem_debug("mov %s,#%d\n",regname[rt],imm); + output_w32(0xe3a00000|rd_rn_rm(rt,0,0)|armval); + }else if(genimm(~imm,&armval)) { + assem_debug("mvn %s,#%d\n",regname[rt],imm); + output_w32(0xe3e00000|rd_rn_rm(rt,0,0)|armval); + }else if(imm<65536) { + #ifndef HAVE_ARMV7 + assem_debug("mov %s,#%d\n",regname[rt],imm&0xFF00); + output_w32(0xe3a00000|rd_rn_imm_shift(rt,0,imm>>8,8)); + assem_debug("add %s,%s,#%d\n",regname[rt],regname[rt],imm&0xFF); + output_w32(0xe2800000|rd_rn_imm_shift(rt,rt,imm&0xff,0)); + #else + emit_movw(imm,rt); + #endif + }else{ + #ifndef HAVE_ARMV7 + emit_loadlp(imm,rt); + #else + emit_movw(imm&0x0000FFFF,rt); + emit_movt(imm&0xFFFF0000,rt); + #endif + } +} + +static void emit_pcreladdr(u_int rt) +{ + assem_debug("add %s,pc,#?\n",regname[rt]); + output_w32(0xe2800000|rd_rn_rm(rt,15,0)); +} + +static void emit_loadreg(int r, int hr) +{ + if(r&64) { + SysPrintf("64bit load in 32bit mode!\n"); + assert(0); + return; + } + if((r&63)==0) + emit_zeroreg(hr); + else { + int addr=((int)reg)+((r&63)<>4); + if((r&63)==HIREG) addr=(int)&hi+((r&64)>>4); + if((r&63)==LOREG) addr=(int)&lo+((r&64)>>4); + if(r==CCREG) addr=(int)&cycle_count; + if(r==CSREG) addr=(int)&Status; + if(r==FSREG) addr=(int)&FCR31; + if(r==INVCP) addr=(int)&invc_ptr; + u_int offset = addr-(u_int)&dynarec_local; + assert(offset<4096); + assem_debug("ldr %s,fp+%d\n",regname[hr],offset); + output_w32(0xe5900000|rd_rn_rm(hr,FP,0)|offset); + } +} + 
+static void emit_storereg(int r, int hr)
+{
+  if(r&64) {
+    SysPrintf("64bit store in 32bit mode!\n");
+    assert(0);
+    return;
+  }
+  int addr=((int)reg)+((r&63)<<3)+((r&64)>>4);
+  if((r&63)==HIREG) addr=(int)&hi+((r&64)>>4);
+  if((r&63)==LOREG) addr=(int)&lo+((r&64)>>4);
+  if(r==CCREG) addr=(int)&cycle_count;
+  if(r==FSREG) addr=(int)&FCR31;
+  u_int offset = addr-(u_int)&dynarec_local;
+  assert(offset<4096);
+  assem_debug("str %s,fp+%d\n",regname[hr],offset);
+  output_w32(0xe5800000|rd_rn_rm(hr,FP,0)|offset);
+}
+
+static void emit_test(int rs, int rt)
+{
+  assem_debug("tst %s,%s\n",regname[rs],regname[rt]);
+  output_w32(0xe1100000|rd_rn_rm(0,rs,rt));
+}
+
+static void emit_testimm(int rs,int imm)
+{
+  u_int armval;
+  assem_debug("tst %s,#%d\n",regname[rs],imm);
+  genimm_checked(imm,&armval);
+  output_w32(0xe3100000|rd_rn_rm(0,rs,0)|armval);
+}
+
+static void emit_testeqimm(int rs,int imm)
+{
+  u_int armval;
+  assem_debug("tsteq %s,$%d\n",regname[rs],imm);
+  genimm_checked(imm,&armval);
+  output_w32(0x03100000|rd_rn_rm(0,rs,0)|armval);
+}
+
+static void emit_not(int rs,int rt)
+{
+  assem_debug("mvn %s,%s\n",regname[rt],regname[rs]);
+  output_w32(0xe1e00000|rd_rn_rm(rt,0,rs));
+}
+
+static void emit_mvnmi(int rs,int rt)
+{
+  assem_debug("mvnmi %s,%s\n",regname[rt],regname[rs]);
+  output_w32(0x41e00000|rd_rn_rm(rt,0,rs));
+}
+
+static void emit_and(u_int rs1,u_int rs2,u_int rt)
+{
+  assem_debug("and %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+  output_w32(0xe0000000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_or(u_int rs1,u_int rs2,u_int rt)
+{
+  assem_debug("orr %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+  output_w32(0xe1800000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_or_and_set_flags(int rs1,int rs2,int rt)
+{
+  assem_debug("orrs %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+  output_w32(0xe1900000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_orrshl_imm(u_int rs,u_int imm,u_int rt)
+{
+  assert(rs<16);
+  assert(rt<16);
+  assert(imm<32);
+  assem_debug("orr %s,%s,%s,lsl #%d\n",regname[rt],regname[rt],regname[rs],imm);
+  output_w32(0xe1800000|rd_rn_rm(rt,rt,rs)|(imm<<7));
+}
+
+static void emit_orrshr_imm(u_int rs,u_int imm,u_int rt)
+{
+  assert(rs<16);
+  assert(rt<16);
+  assert(imm<32);
+  assem_debug("orr %s,%s,%s,lsr #%d\n",regname[rt],regname[rt],regname[rs],imm);
+  output_w32(0xe1800020|rd_rn_rm(rt,rt,rs)|(imm<<7));
+}
+
+static void emit_xor(u_int rs1,u_int rs2,u_int rt)
+{
+  assem_debug("eor %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]);
+  output_w32(0xe0200000|rd_rn_rm(rt,rs1,rs2));
+}
+
+static void emit_addimm(u_int rs,int imm,u_int rt)
+{
+  assert(rs<16);
+  assert(rt<16);
+  if(imm!=0) {
+    u_int armval;
+    if(genimm(imm,&armval)) {
+      assem_debug("add %s,%s,#%d\n",regname[rt],regname[rs],imm);
+      output_w32(0xe2800000|rd_rn_rm(rt,rs,0)|armval);
+    }else if(genimm(-imm,&armval)) {
+      assem_debug("sub %s,%s,#%d\n",regname[rt],regname[rs],-imm);
+      output_w32(0xe2400000|rd_rn_rm(rt,rs,0)|armval);
+    #ifdef HAVE_ARMV7
+    }else if(rt!=rs&&(u_int)imm<65536) {
+      emit_movw(imm&0x0000ffff,rt);
+      emit_add(rs,rt,rt);
+    }else if(rt!=rs&&(u_int)-imm<65536) {
+      emit_movw(-imm&0x0000ffff,rt);
+      emit_sub(rs,rt,rt);
+    #endif
+    }else if((u_int)-imm<65536) {
+      assem_debug("sub %s,%s,#%d\n",regname[rt],regname[rs],(-imm)&0xFF00);
+      assem_debug("sub %s,%s,#%d\n",regname[rt],regname[rt],(-imm)&0xFF);
+      output_w32(0xe2400000|rd_rn_imm_shift(rt,rs,(-imm)>>8,8));
+      output_w32(0xe2400000|rd_rn_imm_shift(rt,rt,(-imm)&0xff,0));
+    }else {
+      do {
+        int shift = (ffs(imm) - 1) & ~1;
+        int imm8 = imm & 
(0xff << shift); + genimm_checked(imm8,&armval); + assem_debug("add %s,%s,#0x%x\n",regname[rt],regname[rs],imm8); + output_w32(0xe2800000|rd_rn_rm(rt,rs,0)|armval); + rs = rt; + imm &= ~imm8; + } + while (imm != 0); + } + } + else if(rs!=rt) emit_mov(rs,rt); +} + +static void emit_addimm_and_set_flags(int imm,int rt) +{ + assert(imm>-65536&&imm<65536); + u_int armval; + if(genimm(imm,&armval)) { + assem_debug("adds %s,%s,#%d\n",regname[rt],regname[rt],imm); + output_w32(0xe2900000|rd_rn_rm(rt,rt,0)|armval); + }else if(genimm(-imm,&armval)) { + assem_debug("subs %s,%s,#%d\n",regname[rt],regname[rt],imm); + output_w32(0xe2500000|rd_rn_rm(rt,rt,0)|armval); + }else if(imm<0) { + assem_debug("sub %s,%s,#%d\n",regname[rt],regname[rt],(-imm)&0xFF00); + assem_debug("subs %s,%s,#%d\n",regname[rt],regname[rt],(-imm)&0xFF); + output_w32(0xe2400000|rd_rn_imm_shift(rt,rt,(-imm)>>8,8)); + output_w32(0xe2500000|rd_rn_imm_shift(rt,rt,(-imm)&0xff,0)); + }else{ + assem_debug("add %s,%s,#%d\n",regname[rt],regname[rt],imm&0xFF00); + assem_debug("adds %s,%s,#%d\n",regname[rt],regname[rt],imm&0xFF); + output_w32(0xe2800000|rd_rn_imm_shift(rt,rt,imm>>8,8)); + output_w32(0xe2900000|rd_rn_imm_shift(rt,rt,imm&0xff,0)); + } +} + +static void emit_addimm_no_flags(u_int imm,u_int rt) +{ + emit_addimm(rt,imm,rt); +} + +static void emit_addnop(u_int r) +{ + assert(r<16); + assem_debug("add %s,%s,#0 (nop)\n",regname[r],regname[r]); + output_w32(0xe2800000|rd_rn_rm(r,r,0)); +} + +static void emit_adcimm(u_int rs,int imm,u_int rt) +{ + u_int armval; + genimm_checked(imm,&armval); + assem_debug("adc %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe2a00000|rd_rn_rm(rt,rs,0)|armval); +} + +static void emit_rscimm(int rs,int imm,u_int rt) +{ + assert(0); + u_int armval; + genimm_checked(imm,&armval); + assem_debug("rsc %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe2e00000|rd_rn_rm(rt,rs,0)|armval); +} + +static void emit_addimm64_32(int rsh,int rsl,int imm,int rth,int rtl) +{ + // TODO: if(genimm(imm,&armval)) ... 
+ // else + emit_movimm(imm,HOST_TEMPREG); + emit_adds(HOST_TEMPREG,rsl,rtl); + emit_adcimm(rsh,0,rth); +} + +static void emit_andimm(int rs,int imm,int rt) +{ + u_int armval; + if(imm==0) { + emit_zeroreg(rt); + }else if(genimm(imm,&armval)) { + assem_debug("and %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe2000000|rd_rn_rm(rt,rs,0)|armval); + }else if(genimm(~imm,&armval)) { + assem_debug("bic %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe3c00000|rd_rn_rm(rt,rs,0)|armval); + }else if(imm==65535) { + #ifndef HAVE_ARMV6 + assem_debug("bic %s,%s,#FF000000\n",regname[rt],regname[rs]); + output_w32(0xe3c00000|rd_rn_rm(rt,rs,0)|0x4FF); + assem_debug("bic %s,%s,#00FF0000\n",regname[rt],regname[rt]); + output_w32(0xe3c00000|rd_rn_rm(rt,rt,0)|0x8FF); + #else + assem_debug("uxth %s,%s\n",regname[rt],regname[rs]); + output_w32(0xe6ff0070|rd_rn_rm(rt,0,rs)); + #endif + }else{ + assert(imm>0&&imm<65535); + #ifndef HAVE_ARMV7 + assem_debug("mov r14,#%d\n",imm&0xFF00); + output_w32(0xe3a00000|rd_rn_imm_shift(HOST_TEMPREG,0,imm>>8,8)); + assem_debug("add r14,r14,#%d\n",imm&0xFF); + output_w32(0xe2800000|rd_rn_imm_shift(HOST_TEMPREG,HOST_TEMPREG,imm&0xff,0)); + #else + emit_movw(imm,HOST_TEMPREG); + #endif + assem_debug("and %s,%s,r14\n",regname[rt],regname[rs]); + output_w32(0xe0000000|rd_rn_rm(rt,rs,HOST_TEMPREG)); + } +} + +static void emit_orimm(int rs,int imm,int rt) +{ + u_int armval; + if(imm==0) { + if(rs!=rt) emit_mov(rs,rt); + }else if(genimm(imm,&armval)) { + assem_debug("orr %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe3800000|rd_rn_rm(rt,rs,0)|armval); + }else{ + assert(imm>0&&imm<65536); + assem_debug("orr %s,%s,#%d\n",regname[rt],regname[rs],imm&0xFF00); + assem_debug("orr %s,%s,#%d\n",regname[rt],regname[rs],imm&0xFF); + output_w32(0xe3800000|rd_rn_imm_shift(rt,rs,imm>>8,8)); + output_w32(0xe3800000|rd_rn_imm_shift(rt,rt,imm&0xff,0)); + } +} + +static void emit_xorimm(int rs,int imm,int rt) +{ + u_int armval; + if(imm==0) { + if(rs!=rt) emit_mov(rs,rt); + }else if(genimm(imm,&armval)) { + assem_debug("eor %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe2200000|rd_rn_rm(rt,rs,0)|armval); + }else{ + assert(imm>0&&imm<65536); + assem_debug("eor %s,%s,#%d\n",regname[rt],regname[rs],imm&0xFF00); + assem_debug("eor %s,%s,#%d\n",regname[rt],regname[rs],imm&0xFF); + output_w32(0xe2200000|rd_rn_imm_shift(rt,rs,imm>>8,8)); + output_w32(0xe2200000|rd_rn_imm_shift(rt,rt,imm&0xff,0)); + } +} + +static void emit_shlimm(int rs,u_int imm,int rt) +{ + assert(imm>0); + assert(imm<32); + //if(imm==1) ... 
+ assem_debug("lsl %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|(imm<<7)); +} + +static void emit_lsls_imm(int rs,int imm,int rt) +{ + assert(imm>0); + assert(imm<32); + assem_debug("lsls %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe1b00000|rd_rn_rm(rt,0,rs)|(imm<<7)); +} + +static unused void emit_lslpls_imm(int rs,int imm,int rt) +{ + assert(imm>0); + assert(imm<32); + assem_debug("lslpls %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x51b00000|rd_rn_rm(rt,0,rs)|(imm<<7)); +} + +static void emit_shrimm(int rs,u_int imm,int rt) +{ + assert(imm>0); + assert(imm<32); + assem_debug("lsr %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x20|(imm<<7)); +} + +static void emit_sarimm(int rs,u_int imm,int rt) +{ + assert(imm>0); + assert(imm<32); + assem_debug("asr %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x40|(imm<<7)); +} + +static void emit_rorimm(int rs,u_int imm,int rt) +{ + assert(imm>0); + assert(imm<32); + assem_debug("ror %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x60|(imm<<7)); +} + +static void emit_shldimm(int rs,int rs2,u_int imm,int rt) +{ + assem_debug("shld %%%s,%%%s,%d\n",regname[rt],regname[rs2],imm); + assert(imm>0); + assert(imm<32); + //if(imm==1) ... + assem_debug("lsl %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|(imm<<7)); + assem_debug("orr %s,%s,%s,lsr #%d\n",regname[rt],regname[rt],regname[rs2],32-imm); + output_w32(0xe1800020|rd_rn_rm(rt,rt,rs2)|((32-imm)<<7)); +} + +static void emit_shrdimm(int rs,int rs2,u_int imm,int rt) +{ + assem_debug("shrd %%%s,%%%s,%d\n",regname[rt],regname[rs2],imm); + assert(imm>0); + assert(imm<32); + //if(imm==1) ... + assem_debug("lsr %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe1a00020|rd_rn_rm(rt,0,rs)|(imm<<7)); + assem_debug("orr %s,%s,%s,lsl #%d\n",regname[rt],regname[rt],regname[rs2],32-imm); + output_w32(0xe1800000|rd_rn_rm(rt,rt,rs2)|((32-imm)<<7)); +} + +static void emit_signextend16(int rs,int rt) +{ + #ifndef HAVE_ARMV6 + emit_shlimm(rs,16,rt); + emit_sarimm(rt,16,rt); + #else + assem_debug("sxth %s,%s\n",regname[rt],regname[rs]); + output_w32(0xe6bf0070|rd_rn_rm(rt,0,rs)); + #endif +} + +static void emit_signextend8(int rs,int rt) +{ + #ifndef HAVE_ARMV6 + emit_shlimm(rs,24,rt); + emit_sarimm(rt,24,rt); + #else + assem_debug("sxtb %s,%s\n",regname[rt],regname[rs]); + output_w32(0xe6af0070|rd_rn_rm(rt,0,rs)); + #endif +} + +static void emit_shl(u_int rs,u_int shift,u_int rt) +{ + assert(rs<16); + assert(rt<16); + assert(shift<16); + //if(imm==1) ... 
+ assem_debug("lsl %s,%s,%s\n",regname[rt],regname[rs],regname[shift]); + output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x10|(shift<<8)); +} + +static void emit_shr(u_int rs,u_int shift,u_int rt) +{ + assert(rs<16); + assert(rt<16); + assert(shift<16); + assem_debug("lsr %s,%s,%s\n",regname[rt],regname[rs],regname[shift]); + output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x30|(shift<<8)); +} + +static void emit_sar(u_int rs,u_int shift,u_int rt) +{ + assert(rs<16); + assert(rt<16); + assert(shift<16); + assem_debug("asr %s,%s,%s\n",regname[rt],regname[rs],regname[shift]); + output_w32(0xe1a00000|rd_rn_rm(rt,0,rs)|0x50|(shift<<8)); +} + +static void emit_orrshl(u_int rs,u_int shift,u_int rt) +{ + assert(rs<16); + assert(rt<16); + assert(shift<16); + assem_debug("orr %s,%s,%s,lsl %s\n",regname[rt],regname[rt],regname[rs],regname[shift]); + output_w32(0xe1800000|rd_rn_rm(rt,rt,rs)|0x10|(shift<<8)); +} + +static void emit_orrshr(u_int rs,u_int shift,u_int rt) +{ + assert(rs<16); + assert(rt<16); + assert(shift<16); + assem_debug("orr %s,%s,%s,lsr %s\n",regname[rt],regname[rt],regname[rs],regname[shift]); + output_w32(0xe1800000|rd_rn_rm(rt,rt,rs)|0x30|(shift<<8)); +} + +static void emit_cmpimm(int rs,int imm) +{ + u_int armval; + if(genimm(imm,&armval)) { + assem_debug("cmp %s,#%d\n",regname[rs],imm); + output_w32(0xe3500000|rd_rn_rm(0,rs,0)|armval); + }else if(genimm(-imm,&armval)) { + assem_debug("cmn %s,#%d\n",regname[rs],imm); + output_w32(0xe3700000|rd_rn_rm(0,rs,0)|armval); + }else if(imm>0) { + assert(imm<65536); + emit_movimm(imm,HOST_TEMPREG); + assem_debug("cmp %s,r14\n",regname[rs]); + output_w32(0xe1500000|rd_rn_rm(0,rs,HOST_TEMPREG)); + }else{ + assert(imm>-65536); + emit_movimm(-imm,HOST_TEMPREG); + assem_debug("cmn %s,r14\n",regname[rs]); + output_w32(0xe1700000|rd_rn_rm(0,rs,HOST_TEMPREG)); + } +} + +static void emit_cmovne_imm(int imm,int rt) +{ + assem_debug("movne %s,#%d\n",regname[rt],imm); + u_int armval; + genimm_checked(imm,&armval); + output_w32(0x13a00000|rd_rn_rm(rt,0,0)|armval); +} + +static void emit_cmovl_imm(int imm,int rt) +{ + assem_debug("movlt %s,#%d\n",regname[rt],imm); + u_int armval; + genimm_checked(imm,&armval); + output_w32(0xb3a00000|rd_rn_rm(rt,0,0)|armval); +} + +static void emit_cmovb_imm(int imm,int rt) +{ + assem_debug("movcc %s,#%d\n",regname[rt],imm); + u_int armval; + genimm_checked(imm,&armval); + output_w32(0x33a00000|rd_rn_rm(rt,0,0)|armval); +} + +static void emit_cmovs_imm(int imm,int rt) +{ + assem_debug("movmi %s,#%d\n",regname[rt],imm); + u_int armval; + genimm_checked(imm,&armval); + output_w32(0x43a00000|rd_rn_rm(rt,0,0)|armval); +} + +static void emit_cmove_reg(int rs,int rt) +{ + assem_debug("moveq %s,%s\n",regname[rt],regname[rs]); + output_w32(0x01a00000|rd_rn_rm(rt,0,rs)); +} + +static void emit_cmovne_reg(int rs,int rt) +{ + assem_debug("movne %s,%s\n",regname[rt],regname[rs]); + output_w32(0x11a00000|rd_rn_rm(rt,0,rs)); +} + +static void emit_cmovl_reg(int rs,int rt) +{ + assem_debug("movlt %s,%s\n",regname[rt],regname[rs]); + output_w32(0xb1a00000|rd_rn_rm(rt,0,rs)); +} + +static void emit_cmovs_reg(int rs,int rt) +{ + assem_debug("movmi %s,%s\n",regname[rt],regname[rs]); + output_w32(0x41a00000|rd_rn_rm(rt,0,rs)); +} + +static void emit_slti32(int rs,int imm,int rt) +{ + if(rs!=rt) emit_zeroreg(rt); + emit_cmpimm(rs,imm); + if(rs==rt) emit_movimm(0,rt); + emit_cmovl_imm(1,rt); +} + +static void emit_sltiu32(int rs,int imm,int rt) +{ + if(rs!=rt) emit_zeroreg(rt); + emit_cmpimm(rs,imm); + if(rs==rt) emit_movimm(0,rt); + 
emit_cmovb_imm(1,rt); +} + +static void emit_slti64_32(int rsh,int rsl,int imm,int rt) +{ + assert(rsh!=rt); + emit_slti32(rsl,imm,rt); + if(imm>=0) + { + emit_test(rsh,rsh); + emit_cmovne_imm(0,rt); + emit_cmovs_imm(1,rt); + } + else + { + emit_cmpimm(rsh,-1); + emit_cmovne_imm(0,rt); + emit_cmovl_imm(1,rt); + } +} + +static void emit_sltiu64_32(int rsh,int rsl,int imm,int rt) +{ + assert(rsh!=rt); + emit_sltiu32(rsl,imm,rt); + if(imm>=0) + { + emit_test(rsh,rsh); + emit_cmovne_imm(0,rt); + } + else + { + emit_cmpimm(rsh,-1); + emit_cmovne_imm(1,rt); + } +} + +static void emit_cmp(int rs,int rt) +{ + assem_debug("cmp %s,%s\n",regname[rs],regname[rt]); + output_w32(0xe1500000|rd_rn_rm(0,rs,rt)); +} + +static void emit_set_gz32(int rs, int rt) +{ + //assem_debug("set_gz32\n"); + emit_cmpimm(rs,1); + emit_movimm(1,rt); + emit_cmovl_imm(0,rt); +} + +static void emit_set_nz32(int rs, int rt) +{ + //assem_debug("set_nz32\n"); + if(rs!=rt) emit_movs(rs,rt); + else emit_test(rs,rs); + emit_cmovne_imm(1,rt); +} + +static void emit_set_gz64_32(int rsh, int rsl, int rt) +{ + //assem_debug("set_gz64\n"); + emit_set_gz32(rsl,rt); + emit_test(rsh,rsh); + emit_cmovne_imm(1,rt); + emit_cmovs_imm(0,rt); +} + +static void emit_set_nz64_32(int rsh, int rsl, int rt) +{ + //assem_debug("set_nz64\n"); + emit_or_and_set_flags(rsh,rsl,rt); + emit_cmovne_imm(1,rt); +} + +static void emit_set_if_less32(int rs1, int rs2, int rt) +{ + //assem_debug("set if less (%%%s,%%%s),%%%s\n",regname[rs1],regname[rs2],regname[rt]); + if(rs1!=rt&&rs2!=rt) emit_zeroreg(rt); + emit_cmp(rs1,rs2); + if(rs1==rt||rs2==rt) emit_movimm(0,rt); + emit_cmovl_imm(1,rt); +} + +static void emit_set_if_carry32(int rs1, int rs2, int rt) +{ + //assem_debug("set if carry (%%%s,%%%s),%%%s\n",regname[rs1],regname[rs2],regname[rt]); + if(rs1!=rt&&rs2!=rt) emit_zeroreg(rt); + emit_cmp(rs1,rs2); + if(rs1==rt||rs2==rt) emit_movimm(0,rt); + emit_cmovb_imm(1,rt); +} + +static void emit_set_if_less64_32(int u1, int l1, int u2, int l2, int rt) +{ + //assem_debug("set if less64 (%%%s,%%%s,%%%s,%%%s),%%%s\n",regname[u1],regname[l1],regname[u2],regname[l2],regname[rt]); + assert(u1!=rt); + assert(u2!=rt); + emit_cmp(l1,l2); + emit_movimm(0,rt); + emit_sbcs(u1,u2,HOST_TEMPREG); + emit_cmovl_imm(1,rt); +} + +static void emit_set_if_carry64_32(int u1, int l1, int u2, int l2, int rt) +{ + //assem_debug("set if carry64 (%%%s,%%%s,%%%s,%%%s),%%%s\n",regname[u1],regname[l1],regname[u2],regname[l2],regname[rt]); + assert(u1!=rt); + assert(u2!=rt); + emit_cmp(l1,l2); + emit_movimm(0,rt); + emit_sbcs(u1,u2,HOST_TEMPREG); + emit_cmovb_imm(1,rt); +} + +static void emit_call(int a) +{ + assem_debug("bl %x (%x+%x)\n",a,(int)out,a-(int)out-8); + u_int offset=genjmp(a); + output_w32(0xeb000000|offset); +} + +static void emit_jmp(int a) +{ + assem_debug("b %x (%x+%x)\n",a,(int)out,a-(int)out-8); + u_int offset=genjmp(a); + output_w32(0xea000000|offset); +} + +static void emit_jne(int a) +{ + assem_debug("bne %x\n",a); + u_int offset=genjmp(a); + output_w32(0x1a000000|offset); +} + +static void emit_jeq(int a) +{ + assem_debug("beq %x\n",a); + u_int offset=genjmp(a); + output_w32(0x0a000000|offset); +} + +static void emit_js(int a) +{ + assem_debug("bmi %x\n",a); + u_int offset=genjmp(a); + output_w32(0x4a000000|offset); +} + +static void emit_jns(int a) +{ + assem_debug("bpl %x\n",a); + u_int offset=genjmp(a); + output_w32(0x5a000000|offset); +} + +static void emit_jl(int a) +{ + assem_debug("blt %x\n",a); + u_int offset=genjmp(a); + output_w32(0xba000000|offset); +} + 
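The emit_j* helpers above (continued below with emit_jge and friends) all produce the same B-instruction layout; only the condition nibble in bits 31:28 differs (0x0a=beq, 0x1a=bne, 0x2a=bcs, 0x3a=bcc, 0x4a=bmi, 0x5a=bpl, 0x7a=bvc, 0xaa=bge, 0xba=blt, 0xea=b, 0xeb=bl), and genjmp range-checks the ±32MB reach of the 24-bit word offset. A hypothetical decoder for the inverse direction (branch24_target is an invented name, not in the patch):

    #include <stdint.h>

    /* Sketch: recover the target of a B<cond>/BL whose offset was produced
       by genjmp.  Assumes arithmetic right shift on int32_t, the same
       assumption the dynarec itself makes with (*ptr<<8)>>6. */
    static uint32_t branch24_target(uint32_t insn, uint32_t insn_addr)
    {
      /* move the 24-bit field to the top, then sign-extend it back down */
      int32_t word_off = (int32_t)(insn << 8) >> 8;
      return insn_addr + 8 + ((uint32_t)word_off << 2); /* PC reads 8 ahead */
    }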
+static void emit_jge(int a) +{ + assem_debug("bge %x\n",a); + u_int offset=genjmp(a); + output_w32(0xaa000000|offset); +} + +static void emit_jno(int a) +{ + assem_debug("bvc %x\n",a); + u_int offset=genjmp(a); + output_w32(0x7a000000|offset); +} + +static void emit_jc(int a) +{ + assem_debug("bcs %x\n",a); + u_int offset=genjmp(a); + output_w32(0x2a000000|offset); +} + +static void emit_jcc(int a) +{ + assem_debug("bcc %x\n",a); + u_int offset=genjmp(a); + output_w32(0x3a000000|offset); +} + +static void emit_callreg(u_int r) +{ + assert(r<15); + assem_debug("blx %s\n",regname[r]); + output_w32(0xe12fff30|r); +} + +static void emit_jmpreg(u_int r) +{ + assem_debug("mov pc,%s\n",regname[r]); + output_w32(0xe1a00000|rd_rn_rm(15,0,r)); +} + +static void emit_readword_indexed(int offset, int rs, int rt) +{ + assert(offset>-4096&&offset<4096); + assem_debug("ldr %s,%s+%d\n",regname[rt],regname[rs],offset); + if(offset>=0) { + output_w32(0xe5900000|rd_rn_rm(rt,rs,0)|offset); + }else{ + output_w32(0xe5100000|rd_rn_rm(rt,rs,0)|(-offset)); + } +} + +static void emit_readword_dualindexedx4(int rs1, int rs2, int rt) +{ + assem_debug("ldr %s,%s,%s lsl #2\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0xe7900000|rd_rn_rm(rt,rs1,rs2)|0x100); +} + +static void emit_ldrcc_dualindexed(int rs1, int rs2, int rt) +{ + assem_debug("ldrcc %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x37900000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_ldrccb_dualindexed(int rs1, int rs2, int rt) +{ + assem_debug("ldrccb %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x37d00000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_ldrccsb_dualindexed(int rs1, int rs2, int rt) +{ + assem_debug("ldrccsb %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x319000d0|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_ldrcch_dualindexed(int rs1, int rs2, int rt) +{ + assem_debug("ldrcch %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x319000b0|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_ldrccsh_dualindexed(int rs1, int rs2, int rt) +{ + assem_debug("ldrccsh %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x319000f0|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_readword_indexed_tlb(int addr, int rs, int map, int rt) +{ + if(map<0) emit_readword_indexed(addr, rs, rt); + else { + assert(addr==0); + emit_readword_dualindexedx4(rs, map, rt); + } +} + +static void emit_readdword_indexed_tlb(int addr, int rs, int map, int rh, int rl) +{ + if(map<0) { + if(rh>=0) emit_readword_indexed(addr, rs, rh); + emit_readword_indexed(addr+4, rs, rl); + }else{ + assert(rh!=rs); + if(rh>=0) emit_readword_indexed_tlb(addr, rs, map, rh); + emit_addimm(map,1,map); + emit_readword_indexed_tlb(addr, rs, map, rl); + } +} + +static void emit_movsbl_indexed(int offset, int rs, int rt) +{ + assert(offset>-256&&offset<256); + assem_debug("ldrsb %s,%s+%d\n",regname[rt],regname[rs],offset); + if(offset>=0) { + output_w32(0xe1d000d0|rd_rn_rm(rt,rs,0)|((offset<<4)&0xf00)|(offset&0xf)); + }else{ + output_w32(0xe15000d0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf)); + } +} + +static void emit_movsbl_indexed_tlb(int addr, int rs, int map, int rt) +{ + if(map<0) emit_movsbl_indexed(addr, rs, rt); + else { + if(addr==0) { + emit_shlimm(map,2,map); + assem_debug("ldrsb %s,%s+%s\n",regname[rt],regname[rs],regname[map]); + output_w32(0xe19000d0|rd_rn_rm(rt,rs,map)); + }else{ + assert(addr>-256&&addr<256); + assem_debug("add %s,%s,%s,lsl 
#2\n",regname[rt],regname[rs],regname[map]); + output_w32(0xe0800000|rd_rn_rm(rt,rs,map)|(2<<7)); + emit_movsbl_indexed(addr, rt, rt); + } + } +} + +static void emit_movswl_indexed(int offset, int rs, int rt) +{ + assert(offset>-256&&offset<256); + assem_debug("ldrsh %s,%s+%d\n",regname[rt],regname[rs],offset); + if(offset>=0) { + output_w32(0xe1d000f0|rd_rn_rm(rt,rs,0)|((offset<<4)&0xf00)|(offset&0xf)); + }else{ + output_w32(0xe15000f0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf)); + } +} + +static void emit_movzbl_indexed(int offset, int rs, int rt) +{ + assert(offset>-4096&&offset<4096); + assem_debug("ldrb %s,%s+%d\n",regname[rt],regname[rs],offset); + if(offset>=0) { + output_w32(0xe5d00000|rd_rn_rm(rt,rs,0)|offset); + }else{ + output_w32(0xe5500000|rd_rn_rm(rt,rs,0)|(-offset)); + } +} + +static void emit_movzbl_dualindexedx4(int rs1, int rs2, int rt) +{ + assem_debug("ldrb %s,%s,%s lsl #2\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0xe7d00000|rd_rn_rm(rt,rs1,rs2)|0x100); +} + +static void emit_movzbl_indexed_tlb(int addr, int rs, int map, int rt) +{ + if(map<0) emit_movzbl_indexed(addr, rs, rt); + else { + if(addr==0) { + emit_movzbl_dualindexedx4(rs, map, rt); + }else{ + emit_addimm(rs,addr,rt); + emit_movzbl_dualindexedx4(rt, map, rt); + } + } +} + +static void emit_movzwl_indexed(int offset, int rs, int rt) +{ + assert(offset>-256&&offset<256); + assem_debug("ldrh %s,%s+%d\n",regname[rt],regname[rs],offset); + if(offset>=0) { + output_w32(0xe1d000b0|rd_rn_rm(rt,rs,0)|((offset<<4)&0xf00)|(offset&0xf)); + }else{ + output_w32(0xe15000b0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf)); + } +} + +static void emit_ldrd(int offset, int rs, int rt) +{ + assert(offset>-256&&offset<256); + assem_debug("ldrd %s,%s+%d\n",regname[rt],regname[rs],offset); + if(offset>=0) { + output_w32(0xe1c000d0|rd_rn_rm(rt,rs,0)|((offset<<4)&0xf00)|(offset&0xf)); + }else{ + output_w32(0xe14000d0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf)); + } +} + +static void emit_readword(int addr, int rt) +{ + u_int offset = addr-(u_int)&dynarec_local; + assert(offset<4096); + assem_debug("ldr %s,fp+%d\n",regname[rt],offset); + output_w32(0xe5900000|rd_rn_rm(rt,FP,0)|offset); +} + +static unused void emit_movsbl(int addr, int rt) +{ + u_int offset = addr-(u_int)&dynarec_local; + assert(offset<256); + assem_debug("ldrsb %s,fp+%d\n",regname[rt],offset); + output_w32(0xe1d000d0|rd_rn_rm(rt,FP,0)|((offset<<4)&0xf00)|(offset&0xf)); +} + +static unused void emit_movswl(int addr, int rt) +{ + u_int offset = addr-(u_int)&dynarec_local; + assert(offset<256); + assem_debug("ldrsh %s,fp+%d\n",regname[rt],offset); + output_w32(0xe1d000f0|rd_rn_rm(rt,FP,0)|((offset<<4)&0xf00)|(offset&0xf)); +} + +static unused void emit_movzbl(int addr, int rt) +{ + u_int offset = addr-(u_int)&dynarec_local; + assert(offset<4096); + assem_debug("ldrb %s,fp+%d\n",regname[rt],offset); + output_w32(0xe5d00000|rd_rn_rm(rt,FP,0)|offset); +} + +static unused void emit_movzwl(int addr, int rt) +{ + u_int offset = addr-(u_int)&dynarec_local; + assert(offset<256); + assem_debug("ldrh %s,fp+%d\n",regname[rt],offset); + output_w32(0xe1d000b0|rd_rn_rm(rt,FP,0)|((offset<<4)&0xf00)|(offset&0xf)); +} + +static void emit_writeword_indexed(int rt, int offset, int rs) +{ + assert(offset>-4096&&offset<4096); + assem_debug("str %s,%s+%d\n",regname[rt],regname[rs],offset); + if(offset>=0) { + output_w32(0xe5800000|rd_rn_rm(rt,rs,0)|offset); + }else{ + output_w32(0xe5000000|rd_rn_rm(rt,rs,0)|(-offset)); + } +} + +static void 
emit_writeword_dualindexedx4(int rt, int rs1, int rs2) +{ + assem_debug("str %s,%s,%s lsl #2\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0xe7800000|rd_rn_rm(rt,rs1,rs2)|0x100); +} + +static void emit_writeword_indexed_tlb(int rt, int addr, int rs, int map, int temp) +{ + if(map<0) emit_writeword_indexed(rt, addr, rs); + else { + assert(addr==0); + emit_writeword_dualindexedx4(rt, rs, map); + } +} + +static void emit_writedword_indexed_tlb(int rh, int rl, int addr, int rs, int map, int temp) +{ + if(map<0) { + if(rh>=0) emit_writeword_indexed(rh, addr, rs); + emit_writeword_indexed(rl, addr+4, rs); + }else{ + assert(rh>=0); + if(temp!=rs) emit_addimm(map,1,temp); + emit_writeword_indexed_tlb(rh, addr, rs, map, temp); + if(temp!=rs) emit_writeword_indexed_tlb(rl, addr, rs, temp, temp); + else { + emit_addimm(rs,4,rs); + emit_writeword_indexed_tlb(rl, addr, rs, map, temp); + } + } +} + +static void emit_writehword_indexed(int rt, int offset, int rs) +{ + assert(offset>-256&&offset<256); + assem_debug("strh %s,%s+%d\n",regname[rt],regname[rs],offset); + if(offset>=0) { + output_w32(0xe1c000b0|rd_rn_rm(rt,rs,0)|((offset<<4)&0xf00)|(offset&0xf)); + }else{ + output_w32(0xe14000b0|rd_rn_rm(rt,rs,0)|(((-offset)<<4)&0xf00)|((-offset)&0xf)); + } +} + +static void emit_writebyte_indexed(int rt, int offset, int rs) +{ + assert(offset>-4096&&offset<4096); + assem_debug("strb %s,%s+%d\n",regname[rt],regname[rs],offset); + if(offset>=0) { + output_w32(0xe5c00000|rd_rn_rm(rt,rs,0)|offset); + }else{ + output_w32(0xe5400000|rd_rn_rm(rt,rs,0)|(-offset)); + } +} + +static void emit_writebyte_dualindexedx4(int rt, int rs1, int rs2) +{ + assem_debug("strb %s,%s,%s lsl #2\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0xe7c00000|rd_rn_rm(rt,rs1,rs2)|0x100); +} + +static void emit_writebyte_indexed_tlb(int rt, int addr, int rs, int map, int temp) +{ + if(map<0) emit_writebyte_indexed(rt, addr, rs); + else { + if(addr==0) { + emit_writebyte_dualindexedx4(rt, rs, map); + }else{ + emit_addimm(rs,addr,temp); + emit_writebyte_dualindexedx4(rt, temp, map); + } + } +} + +static void emit_strcc_dualindexed(int rs1, int rs2, int rt) +{ + assem_debug("strcc %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x37800000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_strccb_dualindexed(int rs1, int rs2, int rt) +{ + assem_debug("strccb %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x37c00000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_strcch_dualindexed(int rs1, int rs2, int rt) +{ + assem_debug("strcch %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x318000b0|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_writeword(int rt, int addr) +{ + u_int offset = addr-(u_int)&dynarec_local; + assert(offset<4096); + assem_debug("str %s,fp+%d\n",regname[rt],offset); + output_w32(0xe5800000|rd_rn_rm(rt,FP,0)|offset); +} + +static unused void emit_writehword(int rt, int addr) +{ + u_int offset = addr-(u_int)&dynarec_local; + assert(offset<256); + assem_debug("strh %s,fp+%d\n",regname[rt],offset); + output_w32(0xe1c000b0|rd_rn_rm(rt,FP,0)|((offset<<4)&0xf00)|(offset&0xf)); +} + +static unused void emit_writebyte(int rt, int addr) +{ + u_int offset = addr-(u_int)&dynarec_local; + assert(offset<4096); + assem_debug("strb %s,fp+%d\n",regname[rt],offset); + output_w32(0xe5c00000|rd_rn_rm(rt,FP,0)|offset); +} + +static void emit_umull(u_int rs1, u_int rs2, u_int hi, u_int lo) +{ + assem_debug("umull %s, %s, %s, %s\n",regname[lo],regname[hi],regname[rs1],regname[rs2]); + 
assert(rs1<16); + assert(rs2<16); + assert(hi<16); + assert(lo<16); + output_w32(0xe0800090|(hi<<16)|(lo<<12)|(rs2<<8)|rs1); +} + +static void emit_smull(u_int rs1, u_int rs2, u_int hi, u_int lo) +{ + assem_debug("smull %s, %s, %s, %s\n",regname[lo],regname[hi],regname[rs1],regname[rs2]); + assert(rs1<16); + assert(rs2<16); + assert(hi<16); + assert(lo<16); + output_w32(0xe0c00090|(hi<<16)|(lo<<12)|(rs2<<8)|rs1); +} + +static void emit_clz(int rs,int rt) +{ + assem_debug("clz %s,%s\n",regname[rt],regname[rs]); + output_w32(0xe16f0f10|rd_rn_rm(rt,0,rs)); +} + +static void emit_subcs(int rs1,int rs2,int rt) +{ + assem_debug("subcs %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x20400000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_shrcc_imm(int rs,u_int imm,int rt) +{ + assert(imm>0); + assert(imm<32); + assem_debug("lsrcc %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x31a00000|rd_rn_rm(rt,0,rs)|0x20|(imm<<7)); +} + +static void emit_shrne_imm(int rs,u_int imm,int rt) +{ + assert(imm>0); + assert(imm<32); + assem_debug("lsrne %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x11a00000|rd_rn_rm(rt,0,rs)|0x20|(imm<<7)); +} + +static void emit_negmi(int rs, int rt) +{ + assem_debug("rsbmi %s,%s,#0\n",regname[rt],regname[rs]); + output_w32(0x42600000|rd_rn_rm(rt,rs,0)); +} + +static void emit_negsmi(int rs, int rt) +{ + assem_debug("rsbsmi %s,%s,#0\n",regname[rt],regname[rs]); + output_w32(0x42700000|rd_rn_rm(rt,rs,0)); +} + +static void emit_orreq(u_int rs1,u_int rs2,u_int rt) +{ + assem_debug("orreq %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x01800000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_orrne(u_int rs1,u_int rs2,u_int rt) +{ + assem_debug("orrne %s,%s,%s\n",regname[rt],regname[rs1],regname[rs2]); + output_w32(0x11800000|rd_rn_rm(rt,rs1,rs2)); +} + +static void emit_bic_lsl(u_int rs1,u_int rs2,u_int shift,u_int rt) +{ + assem_debug("bic %s,%s,%s lsl %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]); + output_w32(0xe1C00000|rd_rn_rm(rt,rs1,rs2)|0x10|(shift<<8)); +} + +static void emit_biceq_lsl(u_int rs1,u_int rs2,u_int shift,u_int rt) +{ + assem_debug("biceq %s,%s,%s lsl %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]); + output_w32(0x01C00000|rd_rn_rm(rt,rs1,rs2)|0x10|(shift<<8)); +} + +static void emit_bicne_lsl(u_int rs1,u_int rs2,u_int shift,u_int rt) +{ + assem_debug("bicne %s,%s,%s lsl %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]); + output_w32(0x11C00000|rd_rn_rm(rt,rs1,rs2)|0x10|(shift<<8)); +} + +static void emit_bic_lsr(u_int rs1,u_int rs2,u_int shift,u_int rt) +{ + assem_debug("bic %s,%s,%s lsr %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]); + output_w32(0xe1C00000|rd_rn_rm(rt,rs1,rs2)|0x30|(shift<<8)); +} + +static void emit_biceq_lsr(u_int rs1,u_int rs2,u_int shift,u_int rt) +{ + assem_debug("biceq %s,%s,%s lsr %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]); + output_w32(0x01C00000|rd_rn_rm(rt,rs1,rs2)|0x30|(shift<<8)); +} + +static void emit_bicne_lsr(u_int rs1,u_int rs2,u_int shift,u_int rt) +{ + assem_debug("bicne %s,%s,%s lsr %s\n",regname[rt],regname[rs1],regname[rs2],regname[shift]); + output_w32(0x11C00000|rd_rn_rm(rt,rs1,rs2)|0x30|(shift<<8)); +} + +static void emit_teq(int rs, int rt) +{ + assem_debug("teq %s,%s\n",regname[rs],regname[rt]); + output_w32(0xe1300000|rd_rn_rm(0,rs,rt)); +} + +static void emit_rsbimm(int rs, int imm, int rt) +{ + u_int armval; + genimm_checked(imm,&armval); + assem_debug("rsb 
%s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0xe2600000|rd_rn_rm(rt,rs,0)|armval); +} + +// Load 2 immediates optimizing for small code size +static void emit_mov2imm_compact(int imm1,u_int rt1,int imm2,u_int rt2) +{ + emit_movimm(imm1,rt1); + u_int armval; + if(genimm(imm2-imm1,&armval)) { + assem_debug("add %s,%s,#%d\n",regname[rt2],regname[rt1],imm2-imm1); + output_w32(0xe2800000|rd_rn_rm(rt2,rt1,0)|armval); + }else if(genimm(imm1-imm2,&armval)) { + assem_debug("sub %s,%s,#%d\n",regname[rt2],regname[rt1],imm1-imm2); + output_w32(0xe2400000|rd_rn_rm(rt2,rt1,0)|armval); + } + else emit_movimm(imm2,rt2); +} + +// Conditionally select one of two immediates, optimizing for small code size +// This will only be called if HAVE_CMOV_IMM is defined +static void emit_cmov2imm_e_ne_compact(int imm1,int imm2,u_int rt) +{ + u_int armval; + if(genimm(imm2-imm1,&armval)) { + emit_movimm(imm1,rt); + assem_debug("addne %s,%s,#%d\n",regname[rt],regname[rt],imm2-imm1); + output_w32(0x12800000|rd_rn_rm(rt,rt,0)|armval); + }else if(genimm(imm1-imm2,&armval)) { + emit_movimm(imm1,rt); + assem_debug("subne %s,%s,#%d\n",regname[rt],regname[rt],imm1-imm2); + output_w32(0x12400000|rd_rn_rm(rt,rt,0)|armval); + } + else { + #ifndef HAVE_ARMV7 + emit_movimm(imm1,rt); + add_literal((int)out,imm2); + assem_debug("ldrne %s,pc+? [=%x]\n",regname[rt],imm2); + output_w32(0x15900000|rd_rn_rm(rt,15,0)); + #else + emit_movw(imm1&0x0000FFFF,rt); + if((imm1&0xFFFF)!=(imm2&0xFFFF)) { + assem_debug("movwne %s,#%d (0x%x)\n",regname[rt],imm2&0xFFFF,imm2&0xFFFF); + output_w32(0x13000000|rd_rn_rm(rt,0,0)|(imm2&0xfff)|((imm2<<4)&0xf0000)); + } + emit_movt(imm1&0xFFFF0000,rt); + if((imm1&0xFFFF0000)!=(imm2&0xFFFF0000)) { + assem_debug("movtne %s,#%d (0x%x)\n",regname[rt],imm2&0xffff0000,imm2&0xffff0000); + output_w32(0x13400000|rd_rn_rm(rt,0,0)|((imm2>>16)&0xfff)|((imm2>>12)&0xf0000)); + } + #endif + } +} + +// special case for checking invalid_code +static void emit_cmpmem_indexedsr12_reg(int base,int r,int imm) +{ + assert(imm<128&&imm>=0); + assert(r>=0&&r<16); + assem_debug("ldrb lr,%s,%s lsr #12\n",regname[base],regname[r]); + output_w32(0xe7d00000|rd_rn_rm(HOST_TEMPREG,base,r)|0x620); + emit_cmpimm(HOST_TEMPREG,imm); +} + +static void emit_callne(int a) +{ + assem_debug("blne %x\n",a); + u_int offset=genjmp(a); + output_w32(0x1b000000|offset); +} + +// Used to preload hash table entries +static unused void emit_prefetchreg(int r) +{ + assem_debug("pld %s\n",regname[r]); + output_w32(0xf5d0f000|rd_rn_rm(0,r,0)); +} + +// Special case for mini_ht +static void emit_ldreq_indexed(int rs, u_int offset, int rt) +{ + assert(offset<4096); + assem_debug("ldreq %s,[%s, #%d]\n",regname[rt],regname[rs],offset); + output_w32(0x05900000|rd_rn_rm(rt,rs,0)|offset); +} + +static unused void emit_bicne_imm(int rs,int imm,int rt) +{ + u_int armval; + genimm_checked(imm,&armval); + assem_debug("bicne %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x13c00000|rd_rn_rm(rt,rs,0)|armval); +} + +static unused void emit_biccs_imm(int rs,int imm,int rt) +{ + u_int armval; + genimm_checked(imm,&armval); + assem_debug("biccs %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x23c00000|rd_rn_rm(rt,rs,0)|armval); +} + +static unused void emit_bicvc_imm(int rs,int imm,int rt) +{ + u_int armval; + genimm_checked(imm,&armval); + assem_debug("bicvc %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x73c00000|rd_rn_rm(rt,rs,0)|armval); +} + +static unused void emit_bichi_imm(int rs,int imm,int rt) +{ + u_int armval; + 
genimm_checked(imm,&armval); + assem_debug("bichi %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x83c00000|rd_rn_rm(rt,rs,0)|armval); +} + +static unused void emit_orrvs_imm(int rs,int imm,int rt) +{ + u_int armval; + genimm_checked(imm,&armval); + assem_debug("orrvs %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x63800000|rd_rn_rm(rt,rs,0)|armval); +} + +static void emit_orrne_imm(int rs,int imm,int rt) +{ + u_int armval; + genimm_checked(imm,&armval); + assem_debug("orrne %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x13800000|rd_rn_rm(rt,rs,0)|armval); +} + +static void emit_andne_imm(int rs,int imm,int rt) +{ + u_int armval; + genimm_checked(imm,&armval); + assem_debug("andne %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x12000000|rd_rn_rm(rt,rs,0)|armval); +} + +static unused void emit_addpl_imm(int rs,int imm,int rt) +{ + u_int armval; + genimm_checked(imm,&armval); + assem_debug("addpl %s,%s,#%d\n",regname[rt],regname[rs],imm); + output_w32(0x52800000|rd_rn_rm(rt,rs,0)|armval); +} + +static void emit_jno_unlikely(int a) +{ + //emit_jno(a); + assem_debug("addvc pc,pc,#? (%x)\n",/*a-(int)out-8,*/a); + output_w32(0x72800000|rd_rn_rm(15,15,0)); +} + +static void save_regs_all(u_int reglist) +{ + int i; + if(!reglist) return; + assem_debug("stmia fp,{"); + for(i=0;i<16;i++) + if(reglist&(1<=BASE_ADDR&&addr<(BASE_ADDR+(1<=0x80000000&&target<0x80800000)||(target>0xA4000000&&target<0xA4001000)); +//DEBUG > +#ifdef DEBUG_CYCLE_COUNT + emit_readword((int)&last_count,ECX); + emit_add(HOST_CCREG,ECX,HOST_CCREG); + emit_readword((int)&next_interupt,ECX); + emit_writeword(HOST_CCREG,(int)&Count); + emit_sub(HOST_CCREG,ECX,HOST_CCREG); + emit_writeword(ECX,(int)&last_count); +#endif +//DEBUG < + emit_jmp(linker); +} + +static void emit_extjump(int addr, int target) +{ + emit_extjump2(addr, target, (int)dyna_linker); +} + +static void emit_extjump_ds(int addr, int target) +{ + emit_extjump2(addr, target, (int)dyna_linker_ds); +} + +// put rt_val into rt, potentially making use of rs with value rs_val +static void emit_movimm_from(u_int rs_val,int rs,u_int rt_val,int rt) +{ + u_int armval; + int diff; + if(genimm(rt_val,&armval)) { + assem_debug("mov %s,#%d\n",regname[rt],rt_val); + output_w32(0xe3a00000|rd_rn_rm(rt,0,0)|armval); + return; + } + if(genimm(~rt_val,&armval)) { + assem_debug("mvn %s,#%d\n",regname[rt],rt_val); + output_w32(0xe3e00000|rd_rn_rm(rt,0,0)|armval); + return; + } + diff=rt_val-rs_val; + if(genimm(diff,&armval)) { + assem_debug("add %s,%s,#%d\n",regname[rt],regname[rs],diff); + output_w32(0xe2800000|rd_rn_rm(rt,rs,0)|armval); + return; + }else if(genimm(-diff,&armval)) { + assem_debug("sub %s,%s,#%d\n",regname[rt],regname[rs],-diff); + output_w32(0xe2400000|rd_rn_rm(rt,rs,0)|armval); + return; + } + emit_movimm(rt_val,rt); +} + +// return 1 if above function can do it's job cheaply +static int is_similar_value(u_int v1,u_int v2) +{ + u_int xs; + int diff; + if(v1==v2) return 1; + diff=v2-v1; + for(xs=diff;xs!=0&&(xs&3)==0;xs>>=2) + ; + if(xs<0x100) return 1; + for(xs=-diff;xs!=0&&(xs&3)==0;xs>>=2) + ; + if(xs<0x100) return 1; + return 0; +} + +// trashes r2 +static void pass_args(int a0, int a1) +{ + if(a0==1&&a1==0) { + // must swap + emit_mov(a0,2); emit_mov(a1,1); emit_mov(2,0); + } + else if(a0!=0&&a1==0) { + emit_mov(a1,1); + if (a0>=0) emit_mov(a0,0); + } + else { + if(a0>=0&&a0!=0) emit_mov(a0,0); + if(a1>=0&&a1!=1) emit_mov(a1,1); + } +} + +static void mov_loadtype_adj(int type,int rs,int rt) +{ + switch(type) { + case 
LOADB_STUB: emit_signextend8(rs,rt); break; + case LOADBU_STUB: emit_andimm(rs,0xff,rt); break; + case LOADH_STUB: emit_signextend16(rs,rt); break; + case LOADHU_STUB: emit_andimm(rs,0xffff,rt); break; + case LOADW_STUB: if(rs!=rt) emit_mov(rs,rt); break; + default: assert(0); + } +} + +#include "../backends/psx/pcsxmem.h" +#include "../backends/psx/pcsxmem_inline.c" + +static void do_readstub(int n) +{ + assem_debug("do_readstub %x\n",start+stubs[n][3]*4); + literal_pool(256); + set_jump_target(stubs[n][1],(int)out); + int type=stubs[n][0]; + int i=stubs[n][3]; + int rs=stubs[n][4]; + struct regstat *i_regs=(struct regstat *)stubs[n][5]; + u_int reglist=stubs[n][7]; + signed char *i_regmap=i_regs->regmap; + int rt; + if(itype[i]==C1LS||itype[i]==C2LS||itype[i]==LOADLR) { + rt=get_reg(i_regmap,FTEMP); + }else{ + rt=get_reg(i_regmap,rt1[i]); + } + assert(rs>=0); + int r,temp=-1,temp2=HOST_TEMPREG,regs_saved=0,restore_jump=0; + reglist|=(1<=0&&rt1[i]!=0) + reglist&=~(1<=0&&rt1[i]!=0)) { + switch(type) { + case LOADB_STUB: emit_ldrccsb_dualindexed(temp2,rs,rt); break; + case LOADBU_STUB: emit_ldrccb_dualindexed(temp2,rs,rt); break; + case LOADH_STUB: emit_ldrccsh_dualindexed(temp2,rs,rt); break; + case LOADHU_STUB: emit_ldrcch_dualindexed(temp2,rs,rt); break; + case LOADW_STUB: emit_ldrcc_dualindexed(temp2,rs,rt); break; + } + } + if(regs_saved) { + restore_jump=(int)out; + emit_jcc(0); // jump to reg restore + } + else + emit_jcc(stubs[n][2]); // return address + + if(!regs_saved) + save_regs(reglist); + int handler=0; + if(type==LOADB_STUB||type==LOADBU_STUB) + handler=(int)jump_handler_read8; + if(type==LOADH_STUB||type==LOADHU_STUB) + handler=(int)jump_handler_read16; + if(type==LOADW_STUB) + handler=(int)jump_handler_read32; + assert(handler!=0); + pass_args(rs,temp2); + int cc=get_reg(i_regmap,CCREG); + if(cc<0) + emit_loadreg(CCREG,2); + emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n][6]+1),2); + emit_call(handler); + if(itype[i]==C1LS||itype[i]==C2LS||(rt>=0&&rt1[i]!=0)) { + mov_loadtype_adj(type,0,rt); + } + if(restore_jump) + set_jump_target(restore_jump,(int)out); + restore_regs(reglist); + emit_jmp(stubs[n][2]); // return address +} + +// return memhandler, or get directly accessable address and return 0 +static u_int get_direct_memhandler(void *table,u_int addr,int type,u_int *addr_host) +{ + u_int l1,l2=0; + l1=((u_int *)table)[addr>>12]; + if((l1&(1<<31))==0) { + u_int v=l1<<1; + *addr_host=v+addr; + return 0; + } + else { + l1<<=1; + if(type==LOADB_STUB||type==LOADBU_STUB||type==STOREB_STUB) + l2=((u_int *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)]; + else if(type==LOADH_STUB||type==LOADHU_STUB||type==STOREH_STUB) + l2=((u_int *)l1)[0x1000/4 + (addr&0xfff)/2]; + else + l2=((u_int *)l1)[(addr&0xfff)/4]; + if((l2&(1<<31))==0) { + u_int v=l2<<1; + *addr_host=v+(addr&0xfff); + return 0; + } + return l2<<1; + } +} + +static void inline_readstub(int type, int i, u_int addr, signed char regmap[], int target, int adj, u_int reglist) +{ + int rs=get_reg(regmap,target); + int rt=get_reg(regmap,target); + if(rs<0) rs=get_reg(regmap,-1); + assert(rs>=0); + u_int handler,host_addr=0,is_dynamic,far_call=0; + int cc=get_reg(regmap,CCREG); + if(pcsx_direct_read(type,addr,CLOCK_ADJUST(adj+1),cc,target?rs:-1,rt)) + return; + handler=get_direct_memhandler(mem_rtab,addr,type,&host_addr); + if (handler==0) { + if(rt<0||rt1[i]==0) + return; + if(addr!=host_addr) + emit_movimm_from(addr,rs,host_addr,rs); + switch(type) { + case LOADB_STUB: emit_movsbl_indexed(0,rs,rt); break; + case LOADBU_STUB: 
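+      // x86-heritage names kept by this backend: movzbl/movswl/movzwl
+      // are zero/sign-extending byte/halfword loads, emitted here as
+      // ldrb/ldrsh/ldrh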
emit_movzbl_indexed(0,rs,rt); break; + case LOADH_STUB: emit_movswl_indexed(0,rs,rt); break; + case LOADHU_STUB: emit_movzwl_indexed(0,rs,rt); break; + case LOADW_STUB: emit_readword_indexed(0,rs,rt); break; + default: assert(0); + } + return; + } + is_dynamic=pcsxmem_is_handler_dynamic(addr); + if(is_dynamic) { + if(type==LOADB_STUB||type==LOADBU_STUB) + handler=(int)jump_handler_read8; + if(type==LOADH_STUB||type==LOADHU_STUB) + handler=(int)jump_handler_read16; + if(type==LOADW_STUB) + handler=(int)jump_handler_read32; + } + + // call a memhandler + if(rt>=0&&rt1[i]!=0) + reglist&=~(1<=33554432) { + // unreachable memhandler, a plugin func perhaps + emit_movimm(handler,12); + far_call=1; + } + if(cc<0) + emit_loadreg(CCREG,2); + if(is_dynamic) { + emit_movimm(((u_int *)mem_rtab)[addr>>12]<<1,1); + emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj+1),2); + } + else { + emit_readword((int)&last_count,3); + emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj+1),2); + emit_add(2,3,2); + emit_writeword(2,(int)&Count); + } + + if(far_call) + emit_callreg(12); + else + emit_call(handler); + + if(rt>=0&&rt1[i]!=0) { + switch(type) { + case LOADB_STUB: emit_signextend8(0,rt); break; + case LOADBU_STUB: emit_andimm(0,0xff,rt); break; + case LOADH_STUB: emit_signextend16(0,rt); break; + case LOADHU_STUB: emit_andimm(0,0xffff,rt); break; + case LOADW_STUB: if(rt!=0) emit_mov(0,rt); break; + default: assert(0); + } + } + restore_regs(reglist); +} + +static void do_writestub(int n) +{ + assem_debug("do_writestub %x\n",start+stubs[n][3]*4); + literal_pool(256); + set_jump_target(stubs[n][1],(int)out); + int type=stubs[n][0]; + int i=stubs[n][3]; + int rs=stubs[n][4]; + struct regstat *i_regs=(struct regstat *)stubs[n][5]; + u_int reglist=stubs[n][7]; + signed char *i_regmap=i_regs->regmap; + int rt,r; + if(itype[i]==C1LS||itype[i]==C2LS) { + rt=get_reg(i_regmap,r=FTEMP); + }else{ + rt=get_reg(i_regmap,r=rs2[i]); + } + assert(rs>=0); + assert(rt>=0); + int rtmp,temp=-1,temp2=HOST_TEMPREG,regs_saved=0,restore_jump=0,ra; + int reglist2=reglist|(1<=0); + assert(rt>=0); + u_int handler,host_addr=0; + handler=get_direct_memhandler(mem_wtab,addr,type,&host_addr); + if (handler==0) { + if(addr!=host_addr) + emit_movimm_from(addr,rs,host_addr,rs); + switch(type) { + case STOREB_STUB: emit_writebyte_indexed(rt,0,rs); break; + case STOREH_STUB: emit_writehword_indexed(rt,0,rs); break; + case STOREW_STUB: emit_writeword_indexed(rt,0,rs); break; + default: assert(0); + } + return; + } + + // call a memhandler + save_regs(reglist); + pass_args(rs,rt); + int cc=get_reg(regmap,CCREG); + if(cc<0) + emit_loadreg(CCREG,2); + emit_addimm(cc<0?2:cc,CLOCK_ADJUST(adj+1),2); + emit_movimm(handler,3); + // returns new cycle_count + emit_call((int)jump_handler_write_h); + emit_addimm(0,-CLOCK_ADJUST(adj+1),cc<0?2:cc); + if(cc<0) + emit_storereg(CCREG,2); + restore_regs(reglist); +} + +static void do_unalignedwritestub(int n) +{ + assem_debug("do_unalignedwritestub %x\n",start+stubs[n][3]*4); + literal_pool(256); + set_jump_target(stubs[n][1],(int)out); + + int i=stubs[n][3]; + struct regstat *i_regs=(struct regstat *)stubs[n][4]; + int addr=stubs[n][5]; + u_int reglist=stubs[n][7]; + signed char *i_regmap=i_regs->regmap; + int temp2=get_reg(i_regmap,FTEMP); + int rt; + rt=get_reg(i_regmap,rs2[i]); + assert(rt>=0); + assert(addr>=0); + assert(opcode[i]==0x2a||opcode[i]==0x2e); // SWL/SWR only implemented + reglist|=(1<regmap_entry,i_regs->was32,i_regs->wasdirty); + if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG); + 
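+  // (FP exception stub, continued) the dirty registers were just written
+  // back; CCREG is reloaded from dynarec_local only when the block's
+  // entry map doesn't already keep it in HOST_CCREG. EAX below is an
+  // x86-heritage alias for an ARM register number, presumably defined
+  // in assem_arm.h.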
emit_movimm(start+(i-ds)*4,EAX); // Get PC + emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle... + emit_jmp(ds?(int)fp_exception_ds:(int)fp_exception); +} + +/* Special assem */ + +static void shift_assemble_arm(int i,struct regstat *i_regs) +{ + if(rt1[i]) { + if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV + { + signed char s,t,shift; + t=get_reg(i_regs->regmap,rt1[i]); + s=get_reg(i_regs->regmap,rs1[i]); + shift=get_reg(i_regs->regmap,rs2[i]); + if(t>=0){ + if(rs1[i]==0) + { + emit_zeroreg(t); + } + else if(rs2[i]==0) + { + assert(s>=0); + if(s!=t) emit_mov(s,t); + } + else + { + emit_andimm(shift,31,HOST_TEMPREG); + if(opcode2[i]==4) // SLLV + { + emit_shl(s,HOST_TEMPREG,t); + } + if(opcode2[i]==6) // SRLV + { + emit_shr(s,HOST_TEMPREG,t); + } + if(opcode2[i]==7) // SRAV + { + emit_sar(s,HOST_TEMPREG,t); + } + } + } + } else { // DSLLV/DSRLV/DSRAV + signed char sh,sl,th,tl,shift; + th=get_reg(i_regs->regmap,rt1[i]|64); + tl=get_reg(i_regs->regmap,rt1[i]); + sh=get_reg(i_regs->regmap,rs1[i]|64); + sl=get_reg(i_regs->regmap,rs1[i]); + shift=get_reg(i_regs->regmap,rs2[i]); + if(tl>=0){ + if(rs1[i]==0) + { + emit_zeroreg(tl); + if(th>=0) emit_zeroreg(th); + } + else if(rs2[i]==0) + { + assert(sl>=0); + if(sl!=tl) emit_mov(sl,tl); + if(th>=0&&sh!=th) emit_mov(sh,th); + } + else + { + // FIXME: What if shift==tl ? + assert(shift!=tl); + int temp=get_reg(i_regs->regmap,-1); + int real_th=th; + if(th<0&&opcode2[i]!=0x14) {th=temp;} // DSLLV doesn't need a temporary register + assert(sl>=0); + assert(sh>=0); + emit_andimm(shift,31,HOST_TEMPREG); + if(opcode2[i]==0x14) // DSLLV + { + if(th>=0) emit_shl(sh,HOST_TEMPREG,th); + emit_rsbimm(HOST_TEMPREG,32,HOST_TEMPREG); + emit_orrshr(sl,HOST_TEMPREG,th); + emit_andimm(shift,31,HOST_TEMPREG); + emit_testimm(shift,32); + emit_shl(sl,HOST_TEMPREG,tl); + if(th>=0) emit_cmovne_reg(tl,th); + emit_cmovne_imm(0,tl); + } + if(opcode2[i]==0x16) // DSRLV + { + assert(th>=0); + emit_shr(sl,HOST_TEMPREG,tl); + emit_rsbimm(HOST_TEMPREG,32,HOST_TEMPREG); + emit_orrshl(sh,HOST_TEMPREG,tl); + emit_andimm(shift,31,HOST_TEMPREG); + emit_testimm(shift,32); + emit_shr(sh,HOST_TEMPREG,th); + emit_cmovne_reg(th,tl); + if(real_th>=0) emit_cmovne_imm(0,th); + } + if(opcode2[i]==0x17) // DSRAV + { + assert(th>=0); + emit_shr(sl,HOST_TEMPREG,tl); + emit_rsbimm(HOST_TEMPREG,32,HOST_TEMPREG); + if(real_th>=0) { + assert(temp>=0); + emit_sarimm(th,31,temp); + } + emit_orrshl(sh,HOST_TEMPREG,tl); + emit_andimm(shift,31,HOST_TEMPREG); + emit_testimm(shift,32); + emit_sar(sh,HOST_TEMPREG,th); + emit_cmovne_reg(th,tl); + if(real_th>=0) emit_cmovne_reg(temp,th); + } + } + } + } + } +} + +static void speculate_mov(int rs,int rt) +{ + if(rt!=0) { + smrv_strong_next|=1<>rs1[i])&1) speculate_mov(rs1[i],rt1[i]); + else if((smrv_strong>>rs2[i])&1) speculate_mov(rs2[i],rt1[i]); + else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]); + else if((smrv_weak>>rs2[i])&1) speculate_mov_weak(rs2[i],rt1[i]); + else { + smrv_strong_next&=~(1<=0) { + if(get_final_value(hr,i,&value)) + smrv[rt1[i]]=value; + else smrv[rt1[i]]=constmap[i][hr]; + smrv_strong_next|=1<>rs1[i])&1) speculate_mov(rs1[i],rt1[i]); + else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]); + } + break; + case LOAD: + if(start<0x2000&&(rt1[i]==26||(smrv[rt1[i]]>>24)==0xa0)) { + // special case for BIOS + smrv[rt1[i]]=0xa0000000; + smrv_strong_next|=1<>r)&1),(smrv_weak>>r)&1,regs[i].isconst,regs[i].wasconst); +#endif +} + +enum { + MTYPE_8000 = 
0, + MTYPE_8020, + MTYPE_0000, + MTYPE_A000, + MTYPE_1F80, +}; + +static int get_ptr_mem_type(u_int a) +{ + if(a < 0x00200000) { + if(a<0x1000&&((start>>20)==0xbfc||(start>>24)==0xa0)) + // return wrong, must use memhandler for BIOS self-test to pass + // 007 does similar stuff from a00 mirror, weird stuff + return MTYPE_8000; + return MTYPE_0000; + } + if(0x1f800000 <= a && a < 0x1f801000) + return MTYPE_1F80; + if(0x80200000 <= a && a < 0x80800000) + return MTYPE_8020; + if(0xa0000000 <= a && a < 0xa0200000) + return MTYPE_A000; + return MTYPE_8000; +} + +static int emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override) +{ + int jaddr=0,type=0; + int mr=rs1[i]; + if(((smrv_strong|smrv_weak)>>mr)&1) { + type=get_ptr_mem_type(smrv[mr]); + //printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type); + } + else { + // use the mirror we are running on + type=get_ptr_mem_type(start); + //printf("set nospec @%08x r%d %d\n", start+i*4, mr, type); + } + + if(type==MTYPE_8020) { // RAM 80200000+ mirror + emit_andimm(addr,~0x00e00000,HOST_TEMPREG); + addr=*addr_reg_override=HOST_TEMPREG; + type=0; + } + else if(type==MTYPE_0000) { // RAM 0 mirror + emit_orimm(addr,0x80000000,HOST_TEMPREG); + addr=*addr_reg_override=HOST_TEMPREG; + type=0; + } + else if(type==MTYPE_A000) { // RAM A mirror + emit_andimm(addr,~0x20000000,HOST_TEMPREG); + addr=*addr_reg_override=HOST_TEMPREG; + type=0; + } + else if(type==MTYPE_1F80) { // scratchpad + if (psxH == (void *)0x1f800000) { + emit_addimm(addr,-0x1f800000,HOST_TEMPREG); + emit_cmpimm(HOST_TEMPREG,0x1000); + jaddr=(int)out; + emit_jc(0); + } + else { + // do usual RAM check, jump will go to the right handler + type=0; + } + } + + if(type==0) + { + emit_cmpimm(addr,RAM_SIZE); + jaddr=(int)out; + #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK + // Hint to branch predictor that the branch is unlikely to be taken + if(rs1[i]>=28) + emit_jno_unlikely(0); + else + #endif + emit_jno(0); + if(ram_offset!=0) { + emit_addimm(addr,ram_offset,HOST_TEMPREG); + addr=*addr_reg_override=HOST_TEMPREG; + } + } + + return jaddr; +} + +#define shift_assemble shift_assemble_arm + +static void loadlr_assemble_arm(int i,struct regstat *i_regs) +{ + int s,th,tl,temp,temp2,addr,map=-1; + int offset; + int jaddr=0; + int memtarget=0,c=0; + int fastload_reg_override=0; + u_int hr,reglist=0; + th=get_reg(i_regs->regmap,rt1[i]|64); + tl=get_reg(i_regs->regmap,rt1[i]); + s=get_reg(i_regs->regmap,rs1[i]); + temp=get_reg(i_regs->regmap,-1); + temp2=get_reg(i_regs->regmap,FTEMP); + addr=get_reg(i_regs->regmap,AGEN1+(i&1)); + assert(addr<0); + offset=imm[i]; + for(hr=0;hrregmap[hr]>=0) reglist|=1<=0) { + c=(i_regs->wasconst>>s)&1; + if(c) { + memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE; + } + } + if(!c) { + #ifdef RAM_OFFSET + map=get_reg(i_regs->regmap,ROREG); + if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG); + #endif + emit_shlimm(addr,3,temp); + if (opcode[i]==0x22||opcode[i]==0x26) { + emit_andimm(addr,0xFFFFFFFC,temp2); // LWL/LWR + }else{ + emit_andimm(addr,0xFFFFFFF8,temp2); // LDL/LDR + } + jaddr=emit_fastpath_cmp_jump(i,temp2,&fastload_reg_override); + } + else { + if(ram_offset&&memtarget) { + emit_addimm(temp2,ram_offset,HOST_TEMPREG); + fastload_reg_override=HOST_TEMPREG; + } + if (opcode[i]==0x22||opcode[i]==0x26) { + emit_movimm(((constmap[i][s]+offset)<<3)&24,temp); // LWL/LWR + }else{ + emit_movimm(((constmap[i][s]+offset)<<3)&56,temp); // LDL/LDR + } + } + if (opcode[i]==0x22||opcode[i]==0x26) { // LWL/LWR + if(!c||memtarget) { + int 
a=temp2; + if(fastload_reg_override) a=fastload_reg_override; + //emit_readword_indexed((int)rdram-0x80000000,temp2,temp2); + emit_readword_indexed_tlb(0,a,map,temp2); + if(jaddr) add_stub(LOADW_STUB,jaddr,(int)out,i,temp2,(int)i_regs,ccadj[i],reglist); + } + else + inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj[i],reglist); + if(rt1[i]) { + assert(tl>=0); + emit_andimm(temp,24,temp); +#ifdef BIG_ENDIAN_MIPS + if (opcode[i]==0x26) // LWR +#else + if (opcode[i]==0x22) // LWL +#endif + emit_xorimm(temp,24,temp); + emit_movimm(-1,HOST_TEMPREG); + if (opcode[i]==0x26) { + emit_shr(temp2,temp,temp2); + emit_bic_lsr(tl,HOST_TEMPREG,temp,tl); + }else{ + emit_shl(temp2,temp,temp2); + emit_bic_lsl(tl,HOST_TEMPREG,temp,tl); + } + emit_or(temp2,tl,tl); + } + //emit_storereg(rt1[i],tl); // DEBUG + } + if (opcode[i]==0x1A||opcode[i]==0x1B) { // LDL/LDR + // FIXME: little endian, fastload_reg_override + int temp2h=get_reg(i_regs->regmap,FTEMP|64); + if(!c||memtarget) { + //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,temp2,temp2h); + //emit_readword_indexed((int)rdram-0x7FFFFFFC,temp2,temp2); + emit_readdword_indexed_tlb(0,temp2,map,temp2h,temp2); + if(jaddr) add_stub(LOADD_STUB,jaddr,(int)out,i,temp2,(int)i_regs,ccadj[i],reglist); + } + else + inline_readstub(LOADD_STUB,i,(constmap[i][s]+offset)&0xFFFFFFF8,i_regs->regmap,FTEMP,ccadj[i],reglist); + if(rt1[i]) { + assert(th>=0); + assert(tl>=0); + emit_testimm(temp,32); + emit_andimm(temp,24,temp); + if (opcode[i]==0x1A) { // LDL + emit_rsbimm(temp,32,HOST_TEMPREG); + emit_shl(temp2h,temp,temp2h); + emit_orrshr(temp2,HOST_TEMPREG,temp2h); + emit_movimm(-1,HOST_TEMPREG); + emit_shl(temp2,temp,temp2); + emit_cmove_reg(temp2h,th); + emit_biceq_lsl(tl,HOST_TEMPREG,temp,tl); + emit_bicne_lsl(th,HOST_TEMPREG,temp,th); + emit_orreq(temp2,tl,tl); + emit_orrne(temp2,th,th); + } + if (opcode[i]==0x1B) { // LDR + emit_xorimm(temp,24,temp); + emit_rsbimm(temp,32,HOST_TEMPREG); + emit_shr(temp2,temp,temp2); + emit_orrshl(temp2h,HOST_TEMPREG,temp2); + emit_movimm(-1,HOST_TEMPREG); + emit_shr(temp2h,temp,temp2h); + emit_cmovne_reg(temp2,tl); + emit_bicne_lsr(th,HOST_TEMPREG,temp,th); + emit_biceq_lsr(tl,HOST_TEMPREG,temp,tl); + emit_orrne(temp2h,th,th); + emit_orreq(temp2h,tl,tl); + } + } + } +} +#define loadlr_assemble loadlr_assemble_arm + +static void cop0_assemble(int i,struct regstat *i_regs) +{ + if(opcode2[i]==0) // MFC0 + { + signed char t=get_reg(i_regs->regmap,rt1[i]); + char copr=(source[i]>>11)&0x1f; + //assert(t>=0); // Why does this happen? OOT is weird + if(t>=0&&rt1[i]!=0) { + emit_readword((int)®_cop0+copr*4,t); + } + } + else if(opcode2[i]==4) // MTC0 + { + signed char s=get_reg(i_regs->regmap,rs1[i]); + char copr=(source[i]>>11)&0x1f; + assert(s>=0); + wb_register(rs1[i],i_regs->regmap,i_regs->dirty,i_regs->is32); + if(copr==9||copr==11||copr==12||copr==13) { + emit_readword((int)&last_count,HOST_TEMPREG); + emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc + emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG); + emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); + emit_writeword(HOST_CCREG,(int)&Count); + } + // What a mess. The status register (12) can enable interrupts, + // so needs a special case to handle a pending interrupt. + // The interrupt must be taken immediately, because a subsequent + // instruction might disable interrupts again. + if(copr==12||copr==13) { + if (is_delayslot) { + // burn cycles to cause cc_interrupt, which will + // reschedule next_interupt. 
Relies on CCREG from above. + assem_debug("MTC0 DS %d\n", copr); + emit_writeword(HOST_CCREG,(int)&last_count); + emit_movimm(0,HOST_CCREG); + emit_storereg(CCREG,HOST_CCREG); + emit_loadreg(rs1[i],1); + emit_movimm(copr,0); + emit_call((int)pcsx_mtc0_ds); + emit_loadreg(rs1[i],s); + return; + } + emit_movimm(start+i*4+4,HOST_TEMPREG); + emit_writeword(HOST_TEMPREG,(int)&pcaddr); + emit_movimm(0,HOST_TEMPREG); + emit_writeword(HOST_TEMPREG,(int)&pending_exception); + } + //else if(copr==12&&is_delayslot) emit_call((int)MTC0_R12); + //else + if(s==HOST_CCREG) + emit_loadreg(rs1[i],1); + else if(s!=1) + emit_mov(s,1); + emit_movimm(copr,0); + emit_call((int)pcsx_mtc0); + if(copr==9||copr==11||copr==12||copr==13) { + emit_readword((int)&Count,HOST_CCREG); + emit_readword((int)&next_interupt,HOST_TEMPREG); + emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[i]),HOST_CCREG); + emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG); + emit_writeword(HOST_TEMPREG,(int)&last_count); + emit_storereg(CCREG,HOST_CCREG); + } + if(copr==12||copr==13) { + assert(!is_delayslot); + emit_readword((int)&pending_exception,14); + emit_test(14,14); + emit_jne((int)&do_interrupt); + } + emit_loadreg(rs1[i],s); + if(get_reg(i_regs->regmap,rs1[i]|64)>=0) + emit_loadreg(rs1[i]|64,get_reg(i_regs->regmap,rs1[i]|64)); + cop1_usable=0; + } + else + { + assert(opcode2[i]==0x10); + if((source[i]&0x3f)==0x10) // RFE + { + emit_readword((int)&Status,0); + emit_andimm(0,0x3c,1); + emit_andimm(0,~0xf,0); + emit_orrshr_imm(1,2,0); + emit_writeword(0,(int)&Status); + } + } +} + +static void cop2_get_dreg(u_int copr,signed char tl,signed char temp) +{ + switch (copr) { + case 1: + case 3: + case 5: + case 8: + case 9: + case 10: + case 11: + emit_readword((int)®_cop2d[copr],tl); + emit_signextend16(tl,tl); + emit_writeword(tl,(int)®_cop2d[copr]); // hmh + break; + case 7: + case 16: + case 17: + case 18: + case 19: + emit_readword((int)®_cop2d[copr],tl); + emit_andimm(tl,0xffff,tl); + emit_writeword(tl,(int)®_cop2d[copr]); + break; + case 15: + emit_readword((int)®_cop2d[14],tl); // SXY2 + emit_writeword(tl,(int)®_cop2d[copr]); + break; + case 28: + case 29: + emit_readword((int)®_cop2d[9],temp); + emit_testimm(temp,0x8000); // do we need this? 
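+      // IRGB/ORGB (regs 28/29) are rebuilt from IR1-IR3: each IR value
+      // is masked to 0xf80 (its 5 significant bits), zeroed via andne
+      // when the 0x8000 sign test fires, then packed with the shifts
+      // below as 0bbbbbgggggrrrrr.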
+ emit_andimm(temp,0xf80,temp); + emit_andne_imm(temp,0,temp); + emit_shrimm(temp,7,tl); + emit_readword((int)®_cop2d[10],temp); + emit_testimm(temp,0x8000); + emit_andimm(temp,0xf80,temp); + emit_andne_imm(temp,0,temp); + emit_orrshr_imm(temp,2,tl); + emit_readword((int)®_cop2d[11],temp); + emit_testimm(temp,0x8000); + emit_andimm(temp,0xf80,temp); + emit_andne_imm(temp,0,temp); + emit_orrshl_imm(temp,3,tl); + emit_writeword(tl,(int)®_cop2d[copr]); + break; + default: + emit_readword((int)®_cop2d[copr],tl); + break; + } +} + +static void cop2_put_dreg(u_int copr,signed char sl,signed char temp) +{ + switch (copr) { + case 15: + emit_readword((int)®_cop2d[13],temp); // SXY1 + emit_writeword(sl,(int)®_cop2d[copr]); + emit_writeword(temp,(int)®_cop2d[12]); // SXY0 + emit_readword((int)®_cop2d[14],temp); // SXY2 + emit_writeword(sl,(int)®_cop2d[14]); + emit_writeword(temp,(int)®_cop2d[13]); // SXY1 + break; + case 28: + emit_andimm(sl,0x001f,temp); + emit_shlimm(temp,7,temp); + emit_writeword(temp,(int)®_cop2d[9]); + emit_andimm(sl,0x03e0,temp); + emit_shlimm(temp,2,temp); + emit_writeword(temp,(int)®_cop2d[10]); + emit_andimm(sl,0x7c00,temp); + emit_shrimm(temp,3,temp); + emit_writeword(temp,(int)®_cop2d[11]); + emit_writeword(sl,(int)®_cop2d[28]); + break; + case 30: + emit_movs(sl,temp); + emit_mvnmi(temp,temp); +#ifdef HAVE_ARMV5 + emit_clz(temp,temp); +#else + emit_movs(temp,HOST_TEMPREG); + emit_movimm(0,temp); + emit_jeq((int)out+4*4); + emit_addpl_imm(temp,1,temp); + emit_lslpls_imm(HOST_TEMPREG,1,HOST_TEMPREG); + emit_jns((int)out-2*4); +#endif + emit_writeword(sl,(int)®_cop2d[30]); + emit_writeword(temp,(int)®_cop2d[31]); + break; + case 31: + break; + default: + emit_writeword(sl,(int)®_cop2d[copr]); + break; + } +} + +static void cop2_assemble(int i,struct regstat *i_regs) +{ + u_int copr=(source[i]>>11)&0x1f; + signed char temp=get_reg(i_regs->regmap,-1); + if (opcode2[i]==0) { // MFC2 + signed char tl=get_reg(i_regs->regmap,rt1[i]); + if(tl>=0&&rt1[i]!=0) + cop2_get_dreg(copr,tl,temp); + } + else if (opcode2[i]==4) { // MTC2 + signed char sl=get_reg(i_regs->regmap,rs1[i]); + cop2_put_dreg(copr,sl,temp); + } + else if (opcode2[i]==2) // CFC2 + { + signed char tl=get_reg(i_regs->regmap,rt1[i]); + if(tl>=0&&rt1[i]!=0) + emit_readword((int)®_cop2c[copr],tl); + } + else if (opcode2[i]==6) // CTC2 + { + signed char sl=get_reg(i_regs->regmap,rs1[i]); + switch(copr) { + case 4: + case 12: + case 20: + case 26: + case 27: + case 29: + case 30: + emit_signextend16(sl,temp); + break; + case 31: + //value = value & 0x7ffff000; + //if (value & 0x7f87e000) value |= 0x80000000; + emit_shrimm(sl,12,temp); + emit_shlimm(temp,12,temp); + emit_testimm(temp,0x7f000000); + emit_testeqimm(temp,0x00870000); + emit_testeqimm(temp,0x0000e000); + emit_orrne_imm(temp,0x80000000,temp); + break; + default: + temp=sl; + break; + } + emit_writeword(temp,(int)®_cop2c[copr]); + assert(sl>=0); + } +} + +static void c2op_prologue(u_int op,u_int reglist) +{ + save_regs_all(reglist); +#ifdef PCNT + emit_movimm(op,0); + emit_call((int)pcnt_gte_start); +#endif + emit_addimm(FP,(int)&psxRegs.CP2D.r[0]-(int)&dynarec_local,0); // cop2 regs +} + +static void c2op_epilogue(u_int op,u_int reglist) +{ +#ifdef PCNT + emit_movimm(op,0); + emit_call((int)pcnt_gte_end); +#endif + restore_regs_all(reglist); +} + +static void c2op_call_MACtoIR(int lm,int need_flags) +{ + if(need_flags) + emit_call((int)(lm?gteMACtoIR_lm1:gteMACtoIR_lm0)); + else + emit_call((int)(lm?gteMACtoIR_lm1_nf:gteMACtoIR_lm0_nf)); +} + +static void 
c2op_call_rgb_func(void *func,int lm,int need_ir,int need_flags) +{ + emit_call((int)func); + // func is C code and trashes r0 + emit_addimm(FP,(int)&psxRegs.CP2D.r[0]-(int)&dynarec_local,0); + if(need_flags||need_ir) + c2op_call_MACtoIR(lm,need_flags); + emit_call((int)(need_flags?gteMACtoRGB:gteMACtoRGB_nf)); +} + +static void c2op_assemble(int i,struct regstat *i_regs) +{ + u_int c2op=source[i]&0x3f; + u_int hr,reglist_full=0,reglist; + int need_flags,need_ir; + for(hr=0;hrregmap[hr]>=0) reglist_full|=1<>63); // +1 because of how liveness detection works + need_ir=(gte_unneeded[i+1]&0xe00)!=0xe00; + assem_debug("gte op %08x, unneeded %016llx, need_flags %d, need_ir %d\n", + source[i],gte_unneeded[i+1],need_flags,need_ir); + if(new_dynarec_hacks&NDHACK_GTE_NO_FLAGS) + need_flags=0; + int shift = (source[i] >> 19) & 1; + int lm = (source[i] >> 10) & 1; + switch(c2op) { +#ifndef DRC_DBG + case GTE_MVMVA: { +#ifdef HAVE_ARMV5 + int v = (source[i] >> 15) & 3; + int cv = (source[i] >> 13) & 3; + int mx = (source[i] >> 17) & 3; + reglist=reglist_full&(CALLER_SAVE_REGS|0xf0); // +{r4-r7} + c2op_prologue(c2op,reglist); + /* r4,r5 = VXYZ(v) packed; r6 = &MX11(mx); r7 = &CV1(cv) */ + if(v<3) + emit_ldrd(v*8,0,4); + else { + emit_movzwl_indexed(9*4,0,4); // gteIR + emit_movzwl_indexed(10*4,0,6); + emit_movzwl_indexed(11*4,0,5); + emit_orrshl_imm(6,16,4); + } + if(mx<3) + emit_addimm(0,32*4+mx*8*4,6); + else + emit_readword((int)&zeromem_ptr,6); + if(cv<3) + emit_addimm(0,32*4+(cv*8+5)*4,7); + else + emit_readword((int)&zeromem_ptr,7); +#ifdef __ARM_NEON__ + emit_movimm(source[i],1); // opcode + emit_call((int)gteMVMVA_part_neon); + if(need_flags) { + emit_movimm(lm,1); + emit_call((int)gteMACtoIR_flags_neon); + } +#else + if(cv==3&&shift) + emit_call((int)gteMVMVA_part_cv3sh12_arm); + else { + emit_movimm(shift,1); + emit_call((int)(need_flags?gteMVMVA_part_arm:gteMVMVA_part_nf_arm)); + } + if(need_flags||need_ir) + c2op_call_MACtoIR(lm,need_flags); +#endif +#else /* if not HAVE_ARMV5 */ + c2op_prologue(c2op,reglist); + emit_movimm(source[i],1); // opcode + emit_writeword(1,(int)&psxRegs.code); + emit_call((int)(need_flags?gte_handlers[c2op]:gte_handlers_nf[c2op])); +#endif + break; + } + case GTE_OP: + c2op_prologue(c2op,reglist); + emit_call((int)(shift?gteOP_part_shift:gteOP_part_noshift)); + if(need_flags||need_ir) { + emit_addimm(FP,(int)&psxRegs.CP2D.r[0]-(int)&dynarec_local,0); + c2op_call_MACtoIR(lm,need_flags); + } + break; + case GTE_DPCS: + c2op_prologue(c2op,reglist); + c2op_call_rgb_func(shift?gteDPCS_part_shift:gteDPCS_part_noshift,lm,need_ir,need_flags); + break; + case GTE_INTPL: + c2op_prologue(c2op,reglist); + c2op_call_rgb_func(shift?gteINTPL_part_shift:gteINTPL_part_noshift,lm,need_ir,need_flags); + break; + case GTE_SQR: + c2op_prologue(c2op,reglist); + emit_call((int)(shift?gteSQR_part_shift:gteSQR_part_noshift)); + if(need_flags||need_ir) { + emit_addimm(FP,(int)&psxRegs.CP2D.r[0]-(int)&dynarec_local,0); + c2op_call_MACtoIR(lm,need_flags); + } + break; + case GTE_DCPL: + c2op_prologue(c2op,reglist); + c2op_call_rgb_func(gteDCPL_part,lm,need_ir,need_flags); + break; + case GTE_GPF: + c2op_prologue(c2op,reglist); + c2op_call_rgb_func(shift?gteGPF_part_shift:gteGPF_part_noshift,lm,need_ir,need_flags); + break; + case GTE_GPL: + c2op_prologue(c2op,reglist); + c2op_call_rgb_func(shift?gteGPL_part_shift:gteGPL_part_noshift,lm,need_ir,need_flags); + break; +#endif + default: + c2op_prologue(c2op,reglist); +#ifdef DRC_DBG + emit_movimm(source[i],1); // opcode + 
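+    // the generic C handlers re-decode the GTE instruction from
+    // psxRegs.code, so the raw opcode word is stored there before
+    // calling out; with DRC_DBG defined every c2op takes this default
+    // path (r1 is only scratch here)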
+      emit_writeword(1,(int)&psxRegs.code);
+#endif
+      emit_call((int)(need_flags?gte_handlers[c2op]:gte_handlers_nf[c2op]));
+      break;
+    }
+    c2op_epilogue(c2op,reglist);
+  }
+}
+
+static void cop1_unusable(int i,struct regstat *i_regs)
+{
+  // XXX: should just do the exception instead
+  if(!cop1_usable) {
+    int jaddr=(int)out;
+    emit_jmp(0);
+    add_stub(FP_STUB,jaddr,(int)out,i,0,(int)i_regs,is_delayslot,0);
+    cop1_usable=1;
+  }
+}
+
+static void cop1_assemble(int i,struct regstat *i_regs)
+{
+  cop1_unusable(i, i_regs);
+}
+
+static void fconv_assemble_arm(int i,struct regstat *i_regs)
+{
+  cop1_unusable(i, i_regs);
+}
+#define fconv_assemble fconv_assemble_arm
+
+static void fcomp_assemble(int i,struct regstat *i_regs)
+{
+  cop1_unusable(i, i_regs);
+}
+
+static void float_assemble(int i,struct regstat *i_regs)
+{
+  cop1_unusable(i, i_regs);
+}
+
+static void multdiv_assemble_arm(int i,struct regstat *i_regs)
+{
+  // case 0x18: MULT
+  // case 0x19: MULTU
+  // case 0x1A: DIV
+  // case 0x1B: DIVU
+  // case 0x1C: DMULT
+  // case 0x1D: DMULTU
+  // case 0x1E: DDIV
+  // case 0x1F: DDIVU
+  if(rs1[i]&&rs2[i])
+  {
+    if((opcode2[i]&4)==0) // 32-bit
+    {
+      if(opcode2[i]==0x18) // MULT
+      {
+        signed char m1=get_reg(i_regs->regmap,rs1[i]);
+        signed char m2=get_reg(i_regs->regmap,rs2[i]);
+        signed char hi=get_reg(i_regs->regmap,HIREG);
+        signed char lo=get_reg(i_regs->regmap,LOREG);
+        assert(m1>=0);
+        assert(m2>=0);
+        assert(hi>=0);
+        assert(lo>=0);
+        emit_smull(m1,m2,hi,lo);
+      }
+      if(opcode2[i]==0x19) // MULTU
+      {
+        signed char m1=get_reg(i_regs->regmap,rs1[i]);
+        signed char m2=get_reg(i_regs->regmap,rs2[i]);
+        signed char hi=get_reg(i_regs->regmap,HIREG);
+        signed char lo=get_reg(i_regs->regmap,LOREG);
+        assert(m1>=0);
+        assert(m2>=0);
+        assert(hi>=0);
+        assert(lo>=0);
+        emit_umull(m1,m2,hi,lo);
+      }
+      if(opcode2[i]==0x1A) // DIV
+      {
+        signed char d1=get_reg(i_regs->regmap,rs1[i]);
+        signed char d2=get_reg(i_regs->regmap,rs2[i]);
+        assert(d1>=0);
+        assert(d2>=0);
+        signed char quotient=get_reg(i_regs->regmap,LOREG);
+        signed char remainder=get_reg(i_regs->regmap,HIREG);
+        assert(quotient>=0);
+        assert(remainder>=0);
+        emit_movs(d1,remainder);
+        emit_movimm(0xffffffff,quotient);
+        emit_negmi(quotient,quotient); // .. quotient and ..
+        emit_negmi(remainder,remainder); // ..
remainder for div0 case (will be negated back after jump) + emit_movs(d2,HOST_TEMPREG); + emit_jeq((int)out+52); // Division by zero + emit_negsmi(HOST_TEMPREG,HOST_TEMPREG); +#ifdef HAVE_ARMV5 + emit_clz(HOST_TEMPREG,quotient); + emit_shl(HOST_TEMPREG,quotient,HOST_TEMPREG); +#else + emit_movimm(0,quotient); + emit_addpl_imm(quotient,1,quotient); + emit_lslpls_imm(HOST_TEMPREG,1,HOST_TEMPREG); + emit_jns((int)out-2*4); +#endif + emit_orimm(quotient,1<<31,quotient); + emit_shr(quotient,quotient,quotient); + emit_cmp(remainder,HOST_TEMPREG); + emit_subcs(remainder,HOST_TEMPREG,remainder); + emit_adcs(quotient,quotient,quotient); + emit_shrimm(HOST_TEMPREG,1,HOST_TEMPREG); + emit_jcc((int)out-16); // -4 + emit_teq(d1,d2); + emit_negmi(quotient,quotient); + emit_test(d1,d1); + emit_negmi(remainder,remainder); + } + if(opcode2[i]==0x1B) // DIVU + { + signed char d1=get_reg(i_regs->regmap,rs1[i]); // dividend + signed char d2=get_reg(i_regs->regmap,rs2[i]); // divisor + assert(d1>=0); + assert(d2>=0); + signed char quotient=get_reg(i_regs->regmap,LOREG); + signed char remainder=get_reg(i_regs->regmap,HIREG); + assert(quotient>=0); + assert(remainder>=0); + emit_mov(d1,remainder); + emit_movimm(0xffffffff,quotient); // div0 case + emit_test(d2,d2); + emit_jeq((int)out+40); // Division by zero +#ifdef HAVE_ARMV5 + emit_clz(d2,HOST_TEMPREG); + emit_movimm(1<<31,quotient); + emit_shl(d2,HOST_TEMPREG,d2); +#else + emit_movimm(0,HOST_TEMPREG); + emit_addpl_imm(HOST_TEMPREG,1,HOST_TEMPREG); + emit_lslpls_imm(d2,1,d2); + emit_jns((int)out-2*4); + emit_movimm(1<<31,quotient); +#endif + emit_shr(quotient,HOST_TEMPREG,quotient); + emit_cmp(remainder,d2); + emit_subcs(remainder,d2,remainder); + emit_adcs(quotient,quotient,quotient); + emit_shrcc_imm(d2,1,d2); + emit_jcc((int)out-16); // -4 + } + } + else // 64-bit + assert(0); + } + else + { + // Multiply by zero is zero. + // MIPS does not have a divide by zero exception. + // The result is undefined, we return zero. + signed char hr=get_reg(i_regs->regmap,HIREG); + signed char lr=get_reg(i_regs->regmap,LOREG); + if(hr>=0) emit_zeroreg(hr); + if(lr>=0) emit_zeroreg(lr); + } +} +#define multdiv_assemble multdiv_assemble_arm + +static void do_preload_rhash(int r) { + // Don't need this for ARM. On x86, this puts the value 0xf8 into the + // register. 
On ARM the hash can be done with a single instruction (below) +} + +static void do_preload_rhtbl(int ht) { + emit_addimm(FP,(int)&mini_ht-(int)&dynarec_local,ht); +} + +static void do_rhash(int rs,int rh) { + emit_andimm(rs,0xf8,rh); +} + +static void do_miniht_load(int ht,int rh) { + assem_debug("ldr %s,[%s,%s]!\n",regname[rh],regname[ht],regname[rh]); + output_w32(0xe7b00000|rd_rn_rm(rh,ht,rh)); +} + +static void do_miniht_jump(int rs,int rh,int ht) { + emit_cmp(rh,rs); + emit_ldreq_indexed(ht,4,15); + #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK + emit_mov(rs,7); + emit_jmp(jump_vaddr_reg[7]); + #else + emit_jmp(jump_vaddr_reg[rs]); + #endif +} + +static void do_miniht_insert(u_int return_address,int rt,int temp) { + #ifndef HAVE_ARMV7 + emit_movimm(return_address,rt); // PC into link register + add_to_linker((int)out,return_address,1); + emit_pcreladdr(temp); + emit_writeword(rt,(int)&mini_ht[(return_address&0xFF)>>3][0]); + emit_writeword(temp,(int)&mini_ht[(return_address&0xFF)>>3][1]); + #else + emit_movw(return_address&0x0000FFFF,rt); + add_to_linker((int)out,return_address,1); + emit_pcreladdr(temp); + emit_writeword(temp,(int)&mini_ht[(return_address&0xFF)>>3][1]); + emit_movt(return_address&0xFFFF0000,rt); + emit_writeword(rt,(int)&mini_ht[(return_address&0xFF)>>3][0]); + #endif +} + +static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t is32_pre,uint64_t u,uint64_t uu) +{ + //if(dirty_pre==dirty) return; + int hr,reg; + for(hr=0;hr>(reg&63))&1) { + if(reg>0) { + if(((dirty_pre&~dirty)>>hr)&1) { + if(reg>0&®<34) { + emit_storereg(reg,hr); + if( ((is32_pre&~uu)>>reg)&1 ) { + emit_sarimm(hr,31,HOST_TEMPREG); + emit_storereg(reg|64,HOST_TEMPREG); + } + } + else if(reg>=64) { + emit_storereg(reg,hr); + } + } + } + } + } + } +} + + +/* using strd could possibly help but you'd have to allocate registers in pairs +static void wb_invalidate_arm(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,uint64_t u,uint64_t uu) +{ + int hr; + int wrote=-1; + for(hr=HOST_REGS-1;hr>=0;hr--) { + if(hr!=EXCLUDE_REG) { + if(pre[hr]!=entry[hr]) { + if(pre[hr]>=0) { + if((dirty>>hr)&1) { + if(get_reg(entry,pre[hr])<0) { + if(pre[hr]<64) { + if(!((u>>pre[hr])&1)) { + if(hr<10&&(~hr&1)&&(pre[hr+1]<0||wrote==hr+1)) { + if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) { + emit_sarimm(hr,31,hr+1); + emit_strdreg(pre[hr],hr); + } + else + emit_storereg(pre[hr],hr); + }else{ + emit_storereg(pre[hr],hr); + if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) { + emit_sarimm(hr,31,hr); + emit_storereg(pre[hr]|64,hr); + } + } + } + }else{ + if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) { + emit_storereg(pre[hr],hr); + } + } + wrote=hr; + } + } + } + } + } + } + for(hr=0;hr=0) { + int nr; + if((nr=get_reg(entry,pre[hr]))>=0) { + emit_mov(hr,nr); + } + } + } + } + } +} +#define wb_invalidate wb_invalidate_arm +*/ + +static void mark_clear_cache(void *target) +{ + u_long offset = (char *)target - (char *)BASE_ADDR; + u_int mask = 1u << ((offset >> 12) & 31); + if (!(needs_clear_cache[offset >> 17] & mask)) { + char *start = (char *)((u_long)target & ~4095ul); + start_tcache_write(start, start + 4096); + needs_clear_cache[offset >> 17] |= mask; + } +} + +// Clearing the cache is rather slow on ARM Linux, so mark the areas +// that need to be cleared, and then only clear these areas once. 
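+// A sketch of the bookkeeping mark_clear_cache() does above, rewritten
+// in terms of a 4KB page index:
+//
+//   u_int page = offset >> 12;                 // 4KB page in the cache
+//   needs_clear_cache[page >> 5] |= 1u << (page & 31);
+//
+// One u_int therefore covers 32 pages (128KB), which is why
+// do_clear_cache() below walks 1<<(TARGET_SIZE_2-17) words.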
+static void do_clear_cache() +{ + int i,j; + for (i=0;i<(1<<(TARGET_SIZE_2-17));i++) + { + u_int bitmap=needs_clear_cache[i]; + if(bitmap) { + u_int start,end; + for(j=0;j<32;j++) + { + if(bitmap&(1<> 16) +#else + mov \reg, #(\imm & 0x0000ff) + orr \reg, #(\imm & 0x00ff00) + orr \reg, #(\imm & 0xff0000) +#endif +.endm + +/* r0 = virtual target address */ +/* r1 = instruction to patch */ +.macro dyna_linker_main +#ifndef NO_WRITE_EXEC + load_varadr_ext r3, jump_in + /* get_page */ + lsr r2, r0, #12 + mov r6, #4096 + bic r2, r2, #0xe0000 + sub r6, r6, #1 + cmp r2, #0x1000 + ldr r7, [r1] + biclt r2, #0x0e00 + and r6, r6, r2 + cmp r2, #2048 + add r12, r7, #2 + orrcs r2, r6, #2048 + ldr r5, [r3, r2, lsl #2] + lsl r12, r12, #8 + add r6, r1, r12, asr #6 + mov r8, #0 + /* jump_in lookup */ +1: + movs r4, r5 + beq 2f + ldr r3, [r5] /* ll_entry .vaddr */ + ldrd r4, r5, [r4, #8] /* ll_entry .next, .addr */ + teq r3, r0 + bne 1b + teq r4, r6 + moveq pc, r4 /* Stale i-cache */ + mov r8, r4 + b 1b /* jump_in may have dupes, continue search */ +2: + tst r8, r8 + beq 3f /* r0 not in jump_in */ + + mov r5, r1 + mov r1, r6 + bl add_link + sub r2, r8, r5 + and r1, r7, #0xff000000 + lsl r2, r2, #6 + sub r1, r1, #2 + add r1, r1, r2, lsr #8 + str r1, [r5] + mov pc, r8 +3: + /* hash_table lookup */ + cmp r2, #2048 + load_varadr_ext r3, jump_dirty + eor r4, r0, r0, lsl #16 + lslcc r2, r0, #9 + load_varadr_ext r6, hash_table + lsr r4, r4, #12 + lsrcc r2, r2, #21 + bic r4, r4, #15 + ldr r5, [r3, r2, lsl #2] + ldr r7, [r6, r4]! + teq r7, r0 + ldreq pc, [r6, #4] + ldr r7, [r6, #8] + teq r7, r0 + ldreq pc, [r6, #12] + /* jump_dirty lookup */ +6: + movs r4, r5 + beq 8f + ldr r3, [r5] + ldr r5, [r4, #12] + teq r3, r0 + bne 6b +7: + ldr r1, [r4, #8] + /* hash_table insert */ + ldr r2, [r6] + ldr r3, [r6, #4] + str r0, [r6] + str r1, [r6, #4] + str r2, [r6, #8] + str r3, [r6, #12] + mov pc, r1 +8: +#else + /* XXX: should be able to do better than this... 
*/ + bl get_addr_ht + mov pc, r0 +#endif +.endm + + +FUNCTION(dyna_linker): + /* r0 = virtual target address */ + /* r1 = instruction to patch */ + dyna_linker_main + + mov r4, r0 + mov r5, r1 + bl new_recompile_block + tst r0, r0 + mov r0, r4 + mov r1, r5 + beq dyna_linker + /* pagefault */ + mov r1, r0 + mov r2, #8 + .size dyna_linker, .-dyna_linker + +FUNCTION(exec_pagefault): + /* r0 = instruction pointer */ + /* r1 = fault address */ + /* r2 = cause */ + ldr r3, [fp, #LO_reg_cop0+48] /* Status */ + mvn r6, #0xF000000F + ldr r4, [fp, #LO_reg_cop0+16] /* Context */ + bic r6, r6, #0x0F800000 + str r0, [fp, #LO_reg_cop0+56] /* EPC */ + orr r3, r3, #2 + str r1, [fp, #LO_reg_cop0+32] /* BadVAddr */ + bic r4, r4, r6 + str r3, [fp, #LO_reg_cop0+48] /* Status */ + and r5, r6, r1, lsr #9 + str r2, [fp, #LO_reg_cop0+52] /* Cause */ + and r1, r1, r6, lsl #9 + str r1, [fp, #LO_reg_cop0+40] /* EntryHi */ + orr r4, r4, r5 + str r4, [fp, #LO_reg_cop0+16] /* Context */ + mov r0, #0x80000000 + bl get_addr_ht + mov pc, r0 + .size exec_pagefault, .-exec_pagefault + +/* Special dynamic linker for the case where a page fault + may occur in a branch delay slot */ +FUNCTION(dyna_linker_ds): + /* r0 = virtual target address */ + /* r1 = instruction to patch */ + dyna_linker_main + + mov r4, r0 + bic r0, r0, #7 + mov r5, r1 + orr r0, r0, #1 + bl new_recompile_block + tst r0, r0 + mov r0, r4 + mov r1, r5 + beq dyna_linker_ds + /* pagefault */ + bic r1, r0, #7 + mov r2, #0x80000008 /* High bit set indicates pagefault in delay slot */ + sub r0, r1, #4 + b exec_pagefault + .size dyna_linker_ds, .-dyna_linker_ds + + .align 2 + +FUNCTION(jump_vaddr_r0): + eor r2, r0, r0, lsl #16 + b jump_vaddr + .size jump_vaddr_r0, .-jump_vaddr_r0 +FUNCTION(jump_vaddr_r1): + eor r2, r1, r1, lsl #16 + mov r0, r1 + b jump_vaddr + .size jump_vaddr_r1, .-jump_vaddr_r1 +FUNCTION(jump_vaddr_r2): + mov r0, r2 + eor r2, r2, r2, lsl #16 + b jump_vaddr + .size jump_vaddr_r2, .-jump_vaddr_r2 +FUNCTION(jump_vaddr_r3): + eor r2, r3, r3, lsl #16 + mov r0, r3 + b jump_vaddr + .size jump_vaddr_r3, .-jump_vaddr_r3 +FUNCTION(jump_vaddr_r4): + eor r2, r4, r4, lsl #16 + mov r0, r4 + b jump_vaddr + .size jump_vaddr_r4, .-jump_vaddr_r4 +FUNCTION(jump_vaddr_r5): + eor r2, r5, r5, lsl #16 + mov r0, r5 + b jump_vaddr + .size jump_vaddr_r5, .-jump_vaddr_r5 +FUNCTION(jump_vaddr_r6): + eor r2, r6, r6, lsl #16 + mov r0, r6 + b jump_vaddr + .size jump_vaddr_r6, .-jump_vaddr_r6 +FUNCTION(jump_vaddr_r8): + eor r2, r8, r8, lsl #16 + mov r0, r8 + b jump_vaddr + .size jump_vaddr_r8, .-jump_vaddr_r8 +FUNCTION(jump_vaddr_r9): + eor r2, r9, r9, lsl #16 + mov r0, r9 + b jump_vaddr + .size jump_vaddr_r9, .-jump_vaddr_r9 +FUNCTION(jump_vaddr_r10): + eor r2, r10, r10, lsl #16 + mov r0, r10 + b jump_vaddr + .size jump_vaddr_r10, .-jump_vaddr_r10 +FUNCTION(jump_vaddr_r12): + eor r2, r12, r12, lsl #16 + mov r0, r12 + b jump_vaddr + .size jump_vaddr_r12, .-jump_vaddr_r12 +FUNCTION(jump_vaddr_r7): + eor r2, r7, r7, lsl #16 + add r0, r7, #0 + .size jump_vaddr_r7, .-jump_vaddr_r7 +FUNCTION(jump_vaddr): + load_varadr_ext r1, hash_table + mvn r3, #15 + and r2, r3, r2, lsr #12 + ldr r2, [r1, r2]! 
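+ @ at this point the bucket index is ((vaddr ^ (vaddr lsl 16)) lsr 12) & ~15,
+ @ the XOR having been precomputed into r2 by the jump_vaddr_rN stubs above;
+ @ each 16-byte bucket holds two {vaddr, codeptr} pairs, probed in order below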
+ teq r2, r0 + ldreq pc, [r1, #4] + ldr r2, [r1, #8] + teq r2, r0 + ldreq pc, [r1, #12] + str r10, [fp, #LO_cycle_count] + bl get_addr + ldr r10, [fp, #LO_cycle_count] + mov pc, r0 + .size jump_vaddr, .-jump_vaddr + + .align 2 + +FUNCTION(verify_code_ds): + str r8, [fp, #LO_branch_target] +FUNCTION(verify_code_vm): +FUNCTION(verify_code): + /* r1 = source */ + /* r2 = target */ + /* r3 = length */ + tst r3, #4 + mov r4, #0 + add r3, r1, r3 + mov r5, #0 + ldrne r4, [r1], #4 + mov r12, #0 + ldrne r5, [r2], #4 + teq r1, r3 + beq .D3 +.D2: + ldr r7, [r1], #4 + eor r9, r4, r5 + ldr r8, [r2], #4 + orrs r9, r9, r12 + bne .D4 + ldr r4, [r1], #4 + eor r12, r7, r8 + ldr r5, [r2], #4 + cmp r1, r3 + bcc .D2 + teq r7, r8 +.D3: + teqeq r4, r5 +.D4: + ldr r8, [fp, #LO_branch_target] + moveq pc, lr +.D5: + bl get_addr + mov pc, r0 + .size verify_code, .-verify_code + .size verify_code_vm, .-verify_code_vm + + .align 2 +FUNCTION(cc_interrupt): + ldr r0, [fp, #LO_last_count] + mov r1, #0 + mov r2, #0x1fc + add r10, r0, r10 + str r1, [fp, #LO_pending_exception] + and r2, r2, r10, lsr #17 + add r3, fp, #LO_restore_candidate + str r10, [fp, #LO_cycle] /* PCSX cycles */ +@@ str r10, [fp, #LO_reg_cop0+36] /* Count */ + ldr r4, [r2, r3] + mov r10, lr + tst r4, r4 + bne .E4 +.E1: + bl gen_interupt + mov lr, r10 + ldr r10, [fp, #LO_cycle] + ldr r0, [fp, #LO_next_interupt] + ldr r1, [fp, #LO_pending_exception] + ldr r2, [fp, #LO_stop] + str r0, [fp, #LO_last_count] + sub r10, r10, r0 + tst r2, r2 + ldmfdne sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc} + tst r1, r1 + moveq pc, lr +.E2: + ldr r0, [fp, #LO_pcaddr] + bl get_addr_ht + mov pc, r0 +.E4: + /* Move 'dirty' blocks to the 'clean' list */ + lsl r5, r2, #3 + str r1, [r2, r3] +.E5: + lsrs r4, r4, #1 + mov r0, r5 + add r5, r5, #1 + blcs clean_blocks + tst r5, #31 + bne .E5 + b .E1 + .size cc_interrupt, .-cc_interrupt + + .align 2 +FUNCTION(do_interrupt): + ldr r0, [fp, #LO_pcaddr] + bl get_addr_ht + add r10, r10, #2 + mov pc, r0 + .size do_interrupt, .-do_interrupt + + .align 2 +FUNCTION(fp_exception): + mov r2, #0x10000000 +.E7: + ldr r1, [fp, #LO_reg_cop0+48] /* Status */ + mov r3, #0x80000000 + str r0, [fp, #LO_reg_cop0+56] /* EPC */ + orr r1, #2 + add r2, r2, #0x2c + str r1, [fp, #LO_reg_cop0+48] /* Status */ + str r2, [fp, #LO_reg_cop0+52] /* Cause */ + add r0, r3, #0x80 + bl get_addr_ht + mov pc, r0 + .size fp_exception, .-fp_exception + .align 2 +FUNCTION(fp_exception_ds): + mov r2, #0x90000000 /* Set high bit if delay slot */ + b .E7 + .size fp_exception_ds, .-fp_exception_ds + + .align 2 +FUNCTION(jump_syscall): + ldr r1, [fp, #LO_reg_cop0+48] /* Status */ + mov r3, #0x80000000 + str r0, [fp, #LO_reg_cop0+56] /* EPC */ + orr r1, #2 + mov r2, #0x20 + str r1, [fp, #LO_reg_cop0+48] /* Status */ + str r2, [fp, #LO_reg_cop0+52] /* Cause */ + add r0, r3, #0x80 + bl get_addr_ht + mov pc, r0 + .size jump_syscall, .-jump_syscall + .align 2 + + .align 2 +FUNCTION(jump_syscall_hle): + str r0, [fp, #LO_pcaddr] /* PC must be set to EPC for psxException */ + ldr r2, [fp, #LO_last_count] + mov r1, #0 /* in delay slot */ + add r2, r2, r10 + mov r0, #0x20 /* cause */ + str r2, [fp, #LO_cycle] /* PCSX cycle counter */ + bl psxException + + /* note: psxException might do recursive recompiler call from it's HLE code, + * so be ready for this */ +pcsx_return: + ldr r1, [fp, #LO_next_interupt] + ldr r10, [fp, #LO_cycle] + ldr r0, [fp, #LO_pcaddr] + sub r10, r10, r1 + str r1, [fp, #LO_last_count] + bl get_addr_ht + mov pc, r0 + .size jump_syscall_hle, .-jump_syscall_hle + + 
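+/* The entries below hand control to C code (HLE handler or interpreter)
+ * and resume through pcsx_return above: r0 = target PC, r1 = handler to
+ * call (jump_hlecall only), r10 = cycles relative to last_count as in
+ * the rest of this file. */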
.align 2 +FUNCTION(jump_hlecall): + ldr r2, [fp, #LO_last_count] + str r0, [fp, #LO_pcaddr] + add r2, r2, r10 + adr lr, pcsx_return + str r2, [fp, #LO_cycle] /* PCSX cycle counter */ + bx r1 + .size jump_hlecall, .-jump_hlecall + + .align 2 +FUNCTION(jump_intcall): + ldr r2, [fp, #LO_last_count] + str r0, [fp, #LO_pcaddr] + add r2, r2, r10 + adr lr, pcsx_return + str r2, [fp, #LO_cycle] /* PCSX cycle counter */ + b execI + .size jump_hlecall, .-jump_hlecall + + .align 2 +FUNCTION(new_dyna_leave): + ldr r0, [fp, #LO_last_count] + add r12, fp, #28 + add r10, r0, r10 + str r10, [fp, #LO_cycle] + ldmfd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, pc} + .size new_dyna_leave, .-new_dyna_leave + + .align 2 +FUNCTION(invalidate_addr_r0): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + b invalidate_addr_call + .size invalidate_addr_r0, .-invalidate_addr_r0 + .align 2 +FUNCTION(invalidate_addr_r1): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + mov r0, r1 + b invalidate_addr_call + .size invalidate_addr_r1, .-invalidate_addr_r1 + .align 2 +FUNCTION(invalidate_addr_r2): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + mov r0, r2 + b invalidate_addr_call + .size invalidate_addr_r2, .-invalidate_addr_r2 + .align 2 +FUNCTION(invalidate_addr_r3): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + mov r0, r3 + b invalidate_addr_call + .size invalidate_addr_r3, .-invalidate_addr_r3 + .align 2 +FUNCTION(invalidate_addr_r4): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + mov r0, r4 + b invalidate_addr_call + .size invalidate_addr_r4, .-invalidate_addr_r4 + .align 2 +FUNCTION(invalidate_addr_r5): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + mov r0, r5 + b invalidate_addr_call + .size invalidate_addr_r5, .-invalidate_addr_r5 + .align 2 +FUNCTION(invalidate_addr_r6): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + mov r0, r6 + b invalidate_addr_call + .size invalidate_addr_r6, .-invalidate_addr_r6 + .align 2 +FUNCTION(invalidate_addr_r7): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + mov r0, r7 + b invalidate_addr_call + .size invalidate_addr_r7, .-invalidate_addr_r7 + .align 2 +FUNCTION(invalidate_addr_r8): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + mov r0, r8 + b invalidate_addr_call + .size invalidate_addr_r8, .-invalidate_addr_r8 + .align 2 +FUNCTION(invalidate_addr_r9): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + mov r0, r9 + b invalidate_addr_call + .size invalidate_addr_r9, .-invalidate_addr_r9 + .align 2 +FUNCTION(invalidate_addr_r10): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + mov r0, r10 + b invalidate_addr_call + .size invalidate_addr_r10, .-invalidate_addr_r10 + .align 2 +FUNCTION(invalidate_addr_r12): + stmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, lr} + mov r0, r12 + .size invalidate_addr_r12, .-invalidate_addr_r12 + .align 2 +invalidate_addr_call: + ldr r12, [fp, #LO_inv_code_start] + ldr lr, [fp, #LO_inv_code_end] + cmp r0, r12 + cmpcs lr, r0 + blcc invalidate_addr + ldmia fp, {r0, r1, r2, r3, EXTRA_UNSAVED_REGS r12, pc} + .size invalidate_addr_call, .-invalidate_addr_call + + .align 2 +FUNCTION(new_dyna_start): + /* ip is stored to conform EABI alignment */ + stmfd sp!, {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr} + load_varadr fp, dynarec_local + ldr r0, [fp, #LO_pcaddr] + bl get_addr_ht + ldr r1, [fp, #LO_next_interupt] + ldr r10, [fp, #LO_cycle] + str r1, [fp, #LO_last_count] + sub r10, r10, r1 + mov pc, r0 + .size new_dyna_start, .-new_dyna_start + +/* 
--------------------------------------- */ + +.align 2 + +.macro pcsx_read_mem readop tab_shift + /* r0 = address, r1 = handler_tab, r2 = cycles */ + lsl r3, r0, #20 + lsr r3, #(20+\tab_shift) + ldr r12, [fp, #LO_last_count] + ldr r1, [r1, r3, lsl #2] + add r2, r2, r12 + lsls r1, #1 +.if \tab_shift == 1 + lsl r3, #1 + \readop r0, [r1, r3] +.else + \readop r0, [r1, r3, lsl #\tab_shift] +.endif + movcc pc, lr + str r2, [fp, #LO_cycle] + bx r1 +.endm + +FUNCTION(jump_handler_read8): + add r1, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part + pcsx_read_mem ldrbcc, 0 + +FUNCTION(jump_handler_read16): + add r1, #0x1000/4*4 @ shift to r16 part + pcsx_read_mem ldrhcc, 1 + +FUNCTION(jump_handler_read32): + pcsx_read_mem ldrcc, 2 + + +.macro pcsx_write_mem wrtop tab_shift + /* r0 = address, r1 = data, r2 = cycles, r3 = handler_tab */ + lsl r12,r0, #20 + lsr r12, #(20+\tab_shift) + ldr r3, [r3, r12, lsl #2] + str r0, [fp, #LO_address] @ some handlers still need it.. + lsls r3, #1 + mov r0, r2 @ cycle return in case of direct store +.if \tab_shift == 1 + lsl r12, #1 + \wrtop r1, [r3, r12] +.else + \wrtop r1, [r3, r12, lsl #\tab_shift] +.endif + movcc pc, lr + ldr r12, [fp, #LO_last_count] + mov r0, r1 + add r2, r2, r12 + push {r2, lr} + str r2, [fp, #LO_cycle] + blx r3 + + ldr r0, [fp, #LO_next_interupt] + pop {r2, r3} + str r0, [fp, #LO_last_count] + sub r0, r2, r0 + bx r3 +.endm + +FUNCTION(jump_handler_write8): + add r3, #0x1000/4*4 + 0x1000/2*4 @ shift to r8 part + pcsx_write_mem strbcc, 0 + +FUNCTION(jump_handler_write16): + add r3, #0x1000/4*4 @ shift to r16 part + pcsx_write_mem strhcc, 1 + +FUNCTION(jump_handler_write32): + pcsx_write_mem strcc, 2 + +FUNCTION(jump_handler_write_h): + /* r0 = address, r1 = data, r2 = cycles, r3 = handler */ + ldr r12, [fp, #LO_last_count] + str r0, [fp, #LO_address] @ some handlers still need it.. + add r2, r2, r12 + mov r0, r1 + push {r2, lr} + str r2, [fp, #LO_cycle] + blx r3 + + ldr r0, [fp, #LO_next_interupt] + pop {r2, r3} + str r0, [fp, #LO_last_count] + sub r0, r2, r0 + bx r3 + +FUNCTION(jump_handle_swl): + /* r0 = address, r1 = data, r2 = cycles */ + ldr r3, [fp, #LO_mem_wtab] + mov r12,r0,lsr #12 + ldr r3, [r3, r12, lsl #2] + lsls r3, #1 + bcs 4f + add r3, r0, r3 + mov r0, r2 + tst r3, #2 + beq 101f + tst r3, #1 + beq 2f +3: + str r1, [r3, #-3] + bx lr +2: + lsr r2, r1, #8 + lsr r1, #24 + strh r2, [r3, #-2] + strb r1, [r3] + bx lr +101: + tst r3, #1 + lsrne r1, #16 @ 1 + lsreq r12, r1, #24 @ 0 + strhne r1, [r3, #-1] + strbeq r12, [r3] + bx lr +4: + mov r0, r2 +@ b abort + bx lr @ TODO? + + +FUNCTION(jump_handle_swr): + /* r0 = address, r1 = data, r2 = cycles */ + ldr r3, [fp, #LO_mem_wtab] + mov r12,r0,lsr #12 + ldr r3, [r3, r12, lsl #2] + lsls r3, #1 + bcs 4f + add r3, r0, r3 + and r12,r3, #3 + mov r0, r2 + cmp r12,#2 + strbgt r1, [r3] @ 3 + strheq r1, [r3] @ 2 + cmp r12,#1 + strlt r1, [r3] @ 0 + bxne lr + lsr r2, r1, #8 @ 1 + strb r1, [r3] + strh r2, [r3, #1] + bx lr +4: + mov r0, r2 +@ b abort + bx lr @ TODO? 
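+@ Root counter reads, kept division-free: the mode-0 macro below returns
+@ (cycles - cycleStart) & 0xffff via the paired lsl/lsr #16, and
+@ rcnt0_read_count_m1 approximates the /5 clock divider as
+@ (x * 0x3334) >> 16, since 0x3334/0x10000 is about 1/5.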
+ + +.macro rcntx_read_mode0 num + /* r0 = address, r2 = cycles */ + ldr r3, [fp, #LO_rcnts+6*4+7*4*\num] @ cycleStart + mov r0, r2, lsl #16 + sub r0, r0, r3, lsl #16 + lsr r0, #16 + bx lr +.endm + +FUNCTION(rcnt0_read_count_m0): + rcntx_read_mode0 0 + +FUNCTION(rcnt1_read_count_m0): + rcntx_read_mode0 1 + +FUNCTION(rcnt2_read_count_m0): + rcntx_read_mode0 2 + +FUNCTION(rcnt0_read_count_m1): + /* r0 = address, r2 = cycles */ + ldr r3, [fp, #LO_rcnts+6*4+7*4*0] @ cycleStart + mov_16 r1, 0x3334 + sub r2, r2, r3 + mul r0, r1, r2 @ /= 5 + lsr r0, #16 + bx lr + +FUNCTION(rcnt1_read_count_m1): + /* r0 = address, r2 = cycles */ + ldr r3, [fp, #LO_rcnts+6*4+7*4*1] + mov_24 r1, 0x1e6cde + sub r2, r2, r3 + umull r3, r0, r1, r2 @ ~ /= hsync_cycles, max ~0x1e6cdd + bx lr + +FUNCTION(rcnt2_read_count_m1): + /* r0 = address, r2 = cycles */ + ldr r3, [fp, #LO_rcnts+6*4+7*4*2] + mov r0, r2, lsl #16-3 + sub r0, r0, r3, lsl #16-3 + lsr r0, #16 @ /= 8 + bx lr + +@ vim:filetype=armasm diff --git a/libpcsxcore/new_dynarec/arm/linkage_offsets.h b/libpcsxcore/new_dynarec/arm/linkage_offsets.h new file mode 100644 index 0000000..f7e1911 --- /dev/null +++ b/libpcsxcore/new_dynarec/arm/linkage_offsets.h @@ -0,0 +1,41 @@ + +#define LO_next_interupt 64 +#define LO_cycle_count (LO_next_interupt + 4) +#define LO_last_count (LO_cycle_count + 4) +#define LO_pending_exception (LO_last_count + 4) +#define LO_stop (LO_pending_exception + 4) +#define LO_invc_ptr (LO_stop + 4) +#define LO_address (LO_invc_ptr + 4) +#define LO_psxRegs (LO_address + 4) +#define LO_reg (LO_psxRegs) +#define LO_lo (LO_reg + 128) +#define LO_hi (LO_lo + 4) +#define LO_reg_cop0 (LO_hi + 4) +#define LO_reg_cop2d (LO_reg_cop0 + 128) +#define LO_reg_cop2c (LO_reg_cop2d + 128) +#define LO_PC (LO_reg_cop2c + 128) +#define LO_pcaddr (LO_PC) +#define LO_code (LO_PC + 4) +#define LO_cycle (LO_code + 4) +#define LO_interrupt (LO_cycle + 4) +#define LO_intCycle (LO_interrupt + 4) +#define LO_psxRegs_end (LO_intCycle + 256) +#define LO_rcnts (LO_psxRegs_end) +#define LO_rcnts_end (LO_rcnts + 7*4*4) +#define LO_mem_rtab (LO_rcnts_end) +#define LO_mem_wtab (LO_mem_rtab + 4) +#define LO_psxH_ptr (LO_mem_wtab + 4) +#define LO_zeromem_ptr (LO_psxH_ptr + 4) +#define LO_inv_code_start (LO_zeromem_ptr + 4) +#define LO_inv_code_end (LO_inv_code_start + 4) +#define LO_branch_target (LO_inv_code_end + 4) +#define LO_scratch_buf_ptr (LO_branch_target + 4) +#define LO_align0 (LO_scratch_buf_ptr + 4) +#define LO_mini_ht (LO_align0 + 12) +#define LO_restore_candidate (LO_mini_ht + 256) +#define LO_dynarec_local_size (LO_restore_candidate + 512) + +#define LO_FCR0 (LO_align0) +#define LO_FCR31 (LO_align0) + +#define LO_cop2_to_scratch_buf (LO_scratch_buf_ptr - LO_reg_cop2d) -- cgit v1.2.3