Diffstat (limited to 'libpcsxcore')
-rw-r--r--  libpcsxcore/new_dynarec/assem_arm.c   |  38
-rw-r--r--  libpcsxcore/new_dynarec/assem_arm.h   |   1
-rw-r--r--  libpcsxcore/new_dynarec/linkage_arm.s | 105
-rw-r--r--  libpcsxcore/new_dynarec/new_dynarec.c |  12
4 files changed, 155 insertions(+), 1 deletion(-)
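Overview (reviewer annotation, not part of the commit): stores that hit a page
whose invalid_code[] byte is not 1 must invalidate any blocks compiled from
that page. Previously this took a forward jne plus an INVCODE_STUB patched
after compilation; this commit lets the ARM backend issue a single conditional
blne straight into a per-register trampoline. A hedged C sketch of the check
the emitted code performs (store_check is a made-up name; invalid_code[] and
invalidate_block() are real dynarec symbols, their declarations assumed here):

    extern unsigned char invalid_code[0x100000]; /* one byte per 4KB page */
    void invalidate_block(unsigned int page);

    static void store_check(unsigned int addr)
    {
        /* invalid_code[page] == 1 means nothing compiled lives there */
        if (invalid_code[addr >> 12] != 1)
            invalidate_block(addr >> 12); /* drop stale translations */
    }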
diff --git a/libpcsxcore/new_dynarec/assem_arm.c b/libpcsxcore/new_dynarec/assem_arm.c
index 0153bfc..a40acf1 100644
--- a/libpcsxcore/new_dynarec/assem_arm.c
+++ b/libpcsxcore/new_dynarec/assem_arm.c
@@ -66,6 +66,37 @@ const u_int jump_vaddr_reg[16] = {
0,
0};
+void invalidate_addr_r0();
+void invalidate_addr_r1();
+void invalidate_addr_r2();
+void invalidate_addr_r3();
+void invalidate_addr_r4();
+void invalidate_addr_r5();
+void invalidate_addr_r6();
+void invalidate_addr_r7();
+void invalidate_addr_r8();
+void invalidate_addr_r9();
+void invalidate_addr_r10();
+void invalidate_addr_r12();
+
+const u_int invalidate_addr_reg[16] = {
+ (int)invalidate_addr_r0,
+ (int)invalidate_addr_r1,
+ (int)invalidate_addr_r2,
+ (int)invalidate_addr_r3,
+ (int)invalidate_addr_r4,
+ (int)invalidate_addr_r5,
+ (int)invalidate_addr_r6,
+ (int)invalidate_addr_r7,
+ (int)invalidate_addr_r8,
+ (int)invalidate_addr_r9,
+ (int)invalidate_addr_r10,
+ 0,
+ (int)invalidate_addr_r12,
+ 0,
+ 0,
+ 0};
+
#include "fpu.h"
unsigned int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
@@ -2195,6 +2226,13 @@ void emit_addsr12(int rs1,int rs2,int rt)
output_w32(0xe0800620|rd_rn_rm(rt,rs1,rs2));
}
+void emit_callne(int a)
+{
+ assem_debug("blne %x\n",a);
+ u_int offset=genjmp(a);
+ output_w32(0x1b000000|offset); // cond=NE (0x1<<28) | BL (0xb<<24) | imm24
+}
+
// Used to preload hash table entries
void emit_prefetch(void *addr)
{
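Side note on the constant (annotation, not from the patch): ARM encodes BL as
cond(4) | 0b1011 | imm24, and the NE condition is 0b0001, so 0x1b000000 is
precisely BLNE with a zero offset; genjmp() is expected to supply the usual
PC+8-biased word offset. The zero slots in invalidate_addr_reg[] (11, 13-15)
correspond to fp, sp, lr and pc, which the allocator never uses for a guest
address. A standalone sketch of the encoding, under that offset assumption:

    #include <stdint.h>

    /* Build a BLNE instruction; mirrors what emit_callne() emits. */
    static uint32_t arm_blne(uint32_t pc, uint32_t target)
    {
        uint32_t cond_ne = 0x1u << 28;  /* condition field: NE */
        uint32_t op_bl   = 0xBu << 24;  /* 101 + L=1: branch with link */
        uint32_t imm24   = ((target - pc - 8) >> 2) & 0x00ffffff;
        return cond_ne | op_bl | imm24; /* == 0x1b000000 | offset */
    }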
diff --git a/libpcsxcore/new_dynarec/assem_arm.h b/libpcsxcore/new_dynarec/assem_arm.h
index 5060095..a289aa1 100644
--- a/libpcsxcore/new_dynarec/assem_arm.h
+++ b/libpcsxcore/new_dynarec/assem_arm.h
@@ -8,6 +8,7 @@
#define CORTEX_A8_BRANCH_PREDICTION_HACK 1
#define USE_MINI_HT 1
//#define REG_PREFETCH 1
+#define HAVE_CONDITIONAL_CALL 1
#define DISABLE_TLB 1
//#define MUPEN64
#define FORCE32 1
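Note (reviewer annotation): HAVE_CONDITIONAL_CALL advertises that this backend
can branch-and-link under a condition in one instruction, which ARM can and
the x86 backend cannot, so the new call sites in new_dynarec.c compile only
here. The accompanying !defined(DESTRUCTIVE_SHIFT) guard is presumably needed
because that macro marks backends whose shift emitters clobber the source
register, and the trampolines must still find the store address in its
original register.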
diff --git a/libpcsxcore/new_dynarec/linkage_arm.s b/libpcsxcore/new_dynarec/linkage_arm.s
index 8744608..57fb3d2 100644
--- a/libpcsxcore/new_dynarec/linkage_arm.s
+++ b/libpcsxcore/new_dynarec/linkage_arm.s
@@ -1,6 +1,6 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* linkage_arm.s for PCSX *
- * Copyright (C) 2009-2010 Ari64 *
+ * Copyright (C) 2009-2011 Ari64 *
* Copyright (C) 2010-2011 Gražvydas "notaz" Ignotas *
* *
* This program is free software; you can redistribute it and/or modify *
@@ -748,6 +748,109 @@ indirect_jump:
.size indirect_jump_indexed, .-indirect_jump_indexed
.align 2
+ .global invalidate_addr_r0
+ .type invalidate_addr_r0, %function
+invalidate_addr_r0:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r0, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r0, .-invalidate_addr_r0
+ .align 2
+ .global invalidate_addr_r1
+ .type invalidate_addr_r1, %function
+invalidate_addr_r1:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r1, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r1, .-invalidate_addr_r1
+ .align 2
+ .global invalidate_addr_r2
+ .type invalidate_addr_r2, %function
+invalidate_addr_r2:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r2, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r2, .-invalidate_addr_r2
+ .align 2
+ .global invalidate_addr_r3
+ .type invalidate_addr_r3, %function
+invalidate_addr_r3:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r3, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r3, .-invalidate_addr_r3
+ .align 2
+ .global invalidate_addr_r4
+ .type invalidate_addr_r4, %function
+invalidate_addr_r4:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r4, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r4, .-invalidate_addr_r4
+ .align 2
+ .global invalidate_addr_r5
+ .type invalidate_addr_r5, %function
+invalidate_addr_r5:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r5, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r5, .-invalidate_addr_r5
+ .align 2
+ .global invalidate_addr_r6
+ .type invalidate_addr_r6, %function
+invalidate_addr_r6:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r6, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r6, .-invalidate_addr_r6
+ .align 2
+ .global invalidate_addr_r7
+ .type invalidate_addr_r7, %function
+invalidate_addr_r7:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r7, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r7, .-invalidate_addr_r7
+ .align 2
+ .global invalidate_addr_r8
+ .type invalidate_addr_r8, %function
+invalidate_addr_r8:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r8, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r8, .-invalidate_addr_r8
+ .align 2
+ .global invalidate_addr_r9
+ .type invalidate_addr_r9, %function
+invalidate_addr_r9:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r9, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r9, .-invalidate_addr_r9
+ .align 2
+ .global invalidate_addr_r10
+ .type invalidate_addr_r10, %function
+invalidate_addr_r10:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r10, #12
+ b invalidate_addr_call
+ .size invalidate_addr_r10, .-invalidate_addr_r10
+ .align 2
+ .global invalidate_addr_r12
+ .type invalidate_addr_r12, %function
+invalidate_addr_r12:
+ stmia fp, {r0, r1, r2, r3, r12, lr}
+ lsr r0, r12, #12 @ fall through to invalidate_addr_call
+ .size invalidate_addr_r12, .-invalidate_addr_r12
+ .align 2
+ .global invalidate_addr_call
+ .type invalidate_addr_call, %function
+invalidate_addr_call:
+ bl invalidate_block
+ ldmia fp, {r0, r1, r2, r3, r12, pc}
+ .size invalidate_addr_call, .-invalidate_addr_call
+
+ .align 2
.global new_dyna_start
.type new_dyna_start, %function
new_dyna_start:
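Note (reviewer annotation): each trampoline spills the caller-saved registers
to the scratch area fp points at, shifts its register's address down to a 4KB
page index in r0, and branches to the shared tail, which calls
invalidate_block() and restores everything, returning by loading the saved lr
directly into pc. invalidate_addr_r12 has no branch because
invalidate_addr_call sits immediately after it. A rough C equivalent (sketch
only; the stmia/ldmia save-restore has no C analogue):

    /* What invalidate_addr_rN amounts to for a register rN. */
    void invalidate_addr_rN(unsigned int addr_in_rN)
    {
        /* lsr r0, rN, #12 ; bl invalidate_block */
        invalidate_block(addr_in_rN >> 12);
    }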
diff --git a/libpcsxcore/new_dynarec/new_dynarec.c b/libpcsxcore/new_dynarec/new_dynarec.c
index 84b4400..9bc0f60 100644
--- a/libpcsxcore/new_dynarec/new_dynarec.c
+++ b/libpcsxcore/new_dynarec/new_dynarec.c
@@ -3198,9 +3198,13 @@ void store_assemble(int i,struct regstat *i_regs)
#else
emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
#endif
+ #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ emit_callne(invalidate_addr_reg[addr]);
+ #else
jaddr2=(int)out;
emit_jne(0);
add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
+ #endif
}
}
if(jaddr) {
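Note (reviewer annotation): this is the pattern repeated in c1ls_assemble()
and c2ls_assemble() below. The old path emitted a jne with a zero
displacement, recorded it with add_stub(), and let the INVCODE_STUB machinery
patch in a call after compilation; the ARM path now emits one blne into
invalidate_addr_reg[] and needs no stub at all.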
@@ -3632,9 +3636,13 @@ void c1ls_assemble(int i,struct regstat *i_regs)
#else
emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
#endif
+ #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ emit_callne(invalidate_addr_reg[temp]);
+ #else
jaddr3=(int)out;
emit_jne(0);
add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
+ #endif
}
}
if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
@@ -3740,9 +3748,13 @@ void c2ls_assemble(int i,struct regstat *i_regs)
#else
emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
#endif
+ #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
+ emit_callne(invalidate_addr_reg[ar]);
+ #else
jaddr3=(int)out;
emit_jne(0);
add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
+ #endif
}
if (opcode[i]==0x32) { // LWC2
cop2_put_dreg(copr,tl,HOST_TEMPREG);