Commit de6678ee authored by Flyinghead

dynarec: add negc and xtrct opcodes

native implementations of negc and xtrct for x64 and arm64
inline mmu_instruction_translation() and clean up
parent 9ae97bdc
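
For reference, here is a minimal C++ sketch of the SH4 behaviour these two opcodes implement; it mirrors the canonical forms added in shil_canonical.h below, and the helper names are illustrative only:

    #include <cstdint>

    // NEGC Rm,Rn: Rn = 0 - Rm - T, T = borrow out of the subtraction.
    static void sh4_negc(uint32_t rm, uint32_t &rn, uint32_t &t)
    {
        uint64_t res = 0ull - (uint64_t)rm - t;  // widen so the borrow stays visible
        rn = (uint32_t)res;
        t = (uint32_t)((res >> 32) & 1);         // borrow -> new T
    }

    // XTRCT Rm,Rn: Rn = middle 32 bits of the 64-bit pair Rm:Rn.
    static void sh4_xtrct(uint32_t rm, uint32_t &rn)
    {
        rn = (rn >> 16) | (rm << 16);
    }
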
......@@ -98,12 +98,11 @@ DynarecCodeEntryPtr DYNACALL bm_GetCodeByVAddr(u32 addr)
}
u32 paddr;
bool shared;
u32 rv = mmu_instruction_translation(addr, paddr, shared);
u32 rv = mmu_instruction_translation(addr, paddr);
if (rv != MMU_ERROR_NONE)
{
DoMMUException(addr, rv, MMU_TT_IREAD);
mmu_instruction_translation(next_pc, paddr, shared);
mmu_instruction_translation(next_pc, paddr);
}
return bm_GetCode(paddr);
......
......@@ -950,6 +950,10 @@ static bool dec_generic(u32 op)
}
break;
case DM_NEGC:
Emit(natop, rs1, rs2, mk_reg(reg_sr_T), 0, shil_param(), mk_reg(reg_sr_T));
break;
default:
verify(false);
}
......
......@@ -215,8 +215,7 @@ bool RuntimeBlockInfo::Setup(u32 rpc,fpscr_t rfpu_cfg)
vaddr=rpc;
if (mmu_enabled())
{
bool shared;
u32 rv = mmu_instruction_translation(vaddr, addr, shared);
u32 rv = mmu_instruction_translation(vaddr, addr);
if (rv != MMU_ERROR_NONE)
{
DoMMUException(vaddr, rv, MMU_TT_IREAD);
......
......@@ -347,6 +347,28 @@ shil_compile
)
shil_opc_end()
//shop_negc - Negate with carry
shil_opc(negc)
shil_canonical
(
u64,f1,(u32 r1, u32 C),
u64 res = -(u64)r1 - C;
u64 rv;
((u32*)&rv)[0]=res;
((u32*)&rv)[1]=(res>>32)&1;
return rv;
)
shil_compile
(
shil_cf_arg_u32(rs2);
shil_cf_arg_u32(rs1);
shil_cf(f1);
shil_cf_rv_u64(rd);
)
shil_opc_end()
//shop_ror
shil_opc(ror)
......@@ -1017,6 +1039,22 @@ shil_compile
)
shil_opc_end()
//shop_xtrct
shil_opc(xtrct)
shil_canonical
(
u32,f1,(u32 r1, u32 r2),
return (r1 >> 16) | (r2 << 16);
)
shil_compile
(
shil_cf_arg_u32(rs2);
shil_cf_arg_u32(rs1);
shil_cf(f1);
shil_cf_rv_u32(rd);
)
shil_opc_end()
SHIL_END
......
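
The negc canonical returns both results in one u64: the new Rn in bits 0..31 and the new T in bit 32, which shil_cf_rv_u64(rd) splits into rd and rd2 (rd2 being reg_sr_T here), the same scheme shop_adc uses. A self-contained check of that packing, copied from the function body above (little-endian layout assumed, as in the canonical itself):

    #include <cassert>
    #include <cstdint>

    static uint64_t negc_f1(uint32_t r1, uint32_t C)        // copy of the canonical above
    {
        uint64_t res = -(uint64_t)r1 - C;
        uint64_t rv;
        ((uint32_t *)&rv)[0] = (uint32_t)res;               // low word  -> rd  (new Rn)
        ((uint32_t *)&rv)[1] = (uint32_t)((res >> 32) & 1); // high word -> rd2 (new T)
        return rv;
    }

    int main()
    {
        assert(negc_f1(0, 0) == 0);                 // -0 - 0: rd = 0, T = 0
        assert(negc_f1(1, 0) == 0x1FFFFFFFFull);    // -1 - 0: rd = 0xFFFFFFFF, T = 1
        assert(negc_f1(0, 1) == 0x1FFFFFFFFull);    // -0 - 1: rd = 0xFFFFFFFF, T = 1
        return 0;
    }
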
......@@ -23,7 +23,6 @@ extern u32 sq_remap[64];
#define printf_win32(...)
extern const u32 mmu_mask[4];
extern const u32 fast_reg_lut[8];
#include "wince.h"
......@@ -274,20 +273,12 @@ u32 mmu_data_translation(u32 va, u32& rv)
}
}
// if ((sr.MD == 0) && (va & 0x80000000) != 0)
// {
// //if on kernel, and not SQ addr -> error
// return MMU_ERROR_BADADDR;
// }
if (sr.MD == 1 && ((va & 0xFC000000) == 0x7C000000))
{
rv = va;
return MMU_ERROR_NONE;
}
// Not called if CCN_MMUCR.AT == 0
//if ((CCN_MMUCR.AT == 0) || (fast_reg_lut[va >> 29] != 0))
if (fast_reg_lut[va >> 29] != 0)
{
rv = va;
......@@ -297,9 +288,6 @@ u32 mmu_data_translation(u32 va, u32& rv)
const TLB_Entry *entry;
u32 lookup = mmu_full_lookup(va, &entry, rv);
// if (lookup != MMU_ERROR_NONE)
// return lookup;
#ifdef TRACE_WINCE_SYSCALLS
if (unresolved_unicode_string != 0 && lookup == MMU_ERROR_NONE)
{
......@@ -311,34 +299,6 @@ u32 mmu_data_translation(u32 va, u32& rv)
}
#endif
// u32 md = entry->Data.PR >> 1;
//
// //0X & User mode-> protection violation
// //Priv mode protection
// if ((md == 0) && sr.MD == 0)
// {
// die("MMU_ERROR_PROTECTED");
// return MMU_ERROR_PROTECTED;
// }
//
// //X0 -> read olny
// //X1 -> read/write , can be FW
//
// //Write Protection (Lock or FW)
// if (translation_type == MMU_TT_DWRITE)
// {
// if ((entry->Data.PR & 1) == 0)
// {
// die("MMU_ERROR_PROTECTED");
// return MMU_ERROR_PROTECTED;
// }
// else if (entry->Data.D == 0)
// {
// die("MMU_ERROR_FIRSTWRITE");
// return MMU_ERROR_FIRSTWRITE;
// }
// }
return lookup;
}
template u32 mmu_data_translation<MMU_TT_DREAD, u8>(u32 va, u32& rv);
......@@ -351,52 +311,8 @@ template u32 mmu_data_translation<MMU_TT_DWRITE, u16>(u32 va, u32& rv);
template u32 mmu_data_translation<MMU_TT_DWRITE, u32>(u32 va, u32& rv);
template u32 mmu_data_translation<MMU_TT_DWRITE, u64>(u32 va, u32& rv);
u32 mmu_instruction_translation(u32 va, u32& rv, bool& shared)
{
if (va & 1)
{
return MMU_ERROR_BADADDR;
}
// if ((sr.MD == 0) && (va & 0x80000000) != 0)
// {
// //if SQ disabled , or if if SQ on but out of SQ mem then BAD ADDR ;)
// if (va >= 0xE0000000)
// return MMU_ERROR_BADADDR;
// }
if ((CCN_MMUCR.AT == 0) || (fast_reg_lut[va >> 29] != 0))
{
rv = va;
return MMU_ERROR_NONE;
}
// Hack fast implementation
const TLB_Entry *tlb_entry;
u32 lookup = mmu_full_lookup(va, &tlb_entry, rv);
if (lookup != MMU_ERROR_NONE)
return lookup;
u32 md = tlb_entry->Data.PR >> 1;
//0X & User mode-> protection violation
//Priv mode protection
// if ((md == 0) && sr.MD == 0)
// {
// return MMU_ERROR_PROTECTED;
// }
shared = tlb_entry->Data.SH == 1;
return MMU_ERROR_NONE;
}
void mmu_flush_table()
{
// printf("MMU tables flushed\n");
// ITLB[0].Data.V = 0;
// ITLB[1].Data.V = 0;
// ITLB[2].Data.V = 0;
// ITLB[3].Data.V = 0;
//
// for (u32 i = 0; i < 64; i++)
// UTLB[i].Data.V = 0;
lru_entry = NULL;
flush_cache();
}
......
......@@ -541,7 +541,7 @@ template u32 mmu_data_translation<MMU_TT_DREAD, u16>(u32 va, u32& rv);
template u32 mmu_data_translation<MMU_TT_DREAD, u32>(u32 va, u32& rv);
template u32 mmu_data_translation<MMU_TT_DWRITE, u64>(u32 va, u32& rv);
u32 mmu_instruction_translation(u32 va, u32& rv, bool& shared)
u32 mmu_instruction_translation(u32 va, u32& rv)
{
if (va & 1)
{
......@@ -582,7 +582,6 @@ retry_ITLB_Match:
nom++;
//VPN->PPN | low bits
rv = ((ITLB[i].Data.PPN << 10)&mask) | (va&(~mask));
shared = ITLB[i].Data.SH == 1;
}
}
}
......@@ -724,8 +723,7 @@ T DYNACALL mmu_ReadMem(u32 adr)
u16 DYNACALL mmu_IReadMem16(u32 vaddr)
{
u32 addr;
bool shared;
u32 rv = mmu_instruction_translation(vaddr, addr, shared);
u32 rv = mmu_instruction_translation(vaddr, addr);
if (rv != MMU_ERROR_NONE)
mmu_raise_exception(rv, vaddr, MMU_TT_IREAD);
return _vmem_ReadMem16(addr);
......@@ -766,8 +764,7 @@ template u64 mmu_ReadMemNoEx<u64>(u32 adr, u32 *exception_occurred);
u16 DYNACALL mmu_IReadMem16NoEx(u32 vaddr, u32 *exception_occurred)
{
u32 addr;
bool shared;
u32 rv = mmu_instruction_translation(vaddr, addr, shared);
u32 rv = mmu_instruction_translation(vaddr, addr);
if (rv != MMU_ERROR_NONE)
{
DoMMUException(vaddr, rv, MMU_TT_IREAD);
......
......@@ -36,6 +36,7 @@ struct TLB_Entry
extern TLB_Entry UTLB[64];
extern TLB_Entry ITLB[4];
extern u32 sq_remap[64];
extern const u32 fast_reg_lut[8];
//These are working only for SQ remaps on ndce
bool UTLB_Sync(u32 entry);
......@@ -56,7 +57,25 @@ static INLINE bool mmu_enabled()
template<bool internal = false>
u32 mmu_full_lookup(u32 va, const TLB_Entry **entry, u32& rv);
u32 mmu_instruction_translation(u32 va, u32& rv, bool& shared);
#ifdef FAST_MMU
static INLINE u32 mmu_instruction_translation(u32 va, u32& rv)
{
if (va & 1)
return MMU_ERROR_BADADDR;
if (fast_reg_lut[va >> 29] != 0)
{
rv = va;
return MMU_ERROR_NONE;
}
const TLB_Entry *tlb_entry;
return mmu_full_lookup(va, &tlb_entry, rv);
}
#else
u32 mmu_instruction_translation(u32 va, u32& rv);
#endif
template<u32 translation_type, typename T>
extern u32 mmu_data_translation(u32 va, u32& rv);
void DoMMUException(u32 addr, u32 error_code, u32 access_type);
......
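
With the bool& shared output gone, every call site reduces to the same two-argument pattern used in bm_GetCodeByVAddr and mmu_IReadMem16 above. A condensed sketch of that pattern (fetch_pc_physical is a hypothetical name, not part of the change):

    // Hypothetical wrapper; only mmu_instruction_translation, DoMMUException
    // and the MMU_* constants come from the headers touched by this commit.
    static u32 fetch_pc_physical(u32 vaddr)
    {
        u32 paddr;
        u32 err = mmu_instruction_translation(vaddr, paddr);
        if (err != MMU_ERROR_NONE)
            DoMMUException(vaddr, err, MMU_TT_IREAD);  // raise the ITLB miss/protection exception
        return paddr;
    }
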
......@@ -152,7 +152,7 @@ sh4_opcodelistentry opcodes[]=
{0 ,i0010_nnnn_mmmm_1010 ,Mask_n_m ,0x200A ,Normal ,"xor <REG_M>,<REG_N>" ,1,1,EX,fix_none ,dec_Bin_rNrM(shop_xor)}, //xor <REG_M>,<REG_N>
{0 ,i0010_nnnn_mmmm_1011 ,Mask_n_m ,0x200B ,Normal ,"or <REG_M>,<REG_N>" ,1,1,EX,fix_none ,dec_Bin_rNrM(shop_or)}, //or <REG_M>,<REG_N>
{0 ,i0010_nnnn_mmmm_1100 ,Mask_n_m ,0x200C ,Normal |NO_FP,"cmp/str <REG_M>,<REG_N>" ,1,1,MT,fix_none ,dec_cmp(shop_setpeq,PRM_RN,PRM_RM)}, //cmp/str <REG_M>,<REG_N>
{0 ,i0010_nnnn_mmmm_1101 ,Mask_n_m ,0x200D ,Normal |NO_FP,"xtrct <REG_M>,<REG_N>" ,1,1,EX,fix_none}, //xtrct <REG_M>,<REG_N>
{0 ,i0010_nnnn_mmmm_1101 ,Mask_n_m ,0x200D ,Normal |NO_FP,"xtrct <REG_M>,<REG_N>" ,1,1,EX,fix_none ,dec_Bin_rNrM(shop_xtrct)}, //xtrct <REG_M>,<REG_N>
{0 ,i0010_nnnn_mmmm_1110 ,Mask_n_m ,0x200E ,Normal ,"mulu.w <REG_M>,<REG_N>" ,1,4,CO,fix_none ,dec_mul(16)}, //mulu.w <REG_M>,<REG_N>
{0 ,i0010_nnnn_mmmm_1111 ,Mask_n_m ,0x200F ,Normal ,"muls.w <REG_M>,<REG_N>" ,1,4,CO,fix_none ,dec_mul(-16)}, //muls.w <REG_M>,<REG_N>
{0 ,i0011_nnnn_mmmm_0000 ,Mask_n_m ,0x3000 ,Normal ,"cmp/eq <REG_M>,<REG_N>" ,1,1,MT,fix_none ,dec_cmp(shop_seteq,PRM_RN,PRM_RM)}, // cmp/eq <REG_M>,<REG_N>
......@@ -310,7 +310,7 @@ sh4_opcodelistentry opcodes[]=
{0 ,i0110_nnnn_mmmm_0111 ,Mask_n_m ,0x6007 ,Normal ,"not <REG_M>,<REG_N>" ,1,1,EX,fix_none ,dec_Un_rNrM(shop_not)}, //not <REG_M>,<REG_N>
{0 ,i0110_nnnn_mmmm_1000 ,Mask_n_m ,0x6008 ,Normal ,"swap.b <REG_M>,<REG_N>" ,1,1,EX,fix_none ,dec_Un_rNrM(shop_swaplb)}, //swap.b <REG_M>,<REG_N>
{0 ,i0110_nnnn_mmmm_1001 ,Mask_n_m ,0x6009 ,Normal ,"swap.w <REG_M>,<REG_N>" ,1,1,EX,fix_none ,dec_Fill(DM_Rot,PRM_RN,PRM_RM,shop_ror,16|0x1000)}, //swap.w <REG_M>,<REG_N>
{0 ,i0110_nnnn_mmmm_1010 ,Mask_n_m ,0x600A ,Normal ,"negc <REG_M>,<REG_N>" ,1,1,EX,fix_none}, //negc <REG_M>,<REG_N>
{0 ,i0110_nnnn_mmmm_1010 ,Mask_n_m ,0x600A ,Normal ,"negc <REG_M>,<REG_N>" ,1,1,EX,fix_none ,dec_Fill(DM_NEGC,PRM_RN,PRM_RM,shop_negc)}, //negc <REG_M>,<REG_N>
{0 ,i0110_nnnn_mmmm_1011 ,Mask_n_m ,0x600B ,Normal ,"neg <REG_M>,<REG_N>" ,1,1,EX,fix_none ,dec_Un_rNrM(shop_neg)}, //neg <REG_M>,<REG_N>
{0 ,i0110_nnnn_mmmm_1100 ,Mask_n_m ,0x600C ,Normal ,"extu.b <REG_M>,<REG_N>" ,1,1,EX,fix_none ,dec_Fill(DM_EXTOP,PRM_RN,PRM_RM,shop_and,1)}, //extu.b <REG_M>,<REG_N>
{0 ,i0110_nnnn_mmmm_1101 ,Mask_n_m ,0x600D ,Normal ,"extu.w <REG_M>,<REG_N>" ,1,1,EX,fix_none ,dec_Fill(DM_EXTOP,PRM_RN,PRM_RM,shop_and,2)}, //extu.w <REG_M>,<REG_N>
......
......@@ -128,5 +128,6 @@ enum DecMode
DM_EXTOP,
DM_MUL,
DM_DIV0,
DM_ADC
DM_ADC,
DM_NEGC,
};
......@@ -432,6 +432,11 @@ public:
Sbcs(regalloc.MapRegister(op.rd), regalloc.MapRegister(op.rs1), regalloc.MapRegister(op.rs2)); // (C,rd) = rs1 - rs2 - ~rs3(C)
Cset(regalloc.MapRegister(op.rd2), cc); // rd2 = ~C
break;
case shop_negc:
Cmp(wzr, regalloc.MapRegister(op.rs2)); // C = ~rs2
Sbcs(regalloc.MapRegister(op.rd), wzr, regalloc.MapRegister(op.rs1)); // (C,rd) = 0 - rs1 - ~rs2(C)
Cset(regalloc.MapRegister(op.rd2), cc); // rd2 = ~C
break;
case shop_rocr:
Ubfx(w0, regalloc.MapRegister(op.rs1), 0, 1); // w0 = rs1[0] (new C)
......@@ -567,6 +572,12 @@ public:
Sxth(regalloc.MapRegister(op.rd), regalloc.MapRegister(op.rs1));
break;
case shop_xtrct:
Lsr(regalloc.MapRegister(op.rd), regalloc.MapRegister(op.rs1), 16);
Lsl(w0, regalloc.MapRegister(op.rs2), 16);
Orr(regalloc.MapRegister(op.rd), regalloc.MapRegister(op.rd), w0);
break;
//
// FPU
//
......
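
On AArch64 the carry flag left by a subtraction is the inverse of the SH4 borrow, so the shop_negc sequence first forces C = !T with Cmp(wzr, rs2), lets Sbcs compute 0 - rs1 - T, and reads the new T back with Cset(..., cc) (carry clear, i.e. a borrow occurred). A small host-side model of that flag convention, checked against the canonical negc definition; it assumes rs2 holds T as 0 or 1:

    #include <cassert>
    #include <cstdint>

    // Models Cmp(wzr, t); Sbcs(rd, wzr, rm); Cset(rd2, cc) on 32-bit values.
    static void negc_a64_model(uint32_t rm, uint32_t t, uint32_t &rd, uint32_t &new_t)
    {
        uint32_t carry = (t == 0);                       // Cmp(wzr, t): C = !T
        uint64_t wide = (uint64_t)0 - rm - (1 - carry);  // Sbcs: 0 - rm - !C
        rd = (uint32_t)wide;
        new_t = (uint32_t)((wide >> 32) & 1);            // borrow -> carry clear -> T = 1
    }

    int main()
    {
        const uint32_t rm_vals[] = {0, 1, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
        for (uint32_t rm : rm_vals)
            for (uint32_t t = 0; t <= 1; t++)
            {
                uint32_t rd, nt;
                negc_a64_model(rm, t, rd, nt);
                uint64_t ref = -(uint64_t)rm - t;        // canonical negc result
                assert(rd == (uint32_t)ref);
                assert(nt == (uint32_t)((ref >> 32) & 1));
            }
        return 0;
    }
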
......@@ -556,7 +556,20 @@ public:
cmovc(regalloc.MapRegister(op.rd2), ecx); // rd2 = C
break;
*/
case shop_rocr:
case shop_negc:
{
if (regalloc.mapg(op.rd) != regalloc.mapg(op.rs1))
mov(regalloc.MapRegister(op.rd), regalloc.MapRegister(op.rs1));
Xbyak::Reg64 rd64 = regalloc.MapRegister(op.rd).cvt64();
neg(rd64);
sub(rd64, regalloc.MapRegister(op.rs2).cvt64());
Xbyak::Reg64 rd2_64 = regalloc.MapRegister(op.rd2).cvt64();
mov(rd2_64, rd64);
shr(rd2_64, 63);
}
break;
case shop_rocr:
case shop_rocl:
if (regalloc.mapg(op.rd) != regalloc.mapg(op.rs1))
mov(regalloc.MapRegister(op.rd), regalloc.MapRegister(op.rs1));
......@@ -741,6 +754,15 @@ public:
movsx(regalloc.MapRegister(op.rd), Xbyak::Reg16(regalloc.MapRegister(op.rs1).getIdx()));
break;
case shop_xtrct:
if (regalloc.mapg(op.rd) != regalloc.mapg(op.rs1))
mov(regalloc.MapRegister(op.rd), regalloc.MapRegister(op.rs1));
shr(regalloc.MapRegister(op.rd), 16);
mov(eax, regalloc.MapRegister(op.rs2));
shl(eax, 16);
or_(regalloc.MapRegister(op.rd), eax);
break;
//
// FPU
//
......
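
The x64 version relies on a different identity: assuming the 32-bit inputs sit zero-extended in their 64-bit registers (which 32-bit register writes on x86-64 leave behind), neg + sub produce 2^64 - rs1 - T, and whenever the subtraction borrows the upper 32 bits of that value are all ones, so the bit 63 extracted by shr(rd2_64, 63) always equals the bit 32 the canonical uses for T. A quick arithmetic check of that equivalence:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint32_t rs1_vals[] = {0, 1, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
        for (uint32_t rs1 : rs1_vals)
            for (uint32_t t = 0; t <= 1; t++)
            {
                // What neg(rd64); sub(rd64, rs2_64) leaves in rd64:
                uint64_t rd64 = (uint64_t)0 - rs1 - t;
                // Bit 63 (what the emitted shr extracts) == bit 32 (what the canonical stores in T).
                assert(((rd64 >> 63) & 1) == ((rd64 >> 32) & 1));
                // And that bit is set exactly when the subtraction borrows (rs1 + T > 0).
                assert(((rd64 >> 32) & 1) == ((uint64_t)rs1 + t > 0 ? 1u : 0u));
            }
        return 0;
    }
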