mirror of https://github.com/prirun/p50em.git
synced 2026-01-18 00:42:32 +00:00
changed get/put(16,32)r to check for ring change and use regular get/put call if possible, so brp supercache can be used
FUTURE: could add separate brp cache entry for R0 accesses
changed ea32r64r live register test so normal path is first
changed ea64v live register test so normal path is first
changed ea32i to use INCRP macro instead of RPL++
116 lines · 3.4 KiB · C
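The first change in the commit message above describes a dispatch pattern: a ring-qualified access routine can fall back to the regular (brp-supercached) access path when no ring change is involved. The code below is only a minimal sketch of that idea, not the emulator's actual routine; get16 and get16r_checked are hypothetical stand-ins, while ea_t, RP and RINGMASK32 appear in the source that follows.

/* Sketch only (hypothetical helpers): if the requested ring matches the
   ring already in RP, use the regular get16 so the brp supercache applies;
   otherwise take the slower ring-checked path. */
static inline unsigned short get16r_sketch (ea_t ea, ea_t rpring) {
  if ((rpring & RINGMASK32) == (RP & RINGMASK32))
    return get16(ea);                 /* no ring change: regular call */
  return get16r_checked(ea, rpring);  /* ring change: checked access */
}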
#define IMM_EA 0x80000000

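/* ea32i: compute the effective address for a 32I-mode instruction.
   Decodes the tm, sr and br fields of inst; register and immediate
   operands are returned in *immu32 or *immu64 with the IMM_EA sentinel
   as the result, otherwise the effective address is returned tagged
   with the current ring. */
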
static inline ea_t ea32i (ea_t earp, unsigned short inst, unsigned long *immu32, unsigned long long *immu64) {
  int tm, sr, br, ring;
  unsigned short d;
  int temp32;
  ea_t ea, ip;

  *immu32 = 0xAAAAAAAA;
  *immu64 = 0xAAAAAAAAAAAAAAAALL;

  tm = (inst >> 5) & 3;
  sr = (inst >> 2) & 7;
  br = inst & 3;
  ring = RP & RINGMASK32;
  TRACE(T_EAI, " tm=%d, sr=%d, dr=%d, br=%d\n", tm, sr, (inst >> 7) & 7, br);

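  /* tm selects the mode class: 0 = register-to-register, immediate, and
     GR-relative forms; 1 = direct and indexed; 2 = indirect (preindexed);
     3 = indirect (postindexed) */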
  switch (tm) {
  case 0:
    switch (br) {
    case 0: /* reg-reg */
      *immu32 = crsl[sr];
      return IMM_EA;

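    /* immediate forms: type 1 puts d in the high halfword of *immu32,
       type 2 sign-extends d to 32 bits; type 3 (br=2, sr=0) puts d's high
       byte in the top byte and d's low byte in the bottom byte of *immu64 */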
    case 1:
      d = iget16(RP);
      INCRP;
      if (sr == 0) /* imm type 1 */
        *immu32 = d << 16;
      else /* imm type 2 */
        *(int *)immu32 = *(short *)&d;
      return IMM_EA;

    case 2:
      switch (sr) {
      case 0: /* imm type 3 */
        d = iget16(RP);
        INCRP;
        *immu64 = (((long long)(d & 0xFF00)) << 48) | (d & 0xFF);
        return IMM_EA;

      case 1: /* FAC0 source */
        *immu64 = *(unsigned long long *)(crsl+FAC0);
        return IMM_EA;

      case 3: /* FAC1 source */
        *immu64 = *(unsigned long long *)(crsl+FAC1);
        return IMM_EA;

      case 2:
      case 4:
      case 5:
      case 6:
      case 7:
        fault(UIIFAULT, RPL, RP);
        fatal("ea32i: return from UII fault!");

      default:
        fatal("ea32i: sr < 0 or > 7?");
      }
      fatal("ea32i: case tm=0 br=2 fall-through");

    case 3: /* GR relative */
      d = iget16(RP);
      INCRP;
      ea = (crsl[sr] & 0xFFFF0000) | ((crsl[sr] + d) & 0xFFFF);
      TRACE(T_EAI, " GRR, d=%x, crsl[sr]=%o/%o, ea=%o/%o\n", d, crsl[sr]>>16, crsl[sr]&0xFFFF, ea>>16, ea&0xFFFF);
      if (ea & 0x80000000)
        fault(POINTERFAULT, ea>>16, ea);
      return ea | ring;

    default:
      fatal("ea32i: tm=0, br < 0 or > 3?");
    }
    fatal("ea32i: tm=0 fall-through");

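  /* for TM=1..3, ea keeps the segment half of base register BR+br and
     adds d (plus the optional index register crs[sr*2]) into the low
     16-bit word number only, so address arithmetic wraps within the
     segment */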
  case 1: /* TM=1: Direct and Indexed */
    d = iget16(RP);
    INCRP;
    if (sr == 0)
      ea = (crsl[BR+br] & 0xFFFF0000) | ((crsl[BR+br] + d) & 0xFFFF);
    else
      ea = (crsl[BR+br] & 0xFFFF0000) | ((crsl[BR+br] + d + crs[sr*2]) & 0xFFFF);
    return ea | ring;

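  /* TM=2 and TM=3 fetch a 32-bit indirect pointer from the computed ea;
     if the fetched pointer has its fault bit (0x80000000) set, a pointer
     fault is taken before the address is used */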
  case 2: /* TM=2: Indirect and Indirect Preindexed */
    d = iget16(RP);
    INCRP;
    if (sr == 0)
      ea = (crsl[BR+br] & 0xFFFF0000) | ((crsl[BR+br] + d) & 0xFFFF);
    else
      ea = (crsl[BR+br] & 0xFFFF0000) | ((crsl[BR+br] + d + crs[sr*2]) & 0xFFFF);
    ip = get32(ea | ring);
    if (ip & 0x80000000)
      fault(POINTERFAULT, ip>>16, ea);
    return ip | ring;

  case 3: /* TM=3: Indirect and Indirect Postindexed */
    TRACE(T_EAI, " TM=3: Indirect [Postindexed]");
    d = iget16(RP);
    INCRP;
    ea = (crsl[BR+br] & 0xFFFF0000) | ((crsl[BR+br] + d) & 0xFFFF);
    TRACE(T_EAI, " BR[%d]=%o/%o, d=%o, ip ea=%o/%o\n", br, crsl[BR+br]>>16, crsl[BR+br]&0xFFFF, d, ea>>16, ea&0xFFFF);
    ip = get32(ea | ring);
    TRACE(T_EAI, " after indirect, ea=%o/%o\n", ip>>16, ip&0xFFFF);
    if (ip & 0x80000000)
      fault(POINTERFAULT, ip>>16, ea);
    if (sr > 0) {
      ip = (ip & 0xFFFF0000) | ((ip + crs[sr*2]) & 0xFFFF);
      TRACE(T_EAI, " index by crs[%d]='%o/%d, ea=%o/%o\n", sr, crs[sr*2], crs[sr*2], ea>>16, ea&0xFFFF);
    }
    return ip | ring;

  default:
    fatal("ea32i: tm out of range!");
  }
  fatal("ea32i: main switch fall through");
}