Pull the PC masking into the code translation cache, so that we only need to mask the PC on the slow path. Gives us another 5-10% speedup.

Rune Holm
2021-06-11 19:34:03 +01:00
parent 3203ac8590
commit fe3b4bb032
2 changed files with 10 additions and 8 deletions
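
In effect, the slow path now rebases the cached range into unmasked PC space: it masks the PC once, records the difference, and shifts the cache bounds by that difference, so the fast path can compare the raw PC directly. Below is a minimal standalone sketch of the idea; it assumes ADDRESS_68K masks to the 68000's 24-bit bus, and the fill_cache/cache_hit helpers and the demo values are hypothetical, not the emulator's actual API.

#include <assert.h>
#include <stdint.h>

#define ADDRESS_68K(a) ((a) & 0x00FFFFFFu)  /* assumed 24-bit address mask */

typedef struct {
	uint32_t lower, upper;  /* bounds kept in unmasked PC space */
	const uint8_t *data;    /* host pointer, indexed by (pc - lower) */
} address_translation_cache;

/* Slow path (hypothetical helper): mask once, then shift the matched
 * range by the masked-off bits so the fast path never has to mask. */
static void fill_cache(address_translation_cache *cache, uint32_t pc,
                       uint32_t range_lo, uint32_t range_hi,
                       const uint8_t *host)
{
	uint32_t address = ADDRESS_68K(pc);
	uint32_t pc_address_diff = pc - address;  /* the bits the mask removed */
	cache->lower = range_lo + pc_address_diff;
	cache->upper = range_hi + pc_address_diff;
	cache->data = host;
}

/* Fast path: raw PC comparison, no ADDRESS_68K call. */
static int cache_hit(const address_translation_cache *cache, uint32_t pc)
{
	return pc >= cache->lower && pc < cache->upper;
}

int main(void)
{
	static const uint8_t rom[16];
	uint32_t pc = 0x02F80004u;  /* high bits set above the 24-bit bus */
	address_translation_cache c;

	fill_cache(&c, pc, 0x00F80000u, 0x00F80010u, rom);
	assert(cache_hit(&c, pc));  /* raw PC hits the shifted bounds */
	/* The data offset matches what the masked lookup would compute. */
	assert(pc - c.lower == ADDRESS_68K(pc) - 0x00F80000u);
	return 0;
}

The trade-off is that the cached bounds are only valid for PCs sharing the same masked-off high bits; a PC whose high bits differ simply falls outside the shifted range, takes the slow path, and refills the cache with a fresh offset.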


@@ -1254,12 +1254,14 @@ inline unsigned int m68k_read_pcrelative_32(unsigned int address) {
 #endif
-uint m68ki_read_imm6_addr_slowpath(uint32_t address, address_translation_cache *cache)
+uint m68ki_read_imm6_addr_slowpath(uint32_t pc, address_translation_cache *cache)
 {
+	uint32_t address = ADDRESS_68K(pc);
+	uint32_t pc_address_diff = pc - address;
 	for (int i = 0; i < read_ranges; i++) {
 		if(address >= read_addr[i] && address < read_upper[i]) {
-			cache->lower = read_addr[i];
-			cache->upper = read_upper[i];
+			cache->lower = read_addr[i] + pc_address_diff;
+			cache->upper = read_upper[i] + pc_address_diff;
 			cache->data = read_data[i];
 			REG_PC += 2;
 			return be16toh(((unsigned short *)(read_data[i] + (address - read_addr[i])))[0]);

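(Note that rebasing both bounds by pc_address_diff leaves the data offset unchanged: since cache->lower is read_addr[i] + pc_address_diff, the fast path's pc - cache->lower equals ADDRESS_68K(pc) - read_addr[i], the same index into read_data[i] that the masked lookup would have produced.)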

@@ -1161,21 +1161,21 @@ static inline uint32 m68ki_ic_readimm16(uint32 address)
 /* Handles all immediate reads, does address error check, function code setting,
  * and prefetching if they are enabled in m68kconf.h
  */
-uint m68ki_read_imm6_addr_slowpath(uint32_t address, address_translation_cache *cache);
+uint m68ki_read_imm6_addr_slowpath(uint32_t pc, address_translation_cache *cache);
 static inline uint m68ki_read_imm_16(void)
 {
-	uint32_t address = ADDRESS_68K(REG_PC);
+	uint32_t pc = REG_PC;
 	address_translation_cache *cache = &code_translation_cache;
-	if(address >= cache->lower && address < cache->upper)
+	if(pc >= cache->lower && pc < cache->upper)
 	{
 		REG_PC += 2;
-		return be16toh(((unsigned short *)(cache->data + (address - cache->lower)))[0]);
+		return be16toh(((unsigned short *)(cache->data + (pc - cache->lower)))[0]);
 	}
-	return m68ki_read_imm6_addr_slowpath(address, cache);
+	return m68ki_read_imm6_addr_slowpath(pc, cache);
 }