Refactored CPU_run

Rebecca Buckingham 2024-07-23 20:10:49 -04:00
parent cafbf52f2f
commit f4653891c7
2 changed files with 151 additions and 204 deletions

@@ -8,6 +8,9 @@
* Copyright (C) 2006 by Samuel A. Falvo II
*
* Modified for greater portability and virtual hardware independence.
*
* Copyright (C) 2024 by Rebecca Buckingham
* Highly modified to integrate with the bsx emulator.
*/
#define CPU_DISPATCH
@@ -15,10 +18,7 @@
#include "cpu.h"
#include "cpumicro.h"
#include "util.h"
//#define LIMIT_INSTRUCTION_COUNT 10000
//void CPUEvent_elapse( word32 cycles );
#include <stdint.h>
int dispatch_quit = 0;
@@ -45,216 +45,164 @@ union {
duala atmp,opaddr;
dualw wtmp,otmp,operand;
int a1,a2,a3,a4,o1,o2,o3,o4;
#ifdef OLDCYCLES
byte *cpu_curr_cycle_table;
#endif
void (**cpu_curr_opcode_table)();
extern int cpu_reset,cpu_abort,cpu_nmi,cpu_irq,cpu_stop,cpu_wait,cpu_trace;
extern int cpu_update_period;
extern int cpu_irne64,cpu_irqt5;
extern int cpu_update_period;
extern void (*cpu_opcode_table[1310])();
#ifdef OLDCYCLES
/* Base cycle counts for all possible 1310 opcodes (262 opcodes x 5 modes). */
/* The opcode handlers may add additional cycles to handle special cases such */
/* as a non-page-aligned direct page register or taking a branch. */
uint64_t last_update, next_update;
byte cpu_cycle_table[1310] =
{
8, 6, 8, 4, 5, 3, 5, 6, 3, 2, 2, 4, 6, 4, 6, 5, /* e=0, m=1, x=1 */
2, 5, 5, 7, 5, 4, 6, 6, 2, 4, 2, 2, 6, 4, 7, 5,
6, 6, 8, 4, 3, 3, 5, 6, 4, 2, 2, 5, 4, 4, 6, 5,
2, 5, 5, 7, 4, 4, 6, 6, 2, 4, 2, 2, 4, 4, 7, 5,
7, 6, 2, 4, 7, 3, 5, 6, 3, 2, 2, 3, 3, 4, 6, 5,
2, 5, 5, 7, 7, 4, 6, 6, 2, 4, 3, 2, 4, 4, 7, 5,
6, 6, 6, 4, 3, 3, 5, 6, 4, 2, 2, 6, 5, 4, 6, 5,
2, 5, 5, 7, 4, 4, 6, 6, 2, 4, 4, 2, 6, 4, 7, 5,
2, 6, 3, 4, 3, 3, 3, 6, 2, 2, 2, 3, 4, 4, 4, 5,
2, 6, 5, 7, 4, 4, 4, 6, 2, 5, 2, 2, 4, 5, 5, 5,
2, 6, 2, 4, 3, 3, 3, 6, 2, 2, 2, 4, 4, 4, 4, 5,
2, 5, 5, 7, 4, 4, 4, 6, 2, 4, 2, 2, 4, 4, 4, 5,
2, 6, 3, 4, 3, 3, 5, 6, 2, 2, 2, 3, 4, 4, 4, 5,
2, 5, 5, 7, 6, 4, 6, 6, 2, 4, 3, 3, 6, 4, 7, 5,
2, 6, 3, 4, 3, 3, 5, 6, 2, 2, 2, 3, 4, 4, 6, 5,
2, 5, 5, 7, 5, 4, 6, 6, 2, 4, 4, 2, 6, 4, 7, 5,
0, 0, 0, 0, 0, 0,
#define RESET_OP 256
#define ABORT_OP 257
#define NMI_OP 258
#define IRQ_OP 259
#define IRNE64_OP 260
#define IRQT5_OP 261
8, 6, 8, 4, 5, 3, 5, 6, 3, 2, 2, 4, 6, 4, 6, 5, /* e=0, m=1, x=0 */
2, 6, 5, 7, 5, 4, 6, 6, 2, 5, 2, 2, 6, 5, 7, 5,
6, 6, 8, 4, 3, 3, 5, 6, 4, 2, 2, 5, 4, 4, 6, 5,
2, 6, 5, 7, 4, 4, 6, 6, 2, 5, 2, 2, 5, 5, 7, 5,
7, 6, 2, 4, 0, 3, 5, 6, 4, 2, 2, 3, 3, 4, 6, 5,
2, 6, 5, 7, 0, 4, 6, 6, 2, 5, 4, 2, 4, 5, 7, 5,
6, 6, 6, 4, 3, 3, 5, 6, 5, 2, 2, 6, 5, 4, 6, 5,
2, 6, 5, 7, 4, 4, 6, 6, 2, 5, 5, 2, 6, 5, 7, 5,
2, 6, 3, 4, 4, 3, 4, 6, 2, 2, 2, 3, 5, 4, 5, 5,
2, 6, 5, 7, 5, 4, 5, 6, 2, 5, 2, 2, 4, 5, 5, 5,
3, 6, 3, 4, 4, 3, 4, 6, 2, 2, 2, 4, 5, 4, 5, 5,
2, 6, 5, 7, 5, 4, 5, 6, 2, 5, 2, 2, 5, 5, 5, 5,
3, 6, 3, 4, 4, 3, 6, 6, 2, 2, 2, 3, 5, 4, 6, 5,
2, 6, 5, 7, 6, 4, 8, 6, 2, 5, 4, 3, 6, 5, 7, 5,
3, 6, 3, 4, 4, 3, 6, 6, 2, 2, 2, 3, 5, 4, 6, 5,
2, 6, 5, 7, 5, 4, 8, 6, 2, 5, 5, 2, 6, 5, 7, 5,
0, 0, 0, 0, 0, 0,
8, 7, 8, 5, 7, 4, 7, 7, 3, 3, 2, 4, 8, 5, 8, 6, /* e=0, m=0, x=1 */
2, 6, 6, 8, 7, 5, 8, 7, 2, 5, 2, 2, 8, 5, 9, 6,
6, 7, 8, 5, 4, 4, 7, 7, 4, 3, 2, 5, 5, 5, 8, 6,
2, 6, 6, 8, 5, 5, 8, 7, 2, 5, 2, 2, 5, 5, 9, 6,
7, 7, 2, 5, 0, 4, 7, 7, 4, 3, 2, 3, 3, 5, 8, 6,
2, 6, 6, 8, 0, 5, 8, 7, 2, 5, 3, 2, 4, 5, 9, 6,
6, 7, 6, 5, 4, 4, 7, 7, 5, 3, 2, 6, 5, 5, 8, 6,
2, 6, 6, 8, 5, 5, 8, 7, 2, 5, 4, 2, 6, 5, 9, 6,
2, 7, 3, 5, 3, 4, 3, 7, 2, 3, 2, 3, 4, 5, 4, 6,
2, 6, 6, 8, 4, 5, 4, 7, 2, 5, 2, 2, 5, 5, 5, 6,
2, 7, 2, 5, 3, 4, 3, 7, 2, 3, 2, 4, 4, 5, 4, 6,
2, 6, 6, 8, 4, 5, 4, 7, 2, 5, 2, 2, 4, 5, 4, 6,
2, 7, 3, 5, 3, 4, 7, 7, 2, 3, 2, 3, 4, 5, 8, 6,
2, 6, 6, 8, 6, 5, 8, 7, 2, 5, 3, 3, 6, 5, 9, 6,
2, 7, 3, 5, 3, 4, 7, 7, 2, 3, 2, 3, 4, 5, 8, 6,
2, 6, 6, 8, 5, 5, 8, 7, 2, 5, 4, 2, 6, 5, 9, 6,
0, 0, 0, 0, 0, 0,
8, 7, 8, 5, 7, 4, 7, 7, 3, 3, 2, 4, 8, 5, 8, 6, /* e=0, m=0, x=0 */
2, 7, 6, 8, 7, 5, 8, 7, 2, 6, 2, 2, 8, 6, 9, 6,
6, 7, 8, 5, 4, 4, 7, 7, 4, 3, 2, 5, 5, 5, 8, 6,
2, 7, 6, 8, 5, 5, 8, 7, 2, 6, 2, 2, 6, 6, 9, 6,
7, 7, 2, 5, 0, 4, 7, 7, 3, 3, 2, 3, 3, 5, 8, 6,
2, 7, 6, 8, 0, 5, 8, 7, 2, 6, 4, 2, 4, 6, 9, 6,
6, 7, 6, 5, 4, 4, 7, 7, 4, 3, 2, 6, 5, 5, 8, 6,
2, 7, 6, 8, 5, 5, 8, 7, 2, 6, 5, 2, 6, 6, 9, 6,
2, 7, 3, 5, 4, 4, 4, 7, 2, 3, 2, 3, 5, 5, 5, 6,
2, 7, 6, 8, 5, 5, 5, 7, 2, 6, 2, 2, 5, 6, 6, 6,
3, 7, 3, 5, 4, 4, 4, 7, 2, 3, 2, 4, 5, 5, 5, 6,
2, 7, 6, 8, 5, 5, 5, 7, 2, 6, 2, 2, 5, 6, 5, 6,
3, 7, 3, 5, 4, 4, 7, 7, 2, 3, 2, 3, 5, 5, 8, 6,
2, 7, 6, 8, 6, 5, 8, 7, 2, 6, 4, 3, 6, 6, 9, 6,
3, 7, 3, 5, 4, 4, 7, 7, 2, 3, 2, 3, 5, 5, 8, 6,
2, 7, 6, 8, 5, 5, 8, 7, 2, 6, 5, 2, 6, 6, 9, 6,
0, 0, 0, 0, 0, 0,
8, 6, 8, 4, 5, 3, 5, 6, 3, 2, 2, 4, 6, 4, 6, 5, /* e=1, m=1, x=1 */
2, 5, 5, 7, 5, 4, 6, 6, 2, 4, 2, 2, 6, 4, 7, 5,
6, 6, 8, 4, 3, 3, 5, 6, 4, 2, 2, 5, 4, 4, 6, 5,
2, 5, 5, 7, 4, 4, 6, 6, 2, 4, 2, 2, 4, 4, 7, 5,
7, 6, 2, 4, 0, 3, 5, 6, 3, 2, 2, 3, 3, 4, 6, 5,
2, 5, 5, 7, 0, 4, 6, 6, 2, 4, 3, 2, 4, 4, 7, 5,
6, 6, 6, 4, 3, 3, 5, 6, 4, 2, 2, 6, 5, 4, 6, 5,
2, 5, 5, 7, 4, 4, 6, 6, 2, 4, 4, 2, 6, 4, 7, 5,
2, 6, 3, 4, 3, 3, 3, 6, 2, 2, 2, 3, 4, 4, 4, 5,
2, 5, 5, 7, 4, 4, 4, 6, 2, 4, 2, 2, 4, 4, 4, 5,
2, 6, 2, 4, 3, 3, 3, 6, 2, 2, 2, 4, 4, 4, 4, 5,
2, 5, 5, 7, 4, 4, 4, 6, 2, 4, 2, 2, 4, 4, 4, 5,
2, 6, 3, 4, 3, 3, 5, 6, 2, 2, 2, 3, 4, 4, 6, 5,
2, 5, 5, 7, 6, 4, 6, 6, 2, 4, 3, 3, 6, 4, 7, 5,
2, 6, 3, 4, 3, 3, 5, 6, 2, 2, 2, 3, 4, 4, 6, 5,
2, 5, 5, 7, 5, 4, 6, 6, 2, 4, 4, 2, 6, 4, 7, 5,
0, 0, 0, 0, 0, 0
};
#endif
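/* For reference, one way the per-mode cycle blocks could be selected when
 * OLDCYCLES is enabled.  This is only a sketch: select_cycle_table is a
 * hypothetical helper, not the emulator's actual CPU_modeSwitch logic; the
 * block order simply mirrors the comments in the table above (each block is
 * 256 opcodes plus the 6 signal pseudo-opcodes, i.e. 262 entries). */
#ifdef OLDCYCLES
static void select_cycle_table(int e, int m, int x)
{
    int block;
    if (e)           block = 4;   /* e=1, m=1, x=1 */
    else if (m && x) block = 0;   /* e=0, m=1, x=1 */
    else if (m)      block = 1;   /* e=0, m=1, x=0 */
    else if (x)      block = 2;   /* e=0, m=0, x=1 */
    else             block = 3;   /* e=0, m=0, x=0 */
    cpu_curr_cycle_table = &cpu_cycle_table[block * 262];
}
#endif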
void CPU_run(void)
{
word32 last_update,next_update;
int opcode;
#ifdef LIMIT_INSTRUCTION_COUNT
long instructionCount = 0;
#endif
cpu_cycle_count = 0;
last_update = 0;
next_update = cpu_update_period;
E = 1;
F_setM(1);
F_setX(1);
CPU_modeSwitch();
dispatch:
// CPUEvent_elapse( cpu_cycle_count );
// cpu_cycle_count = 0;
if (dispatch_quit)
return;
// TODO remove this.
#ifdef LIMIT_INSTRUCTION_COUNT
instructionCount++;
if (instructionCount > LIMIT_INSTRUCTION_COUNT)
return;
#endif
// #ifdef E_UPDATE
if (cpu_cycle_count >= next_update) goto update;
update_resume:
// #endif
#ifdef DEBUG
if (cpu_trace) goto debug;
debug_resume:
#endif
if (cpu_reset) goto reset;
if (cpu_stop) goto dispatch;
if (cpu_abort) goto abort;
if (cpu_nmi) goto nmi;
if (cpu_irq) goto irq;
if (cpu_irne64) goto irne64;
if (cpu_irqt5) goto irqt5;
irq_return:
if (cpu_wait) { cpu_cycle_count++; goto dispatch; }
opcode = M_READ_OPCODE(PC.A);
PC.W.PC++;
#ifdef OLDCYCLES
cpu_cycle_count += cpu_curr_cycle_table[opcode];
#endif
(**cpu_curr_opcode_table[opcode])();
goto dispatch;
/* Special cases. These conditions are false far more often than they are */
/* true, so handling them out of line means that most of the time the */
/* generated code is _not_ branching. Only when a special case actually */
/* occurs do we take the branch penalty (if there is one). */
// #ifdef E_UPDATE
update:
E_UPDATE(cpu_cycle_count);
last_update = cpu_cycle_count;
next_update = last_update + cpu_update_period;
goto update_resume;
// #endif
#ifdef DEBUG
debug:
CPU_debug();
goto debug_resume;
#endif
reset:
(**cpu_curr_opcode_table[256])();
goto dispatch;
abort:
(**cpu_curr_opcode_table[257])();
goto dispatch;
nmi:
(**cpu_curr_opcode_table[258])();
goto dispatch;
irq:
if (P & 0x04) goto irq_return;
(**cpu_curr_opcode_table[259])();
goto dispatch;
irne64:
if (P & 0x04) goto irq_return;
(**cpu_curr_opcode_table[260])();
goto dispatch;
irqt5:
if (P & 0x04) goto irq_return;
(**cpu_curr_opcode_table[261])();
goto dispatch;
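/* Invoke the handler for one of the signal pseudo-opcodes (RESET_OP .. IRQT5_OP). */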
void handleSignal(int type) {
(**cpu_curr_opcode_table[type])();
}
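/* Run the periodic E_UPDATE hook and schedule the next update point. */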
void doUpdate() {
E_UPDATE(cpu_cycle_count);
last_update = cpu_cycle_count;
next_update = last_update + cpu_update_period;
}
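/* Reset cycle bookkeeping and start in emulation mode (E=1, 8-bit M and X). */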
void CPU_init(void) {
last_update = 0;
next_update = cpu_update_period;
cpu_cycle_count = 0;
E = 1;
F_setM(1);
F_setX(1);
CPU_modeSwitch();
}
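/* Fetch and execute a single instruction, running the periodic update first if it is due. */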
void CPU_step(void) {
if (cpu_cycle_count >= next_update) doUpdate();
int opcode = M_READ_OPCODE(PC.A);
PC.W.PC++;
(**cpu_curr_opcode_table[opcode])();
}
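/* Main dispatch loop: service pending signals, honor WAI/STP, otherwise step, until dispatch_quit is set. */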
void CPU_run(void) {
while (!dispatch_quit) {
if (cpu_trace) CPU_debug();
if (cpu_reset) { handleSignal(RESET_OP); continue; }
if (cpu_abort) { handleSignal(ABORT_OP); continue; }
if (cpu_nmi) { handleSignal(NMI_OP); continue; }
if (cpu_irq) { handleSignal(IRQ_OP); continue; }
if (cpu_irne64) { handleSignal(IRNE64_OP); continue; }
if (cpu_irqt5) { handleSignal(IRQT5_OP); continue; }
if (cpu_wait) { cpu_cycle_count++; continue; }
if (cpu_stop) continue;
CPU_step();
}
}
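/* A possible host-side use of the refactored entry points.  This is a sketch
 * under assumptions: request_emulator_quit and main are hypothetical, and the
 * only fact taken from the code above is that CPU_run loops until something
 * sets dispatch_quit. */
extern int dispatch_quit;
void request_emulator_quit(void)
{
    dispatch_quit = 1;            /* CPU_run exits at the top of its loop */
}
int main(void)
{
    CPU_init();                   /* cycle counters reset, E/M/X forced to 1 */
    CPU_run();                    /* dispatch until dispatch_quit is set */
    return 0;
}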
// void old_CPU_run(void)
// {
// word32 last_update,next_update;
// int opcode;
// #ifdef LIMIT_INSTRUCTION_COUNT
// long instructionCount = 0;
// #endif
// cpu_cycle_count = 0;
// last_update = 0;
// next_update = cpu_update_period;
// E = 1;
// F_setM(1);
// F_setX(1);
// CPU_modeSwitch();
// dispatch:
// // CPUEvent_elapse( cpu_cycle_count );
// // cpu_cycle_count = 0;
// if (dispatch_quit)
// return;
// // TODO remove this.
// #ifdef LIMIT_INSTRUCTION_COUNT
// instructionCount++;
// if (instructionCount > LIMIT_INSTRUCTION_COUNT)
// return;
// #endif
// // #ifdef E_UPDATE
// if (cpu_cycle_count >= next_update) goto update;
// update_resume:
// // #endif
// #ifdef DEBUG
// if (cpu_trace) goto debug;
// debug_resume:
// #endif
// if (cpu_reset) goto reset;
// if (cpu_stop) goto dispatch;
// if (cpu_abort) goto abort;
// if (cpu_nmi) goto nmi;
// if (cpu_irq) goto irq;
// if (cpu_irne64) goto irne64;
// if (cpu_irqt5) goto irqt5;
// irq_return:
// if (cpu_wait) { cpu_cycle_count++; goto dispatch; }
// opcode = M_READ_OPCODE(PC.A);
// PC.W.PC++;
// #ifdef OLDCYCLES
// cpu_cycle_count += cpu_curr_cycle_table[opcode];
// #endif
// (**cpu_curr_opcode_table[opcode])();
// goto dispatch;
// /* Special cases. Since these don't happen a lot more often than they */
// /* do happen, accessing them this way means most of the time the */
// /* generated code is _not_ branching. Only during the special cases do */
// /* we take the branch penalty (if there is one). */
// // #ifdef E_UPDATE
// update:
// E_UPDATE(cpu_cycle_count);
// last_update = cpu_cycle_count;
// next_update = last_update + cpu_update_period;
// goto update_resume;
// // #endif
// #ifdef DEBUG
// debug:
// CPU_debug();
// goto debug_resume;
// #endif
// reset:
// (**cpu_curr_opcode_table[256])();
// goto dispatch;
// abort:
// (**cpu_curr_opcode_table[257])();
// goto dispatch;
// nmi:
// (**cpu_curr_opcode_table[258])();
// goto dispatch;
// irq:
// if (P & 0x04) goto irq_return;
// (**cpu_curr_opcode_table[259])();
// goto dispatch;
// irne64:
// if (P & 0x04) goto irq_return;
// (**cpu_curr_opcode_table[260])();
// goto dispatch;
// irqt5:
// if (P & 0x04) goto irq_return;
// (**cpu_curr_opcode_table[261])();
// goto dispatch;
// }
/* Recalculate opcode_offset based on the new processor mode */
void CPU_modeSwitch(void) {

@@ -26,7 +26,6 @@ void EMUL_hardwareUpdate(word32 timestamp) {
}
if (SDL_PollEvent(&sdl_event)) {
puts("got an event!");
handleEvent(&sdl_event);
}
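/* handleEvent itself is not shown in this diff.  One plausible shape, as a
 * sketch only: route SDL_QUIT to the CPU_run quit flag (the tie-in to
 * dispatch_quit is an assumption, and other event types are elided). */
#include <SDL.h>
extern int dispatch_quit;
void handleEvent(SDL_Event *ev)
{
    switch (ev->type) {
    case SDL_QUIT:                /* window closed or quit requested */
        dispatch_quit = 1;        /* assumed: stop CPU_run's dispatch loop */
        break;
    default:
        break;                    /* keyboard / controller handling would go here */
    }
}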