diff --git a/Visual Studio Projects/Simh.sln b/Visual Studio Projects/Simh.sln
index 0d07c865..98f4fd71 100644
--- a/Visual Studio Projects/Simh.sln
+++ b/Visual Studio Projects/Simh.sln
@@ -59,6 +59,8 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "swtp6800mp-a2", "swtp6800mp
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sigma", "sigma.vcproj", "{7DDB6DF6-3837-4DE3-80D7-63181195021F}"
EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "alpha", "alpha.vcproj", "{14C5D369-E4A1-4DA2-B23C-B49367874F7B}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Win32 = Debug|Win32
@@ -179,6 +181,10 @@ Global
{7DDB6DF6-3837-4DE3-80D7-63181195021F}.Debug|Win32.Build.0 = Debug|Win32
{7DDB6DF6-3837-4DE3-80D7-63181195021F}.Release|Win32.ActiveCfg = Release|Win32
{7DDB6DF6-3837-4DE3-80D7-63181195021F}.Release|Win32.Build.0 = Release|Win32
+ {14C5D369-E4A1-4DA2-B23C-B49367874F7B}.Debug|Win32.ActiveCfg = Debug|Win32
+ {14C5D369-E4A1-4DA2-B23C-B49367874F7B}.Debug|Win32.Build.0 = Debug|Win32
+ {14C5D369-E4A1-4DA2-B23C-B49367874F7B}.Release|Win32.ActiveCfg = Release|Win32
+ {14C5D369-E4A1-4DA2-B23C-B49367874F7B}.Release|Win32.Build.0 = Release|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
diff --git a/Visual Studio Projects/alpha.vcproj b/Visual Studio Projects/alpha.vcproj
new file mode 100644
index 00000000..e4bfd673
--- /dev/null
+++ b/Visual Studio Projects/alpha.vcproj
@@ -0,0 +1,325 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/alpha/alpha_500au_syslist.c b/alpha/alpha_500au_syslist.c
new file mode 100644
index 00000000..5acdc997
--- /dev/null
+++ b/alpha/alpha_500au_syslist.c
@@ -0,0 +1,49 @@
+/* alpha_500au_syslist.c: Alpha device list for 500au
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+*/
+
+#include "alpha_defs.h"
+
+extern DEVICE cpu_dev;                                  /* CPU (alpha_cpu.c) */
+extern DEVICE tlb_dev;                                  /* translation buffers (ITLB/DTLB) */
+extern DEVICE ev5pal_dev;                               /* EV5 PALcode device */
+extern DEVICE rom_dev;                                  /* boot ROM */
+
+/* SCP data structures and interface routines
+
+   sim_name             simulator name
+   sim_devices          array of pointers to simulated devices
+*/
+
+char sim_name[] = "Alpha";
+
+DEVICE *sim_devices[] = {
+    &cpu_dev,
+    &tlb_dev,
+    &ev5pal_dev,
+    &rom_dev,
+    NULL                                                /* end-of-list sentinel */
+    };
+
diff --git a/alpha/alpha_cpu.c b/alpha/alpha_cpu.c
new file mode 100644
index 00000000..7cacf0ea
--- /dev/null
+++ b/alpha/alpha_cpu.c
@@ -0,0 +1,1865 @@
+/* alpha_cpu.c: Alpha CPU simulator
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ Alpha architecturally-defined CPU state:
+
+ PC<63:0> program counter
+ R[0:31]<63:0> integer registers
+ F[0:31]<63:0> floating registers
+ FPCR<63:0> floating point control register
+ (only left 32b are implemented)
+ PCC<63:0> hardware cycle counter
+ trap_summ<6:0> arithmetic trap summary
+ trap_mask<63:0> arithmetic trap register mask
+ lock_flag load_locked flag
+ vax_flag<0> VAX compatibility interrupt flag
+ FEN<0> floating point enable flag
+
+ The Alpha CPU privileged state is "soft" and varies significantly from
+ operating system to operating system. Alpha provides an intermediate layer
+ of software (called PALcode) that implements the privileged state as well
+ as a library of complex instruction functions. PALcode implementations
+ are chip specific and system specific, as well as OS specific.
+
+   Alpha memory management is also "soft" and supports a variety of mapping
+ schemes. VMS and Unix use a three level page table and directly expose
+ the underlying 64b hardware PTE. NT uses a condensed 32b PTE.
+
+ All Alpha instructions are 32b wide. There are five basic formats: PALcall,
+ branch, memory reference, integer operate, and floating operate.
+
+ 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | |
+ | opcode | PAL function | PAL
+ | | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | | |
+ | opcode | Ra | branch displacement | branch
+ | | | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | | | |
+ | opcode | Ra | Rb | address displacement | memory
+ | | | | | reference
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | | | | | | |
+ | opcode | Ra | Rb |0 0 0|0| function | Rc | integer
+ | | | | | | | | operate
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | |
+ | literal |1|
+ | | |
+ +-+-+-+-+-+-+-+-+-+
+
+ 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | | | | | | |
+ | opcode | Ra | Rb | trap|rnd| function | Rc | floating
+ | | | | | | | | operate
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Memory reference format is also used for some two-operand operates;
+ the address displacement is the function code.
+
+ This routine is the instruction decode routine for the Alpha. It
+ is called from the simulator control program to execute instructions
+ in simulated memory, starting at the simulated PC. It runs until an
+ enabled exception is encountered.
+
+ General notes:
+
+ 1. Traps and interrupts. Variable trap_summ summarizes the outstanding
+ trap requests (if any). Variable intr_summ summarizes the outstanding
+ interrupt requests (if any).
+
+ 2. Interrupt requests are maintained in the int_req array, one word per
+ interrupt level, one bit per device.
+
+ 3. Adding I/O devices. These modules must be modified:
+
+ alpha_defs.h add device address and interrupt definitions
+ alpha_sys.c add sim_devices table entry
+*/
+
+#include "alpha_defs.h"
+
+#define UNIT_V_CONH (UNIT_V_UF + 0) /* halt to console */
+#define UNIT_V_MSIZE (UNIT_V_UF + 1)
+#define UNIT_CONH (1 << UNIT_V_CONH)
+#define UNIT_MSIZE (1 << UNIT_V_MSIZE)
+
+#define HIST_PC 0x2
+#define HIST_MIN 64
+#define HIST_MAX (1 << 18)
+
+typedef struct {
+ t_uint64 pc;
+ uint32 ir;
+ uint32 filler;
+ t_uint64 ra;
+ t_uint64 rb;
+ } InstHistory;
+
+#define H_A 0x01
+#define H_B 0x02
+#define H_B_LIT 0x04
+#define H_EA 0x08
+#define H_EA_B 0x10
+#define H_EA_L16 0x20
+#define H_MRF (H_A|H_B|H_EA)
+#define H_BRA (H_A|H_EA|H_EA_B)
+#define H_IOP (H_A|H_B|H_B_LIT)
+#define H_FOP (H_A|H_B)
+#define H_PAL (H_A|H_EA|H_EA_L16)
+#define H_JMP (H_A|H_B|H_EA|H_EA_L16)
+
+t_uint64 *M = 0; /* memory */
+t_uint64 R[32]; /* integer reg */
+t_uint64 FR[32]; /* floating reg */
+t_uint64 PC; /* PC, <1:0> MBZ */
+uint32 pc_align = 0; /* PC<1:0> */
+t_uint64 trap_mask = 0; /* trap reg mask */
+uint32 trap_summ = 0; /* trap summary */
+uint32 fpcr = 0; /* fp ctrl reg */
+uint32 pcc_l = 0;                                       /* rpcc low */
+uint32 pcc_h = 0;                                       /* rpcc high */
+uint32 pcc_enb = 0;                                     /* pcc increment, added to pcc_l per instr */
+uint32 arch_mask = AMASK_BWX | AMASK_PRC; /* arch mask */
+uint32 impl_ver = IMPLV_EV5; /* impl version */
+uint32 lock_flag = 0; /* load lock flag */
+uint32 vax_flag = 0; /* vax intr flag */
+uint32 intr_summ = 0; /* interrupt summary */
+uint32 pal_mode = 1; /* PAL mode */
+uint32 pal_type = PAL_UNDF; /* PAL type */
+uint32 dmapen = 0; /* data mapping enable */
+uint32 fpen = 0; /* flt point enabled */
+uint32 ir = 0; /* instruction register */
+t_uint64 p1 = 0; /* exception parameter */
+uint32 int_req[IPL_HLVL] = { 0 }; /* interrupt requests */
+REG *pcq_r = NULL; /* PC queue reg ptr */
+t_uint64 pcq[PCQ_SIZE] = { 0 }; /* PC queue */
+int32 pcq_p = 0; /* PC queue ptr */
+uint32 cpu_astop = 0;
+uint32 hst_p = 0; /* history pointer */
+uint32 hst_lnt = 0; /* history length */
+InstHistory *hst = NULL; /* instruction history */
+jmp_buf save_env;
+
+const t_uint64 byte_mask[8] = {
+ 0x00000000000000FF, 0x000000000000FF00,
+ 0x0000000000FF0000, 0x00000000FF000000,
+ 0x000000FF00000000, 0x0000FF0000000000,
+ 0x00FF000000000000, 0xFF00000000000000
+ };
+
+const t_uint64 word_mask[4] = {
+ 0x000000000000FFFF, 0x00000000FFFF0000,
+ 0x0000FFFF00000000, 0xFFFF000000000000
+ };
+
+extern int32 sim_interval;
+extern int32 sim_int_char;
+extern FILE *sim_deb;
+extern uint32 sim_brk_types, sim_brk_dflt, sim_brk_summ; /* breakpoint info */
+
+t_uint64 uemul64 (t_uint64 a, t_uint64 b, t_uint64 *hi);
+t_uint64 byte_zap (t_uint64 op, uint32 mask);
+t_stat cpu_reset (DEVICE *dptr);
+t_stat cpu_boot (int32 unitno, DEVICE *dptr);
+t_stat cpu_ex (t_value *vptr, t_addr exta, UNIT *uptr, int32 sw);
+t_stat cpu_dep (t_value val, t_addr exta, UNIT *uptr, int32 sw);
+t_stat cpu_set_size (UNIT *uptr, int32 val, char *cptr, void *desc);
+t_stat cpu_set_hist (UNIT *uptr, int32 val, char *cptr, void *desc);
+t_stat cpu_show_hist (FILE *st, UNIT *uptr, int32 val, void *desc);
+t_stat cpu_show_virt (FILE *of, UNIT *uptr, int32 val, void *desc);
+t_stat cpu_fprint_one_inst (FILE *st, uint32 ir, t_uint64 pc, t_uint64 ra, t_uint64 rb);
+
+extern t_uint64 op_ldf (t_uint64 op);
+extern t_uint64 op_ldg (t_uint64 op);
+extern t_uint64 op_lds (t_uint64 op);
+extern t_uint64 op_stf (t_uint64 op);
+extern t_uint64 op_stg (t_uint64 op);
+extern t_uint64 op_sts (t_uint64 op);
+extern t_uint64 vax_sqrt (uint32 ir, t_bool dp);
+extern t_uint64 ieee_sqrt (uint32 ir, t_bool dp);
+extern void vax_fop (uint32 ir);
+extern void ieee_fop (uint32 ir);
+extern t_stat pal_19 (uint32 ir);
+extern t_stat pal_1b (uint32 ir);
+extern t_stat pal_1d (uint32 ir);
+extern t_stat pal_1e (uint32 ir);
+extern t_stat pal_1f (uint32 ir);
+extern t_uint64 trans_c (t_uint64 va);
+extern t_stat cpu_show_tlb (FILE *of, UNIT *uptr, int32 val, void *desc);
+extern t_stat pal_eval_intr (uint32 flag);
+extern t_stat pal_proc_excp (uint32 type);
+extern t_stat pal_proc_trap (uint32 type);
+extern t_stat pal_proc_intr (uint32 type);
+extern t_stat pal_proc_inst (uint32 fnc);
+extern uint32 tlb_set_cm (int32 cm);
+
+/* CPU data structures
+
+ cpu_dev CPU device descriptor
+ cpu_unit CPU unit
+ cpu_reg CPU register list
+ cpu_mod CPU modifier list
+*/
+
+UNIT cpu_unit = { UDATA (NULL, UNIT_FIX + UNIT_BINK, INITMEMSIZE) };
+
+REG cpu_reg[] = {
+ { HRDATA (PC, PC, 64), PV_LEFT },
+ { HRDATA (PCALG, pc_align, 3) },
+ { HRDATA (R0, R[0], 64) },
+ { HRDATA (R1, R[1], 64) },
+ { HRDATA (R2, R[2], 64) },
+ { HRDATA (R3, R[3], 64) },
+ { HRDATA (R4, R[4], 64) },
+ { HRDATA (R5, R[5], 64) },
+ { HRDATA (R6, R[6], 64) },
+ { HRDATA (R7, R[7], 64) },
+ { HRDATA (R8, R[8], 64) },
+ { HRDATA (R9, R[9], 64) },
+ { HRDATA (R10, R[10], 64) },
+ { HRDATA (R11, R[11], 64) },
+ { HRDATA (R12, R[12], 64) },
+ { HRDATA (R13, R[13], 64) },
+ { HRDATA (R14, R[14], 64) },
+ { HRDATA (R15, R[15], 64) },
+ { HRDATA (R16, R[16], 64) },
+ { HRDATA (R17, R[17], 64) },
+ { HRDATA (R18, R[18], 64) },
+ { HRDATA (R19, R[19], 64) },
+ { HRDATA (R20, R[20], 64) },
+ { HRDATA (R21, R[21], 64) },
+ { HRDATA (R22, R[22], 64) },
+ { HRDATA (R23, R[23], 64) },
+ { HRDATA (R24, R[24], 64) },
+ { HRDATA (R25, R[25], 64) },
+ { HRDATA (R26, R[26], 64) },
+ { HRDATA (R27, R[27], 64) },
+ { HRDATA (R28, R[28], 64) },
+ { HRDATA (R29, R[29], 64) },
+ { HRDATA (R30, R[30], 64) },
+ { HRDATA (R31, R[31], 64), REG_RO },
+ { HRDATA (F0, FR[0], 64) },
+ { HRDATA (F1, FR[1], 64) },
+ { HRDATA (F2, FR[2], 64) },
+ { HRDATA (F3, FR[3], 64) },
+ { HRDATA (F4, FR[4], 64) },
+ { HRDATA (F5, FR[5], 64) },
+ { HRDATA (F6, FR[6], 64) },
+ { HRDATA (F7, FR[7], 64) },
+ { HRDATA (F8, FR[8], 64) },
+ { HRDATA (F9, FR[9], 64) },
+ { HRDATA (F10, FR[10], 64) },
+ { HRDATA (F11, FR[11], 64) },
+ { HRDATA (F12, FR[12], 64) },
+ { HRDATA (F13, FR[13], 64) },
+ { HRDATA (F14, FR[14], 64) },
+ { HRDATA (F15, FR[15], 64) },
+ { HRDATA (F16, FR[16], 64) },
+ { HRDATA (F17, FR[17], 64) },
+ { HRDATA (F18, FR[18], 64) },
+ { HRDATA (F19, FR[19], 64) },
+ { HRDATA (F20, FR[20], 64) },
+ { HRDATA (F21, FR[21], 64) },
+ { HRDATA (F22, FR[22], 64) },
+ { HRDATA (F23, FR[23], 64) },
+ { HRDATA (F24, FR[24], 64) },
+ { HRDATA (F25, FR[25], 64) },
+ { HRDATA (F26, FR[26], 64) },
+ { HRDATA (F27, FR[27], 64) },
+ { HRDATA (F28, FR[28], 64) },
+ { HRDATA (F29, FR[29], 64) },
+ { HRDATA (F30, FR[30], 64) },
+ { HRDATA (F31, FR[31], 64), REG_RO },
+ { HRDATA (FPCR, fpcr, 32) },
+ { FLDATA (FEN, fpen, 0) },
+ { HRDATA (TRAPS, trap_summ, 8) },
+ { HRDATA (TRAPM, trap_mask, 64) },
+ { HRDATA (PCCH, pcc_h, 32) },
+ { HRDATA (PCCL, pcc_l, 32) },
+ { FLDATA (LOCK, lock_flag, 0) },
+ { FLDATA (VAXF, vax_flag, 0) },
+ { FLDATA (PALMODE, pal_mode, 0) },
+ { HRDATA (PALTYPE, pal_type, 2), REG_HRO },
+ { HRDATA (DMAPEN, dmapen, 0) },
+ { HRDATA (AMASK, arch_mask, 13), REG_RO },
+ { HRDATA (IMPLV, impl_ver, 2), REG_RO },
+ { BRDATA (PCQ, pcq, 16, 32, PCQ_SIZE), REG_RO+REG_CIRC },
+ { HRDATA (PCQP, pcq_p, 6), REG_HRO },
+ { HRDATA (WRU, sim_int_char, 8) },
+ { NULL }
+ };
+
+MTAB cpu_mod[] = {
+ { UNIT_MSIZE, (1u << 25), NULL, "32M", &cpu_set_size },
+ { UNIT_MSIZE, (1u << 26), NULL, "64M", &cpu_set_size },
+ { UNIT_MSIZE, (1u << 27), NULL, "128M", &cpu_set_size },
+ { UNIT_MSIZE, (1u << 28), NULL, "256M", &cpu_set_size },
+ { UNIT_MSIZE, (1u << 29), NULL, "512M", &cpu_set_size },
+ { UNIT_CONH, 0, "HALT to SIMH", "SIMHALT", NULL },
+ { UNIT_CONH, UNIT_CONH, "HALT to console", "CONHALT", NULL },
+ { MTAB_XTD|MTAB_VDV|MTAB_NMO|MTAB_SHP, 0, "VIRTUAL", NULL,
+ NULL, &cpu_show_virt },
+ { MTAB_XTD|MTAB_VDV|MTAB_NMO|MTAB_SHP, 0, "ITLB", NULL,
+ NULL, &cpu_show_tlb },
+ { MTAB_XTD|MTAB_VDV|MTAB_NMO|MTAB_SHP, 1, "DTLB", NULL,
+ NULL, &cpu_show_tlb },
+ { MTAB_XTD|MTAB_VDV|MTAB_NMO|MTAB_SHP, 0, "HISTORY", "HISTORY",
+ &cpu_set_hist, &cpu_show_hist },
+ { 0 }
+ };
+
+DEVICE cpu_dev = {
+    "CPU", &cpu_unit, cpu_reg, cpu_mod,                 /* name, units, registers, modifiers */
+    1, 16, 48, 8, 16, 64,                               /* #units, aradix, awidth, aincr, dradix, dwidth */
+    &cpu_ex, &cpu_dep, &cpu_reset,                      /* examine, deposit, reset */
+    &cpu_boot, NULL, NULL,                              /* boot, attach, detach */
+    NULL, DEV_DYNM|DEV_DEBUG, 0,                        /* context, flags, debug control */
+    NULL, &cpu_set_size, NULL                           /* debug flags, msize (DEV_DYNM), logical name */
+    };
+
+t_stat sim_instr (void)
+{
+t_stat reason;
+int abortval;
+t_bool tracing;
+
+PC = PC | pc_align; /* put PC together */
+abortval = setjmp (save_env); /* set abort hdlr */
+if (abortval != 0) { /* exception? */
+ if (abortval < 0) { /* SCP stop? */
+ pcc_l = pcc_l & M32;
+ pcq_r->qptr = pcq_p; /* update pc q ptr */
+ pc_align = ((uint32) PC) & 3; /* separate PC<1:0> */
+ PC = PC & 0xFFFFFFFFFFFFFFFC;
+ return -abortval;
+ }
+ reason = pal_proc_excp (abortval); /* pal processing */
+ }
+else reason = 0;
+tlb_set_cm (-1); /* resync cm */
+tracing = ((hst_lnt != 0) || DEBUG_PRS (cpu_dev));
+
+intr_summ = pal_eval_intr (1); /* eval interrupts */
+
+/* Main instruction loop */
+
+while (reason == 0) {
+
+ int32 i;
+ uint32 op, ra, rb, rc, fnc, sc, s32, t32, sgn;
+ t_int64 s1, s2, sr;
+ t_uint64 ea, dsp, rbv, res, s64, t64;
+
+ if (cpu_astop) { /* debug stop? */
+ cpu_astop = 0; /* clear */
+ reason = SCPE_STOP; /* stop simulation */
+ break;
+ }
+
+ if (sim_interval <= 0) { /* chk clock queue */
+ if (reason = sim_process_event ()) break;
+ intr_summ = pal_eval_intr (1); /* eval interrupts */
+ }
+
+ if (intr_summ && !pal_mode) { /* interrupt pending? */
+ reason = pal_proc_intr (intr_summ); /* pal processing */
+ intr_summ = pal_eval_intr (1); /* eval interrupts */
+ continue;
+ }
+
+ if (sim_brk_summ && sim_brk_test (PC, SWMASK ('E'))) { /* breakpoint? */
+ reason = STOP_IBKPT; /* stop simulation */
+ break;
+ }
+
+ sim_interval = sim_interval - 1; /* count instr */
+ pcc_l = pcc_l + pcc_enb;
+ ir = ReadI (PC); /* get instruction */
+ op = I_GETOP (ir); /* get opcode */
+ ra = I_GETRA (ir); /* get ra */
+ rb = I_GETRB (ir); /* get rb */
+
+ if (tracing) { /* trace or history? */
+ if (hst_lnt) { /* history enabled? */
+ hst_p = (hst_p + 1); /* next entry */
+ if (hst_p >= hst_lnt) hst_p = 0;
+ hst[hst_p].pc = PC | pc_align | HIST_PC; /* save PC */
+ hst[hst_p].ir = ir; /* save ir */
+ hst[hst_p].ra = R[ra]; /* save Ra */
+ hst[hst_p].rb = R[rb]; /* save Rb */
+ }
+ if (DEBUG_PRS (cpu_dev)) /* trace enabled? */
+ cpu_fprint_one_inst (sim_deb, ir, PC | pc_align, R[ra], R[rb]);
+ }
+
+ PC = (PC + 4) & M64; /* advance PC */
+ switch (op) {
+
+/* Memory reference instructions */
+
+ case OP_LDA: /* LDA */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ R[ra] = ea;
+ }
+ break;
+
+ case OP_LDAH: /* LDAH */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir) << 16;
+ ea = (R[rb] + SEXT_L_Q (dsp)) & M64;
+ R[ra] = ea;
+ }
+ break;
+
+ case OP_LDBU: /* LDBU */
+ if (!(arch_mask & AMASK_BWX)) ABORT (EXC_RSVI); /* EV56 or later */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ R[ra] = ReadB (ea);
+ }
+ break;
+
+ case OP_LDQ_U: /* LDQ_U */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ R[ra] = ReadQ (ea & ~7); /* ignore ea<2:0> */
+ }
+ break;
+
+ case OP_LDWU: /* LDWU */
+ if (!(arch_mask & AMASK_BWX)) ABORT (EXC_RSVI); /* EV56 or later */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ R[ra] = ReadW (ea);
+ }
+ break;
+
+ case OP_STW: /* STW */
+ if (!(arch_mask & AMASK_BWX)) ABORT (EXC_RSVI); /* EV56 or later */
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ WriteW (ea, R[ra]);
+ break;
+
+ case OP_STB: /* STB */
+ if (!(arch_mask & AMASK_BWX)) ABORT (EXC_RSVI); /* EV56 or later */
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ WriteB (ea, R[ra]);
+ break;
+
+ case OP_STQ_U: /* STQ_U */
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ WriteQ (ea & ~7, R[ra]); /* ignore ea<2:0> */
+ break;
+
+ case OP_LDF: /* LDF */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ FR[ra] = op_ldf (ReadL (ea)); /* swizzle bits */
+ }
+ break;
+
+ case OP_LDG: /* LDG */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ FR[ra] = op_ldg (ReadQ (ea)); /* swizzle bits */
+ }
+ break;
+
+ case OP_LDS: /* LDS */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ FR[ra] = op_lds (ReadL (ea)); /* swizzle bits */
+ }
+ break;
+
+ case OP_LDT: /* LDT */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ FR[ra] = ReadQ (ea); /* no swizzling needed */
+ }
+ break;
+
+ case OP_STF: /* STF */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ WriteL (ea, op_stf (FR[ra])); /* swizzle bits */
+ break;
+
+ case OP_STG: /* STG */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ WriteQ (ea, op_stg (FR[ra])); /* swizzle bits */
+ break;
+
+ case OP_STS: /* STS */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ WriteL (ea, op_sts (FR[ra])); /* swizzle bits */
+ break;
+
+ case OP_STT: /* STT */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ WriteQ (ea, FR[ra]); /* no swizzling needed */
+ break;
+
+ case OP_LDL: /* LDL */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ res = ReadL (ea);
+ R[ra] = SEXT_L_Q (res);
+ }
+ break;
+
+ case OP_LDQ: /* LDQ */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ R[ra] = ReadQ (ea);
+ }
+ break;
+
+ case OP_LDL_L: /* LDL_L */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ res = ReadL (ea);
+ R[ra] = SEXT_L_Q (res);
+ lock_flag = 1; /* set lock flag */
+ }
+ break;
+
+ case OP_LDQ_L: /* LDQ_L */
+ if (ra != 31) {
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ R[ra] = ReadQ (ea);
+ lock_flag = 1; /* set lock flag */
+ }
+ break;
+
+ case OP_STL: /* STL */
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ WriteL (ea, R[ra]);
+ break;
+
+ case OP_STQ: /* STQ */
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ WriteQ (ea, R[ra]);
+ break;
+
+ case OP_STL_C: /* STL_C */
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ if (lock_flag) WriteL (ea, R[ra]); /* unlocking? ok */
+ else R[ra] = 0; /* write fails */
+ lock_flag = 0; /* clear lock */
+ break;
+
+ case OP_STQ_C: /* STQ_C */
+ dsp = I_GETMDSP (ir);
+ ea = (R[rb] + SEXT_MDSP (dsp)) & M64;
+ if (lock_flag) WriteQ (ea, R[ra]); /* unlocking? ok */
+ else R[ra] = 0; /* write fails */
+ lock_flag = 0; /* clear lock */
+ break;
+
+/* Control instructions */
+
+ case OP_JMP: /* JMP */
+ PCQ_ENTRY;
+ rbv = R[rb]; /* in case Ra = Rb */
+ if (ra != 31) R[ra] = PC; /* save PC */
+ PC = rbv; /* jump */
+ break;
+
+ case OP_BR: /* BR, BSR */
+ case OP_BSR:
+ PCQ_ENTRY;
+ if (ra != 31) R[ra] = PC; /* save PC */
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64; /* branch */
+ break;
+
+ case OP_FBEQ: /* FBEQ */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ if ((FR[ra] & ~FPR_SIGN) == 0) { /* +0 or - 0? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_FBLT: /* FBLT */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ if (FR[ra] > FPR_SIGN) { /* -0 to -n? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_FBLE: /* FBLE */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ if ((FR[ra] & FPR_SIGN) || (FR[ra] == 0)) { /* - or 0? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_FBNE: /* FBNE */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ if ((FR[ra] & ~FPR_SIGN) != 0) { /* not +0 or -0? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_FBGE: /* FBGE */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ if (FR[ra] <= FPR_SIGN) { /* +0 to +n? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_FBGT: /* FBGT */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ if (!(FR[ra] & FPR_SIGN) && (FR[ra] != 0)) { /* not - and not 0? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_BLBC: /* BLBC */
+ if ((R[ra] & 1) == 0) { /* R<0> == 0? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_BEQ: /* BEQ */
+ if (R[ra] == 0) { /* R == 0? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_BLT: /* BLT */
+ if (R[ra] & Q_SIGN) { /* R<63> == 1? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_BLE: /* BLE */
+ if ((R[ra] == 0) || (R[ra] & Q_SIGN)) { /* R == 0 || R<63> == 1? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_BLBS: /* BLBS */
+ if ((R[ra] & 1) != 0) { /* R<0> == 1? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_BNE: /* BNE */
+ if (R[ra] != 0) { /* R != 0? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_BGE: /* BGE */
+ if (!(R[ra] & Q_SIGN)) { /* R<63> == 0? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+ case OP_BGT: /* BGT */
+ if ((R[ra] != 0) && !(R[ra] & Q_SIGN)) { /* R != 0 && R<63> == 0? */
+ PCQ_ENTRY;
+ dsp = I_GETBDSP (ir);
+ PC = (PC + (SEXT_BDSP (dsp) << 2)) & M64;
+ }
+ break;
+
+/* Integer arithmetic operates (10) */
+
+ case OP_IALU: /* integer arith opr */
+ rc = I_GETRC (ir); /* get rc */
+ if (ir & I_ILIT) rbv = I_GETLIT8 (ir); /* literal? rbv = lit */
+ else rbv = R[rb]; /* no, rbv = R[rb] */
+ fnc = I_GETIFNC (ir); /* get function */
+ switch (fnc) { /* case on function */
+
+ case 0x00: /* ADDL */
+ res = SEXT_L_Q (R[ra] + rbv);
+ break;
+
+ case 0x02: /* S4ADDL */
+ res = SEXT_L_Q ((R[ra] << 2) + rbv);
+ break;
+
+ case 0x09: /* SUBL */
+ res = SEXT_L_Q (R[ra] - rbv);
+ break;
+
+ case 0x0B: /* S4SUBL */
+ res = SEXT_L_Q ((R[ra] << 2) - rbv);
+ break;
+
+ case 0x0F: /* CMPBGE */
+ for (i = 0, res = 0; i < 8; i++) {
+ if ((R[ra] & byte_mask[i]) >= (rbv & byte_mask[i]))
+ res = res | ((t_uint64) 1u << i);
+ }
+ break;
+
+ case 0x12: /* S8ADDL */
+ res = SEXT_L_Q ((R[ra] << 3) + rbv);
+ break;
+
+ case 0x1B: /* S8SUBL */
+ res = SEXT_L_Q ((R[ra] << 3) - rbv);
+ break;
+
+ case 0x1D: /* CMPULT */
+ res = (R[ra] < rbv);
+ break;
+
+ case 0x20: /* ADDQ */
+ res = R[ra] + rbv;
+ break;
+
+ case 0x22: /* S4ADDQ */
+ res = (R[ra] << 2) + rbv;
+ break;
+
+ case 0x29: /* SUBQ */
+ res = R[ra] - rbv;
+ break;
+
+ case 0x2B: /* S4SUBQ */
+ res = (R[ra] << 2) - rbv;
+ break;
+
+ case 0x2D: /* CMPEQ */
+ res = (R[ra] == rbv);
+ break;
+
+ case 0x32: /* S8ADDQ */
+ res = (R[ra] << 3) + rbv;
+ break;
+
+ case 0x3B: /* S8SUBQ */
+ res = (R[ra] << 3) - rbv;
+ break;
+
+ case 0x3D: /* CMPULE */
+ res = (R[ra] <= rbv);
+ break;
+
+ case 0x40: /* ADDL/V */
+ res = SEXT_L_Q (R[ra] + rbv);
+ if (((~R[ra] ^ rbv) & (R[ra] ^ res)) & L_SIGN)
+ arith_trap (TRAP_IOV, ir);
+ break;
+
+ case 0x49: /* SUBL/V */
+ res = SEXT_L_Q (R[ra] - rbv);
+ if (((R[ra] ^ rbv) & (~rbv ^ res)) & L_SIGN)
+ arith_trap (TRAP_IOV, ir);
+ break;
+
+ case 0x4D: /* CMPLT */
+ sgn = Q_GETSIGN (R[ra]); /* get Ra sign */
+ if (sgn ^ Q_GETSIGN (rbv)) res = sgn; /* signs diff? */
+ else res = sgn ^ (R[ra] < rbv);
+ break;
+
+ case 0x60: /* ADDQ/V */
+ res = R[ra] + rbv;
+ if (((~R[ra] ^ rbv) & (R[ra] ^ res)) & Q_SIGN)
+ arith_trap (TRAP_IOV, ir);
+ break;
+
+ case 0x69: /* SUBQ/V */
+ res = R[ra] - rbv;
+ if (((R[ra] ^ rbv) & (~rbv ^ res)) & Q_SIGN)
+ arith_trap (TRAP_IOV, ir);
+ break;
+
+ case 0x6D: /* CMPLE */
+ if (R[ra] == rbv) res = 1;
+ else {
+ sgn = Q_GETSIGN (R[ra]); /* get Ra sign */
+ if (sgn ^ Q_GETSIGN (rbv)) res = sgn; /* signs diff? */
+ else res = sgn ^ (R[ra] < rbv);
+ }
+
+ break;
+ default:
+ res = R[rc];
+ break;
+ }
+
+ if (rc != 31) R[rc] = res & M64;
+ break;
+
+/* Integer logical operates (11) */
+
+ case OP_ILOG: /* integer logic opr */
+ rc = I_GETRC (ir); /* get rc */
+ if (ir & I_ILIT) rbv = I_GETLIT8 (ir); /* literal? rbv = lit */
+ else rbv = R[rb]; /* no, rbv = R[rb] */
+ fnc = I_GETIFNC (ir); /* get function */
+ switch (fnc) { /* case on function */
+
+ case 0x00: /* AND */
+ res = R[ra] & rbv;
+ break;
+
+ case 0x08: /* BIC */
+ res = R[ra] & ~rbv;
+ break;
+
+ case 0x14: /* CMOVLBS */
+ if ((R[ra] & 1) != 0) res = rbv;
+ else res = R[rc];
+ break;
+
+ case 0x16: /* CMOVLBC */
+ if ((R[ra] & 1) == 0) res = rbv;
+ else res = R[rc];
+ break;
+
+ case 0x20: /* BIS */
+ res = R[ra] | rbv;
+ break;
+
+ case 0x24: /* CMOVEQ */
+ if (R[ra] == 0) res = rbv;
+ else res = R[rc];
+ break;
+
+ case 0x26: /* CMOVNE */
+ if (R[ra] != 0) res = rbv;
+ else res = R[rc];
+ break;
+
+ case 0x28: /* ORNOT */
+ res = R[ra] | ~rbv;
+ break;
+
+ case 0x40: /* XOR */
+ res = R[ra] ^ rbv;
+ break;
+
+ case 0x44: /* CMOVLT */
+ if (R[ra] & Q_SIGN) res = rbv;
+ else res = R[rc];
+ break;
+
+ case 0x46: /* CMOVGE */
+ if (!(R[ra] & Q_SIGN)) res = rbv;
+ else res = R[rc];
+ break;
+
+ case 0x48: /* EQV */
+ res = R[ra] ^ ~rbv;
+ break;
+
+ case 0x61: /* AMASK */
+ res = rbv & ~arch_mask;
+ break;
+
+ case 0x64: /* CMOVLE */
+ if ((R[ra] & Q_SIGN) || (R[ra] == 0)) res = rbv;
+ else res = R[rc];
+ break;
+
+ case 0x66: /* CMOVGT */
+ if (!(R[ra] & Q_SIGN) && (R[ra] != 0)) res = rbv;
+ else res = R[rc];
+ break;
+
+ case 0x6C: /* IMPLVER */
+ res = impl_ver;
+ break;
+
+ default:
+ res = R[rc];
+ break;
+ }
+
+ if (rc != 31) R[rc] = res & M64;
+ break;
+
+/* Integer logical shifts (12) */
+
+ case OP_ISHFT: /* integer shifts */
+ rc = I_GETRC (ir); /* get rc */
+ if (ir & I_ILIT) rbv = I_GETLIT8 (ir); /* literal? rbv = lit */
+ else rbv = R[rb]; /* no, rbv = R[rb] */
+ fnc = I_GETIFNC (ir); /* get function */
+ switch (fnc) { /* case on function */
+
+ case 0x02: /* MSKBL */
+ sc = ((uint32) rbv) & 7;
+ res = byte_zap (R[ra], 0x1 << sc);
+ break;
+
+ case 0x06: /* EXTBL */
+ sc = (((uint32) rbv) << 3) & 0x3F;
+ res = (R[ra] >> sc) & M8;
+ break;
+
+ case 0x0B: /* INSBL */
+ sc = (((uint32) rbv) << 3) & 0x3F;
+ res = (R[ra] & M8) << sc;
+ break;
+
+ case 0x12: /* MSKWL */
+ sc = ((uint32) rbv) & 7;
+ res = byte_zap (R[ra], 0x3 << sc);
+ break;
+
+ case 0x16: /* EXTWL */
+ sc = (((uint32) rbv) << 3) & 0x3F;
+ res = (R[ra] >> sc) & M16;
+ break;
+
+ case 0x1B: /* INSWL */
+ sc = (((uint32) rbv) << 3) & 0x3F;
+ res = (R[ra] & M16) << sc;
+ break;
+
+ case 0x22: /* MSKLL */
+ sc = ((uint32) rbv) & 7;
+ res = byte_zap (R[ra], 0xF << sc);
+ break;
+
+ case 0x26: /* EXTLL */
+ sc = (((uint32) rbv) << 3) & 0x3F;
+ res = (R[ra] >> sc) & M32;
+ break;
+
+ case 0x2B: /* INSLL */
+ sc = (((uint32) rbv) << 3) & 0x3F;
+ res = (R[ra] & M32) << sc;
+ break;
+
+ case 0x30: /* ZAP */
+ res = byte_zap (R[ra], (uint32) rbv);
+ break;
+
+ case 0x31: /* ZAPNOT */
+ res = byte_zap (R[ra], ~((uint32) rbv));
+ break;
+
+ case 0x32: /* MSKQL */
+ sc = ((uint32) rbv) & 7;
+ res = byte_zap (R[ra], 0xFF << sc);
+ break;
+
+ case 0x34: /* SRL */
+ sc = ((uint32) rbv) & 0x3F;
+ res = R[ra] >> sc;
+ break;
+
+ case 0x36: /* EXTQL */
+ sc = (((uint32) rbv) << 3) & 0x3F;
+ res = R[ra] >> sc;
+ break;
+
+ case 0x39: /* SLL */
+ sc = ((uint32) rbv) & 0x3F;
+ res = R[ra] << sc;
+ break;
+
+ case 0x3B: /* INSQL */
+ sc = (((uint32) rbv) << 3) & 0x3F;
+ res = R[ra] << sc;
+ break;
+
+ case 0x3C: /* SRA */
+ sc = ((uint32) rbv) & 0x3F;
+ res = (R[ra] >> sc);
+ if (sc && (R[ra] & Q_SIGN)) res = res |
+ (((t_uint64) M64) << (64 - sc));
+ break;
+
+ case 0x52: /* MSKWH */
+ sc = 8 - (((uint32) rbv) & 7);
+ res = byte_zap (R[ra], 0x3 >> sc);
+ break;
+
+ case 0x57: /* EXTWH */
+ sc = (64 - (((uint32) rbv) << 3)) & 0x3F;
+ res = (R[ra] << sc) & M16;
+ break;
+
+ case 0x5A: /* INSWH */
+ sc = (64 - (((uint32) rbv) << 3)) & 0x3F;
+ res = (R[ra] & M16) >> sc;
+ break;
+
+ case 0x62: /* MSKLH */
+ sc = 8 - (((uint32) rbv) & 7);
+ res = byte_zap (R[ra], 0xF >> sc);
+ break;
+
+ case 0x67: /* EXTLH */
+ sc = (64 - (((uint32) rbv) << 3)) & 0x3F;
+ res = (R[ra] << sc) & M32;
+ break;
+
+ case 0x6A: /* INSLH */
+ sc = (64 - (((uint32) rbv) << 3)) & 0x3F;
+ res = (R[ra] & M32) >> sc;
+ break;
+
+ case 0x72: /* MSKQH */
+ sc = 8 - (((uint32) rbv) & 7);
+ res = byte_zap (R[ra], 0xFF >> sc);
+ break;
+
+ case 0x77: /* EXTQH */
+ sc = (64 - (((uint32) rbv) << 3)) & 0x3F;
+ res = R[ra] << sc;
+ break;
+
+ case 0x7A: /* INSQH */
+ sc = (64 - (((uint32) rbv) << 3)) & 0x3F;
+ res = R[ra] >> sc;
+ break;
+
+ default:
+ res = R[rc];
+ break;
+ }
+
+ if (rc != 31) R[rc] = res & M64;
+ break;
+
+/* Integer multiply (13) */
+
+ case OP_IMUL: /* integer multiply */
+ rc = I_GETRC (ir); /* get rc */
+ if (ir & I_ILIT) rbv = I_GETLIT8 (ir); /* literal? rbv = lit */
+ else rbv = R[rb]; /* no, rbv = R[rb] */
+ fnc = I_GETIFNC (ir); /* get function */
+ switch (fnc) { /* case on function */
+
+ case 0x00: /* MULL */
+ s1 = SEXT_L_Q (R[ra]);
+ s2 = SEXT_L_Q (rbv);
+ sr = s1 * s2;
+ res = SEXT_L_Q (sr);
+ break;
+
+ case 0x20: /* MULQ */
+ res = uemul64 (R[ra], rbv, NULL); /* low 64b invariant */
+ break; /* with sign/unsigned */
+
+ case 0x30: /* UMULH */
+ uemul64 (R[ra], rbv, &res);
+ break;
+
+ case 0x40: /* MULL/V */
+ s1 = SEXT_L_Q (R[ra]);
+ s2 = SEXT_L_Q (rbv);
+ sr = s1 * s2;
+ res = SEXT_L_Q (sr);
+ if (((sr ^ res) & M64) != 0) /* overflow? */
+ arith_trap (TRAP_IOV, ir);
+ break;
+
+ case 0x60: /* MULQ/V */
+ res = uemul64 (R[ra], rbv, &t64);
+ if (Q_GETSIGN(R[ra]))
+ t64 = (t64 - rbv) & M64;
+ if (Q_GETSIGN(rbv))
+ t64 = (t64 - R[ra]) & M64;
+ if (Q_GETSIGN (res)? (t64 != M64): (t64 != 0))
+ arith_trap (TRAP_IOV, ir);
+ break;
+
+ default:
+ res = R[rc];
+ break;
+ }
+
+ if (rc != 31) R[rc] = res & M64;
+ break;
+
+/* FIX optional floating point set (14) */
+
+ case OP_IFLT: /* int to flt */
+ if (!(arch_mask & AMASK_FIX)) ABORT (EXC_RSVI); /* EV56 or later */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ rc = I_GETRC (ir); /* get rc */
+ fnc = I_GETFFNC (ir); /* get function */
+ switch (fnc) { /* case on function */
+
+ case 0x04: /* ITOFS */
+ if (ir & (I_FRND|I_FTRP)) ABORT (EXC_RSVI);
+ t32 = ((uint32) R[ra]) & M32;
+ res = op_lds (t32);
+ break;
+
+ case 0x0A: /* SQRTF */
+ if (ir & I_F_VAXRSV) ABORT (EXC_RSVI);
+ res = vax_sqrt (ir, DT_F);
+ break;
+
+ case 0x0B: /* SQRTS */
+ res = ieee_sqrt (ir, DT_S);
+ break;
+
+ case 0x14: /* ITOFF */
+ if (ir & (I_FRND|I_FTRP)) ABORT (EXC_RSVI);
+ t32 = ((uint32) R[ra]) & M32;
+ res = op_ldf (SWAP_VAXF (t32));
+ break;
+
+ case 0x24: /* ITOFT */
+ if (ir & (I_FRND|I_FTRP)) ABORT (EXC_RSVI);
+ res = R[ra];
+ break;
+
+ case 0x2A: /* SQRTG */
+ if (ir & I_F_VAXRSV) ABORT (EXC_RSVI);
+ res = vax_sqrt (ir, DT_G);
+ break;
+
+ case 0x2B: /* SQRTT */
+ res = ieee_sqrt (ir, DT_T);
+ break;
+
+ default:
+ ABORT (EXC_RSVI);
+ }
+
+ if (rc != 31) FR[rc] = res & M64;
+ break;
+
+/* VAX and IEEE floating point operates - done externally */
+
+ case OP_VAX: /* VAX fp opr */
+ if (ir & I_F_VAXRSV) ABORT (EXC_RSVI); /* reserved */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ vax_fop (ir);
+ break;
+
+ case OP_IEEE: /* IEEE fp opr */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ ieee_fop (ir);
+ break;
+
+/* Data type independent floating point (17) */
+
+ case OP_FP: /* other fp */
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ rc = I_GETRC (ir); /* get rc */
+ fnc = I_GETFFNC (ir); /* get function */
+ switch (fnc) { /* case on function */
+
+ case 0x10: /* CVTLQ */
+ res = ((FR[rb] >> 32) & 0xC0000000) | ((FR[rb] >> 29) & 0x3FFFFFFF);
+ res = SEXT_L_Q (res);
+ break;
+
+ case 0x20: /* CPYS */
+ res = (FR[ra] & FPR_SIGN) | (FR[rb] & ~FPR_SIGN);
+ break;
+
+ case 0x21: /* CPYSN */
+ res = ((FR[ra] & FPR_SIGN) ^ FPR_SIGN) | (FR[rb] & ~FPR_SIGN);
+ break;
+
+ case 0x22: /* CPYSE */
+ res = (FR[ra] & (FPR_SIGN|FPR_EXP)) | (FR[rb] & ~(FPR_SIGN|FPR_EXP));
+ break;
+
+ case 0x24: /* MT_FPCR */
+ fpcr = ((uint32) (FR[ra] >> 32)) & ~FPCR_RAZ;
+ res = FR[rc];
+ break;
+
+ case 0x25: /* MF_FPCR */
+ res = ((t_uint64) fpcr) << 32;
+ break;
+
+ case 0x2A: /* FCMOVEQ */
+ if ((FR[ra] & ~FPR_SIGN) == 0) res = FR[rb];
+ else res = FR[rc];
+ break;
+
+ case 0x2B: /* FCMOVNE */
+ if ((FR[ra] & ~FPR_SIGN) != 0) res = FR[rb];
+ else res = FR[rc];
+ break;
+
+ case 0x2C: /* FCMOVLT */
+ if (FR[ra] > FPR_SIGN) res = FR[rb];
+ else res = FR[rc];
+ break;
+
+ case 0x2D: /* FCMOVGE */
+ if (FR[ra] <= FPR_SIGN) res = FR[rb];
+ else res = FR[rc];
+ break;
+
+ case 0x2E: /* FCMOVLE */
+ if (FPR_GETSIGN (FR[ra]) || (FR[ra] == 0)) res = FR[rb];
+ else res = FR[rc];
+ break;
+
+ case 0x2F: /* FCMOVGT */
+ if (!FPR_GETSIGN (FR[ra]) && (FR[ra] != 0)) res = FR[rb];
+ else res = FR[rc];
+ break;
+
+ case 0x30: /* CVTQL */
+ res = ((FR[rb] & 0xC0000000) << 32) | ((FR[rb] & 0x3FFFFFFF) << 29);
+ if (FPR_GETSIGN (FR[rb])?
+ (FR[rb] < 0xFFFFFFFF80000000):
+ (FR[rb] > 0x000000007FFFFFFF)) {
+ fpcr = fpcr | FPCR_IOV | FPCR_INE | FPCR_SUM;
+ if (ir & I_FTRP_V) arith_trap (TRAP_IOV, ir);
+ }
+ break;
+
+ default:
+ res = FR[rc];
+ break;
+ }
+
+ if (rc != 31) FR[rc] = res & M64;
+ break;
+
+/* Barriers and misc (18)
+
+ Alpha has a weak memory ordering model and an imprecise exception model;
+ together, they require a wide variety of barrier instructions to guarantee
+ memory coherency in multiprocessor systems, as well as backward compatible
+ exception instruction semantics.
+
+ The simulator is uniprocessor only, and has ordered memory accesses and
+ precise exceptions. Therefore, the barriers are all NOP's. */
+
+ case OP_MISC: /* misc */
+ fnc = I_GETMDSP (ir); /* get function */
+ switch (fnc) { /* case on function */
+
+ case 0xC000: /* RPCC */
+ pcc_l = pcc_l & M32;
+ if (ra != 31) R[ra] = (((t_uint64) pcc_h) << 32) | ((t_uint64) pcc_l);
+ break;
+
+ case 0xE000: /* RC */
+ if (ra != 31) R[ra] = vax_flag;
+ vax_flag = 0;
+ break;
+
+ case 0xF000: /* RS */
+ if (ra != 31) R[ra] = vax_flag;
+ vax_flag = 1;
+ break;
+
+ default:
+ break;
+ }
+
+ break;
+
+/* Optional instruction sets (1C) */
+
+ case OP_FLTI: /* float to int */
+ rc = I_GETRC (ir); /* get rc */
+ if (ir & I_ILIT) rbv = I_GETLIT8 (ir); /* literal? rbv = lit */
+ else rbv = R[rb]; /* no, rbv = R[rb] */
+ fnc = I_GETIFNC (ir); /* get function */
+ switch (fnc) { /* case on function */
+
+ case 0x00: /* SEXTB */
+ if (!(arch_mask & AMASK_BWX)) ABORT (EXC_RSVI);
+ res = SEXT_B_Q (rbv);
+ break;
+
+ case 0x01: /* SEXTW */
+ if (!(arch_mask & AMASK_BWX)) ABORT (EXC_RSVI);
+ res = SEXT_W_Q (rbv);
+ break;
+
+ case 0x30: /* CTPOP */
+ if (!(arch_mask & AMASK_CIX)) ABORT (EXC_RSVI);
+ for (res = 0; rbv != 0; res++) {
+ rbv = rbv & ~(rbv & NEG_Q (rbv));
+ }
+ break;
+
+ case 0x31: /* PERR */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ for (i = 0, res = 0; i < 64; i = i + 8) {
+ s32 = (uint32) (R[ra] >> i) & M8;
+ t32 = (uint32) (rbv >> i) & M8;
+ res = res + ((t_uint64) (s32 >= t32)? (s32 - t32): (t32 - s32));
+ }
+ break;
+
+ case 0x32: /* CTLZ */
+ if (!(arch_mask & AMASK_CIX)) ABORT (EXC_RSVI);
+ for (i = 0, res = 0; i < 64; i++) {
+ if ((rbv >> (63 - i)) & 1) break;
+ res = res + 1;
+ }
+ break;
+
+ case 0x33: /* CTTZ */
+ if (!(arch_mask & AMASK_CIX)) ABORT (EXC_RSVI);
+ for (i = 0, res = 0; i < 64; i++) {
+ if ((rbv >> i) & 1) break;
+ res = res + 1;
+ }
+ break;
+
+ case 0x34: /* UNPKBL */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ res = ((rbv & 0xFF00) << 24) | (rbv & 0xFF);
+ break;
+
+ case 0x35: /* UNPKBW */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ res = ((rbv & 0xFF000000) << 24) | ((rbv & 0xFF0000) << 16) |
+ ((rbv & 0xFF00) << 8) | (rbv & 0xFF);
+ break;
+
+ case 0x36: /* PKWB */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ res = ((rbv >> 24) & 0xFF000000) | ((rbv >> 16) & 0xFF0000) |
+ ((rbv >> 8) & 0xFF00) | (rbv & 0xFF);
+ break;
+
+ case 0x37: /* PKLB */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ res = ((rbv >> 24) & 0xFF00) | (rbv & 0xFF);
+ break;
+
+ case 0x38: /* MINSB8 */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ for (i = 0, res = 0; i < 8; i++) {
+ s1 = SEXT_B_Q (R[ra] >> (i << 3));
+ s2 = SEXT_B_Q (rbv >> (i << 3));
+ res = res | (((s1 <= s2)? R[ra]: rbv) & byte_mask[i]);
+ }
+ break;
+
+ case 0x39: /* MINSW4 */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ for (i = 0, res = 0; i < 8; i = i++) {
+ s1 = SEXT_W_Q (R[ra] >> (i << 4));
+ s2 = SEXT_W_Q (rbv >> (i << 4));
+ res = res | (((s1 <= s2)? R[ra]: rbv) & word_mask[i]);
+ }
+ break;
+
+ case 0x3A: /* MINUB8 */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ for (i = 0, res = 0; i < 8; i++) {
+ s64 = R[ra] & byte_mask[i];
+ t64 = rbv & byte_mask[i];
+ res = res | ((s64 <= t64)? s64: t64);
+ }
+ break;
+
+ case 0x3B: /* MINUW4 */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ for (i = 0, res = 0; i < 8; i = i++) {
+ s64 = R[ra] & word_mask[i];
+ t64 = rbv & word_mask[i];
+ res = res | ((s64 <= t64)? s64: t64);
+ }
+ break;
+
+ case 0x3C: /* MAXUB8 */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ for (i = 0, res = 0; i < 8; i++) {
+ s64 = R[ra] & byte_mask[i];
+ t64 = rbv & byte_mask[i];
+ res = res | ((s64 >= t64)? s64: t64);
+ }
+ break;
+
+ case 0x3D: /* MAXUW4 */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ for (i = 0, res = 0; i < 8; i = i++) {
+ s64 = R[ra] & word_mask[i];
+ t64 = rbv & word_mask[i];
+ res = res | ((s64 >= t64)? s64: t64);
+ }
+ break;
+
+ case 0x3E: /* MAXSB8 */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ for (i = 0, res = 0; i < 8; i++) {
+ s1 = SEXT_B_Q (R[ra] >> (i << 3));
+ s2 = SEXT_B_Q (rbv >> (i << 3));
+ res = res | (((s1 >= s2)? R[ra]: rbv) & byte_mask[i]);
+ }
+ break;
+
+ case 0x3F: /* MAXSW4 */
+ if (!(arch_mask & AMASK_MVI)) ABORT (EXC_RSVI);
+ for (i = 0, res = 0; i < 8; i = i++) {
+ s1 = SEXT_W_Q (R[ra] >> (i << 4));
+ s2 = SEXT_W_Q (rbv >> (i << 4));
+ res = res | (((s1 >= s2)? R[ra]: rbv) & word_mask[i]);
+ }
+ break;
+
+ case 0x70: /* FTOIS */
+ if (!(arch_mask & AMASK_FIX)) ABORT (EXC_RSVI);
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ res = op_sts (FR[ra]);
+ break;
+
+ case 0x78: /* FTOIT */
+ if (!(arch_mask & AMASK_FIX)) ABORT (EXC_RSVI);
+ if (fpen == 0) ABORT (EXC_FPDIS); /* flt point disabled? */
+ res = FR[ra];
+ break;
+
+ default:
+ ABORT (EXC_RSVI);
+ }
+
+ if (rc != 31) R[rc] = res & M64;
+ break;
+
+/* PAL hardware functions */
+
+ case OP_PAL19:
+ reason = pal_19 (ir);
+ intr_summ = pal_eval_intr (1); /* eval interrupts */
+ break;
+
+ case OP_PAL1B:
+ reason = pal_1b (ir);
+ intr_summ = pal_eval_intr (1); /* eval interrupts */
+ break;
+
+ case OP_PAL1D:
+ reason = pal_1d (ir);
+ intr_summ = pal_eval_intr (1); /* eval interrupts */
+ break;
+
+ case OP_PAL1E:
+ reason = pal_1e (ir);
+ intr_summ = pal_eval_intr (1); /* eval interrupts */
+ break;
+
+ case OP_PAL1F:
+ reason = pal_1f (ir);
+ intr_summ = pal_eval_intr (1); /* eval interrupts */
+ break;
+
+ case OP_PAL: /* PAL code */
+ fnc = I_GETPAL (ir); /* get function code */
+ if ((fnc & 0x40) || (fnc >= 0xC0)) /* out of range? */
+ ABORT (EXC_RSVI);
+ reason = pal_proc_inst (fnc); /* processed externally */
+ intr_summ = pal_eval_intr (1); /* eval interrupts */
+ break;
+
+ default:
+ ABORT (EXC_RSVI);
+ } /* end case */
+ if (trap_summ) { /* any traps? */
+ reason = pal_proc_trap (trap_summ); /* process trap */
+ trap_summ = 0; /* clear trap reg */
+ trap_mask = 0;
+ intr_summ = pal_eval_intr (1); /* eval interrupts */
+ }
+ } /* end while */
+pcc_l = pcc_l & M32;
+pcq_r->qptr = pcq_p; /* update pc q ptr */
+pc_align = ((uint32) PC) & 3; /* separate PC<1:0> */
+PC = PC & 0xFFFFFFFFFFFFFFFC;
+return reason;
+}
+
+/* Utility routines */
+
+/* Byte zap function */
+
+t_uint64 byte_zap (t_uint64 op, uint32 m)
+{
+uint32 bit;
+
+/* Clear each byte of op whose corresponding mask bit <7:0> is set */
+for (bit = 0; bit < 8; bit++) {
+    if ((m >> bit) & 1) op = op & ~byte_mask[bit];
+    }
+return op;
+}
+
+/* 64b * 64b unsigned multiply */
+
+t_uint64 uemul64 (t_uint64 a, t_uint64 b, t_uint64 *hi)
+{
+t_uint64 a_hi, a_lo, b_hi, b_lo;
+t_uint64 hi64, lo64, cross1, cross2;
+
+/* Split both operands into 32b halves */
+a_hi = (a >> 32) & M32;
+a_lo = a & M32;
+b_hi = (b >> 32) & M32;
+b_lo = b & M32;
+
+/* Form the four 32b x 32b partial products */
+hi64 = a_hi * b_hi;
+cross1 = a_hi * b_lo;
+cross2 = a_lo * b_hi;
+lo64 = a_lo * b_lo;
+
+/* Fold the cross terms: upper halves into hi64, lower halves
+   (shifted up 32b) into lo64 with carry propagation */
+hi64 = hi64 + ((cross1 >> 32) & M32) + ((cross2 >> 32) & M32);
+cross1 = (cross1 << 32) & M64;
+cross2 = (cross2 << 32) & M64;
+lo64 = (lo64 + cross1) & M64;
+if (lo64 < cross1) hi64 = hi64 + 1;
+lo64 = (lo64 + cross2) & M64;
+if (lo64 < cross2) hi64 = hi64 + 1;
+if (hi) *hi = hi64 & M64;
+return lo64;
+}
+
+/* 64b / 64b unsigned fraction divide */
+
+t_uint64 ufdiv64 (t_uint64 dvd, t_uint64 dvr, uint32 prec, uint32 *sticky)
+{
+t_uint64 quo;
+uint32 i;
+
+/* Classic restoring divide on left-justified fractions: develop one
+   quotient bit per iteration, up to 'prec' bits, stopping early if the
+   dividend goes to zero (all remaining quotient bits would be 0).
+   NOTE(review): assumes prec <= UF_V_NM + 1; a larger prec would make
+   the final left-justification shift count negative -- confirm callers. */
+quo = 0; /* clear quotient */
+for (i = 0; (i < prec) && dvd; i++) { /* divide loop */
+    quo = quo << 1; /* shift quo */
+    if (dvd >= dvr) { /* div step ok? */
+        dvd = dvd - dvr; /* subtract */
+        quo = quo + 1; /* quo bit = 1 */
+        }
+    dvd = dvd << 1; /* shift divd */
+    }
+quo = quo << (UF_V_NM - i + 1); /* left-justify quotient */
+if (sticky) *sticky = (dvd? 1: 0); /* leftover dividend -> sticky */
+return quo; /* return quotient */
+}
+
+/* Set arithmetic trap */
+
+void arith_trap (uint32 mask, uint32 ir)
+{
+uint32 rc = I_GETRC (ir);
+
+/* Record a pending arithmetic trap: OR the cause bit(s) into the trap
+   summary, add TRAP_SWC if the instruction carries the /S (software
+   completion) qualifier, and mark the destination register in the trap
+   register mask.  For non-IOV (floating point) traps the register
+   number is offset by 32 -- presumably to select the FP register bank;
+   confirm against the PAL trap handler. */
+trap_summ = trap_summ | mask;
+if (ir & I_FTRP_S) trap_summ = trap_summ | TRAP_SWC;
+if ((mask & TRAP_IOV) == 0) rc = rc + 32;
+trap_mask = trap_mask | ((t_uint64) 1u << rc);
+return;
+}
+
+/* Reset */
+
+t_stat cpu_reset (DEVICE *dptr)
+{
+/* Reset the CPU: clear architectural flag state, start with data
+   mapping off and floating point enabled, allocate main memory on the
+   first call, and locate the PC queue register for the debugger. */
+R[31] = 0; /* R31/F31 always read as 0 */
+FR[31] = 0;
+pal_mode = 1; /* start in PAL mode */
+dmapen = 0; /* data mapping off */
+fpen = 1; /* flt point enabled */
+vax_flag = 0;
+lock_flag = 0;
+trap_summ = 0; /* no pending traps */
+trap_mask = 0;
+if (M == NULL) M = (t_uint64 *) calloc (((uint32) MEMSIZE) >> 3, sizeof (t_uint64));
+if (M == NULL) return SCPE_MEM;
+pcq_r = find_reg ("PCQ", NULL, dptr); /* find PC queue reg */
+if (pcq_r) pcq_r->qptr = 0;
+else return SCPE_IERR;
+sim_brk_types = sim_brk_dflt = SWMASK ('E'); /* execute breakpoints */
+return SCPE_OK;
+}
+
+/* Bootstrap */
+
+t_stat cpu_boot (int32 unitno, DEVICE *dptr)
+{
+/* BOOT CPU is not supported -- bootstrap is presumably handled by
+   console/PAL code rather than the simulator command. */
+return SCPE_ARG;
+}
+
+/* Memory examine */
+
+t_stat cpu_ex (t_value *vptr, t_addr addr, UNIT *uptr, int32 sw)
+{
+/* Examine memory; with -V and mapping enabled, console-translate first */
+if (vptr == NULL) return SCPE_ARG;
+if ((sw & SWMASK ('V')) && dmapen) { /* virtual request? */
+    addr = trans_c (addr); /* console translate */
+    if (addr == M64) return STOP_MME; /* translation failed */
+    }
+if (!ADDR_IS_MEM (addr)) return SCPE_NXM; /* beyond memory? */
+*vptr = ReadPQ (addr); /* read quadword */
+return SCPE_OK;
+}
+
+/* Memory deposit */
+
+t_stat cpu_dep (t_value val, t_addr addr, UNIT *uptr, int32 sw)
+{
+/* Deposit memory; with -V and mapping enabled, console-translate first */
+if ((sw & SWMASK ('V')) && dmapen) { /* virtual request? */
+    addr = trans_c (addr); /* console translate */
+    if (addr == M64) return STOP_MME; /* translation failed */
+    }
+if (!ADDR_IS_MEM (addr)) return SCPE_NXM; /* beyond memory? */
+WritePQ (addr, val); /* write quadword */
+return SCPE_OK;
+}
+
+/* Memory allocation */
+
+t_stat cpu_set_size (UNIT *uptr, int32 val, char *cptr, void *desc)
+{
+t_uint64 mc = 0;
+uint32 i, clim;
+t_uint64 *nM = NULL;
+
+/* Change memory size to 'val' bytes.  If shrinking would discard
+   non-zero contents, ask first.  Memory is an array of quadwords,
+   hence the >> 3 index scaling. */
+for (i = val; i < MEMSIZE; i = i + 8) mc = mc | M[i >> 3]; /* OR mem to be lost */
+if ((mc != 0) && !get_yn ("Really truncate memory [N]?", FALSE))
+    return SCPE_OK; /* user declined */
+nM = (t_uint64 *) calloc (val >> 3, sizeof (t_uint64)); /* zeroed new array */
+if (nM == NULL) return SCPE_MEM;
+clim = (uint32) ((((uint32) val) < MEMSIZE)? val: MEMSIZE); /* bytes to copy */
+for (i = 0; i < clim; i = i + 8) nM[i >> 3] = M[i >>3]; /* copy old contents */
+free (M);
+M = nM;
+MEMSIZE = val;
+return SCPE_OK;
+}
+
+/* Show virtual address */
+
+t_stat cpu_show_virt (FILE *of, UNIT *uptr, int32 val, void *desc)
+{
+t_stat r;
+char *cptr = (char *) desc;
+t_uint64 va, pa;
+
+/* SHOW CPU VIRTUAL=addr: run the given virtual address through the
+   console translation routine and print the physical result. */
+if (cptr) {
+    DEVICE *dptr = find_dev_from_unit (uptr);
+    if (dptr == NULL) return SCPE_IERR;
+    va = get_uint (cptr, 16, M64, &r); /* parse hex address */
+    if (r == SCPE_OK) {
+        pa = trans_c (va); /* console translate */
+        if (pa == M64) { /* M64 = translate failure */
+            fprintf (of, "Translation error\n");
+            return SCPE_OK;
+            }
+        fputs ("Virtual ", of);
+        fprint_val (of, va, 16, 64, PV_LEFT);
+        fputs (" = physical ", of);
+        fprint_val (of, pa, 16, 64, PV_LEFT);
+        fputc ('\n', of);
+        return SCPE_OK;
+        }
+    }
+fprintf (of, "Invalid argument\n");
+return SCPE_OK;
+}
+
+/* Set history */
+
+t_stat cpu_set_hist (UNIT *uptr, int32 val, char *cptr, void *desc)
+{
+uint32 i, lnt;
+t_stat r;
+
+/* SET CPU HISTORY[=n]: no argument clears the log; n = 0 disables it;
+   otherwise reallocate the ring buffer at the new depth */
+if (cptr == NULL) { /* no argument? */
+    hst_p = 0; /* just clear the log */
+    for (i = 0; i < hst_lnt; i++)
+        hst[i].pc = 0;
+    return SCPE_OK;
+    }
+lnt = (uint32) get_uint (cptr, 10, HIST_MAX, &r); /* parse new depth */
+if ((r != SCPE_OK) || (lnt && (lnt < HIST_MIN)))
+    return SCPE_ARG;
+hst_p = 0; /* reset ring pointer */
+if (hst_lnt != 0) { /* release old buffer */
+    free (hst);
+    hst = NULL;
+    hst_lnt = 0;
+    }
+if (lnt != 0) { /* allocate new buffer */
+    hst = (InstHistory *) calloc (lnt, sizeof (InstHistory));
+    if (hst == NULL) return SCPE_MEM;
+    hst_lnt = lnt;
+    }
+return SCPE_OK;
+}
+
+/* Print instruction trace */
+
+t_stat cpu_fprint_one_inst (FILE *st, uint32 ir, t_uint64 pc, t_uint64 ra, t_uint64 rb)
+{
+uint32 op;
+t_value sim_val;
+extern t_stat fprint_sym (FILE *ofile, t_addr addr, t_value *val,
+    UNIT *uptr, int32 sw);
+
+/* Per-opcode display format flags, indexed by the 6b opcode.  The
+   declaration previously had no type specifier (implicit int), which
+   is invalid in C99 and later; give it an explicit element type. */
+static const uint32 h_fmt[64] = {
+    0, 0, 0, 0, 0, 0, 0, 0,
+    H_MRF, H_MRF, H_MRF, H_MRF, H_MRF, H_MRF, H_MRF, H_MRF,
+    H_IOP, H_IOP, H_IOP, H_IOP, H_FOP, H_FOP, H_FOP, H_FOP,
+    0, H_PAL, H_JMP, H_PAL, H_FOP, H_PAL, H_PAL, H_PAL,
+    H_MRF, H_MRF, H_MRF, H_MRF, H_MRF, H_MRF, H_MRF, H_MRF,
+    H_MRF, H_MRF, H_MRF, H_MRF, H_MRF, H_MRF, H_MRF, H_MRF,
+    H_BRA, H_BRA, H_BRA, H_BRA, H_BRA, H_BRA, H_BRA, H_BRA,
+    H_BRA, H_BRA, H_BRA, H_BRA, H_BRA, H_BRA, H_BRA, H_BRA
+    };
+
+/* Print one history entry: PC, then Ra/Rb values and effective address
+   when the opcode's format calls for them, then the disassembly */
+pc = pc & ~HIST_PC; /* clear history valid flag */
+fprint_val (st, pc, 16, 64, PV_RZRO);
+fputc (' ', st);
+op = I_GETOP (ir); /* get opcode */
+if (h_fmt[op] & H_A) fprint_val (st, ra, 16, 64, PV_RZRO);
+else fputs (" ", st);
+fputc (' ', st);
+if (h_fmt[op] & H_B) { /* Rb? */
+    t_uint64 rbv;
+    if ((h_fmt[op] & H_B_LIT) && (ir & I_ILIT))
+        rbv = I_GETLIT8 (ir); /* literal? rbv = lit */
+    else rbv = rb; /* no, rbv = R[rb] */
+    fprint_val (st, rbv, 16, 64, PV_RZRO);
+    }
+else fputs (" ", st);
+fputc (' ', st);
+if (h_fmt[op] & H_EA) { /* ea? */
+    t_uint64 ea;
+    if (h_fmt[op] & H_EA_L16) ea = ir & M16; /* 16b literal */
+    else if (h_fmt[op] & H_EA_B) /* branch target */
+        ea = (pc + (SEXT_BDSP (I_GETBDSP (ir)) << 2)) & M64;
+    else ea = (rb + SEXT_MDSP (I_GETMDSP (ir))) & M64; /* base + disp */
+    fprint_val (st, ea, 16, 64, PV_RZRO);
+    }
+else fputs (" ", st);
+fputc (' ', st);
+if (pc & 4) sim_val = ((t_uint64) ir) << 32; /* odd longword? hi half */
+else sim_val = ir;
+if ((fprint_sym (st, pc & ~03, &sim_val, &cpu_unit, SWMASK ('M'))) > 0)
+    fprintf (st, "(undefined) %08X", ir); /* not decodable */
+fputc ('\n', st); /* end line */
+return SCPE_OK;
+}
+
+/* Show history */
+
+t_stat cpu_show_hist (FILE *st, UNIT *uptr, int32 val, void *desc)
+{
+uint32 k, lnt;
+int32 di; /* must be signed: hst_p - lnt can be negative */
+char *cptr = (char *) desc;
+t_stat r;
+InstHistory *h;
+
+/* SHOW CPU HISTORY[=n]: list the most recent n entries (default all)
+   of the instruction history ring, oldest first.  'di' was previously
+   unsigned, so the "if (di < 0)" wrap test could never fire and the
+   listing could start at the wrong ring slot; it is now int32. */
+if (hst_lnt == 0) return SCPE_NOFNC; /* enabled? */
+if (cptr) { /* optional count arg */
+    lnt = (uint32) get_uint (cptr, 10, hst_lnt, &r);
+    if ((r != SCPE_OK) || (lnt == 0)) return SCPE_ARG;
+    }
+else lnt = hst_lnt;
+di = (int32) (hst_p - lnt); /* work forward */
+if (di < 0) di = di + (int32) hst_lnt; /* wrap into ring */
+fprintf (st, "PC Ra Rb IR\n\n");
+for (k = 0; k < lnt; k++) { /* print specified */
+    h = &hst[((uint32) ++di) % hst_lnt]; /* entry pointer */
+    if (h->pc & HIST_PC) { /* instruction? */
+        cpu_fprint_one_inst (st, h->ir, h->pc, h->ra, h->rb);
+        } /* end if */
+    } /* end for */
+return SCPE_OK;
+}
diff --git a/alpha/alpha_defs.h b/alpha/alpha_defs.h
new file mode 100644
index 00000000..6f1ee907
--- /dev/null
+++ b/alpha/alpha_defs.h
@@ -0,0 +1,457 @@
+/* alpha_defs.h: Alpha architecture definitions file
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ Respectfully dedicated to the great people of the Alpha chip, systems, and
+ software development projects; and to the memory of Peter Conklin, of the
+ Alpha Program Office.
+*/
+
+#ifndef _ALPHA_DEFS_H_
+#define _ALPHA_DEFS_H_ 0
+
+#include "sim_defs.h"
+#include <setjmp.h>
+
+#if defined (__GNUC__)
+#define INLINE inline
+#else
+#define INLINE
+#endif
+
+/* Configuration */
+
+#define INITMEMSIZE (1 << 24) /* !!debug!! */
+#define MEMSIZE (cpu_unit.capac)
+#define ADDR_IS_MEM(x) ((x) < MEMSIZE)
+#define DEV_DIB (1u << (DEV_V_UF + 0)) /* takes a DIB */
+
+/* Simulator stops */
+
+#define STOP_HALT 1 /* halt */
+#define STOP_IBKPT 2 /* breakpoint */
+#define STOP_NSPAL 3 /* non-supported PAL */
+#define STOP_KSNV 4 /* kernel stk inval */
+#define STOP_INVABO 5 /* invalid abort code */
+#define STOP_MME 6 /* console mem mgt error */
+
+/* Bit patterns */
+
+#define M8 0xFF
+#define M16 0xFFFF
+#define M32 0xFFFFFFFF
+#define M64 0xFFFFFFFFFFFFFFFF
+#define B_SIGN 0x80
+#define W_SIGN 0x8000
+#define L_SIGN 0x80000000
+#define Q_SIGN 0x8000000000000000
+#define Q_GETSIGN(x) (((uint32) ((x) >> 63)) & 1)
+
+/* Architectural variants */
+
+#define AMASK_BWX 0x0001 /* byte/word */
+#define AMASK_FIX 0x0002 /* sqrt/flt-int moves */
+#define AMASK_CIX 0x0004 /* counts */
+#define AMASK_MVI 0x0100 /* multimedia */
+#define AMASK_PRC 0x0200 /* precise exceptions */
+#define AMASK_PFM 0x1000 /* prefetch w modify */
+
+#define IMPLV_EV4 0x0 /* EV4 (21064) */
+#define IMPLV_EV5 0x1 /* EV5 (21164) */
+#define IMPLV_EV6 0x2 /* EV6 (21264) */
+#define IMPLV_EV7 0x3 /* EV7 (21364) */
+
+/* Instruction formats */
+
+#define I_V_OP 26 /* opcode */
+#define I_M_OP 0x3F
+#define I_OP (I_M_OP << I_V_OP)
+#define I_V_RA 21 /* Ra */
+#define I_M_RA 0x1F
+#define I_V_RB 16 /* Rb */
+#define I_M_RB 0x1F
+#define I_V_FTRP 13 /* floating trap mode */
+#define I_M_FTRP 0x7
+#define I_FTRP (I_M_FTRP << I_V_FTRP)
+#define I_F_VAXRSV 0x4800 /* VAX reserved */
+#define I_FTRP_V 0x2000 /* /V trap */
+#define I_FTRP_U 0x2000 /* /U trap */
+#define I_FTRP_S 0x8000 /* /S trap */
+#define I_FTRP_SUI 0xE000 /* /SUI trap */
+#define I_FTRP_SVI 0xE000 /* /SVI trap */
+#define I_V_FRND 11 /* floating round mode */
+#define I_M_FRND 0x3
+#define I_FRND (I_M_FRND << I_V_FRND)
+#define I_FRND_C 0 /* chopped */
+#define I_FRND_M 1 /* to minus inf */
+#define I_FRND_N 2 /* normal */
+#define I_FRND_D 3 /* dynamic */
+#define I_FRND_P 3 /* in FPCR: plus inf */
+#define I_V_FSRC 9 /* floating source */
+#define I_M_FSRC 0x3
+#define I_FSRC (I_M_FSRC << I_V_FSRC)
+#define I_FSRC_X 0x0200 /* data type X */
+#define I_V_FFNC 5 /* floating function */
+#define I_M_FFNC 0x3F
+#define I_V_LIT8 13 /* integer 8b literal */
+#define I_M_LIT8 0xFF
+#define I_V_ILIT 12 /* literal flag */
+#define I_ILIT (1u << I_V_ILIT)
+#define I_V_IFNC 5 /* integer function */
+#define I_M_IFNC 0x3F
+#define I_V_RC 0 /* Rc */
+#define I_M_RC 0x1F
+#define I_V_MDSP 0 /* memory displacement */
+#define I_M_MDSP 0xFFFF
+#define I_V_BDSP 0
+#define I_M_BDSP 0x1FFFFF /* branch displacement */
+#define I_V_PALOP 0
+#define I_M_PALOP 0x3FFFFFF /* PAL subopcode */
+#define I_GETOP(x) (((x) >> I_V_OP) & I_M_OP)
+#define I_GETRA(x) (((x) >> I_V_RA) & I_M_RA)
+#define I_GETRB(x) (((x) >> I_V_RB) & I_M_RB)
+#define I_GETLIT8(x) (((x) >> I_V_LIT8) & I_M_LIT8)
+#define I_GETIFNC(x) (((x) >> I_V_IFNC) & I_M_IFNC)
+#define I_GETFRND(x) (((x) >> I_V_FRND) & I_M_FRND)
+#define I_GETFFNC(x) (((x) >> I_V_FFNC) & I_M_FFNC)
+#define I_GETRC(x) (((x) >> I_V_RC) & I_M_RC)
+#define I_GETMDSP(x) (((x) >> I_V_MDSP) & I_M_MDSP)
+#define I_GETBDSP(x) (((x) >> I_V_BDSP) & I_M_BDSP)
+#define I_GETPAL(x) (((x) >> I_V_PALOP) & I_M_PALOP)
+
+/* Floating point types */
+
+#define DT_F 0 /* type F */
+#define DT_G 1 /* type G */
+#define DT_S 0 /* type S */
+#define DT_T 1 /* type T */
+
+/* Floating point memory format (VAX F) */
+
+#define F_V_SIGN 15
+#define F_SIGN (1u << F_V_SIGN)
+#define F_V_EXP 7
+#define F_M_EXP 0xFF
+#define F_BIAS 0x80
+#define F_EXP (F_M_EXP << F_V_EXP)
+#define F_V_FRAC 29
+#define F_GETEXP(x) ((uint32) (((x) >> F_V_EXP) & F_M_EXP))
+#define SWAP_VAXF(x) ((((x) >> 16) & 0xFFFF) | (((x) & 0xFFFF) << 16))
+
+/* Floating point memory format (VAX G) */
+
+#define G_V_SIGN 15
+#define G_SIGN (1u << F_V_SIGN)
+#define G_V_EXP 4
+#define G_M_EXP 0x7FF
+#define G_BIAS 0x400
+#define G_EXP (G_M_EXP << G_V_EXP)
+#define G_GETEXP(x) ((uint32) (((x) >> G_V_EXP) & G_M_EXP))
+#define SWAP_VAXG(x) ((((x) & 0x000000000000FFFF) << 48) | \
+ (((x) & 0x00000000FFFF0000) << 16) | \
+ (((x) >> 16) & 0x00000000FFFF0000) | \
+ (((x) >> 48) & 0x000000000000FFFF))
+
+/* Floating memory format (IEEE S) */
+
+#define S_V_SIGN 31
+#define S_SIGN (1u << S_V_SIGN)
+#define S_V_EXP 23
+#define S_M_EXP 0xFF
+#define S_BIAS 0x7F
+#define S_NAN 0xFF
+#define S_EXP (S_M_EXP << S_V_EXP)
+#define S_V_FRAC 29
+#define S_GETEXP(x) ((uint32) (((x) >> S_V_EXP) & S_M_EXP))
+
+/* Floating point memory format (IEEE T) */
+
+#define T_V_SIGN 63
+#define T_SIGN 0x8000000000000000
+#define T_V_EXP 52
+#define T_M_EXP 0x7FF
+#define T_BIAS 0x3FF
+#define T_NAN 0x7FF
+#define T_EXP 0x7FF0000000000000
+#define T_FRAC 0x000FFFFFFFFFFFFF
+#define T_GETEXP(x) ((uint32) (((uint32) ((x) >> T_V_EXP)) & T_M_EXP))
+
+/* Floating point register format (all except VAX D) */
+
+#define FPR_V_SIGN 63
+#define FPR_SIGN 0x8000000000000000
+#define FPR_V_EXP 52
+#define FPR_M_EXP 0x7FF
+#define FPR_NAN 0x7FF
+#define FPR_EXP 0x7FF0000000000000
+#define FPR_HB 0x0010000000000000
+#define FPR_FRAC 0x000FFFFFFFFFFFFF
+#define FPR_GUARD (UF_V_NM - FPR_V_EXP)
+#define FPR_GETSIGN(x) (((uint32) ((x) >> FPR_V_SIGN)) & 1)
+#define FPR_GETEXP(x) (((uint32) ((x) >> FPR_V_EXP)) & FPR_M_EXP)
+#define FPR_GETFRAC(x) ((x) & FPR_FRAC)
+
+#define FP_TRUE 0x4000000000000000 /* 0.5/2.0 in reg */
+
+/* Floating point register format (VAX D) */
+
+#define FDR_V_SIGN 63
+#define FDR_SIGN 0x8000000000000000
+#define FDR_V_EXP 55
+#define FDR_M_EXP 0xFF
+#define FDR_EXP 0x7F80000000000000
+#define FDR_HB 0x0080000000000000
+#define FDR_FRAC 0x007FFFFFFFFFFFFF
+#define FDR_GUARD (UF_V_NM - FDR_V_EXP)
+#define FDR_GETSIGN(x) (((uint32) ((x) >> FDR_V_SIGN)) & 1)
+#define FDR_GETEXP(x) (((uint32) ((x) >> FDR_V_EXP)) & FDR_M_EXP)
+#define FDR_GETFRAC(x) ((x) & FDR_FRAC)
+
+#define D_BIAS 0x80
+
+/* Unpacked floating point number */
+
+typedef struct { /* unpacked flt point number */
+    uint32 sign; /* sign: nonzero = negative (see FPR_GETSIGN) */
+    int32 exp; /* exponent -- bias convention set by packer/unpacker; confirm per type */
+    t_uint64 frac; /* fraction, normalized bit at <63> (UF_NM) */
+    } UFP;
+
+#define UF_V_NM 63
+#define UF_NM 0x8000000000000000 /* normalized */
+
+/* IEEE control register (left 32b only) */
+
+#define FPCR_SUM 0x80000000 /* summary */
+#define FPCR_INED 0x40000000 /* inexact disable */
+#define FPCR_UNFD 0x20000000 /* underflow disable */
+#define FPCR_UNDZ 0x10000000 /* underflow to 0 */
+#define FPCR_V_RMOD 26 /* rounding mode */
+#define FPCR_M_RMOD 0x3
+#define FPCR_IOV 0x02000000 /* integer overflow */
+#define FPCR_INE 0x01000000 /* inexact */
+#define FPCR_UNF 0x00800000 /* underflow */
+#define FPCR_OVF 0x00400000 /* overflow */
+#define FPCR_DZE 0x00200000 /* div by zero */
+#define FPCR_INV 0x00100000 /* invalid operation */
+#define FPCR_OVFD 0x00080000 /* overflow disable */
+#define FPCR_DZED 0x00040000 /* div by zero disable */
+#define FPCR_INVD 0x00020000 /* invalid op disable */
+#define FPCR_DNZ 0x00010000 /* denormal to zero */
+#define FPCR_DNOD 0x00008000 /* denormal disable */
+#define FPCR_RAZ 0x00007FFF /* zero */
+#define FPCR_ERR (FPCR_IOV|FPCR_INE|FPCR_UNF|FPCR_OVF|FPCR_DZE|FPCR_INV)
+#define FPCR_GETFRND(x) (((x) >> FPCR_V_RMOD) & FPCR_M_RMOD)
+
+/* PTE - hardware format */
+
+#define PTE_V_PFN 32 /* PFN */
+#define PFN_MASK 0xFFFFFFFF
+#define PTE_V_UWE 15 /* write enables */
+#define PTE_V_SWE 14
+#define PTE_V_EWE 13
+#define PTE_V_KWE 12
+#define PTE_V_URE 11 /* read enables */
+#define PTE_V_SRE 10
+#define PTE_V_ERE 9
+#define PTE_V_KRE 8
+#define PTE_V_GH 5 /* granularity hint */
+#define PTE_M_GH 0x3
+#define PTE_GH (PTE_M_GH << PTE_V_GH)
+#define PTE_V_ASM 4 /* address space match */
+#define PTE_V_FOE 3 /* fault on execute */
+#define PTE_V_FOW 2 /* fault on write */
+#define PTE_V_FOR 1 /* fault on read */
+#define PTE_V_V 0 /* valid */
+#define PTE_UWE (1u << PTE_V_UWE)
+#define PTE_SWE (1u << PTE_V_SWE)
+#define PTE_EWE (1u << PTE_V_EWE)
+#define PTE_KWE (1u << PTE_V_KWE)
+#define PTE_URE (1u << PTE_V_URE)
+#define PTE_SRE (1u << PTE_V_SRE)
+#define PTE_ERE (1u << PTE_V_ERE)
+#define PTE_KRE (1u << PTE_V_KRE)
+#define PTE_ASM (1u << PTE_V_ASM)
+#define PTE_FOE (1u << PTE_V_FOE)
+#define PTE_FOW (1u << PTE_V_FOW)
+#define PTE_FOR (1u << PTE_V_FOR)
+#define PTE_V (1u << PTE_V_V)
+#define PTE_MASK 0xFF7F
+#define PTE_GETGH(x) ((((uint32) (x)) >> PTE_V_GH) & PTE_M_GH)
+#define VPN_GETLVL1(x) (((x) >> ((2 * VA_N_LVL) - 3)) & (VA_M_LVL << 3))
+#define VPN_GETLVL2(x) (((x) >> (VA_N_LVL - 3)) & (VA_M_LVL << 3))
+#define VPN_GETLVL3(x) (((x) << 3) & (VA_M_LVL << 3))
+
+#define ACC_E(m) ((PTE_KRE << (m)) | PTE_FOE | PTE_V)
+#define ACC_R(m) ((PTE_KRE << (m)) | PTE_FOR | PTE_V)
+#define ACC_W(m) ((PTE_KWE << (m)) | PTE_FOW | PTE_V)
+#define ACC_M(m) (((PTE_KRE|PTE_KWE) << (m)) | PTE_FOR | PTE_FOW | PTE_V)
+
+/* Exceptions */
+
+#define ABORT(x) longjmp (save_env, (x))
+#define ABORT1(x,y) { p1 = (x); longjmp (save_env, (y)); }
+
+#define EXC_RSVI 0x01 /* reserved instruction */
+#define EXC_RSVO 0x02 /* reserved operand */
+#define EXC_ALIGN 0x03 /* operand alignment */
+#define EXC_FPDIS 0x04 /* flt point disabled */
+#define EXC_TBM 0x08 /* TLB miss */
+#define EXC_FOX 0x10 /* fault on r/w/e */
+#define EXC_ACV 0x14 /* access control viol */
+#define EXC_TNV 0x18 /* translation not valid */
+#define EXC_BVA 0x1C /* bad address format */
+#define EXC_E 0x00 /* offset for execute */
+#define EXC_R 0x01 /* offset for read */
+#define EXC_W 0x02 /* offset for write */
+
+/* Traps - corresponds to arithmetic trap summary register */
+
+#define TRAP_SWC 0x001 /* software completion */
+#define TRAP_INV 0x002 /* invalid operand */
+#define TRAP_DZE 0x004 /* divide by zero */
+#define TRAP_OVF 0x008 /* overflow */
+#define TRAP_UNF 0x010 /* underflow */
+#define TRAP_INE 0x020 /* inexact */
+#define TRAP_IOV 0x040 /* integer overflow */
+#define TRAP_SUMM_RW 0x07F
+
+/* PALcode */
+
+#define SP R[30] /* stack pointer */
+#define MODE_K 0 /* kernel */
+#define MODE_E 1 /* executive (UNIX user) */
+#define MODE_S 2 /* supervisor */
+#define MODE_U 3 /* user */
+
+#define PAL_UNDF 0 /* undefined */
+#define PAL_VMS 1 /* VMS */
+#define PAL_UNIX 2 /* UNIX */
+#define PAL_NT 3 /* Windows NT */
+
+/* Machine check error summary register */
+
+#define MCES_INP 0x01 /* in progress */
+#define MCES_SCRD 0x02 /* sys corr in prog */
+#define MCES_PCRD 0x04 /* proc corr in prog */
+#define MCES_DSCRD 0x08 /* disable system corr */
+#define MCES_DPCRD 0x10 /* disable proc corr */
+#define MCES_W1C (MCES_INP|MCES_SCRD|MCES_PCRD)
+#define MCES_DIS (MCES_DSCRD|MCES_DPCRD)
+
+/* I/O devices */
+
+#define L_BYTE 0 /* IO request lengths */
+#define L_WORD 1
+#define L_LONG 2
+#define L_QUAD 3
+
+/* Device information block */
+
+typedef struct { /* device info block */
+ t_uint64 low; /* low addr */
+ t_uint64 high; /* high addr */
+ t_bool (*read)(t_uint64 pa, t_uint64 *val, uint32 lnt);
+ t_bool (*write)(t_uint64 pa, t_uint64 val, uint32 lnt);
+ uint32 ipl;
+ } DIB;
+
+/* Interrupt system - 6 levels in EV4 and EV6, 4 in EV5 - software expects 4 */
+
+#define IPL_HMAX 0x17 /* highest hwre level */
+#define IPL_HMIN 0x14 /* lowest hwre level */
+#define IPL_HLVL (IPL_HMAX - IPL_HMIN + 1) /* # hardware levels */
+#define IPL_SMAX 0x0F /* highest swre level */
+
+/* Macros */
+
+#define PCQ_SIZE 64 /* must be 2**n */
+#define PCQ_MASK (PCQ_SIZE - 1)
+#define PCQ_ENTRY pcq[pcq_p = (pcq_p - 1) & PCQ_MASK] = (PC - 4) & M64
+
+#define SEXT_B_Q(x) (((x) & B_SIGN)? ((x) | ~((t_uint64) M8)): ((x) & M8))
+#define SEXT_W_Q(x) (((x) & W_SIGN)? ((x) | ~((t_uint64) M16)): ((x) & M16))
+#define SEXT_L_Q(x) (((x) & L_SIGN)? ((x) | ~((t_uint64) M32)): ((x) & M32))
+#define NEG_Q(x) ((~(x) + 1) & M64)
+#define ABS_Q(x) (((x) & Q_SIGN)? NEG_Q (x): (x))
+
+#define SIGN_BDSP 0x100000
+#define SIGN_MDSP 0x008000
+#define SEXT_MDSP(x) (((x) & SIGN_MDSP)? \
+ ((x) | ~((t_uint64) I_M_MDSP)): ((x) & I_M_MDSP))
+#define SEXT_BDSP(x) (((x) & SIGN_BDSP)? \
+ ((x) | ~((t_uint64) I_M_BDSP)): ((x) & I_M_BDSP))
+
+/* Opcodes */
+
+enum opcodes {
+ OP_PAL, OP_OPC01, OP_OPC02, OP_OPC03,
+ OP_OPC04, OP_OPC05, OP_OPC06, OP_OPC07,
+ OP_LDA, OP_LDAH, OP_LDBU, OP_LDQ_U,
+ OP_LDWU, OP_STW, OP_STB, OP_STQ_U,
+ OP_IALU, OP_ILOG, OP_ISHFT, OP_IMUL,
+ OP_IFLT, OP_VAX, OP_IEEE, OP_FP,
+ OP_MISC, OP_PAL19, OP_JMP, OP_PAL1B,
+ OP_FLTI, OP_PAL1D, OP_PAL1E, OP_PAL1F,
+ OP_LDF, OP_LDG, OP_LDS, OP_LDT,
+ OP_STF, OP_STG, OP_STS, OP_STT,
+ OP_LDL, OP_LDQ, OP_LDL_L, OP_LDQ_L,
+ OP_STL, OP_STQ, OP_STL_C, OP_STQ_C,
+ OP_BR, OP_FBEQ, OP_FBLT, OP_FBLE,
+ OP_BSR, OP_FBNE, OP_FBGE, OP_FBGT,
+ OP_BLBC, OP_BEQ, OP_BLT, OP_BLE,
+ OP_BLBS, OP_BNE, OP_BGE, OP_BGT
+ };
+
+/* Function prototypes */
+
+uint32 ReadI (t_uint64 va);
+t_uint64 ReadB (t_uint64 va);
+t_uint64 ReadW (t_uint64 va);
+t_uint64 ReadL (t_uint64 va);
+t_uint64 ReadQ (t_uint64 va);
+t_uint64 ReadAccL (t_uint64 va, uint32 acc);
+t_uint64 ReadAccQ (t_uint64 va, uint32 acc);
+INLINE t_uint64 ReadPB (t_uint64 pa);
+INLINE t_uint64 ReadPW (t_uint64 pa);
+INLINE t_uint64 ReadPL (t_uint64 pa);
+INLINE t_uint64 ReadPQ (t_uint64 pa);
+t_bool ReadIO (t_uint64 pa, t_uint64 *val, uint32 lnt);
+void WriteB (t_uint64 va, t_uint64 dat);
+void WriteW (t_uint64 va, t_uint64 dat);
+void WriteL (t_uint64 va, t_uint64 dat);
+void WriteQ (t_uint64 va, t_uint64 dat);
+void WriteAccL (t_uint64 va, t_uint64 dat, uint32 acc);
+void WriteAccQ (t_uint64 va, t_uint64 dat, uint32 acc);
+INLINE void WritePB (t_uint64 pa, t_uint64 dat);
+INLINE void WritePW (t_uint64 pa, t_uint64 dat);
+INLINE void WritePL (t_uint64 pa, t_uint64 dat);
+INLINE void WritePQ (t_uint64 pa, t_uint64 dat);
+t_bool WriteIO (t_uint64 pa, t_uint64 val, uint32 lnt);
+uint32 mmu_set_cm (uint32 mode);
+void mmu_set_icm (uint32 mode);
+void mmu_set_dcm (uint32 mode);
+void arith_trap (uint32 trap, uint32 ir);
+
+#endif
diff --git a/alpha/alpha_ev5_cons.c b/alpha/alpha_ev5_cons.c
new file mode 100644
index 00000000..fb576705
--- /dev/null
+++ b/alpha/alpha_ev5_cons.c
@@ -0,0 +1,143 @@
+/* alpha_ev5_cons.c - Alpha console support routines for EV5
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+*/
+
+#include "alpha_defs.h"
+#include "alpha_ev5_defs.h"
+
+t_uint64 srm_ptbr = 1;
+
+extern uint32 dtlb_spage;
+extern uint32 pal_type;
+extern uint32 ev5_mcsr;
+extern t_uint64 *M;
+extern t_uint64 ev5_mvptbr;
+extern UNIT cpu_unit;
+
+/* Local quadword physical read - exceptions or IO space lookups */
+
+t_stat l_ReadPQ (t_uint64 pa, t_uint64 *dat)
+{
+if (ADDR_IS_MEM (pa)) {                                 /* pa within main memory? */
+    *dat = M[pa >> 3];                                  /* fetch aligned quadword */
+    return TRUE;                                        /* NOTE(review): returns TRUE/FALSE despite t_stat type - t_bool may be intended */
+    }
+return FALSE;                                           /* IO space / out of range */
+}
+
+/* "SRM" 3-level pte lookup
+
+ Inputs:
+ va = virtual address
+ *pte = pointer to pte to be returned
+ Output:
+ status = 0 for successful fill
+ EXC_ACV for ACV on intermediate level
+ EXC_TNV for TNV on intermediate level
+*/
+
+uint32 cons_find_pte_srm (t_uint64 va, t_uint64 *l3pte)
+{
+t_uint64 vptea, l1ptea, l2ptea, l3ptea, l1pte, l2pte;
+uint32 vpte_vpn;
+TLBENT *vpte_p;
+
+vptea = FMT_MVA_VMS (va);                               /* try virt lookup */
+vpte_vpn = VA_GETVPN (vptea);                           /* get vpte vpn */
+vpte_p = dtlb_lookup (vpte_vpn);                        /* get vpte tlb ptr */
+if (vpte_p && ((vpte_p->pte & (PTE_KRE|PTE_V)) == (PTE_KRE|PTE_V)))
+    l3ptea = PHYS_ADDR (vpte_p->pfn, vptea);            /* TLB hit: phys addr of L3 pte */
+else {                                                  /* else walk 3-level table */
+    uint32 vpn = VA_GETVPN (va);
+    if (srm_ptbr & 1) return 1;                         /* uninitialized? */
+    l1ptea = srm_ptbr + VPN_GETLVL1 (vpn);              /* index L1 table (byte offset) */
+    if (!l_ReadPQ (l1ptea, &l1pte)) return 1;           /* L1 pte unreadable? */
+    if ((l1pte & PTE_V) == 0)                           /* L1 pte invalid? */
+        return ((l1pte & PTE_KRE)? EXC_TNV: EXC_ACV);   /* readable -> TNV, else ACV */
+    l2ptea = (l1pte & PFN_MASK) >> (PTE_V_PFN - VA_N_OFF);
+    l2ptea = l2ptea + VPN_GETLVL2 (vpn);                /* index L2 table */
+    if (!l_ReadPQ (l2ptea, &l2pte)) return 1;           /* L2 pte unreadable? */
+    if ((l2pte & PTE_V) == 0)                           /* L2 pte invalid? */
+        return ((l2pte & PTE_KRE)? EXC_TNV: EXC_ACV);   /* readable -> TNV, else ACV */
+    l3ptea = (l2pte & PFN_MASK) >> (PTE_V_PFN - VA_N_OFF);
+    l3ptea = l3ptea + VPN_GETLVL3 (vpn);                /* index L3 table */
+    }
+if (!l_ReadPQ (l3ptea, l3pte)) return 1;                /* read L3 pte for caller */
+return 0;
+}
+
+/* NT 2-level pte lookup
+
+ Inputs:
+ va = virtual address
+ *pte = pointer to pte to be returned
+ Output:
+ status = 0 for successful fill
+ EXC_ACV for ACV on intermediate level
+ EXC_TNV for TNV on intermediate level
+*/
+
+uint32 cons_find_pte_nt (t_uint64 va, t_uint64 *l3pte)
+{
+t_uint64 vptea, l3ptea;
+uint32 vpte_vpn;
+TLBENT *vpte_p;
+
+vptea = FMT_MVA_NT (va);                                /* try virt lookup */
+vpte_vpn = VA_GETVPN (vptea);                           /* get vpte vpn */
+vpte_p = dtlb_lookup (vpte_vpn);                        /* get vpte tlb ptr */
+if (vpte_p && ((vpte_p->pte & (PTE_KRE|PTE_V)) == (PTE_KRE|PTE_V)))
+    l3ptea = PHYS_ADDR (vpte_p->pfn, vptea);            /* TLB hit: phys addr of L3 pte */
+else {
+    return 1;                                           /* for now - physical 2-level walk unimplemented */
+    }
+if (!l_ReadPQ (l3ptea, l3pte)) return 1;                /* read L3 pte for caller */
+return 0;
+}
+
+/* Translate address for console access */
+
+t_uint64 trans_c (t_uint64 va)
+{
+uint32 va_sext = VA_GETSEXT (va);
+uint32 vpn = VA_GETVPN (va);
+TLBENT *tlbp;
+t_uint64 pte64;
+uint32 exc, pfn;
+
+if ((va_sext != 0) && (va_sext != VA_M_SEXT))           /* invalid virt addr? */
+    return M64;
+if ((dtlb_spage & SPEN_43) && (VPN_GETSP43 (vpn) == 2))
+    return (va & SP43_MASK);                            /* 43b superpage? */
+if ((dtlb_spage & SPEN_32) && (VPN_GETSP32 (vpn) == 0x1FFE))
+    return (va & SP32_MASK);                            /* 32b superpage? */
+if ((tlbp = dtlb_lookup (vpn)) != NULL)                 /* try TLB */
+    return PHYS_ADDR (tlbp->pfn, va);                   /* found it */
+if (ev5_mcsr & MCSR_NT) exc = cons_find_pte_nt (va, &pte64);
+else exc = cons_find_pte_srm (va, &pte64);
+if (exc || ((pte64 & PTE_V) == 0)) return M64;          /* check valid */
+pfn = (uint32) (pte64 >> 32) & M32;                     /* pfn from pte<63:32> */
+return PHYS_ADDR (pfn, va);                             /* return phys addr */
+}
diff --git a/alpha/alpha_ev5_defs.h b/alpha/alpha_ev5_defs.h
new file mode 100644
index 00000000..cba29614
--- /dev/null
+++ b/alpha/alpha_ev5_defs.h
@@ -0,0 +1,428 @@
+/* alpha_ev5_defs.h: Alpha EV5 chip definitions file
+
+ Copyright (c) 2003-2005, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ Respectfully dedicated to the great people of the Alpha chip, systems, and
+ software development projects; and to the memory of Peter Conklin, of the
+ Alpha Program Office.
+*/
+
+#ifndef _ALPHA_EV5_DEFS_H_
+#define _ALPHA_EV5_DEFS_H_ 0
+
+/* Address limits */
+
+#define VA_SIZE 43 /* VA size */
+#define NTVA_WIDTH 32 /* VA width for NT */
+#define VA_MASK 0x000007FFFFFFFFFF
+#define EV5_PA_SIZE 40 /* PA size */
+#define EV5_PA_MASK 0x000000FFFFFFFFFF
+
+/* Virtual address */
+
+#define VA_N_OFF 13 /* offset size */
+#define VA_PAGSIZE (1u << VA_N_OFF) /* page size */
+#define VA_M_OFF ((1u << VA_N_OFF) - 1) /* offset mask */
+#define VA_N_LVL 10 /* width per level */
+#define VA_M_LVL ((1u << VA_N_LVL) - 1) /* level mask */
+#define VA_V_VPN VA_N_OFF /* vpn start */
+#define VA_N_VPN (VA_N_LVL * 3) /* vpn size */
+#define VA_M_VPN ((1u << VA_N_VPN) - 1) /* vpn mask */
+#define VA_WIDTH (VA_N_VPN + VA_N_OFF) /* total VA size */
+#define VA_V_SEXT (VA_WIDTH - 1) /* sext start */
+#define VA_M_SEXT ((1u << (64 - VA_V_SEXT)) - 1) /* sext mask */
+#define VA_GETOFF(x)    (((uint32) (x)) & VA_M_OFF)
+#define VA_GETVPN(x)    (((uint32) ((x) >> VA_V_VPN)) & VA_M_VPN)
+#define VA_GETSEXT(x)   (((uint32) ((x) >> VA_V_SEXT)) & VA_M_SEXT)
+#define PHYS_ADDR(p,v)  ((((t_uint64) (p)) << VA_N_OFF) | VA_GETOFF (v))
+
+/* 43b and 32b superpages - present in all implementations */
+
+#define SPEN_43 0x2
+#define SPEN_32 0x1
+#define SP43_MASK 0x000001FFFFFFFFFF
+#define SP32_MASK 0x000000003FFFFFFF
+#define VPN_GETSP43(x) ((uint32) (((x) >> (VA_WIDTH - VA_N_OFF - 2)) & 3))
+#define VPN_GETSP32(x) ((uint32) (((x) >> (NTVA_WIDTH - VA_N_OFF - 2)) & 0x1FFF))
+
+/* TLBs */
+
+#define INV_TAG M32
+#define ITLB_SIZE 48
+#define DTLB_SIZE 64
+#define ITLB_WIDTH 6
+#define DTLB_WIDTH 6
+
+#define TLB_CI 0x1 /* clear I */
+#define TLB_CD 0x2 /* clear D */
+#define TLB_CA 0x4 /* clear all */
+
+typedef struct {
+ uint32 tag; /* tag */
+ uint8 asn; /* addr space # */
+ uint8 idx; /* entry # */
+ uint16 gh_mask; /* gh mask */
+ uint32 pfn; /* pfn */
+ uint32 pte; /* swre/pte */
+ } TLBENT;
+
+/* Register shadow */
+
+#define PALSHAD_SIZE 8
+#define PAL_USE_SHADOW \
+ ev5_palsave[0] = R[8]; ev5_palsave[1] = R[9]; \
+ ev5_palsave[2] = R[10]; ev5_palsave[3] = R[11]; \
+ ev5_palsave[4] = R[12]; ev5_palsave[5] = R[13]; \
+ ev5_palsave[6] = R[14]; ev5_palsave[7] = R[25]; \
+ R[8] = ev5_palshad[0]; R[9] = ev5_palshad[1]; \
+ R[10] = ev5_palshad[2]; R[11] = ev5_palshad[3]; \
+ R[12] = ev5_palshad[4]; R[13] = ev5_palshad[5]; \
+ R[14] = ev5_palshad[6]; R[25] = ev5_palshad[7]
+#define PAL_USE_MAIN \
+ ev5_palshad[0] = R[8]; ev5_palshad[1] = R[9]; \
+ ev5_palshad[2] = R[10]; ev5_palshad[3] = R[11]; \
+ ev5_palshad[4] = R[12]; ev5_palshad[5] = R[13]; \
+ ev5_palshad[6] = R[14]; ev5_palshad[7] = R[25]; \
+ R[8] = ev5_palsave[0]; R[9] = ev5_palsave[1]; \
+ R[10] = ev5_palsave[2]; R[11] = ev5_palsave[3]; \
+ R[12] = ev5_palsave[4]; R[13] = ev5_palsave[5]; \
+ R[14] = ev5_palsave[6]; R[25] = ev5_palsave[7]
+
+/* PAL instructions */
+
+#define HW_MFPR 0x19
+#define HW_LD 0x1B
+#define HW_MTPR 0x1D
+#define HW_REI 0x1E
+#define HW_ST 0x1F
+
+#define HW_LD_V 0x8000
+#define HW_LD_ALT 0x4000
+#define HW_LD_WCH 0x2000
+#define HW_LD_Q 0x1000
+#define HW_LD_PTE 0x0800
+#define HW_LD_LCK 0x0400
+#define HW_LD_DSP 0x03FF
+#define SIGN_HW_LD_DSP 0x0200
+#define HW_LD_GETDSP(x) ((x) & HW_LD_DSP)
+#define SEXT_HW_LD_DSP(x) (((x) & SIGN_HW_LD_DSP)? \
+ ((x) | ~((t_uint64) HW_LD_DSP)): ((x) & HW_LD_DSP))
+
+#define HW_REI_S 0x4000
+
+/* PAL entry offsets */
+
+#define PALO_RESET 0x0000
+#define PALO_IACV 0x0080
+#define PALO_INTR 0x0100
+#define PALO_ITBM 0x0180
+#define PALO_DTBM 0x0200
+#define PALO_DTBM_D 0x0280
+#define PALO_ALGN 0x0300
+#define PALO_DFLT 0x0380
+#define PALO_MCHK 0x0400
+#define PALO_RSVI 0x0480
+#define PALO_TRAP 0x0500
+#define PALO_FDIS 0x0580
+#define PALO_CALLPR 0x2000
+#define PALO_CALLUNPR 0x3000
+
+/* Special (above 1F) and normal interrupt levels */
+
+#define IPL_HALT 0x40
+#define IPL_SLI 0x20
+#define IPL_1F 0x1F /* highest level */
+#define IPL_CRD 0x1F /* corrected read data */
+#define IPL_PWRFL 0x1E /* power fail */
+#define IPL_AST 0x02 /* AST interrupt level */
+
+/* Internal registers */
+
+#define PALTEMP_SIZE 24
+
+enum ev5_internal_reg {
+ ISR = 0x100, ITB_TAG, ITB_PTE, ITB_ASN,
+ ITB_PTE_TEMP, ITB_IA, ITB_IAP, ITB_IS,
+ SIRR, ASTRR, ASTEN, EXC_ADDR,
+ EXC_SUMM, EXC_MASK, PAL_BASE, ICM,
+ IPLR, INTID, IFAULT_VA_FORM, IVPTBR,
+ HWINT_CLR = 0x115, SL_XMIT, SL_RCV,
+ ICSR, IC_FLUSH_CTL, ICPERR_STAT, PMCTR = 0x11C,
+ PALTEMP = 0x140,
+ DTB_ASN = 0x200, DTB_CM, DTB_TAG, DTB_PTE,
+ DTB_PTE_TEMP, MM_STAT, VA, VA_FORM,
+ MVPTBR, DTB_IAP, DTB_IA, DTB_IS,
+ ALTMODE, CC, CC_CTL, MCSR,
+ DC_FLUSH, DC_PERR_STAT = 0x212, DC_TEST_CTL,
+ DC_TEST_TAG, DC_TEST_TAG_TEMP, DC_MODE, MAF_MODE
+ };
+
+/* Ibox registers */
+/* ISR - instruction summary register - read only */
+
+#define ISR_V_AST 0
+#define ISR_V_SIRR 4
+#define ISR_V_ATR 19
+#define ISR_V_IRQ0 20
+#define ISR_V_IRQ1 21
+#define ISR_V_IRQ2 22
+#define ISR_V_IRQ3 23
+#define ISR_V_PFL 30
+#define ISR_V_MCHK 31
+#define ISR_V_CRD 32
+#define ISR_V_SLI 33
+#define ISR_V_HALT 34
+
+#define ISR_ATR (((t_uint64) 1u) << ISR_V_ATR)
+#define ISR_IRQ0 (((t_uint64) 1u) << ISR_V_IRQ0)
+#define ISR_IRQ1 (((t_uint64) 1u) << ISR_V_IRQ1)
+#define ISR_IRQ2 (((t_uint64) 1u) << ISR_V_IRQ2)
+#define ISR_IRQ3 (((t_uint64) 1u) << ISR_V_IRQ3)
+#define ISR_HALT (((t_uint64) 1u) << ISR_V_HALT)
+
+/* ITB_TAG - ITLB tag - write only - stores VPN (tag) of faulting address */
+
+/* ITB_PTE - ITLB pte - read and write in different formats */
+
+#define ITBR_PTE_V_ASM 13
+#define ITBR_PTE_ASM (1u << ITBR_PTE_V_ASM)
+#define ITBR_PTE_V_KRE 18
+#define ITBR_PTE_GH0 0x00000000
+#define ITBR_PTE_GH1 0x20000000
+#define ITBR_PTE_GH2 0x60000000
+#define ITBR_PTE_GH3 0xE0000000
+
+/* ITB_ASN - ITLB ASN - read write */
+
+#define ITB_ASN_V_ASN 4
+#define ITB_ASN_M_ASN 0x7F
+#define ITB_ASN_WIDTH 7
+
+/* ITB_PTE_TEMP - ITLB PTE readout - read only */
+
+/* ITB_IA, ITB_IAP, ITB_IS - ITLB invalidates - write only */
+
+/* SIRR - software interrupt request register - read/write */
+
+#define SIRR_V_SIRR 4
+#define SIRR_M_SIRR 0x7FFF
+
+/* ASTRR, ASTEN - AST request, enable registers - read/write */
+
+#define AST_MASK 0xF /* AST bits */
+
+/* EXC_ADDR - read/write */
+
+/* EXC_SUMM - read/cleared on write */
+
+/* EXC_MASK - read only */
+
+/* PAL_BASE - read/write */
+
+#define PAL_BASE_RW 0x000000FFFFFFFFC000
+
+/* ICM - ITLB current mode - read/write */
+
+#define ICM_V_CM 3
+#define ICM_M_CM 0x3
+
+/* IPLR - interrupt priority level - read/write */
+
+#define IPLR_V_IPL 0
+#define IPLR_M_IPL 0x1F
+
+/* INTID - interrupt ID - read only */
+
+#define INTID_MASK 0x1F
+
+/* IFAULT_VA_FORM - formatted fault VA - read only */
+
+/* IVPTBR - virtual page table base - read/write */
+
+#define IVPTBR_VMS 0xFFFFFFF800000000
+#define IVPTBR_NT 0xFFFFFFFFC0000000
+#define FMT_IVA_VMS(x) (ev5_ivptbr | (((x) >> (VA_N_OFF - 3)) & 0x1FFFFFFF8))
+#define FMT_IVA_NT(x) (ev5_ivptbr | (((x) >> (VA_N_OFF - 3)) & 0x0003FFFF8))
+
+/* HWINT_CLR - hardware interrupt clear - write only */
+
+#define HWINT_CLR_W1C 0x00000003C8000000
+
+/* SL_XMIT - serial line transmit - write only */
+
+/* SL_RCV - serial line receive - read only */
+
+/* ICSR - Ibox control/status - read/write */
+
+#define ICSR_V_PME 8
+#define ICSR_M_PME 0x3
+#define ICSR_V_BSE 17
+#define ICSR_V_MSK0 20
+#define ICSR_V_MSK1 21
+#define ICSR_V_MSK2 22
+#define ICSR_V_MSK3 23
+#define ICSR_V_TMM 24
+#define ICSR_V_TMD 25
+#define ICSR_V_FPE 26
+#define ICSR_V_HWE 27
+#define ICSR_V_SPE 28
+#define ICSR_M_SPE 0x3
+#define ICSR_V_SDE 30
+#define ICSR_V_CRDE 32
+#define ICSR_V_SLE 33
+#define ICSR_V_FMS 34
+#define ICSR_V_FBT 35
+#define ICSR_V_FBD 36
+#define ICSR_V_BIST 38
+#define ICSR_V_TEST 39
+
+#define ICSR_NT (((t_uint64) 1u) << ICSR_V_SPE)
+#define ICSR_BSE (((t_uint64) 1u) << ICSR_V_BSE)
+#define ICSR_MSK0 (((t_uint64) 1u) << ICSR_V_MSK0)
+#define ICSR_MSK1 (((t_uint64) 1u) << ICSR_V_MSK1)
+#define ICSR_MSK2 (((t_uint64) 1u) << ICSR_V_MSK2)
+#define ICSR_MSK3 (((t_uint64) 1u) << ICSR_V_MSK3)
+#define ICSR_HWE (((t_uint64) 1u) << ICSR_V_HWE)
+#define ICSR_SDE (((t_uint64) 1u) << ICSR_V_SDE)
+#define ICSR_CRDE (((t_uint64) 1u) << ICSR_V_CRDE)
+#define ICSR_SLE (((t_uint64) 1u) << ICSR_V_SLE)
+
+#define ICSR_RW 0x0000009F4BF00300
+#define ICSR_MBO 0x0000006000000000
+
+/* IC_FLUSH_CTL - Icache flush control - write only */
+
+/* ICPERR_STAT - Icache parity status - read/write 1 to clear */
+
+#define ICPERR_V_DPE 11
+#define ICPERR_V_TPE 12
+#define ICPERR_V_TMO 13
+
+#define ICPERR_DPE (1u << ICPERR_V_DPE)
+#define ICPERR_TPE (1u << ICPERR_V_TPE)
+#define ICPERR_TMO (1u << ICPERR_V_TMO)
+
+#define ICPERR_W1C (ICPERR_DPE|ICPERR_TPE|ICPERR_TMO)
+
+/* Mbox registers */
+/* DTB_ASN - DTLB ASN - write only */
+
+#define DTB_ASN_V_ASN 57
+#define DTB_ASN_M_ASN 0x7F
+#define DTB_ASN_WIDTH 7
+
+/* DTB_CM - DTLB current mode - write only */
+
+#define DCM_V_CM 3
+#define DCM_M_CM 0x3
+
+/* DTB_TAG - DTLB tag and update - write only */
+
+/* DTB_PTE - DTLB PTE - read/write */
+
+/* DTB_PTE_TEMP - DTLB PTE read out register - read only */
+
+/* MM_STAT - data fault status register - read only */
+
+#define MM_STAT_WR 0x00001
+#define MM_STAT_ACV 0x00002
+#define MM_STAT_FOR 0x00004
+#define MM_STAT_FOW 0x00008
+#define MM_STAT_TBM 0x00010
+#define MM_STAT_BVA 0x00020
+#define MM_STAT_V_RA 6
+#define MM_STAT_IMASK 0x1FFC0
+
+/* VA - data fault virtual address - read only */
+
+/* VA_FORM - data fault formatted virtual address - read only */
+
+#define FMT_MVA_VMS(x) (ev5_mvptbr | (((x) >> (VA_N_OFF - 3)) & 0x1FFFFFFF8))
+#define FMT_MVA_NT(x) (ev5_mvptbr | (((x) >> (VA_N_OFF - 3)) & 0x0003FFFF8))
+
+/* MVPTBR - DTB virtual page table base - write only */
+
+#define MVPTBR_MBZ ((t_uint64) 0x3FFFFFFF)
+
+/* DTB_IAP, DTB_IA, DTB_IS - DTB invalidates - write only */
+
+/* ALT_MODE - DTLB current mode - write only */
+
+#define ALT_V_CM 3
+#define ALT_M_CM 0x3
+
+/* CC - cycle counter - upper half is RW, lower half is RO */
+
+/* CC_CTL - cycle counter control - write only */
+
+#define CC_CTL_ENB 0x100000000
+#define CC_CTL_MBZ 0xF
+
+/* MCSR - Mbox control/status register - read/write */
+
+#define MCSR_RW 0x11
+#define MCSR_V_SPE 1
+#define MCSR_M_SPE 0x3
+#define MCSR_NT 0x02
+
+/* DC_PERR_STAT - data cache parity error status - read/write */
+
+#define DC_PERR_W1C 0x3
+#define DC_PERR_ERR 0x1C
+
+/* DC_MODE - data cache mode - read/write */
+
+#define DC_MODE_RW 0xF
+
+/* MAF_MODE - miss address file mode - read/write */
+
+#define MAF_MODE_RW 0xFF
+
+/* DC_TEST_CTL - data cache test control - read/write */
+
+#define DC_TEST_CTL_RW 0x1FFFB
+
+/* DC_TEST_TAG - data cache test tag - read/write */
+
+#define DC_TEST_TAG_RW 0x0000007FFFFFFF04
+
+/* Function prototypes (TLB interface) */
+
+void tlb_ia (uint32 flags);
+void tlb_is (t_uint64 va, uint32 flags);
+void itlb_set_asn (uint32 asn);
+void itlb_set_cm (uint32 mode);
+void itlb_set_spage (uint32 spage);
+TLBENT *itlb_lookup (uint32 vpn);
+TLBENT *itlb_load (uint32 vpn, t_uint64 pte);
+t_uint64 itlb_read (void);
+void dtlb_set_asn (uint32 asn);
+void dtlb_set_cm (uint32 mode);
+void dtlb_set_spage (uint32 spage);
+TLBENT *dtlb_lookup (uint32 vpn);
+TLBENT *dtlb_load (uint32 vpn, t_uint64 pte);
+t_uint64 dtlb_read (void);
+
+#endif
+
diff --git a/alpha/alpha_ev5_pal.c b/alpha/alpha_ev5_pal.c
new file mode 100644
index 00000000..4910b0ce
--- /dev/null
+++ b/alpha/alpha_ev5_pal.c
@@ -0,0 +1,961 @@
+/* alpha_ev5_pal.c - Alpha EV5 PAL mode simulator
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ EV5 was the second generation Alpha CPU. It was a four-way, in order issue
+ CPU with onchip primary instruction and data caches, an onchip second level
+ cache, and support for an offchip third level cache. EV56 was a shrink, with
+ added support for byte and word operations. PCA56 was a version of EV56
+ without the onchip second level cache. PCA57 was a shrink of PCA56.
+
+ EV5 includes the usual five PALcode instructions:
+
+ HW_LD PALcode load
+ HW_ST PALcode store
+ HW_MTPR PALcode move to internal processor register
+ HW_MFPR PALcode move from internal processor register
+ HW_REI PALcode return
+
+ PALcode instructions can only be issued in PALmode, or in kernel mode
+ if the appropriate bit is set in ICSR.
+
+ EV5 implements 8 "PAL shadow" registers, which replace R8-R14, R25 in
+ PALmode without save/restore; and 24 "PAL temporary" registers.
+
+ Internal registers fall into three groups: IBox IPRs, MBox IPRs, and
+ PAL temporaries.
+*/
+
+#include "alpha_defs.h"
+#include "alpha_ev5_defs.h"
+
+t_uint64 ev5_palshad[PALSHAD_SIZE] = { 0 }; /* PAL shadow reg */
+t_uint64 ev5_palsave[PALSHAD_SIZE] = { 0 }; /* PAL save main */
+t_uint64 ev5_paltemp[PALTEMP_SIZE] = { 0 }; /* PAL temps */
+t_uint64 ev5_palbase = 0; /* PALcode base */
+t_uint64 ev5_excaddr = 0; /* exception address */
+t_uint64 ev5_isr = 0; /* intr summary */
+t_uint64 ev5_icsr = 0; /* IBox control */
+t_uint64 ev5_itb_pte = 0; /* ITLB pte */
+t_uint64 ev5_itb_pte_temp = 0; /* ITLB readout */
+t_uint64 ev5_ivptbr = 0; /* IBox virt ptbl */
+t_uint64 ev5_iva_form = 0; /* Ibox fmt'd VA */
+t_uint64 ev5_va = 0; /* Mbox VA */
+t_uint64 ev5_mvptbr = 0; /* Mbox virt ptbl */
+t_uint64 ev5_va_form = 0; /* Mbox fmt'd VA */
+t_uint64 ev5_dtb_pte = 0; /* DTLB pte */
+t_uint64 ev5_dtb_pte_temp = 0; /* DTLB readout */
+t_uint64 ev5_dc_test_tag = 0; /* Dcache test tag */
+t_uint64 ev5_dc_test_tag_temp = 0; /* Dcache tag readout */
+uint32 ev5_itb_tag = 0; /* ITLB tag (vpn) */
+uint32 ev5_dtb_tag = 0; /* DTLB tag (vpn) */
+uint32 ev5_icperr = 0; /* Icache par err */
+uint32 ev5_mm_stat = 0; /* MBox fault code */
+uint32 ev5_mcsr = 0; /* MBox control */
+uint32 ev5_alt_mode = 0; /* MBox alt mode */
+uint32 ev5_dc_mode = 0; /* Dcache mode */
+uint32 ev5_dcperr = 0; /* Dcache par err */
+uint32 ev5_dc_test_ctl = 0; /* Dcache test ctrl */
+uint32 ev5_maf_mode = 0; /* MAF mode */
+uint32 ev5_va_lock = 0; /* VA lock flag */
+uint32 ev5_mchk = 0; /* machine check pin */
+uint32 ev5_sli = 0; /* serial line intr */
+uint32 ev5_crd = 0; /* corr read data pin */
+uint32 ev5_pwrfl = 0; /* power fail pin */
+uint32 ev5_ipl = 0; /* ipl */
+uint32 ev5_sirr = 0; /* software int req */
+uint32 ev5_astrr = 0; /* AST requests */
+uint32 ev5_asten = 0; /* AST enables */
+const uint32 ast_map[4] = { 0x1, 0x3, 0x7, 0xF };
+
+t_stat ev5_palent (t_uint64 fpc, uint32 off);
+t_stat ev5_palent_d (t_uint64 fpc, uint32 off, uint32 sta);
+t_stat pal_proc_reset_hwre (DEVICE *dptr);
+t_stat pal_proc_intr_ev5 (uint32 lvl);
+uint32 pal_eval_intr_ev5 (uint32 flag);
+
+extern t_uint64 R[32];
+extern t_uint64 PC;
+extern t_uint64 trap_mask;
+extern t_uint64 p1;
+extern uint32 ir;
+extern uint32 vax_flag, lock_flag;
+extern uint32 fpen;
+extern uint32 pcc_h, pcc_l, pcc_enb;
+extern uint32 trap_summ;
+extern uint32 arch_mask;
+extern uint32 pal_mode, pal_type;
+extern uint32 int_req[IPL_HLVL];
+extern uint32 itlb_cm, dtlb_cm;
+extern uint32 itlb_asn, dtlb_asn;
+extern uint32 itlb_spage, dtlb_spage;
+extern jmp_buf save_env;
+extern uint32 pal_type;
+extern t_uint64 pcq[PCQ_SIZE]; /* PC queue */
+extern int32 pcq_p; /* PC queue ptr */
+
+extern int32 parse_reg (char *cptr);
+
+/* EV5PAL data structures
+
+ ev5pal_dev device descriptor
+ ev5pal_unit unit
+ ev5pal_reg register list
+*/
+
+UNIT ev5pal_unit = { UDATA (NULL, 0, 0) };
+
+REG ev5pal_reg[] = {
+ { BRDATA (PALSHAD, ev5_palshad, 16, 64, PALSHAD_SIZE) },
+ { BRDATA (PALSAVE, ev5_palsave, 16, 64, PALSHAD_SIZE) },
+ { BRDATA (PALTEMP, ev5_paltemp, 16, 64, PALTEMP_SIZE) },
+ { HRDATA (PALBASE, ev5_palbase, 64) },
+ { HRDATA (EXCADDR, ev5_excaddr, 64) },
+ { HRDATA (IPL, ev5_ipl, 5) },
+ { HRDATA (SIRR, ev5_sirr, 15) },
+ { HRDATA (ASTRR, ev5_astrr, 4) },
+ { HRDATA (ASTEN, ev5_asten, 4) },
+ { HRDATA (ISR, ev5_isr, 35) },
+ { HRDATA (ICSR, ev5_icsr, 40) },
+ { HRDATA (ITB_TAG, ev5_itb_tag, 32) },
+ { HRDATA (ITB_PTE, ev5_itb_pte, 64) },
+ { HRDATA (ITB_PTE_TEMP, ev5_itb_pte_temp, 64) },
+ { HRDATA (IVA_FORM, ev5_iva_form, 64) },
+ { HRDATA (IVPTBR, ev5_ivptbr, 64) },
+ { HRDATA (ICPERR_STAT, ev5_icperr, 14) },
+ { HRDATA (VA, ev5_va, 64) },
+ { HRDATA (VA_FORM, ev5_va_form, 64) },
+ { HRDATA (MVPTBR, ev5_mvptbr, 64) },
+ { HRDATA (MM_STAT, ev5_mm_stat, 17) },
+ { HRDATA (MCSR, ev5_mcsr, 6) },
+ { HRDATA (DTB_TAG, ev5_dtb_tag, 32) },
+ { HRDATA (DTB_PTE, ev5_dtb_pte, 64) },
+ { HRDATA (DTB_PTE_TEMP, ev5_dtb_pte_temp, 64) },
+ { HRDATA (DC_MODE, ev5_dc_mode, 4) },
+ { HRDATA (DC_PERR_STAT, ev5_dcperr, 6) },
+ { HRDATA (DC_TEST_CTL, ev5_dc_test_ctl, 13) },
+ { HRDATA (DC_TEST_TAG, ev5_dc_test_tag, 39) },
+ { HRDATA (DC_TEST_TAG_TEMP, ev5_dc_test_tag_temp, 39) },
+ { HRDATA (MAF_MODE, ev5_maf_mode, 8) },
+ { FLDATA (VA_LOCK, ev5_va_lock, 0) },
+ { FLDATA (MCHK, ev5_mchk, 0) },
+ { FLDATA (CRD, ev5_crd, 0) },
+ { FLDATA (PWRFL, ev5_pwrfl, 0) },
+ { FLDATA (SLI, ev5_sli, 0) },
+ { NULL }
+ };
+
+DEVICE ev5pal_dev = {
+ "EV5PAL", &ev5pal_unit, ev5pal_reg, NULL,
+ 1, 16, 1, 1, 16, 8,
+ NULL, NULL, &pal_proc_reset_hwre,
+ NULL, NULL, NULL,
+ NULL, DEV_DIS
+ };
+
+/* EV5 interrupt dispatch - reached from top of instruction loop -
+ dispatch to PALcode */
+
+t_stat pal_proc_intr (uint32 lvl)
+{
+return ev5_palent (PC, PALO_INTR);                      /* NOTE(review): lvl unused - PAL handler determines source */
+}
+
+/* EV5 trap dispatch - reached from bottom of instruction loop -
+ trap_mask and trap_summ are set up correctly - dispatch to PALcode */
+
+t_stat pal_proc_trap (uint32 summ)
+{
+return ev5_palent (PC, PALO_TRAP);                      /* summ unused; trap_summ/trap_mask already set up */
+}
+
+/* EV5 exception dispatch - reached from ABORT handler -
+ set up any exception-specific registers - dispatch to PALcode */
+
+t_stat pal_proc_excp (uint32 abval)
+{
+switch (abval) {
+
+    case EXC_RSVI:                                      /* reserved instruction */
+        return ev5_palent (PC, PALO_RSVI);
+
+    case EXC_ALIGN:                                     /* unaligned */
+        return ev5_palent (PC, PALO_ALGN);
+
+    case EXC_FPDIS:                                     /* fp disabled */
+        return ev5_palent (PC, PALO_FDIS);
+
+    case EXC_FOX+EXC_R:                                 /* FOR */
+        return ev5_palent_d (PC, PALO_DFLT, MM_STAT_FOR);
+
+    case EXC_FOX+EXC_W:                                 /* FOW - must report FOW bit, not FOR */
+        return ev5_palent_d (PC, PALO_DFLT, MM_STAT_FOW|MM_STAT_WR);
+
+    case EXC_BVA+EXC_E:                                 /* instr bad VA */
+    case EXC_ACV+EXC_E:                                 /* instr ACV */
+        ev5_itb_tag = VA_GETVPN (PC);                   /* fault VPN */
+        if (ev5_icsr & ICSR_NT)                         /* formatted addr */
+            ev5_iva_form = ev5_ivptbr | FMT_IVA_NT (PC);
+        else ev5_iva_form = ev5_ivptbr | FMT_IVA_VMS (PC);
+        return ev5_palent (PC, PALO_IACV);
+
+    case EXC_ACV+EXC_R:                                 /* data read ACV */
+        return ev5_palent_d (PC, PALO_DFLT, MM_STAT_ACV);
+
+    case EXC_ACV+EXC_W:                                 /* data write ACV */
+        return ev5_palent_d (PC, PALO_DFLT, MM_STAT_ACV|MM_STAT_WR);
+
+    case EXC_BVA+EXC_R:                                 /* data read bad addr */
+        return ev5_palent_d (PC, PALO_DFLT, MM_STAT_BVA);
+
+    case EXC_BVA+EXC_W:                                 /* data write bad addr */
+        return ev5_palent_d (PC, PALO_DFLT, MM_STAT_BVA|MM_STAT_WR);
+
+    case EXC_TBM + EXC_E:                               /* TLB miss */
+        ev5_itb_tag = VA_GETVPN (PC);                   /* fault VPN */
+        if (ev5_icsr & ICSR_NT)                         /* formatted addr */
+            ev5_iva_form = ev5_ivptbr | FMT_IVA_NT (PC);
+        else ev5_iva_form = ev5_ivptbr | FMT_IVA_VMS (PC);
+        return ev5_palent (PC, PALO_ITBM);
+
+    case EXC_TBM + EXC_R:                               /* data TB miss read */
+        if ((I_GETOP (ir) == HW_LD) && (ir & HW_LD_PTE))
+            return ev5_palent_d (PC, PALO_DTBM_D, MM_STAT_TBM);
+        return ev5_palent_d (PC, PALO_DTBM, MM_STAT_TBM);
+
+    case EXC_TBM + EXC_W:                               /* data TB miss write */
+        if ((I_GETOP (ir) == HW_LD) && (ir & HW_LD_PTE))
+            return ev5_palent_d (PC, PALO_DTBM_D, MM_STAT_TBM|MM_STAT_WR);
+        return ev5_palent_d (PC, PALO_DTBM, MM_STAT_TBM|MM_STAT_WR);
+
+    case EXC_RSVO:                                      /* reserved operand */
+    case EXC_TNV+EXC_E:                                 /* instr TNV */
+    case EXC_TNV+EXC_R:                                 /* data read TNV */
+    case EXC_TNV+EXC_W:                                 /* data write TNV */
+    case EXC_FOX+EXC_E:                                 /* FOE */
+        return SCPE_IERR;                               /* should never get here */
+
+    default:
+        return STOP_INVABO;
+    }
+
+return SCPE_OK;
+}
+
+/* EV5 call PAL - reached from instruction decoder -
+ compute offset from function code - dispatch to PALcode */
+
+t_stat pal_proc_inst (uint32 fnc)
+{
+uint32 off = (fnc & 0x3F) << 6;                         /* 64B per PAL entry */
+
+if (fnc & 0x80) return ev5_palent (PC, PALO_CALLUNPR + off);
+if (itlb_cm != MODE_K) ABORT (EXC_RSVI);                /* privileged call: kernel mode only */
+return ev5_palent (PC, PALO_CALLPR + off);
+}
+
+/* EV5 evaluate interrupts - returns highest outstanding
+   interrupt level above target ipl - plus nonmaskable flags
+
+ flag = 1: evaluate for real interrupt capability
+ flag = 0: evaluate as though IPL = 0, normal mode */
+
+uint32 pal_eval_intr (uint32 flag)
+{
+uint32 i, req = 0;
+uint32 lvl = flag? ev5_ipl: 0;
+
+if (flag && pal_mode) return 0;
+if (ev5_mchk) req = IPL_1F;
+else if (ev5_crd && (ICSR & ICSR_CRDE)) req = IPL_CRD;
+else if (ev5_pwrfl) req = IPL_PWRFL;
+else if (int_req[3] && !(ICSR & ICSR_MSK3)) req = IPL_HMIN + 3;
+else if (int_req[2] && !(ICSR & ICSR_MSK2)) req = IPL_HMIN + 2;
+else if (int_req[1] && !(ICSR & ICSR_MSK1)) req = IPL_HMIN + 1;
+else if (int_req[0] && !(ICSR & ICSR_MSK0)) req = IPL_HMIN + 0;
+else if (ev5_sirr) {
+ for (i = IPL_SMAX; i > 0; i--) { /* check swre int */
+ if ((ev5_sirr >> (i - 1)) & 1) { /* req != 0? int */
+ req = i;
+ break;
+ }
+ }
+ }
+if ((req < IPL_AST) && (ev5_astrr & ev5_asten & ast_map[itlb_cm]))
+ req = IPL_AST;
+if (req <= lvl) req = 0;
+if (ev5_sli && (ICSR & ICSR_SLE)) req = req | IPL_SLI;
+if (ev5_isr & ISR_HALT) req = req | IPL_HALT;
+return req;
+}
+
+/* EV5 enter PAL, data TLB miss/memory management flows -
+   set Mbox registers - dispatch to PALcode
+
+   fpc = faulting PC, off = PALcode entry offset, sta = MM_STAT fault bits.
+   The Mbox fault registers (MM_STAT, VA, VA_FORM) are updated only if
+   not already locked by an earlier, not-yet-read fault; reading VA in
+   pal_19 clears the lock */
+
+t_stat ev5_palent_d (t_uint64 fpc, uint32 off, uint32 sta)
+{
+if (!ev5_va_lock) {                                     /* not locked? */
+    ev5_mm_stat = sta |                                 /* merge IR<31:21> */
+        ((ir >> (I_V_RA - MM_STAT_V_RA)) & MM_STAT_IMASK);
+    ev5_va = p1;                                        /* fault address */
+    if (ev5_mcsr & MCSR_NT)                             /* formatted VA */
+        ev5_va_form = ev5_mvptbr | FMT_MVA_NT (p1);
+    else ev5_va_form = ev5_mvptbr | FMT_MVA_VMS (p1);
+    ev5_va_lock = 1;                                    /* lock registers */
+    }
+return ev5_palent (fpc, off);
+}
+
+/* EV5 enter PAL - common trailer for all exception/dispatch flows
+
+   fpc = PC to save in EXC_ADDR (its low bit records the prior PAL mode);
+   off = offset from PAL base of the target entry point */
+
+t_stat ev5_palent (t_uint64 fpc, uint32 off)
+{
+ev5_excaddr = fpc | pal_mode;                           /* save exc addr */
+PCQ_ENTRY;                                              /* save PC */
+PC = ev5_palbase + off;                                 /* new PC */
+if (!pal_mode && (ev5_icsr & ICSR_SDE)) {               /* entering PALmode? */
+    PAL_USE_SHADOW;                                     /* swap in shadows */
+    }
+pal_mode = 1;                                           /* in PAL mode */
+return SCPE_OK;
+}
+
+/* PAL instructions */
+
+/* 1B: HW_LD - PALcode load; virtual or physical, longword or quadword
+
+   Fixed: the physical-quadword path stored through R[ra] directly and
+   left res uninitialized; the common store at the end then overwrote
+   R[ra] with an indeterminate value (undefined behavior).  All four
+   paths now deliver their result via res */
+
+t_stat pal_1b (uint32 ir)
+{
+t_uint64 dsp, ea, res;
+uint32 ra, rb, acc, mode;
+
+if (!pal_mode && (!(itlb_cm == MODE_K) ||               /* pal mode, or kernel */
+    !(ev5_icsr & ICSR_HWE))) ABORT (EXC_RSVI);          /* and enabled? */
+ra = I_GETRA (ir);                                      /* get ra */
+rb = I_GETRB (ir);                                      /* get rb */
+dsp = HW_LD_GETDSP (ir);                                /* get displacement */
+ea = (R[rb] + (SEXT_HW_LD_DSP (dsp))) & M64;            /* eff address */
+if (ir & HW_LD_V) {                                     /* virtual? */
+    mode = (ir & HW_LD_ALT)? ev5_alt_mode: dtlb_cm;     /* access mode */
+    acc = (ir & HW_LD_WCH)? ACC_W (mode): ACC_R (mode);
+    if (ir & HW_LD_Q) res = ReadAccQ (ea, acc);         /* quad? */
+    else {                                              /* long, sext */
+        res = ReadAccL (ea, acc);
+        res = SEXT_L_Q (res);
+        }
+    }
+else if (ir & HW_LD_Q) res = ReadPQ (ea);               /* physical, quad? */
+else {
+    res = ReadPL (ea);                                  /* long, sext */
+    res = SEXT_L_Q (res);
+    }
+if (ir & HW_LD_LCK) lock_flag = 1;                      /* lock? set flag */
+if (ra != 31) R[ra] = res;                              /* if not R31, store */
+return SCPE_OK;
+}
+
+/* 1F: HW_ST - PALcode store; virtual or physical, longword or quadword.
+   With /L (lock), the store succeeds only if lock_flag is still set;
+   the success status is returned in R[ra] */
+
+t_stat pal_1f (uint32 ir)
+{
+t_uint64 dsp, ea;
+uint32 ra, rb, acc, mode;
+
+if (!pal_mode && (!(itlb_cm == MODE_K) ||               /* pal mode, or kernel */
+    !(ev5_icsr & ICSR_HWE))) ABORT (EXC_RSVI);          /* and enabled? */
+ra = I_GETRA (ir);                                      /* get ra */
+rb = I_GETRB (ir);                                      /* get rb */
+dsp = HW_LD_GETDSP (ir);                                /* get displacement */
+ea = (R[rb] + (SEXT_HW_LD_DSP (dsp))) & M64;            /* eff address */
+/* NOTE(review): on lock failure the 0 status is written to R[ra] even
+   when ra == 31; confirm the zero register is re-forced elsewhere */
+if ((ir & HW_LD_LCK) && !lock_flag) R[ra] = 0;          /* lock fail? */
+else {
+    if (ir & HW_LD_V) {                                 /* virtual? */
+        mode = (ir & HW_LD_ALT)? ev5_alt_mode: dtlb_cm; /* access mode */
+        acc = ACC_W (mode);
+        if (ir & HW_LD_Q) WriteAccQ (ea, R[ra], acc);   /* quad? */
+        else WriteAccL (ea, R[ra], acc);                /* long */
+        }
+    else if (ir & HW_LD_Q) WritePQ (ea, R[ra]);         /* physical, quad? */
+    else WritePL (ea, R[ra]);                           /* long */
+    if (ir & HW_LD_LCK) lock_flag = 0;                  /* unlock? clr flag */
+    }
+return SCPE_OK;
+}
+
+/* 1E: HW_REI - return from PALcode.  PC is restored from EXC_ADDR;
+   EXC_ADDR<0> holds the PAL-mode state to resume.  If actually leaving
+   PAL mode with shadow registers enabled, the main register set is
+   swapped back in */
+
+t_stat pal_1e (uint32 ir)
+{
+uint32 new_pal = ((uint32) ev5_excaddr) & 1;            /* saved PAL mode bit */
+
+if (!pal_mode && (!(itlb_cm == MODE_K) ||               /* pal mode, or kernel */
+    !(ev5_icsr & ICSR_HWE))) ABORT (EXC_RSVI);          /* and enabled? */
+PCQ_ENTRY;
+PC = ev5_excaddr;
+if (pal_mode && !new_pal && (ev5_icsr & ICSR_SDE)) {    /* leaving PAL mode? */
+    PAL_USE_MAIN;                                       /* swap out shadows */
+    }
+pal_mode = new_pal;
+return SCPE_OK;
+}
+
+/* PAL move from processor registers (HW_MFPR)
+
+   The IPR number comes from the instruction's displacement field; the
+   selected register value is returned in R[ra] (discarded if ra = 31).
+   Reads of ITB_PTE and DTB_PTE latch the TB entry into the matching
+   *_TEMP register and themselves return 0 -- presumably modelling
+   EV5's two-step PTE read protocol (TODO confirm against the 21164 HRM) */
+
+t_stat pal_19 (uint32 ir)
+{
+t_uint64 res;
+uint32 fnc, ra;
+static const uint32 itbr_map_gh[4] = {
+    ITBR_PTE_GH0, ITBR_PTE_GH1, ITBR_PTE_GH2, ITBR_PTE_GH3 };
+
+if (!pal_mode && (!(itlb_cm == MODE_K) ||               /* pal mode, or kernel */
+    !(ev5_icsr & ICSR_HWE))) ABORT (EXC_RSVI);          /* and enabled? */
+fnc = I_GETMDSP (ir);
+ra = I_GETRA (ir);
+switch (fnc) {
+
+    case ISR:                                           /* intr summary */
+        res = ev5_isr | ((ev5_astrr & ev5_asten) << ISR_V_AST) |
+            ((ev5_sirr & SIRR_M_SIRR) << ISR_V_SIRR) |
+            (int_req[0] && !(ev5_icsr & ICSR_MSK0)? ISR_IRQ0: 0) |
+            (int_req[1] && !(ev5_icsr & ICSR_MSK1)? ISR_IRQ1: 0) |
+            (int_req[2] && !(ev5_icsr & ICSR_MSK2)? ISR_IRQ2: 0) |
+            (int_req[3] && !(ev5_icsr & ICSR_MSK3)? ISR_IRQ3: 0);
+        if (ev5_astrr & ev5_asten & ast_map[itlb_cm]) res = res | ISR_ATR;
+        break;
+
+    case ITB_PTE:                                       /* latch into temp, rd 0 */
+        res = itlb_read ();
+        ev5_itb_pte_temp = (res & PFN_MASK) |
+            ((res & PTE_ASM)? ITBR_PTE_ASM: 0) |
+            ((res & (PTE_KRE|PTE_ERE|PTE_SRE|PTE_URE)) <<
+            (ITBR_PTE_V_KRE - PTE_V_KRE)) |
+            itbr_map_gh[PTE_GETGH (res)];
+        res = 0;
+        break;
+
+    case ITB_ASN:
+        res = (itlb_asn & ITB_ASN_M_ASN) << ITB_ASN_V_ASN;
+        break;
+
+    case ITB_PTE_TEMP:
+        res = ev5_itb_pte_temp;
+        break;
+
+    case SIRR:
+        res = (ev5_sirr & SIRR_M_SIRR) << SIRR_V_SIRR;
+        break;
+
+    case ASTRR:
+        res = ev5_astrr & AST_MASK;
+        break;
+
+    case ASTEN:
+        res = ev5_asten & AST_MASK;
+        break;
+
+    case EXC_ADDR:
+        res = ev5_excaddr;
+        break;
+
+    case EXC_SUMM:
+        res = trap_summ & TRAP_SUMM_RW;
+        break;
+
+    case EXC_MASK:
+        res = trap_mask;
+        break;
+
+    case PAL_BASE:
+        res = ev5_palbase & PAL_BASE_RW;
+        break;
+
+    case ICM:
+        res = (itlb_cm & ICM_M_CM) << ICM_V_CM;
+        break;
+
+    case IPLR:
+        res = (ev5_ipl & IPLR_M_IPL) << IPLR_V_IPL;
+        break;
+
+    case INTID:                                         /* highest pending, IPL 0 */
+        res = pal_eval_intr (0) & INTID_MASK;
+        break;
+
+    case IFAULT_VA_FORM:
+        res = ev5_iva_form;
+        break;
+
+    case IVPTBR:
+        res = ev5_ivptbr;
+        break;
+
+    case ICSR:                                          /* assemble from pieces */
+        res = (ev5_icsr & ICSR_RW) | ICSR_MBO |
+            ((itlb_spage & ICSR_M_SPE) << ICSR_V_SPE) |
+            ((fpen & 1) << ICSR_V_FPE) |
+            ((arch_mask & AMASK_BWX)? ICSR_BSE: 0);
+        break;
+
+    case PALTEMP+0x00: case PALTEMP+0x01: case PALTEMP+0x02: case PALTEMP+0x03:
+    case PALTEMP+0x04: case PALTEMP+0x05: case PALTEMP+0x06: case PALTEMP+0x07:
+    case PALTEMP+0x08: case PALTEMP+0x09: case PALTEMP+0x0A: case PALTEMP+0x0B:
+    case PALTEMP+0x0C: case PALTEMP+0x0D: case PALTEMP+0x0E: case PALTEMP+0x0F:
+    case PALTEMP+0x10: case PALTEMP+0x11: case PALTEMP+0x12: case PALTEMP+0x13:
+    case PALTEMP+0x14: case PALTEMP+0x15: case PALTEMP+0x16: case PALTEMP+0x17:
+        res = ev5_paltemp[fnc - PALTEMP];
+        break;
+
+    case DTB_PTE:                                       /* latch into temp, rd 0 */
+        ev5_dtb_pte_temp = dtlb_read ();
+        res = 0;
+        break;
+
+    case DTB_PTE_TEMP:
+        res = ev5_dtb_pte_temp;
+        break;
+
+    case MM_STAT:
+        res = ev5_mm_stat;
+        break;
+
+    case VA:                                            /* rd VA unlocks Mbox regs */
+        res = ev5_va;
+        ev5_va_lock = 0;
+        break;
+
+    case VA_FORM:
+        res = ev5_va_form;
+        break;
+
+    case DC_PERR_STAT:
+        res = ev5_dcperr;
+        break;
+
+    case MCSR:
+        res = (ev5_mcsr & MCSR_RW) | ((dtlb_spage & MCSR_M_SPE) << MCSR_V_SPE);
+        break;
+
+    case DC_MODE:
+        res = ev5_dc_mode & DC_MODE_RW;
+        break;
+
+    case MAF_MODE:
+        res = ev5_maf_mode & MAF_MODE_RW;
+        break;
+
+    case CC:                                            /* cycle counter hi:lo */
+        res = (((t_uint64) pcc_h) << 32) | ((t_uint64) pcc_l);
+        break;
+
+    case DC_TEST_CTL:
+        res = ev5_dc_test_ctl & DC_TEST_CTL_RW;
+        break;
+
+    case DC_TEST_TAG:
+        // to be determined
+        res = 0;
+        break;
+
+    case DC_TEST_TAG_TEMP:
+        res = ev5_dc_test_tag_temp & DC_TEST_TAG_RW;
+        break;
+
+    default:                                            /* unknown IPR reads as 0 */
+        res = 0;
+        break;
+        }
+
+if (ra != 31) R[ra] = res & M64;
+return SCPE_OK;
+}
+
+/* PAL move to processor registers (HW_MTPR)
+
+   The IPR number comes from the instruction's displacement field;
+   R[ra] supplies the value.  Writes with side effects (TB loads,
+   TB invalidates, shadow-register swaps) are handled per register */
+
+t_stat pal_1d (uint32 ir)
+{
+uint32 fnc = I_GETMDSP (ir);
+uint32 ra = I_GETRA (ir);
+t_uint64 val = R[ra];
+
+if (!pal_mode && (!(itlb_cm == MODE_K) ||               /* pal mode, or kernel */
+    !(ev5_icsr & ICSR_HWE))) ABORT (EXC_RSVI);          /* and enabled? */
+switch (fnc) {
+
+    case ITB_TAG:                                       /* latch tag for ITB fill */
+        ev5_itb_tag = VA_GETVPN (val);
+        break;
+
+    case ITB_PTE:                                       /* wr PTE loads ITB */
+        ev5_itb_pte = (val | PTE_V) & (PFN_MASK | ((t_uint64) (PTE_ASM | PTE_GH |
+            PTE_KRE | PTE_ERE | PTE_SRE | PTE_URE)));
+        itlb_load (ev5_itb_tag, ev5_itb_pte);
+        break;
+
+    case ITB_ASN:
+        itlb_set_asn ((((uint32) val) >> ITB_ASN_V_ASN) & ITB_ASN_M_ASN);
+        break;
+
+    case ITB_IA:                                        /* inval all ITB */
+        tlb_ia (TLB_CI | TLB_CA);
+        break;
+
+    case ITB_IAP:                                       /* inval non-ASM ITB */
+        tlb_ia (TLB_CI);
+        break;
+
+    case ITB_IS:                                        /* inval single ITB */
+        tlb_is (val, TLB_CI);
+        break;
+
+    case SIRR:
+        ev5_sirr = (((uint32) val) >> SIRR_V_SIRR) & SIRR_M_SIRR;
+        break;
+
+    case ASTRR:
+        ev5_astrr = ((uint32) val) & AST_MASK;
+        break;
+
+    case ASTEN:
+        ev5_asten = ((uint32) val) & AST_MASK;
+        break;
+
+    case EXC_ADDR:
+        ev5_excaddr = val;
+        break;
+
+    case EXC_SUMM:                                      /* any write clears both */
+        trap_summ = 0;
+        trap_mask = 0;
+        break;
+
+    case PAL_BASE:
+        ev5_palbase = val & PAL_BASE_RW;
+        break;
+
+    case ICM:
+        itlb_set_cm ((((uint32) val) >> ICM_V_CM) & ICM_M_CM);
+        break;
+
+    case IPLR:
+        ev5_ipl = (((uint32) val) >> IPLR_V_IPL) & IPLR_M_IPL;
+        break;
+
+    case IVPTBR:                                        /* mask depends on NT mode */
+        if (ev5_icsr & ICSR_NT) ev5_ivptbr = val & IVPTBR_NT;
+        else ev5_ivptbr = val & IVPTBR_VMS;
+        break;
+
+    case HWINT_CLR:                                     /* W1C interrupt bits */
+        ev5_isr = ev5_isr & ~(val & HWINT_CLR_W1C);
+        break;
+
+    case ICSR:                                          /* SDE change swaps regs */
+        if (pal_mode && ((val ^ ev5_icsr) & ICSR_SDE)) {
+            if (val & ICSR_SDE) { PAL_USE_SHADOW; }
+            else { PAL_USE_MAIN; }
+            }
+        ev5_icsr = val & ICSR_RW;
+        itlb_set_spage ((((uint32) val) >> ICSR_V_SPE) & ICSR_M_SPE);
+        fpen = (((uint32) val) >> ICSR_V_FPE) & 1;
+        if (val & ICSR_BSE) arch_mask = arch_mask | AMASK_BWX;
+        else arch_mask = arch_mask & ~AMASK_BWX;
+        break;
+
+    case ICPERR_STAT:
+        ev5_icperr = ev5_icperr & ~(((uint32) val) & ICPERR_W1C);
+        break;
+
+    case PALTEMP+0x00: case PALTEMP+0x01: case PALTEMP+0x02: case PALTEMP+0x03:
+    case PALTEMP+0x04: case PALTEMP+0x05: case PALTEMP+0x06: case PALTEMP+0x07:
+    case PALTEMP+0x08: case PALTEMP+0x09: case PALTEMP+0x0A: case PALTEMP+0x0B:
+    case PALTEMP+0x0C: case PALTEMP+0x0D: case PALTEMP+0x0E: case PALTEMP+0x0F:
+    case PALTEMP+0x10: case PALTEMP+0x11: case PALTEMP+0x12: case PALTEMP+0x13:
+    case PALTEMP+0x14: case PALTEMP+0x15: case PALTEMP+0x16: case PALTEMP+0x17:
+        ev5_paltemp[fnc - PALTEMP] = val;
+        break;
+
+    case DTB_ASN:
+        dtlb_set_asn (((uint32) (val >> DTB_ASN_V_ASN)) & DTB_ASN_M_ASN);
+        break;
+
+    case DTB_CM:
+        dtlb_set_cm (((uint32) (val >> ICM_V_CM)) & ICM_M_CM);
+        break;
+
+    case DTB_TAG:
+        ev5_dtb_tag = VA_GETVPN (val);
+        /* NOTE(review): the TB entry is filled from the written tag value
+           rather than from the previously written DTB_PTE (ev5_dtb_pte);
+           verify against the EV5 DTB fill protocol */
+        val = (val | PTE_V) & (PFN_MASK | ((t_uint64) (PTE_MASK & ~PTE_FOE)));
+        dtlb_load (ev5_dtb_tag, val);
+        break;
+
+    case DTB_PTE:
+        ev5_dtb_pte = val;
+        break;
+
+    case MVPTBR:
+        ev5_mvptbr = val & ~MVPTBR_MBZ;
+        break;
+
+    case DC_PERR_STAT:                                  /* W1C; all clear -> 0 */
+        ev5_dcperr = ev5_dcperr & ~(((uint32) val) & DC_PERR_W1C);
+        if ((ev5_dcperr & DC_PERR_W1C) == 0) ev5_dcperr = 0;
+        break;
+
+    case DTB_IA:                                        /* inval all DTB */
+        tlb_ia (TLB_CD | TLB_CA);
+        break;
+
+    case DTB_IAP:                                       /* inval non-ASM DTB */
+        tlb_ia (TLB_CD);
+        break;
+
+    case DTB_IS:                                        /* inval single DTB */
+        tlb_is (val, TLB_CD);
+        break;
+
+    case MCSR:
+        ev5_mcsr = ((uint32) val) & MCSR_RW;
+        dtlb_set_spage ((((uint32) val) >> MCSR_V_SPE) & MCSR_M_SPE);
+        if (ev5_mcsr & MCSR_NT) pal_type = PAL_NT;
+        break;
+
+    case DC_MODE:
+        ev5_dc_mode = ((uint32) val) & DC_MODE_RW;
+        break;
+
+    case MAF_MODE:
+        ev5_maf_mode = ((uint32) val) & MAF_MODE_RW;
+        break;
+
+    case CC:                                            /* wr high half only */
+        pcc_h = (uint32) ((val >> 32) & M32);
+        break;
+
+    case CC_CTL:
+        pcc_l = ((uint32) val) & (M32 & ~CC_CTL_MBZ);
+        if (val & CC_CTL_ENB) pcc_enb = 1;
+        else pcc_enb = 0;
+        break;
+
+    case DC_TEST_CTL:
+        ev5_dc_test_ctl = ((uint32) val) & DC_TEST_CTL_RW;
+        break;
+
+    case DC_TEST_TAG:
+        ev5_dc_test_tag = val & DC_TEST_TAG_RW;
+        break;
+
+    default:                                            /* unknown IPR: ignored */
+        break;
+        }
+
+return SCPE_OK;
+}
+
+/* EV5 PALcode reset - clear pending exception state and return
+   both TLBs to kernel mode, ASN 0, superpages disabled */
+
+t_stat pal_proc_reset_hwre (DEVICE *dptr)
+{
+ev5_palbase = 0;                                        /* PAL base at 0 */
+ev5_mchk = 0;                                           /* no pending mchk */
+ev5_pwrfl = 0;                                          /* no powerfail */
+ev5_crd = 0;                                            /* no corrected read */
+ev5_sli = 0;                                            /* no serial line int */
+itlb_set_cm (MODE_K);
+itlb_set_asn (0);
+itlb_set_spage (0);
+dtlb_set_cm (MODE_K);
+dtlb_set_asn (0);
+dtlb_set_spage (0);
+return SCPE_OK;
+}
+
+/* EV5 PAL instruction print and parse routines */
+
+/* Mnemonic table; pal_ival holds the matching base opcodes in the
+   same order (HW_REI's value includes its fixed Ra/Rb/disp fields) */
+
+static const char *pal_inam[] = {
+    "HW_MFPR", "HW_LD", "HW_MTPR", "HW_REI", "HW_ST", NULL
+    };
+
+static const uint32 pal_ival[] = {
+    0x64000000, 0x6C000000, 0x74000000, 0x7BFF8000, 0x7C000000
+    };
+
+/* Option table entry: instruction bit <-> qualifier letter */
+
+struct pal_opt {
+    uint32 mask;                                        /* bit mask */
+    char let;                                           /* matching letter */
+    };
+
+static struct pal_opt ld_st_opt[] = {
+    { HW_LD_V, 'V' },
+    { HW_LD_ALT, 'A' },
+    { HW_LD_WCH, 'W' },
+    { HW_LD_Q, 'Q' },
+    { HW_LD_PTE, 'P' },
+    { HW_LD_LCK, 'L' },
+    { 0 }
+    };
+
+static struct pal_opt rei_opt[] = {
+    { HW_REI_S, 'S' },
+    { 0 }
+    };
+
+/* Print options for hardware PAL instruction - emit "/X" for each
+   option bit set in inst, per the supplied option table */
+
+void fprint_opt_ev5 (FILE *of, uint32 inst, struct pal_opt opt[])
+{
+struct pal_opt *op;
+
+for (op = opt; op->mask != 0; op++) {                   /* scan option table */
+    if (inst & op->mask) {                              /* option present? */
+        fprintf (of, "/%c", op->let);                   /* print qualifier */
+        inst &= ~op->mask;                              /* strip the bit */
+        }
+    }
+return;
+}
+
+/* Parse options for hardware PAL instruction
+
+   cptr points just past the mnemonic; *(cptr - 1) is the delimiter that
+   ended it, so options exist only if that delimiter was '/'.  Each
+   recognized letter ORs its mask into *val.  Returns the updated parse
+   position, or NULL on an unrecognized option letter */
+
+char *parse_opt_ev5 (char *cptr, uint32 *val, struct pal_opt opt[])
+{
+uint32 i;
+char *tptr, gbuf[CBUFSIZE];
+
+if (*(cptr - 1) != '/') return cptr;                    /* no options present */
+cptr = get_glyph (cptr - 1, tptr = gbuf, 0);            /* re-scan from the '/' */
+while (*tptr == '/') {                                  /* each /X qualifier */
+    tptr++;
+    for (i = 0; opt[i].mask != 0; i++) {
+        if (*tptr == opt[i].let) {
+            *val = *val | opt[i].mask;
+            break;
+            }
+        }
+    if (opt[i].mask == 0) return NULL;                  /* letter not in table */
+    tptr++;
+    }
+if (*tptr != 0) return NULL;                            /* trailing junk */
+return cptr;
+}
+
+/* Print PAL hardware opcode symbolically
+
+   Returns SCPE_ARG if the opcode is not a hardware PAL instruction;
+   otherwise -3, presumably the SCP -(length - 1) convention for a
+   4-byte instruction (TODO confirm against the simulator framework) */
+
+t_stat fprint_pal_hwre (FILE *of, uint32 inst)
+{
+uint32 op, ra, rb;
+
+op = I_GETOP (inst);
+ra = I_GETRA (inst);
+rb = I_GETRB (inst);
+switch (op) {
+
+    case OP_PAL19:                                      /* HW_MFPR */
+    case OP_PAL1D:                                      /* HW_MTPR */
+        fputs ((op == OP_PAL19)? "HW_MFPR": "HW_MTPR", of);
+        fprintf (of, " R%d,%X", ra, inst & M16);        /* reg, IPR number */
+        break;
+
+    case OP_PAL1B:                                      /* HW_LD */
+    case OP_PAL1F:                                      /* HW_ST */
+        fputs ((op == OP_PAL1B)? "HW_LD": "HW_ST", of);
+        fprint_opt_ev5 (of, inst, ld_st_opt);           /* /VAWQPL qualifiers */
+        fprintf (of, " R%d,%X", ra, inst & HW_LD_DSP);
+        if (rb != 31) fprintf (of, "(R%d)", rb);        /* omit (R31) base */
+        break;
+
+    case OP_PAL1E:                                      /* HW_REI */
+        fputs ("HW_REI", of);
+        fprint_opt_ev5 (of, inst, rei_opt);
+        break;
+
+    default:
+        return SCPE_ARG;
+        }
+
+return -3;
+}
+
+/* Parse PAL hardware opcode symbolically
+
+   Inverse of fprint_pal_hwre: assembles mnemonic, qualifiers, register,
+   and displacement/IPR fields into *inst.  Returns SCPE_ARG on any
+   syntax error, otherwise -3 (4-byte instruction, as above) */
+
+t_stat parse_pal_hwre (char *cptr, t_value *inst)
+{
+uint32 i, d, val = 0;
+int32 reg;
+char *tptr, gbuf[CBUFSIZE];
+t_stat r;
+
+cptr = get_glyph (cptr, gbuf, '/');                     /* mnemonic up to '/' */
+for (i = 0; pal_inam[i] != NULL; i++) {
+    if (strcmp (gbuf, pal_inam[i]) == 0) val = pal_ival[i];
+    }
+if (val == 0) return SCPE_ARG;                          /* not a PAL mnemonic */
+switch (I_GETOP (val)) {
+
+    case OP_PAL19:                                      /* HW_MFPR */
+    case OP_PAL1D:                                      /* HW_MTPR */
+        if (*(cptr - 1) == '/') return SCPE_ARG;        /* no qualifiers here */
+        cptr = get_glyph (cptr, gbuf, ',');             /* get reg */
+        if ((reg = parse_reg (gbuf)) < 0) return SCPE_ARG;
+        val = val | (reg << I_V_RA) | (reg << I_V_RB);
+        cptr = get_glyph (cptr, gbuf, 0);               /* get ipr */
+        d = (uint32) get_uint (gbuf, 16, M16, &r);
+        if (r != SCPE_OK) return r;
+        val = val | d;
+        break;
+
+    case OP_PAL1B:                                      /* HW_LD */
+    case OP_PAL1F:                                      /* HW_ST */
+        cptr = parse_opt_ev5 (cptr, &val, ld_st_opt);
+        if (cptr == NULL) return SCPE_ARG;
+        cptr = get_glyph (cptr, gbuf, ',');             /* get reg */
+        if ((reg = parse_reg (gbuf)) < 0) return SCPE_ARG;
+        val = val | (reg << I_V_RA);
+        cptr = get_glyph (cptr, gbuf, 0);               /* disp, opt (Rb) */
+        d = (uint32) strtotv (gbuf, &tptr, 16);
+        if ((gbuf == tptr) || (d > HW_LD_DSP)) return SCPE_ARG;
+        val = val | d;
+        if (*tptr == '(') {                             /* base register? */
+            tptr = get_glyph (tptr + 1, gbuf, ')');
+            if ((reg = parse_reg (gbuf)) < 0) return SCPE_ARG;
+            val = val | (reg << I_V_RB);
+            }
+        else val = val | (31 << I_V_RB);                /* default base R31 */
+        break;
+
+    case OP_PAL1E:                                      /* HW_REI */
+        cptr = parse_opt_ev5 (cptr, &val, rei_opt);
+        if (cptr == NULL) return SCPE_ARG;
+        break;
+
+    default:
+        return SCPE_ARG;
+        }
+
+*inst = val;
+if (*cptr != 0) return SCPE_ARG;                        /* trailing junk */
+return -3;
+}
+
diff --git a/alpha/alpha_ev5_tlb.c b/alpha/alpha_ev5_tlb.c
new file mode 100644
index 00000000..9cfded4a
--- /dev/null
+++ b/alpha/alpha_ev5_tlb.c
@@ -0,0 +1,566 @@
+/* alpha_ev5_tlb.c - Alpha EV5 TLB simulator
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ EV5 was the second generation Alpha CPU. It was a four-way, in order issue
+ CPU with onchip primary instruction and data caches, an onchip second level
+ cache, and support for an offchip third level cache. EV56 was a shrink, with
+ added support for byte and word operations. EV56PC was a version of EV56
+ without the onchip second level cache.
+
+ This module contains the routines for
+
+ itlb_lookup lookup vpn in instruction TLB
+ itlb_load load pte into instruction TLB
+ itlb_read read pte from instruction TLB using NLU pointer
+ itlb_set_asn set iasn
+ itlb_set_cm set icm
+ itlb_set_spage set ispage
+ dtlb_lookup lookup vpn in data TLB
+ dtlb_load load pte into data TLB
+ dtlb_read read pte from data TLB using NLU pointer
+ dtlb_set_asn set dasn
+ dtlb_set_cm set dcm
+ dtlb_set_spage set dspage
+ tlb_ia TLB invalidate all
+ tlb_is TLB invalidate single
+ tlb_set_cm TLB set current mode
+*/
+
+#include "alpha_defs.h"
+#include "alpha_ev5_defs.h"
+
+/* TLBs are kept sorted by (asn, tag) so lookup can binary-search;
+   re-sort after any change to tags or ASNs */
+#define ITLB_SORT       qsort (itlb, ITLB_SIZE, sizeof (TLBENT), &tlb_comp);
+#define DTLB_SORT       qsort (dtlb, DTLB_SIZE, sizeof (TLBENT), &tlb_comp);
+#define TLB_ESIZE       (sizeof (TLBENT)/sizeof (uint32))
+#define MM_RW(x)        (((x) & PTE_FOW)? EXC_W: EXC_R)
+
+uint32 itlb_cm = 0;                                     /* current modes */
+uint32 itlb_spage = 0;                                  /* superpage enables */
+uint32 itlb_asn = 0;                                    /* current ASN */
+uint32 itlb_nlu = 0;                                    /* not-last-used ptr */
+TLBENT i_mini_tlb;                                      /* 1-entry lookup cache */
+TLBENT itlb[ITLB_SIZE];
+uint32 dtlb_cm = 0;
+uint32 dtlb_spage = 0;
+uint32 dtlb_asn = 0;
+uint32 dtlb_nlu = 0;
+TLBENT d_mini_tlb;
+TLBENT dtlb[DTLB_SIZE];
+
+uint32 cm_eacc = ACC_E (MODE_K);                        /* precomputed */
+uint32 cm_racc = ACC_R (MODE_K);                        /* access checks */
+uint32 cm_wacc = ACC_W (MODE_K);
+uint32 cm_macc = ACC_M (MODE_K);
+
+extern t_uint64 p1;
+extern jmp_buf save_env;
+
+uint32 mm_exc (uint32 macc);
+void tlb_inval (TLBENT *tlbp);
+t_stat itlb_reset (void);
+t_stat dtlb_reset (void);
+int tlb_comp (const void *e1, const void *e2);
+t_stat tlb_reset (DEVICE *dptr);
+
+/* TLB data structures
+
+   tlb_dev      pager device descriptor
+   tlb_unit     pager units
+   tlb_reg      pager register list
+*/
+
+UNIT tlb_unit = { UDATA (NULL, 0, 0) };
+
+REG tlb_reg[] = {
+    { HRDATA (ICM, itlb_cm, 2) },
+    { HRDATA (ISPAGE, itlb_spage, 2), REG_HRO },
+    { HRDATA (IASN, itlb_asn, ITB_ASN_WIDTH) },
+    { HRDATA (INLU, itlb_nlu, ITLB_WIDTH) },
+    { BRDATA (IMINI, &i_mini_tlb, 16, 32, TLB_ESIZE) },
+    { BRDATA (ITLB, itlb, 16, 32, ITLB_SIZE*TLB_ESIZE) },
+    { HRDATA (DCM, dtlb_cm, 2) },
+    { HRDATA (DSPAGE, dtlb_spage, 2), REG_HRO },
+    { HRDATA (DASN, dtlb_asn, DTB_ASN_WIDTH) },
+    { HRDATA (DNLU, dtlb_nlu, DTLB_WIDTH) },
+    { BRDATA (DMINI, &d_mini_tlb, 16, 32, TLB_ESIZE) },
+    { BRDATA (DTLB, dtlb, 16, 32, DTLB_SIZE*TLB_ESIZE) },
+    { NULL }
+    };
+
+DEVICE tlb_dev = {
+    "TLB", &tlb_unit, tlb_reg, NULL,
+    1, 0, 0, 1, 0, 0,
+    NULL, NULL, &tlb_reset,
+    NULL, NULL, NULL
+    };
+
+/* Translate address, instruction stream
+
+   Inputs:
+        va      =       virtual address
+   Outputs:
+        pa      =       physical address
+   Aborts (via ABORT1) on bad VA, access violation, or TLB miss.
+   Checked in order: VA canonicality, 43b superpage, 32b superpage,
+   then the ITLB proper */
+
+t_uint64 trans_i (t_uint64 va)
+{
+uint32 va_sext = VA_GETSEXT (va);
+uint32 vpn = VA_GETVPN (va);
+TLBENT *tlbp;
+
+if ((va_sext != 0) && (va_sext != VA_M_SEXT))           /* invalid virt addr? */
+    ABORT1 (va, EXC_BVA + EXC_E);
+if ((itlb_spage & SPEN_43) && VPN_GETSP43 (vpn) == 2) { /* 43b superpage? */
+    if (itlb_cm != MODE_K) ABORT1 (va, EXC_ACV + EXC_E);    /* kernel only */
+    return (va & SP43_MASK);
+    }
+if ((itlb_spage & SPEN_32) && (VPN_GETSP32 (vpn) == 0x1FFE)) {
+    if (itlb_cm != MODE_K) ABORT1 (va, EXC_ACV + EXC_E);
+    return (va & SP32_MASK);                            /* 32b superpage? */
+    }
+if (!(tlbp = itlb_lookup (vpn)))                        /* lookup vpn; miss? */
+    ABORT1 (va, EXC_TBM + EXC_E);                       /* abort reference */
+if (cm_eacc & ~tlbp->pte)                               /* check access */
+    ABORT1 (va, mm_exc (cm_eacc & ~tlbp->pte) | EXC_E);
+return PHYS_ADDR (tlbp->pfn, va);                       /* return phys addr */
+}
+
+/* Translate address, data stream; acc supplies the required PTE access
+   bits (read or write form), and MM_RW maps it to EXC_R/EXC_W on abort */
+
+t_uint64 trans_d (t_uint64 va, uint32 acc)
+{
+uint32 va_sext = VA_GETSEXT (va);
+uint32 vpn = VA_GETVPN (va);
+TLBENT *tlbp;
+
+if ((va_sext != 0) && (va_sext != VA_M_SEXT))           /* invalid virt addr? */
+    ABORT1 (va, EXC_BVA + MM_RW (acc));
+if ((dtlb_spage & SPEN_43) && (VPN_GETSP43 (vpn) == 2)) {
+    if (dtlb_cm != MODE_K) ABORT1 (va, EXC_ACV + MM_RW (acc));
+    return (va & SP43_MASK);                            /* 43b superpage? */
+    }
+if ((dtlb_spage & SPEN_32) && (VPN_GETSP32 (vpn) == 0x1FFE)) {
+    if (dtlb_cm != MODE_K) ABORT1 (va, EXC_ACV + MM_RW (acc));
+    return (va & SP32_MASK);                            /* 32b superpage? */
+    }
+if (!(tlbp = dtlb_lookup (vpn)))                        /* lookup vpn; miss? */
+    ABORT1 (va, EXC_TBM + MM_RW (acc));                 /* abort reference */
+if (acc & ~tlbp->pte)                                   /* check access */
+    ABORT1 (va, mm_exc (acc & ~tlbp->pte) | MM_RW (acc));
+return PHYS_ADDR (tlbp->pfn, va);                       /* return phys addr */
+}
+
+/* Generate a memory management error code, based on the access check bits not
+ set in PTE
+
+ - If the access check bits, without FOx and V, fail, then ACV
+ - If FOx set, then FOx
+ - Otherwise, TNV */
+
+uint32 mm_exc (uint32 not_set)
+{
+uint32 tacc;
+
+tacc = not_set & ~(PTE_FOR | PTE_FOW | PTE_FOE | PTE_V);
+if (tacc) return EXC_ACV;
+tacc = not_set & (PTE_FOR | PTE_FOW | PTE_FOE);
+if (tacc) return EXC_FOX;
+return EXC_TNV;
+}
+
+/* TLB invalidate single - remove the entry mapping va (if any) from the
+   ITLB and/or DTLB per flags; the mini-TLB cache is always flushed and
+   the table re-sorted so binary search stays valid */
+
+void tlb_is (t_uint64 va, uint32 flags)
+{
+uint32 va_sext = VA_GETSEXT (va);
+uint32 vpn = VA_GETVPN (va);
+TLBENT *itlbp, *dtlbp;
+
+if ((va_sext != 0) && (va_sext != VA_M_SEXT)) return;   /* non-canonical: noop */
+if ((flags & TLB_CI) && (itlbp = itlb_lookup (vpn))) {
+    tlb_inval (itlbp);
+    tlb_inval (&i_mini_tlb);
+    ITLB_SORT;
+    }
+if ((flags & TLB_CD) && (dtlbp = dtlb_lookup (vpn))) {
+    tlb_inval (dtlbp);
+    tlb_inval (&d_mini_tlb);
+    DTLB_SORT;
+    }
+return;
+}
+
+/* TLB invalidate all - with TLB_CA, clear everything (full reset);
+   without it, clear only entries whose ASM (address space match)
+   bit is not set, i.e. process-private mappings */
+
+void tlb_ia (uint32 flags)
+{
+uint32 i;
+
+if (flags & TLB_CA) {                                   /* invalidate all? */
+    if (flags & TLB_CI) itlb_reset ();
+    if (flags & TLB_CD) dtlb_reset ();
+    return;
+    }
+if (flags & TLB_CI) {                                   /* non-ASM only */
+    for (i = 0; i < ITLB_SIZE; i++) {
+        if (!(itlb[i].pte & PTE_ASM)) tlb_inval (&itlb[i]);
+        }
+    tlb_inval (&i_mini_tlb);
+    ITLB_SORT;
+    }
+if (flags & TLB_CD) {
+    for (i = 0; i < DTLB_SIZE; i++) {
+        if (!(dtlb[i].pte & PTE_ASM)) tlb_inval (&dtlb[i]);
+        }
+    tlb_inval (&d_mini_tlb);
+    DTLB_SORT;
+    }
+return;
+}
+
+/* TLB lookup - binary search the (asn, tag)-sorted ITLB for vpn under
+   the current ASN, with granularity-hint mask applied to the tag
+   compare.  A hit refills the one-entry mini-TLB (fast path on the
+   next call) and advances the NLU pointer past the hit entry.
+   Returns NULL on miss */
+
+TLBENT *itlb_lookup (uint32 vpn)
+{
+int32 p, hi, lo;
+
+if (vpn == i_mini_tlb.tag) return &i_mini_tlb;          /* cached hit? */
+lo = 0;                                                 /* initial bounds */
+hi = ITLB_SIZE - 1;
+do {
+    p = (lo + hi) >> 1;                                 /* probe */
+    if ((itlb_asn == itlb[p].asn) &&
+        (((vpn ^ itlb[p].tag) &
+        ~((uint32) itlb[p].gh_mask)) == 0)) {           /* match to TLB? */
+        i_mini_tlb.tag = vpn;
+        i_mini_tlb.pte = itlb[p].pte;
+        i_mini_tlb.pfn = itlb[p].pfn;
+        itlb_nlu = itlb[p].idx + 1;                     /* hit entry is "used" */
+        if (itlb_nlu >= ITLB_SIZE) itlb_nlu = 0;
+        return &i_mini_tlb;
+        }
+    if ((itlb_asn < itlb[p].asn) ||
+        ((itlb_asn == itlb[p].asn) && (vpn < itlb[p].tag)))
+        hi = p - 1;                                     /* go down? p is upper */
+    else lo = p + 1;                                    /* go up? p is lower */
+    }
+while (lo <= hi);
+return NULL;
+}
+
+/* Data TLB lookup - same algorithm as itlb_lookup, over the DTLB */
+
+TLBENT *dtlb_lookup (uint32 vpn)
+{
+int32 p, hi, lo;
+
+if (vpn == d_mini_tlb.tag) return &d_mini_tlb;          /* cached hit? */
+lo = 0;                                                 /* initial bounds */
+hi = DTLB_SIZE - 1;
+do {
+    p = (lo + hi) >> 1;                                 /* probe */
+    if ((dtlb_asn == dtlb[p].asn) &&
+        (((vpn ^ dtlb[p].tag) &
+        ~((uint32) dtlb[p].gh_mask)) == 0)) {           /* match to TLB? */
+        d_mini_tlb.tag = vpn;
+        d_mini_tlb.pte = dtlb[p].pte;
+        d_mini_tlb.pfn = dtlb[p].pfn;
+        dtlb_nlu = dtlb[p].idx + 1;                     /* hit entry is "used" */
+        if (dtlb_nlu >= DTLB_SIZE) dtlb_nlu = 0;
+        return &d_mini_tlb;
+        }
+    if ((dtlb_asn < dtlb[p].asn) ||
+        ((dtlb_asn == dtlb[p].asn) && (vpn < dtlb[p].tag)))
+        hi = p - 1;                                     /* go down? p is upper */
+    else lo = p + 1;                                    /* go up? p is lower */
+    }
+while (lo <= hi);
+return NULL;
+}
+
+/* Load TLB entry at NLU pointer, advance NLU pointer
+
+   The fault-on bits are stored inverted (XOR) so the access check
+   "acc & ~pte" in trans_i/trans_d fires when a FOx bit is SET in the
+   source PTE; the read routines invert them back.
+   Fixed: the inversion mask read PTE_FOR|PTE_FOR|PTE_FOE (PTE_FOR
+   duplicated), so fault-on-write was never inverted */
+
+TLBENT *itlb_load (uint32 vpn, t_uint64 l3pte)
+{
+uint32 i, gh;
+
+for (i = 0; i < ITLB_SIZE; i++) {
+    if (itlb[i].idx == itlb_nlu) {                      /* victim slot */
+        TLBENT *tlbp = itlb + i;
+        itlb_nlu = itlb_nlu + 1;
+        if (itlb_nlu >= ITLB_SIZE) itlb_nlu = 0;
+        tlbp->tag = vpn;
+        tlbp->pte = (uint32) (l3pte & PTE_MASK) ^ (PTE_FOR|PTE_FOW|PTE_FOE);
+        tlbp->pfn = ((uint32) (l3pte >> PTE_V_PFN)) & PFN_MASK;
+        tlbp->asn = itlb_asn;
+        gh = PTE_GETGH (tlbp->pte);                     /* granularity hint */
+        tlbp->gh_mask = (1u << (3 * gh)) - 1;
+        tlb_inval (&i_mini_tlb);
+        ITLB_SORT;
+        return tlbp;
+        }
+    }
+fprintf (stderr, "%%ITLB entry not found, itlb_nlu = %d\n", itlb_nlu);
+ABORT (-SCPE_IERR);
+return NULL;
+}
+
+/* Load data TLB entry at NLU pointer - mirror of itlb_load.
+   Fixed two copy-paste defects: the NLU wrap test compared against
+   ITLB_SIZE instead of DTLB_SIZE, and the fault-on inversion mask
+   duplicated PTE_FOR instead of including PTE_FOW */
+
+TLBENT *dtlb_load (uint32 vpn, t_uint64 l3pte)
+{
+uint32 i, gh;
+
+for (i = 0; i < DTLB_SIZE; i++) {
+    if (dtlb[i].idx == dtlb_nlu) {                      /* victim slot */
+        TLBENT *tlbp = dtlb + i;
+        dtlb_nlu = dtlb_nlu + 1;
+        if (dtlb_nlu >= DTLB_SIZE) dtlb_nlu = 0;
+        tlbp->tag = vpn;
+        tlbp->pte = (uint32) (l3pte & PTE_MASK) ^ (PTE_FOR|PTE_FOW|PTE_FOE);
+        tlbp->pfn = ((uint32) (l3pte >> PTE_V_PFN)) & PFN_MASK;
+        tlbp->asn = dtlb_asn;
+        gh = PTE_GETGH (tlbp->pte);                     /* granularity hint */
+        tlbp->gh_mask = (1u << (3 * gh)) - 1;
+        tlb_inval (&d_mini_tlb);
+        DTLB_SORT;
+        return tlbp;
+        }
+    }
+fprintf (stderr, "%%DTLB entry not found, dtlb_nlu = %d\n", dtlb_nlu);
+ABORT (-SCPE_IERR);
+return NULL;
+}
+
+/* Read TLB entry at NLU pointer, advance NLU pointer
+
+   Reconstructs the architectural PTE: PFN back into place and the
+   fault-on bits un-inverted (they are stored inverted by itlb_load).
+   Fixed: the un-invert mask duplicated PTE_FOR and omitted PTE_FOW;
+   also use uint32 for the index, matching itlb_load */
+
+t_uint64 itlb_read (void)
+{
+uint32 i;
+
+for (i = 0; i < ITLB_SIZE; i++) {
+    if (itlb[i].idx == itlb_nlu) {
+        TLBENT *tlbp = itlb + i;
+        itlb_nlu = itlb_nlu + 1;
+        if (itlb_nlu >= ITLB_SIZE) itlb_nlu = 0;
+        return (((t_uint64) tlbp->pfn) << PTE_V_PFN) |
+            ((tlbp->pte ^ (PTE_FOR|PTE_FOW|PTE_FOE)) & PTE_MASK);
+        }
+    }
+fprintf (stderr, "%%ITLB entry not found, itlb_nlu = %d\n", itlb_nlu);
+ABORT (-SCPE_IERR);
+return 0;
+}
+
+/* Read data TLB entry at NLU pointer - mirror of itlb_read.
+   Fixed: un-invert mask duplicated PTE_FOR and omitted PTE_FOW;
+   index widened to uint32 for consistency with dtlb_load */
+
+t_uint64 dtlb_read (void)
+{
+uint32 i;
+
+for (i = 0; i < DTLB_SIZE; i++) {
+    if (dtlb[i].idx == dtlb_nlu) {
+        TLBENT *tlbp = dtlb + i;
+        dtlb_nlu = dtlb_nlu + 1;
+        if (dtlb_nlu >= DTLB_SIZE) dtlb_nlu = 0;
+        return (((t_uint64) tlbp->pfn) << PTE_V_PFN) |
+            ((tlbp->pte ^ (PTE_FOR|PTE_FOW|PTE_FOE)) & PTE_MASK);
+        }
+    }
+fprintf (stderr, "%%DTLB entry not found, dtlb_nlu = %d\n", dtlb_nlu);
+ABORT (-SCPE_IERR);
+return 0;
+}
+
+/* Set ASN - global (ASM) entries are valid in every address space, so
+   their stored ASN is rewritten to the new value to keep them matchable
+   (and the sort keys consistent); the mini-TLB is flushed */
+
+void itlb_set_asn (uint32 asn)
+{
+int32 i;
+
+itlb_asn = asn;
+for (i = 0; i < ITLB_SIZE; i++) {
+    if (itlb[i].pte & PTE_ASM) itlb[i].asn = asn;       /* retag globals */
+    }
+tlb_inval (&i_mini_tlb);
+ITLB_SORT;
+return;
+}
+
+/* Set data-stream ASN - mirror of itlb_set_asn */
+
+void dtlb_set_asn (uint32 asn)
+{
+int32 i;
+
+dtlb_asn = asn;
+for (i = 0; i < DTLB_SIZE; i++) {
+    if (dtlb[i].pte & PTE_ASM) dtlb[i].asn = asn;       /* retag globals */
+    }
+tlb_inval (&d_mini_tlb);
+DTLB_SORT;
+return;
+}
+
+/* Set superpage enables (SPEN_43/SPEN_32 bits, tested in trans_i/trans_d) */
+
+void itlb_set_spage (uint32 spage)
+{
+itlb_spage = spage;
+return;
+}
+
+void dtlb_set_spage (uint32 spage)
+{
+dtlb_spage = spage;
+return;
+}
+
+/* Set current mode - also refresh the precomputed access-check masks
+   used by the translate routines.
+   NOTE(review): cm_macc is initialized for MODE_K but never recomputed
+   here; confirm whether a consumer of cm_macc exists and needs updating */
+
+void itlb_set_cm (uint32 mode)
+{
+itlb_cm = mode;
+cm_eacc = ACC_E (mode);                                 /* execute check */
+return;
+}
+
+void dtlb_set_cm (uint32 mode)
+{
+dtlb_cm = mode;
+cm_racc = ACC_R (mode);                                 /* read check */
+cm_wacc = ACC_W (mode);                                 /* write check */
+return;
+}
+
+/* Set (cm >= 0) or refresh (cm < 0) the current mode of both TLBs;
+   returns the resulting data-stream mode */
+
+uint32 tlb_set_cm (int32 cm)
+{
+if (cm >= 0) {
+    itlb_set_cm (cm);
+    dtlb_set_cm (cm);
+    return cm;
+    }
+itlb_set_cm (itlb_cm);                                  /* recompute masks only */
+dtlb_set_cm (dtlb_cm);
+return dtlb_cm;
+}
+
+/* Invalidate TLB entry - asn is set to the (unique) slot index so the
+   sort keys of invalid entries never collide */
+
+void tlb_inval (TLBENT *tlbp)
+{
+tlbp->tag = INV_TAG;
+tlbp->pte = 0;
+tlbp->pfn = 0;
+tlbp->asn = tlbp->idx;                                  /* unique sort key */
+tlbp->gh_mask = 0;
+return;
+}
+
+/* Compare routine for qsort - orders entries by ASN, then by tag */
+
+int tlb_comp (const void *e1, const void *e2)
+{
+const TLBENT *a = (const TLBENT *) e1;
+const TLBENT *b = (const TLBENT *) e2;
+
+if (a->asn != b->asn)                                   /* primary key: ASN */
+    return (a->asn > b->asn)? +1: -1;
+if (a->tag != b->tag)                                   /* secondary key: tag */
+    return (a->tag > b->tag)? +1: -1;
+return 0;
+}
+
+/* ITLB reset - invalidate every slot; asn = slot index keeps the
+   sort keys distinct, and idx records the slot's NLU identity */
+
+t_stat itlb_reset (void)
+{
+int32 i;
+
+itlb_nlu = 0;
+for (i = 0; i < ITLB_SIZE; i++) {
+    itlb[i].tag = INV_TAG;
+    itlb[i].pte = 0;
+    itlb[i].pfn = 0;
+    itlb[i].asn = i;                                    /* unique sort key */
+    itlb[i].gh_mask = 0;
+    itlb[i].idx = i;                                    /* NLU identity */
+    }
+tlb_inval (&i_mini_tlb);
+return SCPE_OK;
+}
+/* DTLB reset - mirror of itlb_reset */
+
+t_stat dtlb_reset (void)
+{
+int32 i;
+
+dtlb_nlu = 0;
+for (i = 0; i < DTLB_SIZE; i++) {
+    dtlb[i].tag = INV_TAG;
+    dtlb[i].pte = 0;
+    dtlb[i].pfn = 0;
+    dtlb[i].asn = i;                                    /* unique sort key */
+    dtlb[i].gh_mask = 0;
+    dtlb[i].idx = i;                                    /* NLU identity */
+    }
+tlb_inval (&d_mini_tlb);
+return SCPE_OK;
+}
+
+/* SimH reset - device reset routine registered in tlb_dev */
+
+t_stat tlb_reset (DEVICE *dptr)
+{
+itlb_reset ();
+dtlb_reset ();
+return SCPE_OK;
+}
+
+/* Show TLB entry or entries (SCP SHOW routine)
+
+   val selects the table (nonzero = DTLB, zero = ITLB); desc, if present,
+   is a decimal index range within that table, else all entries print */
+
+t_stat cpu_show_tlb (FILE *of, UNIT *uptr, int32 val, void *desc)
+{
+t_addr lo, hi;
+uint32 lnt;
+TLBENT *tlbp;
+DEVICE *dptr;
+char *cptr = (char *) desc;
+
+lnt = (val)? DTLB_SIZE: ITLB_SIZE;
+dptr = find_dev_from_unit (uptr);
+if (dptr == NULL) return SCPE_IERR;
+if (cptr) {                                             /* explicit range? */
+    cptr = get_range (dptr, cptr, &lo, &hi, 10, lnt, 0);
+    if ((cptr == NULL) || (*cptr != 0)) return SCPE_ARG;
+    }
+else {                                                  /* default: all */
+    lo = 0;
+    hi = lnt - 1;
+    }
+tlbp = (val)? dtlb + lo: itlb + lo;
+
+do {
+    fprintf (of, "TLB %02d\tTAG=%02X/%08X, ", (uint32) lo, tlbp->asn, tlbp->tag);
+    fprintf (of, "MASK=%X, INDX=%d, ", tlbp->gh_mask, tlbp->idx);
+    fprintf (of, "PTE=%04X, PFN=%08X\n", tlbp->pte, tlbp->pfn);
+    tlbp++;
+    lo++;
+    } while (lo <= hi);
+
+return SCPE_OK;
+}
+
diff --git a/alpha/alpha_fpi.c b/alpha/alpha_fpi.c
new file mode 100644
index 00000000..62c7b459
--- /dev/null
+++ b/alpha/alpha_fpi.c
@@ -0,0 +1,776 @@
+/* alpha_fpi.c - Alpha IEEE floating point simulator
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ This module contains the instruction simulators for
+
+ - single precision floating point, S
+ - double precision floating point, T
+
+ Portions of this module (specifically, the convert floating to integer
+ routine and the square root routine) are a derivative work from SoftFloat,
+ written by John Hauser. SoftFloat includes the following license terms:
+
+ Written by John R. Hauser. This work was made possible in part by the
+ International Computer Science Institute, located at Suite 600, 1947 Center
+ Street, Berkeley, California 94704. Funding was partially provided by the
+ National Science Foundation under grant MIP-9311980. The original version
+ of this code was written as part of a project to build a fixed-point vector
+ processor in collaboration with the University of California at Berkeley,
+ overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+ is available through the Web page 'http://www.cs.berkeley.edu/~jhauser/
+ arithmetic/SoftFloat.html'.
+
+ THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
+ been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+ RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+ AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+ COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+ EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+ INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+ OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+ Derivative works are acceptable, even for commercial purposes, so long as
+ (1) the source code for the derivative work includes prominent notice that
+ the work is derivative, and (2) the source code includes prominent notice with
+ these four paragraphs for those parts of this code that are retained.
+*/
+
+#include "alpha_defs.h"
+
+#define UFT_ZERO 0 /* unpacked: zero */
+#define UFT_FIN 1 /* finite */
+#define UFT_DENORM 2 /* denormal */
+#define UFT_INF 3 /* infinity */
+#define UFT_NAN 4 /* not a number */
+
+#define Q_FINITE(x) ((x) <= UFT_FIN) /* finite */
+#define Q_SUI(x) (((x) & I_FTRP) == I_FTRP_SVI)
+
+/* Register format constants */
+
+#define QNAN 0x0008000000000000 /* quiet NaN flag */
+#define CQNAN 0xFFF8000000000000 /* canonical quiet NaN */
+#define FPZERO 0x0000000000000000 /* plus zero (fp) */
+#define FMZERO 0x8000000000000000 /* minus zero (fp) */
+#define FPINF 0x7FF0000000000000 /* plus infinity (fp) */
+#define FMINF 0xFFF0000000000000 /* minus infinity (fp) */
+#define FPMAX 0x7FEFFFFFFFFFFFFF /* plus MAX (fp) */
+#define FMMAX 0xFFEFFFFFFFFFFFFF /* minus MAX (fp) */
+#define IPMAX 0x7FFFFFFFFFFFFFFF /* plus MAX (int) */
+#define IMMAX 0x8000000000000000 /* minus MAX (int) */
+
+/* Unpacked rounding constants */
+
+#define UF_SRND 0x0000008000000000 /* S normal round */
+#define UF_SINF 0x000000FFFFFFFFFF /* S infinity round */
+#define UF_TRND 0x0000000000000400 /* T normal round */
+#define UF_TINF 0x00000000000007FF /* T infinity round */
+
+extern t_uint64 FR[32];
+extern uint32 fpcr;
+extern jmp_buf save_env;
+
+t_bool ieee_unpack (t_uint64 op, UFP *r, uint32 ir);
+void ieee_norm (UFP *r);
+t_uint64 ieee_rpack (UFP *r, uint32 ir, uint32 dp);
+void ieee_trap (uint32 trap, uint32 instenb, uint32 fpcrdsb, uint32 ir);
+int32 ieee_fcmp (t_uint64 a, t_uint64 b, uint32 ir, uint32 signal_nan);
+t_uint64 ieee_cvtst (t_uint64 op, uint32 ir);
+t_uint64 ieee_cvtts (t_uint64 op, uint32 ir);
+t_uint64 ieee_cvtif (t_uint64 val, uint32 ir, uint32 dp);
+t_uint64 ieee_cvtfi (t_uint64 op, uint32 ir);
+t_uint64 ieee_fadd (t_uint64 a, t_uint64 b, uint32 ir, uint32 dp, t_bool sub);
+t_uint64 ieee_fmul (t_uint64 a, t_uint64 b, uint32 ir, uint32 dp);
+t_uint64 ieee_fdiv (t_uint64 a, t_uint64 b, uint32 ir, uint32 dp);
+uint32 estimateSqrt32 (uint32 exp, uint32 a);
+t_uint64 estimateDiv128 (t_uint64 hi, t_uint64 lo, t_uint64 dvr);
+
+extern t_uint64 uemul64 (t_uint64 a, t_uint64 b, t_uint64 *hi);
+extern t_uint64 ufdiv64 (t_uint64 dvd, t_uint64 dvr, uint32 prec, uint32 *sticky);
+t_uint64 fsqrt64 (t_uint64 frac, int32 exp);
+
+/* IEEE S load */
+
+/* Expand a 32-bit S-format value (low bits of op) to 64-bit register
+   format: the 8-bit exponent is rebiased to the 11-bit register
+   exponent (all-ones maps to the register NaN/inf code, zero passes
+   through for zero/denormal), and the 23-bit fraction is left
+   justified into the register fraction field. */
+
+t_uint64 op_lds (t_uint64 op)
+{
+uint32 exp = S_GETEXP (op);                             /* get exponent */
+
+if (exp == S_NAN) exp = FPR_NAN;                        /* inf or NaN? */
+else if (exp != 0) exp = exp + T_BIAS - S_BIAS;         /* zero or denorm? */
+return (((t_uint64) (op & S_SIGN))? FPR_SIGN: 0) |      /* reg format */
+    (((t_uint64) exp) << FPR_V_EXP) |
+    (((t_uint64) (op & ~(S_SIGN|S_EXP))) << S_V_FRAC);
+}
+
+/* IEEE S store */
+
+/* Inverse of op_lds: compress a 64-bit register-format value back to
+   32-bit S format.  The 11-bit exponent is rebiased to 8 bits (the
+   register NaN/inf code maps to the S NaN exponent) and the high 23
+   fraction bits are kept.  Result is in the low 32 bits. */
+
+t_uint64 op_sts (t_uint64 op)
+{
+uint32 sign = FPR_GETSIGN (op)? S_SIGN: 0;
+uint32 frac = ((uint32) (op >> S_V_FRAC)) & M32;
+uint32 exp = FPR_GETEXP (op);
+
+if (exp == FPR_NAN) exp = S_NAN;                        /* inf or NaN? */
+else if (exp != 0) exp = exp + S_BIAS - T_BIAS;         /* non-zero? */
+exp = (exp & S_M_EXP) << S_V_EXP;
+return (t_uint64) (sign | exp | (frac & ~(S_SIGN|S_EXP)));
+}
+
+/* IEEE floating operate */
+
+/* IEEE floating operate instruction dispatcher.
+
+   ir = instruction word; the function field selects the operation,
+   Ra/Rb are the source registers and Rc the destination.
+
+   The result is masked to 64 bits and written to F[rc] unless rc is
+   register 31 (writes discarded).  An unrecognized function with the
+   /X source qualifier aborts with a reserved instruction exception;
+   otherwise F[rc] is rewritten with its own value (no change). */
+
+void ieee_fop (uint32 ir)
+{
+UFP a, b;
+uint32 ftpa, ftpb, fnc, ra, rb, rc;
+t_uint64 res;
+
+fnc = I_GETFFNC (ir);                                   /* get function */
+ra = I_GETRA (ir);                                      /* get registers */
+rb = I_GETRB (ir);
+rc = I_GETRC (ir);
+switch (fnc) {                                          /* case on func */
+
+    case 0x00:                                          /* ADDS */
+        res = ieee_fadd (FR[ra], FR[rb], ir, DT_S, 0);
+        break;
+
+    case 0x01:                                          /* SUBS */
+        res = ieee_fadd (FR[ra], FR[rb], ir, DT_S, 1);
+        break;
+
+    case 0x02:                                          /* MULS */
+        res = ieee_fmul (FR[ra], FR[rb], ir, DT_S);
+        break;
+
+    case 0x03:                                          /* DIVS */
+        res = ieee_fdiv (FR[ra], FR[rb], ir, DT_S);
+        break;
+
+    case 0x20:                                          /* ADDT */
+        res = ieee_fadd (FR[ra], FR[rb], ir, DT_T, 0);
+        break;
+
+    case 0x21:                                          /* SUBT */
+        res = ieee_fadd (FR[ra], FR[rb], ir, DT_T, 1);
+        break;
+
+    case 0x22:                                          /* MULT */
+        res = ieee_fmul (FR[ra], FR[rb], ir, DT_T);
+        break;
+
+    case 0x23:                                          /* DIVT */
+        res = ieee_fdiv (FR[ra], FR[rb], ir, DT_T);
+        break;
+
+    case 0x24:                                          /* CMPTUN */
+        ftpa = ieee_unpack (FR[ra], &a, ir);            /* unpack */
+        ftpb = ieee_unpack (FR[rb], &b, ir);
+        if ((ftpa == UFT_NAN) || (ftpb == UFT_NAN))     /* if NaN, T */
+            res = FP_TRUE;
+        else res = 0;
+        break;
+
+    case 0x25:                                          /* CMPTEQ */
+        if (ieee_fcmp (FR[ra], FR[rb], ir, 0) == 0) res = FP_TRUE;
+        else res = 0;
+        break;
+
+    case 0x26:                                          /* CMPTLT */
+        if (ieee_fcmp (FR[ra], FR[rb], ir, 1) < 0) res = FP_TRUE;
+        else res = 0;
+        break;
+
+    case 0x27:                                          /* CMPTLE */
+        if (ieee_fcmp (FR[ra], FR[rb], ir, 1) <= 0) res = FP_TRUE;
+        else res = 0;
+        break;
+
+    case 0x2C:                                          /* CVTST, CVTTS */
+        if (ir & 0x2000) res = ieee_cvtst (FR[rb], ir); /* CVTST */
+        else res = ieee_cvtts (FR[rb], ir);             /* CVTTS */
+        break;
+
+    case 0x2F:                                          /* CVTTQ */
+        res = ieee_cvtfi (FR[rb], ir);
+        break;
+
+    case 0x3C:                                          /* CVTQS */
+        res = ieee_cvtif (FR[rb], ir, DT_S);
+        break;
+
+    case 0x3E:                                          /* CVTQT */
+        res = ieee_cvtif (FR[rb], ir, DT_T);
+        break;
+
+    default:
+        if ((ir & I_FSRC) == I_FSRC_X) ABORT (EXC_RSVI);
+        res = FR[rc];
+        break;
+        }
+
+if (rc != 31) FR[rc] = res & M64;
+return;
+}
+
+/* IEEE S to T convert - LDS doesn't handle denorms correctly */
+
+/* Convert S to T.  Needed because LDS (op_lds) does not expand
+   denormals correctly: a denormal input is normalized by ieee_unpack
+   and repacked with the exponent rebased to T.  Any other input is
+   already in correct register (T) form and is returned unchanged. */
+
+t_uint64 ieee_cvtst (t_uint64 op, uint32 ir)
+{
+UFP b;
+uint32 ftpb;
+
+ftpb = ieee_unpack (op, &b, ir);                        /* unpack; norm dnorm */
+if (ftpb == UFT_DENORM) {                               /* denormal? */
+    b.exp = b.exp + T_BIAS - S_BIAS;                    /* change 0 exp to T */
+    return ieee_rpack (&b, ir, DT_T);                   /* round, pack */
+    }
+else return op;                                         /* identity */
+}
+
+/* IEEE T to S convert */
+
+t_uint64 ieee_cvtts (t_uint64 op, uint32 ir)
+{
+UFP ufp;
+uint32 ftype;
+
+ftype = ieee_unpack (op, &ufp, ir);                     /* unpack operand */
+switch (ftype) {
+
+    case UFT_ZERO:                                      /* zero or finite? */
+    case UFT_FIN:
+        return ieee_rpack (&ufp, ir, DT_S);             /* round, pack to S */
+
+    case UFT_NAN:                                       /* NaN? make quiet */
+        return op | QNAN;
+
+    case UFT_INF:                                       /* inf? unchanged */
+        return op;
+
+    default:                                            /* denorm? 0 */
+        return 0;
+        }
+}
+
+/* IEEE floating compare
+
+ - Take care of NaNs
+ - Force -0 to +0
+ - Then normal compare will work (even on inf and denorms) */
+
+/* IEEE floating compare.
+
+   Returns <0 if s1 < s2, 0 if equal, >0 if s1 > s2.  If either
+   operand is a NaN, +1 is returned so every ordered comparison
+   fails; an invalid-operation trap is also signalled when trap_nan
+   is set (used by CMPTLT/CMPTLE but not CMPTEQ).
+
+   -0 is forced to +0 so that signed zeros compare equal.  After the
+   NaN and zero fixups, field-by-field comparison works even for
+   infinities and (normalized) denormals; the (sign ^ ...) form
+   reverses the comparison sense for negative operands. */
+
+int32 ieee_fcmp (t_uint64 s1, t_uint64 s2, uint32 ir, uint32 trap_nan)
+{
+UFP a, b;
+uint32 ftpa, ftpb;
+
+ftpa = ieee_unpack (s1, &a, ir);
+ftpb = ieee_unpack (s2, &b, ir);
+if ((ftpa == UFT_NAN) || (ftpb == UFT_NAN)) {           /* NaN involved? */
+    if (trap_nan) ieee_trap (TRAP_INV, 1, FPCR_INVD, ir);
+    return +1;                                          /* force failure */
+    }
+if (ftpa == UFT_ZERO) a.sign = 0;                       /* only +0 allowed */
+if (ftpb == UFT_ZERO) b.sign = 0;
+if (a.sign != b.sign) return (a.sign? -1: +1);          /* unequal signs? */
+if (a.exp != b.exp) return ((a.sign ^ (a.exp < b.exp))? -1: +1);
+if (a.frac != b.frac) return ((a.sign ^ (a.frac < b.frac))? -1: +1);
+return 0;
+}
+
+/* IEEE integer to floating convert */
+
+t_uint64 ieee_cvtif (t_uint64 val, uint32 ir, uint32 dp)
+{
+UFP r;
+
+if (val == 0) return 0;                                 /* zero maps to +0 */
+r.sign = (val & FPR_SIGN)? 1: 0;                        /* remember sign */
+r.frac = r.sign? NEG_Q (val): val;                      /* magnitude of val */
+r.exp = 63 + T_BIAS;                                    /* integer scaling */
+ieee_norm (&r);                                         /* normalize */
+return ieee_rpack (&r, ir, dp);                         /* round and pack */
+}
+
+/* IEEE floating to integer convert - rounding code from SoftFloat
+ The Alpha architecture specifies return of the low order bits of
+ the true result, whereas the IEEE standard specifies the return
+ of the maximum plus or minus value */
+
+/* Convert T floating to quadword integer (CVTTQ).
+
+   Unpacks the operand, aligns the fraction to integer position while
+   collecting lost bits in 'sticky', rounds per the instruction
+   rounding mode, and signals IOV (if /V) on overflow and INE (if
+   /SUI) on any rounding.  On overflow the low 64 bits of the true
+   result are returned, per the Alpha convention noted above. */
+
+t_uint64 ieee_cvtfi (t_uint64 op, uint32 ir)
+{
+UFP a;
+t_uint64 sticky;
+uint32 rndm, ftpa, ovf;
+int32 ubexp;
+
+ftpa = ieee_unpack (op, &a, ir);                        /* unpack */
+if (!Q_FINITE (ftpa)) {                                 /* inf, NaN, dnorm? */
+    ieee_trap (TRAP_INV, 1, FPCR_INVD, ir);             /* inv operation */
+    return 0;
+    }
+if (ftpa == UFT_ZERO) return 0;                         /* zero? */
+ovf = 0;                                                /* assume no ovflo */
+ubexp = a.exp - T_BIAS;                                 /* unbiased exp */
+if (ubexp < 0) {                                        /* < 1? */
+    if (ubexp == -1) sticky = a.frac;                   /* [.5,1)? */
+    else sticky = 1;                                    /* (0,.5) */
+    a.frac = 0;
+    }
+else if (ubexp < UF_V_NM) {                             /* in range? */
+    sticky = (a.frac << (64 - (UF_V_NM - ubexp))) & M64;
+    a.frac = a.frac >> (UF_V_NM - ubexp);               /* result */
+    }
+else if (ubexp == UF_V_NM) sticky = 0;                  /* at limit of range? */
+else {
+    if ((ubexp - UF_V_NM) > 63) a.frac = 0;             /* out of range */
+    else a.frac = (a.frac << (ubexp - UF_V_NM)) & M64;
+    ovf = 1;                                            /* overflow */
+    sticky = 0;                                         /* no rounding */
+    }
+rndm = I_GETFRND (ir);                                  /* get round mode */
+if (((rndm == I_FRND_N) && (sticky & Q_SIGN)) ||        /* nearest? */
+    ((rndm == I_FRND_P) && !a.sign && sticky) ||        /* +inf and +? */
+    ((rndm == I_FRND_M) && a.sign && sticky)) {         /* -inf and -? */
+    a.frac = (a.frac + 1) & M64;
+    if (a.frac == 0) ovf = 1;                           /* overflow? */
+    if ((rndm == I_FRND_N) && (sticky == Q_SIGN))       /* round nearest hack */
+        a.frac = a.frac & ~1;
+    }
+if (a.frac > (a.sign? IMMAX: IPMAX)) ovf = 1;           /* overflow? */
+if (ovf) ieee_trap (TRAP_IOV, ir & I_FTRP_V, 0, 0);     /* overflow trap */
+if (ovf || sticky)                                      /* ovflo or round? */
+    ieee_trap (TRAP_INE, Q_SUI (ir), FPCR_INED, ir);
+return (a.sign? NEG_Q (a.frac): a.frac);
+}
+
+/* IEEE floating add
+
+ - Take care of NaNs and infinites
+ - Test for zero (fast exit)
+ - Sticky logic for floating add
+ > If result normalized, sticky in right place
+ > If result carries out, renormalize, retain sticky
+ - Sticky logic for floating subtract
+ > If shift < guard, no sticky bits; 64b result is exact
+ If shift <= 1, result may require extensive normalization,
+ but there are no sticky bits to worry about
+ > If shift >= guard, there is a sticky bit,
+ but normalization is at most 1 place, sticky bit is retained
+ for rounding purposes (but not in low order bit) */
+
+/* IEEE floating add/subtract - see the sticky-bit discussion above.
+
+   s1, s2 = register-format operands
+   ir     = instruction (rounding mode, trap qualifiers)
+   dp     = result precision (DT_S or DT_T)
+   sub    = non-zero for subtract (negates s2's sign first)
+
+   Returns the register-format result; NaN operands propagate
+   quieted, with B taking precedence over A. */
+
+t_uint64 ieee_fadd (t_uint64 s1, t_uint64 s2, uint32 ir, uint32 dp, t_bool sub)
+{
+UFP a, b, t;
+uint32 ftpa, ftpb;
+uint32 sticky, rndm;
+int32 ediff;
+
+ftpa = ieee_unpack (s1, &a, ir);                        /* unpack operands */
+ftpb = ieee_unpack (s2, &b, ir);
+if (ftpb == UFT_NAN) return s2 | QNAN;                  /* B = NaN? quiet B */
+if (ftpa == UFT_NAN) return s1 | QNAN;                  /* A = NaN? quiet A */
+if (sub) b.sign = b.sign ^ 1;                           /* sign of B */
+if (ftpb == UFT_INF) {                                  /* B = inf? */
+    if ((ftpa == UFT_INF) && (a.sign ^ b.sign)) {       /* eff sub of inf? */
+        ieee_trap (TRAP_INV, 1, FPCR_INVD, ir);         /* inv op trap */
+        return CQNAN;                                   /* canonical NaN */
+        }
+    return (sub? (s2 ^ FPR_SIGN): s2);                  /* return B */
+    }
+if (ftpa == UFT_INF) return s1;                         /* A = inf? ret A */
+rndm = I_GETFRND (ir);                                  /* inst round mode */
+if (rndm == I_FRND_D) rndm = FPCR_GETFRND (fpcr);       /* dynamic? use FPCR */
+if (ftpa == UFT_ZERO) {                                 /* A = 0? */
+    if (ftpb != UFT_ZERO) a = b;                        /* B != 0? result is B */
+    else if (a.sign != b.sign)                          /* both 0, subtract? */
+        a.sign = (rndm == I_FRND_M);                    /* +0 unless RM */
+    }
+else if (ftpb != UFT_ZERO) {                            /* s2 != 0? */
+    if ((a.exp < b.exp) ||                              /* s1 < s2? swap */
+        ((a.exp == b.exp) && (a.frac < b.frac))) {
+        t = a;
+        a = b;
+        b = t;
+        }
+    ediff = a.exp - b.exp;                              /* exp diff */
+    if (ediff > 63) b.frac = 1;                         /* >63? retain sticky */
+    else if (ediff) {                                   /* [1,63]? shift */
+        sticky = ((b.frac << (64 - ediff)) & M64)? 1: 0; /* lost bits */
+        b.frac = ((b.frac >> ediff) & M64) | sticky;
+        }
+    if (a.sign ^ b.sign) {                              /* eff sub? */
+        a.frac = (a.frac - b.frac) & M64;               /* subtract fractions */
+        if (a.frac == 0) {                              /* result 0? */
+            a.exp = 0;
+            a.sign = (rndm == I_FRND_M);                /* +0 unless RM */
+            }
+        else ieee_norm (&a);                            /* normalize */
+        }
+    else {                                              /* eff add */
+        a.frac = (a.frac + b.frac) & M64;               /* add frac */
+        if (a.frac < b.frac) {                          /* chk for carry */
+            a.frac = UF_NM | (a.frac >> 1) |            /* shift in carry */
+                (a.frac & 1);                           /* retain sticky */
+            a.exp = a.exp + 1;                          /* skip norm */
+            }
+        }
+    }                                                   /* end else if */
+return ieee_rpack (&a, ir, dp);                         /* round and pack */
+}
+
+/* IEEE floating multiply
+
+ - Take care of NaNs and infinites
+ - Test for zero operands (fast exit)
+ - 64b x 64b fraction multiply, yielding 128b result
+ - Normalize (at most 1 bit)
+ - Insert "sticky" bit in low order fraction, for rounding
+
+ Because IEEE fractions have a range of [1,2), the result can have a range
+ of [1,4). Results in the range of [1,2) appear to be denormalized by one
+ place, when in fact they are correct. Results in the range of [2,4) appear
+   to be incorrect, when in fact they are 2X larger. This problem is taken
+ care of in the result exponent calculation. */
+
+/* IEEE floating multiply - see the exponent-range discussion above.
+
+   s1, s2 = register-format operands; ir = instruction; dp = result
+   precision (DT_S or DT_T).  NaN, zero, and infinity special cases
+   are resolved before the 64b x 64b fraction multiply; 0 * inf is
+   an invalid operation yielding the canonical quiet NaN. */
+
+t_uint64 ieee_fmul (t_uint64 s1, t_uint64 s2, uint32 ir, uint32 dp)
+{
+UFP a, b;
+uint32 ftpa, ftpb;
+t_uint64 resl;
+
+ftpa = ieee_unpack (s1, &a, ir);                        /* unpack operands */
+ftpb = ieee_unpack (s2, &b, ir);
+if (ftpb == UFT_NAN) return s2 | QNAN;                  /* B = NaN? quiet B */
+if (ftpa == UFT_NAN) return s1 | QNAN;                  /* A = NaN? quiet A */
+a.sign = a.sign ^ b.sign;                               /* sign of result */
+if ((ftpa == UFT_ZERO) || (ftpb == UFT_ZERO)) {         /* zero operand? */
+    if ((ftpa == UFT_INF) || (ftpb == UFT_INF)) {       /* 0 * inf? */
+        ieee_trap (TRAP_INV, 1, FPCR_INVD, ir);         /* inv op trap */
+        return CQNAN;                                   /* canonical NaN */
+        }
+    return (a.sign? FMZERO: FPZERO);                    /* return signed 0 */
+    }
+if (ftpb == UFT_INF) return (a.sign? FMINF: FPINF);     /* B = inf? */
+if (ftpa == UFT_INF) return (a.sign? FMINF: FPINF);     /* A = inf? */
+a.exp = a.exp + b.exp + 1 - T_BIAS;                     /* add exponents */
+resl = uemul64 (a.frac, b.frac, &a.frac);               /* multiply fracs */
+ieee_norm (&a);                                         /* normalize */
+a.frac = a.frac | (resl? 1: 0);                         /* sticky bit */
+return ieee_rpack (&a, ir, dp);                         /* round and pack */
+}
+
+/* Floating divide
+
+ - Take care of NaNs and infinites
+ - Check for zero cases
+ - Divide fractions (55b to develop a rounding bit)
+ - Set sticky bit if remainder non-zero
+
+ Because IEEE fractions have a range of [1,2), the result can have a range
+ of (.5,2). Results in the range of [1,2) are correct. Results in the
+ range of (.5,1) need to be normalized by one place. */
+
+/* IEEE floating divide - see the normalization discussion above.
+
+   s1 = dividend, s2 = divisor (register format); ir = instruction;
+   dp = result precision (DT_S or DT_T).  inf/inf and 0/0 are
+   invalid operations (canonical quiet NaN); finite/0 signals divide
+   by zero and returns signed infinity. */
+
+t_uint64 ieee_fdiv (t_uint64 s1, t_uint64 s2, uint32 ir, uint32 dp)
+{
+UFP a, b;
+uint32 ftpa, ftpb, sticky;
+
+ftpa = ieee_unpack (s1, &a, ir);
+ftpb = ieee_unpack (s2, &b, ir);
+if (ftpb == UFT_NAN) return s2 | QNAN;                  /* B = NaN? quiet B */
+if (ftpa == UFT_NAN) return s1 | QNAN;                  /* A = NaN? quiet A */
+a.sign = a.sign ^ b.sign;                               /* sign of result */
+if (ftpb == UFT_INF) {                                  /* B = inf? */
+    if (ftpa == UFT_INF) {                              /* inf/inf? */
+        ieee_trap (TRAP_INV, 1, FPCR_INVD, ir);         /* inv op trap */
+        return CQNAN;                                   /* canonical NaN */
+        }
+    return (a.sign? FMZERO: FPZERO);                    /* !inf/inf, ret 0 */
+    }
+if (ftpa == UFT_INF)                                    /* A = inf? */
+    return (a.sign? FMINF: FPINF);                      /* return inf */
+if (ftpb == UFT_ZERO) {                                 /* B = 0? */
+    if (ftpa == UFT_ZERO) {                             /* 0/0? */
+        ieee_trap (TRAP_INV, 1, FPCR_INVD, ir);         /* inv op trap */
+        return CQNAN;                                   /* canonical NaN */
+        }
+    ieee_trap (TRAP_DZE, 1, FPCR_DZED, ir);             /* div by 0 trap */
+    return (a.sign? FMINF: FPINF);                      /* return inf */
+    }
+if (ftpa == UFT_ZERO) return (a.sign? FMZERO: FPZERO);  /* A = 0? */
+a.exp = a.exp - b.exp + T_BIAS;                         /* unbiased exp */
+a.frac = a.frac >> 1;                                   /* allow 1 bit left */
+b.frac = b.frac >> 1;
+a.frac = ufdiv64 (a.frac, b.frac, 55, &sticky);         /* divide */
+ieee_norm (&a);                                         /* normalize */
+a.frac = a.frac | sticky;                               /* insert sticky */
+return ieee_rpack (&a, ir, dp);                         /* round and pack */
+}
+
+/* IEEE floating square root
+
+ - Take care of NaNs, +infinite, zero
+ - Check for negative operand
+ - Compute result exponent
+ - Compute sqrt of fraction */
+
+/* IEEE floating square root.
+
+   ir = instruction (Rb selects the operand); dp = result precision.
+   NaN propagates quieted; sqrt of a (signed) zero or of +infinity
+   returns the operand unchanged; a negative operand signals invalid
+   operation and returns the canonical quiet NaN.  Note -infinity
+   falls through the +infinity test into the sign check. */
+
+t_uint64 ieee_sqrt (uint32 ir, uint32 dp)
+{
+t_uint64 op;
+uint32 ftpb;
+UFP b;
+
+op = FR[I_GETRB (ir)];                                  /* get F[rb] */
+ftpb = ieee_unpack (op, &b, ir);                        /* unpack */
+if (ftpb == UFT_NAN) return op | QNAN;                  /* NaN? */
+if ((ftpb == UFT_ZERO) ||                               /* zero? */
+    ((ftpb == UFT_INF) && !b.sign)) return op;          /* +infinity? */
+if (b.sign) {                                           /* minus? */
+    ieee_trap (TRAP_INV, 1, FPCR_INVD, ir);             /* signal inv op */
+    return CQNAN;
+    }
+b.exp = ((b.exp - T_BIAS) >> 1) + T_BIAS - 1;           /* result exponent */
+b.frac = fsqrt64 (b.frac, b.exp);                       /* result fraction */
+return ieee_rpack (&b, ir, dp);                         /* round and pack */
+}
+
+/* Support routines */
+
+/* Unpack a register-format operand into sign/exponent/fraction.
+
+   op = register-format operand; r = unpacked result (fraction is
+   guarded and gets the hidden bit for finite values, and denormals
+   are normalized in place); ir = instruction (for trap reporting).
+
+   Returns a UFT_* classification code (ZERO/FIN/DENORM/INF/NAN).
+   NOTE(review): declared t_bool but actually returns one of five
+   UFT_* codes - a plain integer type would describe the contract
+   better; left as-is to match the existing prototype.
+
+   Side effects: a denormal input (with FPCR_DNZ clear) and a
+   signaling NaN both raise an invalid-operation trap. */
+
+t_bool ieee_unpack (t_uint64 op, UFP *r, uint32 ir)
+{
+r->sign = FPR_GETSIGN (op);                             /* get sign */
+r->exp = FPR_GETEXP (op);                               /* get exponent */
+r->frac = FPR_GETFRAC (op);                             /* get fraction */
+if (r->exp == 0) {                                      /* exponent = 0? */
+    if (r->frac == 0) return UFT_ZERO;                  /* frac = 0? then true 0 */
+    if (fpcr & FPCR_DNZ) {                              /* denorms to 0? */
+        r->frac = 0;                                    /* clear fraction */
+        return UFT_ZERO;
+        }
+    r->frac = r->frac << FPR_GUARD;                     /* guard fraction */
+    ieee_norm (r);                                      /* normalize dnorm */
+    ieee_trap (TRAP_INV, 1, FPCR_INVD, ir);             /* signal inv op */
+    return UFT_DENORM;
+    }
+if (r->exp == FPR_NAN) {                                /* exponent = max? */
+    if (r->frac == 0) return UFT_INF;                   /* frac = 0? then inf */
+    if (!(r->frac & QNAN))                              /* signaling NaN? */
+        ieee_trap (TRAP_INV, 1, FPCR_INVD, ir);         /* signal inv op */
+    return UFT_NAN;
+    }
+r->frac = (r->frac | FPR_HB) << FPR_GUARD;              /* ins hidden bit, guard */
+return UFT_FIN;                                         /* finite */
+}
+
+/* Normalize - input must be zero, finite, or denorm */
+
+/* Normalize an unpacked operand in place (input must be zero,
+   finite, or denorm): shift the fraction left until the top bit
+   (UF_NM) is set, decrementing the exponent to match.  A zero
+   fraction yields a true +0 (sign and exponent cleared).  The
+   mask/shift tables find the highest set bit in steps of up to
+   32 bits per pass (masks cover the top 2/4/8/16/32 bits; a miss
+   on all five means the first 1 lies below bit 31, shift by 32). */
+
+void ieee_norm (UFP *r)
+{
+int32 i;
+static t_uint64 normmask[5] = {
+    0xc000000000000000, 0xf000000000000000, 0xff00000000000000,
+    0xffff000000000000, 0xffffffff00000000
+    };
+static int32 normtab[6] = { 1, 2, 4, 8, 16, 32 };
+
+r->frac = r->frac & M64;
+if (r->frac == 0) {                                     /* if fraction = 0 */
+    r->sign = 0;
+    r->exp = 0;                                         /* result is 0 */
+    return;
+    }
+while ((r->frac & UF_NM) == 0) {                        /* normalized? */
+    for (i = 0; i < 5; i++) {                           /* find first 1 */
+        if (r->frac & normmask[i]) break;
+        }
+    r->frac = r->frac << normtab[i];                    /* shift frac */
+    r->exp = r->exp - normtab[i];                       /* decr exp */
+    }
+return;
+}
+
+/* Round and pack
+
+ Much of the treachery of the IEEE standard is buried here
+ - Rounding modes (chopped, +infinity, nearest, -infinity)
+ - Inexact (set if there are any rounding bits, regardless of rounding)
+ - Overflow (result is infinite if rounded, max if not)
+ - Underflow (no denorms!)
+
+ Underflow handling is particularly complicated
+ - Result is always 0
+ - UNF and INE are always set in FPCR
+ - If /U is set,
+ o If /S is clear, trap
+ o If /S is set, UNFD is set, but UNFZ is clear, ignore UNFD and
+ trap, because the hardware cannot produce denormals
+ o If /S is set, UNFD is set, and UNFZ is set, do not trap
+ - If /SUI is set, and INED is clear, trap */
+
+/* Round and pack an unpacked result into register format - see the
+   rounding/underflow discussion above.
+
+   r  = unpacked result (guarded fraction, biased exponent)
+   ir = instruction (rounding mode, /S /U /V /I trap qualifiers)
+   dp = precision index: 0 selects the S-format round bits and
+        exponent limits, 1 the T-format ones
+
+   Overflow returns signed infinity if the mode rounded, signed MAX
+   otherwise; underflow always returns +0 (no denormal results). */
+
+t_uint64 ieee_rpack (UFP *r, uint32 ir, uint32 dp)
+{
+static const t_uint64 stdrnd[2] = { UF_SRND, UF_TRND };
+static const t_uint64 infrnd[2] = { UF_SINF, UF_TINF };
+static const int32 expmax[2] = { T_BIAS - S_BIAS + S_M_EXP - 1, T_M_EXP - 1 };
+static const int32 expmin[2] = { T_BIAS - S_BIAS, 0 };
+t_uint64 rndadd, rndbits, res;
+uint32 rndm;
+
+if (r->frac == 0)                                       /* result 0? */
+    return ((t_uint64) r->sign << FPR_V_SIGN);
+rndm = I_GETFRND (ir);                                  /* inst round mode */
+if (rndm == I_FRND_D) rndm = FPCR_GETFRND (fpcr);       /* dynamic? use FPCR */
+rndbits = r->frac & infrnd[dp];                         /* isolate round bits */
+if (rndm == I_FRND_N) rndadd = stdrnd[dp];              /* round to nearest? */
+else if (((rndm == I_FRND_P) && !r->sign) ||            /* round to inf and */
+    ((rndm == I_FRND_M) && r->sign))                    /* right sign? */
+    rndadd = infrnd[dp];
+else rndadd = 0;
+r->frac = (r->frac + rndadd) & M64;                     /* round */
+if ((r->frac & UF_NM) == 0) {                           /* carry out? */
+    r->frac = (r->frac >> 1) | UF_NM;                   /* renormalize */
+    r->exp = r->exp + 1;
+    }
+if (rndbits)                                            /* inexact? */
+    ieee_trap (TRAP_INE, Q_SUI (ir), FPCR_INED, ir);    /* set inexact */
+if (r->exp > expmax[dp]) {                              /* ovflo? */
+    ieee_trap (TRAP_OVF, 1, FPCR_OVFD, ir);             /* set overflow trap */
+    ieee_trap (TRAP_INE, Q_SUI (ir), FPCR_INED, ir);    /* set inexact */
+    if (rndadd)                                         /* did we round? */
+        return (r->sign? FMINF: FPINF);                 /* return infinity */
+    return (r->sign? FMMAX: FPMAX);                     /* no, return max */
+    }
+if (r->exp <= expmin[dp]) {                             /* underflow? */
+    ieee_trap (TRAP_UNF, ir & I_FTRP_U,                 /* set underflow trap */
+        (fpcr & FPCR_UNDZ)? FPCR_UNFD: 0, ir);          /* (dsbl only if UNFZ set) */
+    ieee_trap (TRAP_INE, Q_SUI (ir), FPCR_INED, ir);    /* set inexact */
+    return 0;                                           /* underflow to +0 */
+    }
+res = (((t_uint64) r->sign) << FPR_V_SIGN) |            /* form result */
+    (((t_uint64) r->exp) << FPR_V_EXP) |
+    ((r->frac >> FPR_GUARD) & FPR_FRAC);
+if ((rndm == I_FRND_N) && (rndbits == stdrnd[dp]))      /* nearest and halfway? */
+    res = res & ~1;                                     /* clear lo bit */
+return res;
+}
+
+/* IEEE arithmetic trap - only one can be set at a time! */
+
+/* Record the trap in the FPCR summary and, unless suppressed, raise
+   the corresponding Alpha arithmetic trap.
+
+   trap     = trap code (shifted into the FPCR trap summary field)
+   instenb  = non-zero if the instruction enables this trap
+   fpcrdsb  = FPCR disable bit that suppresses a /S-qualified trap
+   ir       = instruction (for trap reporting) */
+
+void ieee_trap (uint32 trap, uint32 instenb, uint32 fpcrdsb, uint32 ir)
+{
+fpcr = fpcr | (trap << 19);                             /* always log in FPCR */
+if (instenb &&                                          /* enabled by inst, and */
+    (((ir & I_FTRP_S) == 0) ||                          /* not /S-qualified, or */
+     ((fpcr & fpcrdsb) == 0)))                          /* not disabled in FPCR? */
+    arith_trap (trap, ir);                              /* take Alpha trap */
+return;
+}
+
+/* Fraction square root routine - code from SoftFloat */
+
+/* Fraction square root routine - code from SoftFloat.
+
+   asig = 64b fraction (hidden bit set); exp = exponent, whose low
+   bit selects the odd/even pre-shift.  Returns the root fraction.
+
+   NOTE(review): 'sticky' records that the correction loop found an
+   inexact result, but it is never used or returned, so the caller
+   cannot see inexactness from this path - confirm whether that was
+   intended. */
+
+t_uint64 fsqrt64 (t_uint64 asig, int32 exp)
+{
+t_uint64 zsig, remh, reml, t;
+uint32 sticky = 0;
+
+zsig = estimateSqrt32 (exp, (uint32) (asig >> 32));
+
+/* Calculate the final answer in two steps. First, do one iteration of
+   Newton's approximation. The divide-by-2 is accomplished by clever
+   positioning of the operands. Then, check the bits just below the
+   (double precision) rounding bit to see if they are close to zero
+   (that is, the rounding bits are close to midpoint). If so, make
+   sure that the result^2 is the input operand */
+
+asig = asig >> ((exp & 1)? 3: 2);                       /* leave 2b guard */
+zsig = estimateDiv128 (asig, 0, zsig << 32) + (zsig << 30 );
+if ((zsig & 0x1FF) <= 5) {                              /* close to even? */
+    reml = uemul64 (zsig, zsig, &remh);                 /* result^2 */
+    remh = asig - remh - (reml? 1:0);                   /* arg - result^2 */
+    reml = NEG_Q (reml);
+    while (Q_GETSIGN (remh) != 0) {                     /* if arg < result^2 */
+        zsig = zsig - 1;                                /* decr result */
+        t = (zsig << 1) | 1;                            /* incr result^2 */
+        reml = reml + t;                                /* and retest */
+        remh = remh + (zsig >> 63) + ((reml < t)? 1: 0);
+        }
+    if ((remh | reml) != 0 ) sticky = 1;                /* not exact? */
+    }
+return zsig;
+}
+
+/* Estimate 32b SQRT
+
+ Calculate an approximation to the square root of the 32-bit significand given
+ by 'a'. Considered as an integer, 'a' must be at least 2^31. If bit 0 of
+ 'exp' (the least significant bit) is 1, the integer returned approximates
+ 2^31*sqrt('a'/2^31), where 'a' is considered an integer. If bit 0 of 'exp'
+ is 0, the integer returned approximates 2^31*sqrt('a'/2^30). In either
+ case, the approximation returned lies strictly within +/-2 of the exact
+ value. */
+
+/* See the accuracy contract in the comment above; the lookup tables
+   seed an initial guess from fraction bits <30:27>, refined by one
+   Newton iteration per the SoftFloat algorithm. */
+
+uint32 estimateSqrt32 (uint32 exp, uint32 a)
+{
+uint32 index, z;
+static const uint32 sqrtOdd[] = {
+    0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0,
+    0x039C, 0x0468, 0x0545, 0x0631, 0x072B, 0x0832, 0x0946, 0x0A67
+    };
+static const uint32 sqrtEven[] = {
+    0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E,
+    0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002
+    };
+
+index = (a >> 27) & 0xF;                                /* bits<30:27> */
+if (exp & 1) {                                          /* odd exp? */
+    z = 0x4000 + (a >> 17) - sqrtOdd[index];            /* initial guess */
+    z = ((a / z) << 14) + (z << 15);                    /* Newton iteration */
+    a = a >> 1;
+    }
+else {
+    z = 0x8000 + (a >> 17) - sqrtEven[index];           /* initial guess */
+    z = (a / z) + z;                                    /* Newton iteration */
+    z = (z >= 0x20000) ? 0xFFFF8000: (z << 15);
+    if (z <= a) z = (a >> 1) | 0x80000000;
+    }
+return (uint32) ((((((t_uint64) a) << 31) / ((t_uint64) z)) + (z >> 1)) & M32);
+}
+
+/* Estimate 128b unsigned divide */
+
+/* Estimate 128b unsigned divide - code from SoftFloat.
+
+   a0, a1 = high and low 64b halves of the dividend; b = divisor.
+   If b <= a0 the true quotient will not fit in 64 bits, so all-ones
+   is returned.  Otherwise the quotient is built one 32b half at a
+   time: each half is estimated from the divisor's high 32 bits and
+   corrected downward while the partial remainder is negative. */
+
+t_uint64 estimateDiv128 (t_uint64 a0, t_uint64 a1, t_uint64 b)
+{
+t_uint64 b0, b1;
+t_uint64 rem0, rem1, term0, term1;
+t_uint64 z;
+
+if (b <= a0) return 0xFFFFFFFFFFFFFFFF;                 /* quotient won't fit */
+b0 = b >> 32;                                           /* divisor high half */
+z = ((b0 << 32) <= a0)? 0xFFFFFFFF00000000: ((a0 / b0) << 32);
+term1 = uemul64 (b, z, &term0);                         /* b * estimate */
+rem0 = a0 - term0 - (a1 < term1);                       /* 128b subtract w/borrow */
+rem1 = a1 - term1;
+while (Q_GETSIGN (rem0)) {                              /* overestimated? */
+    z = z - ((t_uint64) 0x100000000);                   /* back off estimate */
+    b1 = b << 32;
+    rem1 = b1 + rem1;                                   /* add divisor back */
+    rem0 = b0 + rem0 + (rem1 < b1);
+    }
+rem0 = (rem0 << 32) | (rem1 >> 32);                     /* shift remainder up */
+z |= (((b0 << 32) <= rem0)? 0xFFFFFFFF : (rem0 / b0));  /* low quotient half */
+return z;
+}
diff --git a/alpha/alpha_fpv.c b/alpha/alpha_fpv.c
new file mode 100644
index 00000000..67fca723
--- /dev/null
+++ b/alpha/alpha_fpv.c
@@ -0,0 +1,457 @@
+/* alpha_fpv.c - Alpha VAX floating point simulator
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ This module contains the instruction simulators for
+
+ - single precision floating point, F
+ - double precision floating point, G
+*/
+
+#include "alpha_defs.h"
+
+#define IPMAX 0x7FFFFFFFFFFFFFFF /* plus MAX (int) */
+#define IMMAX 0x8000000000000000 /* minus MAX (int) */
+
+/* Unpacked rounding constants */
+
+#define UF_FRND 0x0000008000000000 /* F round */
+#define UF_DRND 0x0000000000000080 /* D round */
+#define UF_GRND 0x0000000000000400 /* G round */
+
+extern t_uint64 FR[32];
+extern jmp_buf save_env;
+
+t_bool vax_unpack (t_uint64 op, UFP *a, uint32 ir);
+t_bool vax_unpack_d (t_uint64 op, UFP *a, uint32 ir);
+void vax_norm (UFP *a);
+t_uint64 vax_rpack (UFP *a, uint32 ir, uint32 dp);
+t_uint64 vax_rpack_d (UFP *a, uint32 ir);
+int32 vax_fcmp (t_uint64 a, t_uint64 b, uint32 ir);
+t_uint64 vax_cvtif (t_uint64 val, uint32 ir, uint32 dp);
+t_uint64 vax_cvtfi (t_uint64 op, uint32 ir);
+t_uint64 vax_fadd (t_uint64 a, t_uint64 b, uint32 ir, uint32 dp, t_bool sub);
+t_uint64 vax_fmul (t_uint64 a, t_uint64 b, uint32 ir, uint32 dp);
+t_uint64 vax_fdiv (t_uint64 a, t_uint64 b, uint32 ir, uint32 dp);
+
+extern t_uint64 uemul64 (t_uint64 a, t_uint64 b, t_uint64 *hi);
+extern t_uint64 ufdiv64 (t_uint64 dvd, t_uint64 dvr, uint32 prec, uint32 *sticky);
+extern t_uint64 fsqrt64 (t_uint64 frac, int32 exp);
+
+/* VAX floating point loads and stores */
+
+t_uint64 op_ldf (t_uint64 op)
+{
+t_uint64 sign, frac;
+uint32 exp;
+
+/* Expand memory-format F to the canonical register (G-like) format */
+exp = F_GETEXP (op);                                    /* memory format exp */
+if (exp != 0) exp = exp + G_BIAS - F_BIAS;              /* non-zero? rebias F->G */
+sign = (op & F_SIGN)? FPR_SIGN: 0;                      /* propagate sign bit */
+frac = (t_uint64) SWAP_VAXF (op & ~(F_SIGN|F_EXP));     /* swizzle fraction */
+return sign | (((t_uint64) exp) << FPR_V_EXP) | (frac << F_V_FRAC);
+}
+
+t_uint64 op_ldg (t_uint64 op)
+{
+t_uint64 res;
+
+res = SWAP_VAXG (op);                                   /* swizzle bits */
+return res;
+}
+
+t_uint64 op_stf (t_uint64 op)
+{
+uint32 sign, frac, exp;
+
+/* Compress register format back to memory-format F */
+sign = FPR_GETSIGN (op)? F_SIGN: 0;                     /* sign to F position */
+frac = (uint32) (op >> F_V_FRAC);                       /* fraction bits */
+exp = FPR_GETEXP (op);                                  /* register format exp */
+if (exp != 0) exp = exp + F_BIAS - G_BIAS;              /* non-zero? rebias G->F */
+exp = (exp & F_M_EXP) << F_V_EXP;                       /* position exp field */
+return (t_uint64) (sign | exp | (SWAP_VAXF (frac) & ~(F_SIGN|F_EXP)));
+}
+
+t_uint64 op_stg (t_uint64 op)
+{
+t_uint64 res;
+
+res = SWAP_VAXG (op);                                   /* swizzle bits */
+return res;
+}
+
+/* VAX floating point operate */
+
+void vax_fop (uint32 ir)
+{
+/* Decode a VAX (F/G/D) floating operate instruction: dispatch on the
+   FP function field and write the result to F[rc] (unless rc = 31) */
+UFP b;
+t_uint64 res;
+uint32 fnc, ra, rb, rc;
+
+fnc = I_GETFFNC (ir);                                   /* get function */
+ra = I_GETRA (ir);                                      /* get registers */
+rb = I_GETRB (ir);
+rc = I_GETRC (ir);
+switch (fnc) {                                          /* case on func */
+
+    case 0x00:                                          /* ADDF */
+        res = vax_fadd (FR[ra], FR[rb], ir, DT_F, 0);
+        break;
+
+    case 0x01:                                          /* SUBF */
+        res = vax_fadd (FR[ra], FR[rb], ir, DT_F, 1);
+        break;
+
+    case 0x02:                                          /* MULF */
+        res = vax_fmul (FR[ra], FR[rb], ir, DT_F);
+        break;
+
+    case 0x03:                                          /* DIVF */
+        res = vax_fdiv (FR[ra], FR[rb], ir, DT_F);
+        break;
+
+    case 0x20:                                          /* ADDG */
+        res = vax_fadd (FR[ra], FR[rb], ir, DT_G, 0);
+        break;
+
+    case 0x21:                                          /* SUBG */
+        res = vax_fadd (FR[ra], FR[rb], ir, DT_G, 1);
+        break;
+
+    case 0x22:                                          /* MULG */
+        res = vax_fmul (FR[ra], FR[rb], ir, DT_G);
+        break;
+
+    case 0x23:                                          /* DIVG */
+        res = vax_fdiv (FR[ra], FR[rb], ir, DT_G);
+        break;
+
+    case 0x25:                                          /* CMPGEQ */
+        if (vax_fcmp (FR[ra], FR[rb], ir) == 0) res = FP_TRUE;
+        else res = 0;
+        break;
+
+    case 0x26:                                          /* CMPGLT */
+        if (vax_fcmp (FR[ra], FR[rb], ir) < 0) res = FP_TRUE;
+        else res = 0;
+        break;
+
+    case 0x27:                                          /* CMPGLE */
+        if (vax_fcmp (FR[ra], FR[rb], ir) <= 0) res = FP_TRUE;
+        else res = 0;
+        break;
+
+    case 0x1E:                                          /* CVTDG */
+        if (vax_unpack_d (FR[rb], &b, ir)) res = 0;     /* reserved operand -> 0 */
+        else res = vax_rpack (&b, ir, DT_G);
+        break;
+
+    case 0x2C:                                          /* CVTGF */
+        if (vax_unpack (FR[rb], &b, ir)) res = 0;       /* reserved operand -> 0 */
+        else res = vax_rpack (&b, ir, DT_F);
+        break;
+
+    case 0x2D:                                          /* CVTGD */
+        if (vax_unpack (FR[rb], &b, ir)) res = 0;       /* reserved operand -> 0 */
+        else res = vax_rpack_d (&b, ir);
+        break;
+
+    case 0x2F:                                          /* CVTGQ */
+        res = vax_cvtfi (FR[rb], ir);
+        break;
+
+    case 0x3C:                                          /* CVTQF */
+        res = vax_cvtif (FR[rb], ir, DT_F);
+        break;
+
+    case 0x3E:                                          /* CVTQG */
+        res = vax_cvtif (FR[rb], ir, DT_G);
+        break;
+
+    default:                                            /* unimplemented func: */
+        res = FR[rc];                                   /* Rc unchanged */
+        break;
+    }
+
+if (rc != 31) FR[rc] = res & M64;                       /* writes to reg 31 discarded */
+return;
+}
+
+/* VAX floating compare */
+
+int32 vax_fcmp (t_uint64 s1, t_uint64 s2, uint32 ir)
+{
+UFP a, b;
+
+/* Compare two register-format VAX floats; returns -1/0/+1 */
+if (vax_unpack (s1, &a, ir) ||                          /* unpack both; reserved */
+    vax_unpack (s2, &b, ir)) return +1;                 /* operand compares high */
+if (s1 == s2) return 0;                                 /* identical? equal */
+if (a.sign != b.sign) return (a.sign? -1: +1);          /* opposite signs: neg < pos */
+return (((s1 < s2) ^ a.sign)? -1: +1);                  /* same sign: magnitude order, */
+}                                                       /* inverted when both negative */
+
+/* VAX integer to floating convert */
+
+t_uint64 vax_cvtif (t_uint64 val, uint32 ir, uint32 dp)
+{
+UFP a;
+
+/* Convert a signed quadword integer to F or G floating.  'val' is a
+   signed quadword carried in an unsigned container: the test (val < 0)
+   is always false for t_uint64 and would treat negative inputs as huge
+   positives, so the sign must be tested on bit <63> instead */
+if (val == 0) return 0;                                 /* 0? return +0 */
+if (Q_GETSIGN (val)) {                                  /* negative? (bit 63) */
+    a.sign = 1;                                         /* set sign */
+    val = NEG_Q (val);                                  /* |val| */
+    }
+else a.sign = 0;
+a.exp = 64 + G_BIAS;                                    /* set exp */
+a.frac = val;                                           /* set frac */
+vax_norm (&a);                                          /* normalize */
+return vax_rpack (&a, ir, dp);                          /* round and pack */
+}
+
+/* VAX floating to integer convert - note that rounding cannot cause a
+ carry unless the fraction has been shifted right at least FP_GUARD
+ places; in which case a carry out is impossible */
+
+t_uint64 vax_cvtfi (t_uint64 op, uint32 ir)
+{
+UFP a;
+uint32 rndm = I_GETFRND (ir);                           /* rounding mode: 0 = chopped */
+int32 ubexp;
+
+if (vax_unpack (op, &a, ir)) return 0;                  /* unpack, rsv? */
+ubexp = a.exp - G_BIAS;                                 /* unbiased exp */
+if (ubexp < 0) return 0;                                /* zero or too small? */
+if (ubexp <= UF_V_NM) {                                 /* in range? */
+    a.frac = a.frac >> (UF_V_NM - ubexp);               /* leave rnd bit */
+    if (rndm) a.frac = a.frac + 1;                      /* not chopped, round */
+    a.frac = a.frac >> 1;                               /* now justified */
+    /* magnitude limit is 2^63 for a negative result (IMMAX), 2^63 - 1
+       for a positive one (IPMAX); unsigned compare is exact here */
+    if ((a.frac > (a.sign? IMMAX: IPMAX)) &&            /* out of range? */
+        (ir & I_FTRP_V))                                /* trap enabled? */
+        arith_trap (TRAP_IOV, ir);                      /* set overflow */
+    }
+else {
+    if (ubexp > (UF_V_NM + 64)) a.frac = 0;             /* out of range */
+    else a.frac = (a.frac << (ubexp - UF_V_NM - 1)) & M64; /* no rnd bit */
+    if (ir & I_FTRP_V)                                  /* trap enabled? */
+        arith_trap (TRAP_IOV, ir);                      /* set overflow */
+    }
+return (a.sign? NEG_Q (a.frac): a.frac);                /* apply sign (2's comp) */
+}
+
+/* VAX floating add */
+
+t_uint64 vax_fadd (t_uint64 s1, t_uint64 s2, uint32 ir, uint32 dp, t_bool sub)
+{
+UFP a, b, t;
+uint32 sticky;
+int32 ediff;
+
+if (vax_unpack (s1, &a, ir)) return 0;                  /* unpack, rsv? */
+if (vax_unpack (s2, &b, ir)) return 0;                  /* unpack, rsv? */
+if (sub) b.sign = b.sign ^ 1;                           /* sub? invert b sign */
+if (a.exp == 0) a = b;                                  /* s1 = 0? */
+else if (b.exp) {                                       /* s2 != 0? */
+    /* ensure a holds the larger magnitude so the difference of
+       fractions below cannot go negative */
+    if ((a.exp < b.exp) ||                              /* |s1| < |s2|? swap */
+        ((a.exp == b.exp) && (a.frac < b.frac))) {
+        t = a;
+        a = b;
+        b = t;
+        }
+    ediff = a.exp - b.exp;                              /* exp diff */
+    if (a.sign ^ b.sign) {                              /* eff sub? */
+        if (ediff > 63) b.frac = 1;                     /* >63? retain sticky */
+        else if (ediff) {                               /* [1,63]? shift */
+            /* any bits shifted out are OR'd into the low bit (sticky)
+               so chopped/rounded results stay correct */
+            sticky = ((b.frac << (64 - ediff)) & M64)? 1: 0; /* lost bits */
+            b.frac = (b.frac >> ediff) | sticky;
+            }
+        a.frac = (a.frac - b.frac) & M64;               /* subtract fractions */
+        vax_norm (&a);                                  /* normalize */
+        }
+    else {                                              /* eff add */
+        if (ediff > 63) b.frac = 0;                     /* >63? b disappears */
+        else if (ediff) b.frac = b.frac >> ediff;       /* denormalize */
+        a.frac = (a.frac + b.frac) & M64;               /* add frac */
+        if (a.frac < b.frac) {                          /* chk for carry */
+            a.frac = UF_NM | (a.frac >> 1);             /* shift in carry */
+            a.exp = a.exp + 1;                          /* skip norm */
+            }
+        }
+    }                                                   /* end else if */
+return vax_rpack (&a, ir, dp);                          /* round and pack */
+}
+
+/* VAX floating multiply */
+
+t_uint64 vax_fmul (t_uint64 s1, t_uint64 s2, uint32 ir, uint32 dp)
+{
+UFP x, y;
+
+if (vax_unpack (s1, &x, ir) ||                          /* unpack both operands; */
+    vax_unpack (s2, &y, ir)) return 0;                  /* reserved -> +0 */
+if ((x.exp == 0) || (y.exp == 0)) return 0;             /* either zero? result +0 */
+x.sign = x.sign ^ y.sign;                               /* result sign = XOR */
+x.exp = x.exp + y.exp - G_BIAS;                         /* exponents add, drop bias */
+uemul64 (x.frac, y.frac, &x.frac);                      /* high 64b of frac product */
+vax_norm (&x);                                          /* renormalize */
+return vax_rpack (&x, ir, dp);                          /* round and pack */
+}
+
+/* VAX floating divide
+ Needs to develop at least one rounding bit. Since the first
+ divide step can fail, develop 2 more bits than the precision of
+ the fraction. */
+
+t_uint64 vax_fdiv (t_uint64 s1, t_uint64 s2, uint32 ir, uint32 dp)
+{
+UFP dvd, dvr;
+
+if (vax_unpack (s1, &dvd, ir)) return 0;                /* unpack dividend */
+if (vax_unpack (s2, &dvr, ir)) return 0;                /* unpack divisor */
+if (dvr.exp == 0) {                                     /* divide by zero? */
+    arith_trap (TRAP_DZE, ir);                          /* take dze trap */
+    return 0;
+    }
+if (dvd.exp == 0) return 0;                             /* zero dividend? +0 */
+dvd.sign = dvd.sign ^ dvr.sign;                         /* result sign */
+dvd.exp = dvd.exp - dvr.exp + G_BIAS + 1;               /* rebias exponent */
+dvd.frac = dvd.frac >> 1;                               /* pre-shift both fracs */
+dvr.frac = dvr.frac >> 1;                               /* to leave 1 bit headroom */
+dvd.frac = ufdiv64 (dvd.frac, dvr.frac, 55, NULL);      /* 55b quotient */
+vax_norm (&dvd);                                        /* normalize */
+return vax_rpack (&dvd, ir, dp);                        /* round and pack */
+}
+
+/* VAX floating square root */
+
+t_uint64 vax_sqrt (uint32 ir, uint32 dp)
+{
+t_uint64 op;
+UFP arg;
+
+op = FR[I_GETRB (ir)];                                  /* fetch F[rb] */
+if (vax_unpack (op, &arg, ir)) return 0;                /* reserved operand? +0 */
+if (arg.exp == 0) return 0;                             /* sqrt(0) = +0 */
+if (arg.sign) {                                         /* negative argument? */
+    arith_trap (TRAP_INV, ir);                          /* invalid operand */
+    return 0;
+    }
+arg.exp = ((arg.exp + 1 - G_BIAS) >> 1) + G_BIAS;       /* halve unbiased exp */
+arg.frac = fsqrt64 (arg.frac, arg.exp);                 /* fraction square root */
+return vax_rpack (&arg, ir, dp);                        /* round and pack */
+}
+
+/* Support routines */
+
+t_bool vax_unpack (t_uint64 op, UFP *r, uint32 ir)
+{
+r->sign = FPR_GETSIGN (op);                             /* sign bit */
+r->exp = FPR_GETEXP (op);                               /* biased exponent */
+r->frac = FPR_GETFRAC (op);                             /* fraction field */
+if (r->exp != 0) {                                      /* finite non-zero */
+    r->frac = (r->frac | FPR_HB) << FPR_GUARD;          /* hidden bit + guard */
+    return FALSE;
+    }
+if (op != 0) arith_trap (TRAP_INV, ir);                 /* exp=0, rest!=0: rsvd */
+r->frac = r->sign = 0;                                  /* true zero */
+return TRUE;
+}
+
+t_bool vax_unpack_d (t_uint64 op, UFP *r, uint32 ir)
+{
+r->sign = FDR_GETSIGN (op);                             /* sign bit */
+r->exp = FDR_GETEXP (op);                               /* biased exponent */
+r->frac = FDR_GETFRAC (op);                             /* fraction field */
+if (r->exp != 0) {                                      /* finite non-zero */
+    r->exp = r->exp + G_BIAS - D_BIAS;                  /* rebias D->G */
+    r->frac = (r->frac | FDR_HB) << FDR_GUARD;          /* hidden bit + guard */
+    return FALSE;
+    }
+if (op != 0) arith_trap (TRAP_INV, ir);                 /* exp=0, rest!=0: rsvd */
+r->frac = r->sign = 0;                                  /* true zero */
+return TRUE;
+}
+
+/* VAX normalize */
+
+void vax_norm (UFP *r)
+{
+int32 i;
+/* shift-distance search: normmask[i] covers the top 2/4/8/16/32 bits,
+   normtab[i] is the matching left-shift count (up to 32 per pass) */
+static t_uint64 normmask[5] = {
+    0xc000000000000000, 0xf000000000000000, 0xff00000000000000,
+    0xffff000000000000, 0xffffffff00000000
+    };
+static int32 normtab[6] = { 1, 2, 4, 8, 16, 32 };
+
+r->frac = r->frac & M64;
+if (r->frac == 0) {                                     /* if fraction = 0 */
+    r->sign = r->exp = 0;                               /* result is 0 */
+    return;
+    }
+while ((r->frac & UF_NM) == 0) {                        /* normalized? */
+    for (i = 0; i < 5; i++) {                           /* find first 1 */
+        if (r->frac & normmask[i]) break;
+        }
+    r->frac = r->frac << normtab[i];                    /* shift frac */
+    r->exp = r->exp - normtab[i];                       /* decr exp */
+    }
+return;
+}
+
+/* VAX round and pack */
+
+t_uint64 vax_rpack (UFP *r, uint32 ir, uint32 dp)
+{
+uint32 rndm = I_GETFRND (ir);                           /* rounding mode: 0 = chopped */
+/* per-precision constants indexed by dp (presumably DT_F = 0, DT_G = 1
+   - TODO confirm against alpha_defs.h) */
+static const t_uint64 roundbit[2] = { UF_FRND, UF_GRND };
+static const int32 expmax[2] = { G_BIAS - F_BIAS + F_M_EXP, G_M_EXP };
+static const int32 expmin[2] = { G_BIAS - F_BIAS, 0 };
+
+if (r->frac == 0) return 0;                             /* result 0? */
+if (rndm) {                                             /* round? */
+    r->frac = (r->frac + roundbit[dp]) & M64;           /* add round bit */
+    if ((r->frac & UF_NM) == 0) {                       /* carry out? */
+        r->frac = (r->frac >> 1) | UF_NM;               /* renormalize */
+        r->exp = r->exp + 1;
+        }
+    }
+if (r->exp > expmax[dp]) {                              /* ovflo? */
+    arith_trap (TRAP_OVF, ir);                          /* set trap */
+    r->exp = expmax[dp];                                /* return max */
+    }
+if (r->exp <= expmin[dp]) {                             /* underflow? */
+    if (ir & I_FTRP_V) arith_trap (TRAP_UNF, ir);       /* enabled? set trap */
+    return 0;                                           /* underflow to 0 */
+    }
+return (((t_uint64) r->sign) << FPR_V_SIGN) |           /* assemble register format */
+    (((t_uint64) r->exp) << FPR_V_EXP) |
+    ((r->frac >> FPR_GUARD) & FPR_FRAC);
+}
+
+t_uint64 vax_rpack_d (UFP *r, uint32 ir)
+{
+if (r->frac == 0) return 0;                             /* zero fraction? +0 */
+r->exp = r->exp + D_BIAS - G_BIAS;                      /* rebias G->D */
+if (r->exp > FDR_M_EXP) {                               /* exponent overflow? */
+    arith_trap (TRAP_OVF, ir);                          /* take ovf trap */
+    r->exp = FDR_M_EXP;                                 /* clamp to max */
+    }
+else if (r->exp <= 0) {                                 /* exponent underflow? */
+    if (ir & I_FTRP_V) arith_trap (TRAP_UNF, ir);       /* trap if enabled */
+    return 0;                                           /* flush to +0 */
+    }
+return (((t_uint64) r->sign) << FDR_V_SIGN) |           /* assemble D format */
+    (((t_uint64) r->exp) << FDR_V_EXP) |
+    ((r->frac >> FDR_GUARD) & FDR_FRAC);
+}
diff --git a/alpha/alpha_io.c b/alpha/alpha_io.c
new file mode 100644
index 00000000..aa021d41
--- /dev/null
+++ b/alpha/alpha_io.c
@@ -0,0 +1,214 @@
+/* alpha_io.c: Alpha I/O and miscellaneous devices
+
+ Copyright (c) 2006, Robert M. Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ rom boot ROM
+*/
+
+#include "alpha_defs.h"
+#include "alpha_sys_defs.h"
+
+t_uint64 *rom = NULL; /* boot ROM */
+
+extern DEVICE *sim_devices[];
+
+t_bool rom_rd (t_uint64 pa, t_uint64 *val, uint32 lnt);
+t_bool rom_wr (t_uint64 pa, t_uint64 val, uint32 lnt);
+t_stat rom_ex (t_value *vptr, t_addr exta, UNIT *uptr, int32 sw);
+t_stat rom_dep (t_value val, t_addr exta, UNIT *uptr, int32 sw);
+t_stat rom_reset (DEVICE *dptr);
+
+/* ROM data structures
+
+ rom_dev ROM device descriptor
+ rom_unit ROM units
+ rom_reg ROM register list
+*/
+
+/* I/O dispatch: [low, high) address range and read/write handlers */
+DIB rom_dib = {
+    ROMBASE, ROMBASE + ROMSIZE, &rom_rd, &rom_wr, 0
+    };
+
+/* single fixed-size unit backing the ROM image */
+UNIT rom_unit = {
+    UDATA (NULL, UNIT_FIX+UNIT_BINK, ROMSIZE)
+    };
+
+/* no registers exposed to SCP */
+REG rom_reg[] = {
+    { NULL }
+    };
+
+/* device descriptor; DEV_DIB makes ReadIO/WriteIO scan rom_dib */
+DEVICE rom_dev = {
+    "ROM", &rom_unit, rom_reg, NULL,
+    1, 16, 24, 8, 16, 64,
+    &rom_ex, &rom_dep, &rom_reset,
+    NULL, NULL, NULL,
+    &rom_dib, DEV_DIB
+    };
+
+/* ReadIO - read IO space
+
+ Inputs:
+ pa = physical address
+ *dat = pointer to data
+ lnt = length (BWLQ)
+ Output:
+ TRUE if read succeeds, else FALSE
+*/
+
+t_bool ReadIO (t_uint64 pa, t_uint64 *dat, uint32 lnt)
+{
+uint32 i;
+
+for (i = 0; sim_devices[i] != NULL; i++) {              /* scan device table */
+    DEVICE *dptr = sim_devices[i];
+    DIB *dibp;
+
+    if ((dptr->flags & DEV_DIB) == 0) continue;         /* no I/O range? skip */
+    dibp = (DIB *) dptr->ctxt;
+    if ((pa >= dibp->low) && (pa < dibp->high))         /* pa in device range? */
+        return dibp->read (pa, dat, lnt);               /* dispatch to device */
+    }
+return FALSE;                                           /* no device claimed pa */
+}
+
+/* WriteIO - write IO space
+
+   Inputs:
+        pa      =       physical address
+        dat     =       data to write, right justified in 64b quadword
+        lnt     =       length (BWLQ)
+ Output:
+ TRUE if write succeeds, else FALSE
+*/
+
+t_bool WriteIO (t_uint64 pa, t_uint64 dat, uint32 lnt)
+{
+uint32 i;
+
+for (i = 0; sim_devices[i] != NULL; i++) {              /* scan device table */
+    DEVICE *dptr = sim_devices[i];
+    DIB *dibp;
+
+    if ((dptr->flags & DEV_DIB) == 0) continue;         /* no I/O range? skip */
+    dibp = (DIB *) dptr->ctxt;
+    if ((pa >= dibp->low) && (pa < dibp->high))         /* pa in device range? */
+        return dibp->write (pa, dat, lnt);              /* dispatch to device */
+    }
+return FALSE;                                           /* no device claimed pa */
+}
+
+/* Boot ROM read */
+
+t_bool rom_rd (t_uint64 pa, t_uint64 *val, uint32 lnt)
+{
+uint32 sc, rg = ((uint32) ((pa - ROMBASE) & (ROMSIZE - 1))) >> 3;
+
+/* extract the requested length from the containing ROM quadword */
+switch (lnt) {
+
+    case L_BYTE:                                        /* byte within quadword */
+        sc = (((uint32) pa) & 7) * 8;
+        *val = (rom[rg] >> sc) & M8;
+        break;
+
+    case L_WORD:                                        /* word within quadword */
+        sc = (((uint32) pa) & 6) * 8;
+        *val = (rom[rg] >> sc) & M16;
+        break;
+
+    case L_LONG:                                        /* high or low longword */
+        if (pa & 4) *val = (rom[rg] >> 32) & M32;
+        else *val = rom[rg] & M32;
+        break;
+
+    case L_QUAD:                                        /* full quadword */
+        *val = rom[rg];
+        break;
+
+    default:                                            /* invalid length: don't */
+        *val = 0;                                       /* leave *val indeterminate */
+        return FALSE;
+    }
+
+return TRUE;
+}
+
+/* Boot ROM write */
+
+t_bool rom_wr (t_uint64 pa, t_uint64 val, uint32 lnt)
+{
+uint32 sc, rg = ((uint32) ((pa - ROMBASE) & (ROMSIZE - 1))) >> 3;
+t_uint64 mask;
+
+/* merge the requested length into the containing ROM quadword */
+switch (lnt) {
+
+    case L_BYTE:                                        /* byte within quadword */
+        sc = (((uint32) pa) & 7) * 8;
+        mask = ((t_uint64) M8) << sc;
+        rom[rg] = (rom[rg] & ~mask) | (((t_uint64) (val & M8)) << sc);
+        break;
+
+    case L_WORD:                                        /* word within quadword */
+        sc = (((uint32) pa) & 6) * 8;
+        mask = ((t_uint64) M16) << sc;
+        rom[rg] = (rom[rg] & ~mask) | (((t_uint64) (val & M16)) << sc);
+        break;
+
+    case L_LONG:                                        /* high or low longword */
+        if (pa & 4) rom[rg] = ((t_uint64) (rom[rg] & M32)) | (((t_uint64) (val & M32)) << 32);
+        else rom[rg] = (rom[rg] & ~((t_uint64) M32)) | ((t_uint64) val & M32);
+        break;
+
+    case L_QUAD:                                        /* full quadword */
+        rom[rg] = val;
+        break;
+    }
+
+return TRUE;
+}
+
+/* ROM examine */
+
+t_stat rom_ex (t_value *vptr, t_addr exta, UNIT *uptr, int32 sw)
+{
+uint32 off = (uint32) exta;
+
+if (vptr == NULL) return SCPE_ARG;                      /* no result pointer? */
+if (off >= ROMSIZE) return SCPE_NXM;                    /* beyond ROM? */
+*vptr = rom[off >> 3];                                  /* quadword holding off */
+return SCPE_OK;
+}
+
+/* ROM deposit */
+
+t_stat rom_dep (t_value val, t_addr exta, UNIT *uptr, int32 sw)
+{
+uint32 off = (uint32) exta;
+
+if (off >= ROMSIZE) return SCPE_NXM;                    /* beyond ROM? */
+rom[off >> 3] = val;                                    /* store full quadword */
+return SCPE_OK;
+}
+
+/* ROM reset */
+
+t_stat rom_reset (DEVICE *dptr)
+{
+if (rom == NULL)                                        /* first reset? allocate */
+    rom = (t_uint64 *) calloc (ROMSIZE >> 3, sizeof (t_uint64));
+return (rom == NULL)? SCPE_MEM: SCPE_OK;                /* fail if no memory */
+}
diff --git a/alpha/alpha_mmu.c b/alpha/alpha_mmu.c
new file mode 100644
index 00000000..8d000b09
--- /dev/null
+++ b/alpha/alpha_mmu.c
@@ -0,0 +1,308 @@
+/* alpha_mmu.c - Alpha memory management simulator
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ This module contains the routines for
+
+ ReadB,W,L,Q - read aligned virtual
+ ReadAccL,Q - read aligned virtual, special access check
+ ReadPB,W,L,Q - read aligned physical
+ WriteB,W,L,Q - write aligned virtual
+ WriteAccL,Q - write aligned virtual, special access check
+ WritePB,W,L,Q - write aligned physical
+
+ The TLB is organized for optimum lookups and is broken up into three fields:
+
+ tag VA<42:13> for an 8KB page system
+ pte PTE<31:0>, <31:16> are zero; FOE, FOR, FOW stored inverted
+ pfn PFN<31:0> left shifted by page size
+
+ The inversion of FOE, FOR, FOW means that all checked bits must be one
+ for a reference to proceed.
+
+ All Alpha implementations provide support for a 43b superpage for Unix,
+ and a 32b superpage for NT:
+
+ 43b superpage 0xFFFFFC0000000000:0xFFFFFDFFFFFFFFFF
+ 32b superpage 0xFFFFFFFF80000000:0xFFFFFFFFBFFFFFFF
+*/
+
+#include "alpha_defs.h"
+
+extern t_uint64 trans_i (t_uint64 va);
+extern t_uint64 trans_d (t_uint64 va, uint32 acc);
+
+extern t_uint64 *M;
+extern t_uint64 p1;
+extern uint32 pal_mode, dmapen;
+extern uint32 cm_eacc, cm_racc, cm_wacc;
+extern jmp_buf save_env;
+extern UNIT cpu_unit;
+
+/* Read virtual aligned
+
+ Inputs:
+ va = virtual address
+ Output:
+ returned data, right justified
+*/
+
+t_uint64 ReadB (t_uint64 va)
+{
+t_uint64 pa = dmapen? trans_d (va, cm_racc): va;        /* translate if mapping on */
+
+return ReadPB (pa);
+}
+
+t_uint64 ReadW (t_uint64 va)
+{
+t_uint64 pa;
+
+if (va & 1) ABORT1 (va, EXC_ALIGN);                     /* W alignment check */
+pa = dmapen? trans_d (va, cm_racc): va;                 /* translate if mapping on */
+return ReadPW (pa);
+}
+
+t_uint64 ReadL (t_uint64 va)
+{
+t_uint64 pa;
+
+if (va & 3) ABORT1 (va, EXC_ALIGN);                     /* L alignment check */
+pa = dmapen? trans_d (va, cm_racc): va;                 /* translate if mapping on */
+return ReadPL (pa);
+}
+
+t_uint64 ReadQ (t_uint64 va)
+{
+t_uint64 pa;
+
+if (va & 7) ABORT1 (va, EXC_ALIGN);                     /* Q alignment check */
+pa = dmapen? trans_d (va, cm_racc): va;                 /* translate if mapping on */
+return ReadPQ (pa);
+}
+
+/* Read with generalized access controls - used by PALcode */
+
+t_uint64 ReadAccL (t_uint64 va, uint32 acc)
+{
+t_uint64 pa;
+
+if (va & 3) ABORT1 (va, EXC_ALIGN);                     /* L alignment check */
+pa = dmapen? trans_d (va, acc): va;                     /* caller-supplied access */
+return ReadPL (pa);
+}
+
+t_uint64 ReadAccQ (t_uint64 va, uint32 acc)
+{
+t_uint64 pa;
+
+if (va & 7) ABORT1 (va, EXC_ALIGN);                     /* Q alignment check */
+pa = dmapen? trans_d (va, acc): va;                     /* caller-supplied access */
+return ReadPQ (pa);
+}
+
+/* Read instruction */
+
+uint32 ReadI (t_uint64 va)
+{
+t_uint64 pa = pal_mode? va: trans_i (va);               /* PAL mode runs physical */
+
+return (uint32) ReadPL (pa);                            /* instructions are longwords */
+}
+
+/* Write virtual aligned
+
+ Inputs:
+ va = virtual address
+ val = data to be written, right justified in 32b or 64b
+ Output:
+ none
+*/
+
+void WriteB (t_uint64 va, t_uint64 dat)
+{
+t_uint64 pa = dmapen? trans_d (va, cm_wacc): va;        /* translate if mapping on */
+
+WritePB (pa, dat);
+return;
+}
+
+void WriteW (t_uint64 va, t_uint64 dat)
+{
+t_uint64 pa;
+
+if (va & 1) ABORT1 (va, EXC_ALIGN);                     /* W alignment check */
+pa = dmapen? trans_d (va, cm_wacc): va;                 /* translate if mapping on */
+WritePW (pa, dat);
+return;
+}
+
+void WriteL (t_uint64 va, t_uint64 dat)
+{
+t_uint64 pa;
+
+if (va & 3) ABORT1 (va, EXC_ALIGN);                     /* L alignment check */
+pa = dmapen? trans_d (va, cm_wacc): va;                 /* translate if mapping on */
+WritePL (pa, dat);
+return;
+}
+
+void WriteQ (t_uint64 va, t_uint64 dat)
+{
+t_uint64 pa;
+
+if (va & 7) ABORT1 (va, EXC_ALIGN);                     /* Q alignment check */
+pa = dmapen? trans_d (va, cm_wacc): va;                 /* translate if mapping on */
+WritePQ (pa, dat);
+return;
+}
+
+/* Write with generalized access controls - used by PALcode */
+
+void WriteAccL (t_uint64 va, t_uint64 dat, uint32 acc)
+{
+t_uint64 pa;
+
+if (va & 3) ABORT1 (va, EXC_ALIGN);                     /* L alignment check */
+pa = dmapen? trans_d (va, acc): va;                     /* caller-supplied access */
+WritePL (pa, dat);
+return;
+}
+
+void WriteAccQ (t_uint64 va, t_uint64 dat, uint32 acc)
+{
+t_uint64 pa;
+
+if (va & 7) ABORT1 (va, EXC_ALIGN);                     /* Q alignment check */
+pa = dmapen? trans_d (va, acc): va;                     /* caller-supplied access */
+WritePQ (pa, dat);
+return;
+}
+
+/* Read and write physical aligned - access point to I/O */
+
+INLINE t_uint64 ReadPB (t_uint64 pa)
+{
+t_uint64 val;
+
+if (ADDR_IS_MEM (pa)) {                                 /* memory? */
+    uint32 sc = (((uint32) pa) & 07) << 3;              /* byte shift count */
+    return ((M[pa >> 3] >> sc) & M8);
+    }
+if (ReadIO (pa, &val, L_BYTE)) return val;              /* try I/O space */
+return 0;                                               /* unclaimed reads as 0 */
+}
+
+INLINE t_uint64 ReadPW (t_uint64 pa)
+{
+t_uint64 val;
+
+if (ADDR_IS_MEM (pa)) {                                 /* memory? */
+    uint32 sc = (((uint32) pa) & 06) << 3;              /* word shift count */
+    return ((M[pa >> 3] >> sc) & M16);
+    }
+if (ReadIO (pa, &val, L_WORD)) return val;              /* try I/O space */
+return 0;                                               /* unclaimed reads as 0 */
+}
+
+INLINE t_uint64 ReadPL (t_uint64 pa)
+{
+t_uint64 val;
+
+if (ADDR_IS_MEM (pa)) {                                 /* memory? */
+    t_uint64 qw = M[pa >> 3];
+    return (((pa & 4)? (qw >> 32): qw) & M32);          /* high or low longword */
+    }
+if (ReadIO (pa, &val, L_LONG)) return val;              /* try I/O space */
+return 0;                                               /* unclaimed reads as 0 */
+}
+
+INLINE t_uint64 ReadPQ (t_uint64 pa)
+{
+t_uint64 val;
+
+if (ADDR_IS_MEM (pa)) return M[pa >> 3];                /* memory? direct */
+if (ReadIO (pa, &val, L_QUAD)) return val;              /* try I/O space */
+return 0;                                               /* unclaimed reads as 0 */
+}
+
+INLINE void WritePB (t_uint64 pa, t_uint64 dat)
+{
+dat = dat & M8;                                         /* byte only */
+if (ADDR_IS_MEM (pa)) {
+    uint32 bo = ((uint32) pa) & 07;                     /* byte offset in qw */
+    M[pa >> 3] = (M[pa >> 3] & ~(((t_uint64) M8) << (bo << 3))) |
+        (dat << (bo << 3));
+    }
+else WriteIO (pa, dat, L_BYTE);                         /* else try I/O space */
+return;
+}
+
+INLINE void WritePW (t_uint64 pa, t_uint64 dat)
+{
+dat = dat & M16;                                        /* word only */
+if (ADDR_IS_MEM (pa)) {
+    uint32 bo = ((uint32) pa) & 06;                     /* word offset in qw; was */
+                                                        /* & 07, which would misplace */
+                                                        /* the word for an odd pa */
+                                                        /* (cf ReadPW, rom_wr L_WORD) */
+    M[pa >> 3] = (M[pa >> 3] & ~(((t_uint64) M16) << (bo << 3))) |
+        (dat << (bo << 3));
+    }
+else WriteIO (pa, dat, L_WORD);                         /* else try I/O space */
+return;
+}
+
+INLINE void WritePL (t_uint64 pa, t_uint64 dat)
+{
+dat = dat & M32;                                        /* longword only */
+if (ADDR_IS_MEM (pa)) {
+    if (pa & 4) M[pa >> 3] = (M[pa >> 3] & M32) |       /* high longword */
+        (dat << 32);
+    else M[pa >> 3] = (M[pa >> 3] & ~((t_uint64) M32)) | dat; /* low longword */
+    }
+else WriteIO (pa, dat, L_LONG);                         /* else try I/O space */
+return;
+}
+
+INLINE void WritePQ (t_uint64 pa, t_uint64 dat)
+{
+if (ADDR_IS_MEM (pa)) M[pa >> 3] = dat;                 /* memory? direct */
+else WriteIO (pa, dat, L_QUAD);                         /* else try I/O space */
+return;
+}
+
diff --git a/alpha/alpha_sys.c b/alpha/alpha_sys.c
new file mode 100644
index 00000000..630f9df5
--- /dev/null
+++ b/alpha/alpha_sys.c
@@ -0,0 +1,814 @@
+/* alpha_sys.c: Alpha simulator interface
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+*/
+
+#include "alpha_defs.h"
+#include <ctype.h>
+
+/* CPU state exported by the CPU module */
+extern UNIT cpu_unit;
+extern REG cpu_reg[];
+extern int32 sim_switches;
+extern uint32 pal_type;
+
+/* Forward declarations for this module */
+t_stat fprint_sym_m (FILE *of, t_addr addr, uint32 inst);
+t_stat parse_sym_m (char *cptr, t_addr addr, t_value *inst);
+int32 parse_reg (char *cptr);
+
+/* PALcode hardware-instruction hooks and ROM writer defined elsewhere */
+extern t_stat fprint_pal_hwre (FILE *of, uint32 inst);
+extern t_stat parse_pal_hwre (char *cptr, t_value *inst);
+extern t_bool rom_wr (t_uint64 pa, t_uint64 val, uint32 lnt);
+
+/* SCP data structures and interface routines
+
+ sim_PC pointer to saved PC register descriptor
+ sim_emax number of words for examine
+ sim_stop_messages array of pointers to stop messages
+ sim_load binary loader
+*/
+
+/* Register SCP treats as the program counter (first entry of cpu_reg) */
+REG *sim_PC = &cpu_reg[0];
+
+/* Maximum number of words examined per SCP command */
+int32 sim_emax = 1;
+
+/* Stop messages, indexed by the simulator stop code */
+const char *sim_stop_messages[] = {
+    "Unknown error",
+    "HALT instruction",
+    "Breakpoint",
+    "Unsupported PAL variation",
+    "Kernel stack not valid",
+    "Unknown abort code",
+    "Memory management error"
+    };
+
+/* Binary loader
+
+ The binary loader handles absolute system images, that is, system
+ images linked /SYSTEM. These are simply a byte stream, with no
+ origin or relocation information.
+
+ -r load ROM
+ -o specify origin
+*/
+
+/* Load a raw byte-stream image into ROM (-r) or main memory, starting at
+   0 or at the hex origin given with -o.
+   fileref = open input stream, cptr = rest of the command line,
+   fnam    = file name (unused here), flag = non-zero for dump (rejected).
+   Returns SCPE_OK, SCPE_ARG on bad arguments, SCPE_NXM on a write
+   outside ROM/memory.  Note the -o origin is bounded to 32 bits by the
+   get_uint limit below. */
+t_stat sim_load (FILE *fileref, char *cptr, char *fnam, int flag)
+{
+t_stat r;
+int32 i;
+t_uint64 origin;
+
+if (flag) return SCPE_ARG;                              /* dump? */
+origin = 0;                                             /* memory */
+if (sim_switches & SWMASK ('O')) {                      /* origin? */
+    origin = get_uint (cptr, 16, 0xFFFFFFFF, &r);
+    if (r != SCPE_OK) return SCPE_ARG;
+    }
+
+while ((i = getc (fileref)) != EOF) {                   /* read byte stream */
+    if (sim_switches & SWMASK ('R')) {                  /* ROM? */
+        if (!rom_wr (origin, i, L_BYTE))
+            return SCPE_NXM;
+        }
+    else if (ADDR_IS_MEM (origin))                      /* valid memory? */
+        WritePB (origin, i);
+    else return SCPE_NXM;
+    origin = origin + 1;
+    }
+return SCPE_OK;
+}
+
+/* Opcode mnemonics table */
+
+/* Instruction class codes (low 4 bits of an opval[] flag word), PALcode
+   variant gate bits, and operand-field flags.  The C_xxx composites below
+   are the flag words actually stored in opval[].
+   NOTE(review): PAL_MASK ignores its argument and always reads the global
+   pal_type; call sites happen to pass pal_type, so behavior matches, but
+   confirm the parameter is intentional before relying on it. */
+#define CL_NO           0                               /* no operand */
+#define CL_BR           1                               /* branch */
+#define CL_MR           2                               /* memory reference */
+#define CL_IO           3                               /* integer opr */
+#define CL_FO           4                               /* floating opr */
+#define CL_MO           5                               /* memory opr */
+#define CL_JP           6                               /* jump */
+#define CL_HW           7                               /* hardware */
+#define CL_M_PAL        0x00F0
+#define CL_V_PAL        4
+#define CL_VMS          (1u << (PAL_VMS + CL_V_PAL))
+#define CL_UNIX         (1u << (PAL_UNIX + CL_V_PAL))
+#define CL_NT           (1u << (PAL_NT + CL_V_PAL))
+#define FL_RA           0x0100
+#define FL_RB           0x0200
+#define FL_RC           0x0400
+#define FL_RBI          0x0800
+#define FL_MDP          0x1000
+#define FL_BDP          0x2000
+#define FL_JDP          0x4000
+#define FL_LIT          0x8000
+#define CL_CLASS        0x000F
+#define PAL_MASK(x)     (1u << (pal_type + CL_V_PAL))
+
+/* Flag-word composites: operand syntax for each instruction shape */
+#define C_NO            CL_NO
+#define C_PCM           CL_NO | CL_VMS | CL_UNIX | CL_NT
+#define C_PVM           CL_NO | CL_VMS
+#define C_PUN           CL_NO | CL_UNIX
+#define C_PNT           CL_NO | CL_NT
+#define C_BR            CL_BR | FL_RA | FL_BDP
+#define C_MR            CL_MR | FL_RA | FL_RB | FL_RBI | FL_MDP
+#define C_FE            CL_MO | FL_RB | FL_RBI
+#define C_RV            CL_MO | FL_RA
+#define C_MO            CL_MO | FL_RA | FL_RB
+#define C_IO            CL_IO | FL_RA | FL_RB | FL_RC | FL_LIT
+#define C_IAC           CL_IO | FL_RA | FL_RC
+#define C_IBC           CL_IO | FL_RB | FL_RC | FL_LIT
+#define C_FO            CL_FO | FL_RA | FL_RB | FL_RC
+#define C_FAC           CL_FO | FL_RA | FL_RC
+#define C_FBC           CL_FO | FL_RB | FL_RC
+#define C_JP            CL_JP | FL_RA | FL_RB | FL_RBI | FL_JDP
+#define C_HW            CL_HW
+
+/* Instruction-compare masks, indexed by class code CL_NO..CL_HW:
+   which bits of a candidate instruction must equal the opval[] entry */
+uint32 masks[8] = {
+    0xFFFFFFFF, 0xFC000000,
+    0xFC000000, 0xFC000FE0,
+    0xFC00FFE0, 0xFC00FFFF,
+    0xFC00C000, 0xFC000000
+    };
+
+/* Mnemonic table, NULL terminated; entry i pairs with opval[2*i].
+   Fixed vs. the Alpha Architecture Handbook: STF (was a duplicate STS),
+   FBGE (was BFGE), MULQ/V (was MULLQ/V). */
+const char *opcode[] = {
+ "HALT", "DRAINA", "CFLUSH", "LDQP",                    /* VMS PALcode */
+ "STQP", "SWPCTX", "MFPR_ASN", "MTPR_ASTEN",
+ "MTPR_ASTSR", "CSERVE", "SWPPAL", "MFPR_FEN",
+ "MTPR_FEN", "MTPR_IPIR", "MFPR_IPL", "MTPR_IPL",
+ "MFPR_MCES", "MTPR_MCES", "MFPR_PCBB", "MFPR_PRBR",
+ "MTPR_PRBR", "MFPR_PTBR", "MFPR_SCBB", "MTPR_SCBB",
+ "MTPR_SIRR", "MFPR_SISR", "MFPR_TBCHK", "MTPR_TBIA",
+ "MTPR_TBIAP", "MTPR_TBIS", "MFPR_ESP", "MTPR_ESP",
+ "MFPR_SSP", "MTPR_SSP", "MFPR_USP", "MTPR_USP",
+ "MTPR_TBISD", "MTPR_TBISI", "MFPR_ASTEN", "MFPR_ASTSR",
+ "MFPR_VTBR", "MTPR_VTBR", "MTPR_PERFMON", "MTPR_DATFX",
+ "MFPR_VIRBND", "MTPR_VIRBND", "MFPR_SYSPTBR", "MTPR_SYSPTBR",
+ "WTINT", "MFPR_WHAMI",
+ "BPT", "BUGCHK", "CHME", "CHMK",
+ "CHMS", "CHMU", "IMB", "INSQHIL",
+ "INSQTIL", "INSQHIQ", "INSQTIQ", "INSQUEL",
+ "INSQUEQ", "INSQUEL/D", "INSQUEQ/D", "PROBER",
+ "PROBEW", "RD_PS", "REI", "REMQHIL",
+ "REMQTIL", "REMQHIQ", "REMQTIQ", "REMQUEL",
+ "REMQUEQ", "REMQUEL/D", "REMQUEQ/D", "SWASTEN",
+ "WR_PS_SW", "RSCC", "RD_UNQ", "WR_UNQ",
+ "AMOVRR", "AMOVRM", "INSQHILR", "INSQTILR",
+ "INSQHIQR", "INSQTIQR", "REMQHILR", "REMQTILR",
+ "REMQHIQR", "REMQTIQR", "GENTRAP", "CLRFEN",
+ "RDMCES", "WRMCES", "WRVIRBND", "WRSYSPTBR",           /* UNIX PALcode */
+ "WRFEN", "WRVPTPTR", "WRASN",
+ "SWPCTX", "WRVAL", "RDVAL", "TBI",
+ "WRENT", "SWPIPL", "RDPS", "WRKGP",
+ "WRUSP", "WRPERFMON", "RDUSP",
+ "WHAMI", "RETSYS", "RTI",
+ "URTI", "RDUNIQUE", "WRUNIQUE",
+ "LDA", "LDAH", "LDBU", "LDQ_U",
+ "LDWU", "STW", "STB", "STQ_U",
+ "ADDL", "S4ADDL", "SUBL", "S4SUBL",
+ "CMPBGE", "S8ADDL", "S8SUBL", "CMPULT",
+ "ADDQ", "S4ADDQ", "SUBQ", "S4SUBQ",
+ "CMPEQ", "S8ADDQ", "S8SUBQ", "CMPULE",
+ "ADDL/V", "SUBL/V", "CMPLT",
+ "ADDQ/V", "SUBQ/V", "CMPLE",
+ "AND", "BIC", "CMOVLBS", "CMOVLBC",
+ "BIS", "CMOVEQ", "CMOVNE", "ORNOT",
+ "XOR", "CMOVLT", "CMOVGE", "EQV",
+ "CMOVLE", "CMOVGT",
+ "MSKBL", "EXTBL", "INSBL",
+ "MSKWL", "EXTWL", "INSWL",
+ "MSKLL", "EXTLL", "INSLL",
+ "ZAP", "ZAPNOT", "MSKQL", "SRL",
+ "EXTQL", "SLL", "INSQL", "SRA",
+ "MSKWQ", "EXTWQ", "INSWQ",
+ "MSKLQ", "EXTLQ", "INSLQ",
+ "MSKQH", "EXTQH", "INSQH",
+ "MULL", "MULQ", "UMULH",
+ "MULL/V", "MULQ/V",
+ "ITOFS", "ITOFF", "ITOFT",
+ "SQRTF/C", "SQRTF", "SQRTF/UC", "SQRTF/U",
+ "SQRTF/SC", "SQRTF/S", "SQRTF/SUC", "SQRTF/SU",
+ "SQRTG/C", "SQRTG", "SQRTG/UC", "SQRTG/U",
+ "SQRTG/SC", "SQRTG/S", "SQRTG/SUC", "SQRTG/SU",
+ "SQRTS/C", "SQRTS/M", "SQRTS", "SQRTS/D",
+ "SQRTS/UC", "SQRTS/UM", "SQRTS/U", "SQRTS/UD",
+ "SQRTS/SUC", "SQRTS/SUM", "SQRTS/SU", "SQRTS/SUD",
+ "SQRTS/SUIC", "SQRTS/SUIM", "SQRTS/SUI", "SQRTS/SUID",
+ "SQRTT/C", "SQRTT/M", "SQRTT", "SQRTT/D",
+ "SQRTT/UC", "SQRTT/UM", "SQRTT/U", "SQRTT/UD",
+ "SQRTT/SUC", "SQRTT/SUM", "SQRTT/SU", "SQRTT/SUD",
+ "SQRTT/SUIC", "SQRTT/SUIM", "SQRTT/SUI", "SQRTT/SUID",
+ "ADDF/C", "ADDF", "ADDF/UC", "ADDF/U",
+ "ADDF/SC", "ADDF/S", "ADDF/SUC", "ADDF/SU",
+ "SUBF/C", "SUBF", "SUBF/UC", "SUBF/U",
+ "SUBF/SC", "SUBF/S", "SUBF/SUC", "SUBF/SU",
+ "MULF/C", "MULF", "MULF/UC", "MULF/U",
+ "MULF/SC", "MULF/S", "MULF/SUC", "MULF/SU",
+ "DIVF/C", "DIVF", "DIVF/UC", "DIVF/U",
+ "DIVF/SC", "DIVF/S", "DIVF/SUC", "DIVF/SU",
+ "ADDG/C", "ADDG", "ADDG/UC", "ADDG/U",
+ "ADDG/SC", "ADDG/S", "ADDG/SUC", "ADDG/SU",
+ "SUBG/C", "SUBG", "SUBG/UC", "SUBG/U",
+ "SUBG/SC", "SUBG/S", "SUBG/SUC", "SUBG/SU",
+ "MULG/C", "MULG", "MULG/UC", "MULG/U",
+ "MULG/SC", "MULG/S", "MULG/SUC", "MULG/SU",
+ "DIVG/C", "DIVG", "DIVG/UC", "DIVG/U",
+ "DIVG/SC", "DIVG/S", "DIVG/SUC", "DIVG/SU",
+ "CVTDG/C", "CVTDG", "CVTDG/UC", "CVTDG/U",
+ "CVTDG/SC", "CVTDG/S", "CVTDG/SUC", "CVTDG/SU",
+ "CVTGF/C", "CVTGF", "CVTGF/UC", "CVTGF/U",
+ "CVTGF/SC", "CVTGF/S", "CVTGF/SUC", "CVTGF/SU",
+ "CVTGD/C", "CVTGD", "CVTGD/UC", "CVTGD/U",
+ "CVTGD/SC", "CVTGD/S", "CVTGD/SUC", "CVTGD/SU",
+ "CVTGQ/C", "CVTGQ", "CVTGQ/VC", "CVTGQ/V",
+ "CVTGQ/SC", "CVTGQ/S", "CVTGQ/SVC", "CVTGQ/SV",
+ "CVTQF/C", "CVTQF", "CVTQG/C", "CVTQG",
+ "CMPGEQ/C", "CMPGEQ/SC", "CMPGLT/C", "CMPGLT/SC",
+ "CMPGLE/C", "CMPGLE/SC",
+ "ADDS/C", "ADDS/M", "ADDS", "ADDS/D",
+ "ADDS/UC", "ADDS/UM", "ADDS/U", "ADDS/UD",
+ "ADDS/SUC", "ADDS/SUM", "ADDS/SU", "ADDS/SUD",
+ "ADDS/SUIC", "ADDS/SUIM", "ADDS/SUI", "ADDS/SUID",
+ "SUBS/C", "SUBS/M", "SUBS", "SUBS/D",
+ "SUBS/UC", "SUBS/UM", "SUBS/U", "SUBS/UD",
+ "SUBS/SUC", "SUBS/SUM", "SUBS/SU", "SUBS/SUD",
+ "SUBS/SUIC", "SUBS/SUIM", "SUBS/SUI", "SUBS/SUID",
+ "MULS/C", "MULS/M", "MULS", "MULS/D",
+ "MULS/UC", "MULS/UM", "MULS/U", "MULS/UD",
+ "MULS/SUC", "MULS/SUM", "MULS/SU", "MULS/SUD",
+ "MULS/SUIC", "MULS/SUIM", "MULS/SUI", "MULS/SUID",
+ "DIVS/C", "DIVS/M", "DIVS", "DIVS/D",
+ "DIVS/UC", "DIVS/UM", "DIVS/U", "DIVS/UD",
+ "DIVS/SUC", "DIVS/SUM", "DIVS/SU", "DIVS/SUD",
+ "DIVS/SUIC", "DIVS/SUIM", "DIVS/SUI", "DIVS/SUID",
+ "ADDT/C", "ADDT/M", "ADDT", "ADDT/D",
+ "ADDT/UC", "ADDT/UM", "ADDT/U", "ADDT/UD",
+ "ADDT/SUC", "ADDT/SUM", "ADDT/SU", "ADDT/SUD",
+ "ADDT/SUIC", "ADDT/SUIM", "ADDT/SUI", "ADDT/SUID",
+ "SUBT/C", "SUBT/M", "SUBT", "SUBT/D",
+ "SUBT/UC", "SUBT/UM", "SUBT/U", "SUBT/UD",
+ "SUBT/SUC", "SUBT/SUM", "SUBT/SU", "SUBT/SUD",
+ "SUBT/SUIC", "SUBT/SUIM", "SUBT/SUI", "SUBT/SUID",
+ "MULT/C", "MULT/M", "MULT", "MULT/D",
+ "MULT/UC", "MULT/UM", "MULT/U", "MULT/UD",
+ "MULT/SUC", "MULT/SUM", "MULT/SU", "MULT/SUD",
+ "MULT/SUIC", "MULT/SUIM", "MULT/SUI", "MULT/SUID",
+ "DIVT/C", "DIVT/M", "DIVT", "DIVT/D",
+ "DIVT/UC", "DIVT/UM", "DIVT/U", "DIVT/UD",
+ "DIVT/SUC", "DIVT/SUM", "DIVT/SU", "DIVT/SUD",
+ "DIVT/SUIC", "DIVT/SUIM", "DIVT/SUI", "DIVT/SUID",
+ "CVTTS/C", "CVTTS/M", "CVTTS", "CVTTS/D",
+ "CVTTS/UC", "CVTTS/UM", "CVTTS/U", "CVTTS/UD",
+ "CVTTS/SUC", "CVTTS/SUM", "CVTTS/SU", "CVTTS/SUD",
+ "CVTTS/SUIC", "CVTTS/SUIM", "CVTTS/SUI", "CVTTS/SUID",
+ "CVTTQ/C", "CVTTQ/M", "CVTTQ", "CVTTQ/D",
+ "CVTTQ/VC", "CVTTQ/VM", "CVTTQ/V", "CVTTQ/VD",
+ "CVTTQ/SVC", "CVTTQ/SVM", "CVTTQ/SV", "CVTTQ/SVD",
+ "CVTTQ/SVIC", "CVTTQ/SVIM", "CVTTQ/SVI", "CVTTQ/SVID",
+ "CVTQS/C", "CVTQS/M", "CVTQS", "CVTQS/D",
+ "CVTQS/SUIC", "CVTQS/SUIM", "CVTQS/SUI", "CVTQS/SUID",
+ "CVTQT/C", "CVTQT/M", "CVTQT", "CVTQT/D",
+ "CVTQT/SUIC", "CVTQT/SUIM", "CVTQT/SUI", "CVTQT/SUID",
+ "CMPTUN/C", "CMPTUN/S", "CMPTEQ/C", "CMPTEQ/S",
+ "CMPTLT/C", "CMPTLT/S", "CMPTLE/C", "CMPTLE/S",
+ "CVTLQ", "CPYS", "CPYSN", "CPYSE",
+ "MT_FPCR", "MF_FPCR",
+ "FCMOVEQ", "FCMOVNE", "FCMOVLT",
+ "FCMOVGE", "FCMOVLE", "FCMOVGT",
+ "CVTQL", "CVTQL/V", "CVTQL/SV",
+ "TRAPB", "EXCB", "MB", "WMB",
+ "FETCH", "FETCH_M", "RPCC",
+ "RC", "RS",
+ "JMP", "JSR", "RET", "JSR_COROUTINE",
+ "SEXTB", "SEXTW",
+ "CTPOP", "PERR", "CTLZ", "CTTZ",
+ "UNPKBW", "UNPKBL", "PKWB", "PKLB",
+ "MINSB8", "MINSW4", "MINUB8", "MINUW4",
+ "MAXSB8", "MAXSW4", "MAXUB8", "MAXUW4",
+ "FTOIT", "FTOIS",
+ "LDF", "LDG", "LDS", "LDT",
+ "STF", "STG", "STS", "STT",
+ "LDL", "LDQ", "LDL_L", "LDQ_L",
+ "STL", "STQ", "STL_L", "STQ_L",
+ "BR", "FBEQ", "FBLT", "FBLE",
+ "BSR", "FBNE", "FBGE", "FBGT",
+ "BLBC", "BEQ", "BLT", "BLE",
+ "BLBS", "BNE", "BGE", "BGT",
+ NULL
+ };
+
+/* Instruction value table: pairs of (base instruction value, class/flag
+   word), in one-to-one correspondence with opcode[] (opval[2*i] pairs
+   with opcode[i]); terminated by an M32 entry.
+   Spot-fixed function codes per the Alpha Architecture Handbook:
+   CMPBGE = 10.0F (0x1E0, was 0x1C0); CVTQL group = 17.030/.130/.530
+   (fn << 5 = 0x600/0x2600/0xA600); MINUW4 = 1C.3B (0x760), and the
+   MAXSB8/MAXSW4/MAXUB8/MAXUW4 values reordered to match 1C.3E/.3F/.3C/.3D. */
+const uint32 opval[] = {
+ 0x00000000, C_PCM, 0x00000001, C_PCM, 0x00000002, C_PCM, 0x00000003, C_PVM,
+ 0x00000004, C_PVM, 0x00000005, C_PVM, 0x00000006, C_PVM, 0x00000007, C_PVM,
+ 0x00000008, C_PVM, 0x00000009, C_PCM, 0x0000000A, C_PCM, 0x0000000B, C_PVM,
+ 0x0000000C, C_PVM, 0x0000000D, C_PVM, 0x0000000E, C_PVM, 0x0000000F, C_PVM,
+ 0x00000010, C_PVM, 0x00000011, C_PVM, 0x00000012, C_PVM, 0x00000013, C_PVM,
+ 0x00000014, C_PVM, 0x00000015, C_PVM, 0x00000016, C_PVM, 0x00000017, C_PVM,
+ 0x00000018, C_PVM, 0x00000019, C_PVM, 0x0000001A, C_PVM, 0x0000001B, C_PVM,
+ 0x0000001C, C_PVM, 0x0000001D, C_PVM, 0x0000001E, C_PVM, 0x0000001F, C_PVM,
+ 0x00000020, C_PVM, 0x00000021, C_PVM, 0x00000022, C_PVM, 0x00000023, C_PVM,
+ 0x00000024, C_PVM, 0x00000025, C_PVM, 0x00000026, C_PVM, 0x00000027, C_PVM,
+ 0x00000029, C_PVM, 0x0000002A, C_PVM, 0x0000002B, C_PVM, 0x0000002E, C_PVM,
+ 0x00000030, C_PVM, 0x00000031, C_PVM, 0x00000032, C_PVM, 0x00000033, C_PVM,
+ 0x0000003E, C_PCM, 0x0000003F, C_PVM,
+ 0x00000080, C_PCM, 0x00000081, C_PCM, 0x00000082, C_PVM, 0x00000083, C_PVM,
+ 0x00000084, C_PVM, 0x00000085, C_PVM, 0x00000086, C_PCM, 0x00000087, C_PVM,
+ 0x00000088, C_PVM, 0x00000089, C_PVM, 0x0000008A, C_PVM, 0x0000008B, C_PVM,
+ 0x0000008C, C_PVM, 0x0000008D, C_PVM, 0x0000008E, C_PVM, 0x0000008F, C_PVM,
+ 0x00000090, C_PVM, 0x00000091, C_PVM, 0x00000092, C_PVM, 0x00000093, C_PVM,
+ 0x00000094, C_PVM, 0x00000095, C_PVM, 0x00000096, C_PVM, 0x00000097, C_PVM,
+ 0x00000098, C_PVM, 0x00000099, C_PVM, 0x0000009A, C_PVM, 0x0000009B, C_PVM,
+ 0x0000009C, C_PVM, 0x0000009D, C_PVM, 0x0000009E, C_PVM, 0x0000009F, C_PVM,
+ 0x000000A0, C_PVM, 0x000000A1, C_PVM, 0x000000A2, C_PVM, 0x000000A3, C_PVM,
+ 0x000000A4, C_PVM, 0x000000A5, C_PVM, 0x000000A6, C_PVM, 0x000000A7, C_PVM,
+ 0x000000A8, C_PVM, 0x000000A9, C_PVM, 0x000000AA, C_PCM, 0x000000AE, C_PCM,
+ 0x00000010, C_PUN, 0x00000011, C_PUN, 0x00000013, C_PUN, 0x00000014, C_PUN,
+ 0x0000002B, C_PUN, 0x0000002D, C_PUN, 0x0000002E, C_PUN,
+ 0x00000030, C_PUN, 0x00000031, C_PUN, 0x00000032, C_PUN, 0x00000033, C_PUN,
+ 0x00000034, C_PUN, 0x00000035, C_PUN, 0x00000036, C_PUN, 0x00000037, C_PUN,
+ 0x00000038, C_PUN, 0x00000039, C_PUN, 0x0000003A, C_PUN,
+ 0x0000003C, C_PUN, 0x0000003D, C_PUN, 0x0000003F, C_PUN,
+ 0x00000092, C_PUN, 0x0000009E, C_PUN, 0x0000009F, C_PUN,
+ 0x20000000, C_MR, 0x24000000, C_MR, 0x28000000, C_MR, 0x2C000000, C_MR,
+ 0x30000000, C_MR, 0x34000000, C_MR, 0x38000000, C_MR, 0x3C000000, C_MR,
+ 0x40000000, C_IO, 0x40000040, C_IO, 0x40000120, C_IO, 0x40000160, C_IO,
+ 0x400001E0, C_IO, 0x40000240, C_IO, 0x40000360, C_IO, 0x400003A0, C_IO,
+ 0x40000400, C_IO, 0x40000440, C_IO, 0x40000520, C_IO, 0x40000560, C_IO,
+ 0x400005A0, C_IO, 0x40000640, C_IO, 0x40000760, C_IO, 0x400007A0, C_IO,
+ 0x40000800, C_IO, 0x40000920, C_IO, 0x400009A0, C_IO,
+ 0x40000C00, C_IO, 0x40000D20, C_IO, 0x40000DA0, C_IO,
+ 0x44000000, C_IO, 0x44000100, C_IO, 0x44000280, C_IO, 0x440002C0, C_IO,
+ 0x44000400, C_IO, 0x44000480, C_IO, 0x440004C0, C_IO, 0x44000500, C_IO,
+ 0x44000800, C_IO, 0x44000880, C_IO, 0x440008C0, C_IO, 0x44000900, C_IO,
+ 0x44000C80, C_IO, 0x44000CC0, C_IO,
+ 0x48000040, C_IO, 0x480000C0, C_IO, 0x48000160, C_IO,
+ 0x48000240, C_IO, 0x480002C0, C_IO, 0x48000360, C_IO,
+ 0x48000440, C_IO, 0x480004C0, C_IO, 0x48000560, C_IO,
+ 0x48000600, C_IO, 0x48000620, C_IO, 0x48000640, C_IO, 0x48000680, C_IO,
+ 0x480006C0, C_IO, 0x48000720, C_IO, 0x48000760, C_IO, 0x48000780, C_IO,
+ 0x48000A40, C_IO, 0x48000AE0, C_IO, 0x48000B40, C_IO,
+ 0x48000C40, C_IO, 0x48000CE0, C_IO, 0x48000D40, C_IO,
+ 0x48000E40, C_IO, 0x48000EE0, C_IO, 0x48000F40, C_IO,
+ 0x4C000000, C_IO, 0x4C000400, C_IO, 0x4C000600, C_IO,
+ 0x4C000800, C_IO, 0x4C000C00, C_IO,
+ 0x501F0080, C_FAC, 0x501F0280, C_FAC, 0x501F0480, C_FAC,
+ 0x53E00140, C_FBC, 0x53E01140, C_FBC, 0x53E02140, C_FBC, 0x53E03140, C_FBC,
+ 0x53E08140, C_FBC, 0x53E09140, C_FBC, 0x53E0A140, C_FBC, 0x53E0B140, C_FBC,
+ 0x53E00540, C_FBC, 0x53E01540, C_FBC, 0x53E02540, C_FBC, 0x53E03540, C_FBC,
+ 0x53E08540, C_FBC, 0x53E09540, C_FBC, 0x53E0A540, C_FBC, 0x53E0B540, C_FBC,
+ 0x53E00160, C_FBC, 0x53E00960, C_FBC, 0x53E01160, C_FBC, 0x53E01960, C_FBC,
+ 0x53E02160, C_FBC, 0x53E02960, C_FBC, 0x53E03160, C_FBC, 0x53E03960, C_FBC,
+ 0x53E0A160, C_FBC, 0x53E0A960, C_FBC, 0x53E0B160, C_FBC, 0x53E0B960, C_FBC,
+ 0x53E0E160, C_FBC, 0x53E0E960, C_FBC, 0x53E0F160, C_FBC, 0x53E0F960, C_FBC,
+ 0x53E00560, C_FBC, 0x53E00D60, C_FBC, 0x53E01560, C_FBC, 0x53E01D60, C_FBC,
+ 0x53E02560, C_FBC, 0x53E02D60, C_FBC, 0x53E03560, C_FBC, 0x53E03D60, C_FBC,
+ 0x53E0A560, C_FBC, 0x53E0AD60, C_FBC, 0x53E0B560, C_FBC, 0x53E0BD60, C_FBC,
+ 0x53E0E560, C_FBC, 0x53E0ED60, C_FBC, 0x53E0F560, C_FBC, 0x53E0FD60, C_FBC,
+ 0x54000000, C_FO, 0x54001000, C_FO, 0x54002000, C_FO, 0x54003000, C_FO,
+ 0x54008000, C_FO, 0x54009000, C_FO, 0x5400A000, C_FO, 0x5400B000, C_FO,
+ 0x54000020, C_FO, 0x54001020, C_FO, 0x54002020, C_FO, 0x54003020, C_FO,
+ 0x54008020, C_FO, 0x54009020, C_FO, 0x5400A020, C_FO, 0x5400B020, C_FO,
+ 0x54000040, C_FO, 0x54001040, C_FO, 0x54002040, C_FO, 0x54003040, C_FO,
+ 0x54008040, C_FO, 0x54009040, C_FO, 0x5400A040, C_FO, 0x5400B040, C_FO,
+ 0x54000060, C_FO, 0x54001060, C_FO, 0x54002060, C_FO, 0x54003060, C_FO,
+ 0x54008060, C_FO, 0x54009060, C_FO, 0x5400A060, C_FO, 0x5400B060, C_FO,
+ 0x54000400, C_FO, 0x54001400, C_FO, 0x54002400, C_FO, 0x54003400, C_FO,
+ 0x54008400, C_FO, 0x54009400, C_FO, 0x5400A400, C_FO, 0x5400B400, C_FO,
+ 0x54000420, C_FO, 0x54001420, C_FO, 0x54002420, C_FO, 0x54003420, C_FO,
+ 0x54008420, C_FO, 0x54009420, C_FO, 0x5400A420, C_FO, 0x5400B420, C_FO,
+ 0x54000440, C_FO, 0x54001440, C_FO, 0x54002440, C_FO, 0x54003440, C_FO,
+ 0x54008440, C_FO, 0x54009440, C_FO, 0x5400A440, C_FO, 0x5400B440, C_FO,
+ 0x54000460, C_FO, 0x54001460, C_FO, 0x54002460, C_FO, 0x54003460, C_FO,
+ 0x54008460, C_FO, 0x54009460, C_FO, 0x5400A460, C_FO, 0x5400B460, C_FO,
+ 0x57E003C0, C_FBC, 0x57E013C0, C_FBC, 0x57E023C0, C_FBC, 0x57E033C0, C_FBC,
+ 0x57E083C0, C_FBC, 0x57E093C0, C_FBC, 0x57E0A3C0, C_FBC, 0x57E0B3C0, C_FBC,
+ 0x57E00580, C_FBC, 0x57E01580, C_FBC, 0x57E02580, C_FBC, 0x57E03580, C_FBC,
+ 0x57E08580, C_FBC, 0x57E09580, C_FBC, 0x57E0A580, C_FBC, 0x57E0B580, C_FBC,
+ 0x57E005A0, C_FBC, 0x57E015A0, C_FBC, 0x57E025A0, C_FBC, 0x57E035A0, C_FBC,
+ 0x57E085A0, C_FBC, 0x57E095A0, C_FBC, 0x57E0A5A0, C_FBC, 0x57E0B5A0, C_FBC,
+ 0x57E005E0, C_FBC, 0x57E015E0, C_FBC, 0x57E025E0, C_FBC, 0x57E035E0, C_FBC,
+ 0x57E085E0, C_FBC, 0x57E095E0, C_FBC, 0x57E0A5E0, C_FBC, 0x57E0B5E0, C_FBC,
+ 0x57E00780, C_FBC, 0x57E01780, C_FBC, 0x57E007C0, C_FBC, 0x57E017C0, C_FBC,
+ 0x540014A0, C_FO, 0x540094A0, C_FO, 0x540014C0, C_FO, 0x540094C0, C_FO,
+ 0x540014E0, C_FO, 0x540094E0, C_FO,
+ 0x58000000, C_FO, 0x58000800, C_FO, 0x58001000, C_FO, 0x58001800, C_FO,
+ 0x58002000, C_FO, 0x58002800, C_FO, 0x58003000, C_FO, 0x58003800, C_FO,
+ 0x5800A000, C_FO, 0x5800A800, C_FO, 0x5800B000, C_FO, 0x5800B800, C_FO,
+ 0x5800E000, C_FO, 0x5800E800, C_FO, 0x5800F000, C_FO, 0x5800F800, C_FO,
+ 0x58000020, C_FO, 0x58000820, C_FO, 0x58001020, C_FO, 0x58001820, C_FO,
+ 0x58002020, C_FO, 0x58002820, C_FO, 0x58003020, C_FO, 0x58003820, C_FO,
+ 0x5800A020, C_FO, 0x5800A820, C_FO, 0x5800B020, C_FO, 0x5800B820, C_FO,
+ 0x5800E020, C_FO, 0x5800E820, C_FO, 0x5800F020, C_FO, 0x5800F820, C_FO,
+ 0x58000040, C_FO, 0x58000840, C_FO, 0x58001040, C_FO, 0x58001840, C_FO,
+ 0x58002040, C_FO, 0x58002840, C_FO, 0x58003040, C_FO, 0x58003840, C_FO,
+ 0x5800A040, C_FO, 0x5800A840, C_FO, 0x5800B040, C_FO, 0x5800B840, C_FO,
+ 0x5800E040, C_FO, 0x5800E840, C_FO, 0x5800F040, C_FO, 0x5800F840, C_FO,
+ 0x58000060, C_FO, 0x58000860, C_FO, 0x58001060, C_FO, 0x58001860, C_FO,
+ 0x58002060, C_FO, 0x58002860, C_FO, 0x58003060, C_FO, 0x58003860, C_FO,
+ 0x5800A060, C_FO, 0x5800A860, C_FO, 0x5800B060, C_FO, 0x5800B860, C_FO,
+ 0x5800E060, C_FO, 0x5800E860, C_FO, 0x5800F060, C_FO, 0x5800F860, C_FO,
+ 0x58000400, C_FO, 0x58000C00, C_FO, 0x58001400, C_FO, 0x58001C00, C_FO,
+ 0x58002400, C_FO, 0x58002C00, C_FO, 0x58003400, C_FO, 0x58003C00, C_FO,
+ 0x5800A400, C_FO, 0x5800AC00, C_FO, 0x5800B400, C_FO, 0x5800BC00, C_FO,
+ 0x5800E400, C_FO, 0x5800EC00, C_FO, 0x5800F400, C_FO, 0x5800FC00, C_FO,
+ 0x58000420, C_FO, 0x58000C20, C_FO, 0x58001420, C_FO, 0x58001C20, C_FO,
+ 0x58002420, C_FO, 0x58002C20, C_FO, 0x58003420, C_FO, 0x58003C20, C_FO,
+ 0x5800A420, C_FO, 0x5800AC20, C_FO, 0x5800B420, C_FO, 0x5800BC20, C_FO,
+ 0x5800E420, C_FO, 0x5800EC20, C_FO, 0x5800F420, C_FO, 0x5800FC20, C_FO,
+ 0x58000440, C_FO, 0x58000C40, C_FO, 0x58001440, C_FO, 0x58001C40, C_FO,
+ 0x58002440, C_FO, 0x58002C40, C_FO, 0x58003440, C_FO, 0x58003C40, C_FO,
+ 0x5800A440, C_FO, 0x5800AC40, C_FO, 0x5800B440, C_FO, 0x5800BC40, C_FO,
+ 0x5800E440, C_FO, 0x5800EC40, C_FO, 0x5800F440, C_FO, 0x5800FC40, C_FO,
+ 0x58000460, C_FO, 0x58000C60, C_FO, 0x58001460, C_FO, 0x58001C60, C_FO,
+ 0x58002460, C_FO, 0x58002C60, C_FO, 0x58003460, C_FO, 0x58003C60, C_FO,
+ 0x5800A460, C_FO, 0x5800AC60, C_FO, 0x5800B460, C_FO, 0x5800BC60, C_FO,
+ 0x5800E460, C_FO, 0x5800EC60, C_FO, 0x5800F460, C_FO, 0x5800FC60, C_FO,
+ 0x5BE00580, C_FBC, 0x5BE00D80, C_FBC, 0x5BE01580, C_FBC, 0x5BE01D80, C_FBC,
+ 0x5BE02580, C_FBC, 0x5BE02D80, C_FBC, 0x5BE03580, C_FBC, 0x5BE03D80, C_FBC,
+ 0x5BE0A580, C_FBC, 0x5BE0AD80, C_FBC, 0x5BE0B580, C_FBC, 0x5BE0BD80, C_FBC,
+ 0x5BE0E580, C_FBC, 0x5BE0ED80, C_FBC, 0x5BE0F580, C_FBC, 0x5BE0FD80, C_FBC,
+ 0x5BE005E0, C_FBC, 0x5BE00DE0, C_FBC, 0x5BE015E0, C_FBC, 0x5BE01DE0, C_FBC,
+ 0x5BE025E0, C_FBC, 0x5BE02DE0, C_FBC, 0x5BE035E0, C_FBC, 0x5BE03DE0, C_FBC,
+ 0x5BE0A5E0, C_FBC, 0x5BE0ADE0, C_FBC, 0x5BE0B5E0, C_FBC, 0x5BE0BDE0, C_FBC,
+ 0x5BE0E5E0, C_FBC, 0x5BE0EDE0, C_FBC, 0x5BE0F5E0, C_FBC, 0x5BE0FDE0, C_FBC,
+ 0x5BE00780, C_FBC, 0x5BE00F80, C_FBC, 0x5BE01780, C_FBC, 0x5BE01F80, C_FBC,
+ 0x5BE0E780, C_FBC, 0x5BE0EF80, C_FBC, 0x5BE0F780, C_FBC, 0x5BE0FF80, C_FBC,
+ 0x5BE007C0, C_FBC, 0x5BE00FC0, C_FBC, 0x5BE017C0, C_FBC, 0x5BE01FC0, C_FBC,
+ 0x5BE0E7C0, C_FBC, 0x5BE0EFC0, C_FBC, 0x5BE0F7C0, C_FBC, 0x5BE0FFC0, C_FBC,
+ 0x58001480, C_FO, 0x58009480, C_FO, 0x580014A0, C_FO, 0x580094A0, C_FO,
+ 0x580014C0, C_FO, 0x580094C0, C_FO, 0x580014E0, C_FO, 0x580094E0, C_FO,
+ 0x5FE00200, C_IBC, 0x5C000400, C_IO, 0x5C000420, C_IO, 0x5C000440, C_IO,
+ 0x5C000480, C_IO, 0x5C0004A0, C_IO,
+ 0x5C000540, C_IO, 0x5C000560, C_IO, 0x5C000580, C_IO,
+ 0x5C0005A0, C_IO, 0x5C0005C0, C_IO, 0x5C0005E0, C_IO,
+ 0x5FE00600, C_IBC, 0x5FE02600, C_IBC, 0x5FE0A600, C_IBC,
+ 0x60000000, C_NO, 0x60000400, C_NO, 0x60004000, C_NO, 0x60004400, C_NO,
+ 0x60008000, C_FE, 0x6000A000, C_FE, 0x6000C000, C_NO,
+ 0x6000E000, C_RV, 0x6000F000, C_RV,
+ 0x68000000, C_JP, 0x68004000, C_JP, 0x68008000, C_JP, 0x6800C000, C_JP,
+ 0x73E00000, C_IBC, 0x73E00020, C_IBC,
+ 0x73E00600, C_IBC, 0x70000620, C_IO, 0x73E00640, C_IBC, 0x73E00660, C_IBC,
+ 0x73E00680, C_IBC, 0x73E006A0, C_IBC, 0x73E006C0, C_IBC, 0x73E006E0, C_IBC,
+ 0x70000700, C_IO, 0x70000720, C_IO, 0x70000740, C_IO, 0x70000760, C_IO,
+ 0x700007C0, C_IO, 0x700007E0, C_IO, 0x70000780, C_IO, 0x700007A0, C_IO,
+ 0x701F0E00, C_IAC, 0x701F0F00, C_IAC,
+ 0x80000000, C_MR, 0x84000000, C_MR, 0x88000000, C_MR, 0x8C000000, C_MR,
+ 0x90000000, C_MR, 0x94000000, C_MR, 0x98000000, C_MR, 0x9C000000, C_MR,
+ 0xA0000000, C_MR, 0xA4000000, C_MR, 0xA8000000, C_MR, 0xAC000000, C_MR,
+ 0xB0000000, C_MR, 0xB4000000, C_MR, 0xB8000000, C_MR, 0xBC000000, C_MR,
+ 0xC0000000, C_BR, 0xC4000000, C_BR, 0xC8000000, C_BR, 0xCC000000, C_BR,
+ 0xD0000000, C_BR, 0xD4000000, C_BR, 0xD8000000, C_BR, 0xDC000000, C_BR,
+ 0xE0000000, C_BR, 0xE4000000, C_BR, 0xE8000000, C_BR, 0xEC000000, C_BR,
+ 0xF0000000, C_BR, 0xF4000000, C_BR, 0xF8000000, C_BR, 0xFC000000, C_BR,
+ M32, 0
+ };
+
+/* Symbolic decode
+
+ Inputs:
+ *of = output stream
+ addr = current PC
+ *val = values to decode
+ *uptr = pointer to unit
+ sw = switches
+ Outputs:
+ return = if >= 0, error code
+ if < 0, number of extra bytes retired
+*/
+
+/* SCP examine hook: format one quadword at addr per the switches.
+   Negative returns are -(extra bytes consumed): 0 for a byte/char,
+   -1 for a word, -3 for a longword, -7 for a quadword. */
+t_stat fprint_sym (FILE *of, t_addr addr, t_value *val,
+    UNIT *uptr, int32 sw)
+{
+uint32 c, sc, rdx;
+t_stat r;
+DEVICE *dptr;
+
+if (uptr == NULL) uptr = &cpu_unit;                     /* anon = CPU */
+else if (uptr != &cpu_unit) return SCPE_ARG;            /* CPU only */
+dptr = find_dev_from_unit (uptr);                       /* find dev */
+if (dptr == NULL) return SCPE_IERR;
+if (sw & SWMASK ('D')) rdx = 10;                        /* get radix */
+else if (sw & SWMASK ('O')) rdx = 8;
+else if (sw & SWMASK ('H')) rdx = 16;
+else rdx = dptr->dradix;
+
+if (sw & SWMASK ('A')) {                                /* ASCII? */
+    sc = (uint32) (addr & 0x7) * 8;                     /* shift count */
+    c = (uint32) (val[0] >> sc) & 0x7F;
+    fprintf (of, (c < 0x20)? "<%02X>": "%c", c);
+    return 0;
+    }
+if (sw & SWMASK ('B')) {                                /* byte? */
+    sc = (uint32) (addr & 0x7) * 8;                     /* shift count */
+    c = (uint32) (val[0] >> sc) & M8;
+    fprintf (of, "%02X", c);
+    return 0;
+    }
+if (sw & SWMASK ('W')) {                                /* word? */
+    sc = (uint32) (addr & 0x6) * 8;                     /* shift count */
+    c = (uint32) (val[0] >> sc) & M16;
+    fprintf (of, "%04X", c);
+    return -1;
+    }
+if (sw & SWMASK ('L')) {                                /* long? */
+    if (addr & 4) c = (uint32) (val[0] >> 32) & M32;
+    else c = (uint32) val[0] & M32;
+    fprintf (of, "%08X", c);
+    return -3;
+    }
+if (sw & SWMASK ('C')) {                                /* char format? */
+    for (sc = 0; sc < 64; sc = sc + 8) {                /* print string */
+        c = (uint32) (val[0] >> sc) & 0x7F;
+        fprintf (of, (c < 0x20)? "<%02X>": "%c", c);
+        }
+    return -7;                                          /* return # chars */
+    }
+if (sw & SWMASK ('M')) {                                /* inst format? */
+    if (addr & 4) c = (uint32) (val[0] >> 32) & M32;
+    else c = (uint32) val[0] & M32;
+    r = fprint_sym_m (of, addr, c);                     /* decode inst */
+    /* on decode failure (r > 0), fall through to a numeric dump */
+    if (r <= 0) return r;
+    }
+
+/* default: full quadword in the selected radix */
+fprint_val (of, val[0], rdx, 64, PV_RZRO);
+return -7;
+}
+
+/* Symbolic decode for -m
+
+ Inputs:
+ of = output stream
+ addr = current PC
+ inst = instruction to decode
+ Outputs:
+ return = if >= 0, error code
+ if < 0, number of extra bytes retired (-3)
+*/
+
+/* Disassemble one 32-bit instruction.  PAL hardware instructions are
+   tried first; otherwise the opval[] table is scanned, comparing under
+   the class-specific mask, with no-operand (PALcode) entries further
+   gated by the current PAL variant.  Returns -3 on success (4 bytes
+   consumed), SCPE_ARG if nothing matches. */
+t_stat fprint_sym_m (FILE *of, t_addr addr, uint32 inst)
+{
+uint32 i, j, k, fl, ra, rb, rc, md, bd, jd, lit8, any;
+t_stat r;
+
+if ((r = fprint_pal_hwre (of, inst)) < 0) return r;     /* PAL instruction? */
+for (i = 0; opval[i] != M32; i = i + 2) {               /* loop thru ops */
+    fl = opval[i + 1];                                  /* flags */
+    j = fl & CL_CLASS;                                  /* get class */
+    k = i >> 1;                                         /* opcode[] index */
+    if (((opval[i] & masks[j]) == (inst & masks[j])) && /* match? */
+        ((j != CL_NO) || (fl & PAL_MASK (pal_type)))) {
+        ra = I_GETRA (inst);                            /* all fields */
+        rb = I_GETRB (inst);
+        rc = I_GETRC (inst);
+        lit8 = I_GETLIT8 (inst);
+        md = I_GETMDSP (inst);
+        bd = I_GETBDSP (inst);
+        jd = inst & 0x3FFF;
+        any = 0;                                        /* any operand printed yet? */
+        fprintf (of, "%s", opcode[k]);                  /* opcode */
+        if (fl & FL_RA)                                 /* ra? */
+            any = fprintf (of, " R%d", ra);
+        if (fl & FL_BDP) {                              /* branch? */
+            /* target = PC of next inst + sign-extended disp * 4 */
+            addr = (addr + (SEXT_BDSP (bd) << 2) + 4) & M64;
+            any = fprintf (of, (any? ",": " "));
+            fprint_val (of, addr, 16, 64, PV_LEFT);
+            }
+        else if (fl & FL_MDP) {                         /* mem ref? */
+            if ((fl & FL_RBI) && (rb != 31))
+                any = fprintf (of, (any? ",%X(R%d)": " %X(R%d)"), md, rb);
+            else any = fprintf (of, (any? ",%X": " %X"), md);
+            }
+        else if (fl & FL_RB) {                          /* rb? */
+            if (fl & FL_RBI)
+                any = fprintf (of, (any? ",(R%d)": " (R%d)"), rb);
+            else if ((fl & FL_LIT) && (inst & I_ILIT))
+                any = fprintf (of, (any? ",#%X": " #%X"), lit8);
+            else any = fprintf (of, (any? ",R%d": " R%d"), rb);
+            }
+        if ((fl & FL_JDP) && jd)                        /* jmp? */
+            any = fprintf (of, (any? ",%X": " %X"), jd);
+        else if (fl & FL_RC)                            /* rc? */
+            any = fprintf (of, (any? ",R%d": " R%d"), rc);
+        return -3;
+        }                                               /* end if */
+    }                                                   /* end for */
+return SCPE_ARG;
+}
+
+/* Symbolic input
+
+ Inputs:
+ *cptr = pointer to input string
+ addr = current PC
+ *uptr = pointer to unit
+ *val = pointer to output values
+ sw = switches
+ Outputs:
+ status = > 0 error code
+ <= 0 -number of extra words
+*/
+
+/* SCP deposit hook: parse input per the switches, merging the result
+   into the quadword val[0] at the lane selected by the low address bits.
+   Returns 0/-1/-3/-7 for byte/word/longword/quadword widths, or an
+   error status. */
+t_stat parse_sym (char *cptr, t_addr addr, UNIT *uptr, t_value *val, int32 sw)
+{
+t_value num;
+uint32 i, sc, rdx;
+t_stat r;
+DEVICE *dptr;
+
+if (uptr == NULL) uptr = &cpu_unit;                     /* anon = CPU */
+else if (uptr != &cpu_unit) return SCPE_ARG;            /* CPU only */
+dptr = find_dev_from_unit (uptr);                       /* find dev */
+if (dptr == NULL) return SCPE_IERR;
+if (sw & SWMASK ('D')) rdx = 10;                        /* get radix */
+else if (sw & SWMASK ('O')) rdx = 8;
+else if (sw & SWMASK ('H')) rdx = 16;
+else rdx = dptr->dradix;
+
+/* note: the cptr++ side effect skips the leading quote when present */
+if ((sw & SWMASK ('A')) || ((*cptr == '\'') && cptr++)) { /* ASCII char? */
+    if (cptr[0] == 0) return SCPE_ARG;                  /* must have 1 char */
+    sc = (uint32) (addr & 0x7) * 8;                     /* shift count */
+    val[0] = (val[0] & ~(((t_uint64) M8) << sc)) |
+        (((t_uint64) cptr[0]) << sc);
+    return 0;
+    }
+if (sw & SWMASK ('B')) {                                /* byte? */
+    num = get_uint (cptr, rdx, M8, &r);                 /* get byte */
+    if (r != SCPE_OK) return SCPE_ARG;
+    sc = (uint32) (addr & 0x7) * 8;                     /* shift count */
+    val[0] = (val[0] & ~(((t_uint64) M8) << sc)) |
+        (num << sc);
+    return 0;
+    }
+if (sw & SWMASK ('W')) {                                /* word? */
+    num = get_uint (cptr, rdx, M16, &r);                /* get word */
+    if (r != SCPE_OK) return SCPE_ARG;
+    sc = (uint32) (addr & 0x6) * 8;                     /* shift count */
+    val[0] = (val[0] & ~(((t_uint64) M16) << sc)) |
+        (num << sc);
+    return -1;
+    }
+if (sw & SWMASK ('L')) {                                /* longword? */
+    num = get_uint (cptr, rdx, M32, &r);                /* get longword */
+    if (r != SCPE_OK) return SCPE_ARG;
+    sc = (uint32) (addr & 0x4) * 8;                     /* shift count: 0 or 32 */
+    val[0] = (val[0] & ~(((t_uint64) M32) << sc)) |
+        (num << sc);
+    return -3;
+    }
+if ((sw & SWMASK ('C')) || ((*cptr == '"') && cptr++)) { /* ASCII chars? */
+    if (cptr[0] == 0) return SCPE_ARG;                  /* must have 1 char */
+    for (i = 0; i < 8; i++) {                           /* up to 8 bytes */
+        if (cptr[i] == 0) break;
+        sc = i * 8;
+        val[0] = (val[0] & ~(((t_uint64) M8) << sc)) |
+            (((t_uint64) cptr[i]) << sc);
+        }
+    return -7;
+    }
+
+/* try an instruction mnemonic on longword-aligned addresses */
+if ((addr & 3) == 0) {                                  /* aligned only */
+    r = parse_sym_m (cptr, addr, &num);                 /* try to parse inst */
+    if (r <= 0) {                                       /* ok? */
+        sc = (uint32) (addr & 0x4) * 8;                 /* shift count */
+        val[0] = (val[0] & ~(((t_uint64) M32) << sc)) |
+            (num << sc);
+        return -3;
+        }
+    }
+
+val[0] = get_uint (cptr, rdx, M64, &r);                 /* get number */
+if (r != SCPE_OK) return r;
+return -7;
+}
+
+/* Symbolic input
+
+ Inputs:
+ *cptr = pointer to input string
+ addr = current PC
+ *val = pointer to output values
+ Outputs:
+ status = > 0 error code
+ <= 0 -number of extra words
+*/
+
+/* Assemble one instruction from text.  The mnemonic is looked up in
+   opcode[] (PALcode entries must also match the current PAL variant);
+   operands are then parsed according to the flags of the matching
+   opval[] entry.  k and fl are assigned only when the lookup loop
+   breaks on a match; the NULL check below guards the no-match case.
+   Returns -3 on success, an error status otherwise. */
+t_stat parse_sym_m (char *cptr, t_addr addr, t_value *inst)
+{
+t_uint64 bra, df, db;
+uint32 i, k, lit8, fl;
+int32 reg;
+t_stat r;
+char *tptr, gbuf[CBUFSIZE];
+
+if ((r = parse_pal_hwre (cptr, inst)) < 0) return r;    /* PAL hardware? */
+cptr = get_glyph (cptr, gbuf, 0);                       /* get opcode */
+for (i = 0; opcode[i] != NULL; i++) {                   /* loop thru opcodes */
+    if (strcmp (opcode[i], gbuf) == 0) {                /* string match? */
+        k = i << 1;                                     /* index to opval */
+        fl = opval[k + 1];                              /* get flags */
+        if (((fl & CL_CLASS) != CL_NO) ||               /* not PAL or */
+            (fl & PAL_MASK (pal_type))) break;          /* PAL type match? */
+        }
+    }
+if (opcode[i] == NULL) return SCPE_ARG;
+*inst = opval[k];                                       /* save base op */
+
+if (fl & FL_RA) {                                       /* need Ra? */
+    cptr = get_glyph (cptr, gbuf, ',');                 /* get reg */
+    if ((reg = parse_reg (gbuf)) < 0) return SCPE_ARG;
+    *inst = *inst | (reg << I_V_RA);
+    }
+if (fl & FL_BDP) {                                      /* need branch disp? */
+    cptr = get_glyph (cptr, gbuf, 0);
+    bra = get_uint (gbuf, 16, M64, &r);
+    if ((r != SCPE_OK) || (bra & 3)) return SCPE_ARG;
+    /* try both forward and backward displacement encodings and keep
+       whichever one round-trips back to the requested target */
+    df = ((bra - (addr + 4)) >> 2) & I_M_BDSP;
+    db = ((addr + 4 - bra) >> 2) & I_M_BDSP;
+    if (bra == ((addr + 4 + (SEXT_BDSP (df) << 2)) & M64))
+        *inst = *inst | (uint32) df;
+    else if (bra == ((addr + 4 + (SEXT_BDSP (db) << 2)) & M64))
+        *inst = *inst | (uint32) db;
+    else return SCPE_ARG;
+    }
+else if (fl & FL_MDP) {                                 /* need mem disp? */
+    cptr = get_glyph (cptr, gbuf, 0);
+    df = strtotv (gbuf, &tptr, 16);
+    if ((gbuf == tptr) || (df > I_M_MDSP)) return SCPE_ARG;
+    *inst = *inst | (uint32) df;
+    if (*tptr == '(') {                                 /* optional (Rb) */
+        tptr = get_glyph (tptr + 1, gbuf, ')');
+        if ((reg = parse_reg (gbuf)) < 0) return SCPE_ARG;
+        *inst = *inst | (reg << I_V_RB);
+        }
+    else *inst = *inst | (31 << I_V_RB);                /* default base R31 */
+    if (*tptr != 0) return SCPE_ARG;
+    }
+else if (fl & FL_RBI) {                                 /* indexed? */
+    cptr = get_glyph (cptr, gbuf, ',');
+    if (gbuf[0] != '(') return SCPE_ARG;
+    tptr = get_glyph (gbuf + 1, gbuf, ')');
+    if ((reg = parse_reg (gbuf)) < 0) return SCPE_ARG;
+    *inst = *inst | (reg << I_V_RB);
+    if (*tptr != 0) return SCPE_ARG;
+    }
+else if (fl & FL_RB) {
+    cptr = get_glyph (cptr, gbuf, ',');                 /* get reg/lit */
+    if ((gbuf[0] == '#') && (fl & FL_LIT)) {            /* literal? */
+        lit8 = (uint32) get_uint (gbuf + 1, 16, I_M_LIT8, &r);
+        if (r != SCPE_OK) return r;
+        *inst = *inst | I_ILIT | (lit8 << I_V_LIT8);
+        }
+    else {                                              /* rb */
+        if ((reg = parse_reg (gbuf)) < 0) return SCPE_ARG;
+        *inst = *inst | (reg << I_V_RB);
+        }
+    }
+if (fl & FL_JDP) {                                      /* jmp? */
+    cptr = get_glyph (cptr, gbuf, 0);                   /* get disp */
+    df = get_uint (gbuf, 16, 0x3FFF, &r);
+    if (r != SCPE_OK) return r;
+    *inst = *inst | df;
+    }
+else if (fl & FL_RC) {                                  /* rc? */
+    cptr = get_glyph (cptr, gbuf, ',');                 /* get reg */
+    if ((reg = parse_reg (gbuf)) < 0) return SCPE_ARG;
+    *inst = *inst | (reg << I_V_RC);
+    }
+
+if (*cptr != 0) return SCPE_ARG;                        /* any leftovers? */
+return -3;
+}
+
+/* Parse a register */
+
+/* Parse a register specifier: an optional R/r/F/f prefix followed by a
+   decimal number 0..31.  Returns the register number, or -1 on error. */
+int32 parse_reg (char *cptr)
+{
+t_stat r;
+int32 reg;
+
+if ((*cptr == 'R') || (*cptr == 'r') ||
+    (*cptr == 'F') || (*cptr == 'f')) cptr++;
+reg = (int32) get_uint (cptr, 10, 31, &r);
+if (r != SCPE_OK) return -1;
+return reg;
+}
+
diff --git a/alpha/alpha_sys_defs.h b/alpha/alpha_sys_defs.h
new file mode 100644
index 00000000..7af5a908
--- /dev/null
+++ b/alpha/alpha_sys_defs.h
@@ -0,0 +1,43 @@
+/* alpha_sys_defs.h: Alpha system definitions file
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ Respectfully dedicated to the great people of the Alpha chip, systems, and
+ software development projects; and to the memory of Peter Conklin, of the
+ Alpha Program Office.
+
+ This is a STUB!
+*/
+
+#ifndef _ALPHA_SYS_DEFS_H_
+#define _ALPHA_SYS_DEFS_H_ 0
+
+#define PA_SIZE 36 /* PA size */
+#define PA_MASK 0x0000000FFFFFFFFF
+
+#define ROMBASE 0x000000FFFC000000
+#define ROMSIZE 0x0000000004000000
+
+#endif
+
diff --git a/alpha/old_pal/alpha_pal_defs.h b/alpha/old_pal/alpha_pal_defs.h
new file mode 100644
index 00000000..6471e36b
--- /dev/null
+++ b/alpha/old_pal/alpha_pal_defs.h
@@ -0,0 +1,208 @@
+/* alpha_pal_defs.h: Alpha architecture PAL definitions file
+
+ Copyright (c) 2003-2006, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ Respectfully dedicated to the great people of the Alpha chip, systems, and
+ software development projects; and to the memory of Peter Conklin, of the
+ Alpha Program Office.
+*/
+
+#ifndef _ALPHA_PAL_DEFS_H_
+#define _ALPHA_PAL_DEFS_H_ 0
+
+/* VA - NT software format */
+
+#define NTVA_N_PDE (VA_N_OFF - 2) /* PDE width */
+#define NTVA_M_PDE ((1u << NTVA_N_PDE) - 1) /* PDE mask */
+#define NTVA_N_PTD (32 - VA_N_OFF - NTVA_N_PDE) /* PTD width */
+#define NTVA_M_PTD ((1u << NTVA_N_PTD) - 1) /* PTD mask */
+#define NTVA_M_VPN (M32 >> VA_N_OFF) /* 32b VPN mask */
+#define NTVPN_N_SEXT (VA_WIDTH - 32 + 1) /* VPN sext size */
+#define NTVPN_V_SEXT (VA_N_VPN - NTVPN_N_SEXT) /* VPN sext start */
+#define NTVPN_M_SEXT ((1u << NTVPN_N_SEXT) - 1) /* VPN sext mask */
+#define NTVPN_GETSEXT(x) (((x) >> NTVPN_V_SEXT) & NTVPN_M_SEXT)
+
+/* PTE - NT software format */
+
+#define NT_VPTB 0xFFFFFFFFC0000000 /* virt page tbl base */
+#define NTP_V_PFN 9 /* PFN */
+#define NTP_M_PFN 0x7FFFFF
+#define NTP_PFN (NTP_M_PFN << NTP_V_PFN)
+#define NTP_V_GH 5
+#define NTP_M_GH 0x3
+#define NTP_V_GBL 4 /* global = ASM */
+#define NTP_V_DIRTY 2 /* dirty = !FOW */
+#define NTP_V_OWNER 1 /* owner */
+#define NTP_V_V 0 /* valid */
+#define NTP_GBL (1u << NTP_V_GBL)
+#define NTP_DIRTY (1u << NTP_V_DIRTY)
+#define NTP_OWNER (1u << NTP_V_OWNER)
+#define NTP_V (1u << NTP_V_V)
+#define NT_VPNPTD(x) (((x) >> (NTVA_N_PDE - 2)) & (NTVA_M_PTD << 2))
+#define NT_VPNPDE(x) (((x) << 2) & (NTVA_M_PDE << 2))
+
+/* VMS PALcode */
+
+#define PSV_V_SPA 56 /* VMS PS: stack align */
+#define PSV_M_SPA 0x3F
+#define PSV_V_IPL 8 /* interrupt priority */
+#define PSV_M_IPL 0x1F
+#define PSV_V_VMM 7 /* virt machine monitor */
+#define PSV_V_CM 3 /* current mode */
+#define PSV_M_CM 0x3
+#define PSV_V_IP 2 /* intr in progress */
+#define PSV_V_SW 0 /* software */
+#define PSV_M_SW 0x3
+#define PSV_VMM (1u << PSV_V_VMM)
+#define PSV_IP (1u << PSV_V_IP)
+#define PSV_MASK (PSV_VMM | PSV_IP | PSV_M_SW)
+#define PSV_MBZ 0xC0FFFFFFFFFFE0E4 /* must be zero */
+
+#define PCBV_FLAGS 56 /* PCB flags word */
+
+#define SISR_MASK 0xFFFE /* SISR bits */
+
+#define IPL_SMAX 0x0F /* highest swre level */
+
+#define SCB_FDIS 0x010 /* SCB offsets */
+#define SCB_ACV 0x080
+#define SCB_TNV 0x090
+#define SCB_FOR 0x0A0
+#define SCB_FOW 0x0B0
+#define SCB_FOE 0x0C0
+#define SCB_ARITH 0x200
+#define SCB_KAST 0x240
+#define SCB_EAST 0x250
+#define SCB_SAST 0x260
+#define SCB_UAST 0x270
+#define SCB_ALIGN 0x280
+#define SCB_BPT 0x400
+#define SCB_BUG 0x410
+#define SCB_RSVI 0x420
+#define SCB_RSVO 0x430
+#define SCB_GENTRAP 0x440
+#define SCB_CHMK 0x480
+#define SCB_CHME 0x490
+#define SCB_CHMS 0x4A0
+#define SCB_CHMU 0x4B0
+#define SCB_SISR0 0x500
+#define SCB_CLOCK 0x600
+#define SCB_IPIR 0x610
+#define SCB_SCRD 0x620
+#define SCB_PCRD 0x630
+#define SCB_POWER 0x640
+#define SCB_PERFM 0x650
+#define SCB_SMCHK 0x660
+#define SCB_PMCHK 0x670
+#define SCB_PASVR 0x6F0
+#define SCB_IO 0x800
+
+#define VMS_L_STKF (8 * 8) /* stack frame length */
+#define VMS_MME_E 0x0000000000000001 /* mem mgt error flags */
+#define VMS_MME_R 0x0000000000000000
+#define VMS_MME_W 0x8000000000000000
+
+/* VAX compatible data length definitions (for ReadUna, WriteUna) */
+
+#define L_BYTE 1
+#define L_WORD 2
+#define L_LONG 4
+#define L_QUAD 8
+
+/* Unix PALcode */
+
+#define PSU_V_CM 3 /* Unix PS: curr mode */
+#define PSU_M_CM 0x1
+#define PSU_CM (PSU_M_CM << PSU_V_CM)
+#define PSU_V_IPL 0 /* IPL */
+#define PSU_M_IPL 0x7
+#define PSU_IPL (PSU_M_IPL << PSU_V_IPL)
+
+#define PCBU_FLAGS 40 /* PCB flags word */
+
+#define UNIX_L_STKF (6 * 8) /* kernel stack frame */
+#define UNIX_IF_BPT 0 /* entIF a0 values */
+#define UNIX_IF_BUG 1
+#define UNIX_IF_GEN 2
+#define UNIX_IF_FDIS 3
+#define UNIX_IF_RSVI 4
+#define UNIX_INT_IPIR 0 /* entInt a0 values */
+#define UNIX_INT_CLK 1
+#define UNIX_INT_MCRD 2
+#define UNIX_INT_IO 3
+#define UNIX_INT_PERF 4
+#define UNIX_MMCSR_TNV 0 /* entMM a1 values */
+#define UNIX_MMCSR_ACV 1
+#define UNIX_MMCSR_FOR 2
+#define UNIX_MMCSR_FOW 3
+#define UNIX_MMCSR_FOE 4
+#define UNIX_MME_E M64 /* entMM a2 values */
+#define UNIX_MME_R 0
+#define UNIX_MME_W 1
+
+enum vms_pal_opcodes {                                  /* privileged (< 0x40), then unprivileged (>= 0x80) */
+    OP_HALT, OP_DRAINA, OP_CFLUSH, OP_LDQP,
+    OP_STQP, OP_SWPCTX, MF_ASN, MT_ASTEN,
+    MT_ASTSR, OP_CSERVE, OP_SWPPAL, MF_FEN,
+    MT_FEN, MT_IPIR, MF_IPL, MT_IPL,
+    MF_MCES, MT_MCES, MF_PCBB, MF_PRBR,
+    MT_PRBR, MF_PTBR, MF_SCBB, MT_SCBB,
+    MT_SIRR, MF_SISR, MF_TBCHK, MT_TBIA,
+    MT_TBIAP, MT_TBIS, MF_ESP, MT_ESP,
+    MF_SSP, MT_SSP, MF_USP, MT_USP,
+    MT_TBISD, MT_TBISI, MF_ASTEN, MF_ASTSR,
+    MF_VTBR = 0x29, MT_VTBR,MT_PERFMON, MT_DATFX = 0x2E,
+    MF_VIRBND = 0x30, MT_VIRBND, MF_SYSPTBR, MT_SYSPTBR,
+    OP_WTINT = 0x3E, MF_WHAMI = 0x3F,
+    OP_BPT = 0x80, OP_BUGCHK, OP_CHME, OP_CHMK,         /* unprivileged from here */
+    OP_CHMS, OP_CHMU, OP_IMB, OP_INSQHIL,
+    OP_INSQTIL, OP_INSQHIQ, OP_INSQTIQ, OP_INSQUEL,
+    OP_INSQUEQ, OP_INSQUELD,OP_INSQUEQD,OP_PROBER,
+    OP_PROBEW, OP_RD_PS, OP_REI, OP_REMQHIL,
+    OP_REMQTIL, OP_REMQHIQ, OP_REMQTIQ, OP_REMQUEL,
+    OP_REMQUEQ, OP_REMQUELD,OP_REMQUEQD,OP_SWASTEN,
+    OP_WR_PS_SW,OP_RSCC, OP_RD_UNQ, OP_WR_UNQ,
+    OP_AMOVRR, OP_AMOVRM, OP_INSQHILR,OP_INSQTILR,
+    OP_INSQHIQR,OP_INSQTIQR,OP_REMQHILR,OP_REMQTILR,
+    OP_REMQHIQR,OP_REMQTIQR,OP_GENTRAP,
+    OP_CLRFEN = 0xAE
+    };
+
+enum unix_pal_opcodes {                                 /* OSF/1-style codes; lower case to avoid clash with VMS set */
+    OP_halt, OP_draina, OP_cflush,
+    OP_cserve = 0x9, OP_swppal,
+    OP_rdmces = 0x10, OP_wrmces,
+    OP_wrvirbnd = 0x13, OP_wrsysptbr = 0x14,
+    OP_wrfen = 0x2B, OP_wrvptptr = 0x2D, OP_wrasn,
+    OP_swpctx = 0x30, OP_wrval, OP_rdval, OP_tbi,
+    OP_wrent, OP_swpipl, OP_rdps, OP_wrkgp,
+    OP_wrusp, OP_wrperfmon, OP_rdusp,
+    OP_whami = 0x3C, OP_retsys, OP_wtint, OP_rti,
+    OP_bpt = 0x80, OP_bugchk, OP_syscall = 0x83,        /* unprivileged from here */
+    OP_imb = 0x86,
+    OP_urti = 0x92, OP_rdunique = 0x9E, OP_wrunique,
+    OP_gentrap = 0xAA, OP_clrfen = 0xAE
+    };
+
+#endif
diff --git a/alpha/old_pal/alpha_pal_unix.c b/alpha/old_pal/alpha_pal_unix.c
new file mode 100644
index 00000000..73fa8011
--- /dev/null
+++ b/alpha/old_pal/alpha_pal_unix.c
@@ -0,0 +1,702 @@
+/* alpha_pal_unix.c - Alpha Unix PAL code simulator
+
+ Copyright (c) 2003-2005, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ This module contains the PALcode implementation for Alpha Unix, except for
+ the console, which is always done in hardware mode.
+
+ Alpha Unix/Linux requires the following privileged state:
+
+ ps<3:0> processor status
+ cm<0> current mode - in base
+ ipl<2:0> interrupt level - in base
+ ksp<63:0> kernel stack pointer
+ kgp<63:0> kernel global pointer
+ usp<63:0> user stack pointer
+ pcbb<63:0> process control block base
+ ptptr<63:0> page table base
+ vptptr<63:0> virtual page table base
+ virbnd<63:0> virtual address boundary
+ sysptbr<63:0> system page table base register
+ sysval<63:0> processor base (sysvalue)
+ unique<63:0> thread-unique value
+ entArith<63:0> entry vector, arithmetic trap
+ entIF<63:0> entry vector, instruction
+ entInt<63:0> entry vector, interrupt
+ entSys<63:0> entry vector, system call
+ entMM<63:0> entry vector, memory management fault
+ entUna<63:0> entry vector, unaligned
+
+ Unix maps kernel/user to the hardware's kernel/executive. It maps the
+ 8 IPL's to the hardware IPL's as follows:
+
+ 0 0
+ 1 1
+ 2 2
+ 3 IPL_HMIN
+ 4 IPL_HMIN+1
+ 5 IPL_HMIN+2
+ 6 IPL_HMIN+3
+ 7 IPL_1F
+*/
+
+#include "alpha_defs.h"
+
+#define GET_PSU (((unix_cm & PSU_M_CM) << PSU_V_CM) | \
+ ((unix_ipl & PSU_M_IPL) << PSU_V_IPL))
+
+// TODO(review): io_get_vec is stubbed to 0 for debugging; restore the real I/O vector lookup
+#define io_get_vec(x) 0
+
+#define ksp unix_stkp[MODE_K]
+#define usp unix_stkp[MODE_E]
+#define entInt unix_entVec[0]
+#define entArith unix_entVec[1]
+#define entMM unix_entVec[2]
+#define entIF unix_entVec[3]
+#define entUna unix_entVec[4]
+#define entSys unix_entVec[5]
+#define v0 R[0]
+#define a0 R[16]
+#define a1 R[17]
+#define a2 R[18]
+#define a3 R[19]
+#define at R[28]
+#define gp R[29]
+
+t_uint64 unix_ptptr = 0; /* page table base */
+t_uint64 unix_vptptr = 0; /* virt page table base */
+t_uint64 unix_virbnd = M64; /* virtual boundary */
+t_uint64 unix_sysptbr = 0; /* system page table base */
+t_uint64 unix_hwpcb = 0; /* hardware PCB */
+t_uint64 unix_unique = 0; /* thread unique */
+t_uint64 unix_sysval = 0; /* processor unique */
+t_uint64 unix_mces = 0; /* machine check err summ */
+t_uint64 unix_stkp[2] = { 0 };
+t_uint64 unix_entVec[6] = { 0 };
+t_uint64 unix_kgp = 0;
+uint32 unix_ipl = 0;
+uint32 unix_cm = 0;
+
+static const uint32 map_ipl[8] = {
+ 0, 1, 2, IPL_HMIN, IPL_HMIN + 1, IPL_HMIN + 2, IPL_HMIN + 3, IPL_1F
+ };
+
+extern t_uint64 R[32];
+extern t_uint64 PC, trap_mask;
+extern t_uint64 p1;
+extern uint32 vax_flag, lock_flag;
+extern uint32 fpen;
+extern uint32 ir, pcc_h, pcc_l, pcc_enb;
+extern uint32 cm_racc, cm_wacc;
+extern uint32 mmu_ispage, mmu_dspage;
+extern jmp_buf save_env;
+extern uint32 int_req[IPL_HLVL];
+
+t_stat unix_syscall (void);
+t_stat unix_retsys (void);
+t_stat unix_rti (void);
+void unix_urti (void);
+void unix_swpctx (void);
+t_stat unix_intexc (t_uint64 vec, t_uint64 arg);
+t_stat unix_mm_intexc (t_uint64 par1, t_uint64 par2);
+t_stat pal_proc_reset_unix (DEVICE *dptr);
+uint32 pal_find_pte_unix (uint32 vpn, t_uint64 *l3pte);
+
+extern t_stat (*pal_eval_intr) (uint32 ipl);
+extern t_stat (*pal_proc_excp) (uint32 type);
+extern t_stat (*pal_proc_trap) (uint32 type);
+extern t_stat (*pal_proc_intr) (uint32 type);
+extern t_stat (*pal_proc_inst) (uint32 fnc);
+extern uint32 (*pal_find_pte) (uint32 vpn, t_uint64 *pte);
+extern uint32 Test (t_uint64 va, uint32 acc, t_uint64 *pa);
+
+/* UNIXPAL data structures
+
+ unixpal_dev device descriptor
+ unixpal_unit unit
+ unixpal_reg register list
+*/
+
+UNIT unixpal_unit = { UDATA (NULL, 0, 0) };
+
+REG unixpal_reg[] = {
+ { HRDATA (KSP, ksp, 64) },
+ { HRDATA (USP, usp, 64) },
+ { HRDATA (ENTARITH, entArith, 64) },
+ { HRDATA (ENTIF, entIF, 64) },
+ { HRDATA (ENTINT, entInt, 64) },
+ { HRDATA (ENTMM, entMM, 64) },
+ { HRDATA (ENTSYS, entSys, 64) },
+ { HRDATA (ENTUNA, entUna, 64) },
+ { HRDATA (KGP, unix_kgp, 64) },
+ { HRDATA (PTPTR, unix_ptptr, 64) },
+ { HRDATA (VPTPTR, unix_vptptr, 64) },
+ { HRDATA (VIRBND, unix_virbnd, 64) },
+ { HRDATA (SYSPTBR, unix_sysptbr, 64) },
+ { HRDATA (UNIQUE, unix_unique, 64) },
+ { HRDATA (SYSVAL, unix_sysval, 64) },
+ { HRDATA (HWPCB, unix_hwpcb, 64) },
+ { HRDATA (MCES, unix_mces, 64) },
+ { HRDATA (IPL, unix_ipl, 3) },
+ { HRDATA (CM, unix_cm, 0) },
+ { NULL }
+ };
+
+DEVICE unixpal_dev = {
+    "UNIXPAL", &unixpal_unit, unixpal_reg, NULL,        /* name, units, registers, modifiers */
+    1, 16, 1, 1, 16, 8,                                 /* #units, radices, widths */
+    NULL, NULL, &pal_proc_reset_unix,                   /* examine, deposit, reset */
+    NULL, NULL, NULL,                                   /* boot, attach, detach */
+    NULL, DEV_DIS                                      /* context; starts disabled */
+    };
+
+/* Unix interrupt evaluator - returns IPL of highest priority interrupt */
+
+uint32 pal_eval_intr_unix (uint32 lvl)
+{
+uint32 i;
+uint32 mipl = map_ipl[lvl & PSU_M_IPL];                 /* map Unix IPL 0-7 to hwre IPL */
+
+for (i = IPL_HMAX; i >= IPL_HMIN; i--) {                /* chk hwre int, high to low */
+    if (i <= mipl) return 0;                            /* at ipl? no int */
+    if (int_req[i - IPL_HMIN]) return i;                /* req != 0? int */
+    }
+return 0;                                               /* nothing above current IPL */
+}
+
+/* Unix interrupt dispatch - reached from top of execute loop */
+
+t_stat pal_proc_intr_unix (uint32 lvl)
+{
+t_stat r;
+
+if (lvl > IPL_HMAX) return SCPE_IERR;                   /* above max? */
+else if (lvl >= IPL_HMIN) a1 = io_get_vec (lvl);        /* hwre? get vector */
+else return SCPE_IERR;                                  /* bug */
+r = unix_intexc (entInt, UNIX_INT_IO);                  /* do interrupt; default a0 = I/O */
+if (a1 == SCB_CLOCK) a0 = UNIX_INT_CLK;                 /* clock vector? a0 = clk */
+if (a1 == SCB_IPIR) a0 = UNIX_INT_IPIR;                 /* IPI vector? a0 = IPI */
+unix_ipl = lvl;                                         /* NOTE(review): stores hwre IPL, not Unix 0-7 level -- confirm */
+return r;
+}
+
+/* Unix trap dispatch - reached synchronously from bottom of execute loop */
+
+t_stat pal_proc_trap_unix (uint32 tsum)
+{
+t_stat r;
+
+r = unix_intexc (entArith, tsum);                       /* arithmetic trap; a0 = trap summary */
+a1 = trap_mask;                                         /* a1 = register write mask */
+return r;
+}
+
+/* Unix exception dispatch - reached from the ABORT handler */
+
+t_stat pal_proc_excp_unix (uint32 abval)
+{
+t_stat r;
+
+switch (abval) {
+
+    case EXC_RSVI:                                      /* reserved instruction */
+        return unix_intexc (entIF, UNIX_IF_RSVI);       /* trap */
+
+    case EXC_RSVO:                                      /* reserved operand */
+        return unix_intexc (entIF, UNIX_IF_RSVI);       /* no RSVO code in entIF set; reported as RSVI */
+
+    case EXC_ALIGN:                                     /* unaligned */
+        PC = (PC - 4) & M64;                            /* back up PC */
+        r = unix_intexc (entUna, PC);                   /* fault; a0 = faulting PC */
+        a1 = I_GETOP (ir);                              /* get opcode */
+        a2 = I_GETRA (ir);                              /* get ra */
+        return r;
+
+    case EXC_FPDIS:                                     /* fp disabled */
+        PC = (PC - 4) & M64;                            /* backup PC */
+        return unix_intexc (entIF, UNIX_IF_FDIS);       /* fault */
+
+    case EXC_FOX+EXC_E:                                 /* FOE */
+        tlb_is (p1, TLB_CI);                            /* flush stale I-TLB entry */
+        return unix_mm_intexc (UNIX_MMCSR_FOE, UNIX_MME_E);
+
+    case EXC_FOX+EXC_R:                                 /* FOR */
+        PC = (PC - 4) & M64;                            /* back up PC */
+        return unix_mm_intexc (UNIX_MMCSR_FOR, UNIX_MME_R);
+
+    case EXC_FOX+EXC_W:                                 /* FOW */
+        PC = (PC - 4) & M64;                            /* back up PC */
+        return unix_mm_intexc (UNIX_MMCSR_FOW, UNIX_MME_W);
+
+    case EXC_BVA+EXC_E:
+    case EXC_ACV+EXC_E:                                 /* instr ACV */
+        return unix_mm_intexc (UNIX_MMCSR_ACV, UNIX_MME_E);
+
+    case EXC_BVA+EXC_R:
+    case EXC_ACV+EXC_R:                                 /* data read ACV */
+        PC = (PC - 4) & M64;                            /* back up PC */
+        return unix_mm_intexc (UNIX_MMCSR_ACV, UNIX_MME_R);
+
+    case EXC_BVA+EXC_W:
+    case EXC_ACV+EXC_W:                                 /* data write ACV */
+        PC = (PC - 4) & M64;                            /* back up PC */
+        return unix_mm_intexc (UNIX_MMCSR_ACV, UNIX_MME_W);
+
+    case EXC_TNV+EXC_E:                                 /* instr TNV */
+        tlb_is (p1, TLB_CI);
+        return unix_mm_intexc (UNIX_MMCSR_TNV, UNIX_MME_E);
+
+    case EXC_TNV+EXC_R:                                 /* data read TNV */
+        tlb_is (p1, TLB_CD);
+        PC = (PC - 4) & M64;                            /* back up PC */
+        return unix_mm_intexc (UNIX_MMCSR_TNV, UNIX_MME_R);
+
+    case EXC_TNV+EXC_W:                                 /* data write TNV */
+        tlb_is (p1, TLB_CD);
+        PC = (PC - 4) & M64;                            /* back up PC */
+        return unix_mm_intexc (UNIX_MMCSR_TNV, UNIX_MME_W);
+
+    case EXC_TBM + EXC_E:                               /* TLB miss */
+    case EXC_TBM + EXC_R:                               /* handled by pal_find_pte_unix */
+    case EXC_TBM + EXC_W:
+        return SCPE_IERR;                               /* should not occur */
+
+    default:
+        return STOP_INVABO;
+    }
+return SCPE_OK;
+}
+
+/* PALcode instruction dispatcher - function code verified in CPU */
+
+t_stat pal_proc_inst_unix (uint32 fnc)
+{
+uint32 arg32 = (uint32) a0;                             /* a0 as 32b argument */
+
+if ((fnc < 0x40) && (unix_cm != MODE_K)) ABORT (EXC_RSVI); /* privileged op in user mode? */
+switch (fnc) {
+
+    case OP_halt:
+        return STOP_HALT;
+
+    case OP_cflush:
+    case OP_draina:
+        break;                                          /* cache ops are nops */
+
+    case OP_cserve:
+        //tbd
+        break;
+
+    case OP_swppal:
+        v0 = 0;                                         /* PAL switch not supported */
+        break;
+
+    case OP_rdmces:
+        v0 = unix_mces;
+        break;
+
+    case OP_wrmces:
+        unix_mces = (unix_mces | (arg32 & MCES_DIS)) & ~(arg32 & MCES_W1C);
+        break;
+
+    case OP_wrvirbnd:
+        unix_virbnd = a0;
+        break;
+
+    case OP_wrsysptbr:
+        unix_sysptbr = a0;
+        break;
+
+    case OP_wrfen:
+        fpen = arg32 & 1;                               /* set FP enable */
+        arg32 = ReadPL (unix_hwpcb + PCBU_FLAGS);       /* mirror into PCB flags */
+        arg32 = (arg32 & ~1) | fpen;
+        WritePL (unix_hwpcb + PCBU_FLAGS, arg32);
+        break;
+
+    case OP_wrvptptr:
+        unix_vptptr = a0;
+        break;
+
+    case OP_wrasn:
+        itlb_set_asn (arg32 & M16);
+        dtlb_set_asn (arg32 & M16);
+        WritePL (unix_hwpcb + 28, arg32 & M16);         /* mirror ASN into PCB */
+        break;
+
+    case OP_swpctx:
+        unix_swpctx ();
+        break;
+
+    case OP_wrval:
+        unix_sysval = a0;
+        break;
+
+    case OP_rdval:
+        v0 = unix_sysval;
+        break;
+
+    case OP_tbi:
+        switch (a0 + 2) {                               /* a0 is -2..+3 */
+        case 0:                                         /* -2 = tbia */
+            tlb_ia (TLB_CI | TLB_CD | TLB_CA);
+            break;
+        case 1:                                         /* -1 = tbiap */
+            tlb_ia (TLB_CI | TLB_CD);
+            break;
+        case 3:                                         /* +1 = tbis */
+            tlb_is (a1, TLB_CI | TLB_CD);
+            break;
+        case 4:                                         /* +2 = tbisd */
+            tlb_is (a1, TLB_CD);
+            break;
+        case 5:                                         /* +3 = tbisi */
+            tlb_is (a1, TLB_CI);
+            break;
+        default:
+            break;
+            }
+        break;
+
+    case OP_wrent:                                      /* wrent (a0 = handler VA, a1 = which) */
+        if (a1 <= 5) unix_entVec[(uint32) a1] = a0;     /* a1 selects the vector, not a0 */
+        break;
+
+    case OP_swpipl:
+        v0 = unix_ipl;                                  /* return old IPL */
+        unix_ipl = arg32 & PSU_M_IPL;
+        break;
+
+    case OP_rdps:
+        v0 = GET_PSU;
+        break;
+
+    case OP_wrkgp:
+        unix_kgp = a0;
+        break;
+
+    case OP_wrusp:
+        usp = a0;
+        break;
+
+    case OP_wrperfmon:
+        // tbd
+        break;
+
+    case OP_rdusp:
+        v0 = usp;
+        break;
+
+    case OP_whami:
+        v0 = 0;                                         /* uniprocessor: CPU 0 */
+        break;
+
+    case OP_retsys:
+        return unix_retsys ();                          /* propagate STOP_KSNV */
+
+
+    case OP_wtint:
+        v0 = 0;
+        break;
+
+    case OP_rti:
+        return unix_rti ();                             /* propagate STOP_KSNV */
+
+
+/* Non-privileged */
+
+    case OP_bpt:
+        return unix_intexc (entIF, UNIX_IF_BPT);
+
+    case OP_bugchk:
+        return unix_intexc (entIF, UNIX_IF_BUG);
+
+    case OP_syscall:
+        if (unix_cm == MODE_K) {
+            //tbd
+            }
+        return unix_syscall ();
+
+    case OP_imb:
+        break;
+
+    case OP_urti:
+        if (unix_cm == MODE_K) {
+            //tbd
+            }
+        unix_urti ();
+        break;
+
+    case OP_rdunique:
+        v0 = unix_unique;
+        break;
+
+    case OP_wrunique:
+        unix_unique = a0;
+        break;
+
+    case OP_gentrap:
+        return unix_intexc (entIF, UNIX_IF_GEN);
+
+    case OP_clrfen:
+        fpen = 0;                                       /* clear FP enable */
+        arg32 = ReadPL (unix_hwpcb + PCBU_FLAGS);       /* mirror into PCB flags */
+        arg32 = arg32 & ~1;
+        WritePL (unix_hwpcb + PCBU_FLAGS, arg32);
+        break;
+
+    default:
+        ABORT (EXC_RSVI);
+    }
+
+return SCPE_OK;
+}
+
+/* Swap privileged context */
+
+void unix_swpctx (void)
+{
+t_uint64 val;
+uint32 tmp1;
+
+WritePQ (unix_hwpcb + 0, SP);                           /* save stack ptrs */
+WritePQ (unix_hwpcb + 8, usp);
+tmp1 = (pcc_h + pcc_l) & M32;                           /* elapsed time */
+WritePL (unix_hwpcb + 24, tmp1);                        /* save PCC */
+WritePQ (unix_hwpcb + 32, unix_unique);                 /* save unique */
+v0 = unix_hwpcb;                                        /* return curr PCBB */
+unix_hwpcb = a0;                                        /* new PCBB */
+SP = ksp = ReadPQ (unix_hwpcb + 0);                     /* read stack ptrs */
+usp = ReadPQ (unix_hwpcb + 8);
+val = ReadPQ (unix_hwpcb + 16) << VA_N_OFF;             /* read new PTBR (PFN -> PA) */
+if (val != unix_ptptr) tlb_ia (TLB_CI | TLB_CD);        /* ptbr change? zap TLB */
+unix_ptptr = val;
+tmp1 = ReadPL (unix_hwpcb + 24);                        /* restore PCC offset */
+pcc_h = (tmp1 - pcc_l) & M32;
+tmp1 = ReadPL (unix_hwpcb + 28) & M16;                  /* read ASN */
+itlb_set_asn (tmp1);
+dtlb_set_asn (tmp1);
+unix_unique = ReadPQ (unix_hwpcb + 32);                 /* read unique */
+fpen = ReadPL (unix_hwpcb + PCBU_FLAGS) & 1;            /* read FEN */
+return;
+}
+
+/* Unix interrupt or exception - always to kernel mode
+
+ Inputs:
+ vec = entry vector
+ arg = argument for a0
+ Outputs:
+ reason = possible processor halt
+*/
+
+t_stat unix_intexc (t_uint64 vec, t_uint64 arg)
+{
+t_uint64 sav_ps = GET_PSU;                              /* old PS */
+
+if ((unix_cm & PSU_M_CM) != MODE_K) {                   /* not kernel? */
+    usp = SP;                                           /* save SP */
+    SP = ksp;                                           /* load new SP */
+    unix_cm = mmu_set_cm (MODE_K);                      /* PS = 0 */
+    unix_ipl = 0;
+    }
+SP = (SP - UNIX_L_STKF) & M64;                          /* decr stack */
+if (Test (SP, cm_wacc, NULL)) return STOP_KSNV;         /* validate writes */
+if (Test (SP + UNIX_L_STKF - 8, cm_wacc, NULL)) return STOP_KSNV;
+WriteQ (SP, sav_ps);                                    /* save PS, PC, gp */
+WriteQ (SP + 8, PC);
+WriteQ (SP + 16, gp);
+WriteQ (SP + 24, a0);                                   /* save a0-a2 */
+WriteQ (SP + 32, a1);
+WriteQ (SP + 40, a2);
+PC = vec;                                               /* new PC */
+gp = unix_kgp;                                          /* kernel GP */
+a0 = arg;                                               /* argument */
+return SCPE_OK;
+}
+
+/* Memory management fault */
+
+t_stat unix_mm_intexc (t_uint64 par1, t_uint64 par2)
+{
+t_stat r;
+
+r = unix_intexc (entMM, p1);                            /* do exception; a0 = fault VA */
+a1 = par1;                                              /* a1 = MMCSR fault code */
+a2 = par2;                                              /* a2 = access type */
+tlb_is (p1, TLB_CI | TLB_CD);                           /* zap TLB entry */
+return r;
+}
+
+/* System call - always user to kernel, abbreviated stack frame, no arguments */
+
+t_stat unix_syscall (void)
+{
+t_uint64 sav_ps = GET_PSU;                              /* save PS */
+
+usp = SP;                                               /* save user SP */
+SP = ksp;                                               /* load kernel SP */
+unix_cm = mmu_set_cm (MODE_K);                          /* PS = 0 */
+unix_ipl = 0;
+SP = (SP - UNIX_L_STKF) & M64;                          /* decr stack */
+if (Test (SP, cm_wacc, NULL)) return STOP_KSNV;         /* validate writes */
+if (Test (SP + UNIX_L_STKF - 8, cm_wacc, NULL)) return STOP_KSNV;
+WriteQ (SP, sav_ps);                                    /* save PS, PC, gp */
+WriteQ (SP + 8, PC);
+WriteQ (SP + 16, gp);
+PC = entSys;                                            /* new PC */
+gp = unix_kgp;                                          /* kernel GP */
+return SCPE_OK;
+}
+
+/* Return from trap or interrupt - always from kernel */
+
+t_stat unix_rti (void)
+{
+t_uint64 tpc;
+uint32 tps, newm;
+
+if (Test (SP, cm_racc, NULL)) return STOP_KSNV;         /* validate reads */
+if (Test (SP + UNIX_L_STKF - 8, cm_racc, NULL)) return STOP_KSNV;
+tps = (uint32) ReadQ (SP);                              /* read PS, PC */
+tpc = ReadQ (SP + 8);
+gp = ReadQ (SP + 16);                                   /* restore gp, a0-a2 */
+a0 = ReadQ (SP + 24);
+a1 = ReadQ (SP + 32);
+a2 = ReadQ (SP + 40);
+SP = (SP + UNIX_L_STKF);                                /* incr stack */
+newm = (tps >> PSU_V_CM) & PSU_M_CM;
+unix_cm = mmu_set_cm (newm);                            /* new current mode */
+if (newm) {                                             /* to user? */
+    ksp = SP;                                           /* save kernel stack */
+    SP = usp;                                           /* load user stack */
+    unix_ipl = 0;                                       /* ipl = 0 */
+    }
+else unix_ipl = (tps >> PSU_V_IPL) & PSU_M_IPL;         /* restore ipl (mask with M_IPL, not V_IPL) */
+PC = tpc;                                               /* restore PC */
+vax_flag = 0;                                           /* clear VAX, lock flags */
+lock_flag = 0;
+return SCPE_OK;
+}
+
+/* Return from system call - always from kernel to user */
+
+t_stat unix_retsys (void)
+{
+t_uint64 tpc;
+
+if (Test (SP + 8, cm_racc, NULL)) return STOP_KSNV;     /* validate reads */
+if (Test (SP + 16, cm_racc, NULL)) return STOP_KSNV;
+tpc = ReadQ (SP + 8);                                   /* read PC */
+gp = ReadQ (SP + 16);                                   /* restore GP */
+ksp = (SP + UNIX_L_STKF);                               /* update kernel stack */
+SP = usp;                                               /* restore user stack */
+unix_cm = mmu_set_cm (MODE_E);                          /* PS = 8 (user) */
+unix_ipl = 0;
+PC = tpc;                                               /* restore PC */
+vax_flag = 0;                                           /* clear VAX, lock flags */
+lock_flag = 0;
+return SCPE_OK;
+}
+
+/* Return from user mode trap - always from user to user */
+
+void unix_urti (void)
+{
+t_uint64 tsp, tpc;
+uint32 tps;
+
+if (SP & 0x3F) ABORT (EXC_RSVO);                        /* frame not 64B aligned? */
+tps = ReadL (SP + 16);                                  /* read PS */
+if (!(tps & PSU_CM) || (tps & PSU_IPL)) ABORT (EXC_RSVO); /* must be user mode, IPL 0 */
+at = ReadQ (SP + 0);                                    /* restore at */
+tsp = ReadQ (SP + 8);                                   /* read SP, PC */
+tpc = ReadQ (SP + 24);
+gp = ReadQ (SP + 32);                                   /* restore gp, a0-a2 */
+a0 = ReadQ (SP + 40);
+a1 = ReadQ (SP + 48);
+a2 = ReadQ (SP + 56);
+SP = tsp;                                               /* restore SP */
+PC = tpc;                                               /* restore PC */
+vax_flag = 0;                                           /* clear VAX, lock flags */
+lock_flag = 0;
+return;
+}
+
+/* Unix 3-level PTE lookup
+
+ Inputs:
+ vpn = virtual page number (30b, sext)
+ *pte = pointer to pte to be returned
+ Output:
+ status = 0 for successful fill
+ EXC_ACV for ACV on intermediate level
+ EXC_TNV for TNV on intermediate level
+*/
+
+uint32 pal_find_pte_unix (uint32 vpn, t_uint64 *l3pte)
+{
+t_uint64 vptea, l1ptea, l2ptea, l3ptea, l1pte, l2pte;
+uint32 vpte_vpn;
+TLBENT *vpte_p;
+
+vptea = unix_vptptr | (((t_uint64) (vpn & VA_M_VPN)) << 3); /* try virtual lookup */
+vpte_vpn = VA_GETVPN (vptea);                           /* get vpte vpn */
+vpte_p = dtlb_lookup (vpte_vpn);                        /* get vpte tlb ptr */
+if (vpte_p && ((vpte_p->pte & (PTE_KRE|PTE_V)) == (PTE_KRE|PTE_V)))
+    l3ptea = vpte_p->pfn | VA_GETOFF (vptea);           /* hit: PA of level-3 PTE */
+else {                                                  /* miss: walk 3-level table */
+    l1ptea = unix_ptptr + VPN_GETLVL1 (vpn);
+    l1pte = ReadPQ (l1ptea);
+    if ((l1pte & PTE_V) == 0)                           /* L1 invalid? */
+        return ((l1pte & PTE_KRE)? EXC_TNV: EXC_ACV);
+    l2ptea = (l1pte & PFN_MASK) >> (PTE_V_PFN - VA_N_OFF); /* PFN field -> PA */
+    l2ptea = l2ptea + VPN_GETLVL2 (vpn);
+    l2pte = ReadPQ (l2ptea);
+    if ((l2pte & PTE_V) == 0)                           /* L2 invalid? */
+        return ((l2pte & PTE_KRE)? EXC_TNV: EXC_ACV);
+    l3ptea = (l2pte & PFN_MASK) >> (PTE_V_PFN - VA_N_OFF);
+    l3ptea = l3ptea + VPN_GETLVL3 (vpn);
+    }
+*l3pte = ReadPQ (l3ptea);                               /* fetch level-3 PTE */
+return 0;                                               /* success */
+}
+
+/* Unix PALcode reset */
+
+t_stat pal_proc_reset_unix (DEVICE *dptr)
+{
+mmu_ispage = mmu_dspage = SPEN_43;                      /* enable 43b superpages */
+unix_ipl = PSU_M_IPL;                                   /* boot at max Unix IPL */
+unix_cm = mmu_set_cm (MODE_K);                          /* kernel mode */
+pcc_enb = 1;                                            /* cycle counter on */
+pal_eval_intr = &pal_eval_intr_unix;                    /* NOTE(review): fcn returns uint32 but ptr declared t_stat -- confirm */
+pal_proc_intr = &pal_proc_intr_unix;
+pal_proc_trap = &pal_proc_trap_unix;
+pal_proc_excp = &pal_proc_excp_unix;
+pal_proc_inst = &pal_proc_inst_unix;
+pal_find_pte = &pal_find_pte_unix;
+return SCPE_OK;
+}
diff --git a/alpha/old_pal/alpha_pal_vms.c b/alpha/old_pal/alpha_pal_vms.c
new file mode 100644
index 00000000..df0612da
--- /dev/null
+++ b/alpha/old_pal/alpha_pal_vms.c
@@ -0,0 +1,1780 @@
+/* alpha_pal_vms.c - Alpha VMS PAL code simulator
+
+ Copyright (c) 2003-2005, Robert M Supnik
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ Except as contained in this notice, the name of Robert M Supnik shall not be
+ used in advertising or otherwise to promote the sale, use or other dealings
+ in this Software without prior written authorization from Robert M Supnik.
+
+ This module contains the PALcode implementation for Alpha VMS, except for
+ the console, which is always done in hardware mode.
+
+ Alpha VMS requires a complex privileged state, modelled after the VAX:
+
+ PS<12:0> processor status
+ IPL<4:0> interrupt level - in base
+ VMM<0> virtual machine mode
+ CM<1:0> current mode - in base
+ IP<0> interrupt in progress
+ SW<1:0> software controlled
+ KSP<63:0> kernel stack pointer
+ ESP<63:0> executive stack pointer
+ SSP<63:0> supervisor stack pointer
+ USP<63:0> user stack pointer
+ SSC<63:0> system cycle counter
+ PCBB<63:0> process control block base
+ SCBB<63:0> system control block base
+ PTBR<63:0> page table base
+ VTBR<63:0> virtual page table base
+ VIRBND<63:0> virtual address boundary
+ SYSPTBR<63:0> system page table base register
+ PRBR<63:0> processor base register
+ THREAD<63:0> thread-unique value
+ SIRR<15:1> software interrupt requests
+ ASTEN<3:0> AST enables
+ ASTRQ<3:0> AST requests
+ FEN<0> floating enable
+ DATFX<0> data alignment trap enable
+
+ Note that some of this state exists in the hardware implementations and
+ so is declared in the base CPU.
+*/
+
+#include "alpha_defs.h"
+
+/* Alignment table */
+
+#define ALG_W 1 /* word inst */
+#define ALG_L 2 /* long inst */
+#define ALG_Q 3 /* quad inst */
+#define ALG_ST 0x10 /* store */
+#define ALG_INV -1 /* invalid inst */
+#define ALG_ERR 0 /* internal error */
+#define ALG_GETLNT(x) ((x) & 3)
+
+#define GET_PSV ((vms_ipl << PSV_V_IPL) | (vms_cm << PSV_V_CM) | \
+ (vms_ps & PSV_MASK))
+#define AST_TST(l) (((l) < IPL_AST) && (vms_asten & vms_astsr & ast_map[vms_cm]))
+#define MOST_PRIV(m1,m2) (((m1) < (m2))? (m1): (m2))
+
+#define ksp vms_stkp[MODE_K]
+#define esp vms_stkp[MODE_E]
+#define ssp vms_stkp[MODE_S]
+#define usp vms_stkp[MODE_U]
+
+// kludge for debugging...
+#define io_get_vec(x) 0
+
+t_uint64 vms_ptbr = 0; /* page table base */
+t_uint64 vms_vtbr = 0; /* virt page table base */
+t_uint64 vms_virbnd = M64; /* virtual boundary */
+t_uint64 vms_sysptbr = 0; /* system page table base */
+t_uint64 vms_hwpcb = 0; /* hardware PCB */
+t_uint64 vms_thread = 0; /* thread unique */
+t_uint64 vms_prbr = 0; /* processor unique */
+t_uint64 vms_stkp[4]; /* stack pointers */
+t_uint64 vms_scbb = 0; /* SCB base */
+t_uint64 vms_scc = 0; /* system cycle ctr */
+t_uint64 vms_mces = 0; /* machine check err summ */
+uint32 vms_ipl = 0; /* hardware IPL */
+uint32 vms_cm = 0; /* inst current mode */
+uint32 vms_sisr = 0; /* software int req */
+uint32 vms_asten = 0; /* AST enables */
+uint32 vms_astsr = 0; /* AST requests */
+uint32 vms_last_pcc = 0; /* last pcc_l */
+uint32 vms_datfx = 0; /* data alignment */
+uint32 vms_ps = 0; /* static PS */
+
+const uint32 ast_map[4] = { 0x1, 0x3, 0x7, 0xF };
+const uint32 ast_pri[16] = {
+ 0, MODE_K, MODE_E, MODE_K, MODE_S, MODE_K, MODE_E, MODE_K,
+ MODE_U, MODE_K, MODE_E, MODE_K, MODE_S, MODE_K, MODE_E, MODE_K
+ };
+static const uint32 lnt_map[4] = { L_BYTE, L_WORD, L_LONG, L_QUAD };
+static const int8 alg_map[64] = {
+ ALG_ERR, ALG_ERR, ALG_ERR, ALG_ERR,
+ ALG_ERR, ALG_ERR, ALG_ERR, ALG_ERR,
+ ALG_ERR, ALG_ERR, ALG_ERR, ALG_ERR,
+ ALG_W, ALG_W|ALG_ST, ALG_ERR, ALG_ERR,
+ ALG_ERR, ALG_ERR, ALG_ERR, ALG_ERR,
+ ALG_ERR, ALG_ERR, ALG_ERR, ALG_ERR,
+ ALG_ERR, ALG_ERR, ALG_ERR, ALG_ERR,
+ ALG_ERR, ALG_ERR, ALG_ERR, ALG_ERR,
+ ALG_L, ALG_Q, ALG_L, ALG_Q,
+ ALG_L|ALG_ST, ALG_Q|ALG_ST, ALG_L|ALG_ST, ALG_Q|ALG_ST,
+ ALG_L, ALG_Q, ALG_INV, ALG_INV,
+ ALG_L|ALG_ST, ALG_Q|ALG_ST, ALG_INV, ALG_INV,
+ ALG_ERR, ALG_ERR, ALG_ERR, ALG_ERR,
+ ALG_ERR, ALG_ERR, ALG_ERR, ALG_ERR,
+ ALG_ERR, ALG_ERR, ALG_ERR, ALG_ERR,
+ ALG_ERR, ALG_ERR, ALG_ERR, ALG_ERR
+ };
+
+extern t_uint64 R[32];
+extern t_uint64 PC, trap_mask;
+extern t_uint64 p1;
+extern uint32 vax_flag, lock_flag;
+extern uint32 fpen;
+extern uint32 ir, pcc_h, pcc_l, pcc_enb;
+extern uint32 cm_racc, cm_wacc, cm_macc;
+extern uint32 mmu_ispage, mmu_dspage;
+extern jmp_buf save_env;
+extern uint32 int_req[IPL_HLVL];
+
+t_int64 vms_insqhil (void);
+t_int64 vms_insqtil (void);
+t_int64 vms_insqhiq (void);
+t_int64 vms_insqtiq (void);
+t_int64 vms_insquel (uint32 defer);
+t_int64 vms_insqueq (uint32 defer);
+t_int64 vms_remqhil (void);
+t_int64 vms_remqtil (void);
+t_int64 vms_remqhiq (void);
+t_int64 vms_remqtiq (void);
+t_int64 vms_remquel (uint32 defer);
+t_int64 vms_remqueq (uint32 defer);
+t_int64 vms_insqhilr (void);
+t_int64 vms_insqtilr (void);
+t_int64 vms_insqhiqr (void);
+t_int64 vms_insqtiqr (void);
+t_int64 vms_remqhilr (void);
+t_int64 vms_remqtilr (void);
+t_int64 vms_remqhiqr (void);
+t_int64 vms_remqtiqr (void);
+uint32 vms_probe (uint32 acc);
+uint32 vms_amovrr (void);
+uint32 vms_amovrm (void);
+t_stat vms_rei (void);
+void vms_swpctx (void);
+t_stat vms_intexc (uint32 vec, uint32 newmode, uint32 newipl);
+t_stat vms_mm_intexc (uint32 vec, t_uint64 par2);
+t_stat pal_proc_reset_vms (DEVICE *dptr);
+t_uint64 ReadUna (t_uint64 va, uint32 lnt, uint32 acc);
+void WriteUna (t_uint64 va, t_uint64 val, uint32 lnt, uint32 acc);
+uint32 tlb_check (t_uint64 va);
+uint32 Test (t_uint64 va, uint32 acc, t_uint64 *pa);
+
+extern t_stat (*pal_eval_intr) (uint32 ipl);
+extern t_stat (*pal_proc_excp) (uint32 type);
+extern t_stat (*pal_proc_trap) (uint32 type);
+extern t_stat (*pal_proc_intr) (uint32 type);
+extern t_stat (*pal_proc_inst) (uint32 fnc);
+extern uint32 (*pal_find_pte) (uint32 vpn, t_uint64 *pte);
+
+/* VMSPAL data structures
+
+ vmspal_dev device descriptor
+ vmspal_unit unit
+ vmspal_reg register list
+*/
+
+UNIT vmspal_unit = { UDATA (NULL, 0, 0) };
+
+REG vmspal_reg[] = {
+ { HRDATA (KSP, ksp, 64) },
+ { HRDATA (ESP, esp, 64) },
+ { HRDATA (SSP, ssp, 64) },
+ { HRDATA (USP, usp, 64) },
+ { HRDATA (PTBR, vms_ptbr, 64) },
+ { HRDATA (VTBR, vms_vtbr, 64) },
+ { HRDATA (VIRBND, vms_virbnd, 64) },
+ { HRDATA (SYSPTBR, vms_sysptbr, 64) },
+ { HRDATA (THREAD, vms_thread, 64) },
+ { HRDATA (PRBR, vms_prbr, 64) },
+ { HRDATA (HWPCB, vms_hwpcb, 64) },
+ { HRDATA (SCBB, vms_scbb, 64) },
+ { HRDATA (SCC, vms_scc, 64) },
+ { HRDATA (LASTPCC, vms_last_pcc, 32), REG_HRO },
+ { HRDATA (MCES, vms_mces, 64) },
+ { HRDATA (PS, vms_ps, 13) },
+ { HRDATA (IPL, vms_ipl, 5) },
+ { HRDATA (CM, vms_cm, 2) },
+ { HRDATA (SISR, vms_sisr, 16) },
+ { HRDATA (ASTEN, vms_asten, 4) },
+ { HRDATA (ASTSR, vms_astsr, 4) },
+ { FLDATA (DATFX, vms_datfx, 0) },
+ { NULL }
+ };
+
+DEVICE vmspal_dev = {
+ "VMSPAL", &vmspal_unit, vmspal_reg, NULL,
+ 1, 16, 1, 1, 16, 8,
+ NULL, NULL, &pal_proc_reset_vms,
+ NULL, NULL, NULL,
+ NULL, 0
+ };
+
+/* VMS interrupt evaluator - returns IPL of highest priority interrupt */
+
+uint32 pal_eval_intr_vms (uint32 lvl)
+{
+uint32 i;
+static const int32 sw_int_mask[32] = { /* swre ints that outrank IPL n */
+    0xFFFE, 0xFFFC, 0xFFF8, 0xFFF0, /* 0 - 3 */
+    0xFFE0, 0xFFC0, 0xFF80, 0xFF00, /* 4 - 7 */
+    0xFE00, 0xFC00, 0xF800, 0xF000, /* 8 - B */
+    0xE000, 0xC000, 0x8000, 0x0000, /* C - F */
+    0x0000, 0x0000, 0x0000, 0x0000, /* 10+ */
+    0x0000, 0x0000, 0x0000, 0x0000,
+    0x0000, 0x0000, 0x0000, 0x0000,
+    0x0000, 0x0000, 0x0000, 0x0000
+    };
+
+vms_scc = vms_scc + ((pcc_l - vms_last_pcc) & M32); /* update scc */
+vms_last_pcc = pcc_l;
+for (i = IPL_HMAX; i >= IPL_HMIN; i--) { /* chk hwre int */
+    if (i <= lvl) return 0; /* at ipl? no int */
+    if (int_req[i - IPL_HMIN]) return i; /* req != 0? int */
+    }
+if (vms_sisr & sw_int_mask[lvl]) { /* swre interrupt? */
+    for (i = IPL_SMAX; i > lvl; i--) { /* check swre int */
+        if ((vms_sisr >> i) & 1) /* req != 0? int */
+            return (AST_TST (i)? IPL_AST: i); /* check for AST */
+        }
+    }
+return (AST_TST (lvl)? IPL_AST: 0); /* no swre, check AST */
+}
+
+/* VMS interrupt dispatch - reached from top of execute loop */
+
+t_stat pal_proc_intr_vms (uint32 lvl)
+{
+uint32 vec;
+t_stat r;
+
+if (lvl > IPL_HMAX) return SCPE_IERR; /* above max? */
+else if (lvl >= IPL_HMIN) vec = io_get_vec (lvl); /* hwre? get vector */
+else if (lvl > IPL_SMAX) return SCPE_IERR; /* above swre max? */
+else if (lvl > 0) { /* swre int? */
+    if ((lvl == IPL_AST) && (vms_asten & vms_astsr & ast_map[vms_cm])) {
+        uint32 astm = ast_pri[vms_astsr & 0xF]; /* get AST priority */
+        vms_astsr = vms_astsr & ~(1u << astm); /* clear hi pri */
+        vec = SCB_KAST + (astm << 4); /* AST vector by mode */
+        }
+    else { /* swre int */
+        vms_sisr = vms_sisr & ~(1u << lvl); /* clear request */
+        vec = SCB_SISR0 + (lvl << 4); /* swre vector by level */
+        }
+    }
+else return SCPE_IERR; /* bug */
+if (vec == 0) vec = SCB_PASVR; /* passive release? */
+r = vms_intexc (vec, MODE_K, lvl); /* do interrupt */
+vms_ps = vms_ps | PSV_IP; /* set int in prog */
+return r;
+}
+
+/* VMS trap dispatch - reached synchronously from bottom of execute loop */
+
+t_stat pal_proc_trap_vms (uint32 tsum)
+{
+t_stat r;
+
+r = vms_intexc (SCB_ARITH, MODE_K, vms_ipl); /* arithmetic trap */
+R[4] = trap_mask; /* set parameters: R4 = register mask */
+R[5] = tsum; /* R5 = trap summary */
+return r;
+}
+
+/* VMS exception dispatch - reached from the ABORT handler */
+
+t_stat pal_proc_excp_vms (uint32 abval)
+{
+uint32 op, ra, lntc;
+int8 fl;
+t_stat r;
+
+switch (abval) {
+
+    case EXC_RSVI: /* reserved instr */
+        return vms_intexc (SCB_RSVI, MODE_K, vms_ipl); /* trap */
+
+    case EXC_RSVO: /* reserved operand */
+        return vms_intexc (SCB_RSVO, MODE_K, vms_ipl); /* trap */
+
+    case EXC_ALIGN: /* unaligned */
+        op = I_GETOP (ir); /* get opcode */
+        ra = I_GETRA (ir); /* get RA */
+        fl = alg_map[op]; /* get alignment map */
+        if (fl == ALG_ERR) return SCPE_IERR; /* impossible? */
+        if (fl == ALG_INV) /* invalid opc? fault (was a bare comma expr) */
+            return vms_intexc (SCB_RSVI, MODE_K, vms_ipl);
+        lntc = ALG_GETLNT (fl); /* get length code */
+        if (fl & ALG_ST) /* store? emulate it */
+            WriteUna (p1, R[ra], lnt_map[lntc], cm_wacc);
+        else if (ra != 31) /* load, unless R31 sink */
+            R[ra] = ReadUna (p1, lnt_map[lntc], cm_racc);
+        if (vms_datfx) break; /* fixup only? done */
+        r = vms_intexc (SCB_ALIGN, MODE_K, vms_ipl); /* do trap */
+        R[4] = p1; /* R4 = va */
+        R[5] = (fl & ALG_ST)? 1: 0; /* R5 = load/store */
+        return r;
+
+    case EXC_FPDIS: /* fp disabled */
+        PC = (PC - 4) & M64; /* back up PC */
+        return vms_intexc (SCB_FDIS, MODE_K, vms_ipl); /* fault */
+
+    case EXC_FOX+EXC_E: /* FOE */
+        tlb_is (p1, TLB_CI);
+        return vms_mm_intexc (SCB_FOE, VMS_MME_E);
+
+    case EXC_FOX+EXC_R: /* FOR */
+        PC = (PC - 4) & M64; /* back up PC */
+        return vms_mm_intexc (SCB_FOR, VMS_MME_R);
+
+    case EXC_FOX+EXC_W: /* FOW */
+        PC = (PC - 4) & M64; /* back up PC */
+        return vms_mm_intexc (SCB_FOW, VMS_MME_W);
+
+    case EXC_BVA+EXC_E:
+    case EXC_ACV+EXC_E: /* instr ACV */
+        return vms_mm_intexc (SCB_ACV, VMS_MME_E);
+
+    case EXC_BVA+EXC_R:
+    case EXC_ACV+EXC_R: /* data read ACV */
+        PC = (PC - 4) & M64; /* back up PC */
+        return vms_mm_intexc (SCB_ACV, VMS_MME_R);
+
+    case EXC_BVA+EXC_W:
+    case EXC_ACV+EXC_W: /* data write ACV */
+        PC = (PC - 4) & M64; /* back up PC */
+        return vms_mm_intexc (SCB_ACV, VMS_MME_W);
+
+    case EXC_TNV+EXC_E: /* instr TNV */
+        tlb_is (p1, TLB_CI);
+        return vms_mm_intexc (SCB_TNV, VMS_MME_E);
+
+    case EXC_TNV+EXC_R: /* data read TNV */
+        tlb_is (p1, TLB_CD);
+        PC = (PC - 4) & M64; /* back up PC */
+        return vms_mm_intexc (SCB_TNV, VMS_MME_R);
+
+    case EXC_TNV+EXC_W: /* data write TNV */
+        tlb_is (p1, TLB_CD);
+        PC = (PC - 4) & M64; /* back up PC */
+        return vms_mm_intexc (SCB_TNV, VMS_MME_W);
+
+    case EXC_TBM + EXC_E: /* TLB miss */
+    case EXC_TBM + EXC_R:
+    case EXC_TBM + EXC_W:
+        return SCPE_IERR; /* should not occur */
+
+    default:
+        return STOP_INVABO;
+    }
+
+return SCPE_OK;
+}
+
+/* PALcode instruction dispatcher - function code verified in CPU */
+
+t_stat pal_proc_inst_vms (uint32 fnc)
+{
+t_uint64 val;
+uint32 arg32 = (uint32) R[16];
+
+if ((fnc < 0x40) && (vms_cm != MODE_K)) ABORT (EXC_RSVI); /* privileged, not kernel? */
+switch (fnc) {
+
+    case OP_HALT:
+        return STOP_HALT;
+
+    case OP_CFLUSH:
+    case OP_DRAINA:
+        break;
+
+    case OP_LDQP:
+        R[0] = ReadPQ (R[16]);
+        break;
+
+    case OP_STQP:
+        WritePQ (R[16], R[17]);
+        break;
+
+    case OP_SWPCTX:
+        vms_swpctx ();
+        break;
+
+    case MF_ASN:
+        R[0] = itlb_read_asn ();
+        break;
+
+    case MT_ASTEN:
+        R[0] = vms_asten & AST_MASK;
+        vms_asten = ((vms_asten & arg32) | (arg32 >> 4)) & AST_MASK;
+        break;
+
+    case MT_ASTSR:
+        R[0] = vms_astsr & AST_MASK;
+        vms_astsr = ((vms_astsr & arg32) | (arg32 >> 4)) & AST_MASK;
+        break;
+
+    case OP_CSERVE:
+        // tbd
+        break;
+
+    case OP_SWPPAL:
+        R[0] = 0;
+        break;
+
+    case MF_FEN:
+        R[0] = fpen & 1;
+        break;
+
+    case MT_FEN:
+        fpen = arg32 & 1;
+        arg32 = ReadPL (vms_hwpcb + PCBV_FLAGS);
+        arg32 = (arg32 & ~1) | fpen;
+        WritePL (vms_hwpcb + PCBV_FLAGS, arg32);
+        break;
+
+    case MT_IPIR:
+        //tbd
+        break;
+
+    case MF_IPL:
+        R[0] = vms_ipl & PSV_M_IPL;
+        break;
+
+    case MT_IPL:
+        R[0] = vms_ipl & PSV_M_IPL;
+        vms_ipl = arg32 & PSV_M_IPL;
+        break;
+
+    case MF_MCES:
+        R[0] = vms_mces;
+        break;
+
+    case MT_MCES:
+        vms_mces = (vms_mces | (arg32 & MCES_DIS)) & ~(arg32 & MCES_W1C);
+        break;
+
+    case MF_PCBB:
+        R[0] = vms_hwpcb;
+        break;
+
+    case MF_PRBR:
+        R[0] = vms_prbr;
+        break;
+
+    case MT_PRBR:
+        vms_prbr = R[16];
+        break;
+
+    case MF_PTBR:
+        R[0] = (vms_ptbr >> VA_N_OFF); /* PFN only */
+        break;
+
+    case MF_SCBB:
+        R[0] = vms_scbb;
+        break;
+
+    case MT_SCBB:
+        vms_scbb = R[16];
+        break;
+
+    case MF_SISR:
+        R[0] = vms_sisr & SISR_MASK;
+        break;
+
+    case MT_SIRR:
+        vms_sisr = (vms_sisr | (1u << (arg32 & 0xF))) & SISR_MASK;
+        break;
+
+    case MF_TBCHK:
+        if (tlb_check (R[16])) R[0] = Q_SIGN + 1;
+        else R[0] = Q_SIGN;
+        break;
+
+    case MT_TBIA:
+        tlb_ia (TLB_CI | TLB_CD | TLB_CA);
+        break;
+
+    case MT_TBIAP:
+        tlb_ia (TLB_CI | TLB_CD);
+        break;
+
+    case MT_TBIS:
+        tlb_is (R[16], TLB_CI | TLB_CD | TLB_CA);
+        break;
+
+    case MF_ESP:
+        R[0] = esp;
+        break;
+
+    case MT_ESP:
+        esp = R[16];
+        break;
+
+    case MF_SSP:
+        R[0] = ssp;
+        break;
+
+    case MT_SSP:
+        ssp = R[16];
+        break;
+
+    case MF_USP:
+        R[0] = usp;
+        break;
+
+    case MT_USP:
+        usp = R[16];
+        break;
+
+    case MT_TBISI:
+        tlb_is (R[16], TLB_CI | TLB_CA);
+        break;
+
+    case MT_TBISD:
+        tlb_is (R[16], TLB_CD | TLB_CA);
+        break;
+
+    case MF_ASTEN:
+        R[0] = vms_asten & AST_MASK;
+        break;
+
+    case MF_ASTSR:
+        R[0] = vms_astsr & AST_MASK;
+        break;
+
+    case MF_VTBR:
+        R[0] = vms_vtbr;
+        break;
+
+    case MT_VTBR:
+        vms_vtbr = R[16];
+        break;
+
+    case MT_PERFMON:
+        // tbd
+        break;
+
+    case MT_DATFX:
+        vms_datfx = arg32 & 1;
+        val = ReadPQ (vms_hwpcb + PCBV_FLAGS);
+        val = (val & ~0x8000000000000000) | (((t_uint64) vms_datfx) << 63);
+        WritePQ (vms_hwpcb + PCBV_FLAGS, val);
+        break;
+
+    case MF_VIRBND:
+        R[0] = vms_virbnd;
+        break;
+
+    case MT_VIRBND:
+        vms_virbnd = R[16];
+        break;
+
+    case MF_SYSPTBR:
+        R[0] = vms_sysptbr;
+        break;
+
+    case MT_SYSPTBR:
+        vms_sysptbr = R[16];
+        break;
+
+    case OP_WTINT:
+        R[0] = 0;
+        break;
+
+    case MF_WHAMI:
+        R[0] = 0;
+        break;
+
+/* Non-privileged */
+
+    case OP_BPT:
+        return vms_intexc (SCB_BPT, MODE_K, vms_ipl);
+
+    case OP_BUGCHK:
+        return vms_intexc (SCB_BUG, MODE_K, vms_ipl);
+
+    case OP_CHME:
+        return vms_intexc (SCB_CHME, MOST_PRIV (MODE_E, vms_cm), vms_ipl);
+
+    case OP_CHMK:
+        return vms_intexc (SCB_CHMK, MODE_K, vms_ipl);
+
+    case OP_CHMS:
+        return vms_intexc (SCB_CHMS, MOST_PRIV (MODE_S, vms_cm), vms_ipl);
+
+    case OP_CHMU:
+        return vms_intexc (SCB_CHMU, vms_cm, vms_ipl);
+        break; /* not reached */
+
+    case OP_IMB:
+        break;
+
+    case OP_INSQHIL:
+        R[0] = vms_insqhil ();
+        break;
+
+    case OP_INSQTIL:
+        R[0] = vms_insqtil ();
+        break;
+
+    case OP_INSQHIQ:
+        R[0] = vms_insqhiq ();
+        break;
+
+    case OP_INSQTIQ:
+        R[0] = vms_insqtiq ();
+        break;
+
+    case OP_INSQUEL:
+        R[0] = vms_insquel (0);
+        break;
+
+    case OP_INSQUEQ:
+        R[0] = vms_insqueq (0);
+        break;
+
+    case OP_INSQUELD:
+        R[0] = vms_insquel (1);
+        break;
+
+    case OP_INSQUEQD:
+        R[0] = vms_insqueq (1);
+        break;
+
+    case OP_PROBER:
+        R[0] = vms_probe (PTE_KRE);
+        break;
+
+    case OP_PROBEW:
+        R[0] = vms_probe (PTE_KRE|PTE_KWE);
+        break;
+
+    case OP_RD_PS:
+        R[0] = GET_PSV;
+        break;
+
+    case OP_REI:
+        return vms_rei ();
+
+    case OP_REMQHIL:
+        R[0] = vms_remqhil (); /* was vms_insqhil (copy/paste bug) */
+        break;
+
+    case OP_REMQTIL:
+        R[0] = vms_remqtil ();
+        break;
+
+    case OP_REMQHIQ:
+        R[0] = vms_remqhiq ();
+        break;
+
+    case OP_REMQTIQ:
+        R[0] = vms_remqtiq ();
+        break;
+
+    case OP_REMQUEL:
+        R[0] = vms_remquel (0);
+        break;
+
+    case OP_REMQUEQ:
+        R[0] = vms_remqueq (0);
+        break;
+
+    case OP_REMQUELD:
+        R[0] = vms_remquel (1);
+        break;
+
+    case OP_REMQUEQD:
+        R[0] = vms_remqueq (1);
+        break;
+
+    case OP_SWASTEN:
+        R[0] = (vms_asten >> vms_cm) & 1;
+        vms_asten = (vms_asten & ~(1u << vms_cm)) | ((arg32 & 1) << vms_cm);
+        break;
+
+    case OP_WR_PS_SW:
+        vms_ps = (vms_ps & ~PSV_M_SW) | (arg32 & PSV_M_SW);
+        break;
+
+    case OP_RSCC:
+        vms_scc = vms_scc + ((pcc_l - vms_last_pcc) & M32); /* update scc */
+        vms_last_pcc = pcc_l;
+        R[0] = vms_scc;
+        break;
+
+    case OP_RD_UNQ:
+        R[0] = vms_thread;
+        break;
+
+    case OP_WR_UNQ:
+        vms_thread = R[16];
+        break;
+
+    case OP_AMOVRR:
+        R[18] = vms_amovrr ();
+        break;
+
+    case OP_AMOVRM:
+        R[18] = vms_amovrm ();
+        break;
+
+    case OP_INSQHILR:
+        R[0] = vms_insqhilr ();
+        break;
+
+    case OP_INSQTILR:
+        R[0] = vms_insqtilr ();
+        break;
+
+    case OP_INSQHIQR:
+        R[0] = vms_insqhiqr ();
+        break;
+
+    case OP_INSQTIQR:
+        R[0] = vms_insqtiqr ();
+        break;
+
+    case OP_REMQHILR:
+        R[0] = vms_remqhilr (); /* was vms_insqhilr (copy/paste bug) */
+        break;
+
+    case OP_REMQTILR:
+        R[0] = vms_remqtilr ();
+        break;
+
+    case OP_REMQHIQR:
+        R[0] = vms_remqhiqr ();
+        break;
+
+    case OP_REMQTIQR:
+        R[0] = vms_remqtiqr ();
+        break;
+
+    case OP_GENTRAP:
+        return vms_intexc (SCB_GENTRAP, MODE_K, vms_ipl);
+
+    case OP_CLRFEN:
+        fpen = 0;
+        arg32 = ReadPL (vms_hwpcb + PCBV_FLAGS);
+        arg32 = arg32 & ~1;
+        WritePL (vms_hwpcb + PCBV_FLAGS, arg32);
+        break;
+
+    default:
+        ABORT (EXC_RSVI);
+    }
+
+return SCPE_OK;
+}
+
+/* Interlocked insert instructions
+
+ R[16] = entry
+ R[17] = header
+
+ Pictorially:
+
+ BEFORE AFTER INSQHI AFTER INSQTI
+
+ H: A-H H: D-H W H: A-H W for interlock
+ H+4/8: C-H H+4/8: C-H H+4/8: D-H W
+
+ A: B-A A: B-A A: B-A
+ A+4/8: H-A A+4/8: D-A W A+4/8: H-A
+
+ B: C-B B: C-B B: C-B
+ B+4/8: A-B B+4/8: A-B B+4/8: A-B
+
+ C: H-C C: H-C C: D-C W
+ C+4/8: B-C C+4/8: B-C C+4/8: B-C
+
+ D: --- D: A-D W D: H-D W
+ D+4/8: --- D+4/8: H-D W D+4/8: C-D W
+
+ Note that the queue header, the entry to be inserted, and all
+ the intermediate entries that are "touched" in any way must be
+ QUAD(OCTA)WORD aligned. In addition, the header and the entry
+ must not be equal.
+
+ Note that the offset arithmetic (+4, +8) cannot overflow 64b,
+ because the entries are quad or octa aligned.
+*/
+
+t_int64 vms_insqhil (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 d = R[17]; /* entry address */
+t_uint64 ar, a;
+
+if ((h == d) || ((h | d) & 07) || /* h, d quad align? */
+    ((SEXT_L_Q (h) & M64) != h) ||
+    ((SEXT_L_Q (d) & M64) != d)) ABORT (EXC_RSVO);
+ReadAccQ (d, cm_wacc); /* wchk (d) */
+ar = ReadQ (h); /* a <- (h) */
+if (ar & 06) ABORT (EXC_RSVO); /* a quad align? */
+if (ar & 01) return -1; /* busy, ret -1 */
+WriteQ (h, ar | 1); /* get interlock */
+a = (SEXT_L_Q (ar + h)) & M64; /* abs addr of a */
+if (Test (a, cm_wacc, NULL)) WriteQ (h, ar); /* wtst a, rls if err */
+WriteL (a + 4, (uint32) (d - a)); /* (a+4) <- d-a, flt ok */
+WriteL (d, (uint32) (a - d)); /* (d) <- a-d */
+WriteL (d + 4, (uint32) (h - d)); /* (d+4) <- h-d */
+WriteL (h, (uint32) (d - h)); /* (h) <- d-h, rls int */
+return ((ar & M32) == 0)? 0: +1; /* ret 0 if q was empty */
+}
+
+t_int64 vms_insqhilr (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 d = R[17]; /* entry address */
+t_uint64 ar, a;
+
+ar = ReadQ (h); /* a <- (h); register variant: no align/access checks */
+if (ar & 01) return -1; /* busy, ret -1 */
+WriteQ (h, ar | 1); /* get interlock */
+a = (SEXT_L_Q (ar + h)) & M64; /* abs addr of a */
+WriteL (a + 4, (uint32) (d - a)); /* (a+4) <- d-a, flt ok */
+WriteL (d, (uint32) (a - d)); /* (d) <- a-d */
+WriteL (d + 4, (uint32) (h - d)); /* (d+4) <- h-d */
+WriteL (h, (uint32) (d - h)); /* (h) <- d-h, rls int */
+return ((ar & M32) == 0)? 0: +1; /* ret 0 if q was empty */
+}
+
+t_int64 vms_insqhiq (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 d = R[17]; /* entry address */
+t_uint64 ar, a;
+
+if ((h == d) || ((h | d) & 0xF)) ABORT (EXC_RSVO); /* h, d octa align? */
+ReadAccQ (d, cm_wacc); /* wchk (d) */
+ar = ReadQ (h); /* a <- (h) */
+if (ar & 0xE) ABORT (EXC_RSVO); /* a octa align? */
+if (ar & 01) return -1; /* busy, ret -1 */
+WriteQ (h, ar | 1); /* get interlock */
+a = (ar + h) & M64; /* abs addr of a */
+if (Test (a, cm_wacc, NULL)) WriteQ (h, ar); /* wtst a, rls if err */
+WriteQ (a + 8, (d - a) & M64); /* (a+8) <- d-a, flt ok */
+WriteQ (d, (a - d) & M64); /* (d) <- a-d */
+WriteQ (d + 8, (h - d) & M64); /* (d+8) <- h-d */
+WriteQ (h, (d - h) & M64); /* (h) <- d-h, rls int */
+return (ar == 0)? 0: +1; /* ret 0 if q was empty */
+}
+
+t_int64 vms_insqhiqr (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 d = R[17]; /* entry address */
+t_uint64 ar, a;
+
+ar = ReadQ (h); /* a <- (h); register variant: no align/access checks */
+if (ar & 01) return -1; /* busy, ret -1 */
+WriteQ (h, ar | 1); /* get interlock */
+a = (ar + h) & M64; /* abs addr of a */
+WriteQ (a + 8, (d - a) & M64); /* (a+8) <- d-a, flt ok */
+WriteQ (d, (a - d) & M64); /* (d) <- a-d */
+WriteQ (d + 8, (h - d) & M64); /* (d+8) <- h-d */
+WriteQ (h, (d - h) & M64); /* (h) <- d-h, rls int */
+return (ar == 0)? 0: +1; /* ret 0 if q was empty */
+}
+
+t_int64 vms_insqtil (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 d = R[17]; /* entry address */
+t_uint64 ar, c;
+
+if ((h == d) || ((h | d) & 07) || /* h, d quad align? */
+    ((SEXT_L_Q (h) & M64) != h) ||
+    ((SEXT_L_Q (d) & M64) != d)) ABORT (EXC_RSVO);
+ReadAccQ (d, cm_wacc); /* wchk (d) */
+ar = ReadQ (h); /* a <- (h) */
+if ((ar & M32) == 0) return vms_insqhil (); /* if empty, ins hd */
+if (ar & 06) ABORT (EXC_RSVO); /* a quad align? */
+if (ar & 01) return -1; /* busy, ret -1 */
+WriteQ (h, ar | 1); /* acquire interlock */
+c = ar >> 32; /* c <- (h+4) */
+c = (SEXT_L_Q (c + h)) & M64; /* abs addr of c */
+if (c & 07) { /* c quad aligned? */
+    WriteQ (h, ar); /* release interlock */
+    ABORT (EXC_RSVO); /* fault */
+    }
+if (Test (c, cm_wacc, NULL)) WriteQ (h, ar); /* wtst c, rls if err */
+WriteL (c, (uint32) (d - c)); /* (c) <- d-c, flt ok */
+WriteL (d, (uint32) (h - d)); /* (d) <- h-d */
+WriteL (d + 4, (uint32) (c - d)); /* (d+4) <- c-d */
+WriteL (h + 4, (uint32) (d - h)); /* (h+4) <- d-h */
+WriteL (h, (uint32) ar); /* release interlock */
+return 0; /* q was not empty */
+}
+
+t_int64 vms_insqtilr (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 d = R[17]; /* entry address */
+t_uint64 ar, c;
+
+ar = ReadQ (h); /* a <- (h); register variant: no align/access checks */
+if ((ar & M32) == 0) return vms_insqhilr (); /* if empty, ins hd */
+if (ar & 01) return -1; /* busy, ret -1 */
+WriteQ (h, ar | 1); /* acquire interlock */
+c = ar >> 32; /* c <- (h+4) */
+c = (SEXT_L_Q (c + h)) & M64; /* abs addr of c */
+WriteL (c, (uint32) (d - c)); /* (c) <- d-c */
+WriteL (d, (uint32) (h - d)); /* (d) <- h-d */
+WriteL (d + 4, (uint32) (c - d)); /* (d+4) <- c-d */
+WriteL (h + 4, (uint32) (d - h)); /* (h+4) <- d-h */
+WriteL (h, (uint32) ar); /* release interlock */
+return 0; /* q was not empty */
+}
+
+t_int64 vms_insqtiq (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 d = R[17]; /* entry address */
+t_uint64 ar, c;
+
+if ((h == d) || ((h | d) & 0xF)) ABORT (EXC_RSVO); /* h, d octa align? */
+ReadAccQ (d, cm_wacc); /* wchk ent */
+ar = ReadQ (h); /* a <- (h) */
+if (ar == 0) return vms_insqhiq (); /* if empty, ins hd */
+if (ar & 0xE) ABORT (EXC_RSVO); /* a octa align? */
+if (ar & 01) return -1; /* busy, ret -1 */
+WriteQ (h, ar | 1); /* acquire interlock */
+c = ReadQ (h + 8); /* c <- (h+8) */
+c = (c + h) & M64; /* abs addr of C */
+if (c & 0xF) { /* c octa aligned? */
+    WriteQ (h, ar); /* release interlock */
+    ABORT (EXC_RSVO); /* fault */
+    }
+if (Test (c, cm_wacc, NULL)) WriteQ (h, ar); /* wtst c, rls if err */
+WriteQ (c, (d - c) & M64); /* (c) <- d-c, flt ok */
+WriteQ (d, (h - d) & M64); /* (d) <- h-d */
+WriteQ (d + 8, (c - d) & M64); /* (d+8) <- c-d */
+WriteQ (h + 8, (d - h) & M64); /* (h+8) <- d-h */
+WriteQ (h, ar); /* release interlock */
+return 0; /* q was not empty */
+}
+
+t_int64 vms_insqtiqr (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 d = R[17]; /* entry address */
+t_uint64 ar, c;
+
+ar = ReadQ (h); /* a <- (h); register variant: no align/access checks */
+if (ar == 0) return vms_insqhiqr (); /* if empty, ins hd */
+if (ar & 01) return -1; /* busy, ret -1 */
+WriteQ (h, ar | 1); /* acquire interlock */
+c = ReadQ (h + 8); /* c <- (h+8) */
+c = (c + h) & M64; /* abs addr of C */
+WriteQ (c, (d - c) & M64); /* (c) <- d-c */
+WriteQ (d, (h - d) & M64); /* (d) <- h-d */
+WriteQ (d + 8, (c - d) & M64); /* (d+8) <- c-d */
+WriteQ (h + 8, (d - h) & M64); /* (h+8) <- d-h */
+WriteQ (h, ar); /* release interlock */
+return 0; /* q was not empty */
+}
+
+/* Interlocked remove instructions
+
+ R[16] = header (hdr.aq)
+   R[1] = receives destination address
+
+ Pictorially:
+
+ BEFORE AFTER REMQHI AFTER REMQTI
+
+ H: A-H H: B-H W H: A-H W for interlock
+ H+4/8: C-H H+4/8: C-H H+4/8: B-H W
+
+ A: B-A A: B-A R A: B-A
+ A+4/8: H-A A+4/8: H-A A+4/8: H-A
+
+ B: C-B B: C-B B: H-B W
+ B+4/8: A-B B+4/8: H-B W B+4/8: A-B
+
+ C: H-C C: H-C C: H-C
+ C+4/8: B-C C+4/8: B-C C+4/8: B-C R
+
+ Note that the queue header and all the entries that are
+ "touched" in any way must be QUAD(OCTA)WORD aligned.
+*/
+
+t_int64 vms_remqhil (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 ar, a, b;
+
+if ((h & 07) || ((SEXT_L_Q (h) & M64) != h)) /* h quad aligned? */
+    ABORT (EXC_RSVO);
+ar = ReadQ (h); /* ar <- (h) */
+if (ar & 06) ABORT (EXC_RSVO); /* a quad aligned? */
+if (ar & 01) return -1; /* busy, ret -1 */
+if ((ar & M32) == 0) return 0; /* queue empty? */
+WriteQ (h, ar | 1); /* acquire interlock */
+a = (SEXT_L_Q (ar + h)) & M64; /* abs addr of a */
+if (Test (a, cm_racc, NULL)) WriteQ (h, ar); /* rtst a, rls if err */
+b = ReadL (a); /* b <- (a), flt ok */
+b = (SEXT_L_Q (b + a)) & M64; /* abs addr of b */
+if (b & 07) { /* b quad aligned? */
+    WriteQ (h, ar); /* release interlock */
+    ABORT (EXC_RSVO); /* fault */
+    }
+if (Test (b, cm_wacc, NULL)) WriteQ (h, ar); /* wtst b, rls if err */
+WriteL (b + 4, (uint32) (h - b)); /* (b+4) <- h-b, flt ok */
+WriteL (h, (uint32) (b - h)); /* (h) <- b-h, rls int */
+R[1] = a; /* address of entry */
+return ((b & M32) == (h & M32))? +2: +1; /* if b = h, q empty */
+}
+
+t_int64 vms_remqhilr (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 ar, a, b;
+
+ar = ReadQ (h); /* ar <- (h); register variant: no align/access checks */
+if (ar & 01) return -1; /* busy, ret -1 */
+if ((ar & M32) == 0) return 0; /* queue empty? */
+WriteQ (h, ar | 1); /* acquire interlock */
+a = (SEXT_L_Q (ar + h)) & M64; /* abs addr of a */
+b = ReadL (a); /* b <- (a), flt ok */
+b = (SEXT_L_Q (b + a)) & M64; /* abs addr of b */
+WriteL (b + 4, (uint32) (h - b)); /* (b+4) <- h-b, flt ok */
+WriteL (h, (uint32) (b - h)); /* (h) <- b-h, rls int */
+R[1] = a; /* address of entry */
+return ((b & M32) == (h & M32))? +2: +1; /* if b = h, q empty */
+}
+
+t_int64 vms_remqhiq (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 ar, a, b;
+
+if (h & 0xF) ABORT (EXC_RSVO); /* h octa aligned? */
+ar = ReadQ (h); /* ar <- (h) */
+if (ar & 0xE) ABORT (EXC_RSVO); /* a octa aligned? */
+if (ar & 01) return -1; /* busy, ret -1 */
+if (ar == 0) return 0; /* queue empty? */
+WriteQ (h, ar | 1); /* acquire interlock */
+a = (ar + h) & M64; /* abs addr of a */
+if (Test (a, cm_racc, NULL)) WriteQ (h, ar); /* rtst a, rls if err */
+b = ReadQ (a); /* b <- (a), flt ok */
+b = (b + a) & M64; /* abs addr of b */
+if (b & 0xF) { /* b octa aligned? */
+    WriteQ (h, ar); /* release interlock */
+    ABORT (EXC_RSVO); /* fault */
+    }
+if (Test (b, cm_wacc, NULL)) WriteQ (h, ar); /* wtst b, rls if err */
+WriteQ (b + 8, (h - b) & M64); /* (b+8) <- h-b, flt ok */
+WriteQ (h, (b - h) & M64); /* (h) <- b-h, rls int */
+R[1] = a; /* address of entry */
+return (b == h)? +2: +1; /* if b = h, q empty */
+}
+
+t_int64 vms_remqhiqr (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 ar, a, b;
+
+ar = ReadQ (h); /* ar <- (h); register variant: no align/access checks */
+if (ar & 01) return -1; /* busy, ret -1 */
+if (ar == 0) return 0; /* queue empty? */
+WriteQ (h, ar | 1); /* acquire interlock */
+a = (ar + h) & M64; /* abs addr of a */
+b = ReadQ (a); /* b <- (a) */
+b = (b + a) & M64; /* abs addr of b */
+WriteQ (b + 8, (h - b) & M64); /* (b+8) <- h-b, flt ok */
+WriteQ (h, (b - h) & M64); /* (h) <- b-h, rls int */
+R[1] = a; /* address of entry */
+return (b == h)? +2: +1; /* if b = h, q empty */
+}
+
+t_int64 vms_remqtil (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 ar, b, c;
+
+if ((h & 07) || ((SEXT_L_Q (h) & M64) != h)) /* h quad aligned? */
+    ABORT (EXC_RSVO);
+ar = ReadQ (h); /* a <- (h) */
+if (ar & 06) ABORT (EXC_RSVO); /* a quad aligned? */
+if (ar & 01) return -1; /* busy, return -1 */
+if ((ar & M32) == 0) return 0; /* empty, return 0 */
+WriteQ (h, ar | 1); /* acquire interlock */
+c = ar >> 32; /* c <- (h+4) */
+if (c & 07) { /* c quad aligned? */
+    WriteQ (h, ar); /* release interlock */
+    ABORT (EXC_RSVO); /* fault */
+    }
+if ((ar & M32) == (c & M32)) { /* single entry? */
+    WriteQ (h, ar); /* release interlock */
+    return vms_remqhil (); /* treat as remqhil */
+    }
+c = (SEXT_L_Q (c + h)) & M64; /* abs addr of c */
+if (Test (c + 4, cm_racc, NULL)) WriteQ (h, ar); /* rtst c+4, rls if err */
+b = ReadL (c + 4); /* b <- (c+4), flt ok */
+b = (SEXT_L_Q (b + c)) & M64; /* abs addr of b */
+if (b & 07) { /* b quad aligned? */
+    WriteQ (h, ar); /* release interlock */
+    ABORT (EXC_RSVO); /* fault */
+    }
+if (Test (b, cm_wacc, NULL)) WriteQ (h, ar); /* wtst b, rls if err */
+WriteL (b, (uint32) (h - b)); /* (b) <- h-b, flt ok */
+WriteL (h + 4, (uint32) (b - h)); /* (h+4) <- b-h */
+WriteL (h, (uint32) ar); /* release interlock */
+R[1] = c; /* store result */
+return +1; /* q can't be empty */
+}
+
+t_int64 vms_remqtilr (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 ar, b, c;
+
+ar = ReadQ (h); /* a <- (h); register variant: no align/access checks */
+if (ar & 01) return -1; /* busy, return -1 */
+if ((ar & M32) == 0) return 0; /* empty, return 0 */
+WriteQ (h, ar | 1); /* acquire interlock */
+c = ar >> 32; /* c <- (h+4) */
+if ((ar & M32) == (c & M32)) { /* single entry? */
+    WriteQ (h, ar); /* release interlock */
+    return vms_remqhilr (); /* treat as remqhilr */
+    }
+c = (SEXT_L_Q (c + h)) & M64; /* abs addr of c */
+b = ReadL (c + 4); /* b <- (c+4) */
+b = (SEXT_L_Q (b + c)) & M64; /* abs addr of b (sext the sum, as in remqtil) */
+WriteL (b, (uint32) (h - b)); /* (b) <- h-b */
+WriteL (h + 4, (uint32) (b - h)); /* (h+4) <- b-h */
+WriteL (h, (uint32) ar); /* release interlock */
+R[1] = c; /* store result */
+return +1; /* q can't be empty */
+}
+
+t_int64 vms_remqtiq (void)
+{
+t_uint64 h = R[16]; /* header address */
+t_uint64 ar, b, c;
+
+if (h & 0xF) ABORT (EXC_RSVO); /* h octa aligned? */
+ar = ReadQ (h); /* a <- (h) */
+if (ar & 0xE) ABORT (EXC_RSVO); /* a octa aligned? */
+if (ar & 01) return -1; /* busy, return -1 */
+if (ar == 0) return 0; /* empty, return 0 */
+WriteQ (h, ar | 1); /* acquire interlock */
+c = ReadQ (h + 8); /* c <- (h+8) */
+if (c & 0xF) { /* c octa aligned? */
+    WriteQ (h, ar); /* release interlock */
+    ABORT (EXC_RSVO); /* fault */
+    }
+if (ar == c) { /* single entry? */
+    WriteQ (h, ar); /* release interlock */
+    return vms_remqhiq (); /* treat as remqhiq */
+    }
+c = (c + h) & M64; /* abs addr of c */
+if (Test (c + 8, cm_racc, NULL)) WriteQ (h, ar); /* rtst c+8, rls if err */
+b = ReadQ (c + 8); /* b <- (c+8), flt ok */
+b = (b + c) & M64; /* abs addr of b */
+if (b & 0xF) { /* b octa aligned? */
+    WriteQ (h, ar); /* release interlock */
+    ABORT (EXC_RSVO); /* fault */
+    }
+if (Test (b, cm_wacc, NULL)) WriteQ (h, ar); /* wtst b, rls if err */
+WriteQ (b, (h - b) & M64); /* (b) <- h-b, flt ok */
+WriteQ (h + 8, (b - h) & M64); /* (h+8) <- b-h */
+WriteQ (h, ar); /* release interlock */
+R[1] = c; /* store result */
+return +1; /* q can't be empty */
+}
+
+/* REMQTIQ, no resident check - remove tail, interlocked, quadword
+
+   R[16] = head address (h)
+   Returns -1 if the queue is locked, 0 if it is empty, +1 on
+   successful removal; R[1] = address of the removed entry.
+   Unlike vms_remqtiq, no alignment or accessibility pretests
+   are done. */
+
+t_int64 vms_remqtiqr (void)
+{
+t_uint64 h = R[16];
+t_uint64 ar, b, c;
+
+ar = ReadQ (h);                                         /* a <- (h) */
+if (ar & 01) return -1;                                 /* busy, return -1 */
+if (ar == 0) return 0;                                  /* empty, return 0 */
+WriteQ (h, ar | 1);                                     /* acquire interlock */
+c = ReadQ (h + 8);                                      /* c <- (h+8) */
+if (ar == c) {                                          /* single entry? */
+    WriteQ (h, ar);                                     /* release interlock */
+    return vms_remqhiq ();                              /* treat as remqhiq */
+    }
+c = (c + h) & M64;                                      /* abs addr of c */
+b = ReadQ (c + 8);                                      /* b <- (c+8) */
+b = (b + c) & M64;                                      /* abs addr of b */
+WriteQ (b, (h - b) & M64);                              /* (b) <- h-b */
+WriteQ (h + 8, (b - h) & M64);                          /* (h+8) <- b-h */
+WriteQ (h, ar);                                         /* release interlock */
+R[1] = c;                                               /* store result */
+return +1;                                              /* q can't be empty */
+}
+
+/* INSQUE
+
+ R[16] = predecessor address
+ R[17] = entry address
+
+ All writes must be checked before any writes are done.
+
+ Pictorially:
+
+ BEFORE AFTER
+
+ P: S P: E W
+ P+4/8: (n/a) P+4/8: (n/a)
+
+ E: --- E: S W
+ E+4/8: --- E+4/8: P W
+
+ S: (n/a) S: (n/a)
+ S+4/8: P S+4/8: E W
+
+ For longword queues, operands can be misaligned.
+ Quadword queues must be octaword aligned, and the
+ address addition cannot overflow 64b.
+ Note that WriteUna masks data to its proper length.
+*/
+
+/* INSQUE, longword form - insert entry after predecessor
+
+   defer = if nonzero, R[16] holds the address of the predecessor address
+   R[16] = predecessor address
+   R[17] = entry address
+   Operands may be misaligned; ReadUna/WriteUna handle that.
+   All locations to be written are access-checked before the first
+   write, so a fault cannot leave the queue half-linked.
+   Returns +1 if the queue was empty before the insert, 0 otherwise. */
+
+t_int64 vms_insquel (uint32 defer)
+{
+t_uint64 p = SEXT_L_Q (R[16]) & M64;
+t_uint64 e = SEXT_L_Q (R[17]) & M64;
+t_uint64 s;
+
+if (defer) {                                            /* defer? */
+    p = ReadUna (p, L_LONG, cm_racc);                   /* get address */
+    p = SEXT_L_Q (p) & M64;                             /* make 64b */
+    }
+s = ReadUna (p, L_LONG, cm_macc);                       /* s <- (p), wchk */
+s = SEXT_L_Q (s) & M64;                                 /* make 64b */
+ReadUna ((s + 4) & M64, L_LONG, cm_wacc);               /* wchk s+4 */
+ReadUna ((e + 4) & M64, L_LONG, cm_wacc);               /* wchk e+4 */
+WriteUna (e, s, L_LONG, cm_wacc);                       /* (e) <- s, last unchecked */
+WriteUna ((e + 4) & M64, p, L_LONG, cm_wacc);           /* (e+4) <- p */
+WriteUna ((s + 4) & M64, e, L_LONG, cm_wacc);           /* (s+4) <- ent */
+WriteUna (p, e, L_LONG, cm_wacc);                       /* (p) <- e */
+return (((s & M32) == (p & M32))? +1: 0);               /* +1 if was empty */
+}
+
+/* INSQUE, quadword form - insert entry after predecessor
+
+   defer = if nonzero, R[16] holds the address of the predecessor pointer
+   R[16] = predecessor address
+   R[17] = entry address
+   All operands must be octaword aligned; every location to be written
+   is access-checked before the first store, so a fault cannot leave
+   the queue half-linked.
+   Returns +1 if the queue was empty before the insert, 0 otherwise. */
+
+t_int64 vms_insqueq (uint32 defer)
+{
+t_uint64 pred = R[16];
+t_uint64 ent = R[17];
+t_uint64 succ;
+
+if (defer) {                                            /* indirect form? */
+    if (pred & 07) ABORT (EXC_RSVO);                    /* pointer quad aligned? */
+    pred = ReadQ (pred);                                /* fetch real predecessor */
+    }
+if ((ent | pred) & 0xF) ABORT (EXC_RSVO);               /* both octa aligned? */
+succ = ReadAccQ (pred, cm_macc);                        /* successor <- (pred), wchk */
+if (succ & 0xF) ABORT (EXC_RSVO);                       /* successor octa aligned? */
+ReadAccQ (succ + 8, cm_wacc);                           /* wchk (succ+8) */
+ReadAccQ (ent + 8, cm_wacc);                            /* wchk (ent+8) */
+WriteQ (ent, succ);                                     /* (ent) <- successor */
+WriteQ (ent + 8, pred);                                 /* (ent+8) <- predecessor */
+WriteQ (succ + 8, ent);                                 /* (succ+8) <- entry */
+WriteQ (pred, ent);                                     /* (pred) <- entry */
+return ((succ == pred)? +1: 0);                         /* +1 if was empty */
+}
+
+/* REMQUE
+
+ R[16] = entry address
+
+ All writes must be checked before any writes are done.
+
+ Pictorially:
+
+ BEFORE AFTER
+
+ P: E P: S W
+ P+4/8: (n/a) P+4/8: (n/a)
+
+ E: S W E: S
+ E+4/8: P W E+4/8: P
+
+ S: (n/a) S: (n/a)
+ S+4/8: E W S+4/8: P
+
+*/
+
+/* REMQUE, longword form - remove entry from queue
+
+   defer = if nonzero, R[16] holds the address of the entry address
+   R[16] = entry address
+   Returns -1 if the queue was already empty, 0 if it is empty after
+   the removal, +1 otherwise.  (s+4) is write-checked before any
+   write is done. */
+
+t_int64 vms_remquel (uint32 defer)
+{
+t_uint64 e = SEXT_L_Q (R[16]) & M64;
+t_uint64 s, p;
+
+if (defer) {                                            /* defer? */
+    e = ReadUna (e, L_LONG, cm_racc);                   /* get address */
+    e = SEXT_L_Q (e) & M64;                             /* make 64b */
+    }
+s = ReadUna (e, L_LONG, cm_racc);                       /* s <- (e) */
+p = ReadUna ((e + 4) & M64, L_LONG, cm_racc);           /* p <- (e+4) */
+s = SEXT_L_Q (s) & M64;                                 /* make 64b */
+p = SEXT_L_Q (p) & M64;
+if (e == p) return -1;                                  /* queue empty? */
+ReadUna ((s + 4) & M64, L_LONG, cm_wacc);               /* wchk (s+4) */
+WriteUna (p, s, L_LONG, cm_wacc);                       /* (p) <- s */
+WriteUna ((s + 4) & M64, p, L_LONG, cm_wacc);           /* (s+4) <- p */
+return ((s == p)? 0: +1);                               /* 0 if now empty */
+}
+
+/* REMQUE, quadword form - remove entry from queue
+
+   defer = if nonzero, R[16] holds the address of the entry address
+   R[16] = entry address
+   All operands must be octaword aligned; (succ+8) is write-checked
+   before any write is done.
+   Returns -1 if the queue was already empty, 0 if it is empty after
+   the removal, +1 otherwise. */
+
+t_int64 vms_remqueq (uint32 defer)
+{
+t_uint64 ent = R[16];
+t_uint64 succ, pred;
+
+if (defer) {                                            /* indirect form? */
+    if (ent & 07) ABORT (EXC_RSVO);                     /* pointer quad aligned? */
+    ent = ReadQ (ent);                                  /* fetch real entry addr */
+    }
+if (ent & 0xF) ABORT (EXC_RSVO);                        /* entry octa aligned? */
+succ = ReadQ (ent);                                     /* successor <- (ent) */
+pred = ReadQ (ent + 8);                                 /* predecessor <- (ent+8) */
+if ((succ | pred) & 0xF) ABORT (EXC_RSVO);              /* links octa aligned? */
+if (ent == pred) return -1;                             /* queue empty? */
+ReadAccQ (succ + 8, cm_wacc);                           /* wchk (succ+8) */
+WriteQ (pred, succ);                                    /* (pred) <- successor */
+WriteQ (succ + 8, pred);                                /* (succ+8) <- predecessor */
+return ((succ == pred)? 0: +1);                         /* 0 if now empty */
+}
+
+/* Probe */
+
+/* Probe accessibility of a byte range
+
+   acc   = access type to test
+   R[16] = start address, R[17] = length, R[18]<1:0> = probe mode
+   The probe mode is raised to the current mode if less privileged.
+   Returns 1 if both ends of the range are accessible, else 0. */
+
+uint32 vms_probe (uint32 acc)
+{
+uint32 mode = ((uint32) R[18]) & 3;
+
+if (mode < vms_cm) mode = vms_cm;                       /* use least privileged */
+acc = (acc << mode) | PTE_V;                            /* access test - no FOR/W */
+if (Test (R[16], acc, NULL) ||                          /* test start, then end */
+    Test ((R[16] + R[17]) & M64, acc, NULL))
+    return 0;                                           /* either inaccessible */
+return 1;
+}
+
+/* VMS TIE support instructions */
+
+/* AMOVRR - atomic move register/register
+
+   R[16] = first datum    R[17] = first address    R[18]<1:0> = first length
+   R[19] = second datum   R[20] = second address   R[21]<1:0> = second length
+   Proceeds only if vax_flag is set (returns 0 otherwise); both
+   destinations are write-checked before either store, so the pair
+   of writes is all-or-nothing. */
+
+uint32 vms_amovrr (void)
+{
+uint32 lnt1 = ((uint32) R[18]) & 3;
+uint32 lnt2 = ((uint32) R[21]) & 3;
+
+if (vax_flag == 0) return 0;                            /* stop if !vax_flag */
+vax_flag = 0;                                           /* clear vax_flag */
+ReadUna (R[17], lnt_map[lnt1], cm_wacc);                /* verify writes */
+ReadUna (R[20], lnt_map[lnt2], cm_wacc);
+WriteUna (R[17], R[16], lnt_map[lnt1], cm_wacc);        /* do both writes */
+WriteUna (R[20], R[19], lnt_map[lnt2], cm_wacc);        /* 2nd datum is R[19]; R[21] is the length code (was written by mistake) */
+return 1;
+}
+
+/* AMOVRM - atomic move register/memory
+
+   R[16] = datum         R[17] = datum address    R[18]<1:0> = datum length
+   R[19] = source addr   R[20] = dest addr        R[21]<5:0> = longword count
+   Proceeds only if vax_flag is set (returns 0 otherwise).  The scalar
+   write and both ends of the block move are access-checked before any
+   write is done.
+   NOTE(review): only the first and last longwords of the block are
+   pretested; a fault on an intermediate page could leave a partial
+   move - confirm this matches the architected behavior. */
+
+uint32 vms_amovrm (void)
+{
+t_uint64 va, va1;
+uint32 lnt1 = ((uint32) R[18]) & 3;
+uint32 lnt2 = ((uint32) R[21]) & 0x3F;
+uint32 i, dat;
+
+if (vax_flag == 0) return 0;                            /* stop if !vax_flag */
+vax_flag = 0;                                           /* clear vax_flag */
+if (lnt2 && ((R[19] | R[20]) & 3)) ABORT (EXC_RSVO);    /* lw aligned? */
+ReadUna (R[17], lnt_map[lnt1], cm_wacc);                /* verify first write */
+if (lnt2) {                                             /* if second length */
+    va = (R[19] + (lnt2 << 2) - 4) & M64;               /* last source lw */
+    va1 = (R[20] + (lnt2 << 2) - 4) & M64;              /* last dest lw */
+    ReadL (R[19]);                                      /* verify source */
+    ReadL (va);
+    ReadAccL (R[20], cm_wacc);                          /* verify destination */
+    ReadAccL (va1, cm_wacc);
+    }
+WriteUna (R[17], R[16], lnt_map[lnt1], cm_wacc);        /* do first write */
+for (i = 0, va = R[19], va1 = R[20]; i < lnt2; i++) {   /* move data */
+    dat = ReadL (va);
+    WriteL (va1, dat);
+    va = (va + 4) & M64;
+    va1 = (va1 + 4) & M64;
+    }
+return 1;
+}
+
+/* Swap privileged context */
+
+/* Swap privileged context (SWPCTX)
+
+   R[16] = physical address of the new HWPCB, must be 128B aligned
+   Saves the stack pointers, AST state, PCC, and UNIQUE into the old
+   HWPCB, then loads stack pointers, PTBR, ASN, AST state, FEN, DATFX,
+   PCC, and UNIQUE from the new one.  All HWPCB accesses are physical. */
+
+void vms_swpctx (void)
+{
+t_uint64 val;
+uint32 tmp;
+
+if (R[16] & 0x7F) ABORT (EXC_RSVO);                     /* must be 128B aligned */
+WritePQ (vms_hwpcb + 0, SP);                            /* save stack ptrs */
+WritePQ (vms_hwpcb + 8, esp);
+WritePQ (vms_hwpcb + 16, ssp);
+WritePQ (vms_hwpcb + 24, usp);
+WritePQ (vms_hwpcb + 48, (vms_astsr << 4) | vms_asten); /* save AST */
+WritePQ (vms_hwpcb + 64, (pcc_h + pcc_l) & M32);        /* save PCC */
+WritePQ (vms_hwpcb + 72, vms_thread);                   /* save UNIQUE */
+vms_hwpcb = R[16];                                      /* new PCB */
+SP = ksp = ReadPQ (vms_hwpcb + 0);                      /* read stack ptrs */
+esp = ReadPQ (vms_hwpcb + 8);
+ssp = ReadPQ (vms_hwpcb + 16);
+usp = ReadPQ (vms_hwpcb + 24);
+val = ReadPQ (vms_hwpcb + 32) << VA_N_OFF;              /* read PTBR */
+if (val != vms_ptbr) tlb_ia (TLB_CI | TLB_CD);          /* if changed, zap TLB */
+vms_ptbr = val;
+tmp = ReadPL (vms_hwpcb + 40) & M16;                    /* read ASN */
+itlb_set_asn (tmp);
+dtlb_set_asn (tmp);
+tmp = ReadPL (vms_hwpcb + 48);                          /* read AST */
+vms_astsr = (tmp >> 4) & AST_MASK;                      /* separate ASTSR, ASTEN */
+vms_asten = tmp & AST_MASK;
+val = ReadPQ (vms_hwpcb + PCBV_FLAGS);                  /* read flags */
+fpen = ((uint32) val) & 1;                              /* set FEN */
+vms_datfx = ((uint32) (val >> 63)) & 1;                 /* set DATFX */
+tmp = ReadPL (vms_hwpcb + 64);                          /* read PCC - physical, like all other HWPCB accesses (was virtual ReadL) */
+pcc_h = (tmp - pcc_l) & M32;
+vms_thread = ReadPQ (vms_hwpcb + 72);                   /* read UNIQUE */
+return;
+}
+
+/* VMS interrupt or exception
+
+ Inputs:
+ vec = SCB vector
+ newmode = new mode (usually kernel)
+ newipl = new IPL
+ Outputs:
+ reason = possible processor halt
+*/
+
+t_stat vms_intexc (uint32 vec, uint32 newmode, uint32 newipl)
+{
+t_uint64 pa = (vms_scbb + vec) & ~0xF;                  /* phys addr of SCB vector */
+t_uint64 sav_ps = GET_PSV;                              /* old PS */
+uint32 wacc = ACC_W (newmode);                          /* write access in new mode */
+uint32 exc;
+
+vms_stkp[vms_cm] = SP;                                  /* save SP */
+SP = vms_stkp[newmode];                                 /* load new SP */
+sav_ps = sav_ps | ((SP & PSV_M_SPA) << PSV_V_SPA);      /* save SP align in old PS */
+SP = SP & ~PSV_M_SPA;                                   /* align SP */
+SP = (SP - VMS_L_STKF) & M64;                           /* make room for frame */
+if (exc = Test (SP, wacc, NULL)) {                      /* check frame start writable */
+    if (newmode == MODE_K) return STOP_KSNV;            /* error? stop if kernel */
+    ABORT1 (SP, exc + EXC_W);                           /* else, force fault */
+    }
+if (exc = Test (SP + VMS_L_STKF - 8, wacc, NULL)) {     /* check frame end writable */
+    if (newmode == MODE_K) return STOP_KSNV;
+    ABORT1 (SP + VMS_L_STKF - 8, exc + EXC_W);
+    }
+vms_cm = mmu_set_cm (newmode);                          /* switch mode */
+WriteQ (SP, R[2]);                                      /* save R2-R7 */
+WriteQ (SP + 8, R[3]);
+WriteQ (SP + 16, R[4]);
+WriteQ (SP + 24, R[5]);
+WriteQ (SP + 32, R[6]);
+WriteQ (SP + 40, R[7]);
+WriteQ (SP + 48, PC);                                   /* save PC */
+WriteQ (SP + 56, sav_ps);                               /* save PS */
+PC = R[2] = ReadPQ (pa);                                /* set new PC from SCB */
+R[3] = ReadPQ (pa + 8);                                 /* set argument */
+vms_ipl = newipl;                                       /* change IPL */
+vms_ps = vms_ps & ~PSV_M_SW;                            /* clear software PS bits */
+return SCPE_OK;
+}
+
+/* Memory management fault */
+
+t_stat vms_mm_intexc (uint32 vec, t_uint64 par2)
+{
+t_stat reason = vms_intexc (vec, MODE_K, vms_ipl);      /* build exception frame */
+
+R[4] = p1;                                              /* R[4] = faulting va */
+R[5] = par2;                                            /* R[5] = MME code */
+tlb_is (p1, TLB_CI | TLB_CD);                           /* invalidate TLB entry for va */
+return reason;                                          /* possible halt status */
+}
+
+/* Return from exception or interrupt */
+
+/* REI - pop the 64B exception frame (R2-R7, PC, PS), validate the new
+   PS in non-kernel modes, restore mode and IPL, and clear the VAX and
+   lock flags.  Returns STOP_KSNV if a kernel-mode frame is not
+   readable. */
+
+t_stat vms_rei (void)
+{
+t_uint64 t1, t2, t3, t4, t5, t6, t7, t8;
+uint32 newmode;
+
+if (SP & PSV_M_SPA) ABORT (EXC_RSVO);                   /* check alignment */
+if (vms_cm == MODE_K) {                                 /* in kernel mode? */
+    if (Test (SP, cm_racc, NULL)) return STOP_KSNV;     /* must be accessible */
+    if (Test (SP + VMS_L_STKF - 8, cm_racc, NULL)) return STOP_KSNV;
+    }
+t1 = ReadQ (SP);                                        /* pop stack */
+t2 = ReadQ (SP + 8);
+t3 = ReadQ (SP + 16);
+t4 = ReadQ (SP + 24);
+t5 = ReadQ (SP + 32);
+t6 = ReadQ (SP + 40);
+t7 = ReadQ (SP + 48);
+t8 = ReadQ (SP + 56);
+newmode = (((uint32) t8) >> PSV_V_CM) & PSV_M_CM;       /* get new mode (bitwise &; was &&, which gave only 0/1) */
+if ((vms_cm != MODE_K) &&                               /* not kernel? check new PS */
+    ((newmode < vms_cm) || (t8 & PSV_MBZ))) ABORT (EXC_RSVO);
+SP = (SP + VMS_L_STKF) | ((t8 >> PSV_V_SPA) & PSV_M_SPA);
+vms_stkp[vms_cm] = SP;                                  /* save SP */
+SP = vms_stkp[newmode];                                 /* load new SP */
+R[2] = t1;                                              /* restore R2-R7 */
+R[3] = t2;
+R[4] = t3;
+R[5] = t4;
+R[6] = t5;
+R[7] = t6;
+PC = t7 & ~3;                                           /* restore PC */
+vms_ps = ((uint32) t8) & PSV_MASK;                      /* restore PS */
+vms_cm = mmu_set_cm (newmode);                          /* switch modes */
+vms_ipl = (((uint32) t8) >> PSV_V_IPL) & PSV_M_IPL;     /* new IPL */
+vax_flag = 0;                                           /* clear vax, lock flags */
+lock_flag = 0;
+return SCPE_OK;
+}
+
+/* Unaligned read virtual - for VMS PALcode only
+
+ Inputs:
+ va = virtual address
+ lnt = length code (BWLQ)
+ acc = access code (includes FOR, FOW)
+ Output:
+ returned data, right justified
+*/
+
+t_uint64 ReadUna (t_uint64 va, uint32 lnt, uint32 acc)
+{
+t_uint64 pa, pa1, wl, wh;
+uint32 exc, bo, sc;
+
+if (exc = Test (va, acc, &pa))                          /* test, translate */
+    ABORT1 (va, exc + EXC_R);
+if ((pa & (lnt - 1)) == 0) {                            /* aligned? simple read */
+    if (lnt == L_QUAD) return ReadPQ (pa);              /* quad? */
+    if (lnt == L_LONG) return ReadPL (pa);              /* long? */
+    if (lnt == L_WORD) return ReadPW (pa);              /* word? */
+    return ReadPB (pa);                                 /* byte */
+    }
+if ((VA_GETOFF (va) + lnt) > VA_PAGSIZE) {              /* datum crosses page? */
+    if (exc = Test (va + 8, acc, &pa1))                 /* test, translate 2nd qw */
+        ABORT1 (va + 8, exc + EXC_R);
+    }                                                   /* NOTE(review): assumes va+8 lands in the next page when the datum crosses - confirm */
+else pa1 = (pa + 8) & PA_MASK;                          /* same page: next phys qw */
+bo = ((uint32) pa) & 7;                                 /* byte offset in qw */
+sc = bo << 3;                                           /* shift count in bits */
+wl = ReadPQ (pa);                                       /* get low qw */
+if (lnt == L_QUAD) {                                    /* qw unaligned? */
+    wh = ReadPQ (pa1);                                  /* get high qw */
+    return ((((wl >> sc) & (((t_uint64) M64) >> sc)) |  /* low bytes from wl, */
+        (wh << (64 - sc))) & M64);                      /* high bytes from wh */
+    }
+if (lnt == L_LONG) {                                    /* lw unaligned? */
+    if (bo <= 4) return ((wl >> sc) & M32);             /* all in one qw? */
+    wh = ReadPQ (pa1);                                  /* get high qw */
+    return ((((wl >> sc) & (M32 >> (sc - 32))) |        /* 64-sc bits from wl, */
+        (wh << (64 - sc))) & M32);                      /* rest from wh */
+    }
+if (bo < 7) return ((wl >> sc) & M16);                  /* wd, all in one qw? */
+wh = ReadPQ (pa1);                                      /* get hi qw, extract */
+return (((wl >> 56) & 0xFF) | ((wh & 0xFF) << 8));      /* wd split across qws */
+}
+
+/* Unaligned write virtual - for VMS PALcode only
+
+ Inputs:
+ va = virtual address
+ val = data to be written, right justified in 64b
+ lnt = length code (BWLQ)
+ acc = access code (includes FOW)
+ Output:
+ none
+*/
+
+void WriteUna (t_uint64 va, t_uint64 val, uint32 lnt, uint32 acc)
+{
+t_uint64 pa, pa1, wl, wh, mask;
+uint32 exc, bo, sc;
+
+if (exc = Test (va, acc, &pa))                          /* test, translate */
+    ABORT1 (va, exc + EXC_W);
+if ((pa & (lnt - 1)) == 0) {                            /* aligned? simple write */
+    if (lnt == L_QUAD) WritePQ (pa, val);               /* quad? */
+    else if (lnt == L_LONG) WritePL (pa, (uint32) val); /* long? */
+    else if (lnt == L_WORD) WritePW (pa, (uint32) val); /* word? */
+    else WritePB (pa, (uint32) val);                    /* byte */
+    return;
+    }
+if ((VA_GETOFF (va) + lnt) > VA_PAGSIZE) {              /* cross page? */
+    if (exc = Test (va + 8, acc, &pa1))                 /* test, translate */
+        ABORT1 (va + 8, exc + EXC_W);
+    }
+else pa1 = (pa + 8) & PA_MASK;                          /* not cross page */
+bo = ((uint32) pa) & 7;                                 /* byte offset in qw */
+sc = bo << 3;                                           /* shift count in bits */
+wl = ReadPQ (pa);                                       /* get low qw */
+if (lnt == L_QUAD) {                                    /* qw unaligned? */
+    val = val & M64;                                    /* mask data */
+    mask = ((t_uint64) M64) << sc;                      /* low qw mask */
+    wl = (wl & ~mask) | ((val << sc) & mask);           /* insert low */
+    wh = ReadPQ (pa1);                                  /* hi qw */
+    mask = ((t_uint64) M64) >> (64 - sc);               /* hi qw mask */
+    wh = (wh & ~mask) | ((val >> (64 - sc)) & mask);    /* insert high */
+    WritePQ (pa, wl);                                   /* write low */
+    WritePQ (pa1, wh);                                  /* write high (was pa, clobbering the low qw) */
+    }
+else if (lnt == L_LONG) {                               /* lw unaligned? */
+    val = val & M32;
+    mask = ((t_uint64) M32) << sc;                      /* low qw mask */
+    wl = (wl & ~mask) | (val << sc);                    /* insert low */
+    WritePQ (pa, wl);                                   /* write low */
+    if (bo >= 4) {                                      /* 2nd qw? */
+        wh = ReadPQ (pa1);                              /* read hi qw */
+        mask = ((t_uint64) M32) >> (64 - sc);           /* hi qw mask: sc-32 bits (was sc-32, wrong for bo=5,7) */
+        wh = (wh & ~mask) | (val >> (64 - sc));         /* insert high bits (matches ReadUna's wh << (64-sc)) */
+        WritePQ (pa1, wh);                              /* write hi */
+        }
+    }
+else {
+    val = val & M16;                                    /* mask data */
+    mask = ((t_uint64) M16) << sc;                      /* word, low qw mask */
+    wl = (wl & ~mask) | ((val & M16) << sc);            /* insert low */
+    WritePQ (pa, wl);                                   /* write low */
+    if (bo >= 7) {                                      /* 2nd qw? */
+        wh = ReadPQ (pa1);                              /* read hi */
+        mask = 0xFF;                                    /* hi qw mask */
+        wh = (wh & ~mask) | (val >> 8);                 /* insert high */
+        WritePQ (pa1, wh);                              /* write hi */
+        }
+    }
+return;
+}
+
+/* Test the accessibility of an address (VMS and UNIX PALcode only)
+
+ - In VMS, superpage is always 0
+ - In Unix, current mode is always kernel
+ - Hence, superpages are always accessible */
+
+uint32 Test (t_uint64 va, uint32 acc, t_uint64 *pa)
+{
+uint32 va_sext = VA_GETSEXT (va);                       /* sign-extension bits */
+uint32 vpn = VA_GETVPN (va);                            /* virtual page number */
+t_uint64 pte;
+uint32 exc;
+TLBENT *tlbp;
+
+if (!dmapen) {                                          /* mapping off? */
+    if (pa) *pa = va & PA_MASK;                         /* pa = va */
+    return 0;
+    }
+if ((va_sext != 0) && (va_sext != VA_M_SEXT))           /* invalid virt addr? */
+    return EXC_BVA;
+if ((mmu_dspage & SPEN_43) && (VPN_GETSP43 (vpn) == 2)) {
+    if (pa) *pa = va & SP43_MASK;                       /* 43b superpage? */
+    return 0;
+    }
+if ((mmu_dspage & SPEN_32) && (VPN_GETSP32 (vpn) == 0x1FFE)) {
+    if (pa) *pa = va & SP32_MASK;                       /* 32b superpage? */
+    return 0;
+    }
+if (!(tlbp = dtlb_lookup (vpn))) {                      /* lookup vpn; miss? */
+    if (exc = pal_find_pte (vpn, &pte)) return exc;     /* fetch pte; error? */
+    tlbp = dtlb_load (vpn, pte);                        /* load new entry */
+    }
+if (acc & ~tlbp->pte) return mm_exc (acc & ~tlbp->pte); /* any access bit missing? */
+if (pa) *pa = PHYS_ADDR (tlbp->pfn, va);                /* return phys addr */
+return 0;                                               /* ok */
+}
+
+/* TLB check - VMS PALcode only */
+
+uint32 tlb_check (t_uint64 va)
+{
+uint32 sext = VA_GETSEXT (va);                          /* sign-extension bits */
+uint32 vpn = VA_GETVPN (va);                            /* virtual page number */
+
+if ((sext != 0) && (sext != VA_M_SEXT))                 /* non-canonical va? */
+    return 0;
+if (itlb_lookup (vpn) != NULL)                          /* in instruction TLB? */
+    return 1;
+return (dtlb_lookup (vpn) != NULL)? 1: 0;               /* in data TLB? */
+}
+
+/* VMS 3-level PTE lookup
+
+ Inputs:
+ vpn = virtual page number (30b, sext)
+ *pte = pointer to pte to be returned
+ Output:
+ status = 0 for successful fill
+ EXC_ACV for ACV on intermediate level
+ EXC_TNV for TNV on intermediate level
+*/
+
+uint32 pal_find_pte_vms (uint32 vpn, t_uint64 *l3pte)
+{
+t_uint64 vptea, l1ptea, l2ptea, l3ptea, l1pte, l2pte;
+uint32 vpte_vpn;
+TLBENT *vpte_p;
+
+vptea = vms_vtbr | (((t_uint64) (vpn & VA_M_VPN)) << 3);/* try virtual lookup */
+vpte_vpn = VA_GETVPN (vptea);                           /* get vpte vpn */
+vpte_p = dtlb_lookup (vpte_vpn);                        /* get vpte tlb ptr (NULL on miss) */
+if ((vpte_p != NULL) && (vpte_p->tag == vpte_vpn) &&    /* TLB hit? (NULL check added) */
+    ((vpte_p->pte & (PTE_KRE|PTE_V)) == (PTE_KRE|PTE_V)))
+    l3ptea = vpte_p->pfn | VA_GETOFF (vptea);           /* phys addr of level 3 PTE */
+else {                                                  /* miss - walk tree physically */
+    l1ptea = vms_ptbr + VPN_GETLVL1 (vpn);              /* level 1 */
+    l1pte = ReadPQ (l1ptea);
+    if ((l1pte & PTE_V) == 0)
+        return ((l1pte & PTE_KRE)? EXC_TNV: EXC_ACV);
+    l2ptea = (l1pte & PFN_MASK) >> (PTE_V_PFN - VA_N_OFF);
+    l2ptea = l2ptea + VPN_GETLVL2 (vpn);                /* level 2 */
+    l2pte = ReadPQ (l2ptea);
+    if ((l2pte & PTE_V) == 0)
+        return ((l2pte & PTE_KRE)? EXC_TNV: EXC_ACV);
+    l3ptea = (l2pte & PFN_MASK) >> (PTE_V_PFN - VA_N_OFF);
+    l3ptea = l3ptea + VPN_GETLVL3 (vpn);                /* level 3 */
+    }
+*l3pte = ReadPQ (l3ptea);                               /* fetch level 3 PTE */
+return 0;
+}
+
+/* VMS PALcode reset */
+
+t_stat pal_proc_reset_vms (DEVICE *dptr)
+{
+mmu_ispage = 0;                                         /* superpages disabled */
+mmu_dspage = 0;
+vms_cm = mmu_set_cm (MODE_K);                           /* start in kernel mode */
+vms_ipl = IPL_1F;                                       /* at maximum IPL */
+vms_ps = 0;                                             /* clear PS and DATFX */
+vms_datfx = 0;
+vms_scbb = 0;                                           /* clear SCBB, PRBR, SCC */
+vms_prbr = 0;
+vms_scc = 0;
+vms_last_pcc = pcc_l;                                   /* sync cycle counter */
+pcc_enb = 1;
+pal_eval_intr = pal_eval_intr_vms;                      /* install VMS handlers */
+pal_proc_intr = pal_proc_intr_vms;
+pal_proc_trap = pal_proc_trap_vms;
+pal_proc_excp = pal_proc_excp_vms;
+pal_proc_inst = pal_proc_inst_vms;
+pal_find_pte = pal_find_pte_vms;                        /* VMS 3-level PTE walk */
+return SCPE_OK;
+}