diff --git a/README.md b/README.md index 7528baaf..bbb63867 100644 --- a/README.md +++ b/README.md @@ -121,6 +121,20 @@ Host platforms which have libSDL available can leverage this functionality. Asynchronous support exists for console I/O and most multiplexer devices. (Still experimental - not currently enabled by default) +#### Clock/Timer Enhancements + * Asynchronous clock ticks exist to better support modern processors + that have variable clock speeds. The initial clock calibration model + presumed a constant simulated instruction execution rate. + Modern processors have variable processor speeds, which breaks this + key assumption. + * Strategies to make up for missed clock ticks are now available + (independent of asynchronous tick generation). These strategies + generate catch-up clock ticks to keep the simulator's passage of + time consistent with wall clock time. Simulator time while idling + or throttling is now consistent. Reasonable idling behavior is + now possible without requiring that the host system clock tick be + 10ms or less. + #### Ethernet Transport Enhancements * UDP packet transport. Direct simulator connections to HECnet can be made without running a local packet bridge program. diff --git a/VAX/vax610_stddev.c b/VAX/vax610_stddev.c index 1436f32a..e2d7d9df 100644 --- a/VAX/vax610_stddev.c +++ b/VAX/vax610_stddev.c @@ -248,6 +248,8 @@ void iccs_wr (int32 data) { if ((data & CSR_IE) == 0) CLR_INT (CLK); +if (data & CSR_DONE) /* Interrupt Acked? */ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ clk_csr = (clk_csr & ~CLKCSR_RW) | (data & CLKCSR_RW); return; } diff --git a/VAX/vax630_stddev.c b/VAX/vax630_stddev.c index aa390704..d7476cad 100644 --- a/VAX/vax630_stddev.c +++ b/VAX/vax630_stddev.c @@ -226,6 +226,8 @@ void iccs_wr (int32 data) { if ((data & CSR_IE) == 0) CLR_INT (CLK); +if (data & CSR_DONE) /* Interrupt Acked? */ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ clk_csr = (clk_csr & ~CLKCSR_RW) | (data & CLKCSR_RW); return; } diff --git a/VAX/vax730_stddev.c b/VAX/vax730_stddev.c index b06d781e..5926e014 100644 --- a/VAX/vax730_stddev.c +++ b/VAX/vax730_stddev.c @@ -644,6 +644,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? */ if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? */ tmr_icr = icr_rd (TRUE); /* update itr */ } +if (val & CSR_DONE) /* Interrupt Acked? */ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */ tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */ (val & TMR_CSR_WR); @@ -859,7 +861,7 @@ int32 todr_rd (void) TOY *toy = (TOY *)clk_unit.filebuf; struct timespec base, now, val; -clock_gettime(CLOCK_REALTIME, &now); /* get curr time */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ base.tv_sec = toy->toy_gmtbase; base.tv_nsec = toy->toy_gmtbasemsec * 1000000; sim_timespec_diff (&val, &now, &base); @@ -874,8 +876,7 @@ struct timespec now, val, base; /* Save the GMT time when set value was 0 to record the base for future read operations in "battery backed-up" state */ -if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */ - return; /* error? */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ val.tv_sec = ((uint32)data) / 100; val.tv_nsec = (((uint32)data) % 100) * 10000000; sim_timespec_diff (&base, &now, &val); /* base = now - data */ diff --git a/VAX/vax750_stddev.c b/VAX/vax750_stddev.c index a61e9a71..9e62811c 100644 --- a/VAX/vax750_stddev.c +++ b/VAX/vax750_stddev.c @@ -666,6 +666,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? 
*/ if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? */ tmr_icr = icr_rd (TRUE); /* update itr */ } +if (val & CSR_DONE) /* Interrupt Acked? */ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */ tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */ (val & TMR_CSR_WR); @@ -895,7 +897,7 @@ int32 todr_rd (void) TOY *toy = (TOY *)clk_unit.filebuf; struct timespec base, now, val; -clock_gettime(CLOCK_REALTIME, &now); /* get curr time */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ base.tv_sec = toy->toy_gmtbase; base.tv_nsec = toy->toy_gmtbasemsec * 1000000; sim_timespec_diff (&val, &now, &base); @@ -911,8 +913,7 @@ struct timespec now, val, base; /* Save the GMT time when set value was 0 to record the base for future read operations in "battery backed-up" state */ -if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */ - return; /* error? */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ val.tv_sec = ((uint32)data) / 100; val.tv_nsec = (((uint32)data) % 100) * 10000000; sim_timespec_diff (&base, &now, &val); /* base = now - data */ diff --git a/VAX/vax780_stddev.c b/VAX/vax780_stddev.c index 4694d54f..a6bf3738 100644 --- a/VAX/vax780_stddev.c +++ b/VAX/vax780_stddev.c @@ -623,6 +623,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? */ if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? */ tmr_icr = icr_rd (); /* update itr */ } +if (val & CSR_DONE) /* Interrupt Acked? */ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */ tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */ (val & TMR_CSR_WR); @@ -835,7 +837,7 @@ int32 todr_rd (void) TOY *toy = (TOY *)clk_unit.filebuf; struct timespec base, now, val; -clock_gettime(CLOCK_REALTIME, &now); /* get curr time */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ base.tv_sec = toy->toy_gmtbase; base.tv_nsec = toy->toy_gmtbasemsec * 1000000; sim_timespec_diff (&val, &now, &base); @@ -851,8 +853,7 @@ struct timespec now, val, base; /* Save the GMT time when set value was 0 to record the base for future read operations in "battery backed-up" state */ -if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */ - return; /* error? */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ val.tv_sec = ((uint32)data) / 100; val.tv_nsec = (((uint32)data) % 100) * 10000000; sim_timespec_diff (&base, &now, &val); /* base = now - data */ diff --git a/VAX/vax860_stddev.c b/VAX/vax860_stddev.c index d6a1c1da..4178f036 100644 --- a/VAX/vax860_stddev.c +++ b/VAX/vax860_stddev.c @@ -747,6 +747,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? */ if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? */ tmr_icr = icr_rd (TRUE); /* update itr */ } +if (val & CSR_DONE) /* Interrupt Acked? 
*/ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */ tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */ (val & TMR_CSR_WR); @@ -962,7 +964,7 @@ int32 todr_rd (void) TOY *toy = (TOY *)clk_unit.filebuf; struct timespec base, now, val; -clock_gettime(CLOCK_REALTIME, &now); /* get curr time */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ base.tv_sec = toy->toy_gmtbase; base.tv_nsec = toy->toy_gmtbasemsec * 1000000; sim_timespec_diff (&val, &now, &base); @@ -977,8 +979,7 @@ struct timespec now, val, base; /* Save the GMT time when set value was 0 to record the base for future read operations in "battery backed-up" state */ -if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */ - return; /* error? */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ val.tv_sec = ((uint32)data) / 100; val.tv_nsec = (((uint32)data) % 100) * 10000000; sim_timespec_diff (&base, &now, &val); /* base = now - data */ diff --git a/VAX/vax_cpu.c b/VAX/vax_cpu.c index 0249f216..788dd9b0 100644 --- a/VAX/vax_cpu.c +++ b/VAX/vax_cpu.c @@ -428,12 +428,12 @@ MTAB cpu_mod[] = { }; DEBTAB cpu_deb[] = { - { "INTEXC", LOG_CPU_I }, - { "REI", LOG_CPU_R }, - { "CONTEXT", LOG_CPU_P }, - { "EVENT", SIM_DBG_EVENT }, - { "ACTIVATE", SIM_DBG_ACTIVATE }, - { "ASYNCH", SIM_DBG_AIO_QUEUE }, + { "INTEXC", LOG_CPU_I, "interrupt and exception activities" }, + { "REI", LOG_CPU_R, "REI activities" }, + { "CONTEXT", LOG_CPU_P, "context switching activities" }, + { "EVENT", SIM_DBG_EVENT, "event dispatch activities" }, + { "ACTIVATE", SIM_DBG_ACTIVATE, "queue insertion activities" }, + { "ASYNCH", SIM_DBG_AIO_QUEUE, "asynch queue activities" }, { NULL, 0 } }; diff --git a/VAX/vax_stddev.c b/VAX/vax_stddev.c index 97db3661..eb124cc3 100644 --- a/VAX/vax_stddev.c +++ b/VAX/vax_stddev.c @@ -299,6 +299,8 @@ void iccs_wr (int32 data) { if ((data & CSR_IE) == 0) CLR_INT (CLK); +if (data & CSR_DONE) /* Interrupt Acked? */ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ clk_csr = (clk_csr & ~CLKCSR_RW) | (data & CLKCSR_RW); return; } @@ -484,7 +486,7 @@ if (0 == todr_reg) { /* clock running? */ in the 32bit TODR. This is the 33bit value 0x100000000/100 to get seconds */ #define TOY_MAX_SECS (0x40000000/25) -clock_gettime(CLOCK_REALTIME, &now); /* get curr time */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ base.tv_sec = toy->toy_gmtbase; base.tv_nsec = toy->toy_gmtbasemsec * 1000000; sim_timespec_diff (&val, &now, &base); @@ -507,8 +509,7 @@ struct timespec now, val, base; /* Save the GMT time when set value was 0 to record the base for future read operations in "battery backed-up" state */ -if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */ - return; /* error? */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ val.tv_sec = ((uint32)data) / 100; val.tv_nsec = (((uint32)data) % 100) * 10000000; sim_timespec_diff (&base, &now, &val); /* base = now - data */ diff --git a/VAX/vax_watch.c b/VAX/vax_watch.c index 03ee3207..afbb1660 100644 --- a/VAX/vax_watch.c +++ b/VAX/vax_watch.c @@ -162,11 +162,13 @@ int32 wtc_rd (int32 pa) int32 rg = (pa >> 1) & 0xF; int32 val = 0; time_t curr; +struct timespec now; static int mdays[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; struct tm *ctm = NULL; if (rg < 10) { /* time reg? */ - curr = time (NULL); /* get curr time */ + sim_rtcn_get_time (&now, TMR_CLK); + curr = now.tv_sec; /* get curr time */ if (curr == (time_t) -1) /* error? 
*/ return 0; ctm = localtime (&curr); /* decompose */ @@ -296,7 +298,8 @@ return SCPE_OK; t_stat wtc_set (UNIT *uptr, int32 val, CONST char *cptr, void *desc) { -if (cptr != NULL) wtc_mode = strcmp(cptr, "STD"); +if (cptr != NULL) + wtc_mode = ((strcmp(cptr, "STD") != 0) ? WTC_MODE_VMS : WTC_MODE_STD); return SCPE_OK; } diff --git a/doc/simh.doc b/doc/simh.doc index 4eaac45a..89a1fadb 100644 Binary files a/doc/simh.doc and b/doc/simh.doc differ diff --git a/scp.c b/scp.c index 67a144db..47e6db9d 100644 --- a/scp.c +++ b/scp.c @@ -328,12 +328,65 @@ pthread_cond_t sim_tmxr_poll_cond = PTHREAD_COND_INITIALIZER; int32 sim_tmxr_poll_count; pthread_t sim_asynch_main_threadid; UNIT * volatile sim_asynch_queue; -UNIT * volatile sim_wallclock_queue; -UNIT * volatile sim_wallclock_entry; t_bool sim_asynch_enabled = TRUE; int32 sim_asynch_check; int32 sim_asynch_latency = 4000; /* 4 usec interrupt latency */ int32 sim_asynch_inst_latency = 20; /* assume 5 mip simulator */ + +int sim_aio_update_queue (void) +{ +int migrated = 0; + +if (AIO_QUEUE_VAL != QUEUE_LIST_END) { /* List !Empty */ + UNIT *q, *uptr; + int32 a_event_time; + do + q = AIO_QUEUE_VAL; + while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q)); /* Grab current queue */ + while (q != QUEUE_LIST_END) { /* List !Empty */ + sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Migrating Asynch event for %s after %d instructions\n", sim_uname(q), q->a_event_time); + ++migrated; + uptr = q; + q = q->a_next; + uptr->a_next = NULL; /* hygiene */ + if (uptr->a_activate_call != &sim_activate_notbefore) { + a_event_time = uptr->a_event_time-((sim_asynch_inst_latency+1)/2); + if (a_event_time < 0) + a_event_time = 0; + } + else + a_event_time = uptr->a_event_time; + uptr->a_activate_call (uptr, a_event_time); + if (uptr->a_check_completion) { + sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Calling Completion Check for asynch event on %s\n", sim_uname(uptr)); + uptr->a_check_completion (uptr); + } + } + } +return migrated; +} + +void sim_aio_activate (ACTIVATE_API caller, UNIT *uptr, int32 event_time) +{ +sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch event for %s after %d instructions\n", sim_uname(uptr), event_time); +if (uptr->a_next) { + uptr->a_activate_call = sim_activate_abs; + } +else { + UNIT *q; + uptr->a_event_time = event_time; + uptr->a_activate_call = caller; + do { + q = AIO_QUEUE_VAL; + uptr->a_next = q; /* Mark as on list */ + } while (q != AIO_QUEUE_SET(uptr, q)); + } +sim_asynch_check = 0; /* try to force check */ +if (sim_idle_wait) { + sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(uptr), event_time); + pthread_cond_signal (&sim_asynch_wake); + } +} #else t_bool sim_asynch_enabled = FALSE; #endif diff --git a/scp.h b/scp.h index d20239df..9f7d8161 100644 --- a/scp.h +++ b/scp.h @@ -281,6 +281,10 @@ extern t_addr sim_brk_match_addr; extern BRKTYPTAB *sim_brk_type_desc; /* type descriptions */ extern FILE *stdnul; extern t_bool sim_asynch_enabled; +#if defined(SIM_ASYNCH_IO) +int sim_aio_update_queue (void); +void sim_aio_activate (ACTIVATE_API caller, UNIT *uptr, int32 event_time); +#endif /* VM interface */ diff --git a/sim_defs.h b/sim_defs.h index f8d0f323..4c804c56 100644 --- a/sim_defs.h +++ b/sim_defs.h @@ -541,10 +541,10 @@ struct UNIT { void *up7; /* device specific */ void *up8; /* device specific */ void *tmxr; /* TMXR linkage */ + void (*cancel)(UNIT *); #ifdef SIM_ASYNCH_IO void (*a_check_completion)(UNIT *); t_bool (*a_is_active)(UNIT *); - void (*a_cancel)(UNIT 
*); UNIT *a_next; /* next asynch active */ int32 a_event_time; ACTIVATE_API a_activate_call; @@ -931,6 +931,8 @@ struct FILEREF { #if defined (SIM_ASYNCH_IO) #include +#define SIM_ASYNCH_CLOCKS 1 + extern pthread_mutex_t sim_asynch_lock; extern pthread_cond_t sim_asynch_wake; extern pthread_mutex_t sim_timer_lock; @@ -941,8 +943,6 @@ extern pthread_cond_t sim_tmxr_poll_cond; extern pthread_mutex_t sim_tmxr_poll_lock; extern pthread_t sim_asynch_main_threadid; extern UNIT * volatile sim_asynch_queue; -extern UNIT * volatile sim_wallclock_queue; -extern UNIT * volatile sim_wallclock_entry; extern volatile t_bool sim_idle_wait; extern int32 sim_asynch_check; extern int32 sim_asynch_latency; @@ -958,101 +958,35 @@ extern int32 sim_asynch_inst_latency; /* It is primarily used only used in debugging messages */ #define AIO_TLS #endif -#define AIO_QUEUE_CHECK(que, lock) \ - if (1) { \ - UNIT *_cptr; \ - if (lock) \ - pthread_mutex_lock (lock); \ - for (_cptr = que; \ - (_cptr != QUEUE_LIST_END); \ - _cptr = _cptr->next) \ - if (!_cptr->next) { \ - if (sim_deb) { \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Queue Corruption detected\n");\ - fclose(sim_deb); \ - } \ - sim_printf("Queue Corruption detected\n"); \ - abort(); \ - } \ - if (lock) \ - pthread_mutex_unlock (lock); \ - } else (void)0 +#define AIO_QUEUE_CHECK(que, lock) \ + do { \ + UNIT *_cptr; \ + if (lock) \ + pthread_mutex_lock (lock); \ + for (_cptr = que; \ + (_cptr != QUEUE_LIST_END); \ + _cptr = _cptr->next) \ + if (!_cptr->next) { \ + if (sim_deb) { \ + sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Queue Corruption detected\n");\ + fclose(sim_deb); \ + } \ + sim_printf("Queue Corruption detected\n"); \ + abort(); \ + } \ + if (lock) \ + pthread_mutex_unlock (lock); \ + } while (0) #define AIO_MAIN_THREAD (pthread_equal ( pthread_self(), sim_asynch_main_threadid )) #define AIO_LOCK \ pthread_mutex_lock(&sim_asynch_lock) #define AIO_UNLOCK \ pthread_mutex_unlock(&sim_asynch_lock) #define AIO_IS_ACTIVE(uptr) (((uptr)->a_is_active ? 
(uptr)->a_is_active (uptr) : FALSE) || ((uptr)->a_next)) -#if !defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) +#if defined(SIM_ASYNCH_MUX) #define AIO_CANCEL(uptr) \ - if ((uptr)->a_cancel) \ - (uptr)->a_cancel (uptr); \ - else \ - (void)0 -#endif /* !defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) */ -#if !defined(SIM_ASYNCH_MUX) && defined(SIM_ASYNCH_CLOCKS) -#define AIO_CANCEL(uptr) \ - if ((uptr)->a_cancel) \ - (uptr)->a_cancel (uptr); \ - else { \ - AIO_UPDATE_QUEUE; \ - if ((uptr)->a_next) { \ - UNIT *cptr; \ - pthread_mutex_lock (&sim_timer_lock); \ - if ((uptr) == sim_wallclock_queue) { \ - sim_wallclock_queue = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\ - sim_timer_event_canceled = TRUE; \ - pthread_cond_signal (&sim_timer_wake); \ - } \ - else \ - for (cptr = sim_wallclock_queue; \ - (cptr != QUEUE_LIST_END); \ - cptr = cptr->a_next) \ - if (cptr->a_next == (uptr)) { \ - cptr->a_next = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\ - break; \ - } \ - if ((uptr)->a_next == NULL) \ - (uptr)->a_due_time = (uptr)->a_usec_delay = 0; \ - else { \ - int tmr; \ - for (tmr=0; tmra_next; \ - (uptr)->a_next = NULL; \ - } \ - else \ - for (cptr = sim_clock_cosched_queue[tmr]; \ - (cptr != QUEUE_LIST_END); \ - cptr = cptr->a_next) \ - if (cptr->a_next == (uptr)) { \ - cptr->a_next = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - break; \ - } \ - if ((uptr)->a_next == NULL) { \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Clock Coscheduling Event for %s\n", sim_uname(uptr));\ - } \ - } \ - } \ - while (sim_timer_event_canceled) { \ - pthread_mutex_unlock (&sim_timer_lock); \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Waiting for Timer Event cancelation for %s\n", sim_uname(uptr));\ - sim_os_ms_sleep (0); \ - pthread_mutex_lock (&sim_timer_lock); \ - } \ - pthread_mutex_unlock (&sim_timer_lock); \ - } \ - } -#endif -#if defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) -#define AIO_CANCEL(uptr) \ - if ((uptr)->a_cancel) \ - (uptr)->a_cancel (uptr); \ + if ((uptr)->cancel) \ + (uptr)->cancel (uptr); \ else { \ if (((uptr)->dynflags & UNIT_TM_POLL) && \ !((uptr)->next) && !((uptr)->a_next)) { \ @@ -1061,92 +995,19 @@ extern int32 sim_asynch_inst_latency; (uptr)->a_poll_waiter_count = 0; \ } \ } -#endif /* defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) */ -#if defined(SIM_ASYNCH_MUX) && defined(SIM_ASYNCH_CLOCKS) +#endif /* defined(SIM_ASYNCH_MUX) */ +#if !defined(AIO_CANCEL) #define AIO_CANCEL(uptr) \ - if ((uptr)->a_cancel) \ - (uptr)->a_cancel (uptr); \ - else { \ - AIO_UPDATE_QUEUE; \ - if (((uptr)->dynflags & UNIT_TM_POLL) && \ - !((uptr)->next) && !((uptr)->a_next)) { \ - (uptr)->a_polling_now = FALSE; \ - sim_tmxr_poll_count -= (uptr)->a_poll_waiter_count; \ - (uptr)->a_poll_waiter_count = 0; \ - } \ - if ((uptr)->a_next) { \ - UNIT *cptr; \ - pthread_mutex_lock (&sim_timer_lock); \ - if ((uptr) == sim_wallclock_queue) { \ - sim_wallclock_queue = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\ - sim_timer_event_canceled = TRUE; \ - pthread_cond_signal (&sim_timer_wake); \ - } \ - else \ - for (cptr = sim_wallclock_queue; \ - (cptr != QUEUE_LIST_END); \ - cptr = cptr->a_next) \ - if (cptr->a_next == (uptr)) { \ - cptr->a_next = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - 
sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\ - break; \ - } \ - if ((uptr)->a_next == NULL) \ - (uptr)->a_due_time = (uptr)->a_usec_delay = 0; \ - else { \ - if ((uptr) == sim_clock_cosched_queue) { \ - sim_clock_cosched_queue = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - } \ - else \ - for (cptr = sim_clock_cosched_queue; \ - (cptr != QUEUE_LIST_END); \ - cptr = cptr->a_next) \ - if (cptr->a_next == (uptr)) { \ - cptr->a_next = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - break; \ - } \ - if ((uptr)->a_next == NULL) { \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Clock Coscheduling Event for %s\n", sim_uname(uptr));\ - } \ - } \ - while (sim_timer_event_canceled) { \ - pthread_mutex_unlock (&sim_timer_lock); \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Waiting for Timer Event cancelation for %s\n", sim_uname(uptr));\ - sim_os_ms_sleep (0); \ - pthread_mutex_lock (&sim_timer_lock); \ - } \ - pthread_mutex_unlock (&sim_timer_lock); \ - } \ - } -#endif + if ((uptr)->cancel) \ + (uptr)->cancel (uptr) +#endif /* !defined(AIO_CANCEL) */ #if defined(SIM_ASYNCH_CLOCKS) #define AIO_RETURN_TIME(uptr) \ - if (1) { \ - pthread_mutex_lock (&sim_timer_lock); \ - for (cptr = sim_wallclock_queue; \ - cptr != QUEUE_LIST_END; \ - cptr = cptr->a_next) \ - if ((uptr) == cptr) { \ - double inst_per_sec = sim_timer_inst_per_sec (); \ - int32 result; \ - \ - result = (int32)(((uptr)->a_due_time - sim_timenow_double())*inst_per_sec);\ - if (result < 0) \ - result = 0; \ - pthread_mutex_unlock (&sim_timer_lock); \ - return result + 1; \ - } \ - pthread_mutex_unlock (&sim_timer_lock); \ - if ((uptr)->a_next) /* On asynch queue? */ \ - return (uptr)->a_event_time + 1; \ - } \ - else \ - (void)0 + do { \ + int32 rtime = sim_timer_activate_time (uptr); \ + if (rtime >= 0) \ + return rtime; \ + } while (0) #else #define AIO_RETURN_TIME(uptr) (void)0 #endif @@ -1188,31 +1049,25 @@ extern int32 sim_asynch_inst_latency; /* which avoids the potential ABA issues. */ #define AIO_QUEUE_MODE "Lock free asynchronous event queue access" #define AIO_INIT \ - if (1) { \ + do { \ int tmr; \ sim_asynch_main_threadid = pthread_self(); \ /* Empty list/list end uses the point value (void *)1. 
\ This allows NULL in an entry's a_next pointer to \ indicate that the entry is not currently in any list */ \ sim_asynch_queue = QUEUE_LIST_END; \ - sim_wallclock_queue = QUEUE_LIST_END; \ - sim_wallclock_entry = NULL; \ for (tmr=0; tmra_event_time);\ - uptr = q; \ - q = q->a_next; \ - uptr->a_next = NULL; /* hygiene */ \ - if (uptr->a_activate_call != &sim_activate_notbefore) { \ - a_event_time = uptr->a_event_time-((sim_asynch_inst_latency+1)/2); \ - if (a_event_time < 0) \ - a_event_time = 0; \ - } \ - else \ - a_event_time = uptr->a_event_time; \ - uptr->a_activate_call (uptr, a_event_time); \ - if (uptr->a_check_completion) { \ - sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Calling Completion Check for asynch event on %s\n", sim_uname(uptr));\ - uptr->a_check_completion (uptr); \ - } \ - } \ - } else (void)0 +#define AIO_UPDATE_QUEUE sim_aio_update_queue () #define AIO_ACTIVATE(caller, uptr, event_time) \ if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \ - UNIT *ouptr = (uptr); \ - sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch event for %s after %d instructions\n", sim_uname(ouptr), event_time);\ - if (ouptr->a_next) { \ - ouptr->a_activate_call = sim_activate_abs; \ - } else { \ - UNIT *q, *qe; \ - ouptr->a_event_time = event_time; \ - ouptr->a_activate_call = (ACTIVATE_API)&caller; \ - ouptr->a_next = QUEUE_LIST_END; /* Mark as on list */ \ - do { \ - do \ - q = AIO_QUEUE_VAL; \ - while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q));/* Grab current list */\ - for (qe = ouptr; qe->a_next != QUEUE_LIST_END; qe = qe->a_next); \ - qe->a_next = q; /* append current list */\ - do \ - q = AIO_QUEUE_VAL; \ - while (q != AIO_QUEUE_SET(ouptr, q)); \ - ouptr = q; \ - } while (ouptr != QUEUE_LIST_END); \ - } \ - sim_asynch_check = 0; /* try to force check */ \ - if (sim_idle_wait) { \ - sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(ouptr), event_time);\ - pthread_cond_signal (&sim_asynch_wake); \ - } \ + sim_aio_activate ((ACTIVATE_API)caller, uptr, event_time); \ return SCPE_OK; \ } else (void)0 -#define AIO_ACTIVATE_LIST(caller, list, event_time) \ - if (list) { \ - UNIT *ouptr, *q, *qe; \ - sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch events for %s after %d instructions\n", sim_uname(list), event_time);\ - for (qe=(list); qe->a_next != QUEUE_LIST_END;) { \ - qe->a_event_time = event_time; \ - qe->a_activate_call = (ACTIVATE_API)&caller; \ - qe = qe->a_next; \ - } \ - qe->a_event_time = event_time; \ - qe->a_activate_call = (ACTIVATE_API)&caller; \ - ouptr = (list); \ - do { \ - do \ - q = AIO_QUEUE_VAL; \ - while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q));/* Grab current list */ \ - for (qe = ouptr; qe->a_next != QUEUE_LIST_END; qe = qe->a_next); \ - qe->a_next = q; /* append current list */ \ - do \ - q = AIO_QUEUE_VAL; \ - while (q != AIO_QUEUE_SET(ouptr, q)); \ - ouptr = q; \ - } while (ouptr != QUEUE_LIST_END); \ - sim_asynch_check = 0; /* try to force check */ \ - if (sim_idle_wait) { \ - sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(ouptr), event_time);\ - pthread_cond_signal (&sim_asynch_wake); \ - } \ - } else (void)0 #else /* !USE_AIO_INTRINSICS */ /* This approach uses a pthread mutex to manage access to the link list */ /* head sim_asynch_queue. 
It will always work, but may be slower than the */ /* lock free approach when using USE_AIO_INTRINSICS */ #define AIO_QUEUE_MODE "Lock based asynchronous event queue access" #define AIO_INIT \ - if (1) { \ + do { \ int tmr; \ pthread_mutexattr_t attr; \ \ @@ -1327,26 +1103,20 @@ extern int32 sim_asynch_inst_latency; This allows NULL in an entry's a_next pointer to \ indicate that the entry is not currently in any list */ \ sim_asynch_queue = QUEUE_LIST_END; \ - sim_wallclock_queue = QUEUE_LIST_END; \ - sim_wallclock_entry = NULL; \ for (tmr=0; tmra_check_completion (uptr); \ } \ AIO_LOCK; \ - } \ + } \ AIO_UNLOCK; \ - } else (void)0 + } while (0) #define AIO_ACTIVATE(caller, uptr, event_time) \ if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \ sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch event for %s after %d instructions\n", sim_uname(uptr), event_time);\ @@ -1392,40 +1162,19 @@ extern int32 sim_asynch_inst_latency; sim_asynch_check = 0; \ return SCPE_OK; \ } else (void)0 -#define AIO_ACTIVATE_LIST(caller, list, event_time) \ - if (list) { \ - UNIT *qe; \ - sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch events for %s after %d instructions\n", sim_uname(list), event_time);\ - for (qe=list; qe->a_next != QUEUE_LIST_END;) { \ - qe->a_event_time = event_time; \ - qe->a_activate_call = (ACTIVATE_API)&caller; \ - qe = qe->a_next; \ - } \ - qe->a_event_time = event_time; \ - qe->a_activate_call = (ACTIVATE_API)&caller; \ - AIO_LOCK; \ - qe->a_next = sim_asynch_queue; \ - sim_asynch_queue = list; \ - sim_asynch_check = 0; /* try to force check */ \ - if (sim_idle_wait) { \ - sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(list), event_time);\ - pthread_cond_signal (&sim_asynch_wake); \ - } \ - AIO_UNLOCK; \ - } else (void)0 #endif /* USE_AIO_INTRINSICS */ #define AIO_VALIDATE if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) {sim_printf("Improper thread context for operation\n"); abort();} #define AIO_CHECK_EVENT \ if (0 > --sim_asynch_check) { \ AIO_UPDATE_QUEUE; \ sim_asynch_check = sim_asynch_inst_latency; \ - } else (void)0 + } else (void)0 #define AIO_SET_INTERRUPT_LATENCY(instpersec) \ - if (1) { \ + do { \ sim_asynch_inst_latency = (int32)((((double)(instpersec))*sim_asynch_latency)/1000000000);\ if (sim_asynch_inst_latency == 0) \ sim_asynch_inst_latency = 1; \ - } else (void)0 + } while (0) #else /* !SIM_ASYNCH_IO */ #define AIO_QUEUE_MODE "Asynchronous I/O is not available" #define AIO_UPDATE_QUEUE @@ -1441,7 +1190,9 @@ extern int32 sim_asynch_inst_latency; #define AIO_EVENT_BEGIN(uptr) #define AIO_EVENT_COMPLETE(uptr, reason) #define AIO_IS_ACTIVE(uptr) FALSE -#define AIO_CANCEL(uptr) +#define AIO_CANCEL(uptr) \ + if ((uptr)->cancel) \ + (uptr)->cancel (uptr) #define AIO_SET_INTERRUPT_LATENCY(instpersec) #define AIO_TLS #endif /* SIM_ASYNCH_IO */ diff --git a/sim_disk.c b/sim_disk.c index b946f22a..10bf33ec 100644 --- a/sim_disk.c +++ b/sim_disk.c @@ -467,7 +467,7 @@ if (ctx->asynch_io) { } uptr->a_check_completion = _disk_completion_dispatch; uptr->a_is_active = _disk_is_active; -uptr->a_cancel = _disk_cancel; +uptr->cancel = _disk_cancel; return SCPE_OK; #endif } diff --git a/sim_tape.c b/sim_tape.c index a1d66c65..2b769032 100644 --- a/sim_tape.c +++ b/sim_tape.c @@ -388,7 +388,7 @@ if (ctx->asynch_io) { } uptr->a_check_completion = _tape_completion_dispatch; uptr->a_is_active = _tape_is_active; -uptr->a_cancel = _tape_cancel; +uptr->cancel = _tape_cancel; return 
SCPE_OK; #endif } diff --git a/sim_timer.c b/sim_timer.c index 70a524da..18181773 100644 --- a/sim_timer.c +++ b/sim_timer.c @@ -63,7 +63,6 @@ sim_timer_init - initialize timing system sim_rtc_init - initialize calibration sim_rtc_calb - calibrate clock - sim_timer_init - initialize timing system sim_idle - virtual machine idle sim_os_msec - return elapsed time in msec sim_os_sleep - sleep specified number of seconds @@ -85,6 +84,15 @@ #include #include +#define SIM_INTERNAL_CLK (SIM_NTIMERS+(1<<30)) +#define SIM_INTERNAL_UNIT sim_timer_units[SIM_NTIMERS] +#ifndef MIN +#define MIN(a,b) (((a) < (b)) ? (a) : (b)) +#endif + +//#define MS_MIN_GRANULARITY 20 +#define MS_MIN_GRANULARITY 1 + t_bool sim_idle_enab = FALSE; /* global flag */ volatile t_bool sim_idle_wait = FALSE; /* global flag */ @@ -92,9 +100,11 @@ static int32 sim_calb_tmr = -1; /* the system calibrated tim static uint32 sim_idle_rate_ms = 0; static uint32 sim_os_sleep_min_ms = 0; +static uint32 sim_os_sleep_inc_ms = 0; static uint32 sim_os_clock_resoluton_ms = 0; +static uint32 sim_os_tick_hz = 0; static uint32 sim_idle_stable = SIM_IDLE_STDFLT; -static t_bool sim_idle_idled = FALSE; +static uint32 sim_idle_calib_pct = 0; static uint32 sim_throt_ms_start = 0; static uint32 sim_throt_ms_stop = 0; static uint32 sim_throt_type = 0; @@ -104,13 +114,18 @@ static double sim_throt_cps; static double sim_throt_inst_start; static uint32 sim_throt_sleep_time = 0; static int32 sim_throt_wait = 0; -static UNIT *sim_clock_unit[SIM_NTIMERS] = {NULL}; -UNIT * volatile sim_clock_cosched_queue[SIM_NTIMERS] = {NULL}; -t_bool sim_asynch_timer = +static UNIT *sim_clock_unit[SIM_NTIMERS+1] = {NULL}; +UNIT * volatile sim_clock_cosched_queue[SIM_NTIMERS+1] = {NULL}; +static int32 sim_cosched_interval[SIM_NTIMERS+1]; +static t_bool sim_catchup_ticks = FALSE; +#if defined (SIM_ASYNCH_CLOCKS) && !defined (SIM_ASYNCH_IO) +#undef SIM_ASYNCH_CLOCKS +#endif +t_bool sim_asynch_timer = FALSE; + #if defined (SIM_ASYNCH_CLOCKS) - TRUE; -#else - FALSE; +UNIT * volatile sim_wallclock_queue = QUEUE_LIST_END; +UNIT * volatile sim_wallclock_entry = NULL; #endif t_stat sim_throt_svc (UNIT *uptr); @@ -123,12 +138,14 @@ t_stat sim_timer_tick_svc (UNIT *uptr); #define DBG_CAL 0x010 /* calibration activities */ #define DBG_TIM 0x020 /* timer thread activities */ #define DBG_THR 0x040 /* throttle activities */ +#define DBG_ACK 0x080 /* interrupt acknowledgement activities */ DEBTAB sim_timer_debug[] = { {"TRACE", DBG_TRC, "Trace routine calls"}, {"IDLE", DBG_IDL, "Idling activities"}, {"QUEUE", DBG_QUE, "Event queuing activities"}, + {"IACK", DBG_ACK, "interrupt acknowledgement activities"}, {"CALIB", DBG_CAL, "Calibration activities"}, - {"TIME", DBG_TIM, "Activation an scheduling activities"}, + {"TIME", DBG_TIM, "Activation and scheduling activities"}, {"THROT", DBG_THR, "Throttling activities"}, {"MUX", DBG_MUX, "Tmxr scheduling activities"}, {0} @@ -141,6 +158,10 @@ uint32 start_time = sim_os_msec(); struct timespec done_time; t_bool timedout = FALSE; +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +msec = MS_MIN_GRANULARITY*((msec+MS_MIN_GRANULARITY-1)/MS_MIN_GRANULARITY); +#endif + clock_gettime(CLOCK_REALTIME, &done_time); done_time.tv_sec += (msec/1000); done_time.tv_nsec += 1000000*(msec%1000); @@ -219,6 +240,25 @@ return SCPE_OK; #endif #endif /* defined(USE_READER_THREAD) */ +#define sleep1Samples 100 + +static uint32 _compute_minimum_sleep (void) +{ +uint32 i, tot, tim; + +SIM_IDLE_MS_SLEEP (1); /* Start sampling on a tick boundary */ +for (i = 
0, tot = 0; i < sleep1Samples; i++) + tot += SIM_IDLE_MS_SLEEP (1); +tim = (tot + (sleep1Samples - 1)) / sleep1Samples; +sim_os_sleep_min_ms = tim; +SIM_IDLE_MS_SLEEP (1); /* Start sampling on a tick boundary */ +for (i = 0, tot = 0; i < sleep1Samples; i++) + tot += SIM_IDLE_MS_SLEEP (sim_os_sleep_min_ms + 1); +tim = (tot + (sleep1Samples - 1)) / sleep1Samples; +sim_os_sleep_inc_ms = tim - sim_os_sleep_min_ms; +return sim_os_sleep_min_ms; +} + /* OS-dependent timer and clock routines */ /* VMS */ @@ -263,6 +303,9 @@ for (i = 0; i < 64; i++) { /* 64b quo */ quo = quo | 1; /* set quo bit */ } } +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +quo = (quo/MS_MIN_GRANULARITY)*MS_MIN_GRANULARITY; +#endif return quo; } @@ -274,12 +317,7 @@ return; uint32 sim_os_ms_sleep_init (void) { -#if defined (__VAX) -sim_os_sleep_min_ms = 10; /* VAX/VMS is 10ms */ -#else -sim_os_sleep_min_ms = 1; /* Alpha/VMS is 1ms */ -#endif -return sim_os_sleep_min_ms; +return _compute_minimum_sleep (); } uint32 sim_os_ms_sleep (unsigned int msec) @@ -289,6 +327,10 @@ uint32 qtime[2]; int32 nsfactor = -10000; static int32 zero = 0; +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +msec = MS_MIN_GRANULARITY*((msec+MS_MIN_GRANULARITY-1)/MS_MIN_GRANULARITY); +#endif + lib$emul (&msec, &nsfactor, &zero, qtime); sys$setimr (2, qtime, 0, 0); sys$waitfr (2); @@ -320,9 +362,13 @@ const t_bool rtc_avail = TRUE; uint32 sim_os_msec (void) { -if (sim_idle_rate_ms) - return timeGetTime (); -else return GetTickCount (); +uint32 t = (sim_idle_rate_ms ? timeGetTime () : GetTickCount ()); + +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +t = (t/MS_MIN_GRANULARITY)*MS_MIN_GRANULARITY; +#endif + +return t; } void sim_os_sleep (unsigned int sec) @@ -343,24 +389,23 @@ TIMECAPS timers; if (timeGetDevCaps (&timers, sizeof (timers)) != TIMERR_NOERROR) return 0; -sim_os_sleep_min_ms = timers.wPeriodMin; -if ((timers.wPeriodMin == 0) || (timers.wPeriodMin > SIM_IDLE_MAX)) +if (timers.wPeriodMin == 0) return 0; if (timeBeginPeriod (timers.wPeriodMin) != TIMERR_NOERROR) return 0; atexit (sim_timer_exit); -Sleep (1); -Sleep (1); -Sleep (1); -Sleep (1); -Sleep (1); -return sim_os_sleep_min_ms; /* sim_idle_rate_ms */ +/* return measured actual minimum sleep time */ +return _compute_minimum_sleep (); } uint32 sim_os_ms_sleep (unsigned int msec) { uint32 stime = sim_os_msec(); +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +msec = MS_MIN_GRANULARITY*((msec+MS_MIN_GRANULARITY-1)/MS_MIN_GRANULARITY); +#endif + Sleep (msec); return sim_os_msec () - stime; } @@ -431,6 +476,9 @@ unsigned long millis; Microseconds (&macMicros); micros = *((unsigned long long *) &macMicros); millis = micros / 1000LL; +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +millis = (millis/MS_MIN_GRANULARITY)*MS_MIN_GRANULARITY; +#endif return (uint32) millis; } @@ -442,7 +490,7 @@ return; uint32 sim_os_ms_sleep_init (void) { -return sim_os_sleep_min_ms = 1; +return _compute_minimum_sleep (); } uint32 sim_os_ms_sleep (unsigned int milliseconds) @@ -450,6 +498,10 @@ uint32 sim_os_ms_sleep (unsigned int milliseconds) uint32 stime = sim_os_msec (); struct timespec treq; +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +milliseconds = MS_MIN_GRANULARITY*((milliseconds+MS_MIN_GRANULARITY-1)/MS_MIN_GRANULARITY); +#endif + treq.tv_sec = milliseconds / MILLIS_PER_SEC; treq.tv_nsec = (milliseconds % MILLIS_PER_SEC) * NANOS_PER_MILLI; (void) nanosleep (&treq, NULL); @@ -460,11 +512,10 @@ return sim_os_msec () - 
stime; int clock_gettime(int clk_id, struct timespec *tp) { struct timeval cur; -struct timezone foo; if (clk_id != CLOCK_REALTIME) return -1; -gettimeofday (&cur, &foo); +gettimeofday (&cur, NULL); tp->tv_sec = cur.tv_sec; tp->tv_nsec = cur.tv_usec*1000; return 0; @@ -480,7 +531,6 @@ return 0; #include #define NANOS_PER_MILLI 1000000 #define MILLIS_PER_SEC 1000 -#define sleep1Samples 100 const t_bool rtc_avail = TRUE; @@ -492,6 +542,9 @@ uint32 msec; gettimeofday (&cur, &foo); msec = (((uint32) cur.tv_sec) * 1000) + (((uint32) cur.tv_usec) / 1000); +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +msec = (msec/MS_MIN_GRANULARITY)*MS_MIN_GRANULARITY; +#endif return msec; } @@ -503,21 +556,9 @@ return; uint32 sim_os_ms_sleep_init (void) { -uint32 i, t1, t2, tot, tim; - -SIM_IDLE_MS_SLEEP (1); /* Start sampling on a tick boundary */ -for (i = 0, tot = 0; i < sleep1Samples; i++) { - t1 = sim_os_msec (); - SIM_IDLE_MS_SLEEP (1); - t2 = sim_os_msec (); - tot += (t2 - t1); - } -tim = (tot + (sleep1Samples - 1)) / sleep1Samples; -sim_os_sleep_min_ms = tim; -if (tim > SIM_IDLE_MAX) - tim = 0; -return tim; +return _compute_minimum_sleep (); } + #if !defined(_POSIX_SOURCE) #ifdef NEED_CLOCK_GETTIME typedef int clockid_t; @@ -541,6 +582,10 @@ uint32 sim_os_ms_sleep (unsigned int milliseconds) uint32 stime = sim_os_msec (); struct timespec treq; +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +milliseconds = MS_MIN_GRANULARITY*((milliseconds+MS_MIN_GRANULARITY-1)/MS_MIN_GRANULARITY); +#endif + treq.tv_sec = milliseconds / MILLIS_PER_SEC; treq.tv_nsec = (milliseconds % MILLIS_PER_SEC) * NANOS_PER_MILLI; (void) nanosleep (&treq, NULL); @@ -608,7 +653,17 @@ while (diff->tv_nsec > 1000000000) { } } -#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) +/* Forward declarations */ + +static double _timespec_to_double (struct timespec *time); +static void _double_to_timespec (struct timespec *time, double dtime); +static t_bool _rtcn_tick_catchup_check (int32 tmr, int32 time); +static void _rtcn_configure_calibrated_clock (int32 newtmr); +static void _sim_coschedule_cancel(UNIT *uptr); +static void _sim_wallclock_cancel (UNIT *uptr); +static t_bool _sim_wallclock_is_active (UNIT *uptr); + +#if defined(SIM_ASYNCH_CLOCKS) static int sim_timespec_compare (struct timespec *a, struct timespec *b) { while (a->tv_nsec > 1000000000) { @@ -630,34 +685,56 @@ if (a->tv_nsec > b->tv_nsec) else return 0; } -#endif /* defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) */ +#endif /* defined(SIM_ASYNCH_CLOCKS) */ /* OS independent clock calibration package */ -static int32 rtc_ticks[SIM_NTIMERS] = { 0 }; /* ticks */ -static int32 rtc_hz[SIM_NTIMERS] = { 0 }; /* tick rate */ -static uint32 rtc_rtime[SIM_NTIMERS] = { 0 }; /* real time */ -static uint32 rtc_vtime[SIM_NTIMERS] = { 0 }; /* virtual time */ -static double rtc_gtime[SIM_NTIMERS] = { 0 }; /* instruction time */ -static uint32 rtc_nxintv[SIM_NTIMERS] = { 0 }; /* next interval */ -static int32 rtc_based[SIM_NTIMERS] = { 0 }; /* base delay */ -static int32 rtc_currd[SIM_NTIMERS] = { 0 }; /* current delay */ -static int32 rtc_initd[SIM_NTIMERS] = { 0 }; /* initial delay */ -static uint32 rtc_elapsed[SIM_NTIMERS] = { 0 }; /* sec since init */ -static uint32 rtc_calibrations[SIM_NTIMERS] = { 0 }; /* calibration count */ -static double rtc_clock_skew_max[SIM_NTIMERS] = { 0 }; /* asynchronous max skew */ +static int32 rtc_ticks[SIM_NTIMERS+1] = { 0 }; /* ticks */ +static uint32 rtc_hz[SIM_NTIMERS+1] = { 0 }; /* tick rate */ +static uint32 
rtc_rtime[SIM_NTIMERS+1] = { 0 }; /* real time */ +static uint32 rtc_vtime[SIM_NTIMERS+1] = { 0 }; /* virtual time */ +static double rtc_gtime[SIM_NTIMERS+1] = { 0 }; /* instruction time */ +static uint32 rtc_nxintv[SIM_NTIMERS+1] = { 0 }; /* next interval */ +static int32 rtc_based[SIM_NTIMERS+1] = { 0 }; /* base delay */ +static int32 rtc_currd[SIM_NTIMERS+1] = { 0 }; /* current delay */ +static int32 rtc_initd[SIM_NTIMERS+1] = { 0 }; /* initial delay */ +static uint32 rtc_elapsed[SIM_NTIMERS+1] = { 0 }; /* sec since init */ +static uint32 rtc_calibrations[SIM_NTIMERS+1] = { 0 }; /* calibration count */ +static double rtc_clock_skew_max[SIM_NTIMERS+1] = { 0 }; /* asynchronous max skew */ +static double rtc_clock_start_gtime[SIM_NTIMERS+1] = { 0 };/* reference instruction time for clock */ +static double rtc_clock_tick_size[SIM_NTIMERS+1] = { 0 }; /* 1/hz */ +static uint32 rtc_calib_initializations[SIM_NTIMERS+1] = { 0 };/* Initialization Count */ +static double rtc_calib_tick_time[SIM_NTIMERS+1] = { 0 }; /* ticks time */ +static double rtc_calib_tick_time_tot[SIM_NTIMERS+1] = { 0 };/* ticks time - total*/ +static uint32 rtc_calib_ticks_acked[SIM_NTIMERS+1] = { 0 };/* ticks Acked */ +static uint32 rtc_calib_ticks_acked_tot[SIM_NTIMERS+1] = { 0 };/* ticks Acked - total */ +static uint32 rtc_clock_ticks[SIM_NTIMERS+1] = { 0 };/* ticks delivered since catchup base */ +static uint32 rtc_clock_ticks_tot[SIM_NTIMERS+1] = { 0 };/* ticks delivered since catchup base - total */ +static double rtc_clock_catchup_base_time[SIM_NTIMERS+1] = { 0 };/* reference time for catchup ticks */ +static uint32 rtc_clock_catchup_ticks[SIM_NTIMERS+1] = { 0 };/* Record of catchups */ +static uint32 rtc_clock_catchup_ticks_tot[SIM_NTIMERS+1] = { 0 };/* Record of catchups - total */ +static t_bool rtc_clock_catchup_pending[SIM_NTIMERS+1] = { 0 };/* clock tick catchup pending */ +static t_bool rtc_clock_catchup_eligible[SIM_NTIMERS+1] = { 0 };/* clock tick catchup eligible */ +static uint32 rtc_clock_time_idled[SIM_NTIMERS+1] = { 0 };/* total time idled */ +static uint32 rtc_clock_time_idled_last[SIM_NTIMERS+1] = { 0 };/* total time idled */ -UNIT sim_timer_units[SIM_NTIMERS+2]; /* one for each timer and one for throttle */ - /* plus one for an internal clock if no clocks are registered */ +UNIT sim_timer_units[SIM_NTIMERS+1]; /* one for each timer and one for an */ + /* internal clock if no clocks are registered */ +UNIT sim_tick_units[SIM_NTIMERS]; /* one for each timer to schedule asynchronously */ +UNIT sim_throttle_unit; /* one for throttle */ + +/* Forward device declarations */ +extern DEVICE sim_timer_dev; +extern DEVICE sim_throttle_dev; void sim_rtcn_init_all (void) { -uint32 i; +int32 tmr; -for (i = 0; i < SIM_NTIMERS; i++) - if (rtc_initd[i] != 0) - sim_rtcn_init (rtc_initd[i], i); +for (tmr = 0; tmr <= SIM_NTIMERS; tmr++) + if (rtc_initd[tmr] != 0) + sim_rtcn_init (rtc_initd[tmr], tmr); return; } @@ -668,54 +745,85 @@ return sim_rtcn_init_unit (NULL, time, tmr); int32 sim_rtcn_init_unit (UNIT *uptr, int32 time, int32 tmr) { -sim_debug (DBG_CAL, &sim_timer_dev, "sim_rtcn_init(time=%d, tmr=%d)\n", time, tmr); +sim_debug (DBG_CAL, &sim_timer_dev, "_sim_rtcn_init_unit(unit=%s, time=%d, tmr=%d)\n", sim_uname(uptr), time, tmr); if (time == 0) time = 1; -if ((tmr < 0) || (tmr >= SIM_NTIMERS)) - return time; +if (tmr == SIM_INTERNAL_CLK) + tmr = SIM_NTIMERS; +else { + if ((tmr < 0) || (tmr >= SIM_NTIMERS)) + return time; + } +/* + * If we'd previously succeeded in calibrating a tick value, then use that + * delay 
as a better default to setup when we're re-initialized. + * Re-initializing happens on any boot or after any breakpoint/continue. + */ +if (rtc_currd[tmr]) + time = rtc_currd[tmr]; +if (!uptr) + uptr = sim_clock_unit[tmr]; if (uptr) { - if (uptr != &sim_timer_units[SIM_NTIMERS+1]) { /* New unit not internal timer */ - if ((tmr == SIM_NTIMERS-1) && /* but replacing default internal timer */ - (sim_clock_unit[tmr] == &sim_timer_units[SIM_NTIMERS+1])) { /* remove internal timer */ - sim_cancel (sim_clock_unit[tmr]); - sim_clock_unit[tmr] = NULL; - rtc_initd[tmr] = rtc_currd[tmr] = 0; - if (tmr == sim_calb_tmr) - sim_calb_tmr = -1; - } - } if (!sim_clock_unit[tmr]) { sim_clock_unit[tmr] = uptr; sim_clock_cosched_queue[tmr] = QUEUE_LIST_END; } } +rtc_clock_start_gtime[tmr] = sim_gtime(); rtc_rtime[tmr] = sim_os_msec (); rtc_vtime[tmr] = rtc_rtime[tmr]; rtc_nxintv[tmr] = 1000; rtc_ticks[tmr] = 0; rtc_hz[tmr] = 0; -if (rtc_currd[tmr]) /* A previously calibrated value is better than a constant */ - time = rtc_currd[tmr]; rtc_based[tmr] = time; rtc_currd[tmr] = time; rtc_initd[tmr] = time; rtc_elapsed[tmr] = 0; rtc_calibrations[tmr] = 0; -if (sim_calb_tmr == -1) /* save first initialized clock as the system timer */ - sim_calb_tmr = tmr; +rtc_clock_ticks_tot[tmr] += rtc_clock_ticks[tmr]; +rtc_clock_ticks[tmr] = 0; +rtc_calib_tick_time_tot[tmr] += rtc_calib_tick_time[tmr]; +rtc_calib_tick_time[tmr] = 0; +rtc_clock_catchup_pending[tmr] = FALSE; +rtc_clock_catchup_eligible[tmr] = FALSE; +rtc_clock_catchup_ticks_tot[tmr] += rtc_clock_catchup_ticks[tmr]; +rtc_clock_catchup_ticks[tmr] = 0; +rtc_calib_ticks_acked_tot[tmr] += rtc_calib_ticks_acked[tmr]; +rtc_calib_ticks_acked[tmr] = 0; +++rtc_calib_initializations[tmr]; +_rtcn_configure_calibrated_clock (tmr); return time; } int32 sim_rtcn_calb (int32 ticksper, int32 tmr) { -uint32 new_rtime, delta_rtime; +uint32 new_rtime, delta_rtime, last_idle_pct; int32 delta_vtime; double new_gtime; int32 new_currd; -if ((tmr < 0) || (tmr >= SIM_NTIMERS)) - return 10000; -rtc_hz[tmr] = ticksper; +if (tmr == SIM_INTERNAL_CLK) + tmr = SIM_NTIMERS; +else { + if ((tmr < 0) || (tmr >= SIM_NTIMERS)) + return 10000; + } +if (rtc_hz[tmr] != ticksper) { /* changing tick rate? */ + rtc_hz[tmr] = ticksper; + rtc_clock_tick_size[tmr] = 1.0/ticksper; + _rtcn_configure_calibrated_clock (tmr); + rtc_currd[tmr] = (int32)(sim_timer_inst_per_sec()/ticksper); + } +if (sim_clock_unit[tmr] == NULL) { /* Not using TIMER units? */ + rtc_clock_ticks[tmr] += 1; + rtc_calib_tick_time[tmr] += rtc_clock_tick_size[tmr]; + } +if (rtc_clock_catchup_pending[tmr]) { /* catchup tick? */ + ++rtc_clock_catchup_ticks[tmr]; /* accumulating which were catchups */ + rtc_clock_catchup_pending[tmr] = FALSE; + if (!sim_asynch_timer) /* non asynch timers? */ + return rtc_currd[tmr]; /* return now avoiding counting catchup tick in calibration */ + } rtc_ticks[tmr] = rtc_ticks[tmr] + 1; /* count ticks */ if (rtc_ticks[tmr] < ticksper) { /* 1 sec yet? 
*/ return rtc_currd[tmr]; @@ -739,11 +847,12 @@ if (sim_calb_tmr != tmr) { } new_rtime = sim_os_msec (); /* wall time */ sim_debug (DBG_TRC, &sim_timer_dev, "sim_rtcn_calb(ticksper=%d, tmr=%d)\n", ticksper, tmr); -if (sim_idle_idled) { +last_idle_pct = MIN(100,(uint32)(((double)(rtc_clock_time_idled[tmr] - rtc_clock_time_idled_last[tmr])) / 10.0)); +rtc_clock_time_idled_last[tmr] = rtc_clock_time_idled[tmr]; +if (last_idle_pct > (100 - sim_idle_calib_pct)) { rtc_rtime[tmr] = new_rtime; /* save wall time */ rtc_vtime[tmr] = rtc_vtime[tmr] + 1000; /* adv sim time */ rtc_gtime[tmr] = sim_gtime(); /* save instruction time */ - sim_idle_idled = FALSE; /* reset idled flag */ sim_debug (DBG_CAL, &sim_timer_dev, "skipping calibration due to idling - result: %d\n", rtc_currd[tmr]); return rtc_currd[tmr]; /* avoid calibrating idle checks */ } @@ -770,31 +879,20 @@ if (delta_rtime > 30000) { /* gap too big? */ return rtc_currd[tmr]; /* can't calibr */ } new_gtime = sim_gtime(); -if (sim_asynch_enabled && sim_asynch_timer) { - if (rtc_elapsed[tmr] > sim_idle_stable) { - /* An asynchronous clock, merely needs to divide the number of */ - /* instructions actually executed by the clock rate. */ - new_currd = (int32)((new_gtime - rtc_gtime[tmr])/ticksper); - /* avoid excessive swings in the calibrated result */ - if (new_currd > 10*rtc_currd[tmr]) /* don't swing big too fast */ - new_currd = 10*rtc_currd[tmr]; - else - if (new_currd < rtc_currd[tmr]/10) /* don't swing small too fast */ - new_currd = rtc_currd[tmr]/10; - rtc_currd[tmr] = new_currd; - rtc_gtime[tmr] = new_gtime; /* save instruction time */ - if (rtc_currd[tmr] == 127) { - sim_debug (DBG_CAL, &sim_timer_dev, "asynch calibration small: %d\n", rtc_currd[tmr]); - } - sim_debug (DBG_CAL, &sim_timer_dev, "asynch calibration result: %d\n", rtc_currd[tmr]); - return rtc_currd[tmr]; /* calibrated result */ - } - else { - rtc_currd[tmr] = rtc_initd[tmr]; - rtc_gtime[tmr] = new_gtime; /* save instruction time */ - sim_debug (DBG_CAL, &sim_timer_dev, "asynch not stable calibration result: %d\n", rtc_initd[tmr]); - return rtc_initd[tmr]; /* initial result until stable */ - } +if (sim_asynch_timer) { + /* An asynchronous clock, merely needs to divide the number of */ + /* instructions actually executed by the clock rate. 
*/ + new_currd = (int32)((new_gtime - rtc_gtime[tmr])/ticksper); + /* avoid excessive swings in the calibrated result */ + if (new_currd > 10*rtc_currd[tmr]) /* don't swing big too fast */ + new_currd = 10*rtc_currd[tmr]; + else + if (new_currd < rtc_currd[tmr]/10) /* don't swing small too fast */ + new_currd = rtc_currd[tmr]/10; + rtc_currd[tmr] = new_currd; + rtc_gtime[tmr] = new_gtime; /* save instruction time */ + sim_debug (DBG_CAL, &sim_timer_dev, "asynch calibration result: %d\n", rtc_currd[tmr]); + return rtc_currd[tmr]; /* calibrated result */ } rtc_gtime[tmr] = new_gtime; /* save instruction time */ /* This self regulating algorithm depends directly on the assumption */ @@ -837,17 +935,19 @@ return sim_rtcn_calb (ticksper, 0); t_bool sim_timer_init (void) { -int i; +int tmr; uint32 clock_start, clock_last, clock_now; sim_debug (DBG_TRC, &sim_timer_dev, "sim_timer_init()\n"); -for (i=0; ia_next) { - if ((dptr = find_dev_from_unit (uptr)) != NULL) { - fprintf (st, " %s", sim_dname (dptr)); - if (dptr->numunits > 1) - fprintf (st, " unit %d", (int32) (uptr - dptr->units)); - } - else fprintf (st, " Unknown"); - fprintf (st, " after "); - fprint_val (st, (t_value)uptr->a_usec_delay, 10, 0, PV_RCOMMA); - fprintf (st, " usec\n"); - } - } if (sim_asynch_timer) { - for (tmr=0; tmra_next) { - if ((dptr = find_dev_from_unit (uptr)) != NULL) { - fprintf (st, " %s", sim_dname (dptr)); - if (dptr->numunits > 1) - fprintf (st, " unit %d", (int32) (uptr - dptr->units)); - } - else fprintf (st, " Unknown"); - fprintf (st, "\n"); + const char *tim; + + if (sim_wallclock_queue == QUEUE_LIST_END) + fprintf (st, "%s wall clock event queue empty\n", sim_name); + else { + fprintf (st, "%s wall clock event queue status\n", sim_name); + for (uptr = sim_wallclock_queue; uptr != QUEUE_LIST_END; uptr = uptr->a_next) { + if ((dptr = find_dev_from_unit (uptr)) != NULL) { + fprintf (st, " %s", sim_dname (dptr)); + if (dptr->numunits > 1) + fprintf (st, " unit %d", (int32) (uptr - dptr->units)); } + else + fprintf (st, " Unknown"); + tim = sim_fmt_secs(uptr->a_usec_delay/1000000.0); + fprintf (st, " after %s\n", tim); } } } +#endif /* SIM_ASYNCH_CLOCKS */ +for (tmr=0; tmr<=SIM_NTIMERS; ++tmr) { + if (sim_clock_unit[tmr] == NULL) + continue; + if (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) { + int32 accum; + + fprintf (st, "%s clock (%s) co-schedule event queue status\n", + sim_name, sim_uname(sim_clock_unit[tmr])); + accum = 0; + for (uptr = sim_clock_cosched_queue[tmr]; uptr != QUEUE_LIST_END; uptr = uptr->next) { + if ((dptr = find_dev_from_unit (uptr)) != NULL) { + fprintf (st, " %s", sim_dname (dptr)); + if (dptr->numunits > 1) + fprintf (st, " unit %d", (int32) (uptr - dptr->units)); + } + else + fprintf (st, " Unknown"); + if (accum > 0) + fprintf (st, " after %d ticks", accum); + fprintf (st, "\n"); + accum = accum + uptr->time; + } + } + } +#if defined (SIM_ASYNCH_IO) pthread_mutex_unlock (&sim_timer_lock); #endif /* SIM_ASYNCH_IO */ return SCPE_OK; @@ -970,9 +1113,14 @@ REG sim_timer_reg[] = { { FLDATAD (IDLE_ENAB, sim_idle_enab, 0, "Idle Enabled"), REG_RO}, { DRDATAD (IDLE_RATE_MS, sim_idle_rate_ms, 32, "Idle Rate Milliseconds"), PV_RSPC|REG_RO}, { DRDATAD (OS_SLEEP_MIN_MS, sim_os_sleep_min_ms, 32, "Minimum Sleep Resolution"), PV_RSPC|REG_RO}, - { DRDATAD (IDLE_STABLE, sim_idle_stable, 32, "Idle Stable"), PV_RSPC}, - { FLDATAD (IDLE_IDLED, sim_idle_idled, 0, ""), REG_RO}, - { DRDATAD (TMR, sim_calb_tmr, 32, ""), PV_RSPC|REG_RO}, + { DRDATAD (OS_SLEEP_INC_MS, sim_os_sleep_inc_ms, 32, "Minimum 
Sleep Increment Resolution"), PV_RSPC|REG_RO}, + { DRDATAD (IDLE_STABLE, sim_idle_stable, 32, "Idle Stable"), PV_RSPC|REG_RO}, + { DRDATAD (IDLE_CALIB_PCT, sim_idle_calib_pct, 32, "Minimum Idled percentage allowing calibration"), PV_RSPC|REG_RO}, + { DRDATAD (TMR, sim_calb_tmr, 32, "Calibrated Timer"), PV_RSPC|REG_RO}, + { NULL } + }; + +REG sim_throttle_reg[] = { { DRDATAD (THROT_MS_START, sim_throt_ms_start, 32, ""), PV_RSPC|REG_RO}, { DRDATAD (THROT_MS_STOP, sim_throt_ms_stop, 32, ""), PV_RSPC|REG_RO}, { DRDATAD (THROT_TYPE, sim_throt_type, 32, ""), PV_RSPC|REG_RO}, @@ -983,11 +1131,62 @@ REG sim_timer_reg[] = { { NULL } }; +/* Clear, Set and show catchup */ + +/* Clear catchup */ + +t_stat sim_timer_clr_catchup (UNIT *uptr, int32 val, CONST char *cptr, void *desc) +{ +if (sim_catchup_ticks) + sim_catchup_ticks = FALSE; +return SCPE_OK; +} + +t_stat sim_timer_set_catchup (UNIT *uptr, int32 val, CONST char *cptr, void *desc) +{ +if (!sim_catchup_ticks) + sim_catchup_ticks = TRUE; +return SCPE_OK; +} + +t_stat sim_timer_show_catchup (FILE *st, UNIT *uptr, int32 val, CONST void *desc) +{ +fprintf (st, "Calibrated Ticks%s", sim_catchup_ticks ? " with Catchup Ticks" : ""); +return SCPE_OK; +} + +/* Set and show idle calibration threshold */ + +t_stat sim_timer_set_idle_pct (UNIT *uptr, int32 val, CONST char *cptr, void *desc) +{ +t_stat r; +int32 newpct; + +if (cptr == NULL) + return SCPE_ARG; +newpct = (int32) get_uint (cptr, 10, 100, &r); +if ((r != SCPE_OK) || (newpct == (int32)(sim_idle_calib_pct))) + return r; +if (newpct == 0) + return SCPE_ARG; +sim_idle_calib_pct = (uint32)newpct; +return SCPE_OK; +} + +t_stat sim_timer_show_idle_pct (FILE *st, UNIT *uptr, int32 val, CONST void *desc) +{ +if (sim_idle_calib_pct == 0) + fprintf (st, "Calibration Always"); +else + fprintf (st, "Calibration Skipped when Idle exceeds %d%%", sim_idle_calib_pct); +return SCPE_OK; +} + /* Clear, Set and show asynch */ /* Clear asynch */ -t_stat sim_timer_clr_async (UNIT *uptr, int32 val, char *cptr, void *desc) +t_stat sim_timer_clr_async (UNIT *uptr, int32 val, CONST char *cptr, void *desc) { if (sim_asynch_timer) { sim_asynch_timer = FALSE; @@ -996,26 +1195,29 @@ if (sim_asynch_timer) { return SCPE_OK; } -t_stat sim_timer_set_async (UNIT *uptr, int32 val, char *cptr, void *desc) +t_stat sim_timer_set_async (UNIT *uptr, int32 val, CONST char *cptr, void *desc) { -if (!sim_asynch_timer) { +if (sim_asynch_enabled && (!sim_asynch_timer)) { sim_asynch_timer = TRUE; sim_timer_change_asynch (); } return SCPE_OK; } -t_stat sim_timer_show_async (FILE *st, UNIT *uptr, int32 val, void *desc) +t_stat sim_timer_show_async (FILE *st, UNIT *uptr, int32 val, CONST void *desc) { -fprintf (st, "%s", (sim_asynch_enabled && sim_asynch_timer) ? "Asynchronous" : "Synchronous"); +fprintf (st, "%s", sim_asynch_timer ? 
"Asynchronous" : "Synchronous"); return SCPE_OK; } MTAB sim_timer_mod[] = { -#if defined (SIM_ASYNCH_IO) && defined (SIM_ASYNCH_CLOCKS) - { MTAB_VDV, MTAB_VDV, "ASYNC", "ASYNC", &sim_timer_set_async, &sim_timer_show_async, NULL, "Enables/Displays Asynchronous Timer operation mode" }, - { MTAB_VDV, 0, NULL, "NOASYNC", &sim_timer_clr_async, NULL, NULL, "Disables Asynchronous Timer operation" }, +#if defined (SIM_ASYNCH_CLOCKS) + { MTAB_VDV, MTAB_VDV, "ASYNCH", "ASYNCH", &sim_timer_set_async, &sim_timer_show_async, NULL, "Enables/Displays Asynchronous Timer mode" }, + { MTAB_VDV, 0, NULL, "NOASYNCH", &sim_timer_clr_async, NULL, NULL, "Disables Asynchronous Timer operation" }, #endif + { MTAB_VDV, MTAB_VDV, "CATCHUP", "CATCHUP", &sim_timer_set_catchup, &sim_timer_show_catchup, NULL, "Enables/Displays Clock Tick catchup mode" }, + { MTAB_VDV, 0, NULL, "NOCATCHUP", &sim_timer_clr_catchup, NULL, NULL, "Disables Clock Tick catchup mode" }, + { MTAB_XTD|MTAB_VDV|MTAB_VALR, 0, "CALIB", "CALIB=nn", &sim_timer_set_idle_pct, &sim_timer_show_idle_pct, NULL, "Configure/Display Calibration Idle Suppression %" }, { 0 }, }; @@ -1023,10 +1225,13 @@ static t_stat sim_timer_clock_reset (DEVICE *dptr); DEVICE sim_timer_dev = { "TIMER", sim_timer_units, sim_timer_reg, sim_timer_mod, - SIM_NTIMERS+2, 0, 0, 0, 0, 0, + SIM_NTIMERS+1, 0, 0, 0, 0, 0, NULL, NULL, &sim_timer_clock_reset, NULL, NULL, NULL, NULL, DEV_DEBUG | DEV_NOSAVE, 0, sim_timer_debug}; +DEVICE sim_throttle_dev = { + "THROTTLE", &sim_throttle_unit, sim_throttle_reg, NULL, 1}; + /* sim_idle - idle simulator until next event or for specified interval @@ -1047,12 +1252,29 @@ static uint32 cyc_ms = 0; uint32 w_ms, w_idle, act_ms; int32 act_cyc; +if (rtc_clock_catchup_pending[tmr]) { /* Catchup clock tick pending? */ + sim_debug (DBG_CAL, &sim_timer_dev, "sim_idle(tmr=%d, sin_cyc=%d) - accelerating pending catch-up tick before idling %s\n", tmr, sin_cyc, sim_uname (sim_clock_unit[tmr])); + sim_activate_abs (&sim_timer_units[tmr], 0); + if (sin_cyc) + sim_interval = sim_interval - 1; + return FALSE; + } if ((!sim_idle_enab) || /* idling disabled */ ((sim_clock_queue == QUEUE_LIST_END) && /* or clock queue empty? */ - (!(sim_asynch_enabled && sim_asynch_timer)))|| /* and not asynch? */ + (!sim_asynch_timer))|| /* and not asynch? */ ((sim_clock_queue != QUEUE_LIST_END) && /* or clock queue not empty */ ((sim_clock_queue->flags & UNIT_IDLE) == 0))|| /* and event not idle-able? */ (rtc_elapsed[tmr] < sim_idle_stable)) { /* or timer not stable? */ + sim_debug (DBG_IDL, &sim_timer_dev, "Can't idle: %s - elapsed: %d.%03d\n", !sim_idle_enab ? "idle disabled" : + ((rtc_elapsed[tmr] < sim_idle_stable) ? "not stable" : + ((sim_clock_queue != QUEUE_LIST_END) ? sim_uname (sim_clock_queue) : + "")), rtc_elapsed[tmr], rtc_ticks[tmr]); + if (sin_cyc) + sim_interval = sim_interval - 1; + return FALSE; + } +if (_rtcn_tick_catchup_check(tmr, 0)) { + sim_debug (DBG_CAL, &sim_timer_dev, "sim_idle(tmr=%d, sin_cyc=%d) - rescheduling catchup tick for %s\n", tmr, sin_cyc, sim_uname (sim_clock_unit[tmr])); if (sin_cyc) sim_interval = sim_interval - 1; return FALSE; @@ -1069,10 +1291,11 @@ if ((!sim_idle_enab) || /* idling disabled */ the actual idle time, so consistent calibrated numbers produce better adjustments. - To negate this effect, we set a flag (sim_idle_idled) here and the - sim_rtcn_calb routine checks this flag before performing an actual - calibration and skips calibration if the flag was set and then clears - the flag. 
+   To negate this effect, we accumulate the time actually idled here.
+   sim_rtcn_calb compares the accumulated idle time during the most recent
+   second and if it exceeds the percentage defined by sim_idle_calib_pct,
+   calibration is suppressed.  Thus recalibration only happens if things
+   didn't idle too much.

    we also check sim_idle_enab above so that all simulators can avoid
    directly checking sim_idle_enab before calling sim_idle so that all of
@@ -1081,8 +1304,7 @@ if ((!sim_idle_enab) ||                       /* idling disabled */
 */
 //sim_idle_idled = TRUE;                        /* record idle attempt */
 sim_debug (DBG_TRC, &sim_timer_dev, "sim_idle(tmr=%d, sin_cyc=%d)\n", tmr, sin_cyc);
-if (cyc_ms == 0)                                /* not computed yet? */
-    cyc_ms = (rtc_currd[tmr] * rtc_hz[tmr]) / 1000;     /* cycles per msec */
+cyc_ms = (rtc_currd[tmr] * rtc_hz[tmr]) / 1000;         /* cycles per msec */
 if ((sim_idle_rate_ms == 0) || (cyc_ms == 0)) {         /* not possible? */
     if (sin_cyc)
         sim_interval = sim_interval - 1;
@@ -1090,8 +1312,11 @@ if ((sim_idle_rate_ms == 0) || (cyc_ms == 0)) { /* not possible? */
     return FALSE;
     }
 w_ms = (uint32) sim_interval / cyc_ms;          /* ms to wait */
-w_idle = w_ms / sim_idle_rate_ms;               /* intervals to wait */
-if (w_idle == 0) {                              /* none? */
+if (sim_os_tick_hz < rtc_hz[tmr])
+    w_idle = (w_ms * 1000);                     /* intervals to wait * 1000 */
+else
+    w_idle = (w_ms * 1000) / sim_idle_rate_ms;  /* intervals to wait * 1000 */
+if (w_idle < 500) {                             /* shorter than 1/2 a minimum sleep? */
     if (sin_cyc)
         sim_interval = sim_interval - 1;
     sim_debug (DBG_IDL, &sim_timer_dev, "no wait\n");
@@ -1102,12 +1327,14 @@ if (sim_clock_queue == QUEUE_LIST_END)
 else
     sim_debug (DBG_IDL, &sim_timer_dev, "sleeping for %d ms - pending event on %s in %d instructions\n", w_ms, sim_uname(sim_clock_queue), sim_interval);
 act_ms = SIM_IDLE_MS_SLEEP (w_ms);              /* wait */
+rtc_clock_time_idled[tmr] += act_ms;
 act_cyc = act_ms * cyc_ms;
 if (act_ms < w_ms)                              /* awakened early? */
     act_cyc += (cyc_ms * sim_idle_rate_ms) / 2; /* account for half an interval's worth of cycles */
 if (sim_interval > act_cyc)
     sim_interval = sim_interval - act_cyc;      /* count down sim_interval */
-else sim_interval = 0;                          /* or fire immediately */
+else
+    sim_interval = 0;                           /* or fire immediately */
 if (sim_clock_queue == QUEUE_LIST_END)
     sim_debug (DBG_IDL, &sim_timer_dev, "slept for %d ms - pending event in %d instructions\n", act_ms, sim_interval);
 else
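The percentage test that sim_rtcn_calb applies to the idle time accumulated above is straightforward. A minimal stand-alone sketch of the suppression check (not the exact simh code; the idled_ms argument stands in for the rtc_clock_time_idled[] bookkeeping kept by sim_idle):

    /* Sketch: suppress calibration when more of the last second was spent
       sleeping than the configured percentage allows. */
    static int calib_suppressed (unsigned idled_ms, unsigned idle_calib_pct)
    {
    if (idle_calib_pct == 0)                    /* 0 means always calibrate */
        return 0;
    return ((idled_ms * 100) / 1000) > idle_calib_pct;  /* % of 1 sec window */
    }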
@@ -1122,10 +1349,6 @@ t_stat sim_set_idle (UNIT *uptr, int32 val, CONST char *cptr, void *desc)
 t_stat r;
 uint32 v;

-if (sim_idle_rate_ms == 0)
-    return sim_messagef (SCPE_NOFNC, "Idling is not available, Minimum OS sleep time is %dms\n", sim_os_sleep_min_ms);
-if ((val != 0) && (sim_idle_rate_ms > (uint32) val))
-    return sim_messagef (SCPE_NOFNC, "Idling is not available, Minimum OS sleep time is %dms, Requied minimum OS sleep is %dms\n", sim_os_sleep_min_ms, val);
 if (cptr && *cptr) {
     v = (uint32) get_uint (cptr, 10, SIM_IDLE_STMAX, &r);
     if ((r != SCPE_OK) || (v < SIM_IDLE_STMIN))
@@ -1264,12 +1487,12 @@ void sim_throt_sched (void)
 {
 sim_throt_state = 0;
 if (sim_throt_type)
-    sim_activate (&sim_timer_units[SIM_NTIMERS], SIM_THROT_WINIT);
+    sim_activate (&sim_throttle_unit, SIM_THROT_WINIT);
 }

 void sim_throt_cancel (void)
 {
-sim_cancel (&sim_timer_units[SIM_NTIMERS]);
+sim_cancel (&sim_throttle_unit);
 }

 /* Throttle service

@@ -1369,12 +1592,141 @@ sim_activate (uptr, sim_throt_wait);          /* reschedule */
 return SCPE_OK;
 }

+/* Clock assist activities */
+
 t_stat sim_timer_tick_svc (UNIT *uptr)
 {
+int tmr = (int)(uptr-sim_timer_units);
+t_stat stat;
+
+rtc_clock_ticks[tmr] += 1;
+rtc_calib_tick_time[tmr] += rtc_clock_tick_size[tmr];
+/*
+ * Some devices may depend on executing during the same instruction or
+ * immediately after the clock tick event.  To satisfy this, we directly
+ * run the clock event here and if it completes successfully, schedule any
+ * currently coscheduled units to run now.  Ticks should never return a
+ * non-success status, while co-schedule activities might, so they are
+ * queued to run from sim_process_event
+ */
+sim_debug (DBG_QUE, &sim_timer_dev, "sim_timer_tick_svc - scheduling %s\n", sim_uname (sim_clock_unit[tmr]));
+if (sim_clock_unit[tmr]->action == NULL)
+    return SCPE_IERR;
+stat = sim_clock_unit[tmr]->action (sim_clock_unit[tmr]);
+--sim_cosched_interval[tmr];                    /* Countdown ticks */
+if (stat == SCPE_OK) {
+    if (rtc_clock_catchup_eligible[tmr]) {      /* calibration started? */
+        struct timespec now;
+        double skew;
+
+        clock_gettime(CLOCK_REALTIME, &now);
+        skew = (_timespec_to_double(&now) - (rtc_calib_tick_time[tmr]+rtc_clock_catchup_base_time[tmr]));
+
+        if (fabs(skew) > fabs(rtc_clock_skew_max[tmr]))
+            rtc_clock_skew_max[tmr] = skew;
+        }
+    while ((sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) &&
+           (sim_cosched_interval[tmr] < sim_clock_cosched_queue[tmr]->time)) {
+        UNIT *cptr = sim_clock_cosched_queue[tmr];
+        sim_clock_cosched_queue[tmr] = cptr->next;
+        cptr->next = NULL;
+        cptr->cancel = NULL;
+        sim_debug (DBG_QUE, &sim_timer_dev, "sim_timer_tick_svc(tmr=%d) - coactivating %s\n", tmr, sim_uname (cptr));
+        _sim_activate (cptr, 0);
+        }
+    if (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END)
+        sim_cosched_interval[tmr] = sim_clock_cosched_queue[tmr]->time;
+    else
+        sim_cosched_interval[tmr] = 0;
+    }
+sim_timer_activate_after (uptr, 1000000/rtc_hz[tmr]);
+return stat;
+}
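The shape of sim_timer_tick_svc (run the tick action synchronously, then release the units co-scheduled with it) reduces to the following sketch, using simplified stand-in types rather than simh's UNIT and queue handling:

    /* Sketch: deliver one tick, then release co-scheduled units. */
    typedef struct unit_s UNIT_S;
    struct unit_s {
        UNIT_S *next;
        int (*action) (UNIT_S *up);
        };

    static int tick_and_release (UNIT_S *clock, UNIT_S **cosched)
    {
    int stat = clock->action (clock);           /* deliver the tick first */

    if (stat == 0) {                            /* tick succeeded? */
        while (*cosched != NULL) {
            UNIT_S *up = *cosched;              /* unlink each co-scheduled unit */
            *cosched = up->next;
            up->next = NULL;
            up->action (up);                    /* simh instead queues these via
                                                   _sim_activate so any failure
                                                   surfaces from sim_process_event */
            }
        }
    return stat;
    }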
+
+void sim_rtcn_get_time (struct timespec *now, int tmr)
+{
+sim_debug (DBG_CAL, &sim_timer_dev, "sim_rtcn_get_time(tmr=%d)\n", tmr);
+clock_gettime (CLOCK_REALTIME, now);
+}
+
+/*
+ * If the host system has a relatively large clock tick (as compared to
+ * the desired simulated hz) ticks will naturally be scheduled late and
+ * these delays will accumulate.  The net result will be unreasonably
+ * slow ticks being delivered to the simulated system.
+ * Additionally, when a simulator is idling and/or throttling, it will
+ * deliberately call sim_os_ms_sleep and those sleep operations will be
+ * variable and subject to the host system's minimum sleep resolution
+ * which can exceed the desired sleep interval and further slow tick
+ * delivery to the simulated system.
+ * We accommodate these problems and make up for lost ticks by injecting
+ * catch-up ticks to the simulator.
+ *
+ * We avoid excessive co-scheduled polling during these catch-up ticks
+ * to minimize what is likely excessive overhead, thus 'coschedule
+ * polling' only occurs on every fourth clock tick when processing
+ * catch-up ticks.
+ *
+ * When necessary, catch-up ticks are scheduled to run under one
+ * of two conditions:
+ *   1) after an indicated number of instructions in a call by the simulator
+ *      to sim_rtcn_tick_ack.  sim_rtcn_tick_ack exists to provide a
+ *      mechanism to inform the simh timer facilities when the simulated
+ *      system has accepted the most recent clock tick interrupt.
+ *   2) immediately when the simulator calls sim_idle
+ */
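The catch-up decision itself is a wall clock comparison: has more real time passed than the tick time already delivered since catch-up accounting began? A stand-alone sketch of that test (plain doubles standing in for the rtc_* arrays used by _rtcn_tick_catchup_check below):

    #include <time.h>

    /* Sketch: is real time more than one tick ahead of delivered ticks?
       'base' is the wall time when catch-up accounting began, 'ticked'
       the seconds of ticks delivered since, 'tick_size' one tick in seconds. */
    static int tick_is_behind (double base, double ticked, double tick_size)
    {
    struct timespec now;
    double tnow;

    clock_gettime (CLOCK_REALTIME, &now);
    tnow = now.tv_sec + (now.tv_nsec / 1000000000.0);
    return (tnow > (base + ticked + tick_size));
    }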
+
+/* _rtcn_tick_catchup_check - check whether a catch-up clock tick is due
+   and, if so, schedule it
+
+   Inputs:
+        tmr  =  calibrated timer to check/schedule
+        time =  instruction delay for next tick
+
+   Returns TRUE if a catchup tick has been scheduled
+*/
+
+static t_bool _rtcn_tick_catchup_check (int32 tmr, int32 time)
+{
+double tnow;
+
+if ((!sim_catchup_ticks) ||
+    ((tmr < 0) || (tmr >= SIM_NTIMERS)))
+    return FALSE;
+tnow = sim_timenow_double();
+if (sim_catchup_ticks &&
+    (!rtc_clock_catchup_eligible[tmr])) {
+    rtc_clock_catchup_base_time[tmr] = tnow;
+    rtc_clock_ticks_tot[tmr] += rtc_clock_ticks[tmr];
+    rtc_clock_ticks[tmr] = 0;
+    rtc_calib_tick_time_tot[tmr] += rtc_calib_tick_time[tmr];
+    rtc_calib_tick_time[tmr] = 0.0;
+    rtc_clock_catchup_ticks_tot[tmr] += rtc_clock_catchup_ticks[tmr];
+    rtc_clock_catchup_ticks[tmr] = 0;
+    rtc_calib_ticks_acked_tot[tmr] += rtc_calib_ticks_acked[tmr];
+    rtc_calib_ticks_acked[tmr] = 0;
+    rtc_clock_catchup_eligible[tmr] = TRUE;
+    sim_debug (DBG_QUE, &sim_timer_dev, "_rtcn_tick_catchup_check() - Enabling catchup ticks for %s\n", sim_uname (sim_clock_unit[tmr]));
+    return TRUE;
+    }
+if (rtc_clock_catchup_eligible[tmr] &&
+    (tnow > (rtc_clock_catchup_base_time[tmr] + (rtc_calib_tick_time[tmr] + rtc_clock_tick_size[tmr])))) {
+    sim_debug (DBG_QUE, &sim_timer_dev, "_rtcn_tick_catchup_check(%d) - scheduling catchup tick for %s which is behind %s\n", time, sim_uname (sim_clock_unit[tmr]), sim_fmt_secs (tnow - (rtc_clock_catchup_base_time[tmr] + (rtc_calib_tick_time[tmr] + rtc_clock_tick_size[tmr]))));
+    rtc_clock_catchup_pending[tmr] = TRUE;
+    sim_activate_abs (&sim_timer_units[tmr], time);
+    return TRUE;
+    }
+return FALSE;
+}
+
+t_stat sim_rtcn_tick_ack (int32 time, int32 tmr)
+{
+if ((tmr < 0) || (tmr >= SIM_NTIMERS))
+    return SCPE_TIMER;
+sim_debug (DBG_ACK, &sim_timer_dev, "sim_rtcn_tick_ack - for %s\n", sim_uname (sim_clock_unit[tmr]));
+_rtcn_tick_catchup_check (tmr, time);
+++rtc_calib_ticks_acked[tmr];
 return SCPE_OK;
 }
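A simulator typically calls sim_rtcn_tick_ack from the register write handler where its clock interrupt gets acknowledged. A hypothetical CSR write handler illustrates the pattern (CSR_DONE, TMR_CLK and the 20 instruction delay are example values, not fixed requirements):

    /* Sketch: acknowledge a clock tick when software clears the done bit. */
    void clk_csr_wr (int32 data)
    {
    if (data & CSR_DONE)                        /* tick interrupt acked? */
        sim_rtcn_tick_ack (20, TMR_CLK);        /* let the timer facility know */
    }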

-#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS)
 static double _timespec_to_double (struct timespec *time)
 {
@@ -1391,17 +1743,18 @@ double sim_timenow_double (void)
 {
 struct timespec now;

-clock_gettime(CLOCK_REALTIME, &now);
+clock_gettime (CLOCK_REALTIME, &now);
 return _timespec_to_double (&now);
 }

+#if defined(SIM_ASYNCH_CLOCKS)
+
 extern UNIT * volatile sim_wallclock_queue;
 extern UNIT * volatile sim_wallclock_entry;

 pthread_t sim_timer_thread;                     /* Wall Clock Timing Thread Id */
 pthread_cond_t sim_timer_startup_cond;
 t_bool sim_timer_thread_running = FALSE;
-t_bool sim_timer_event_canceled = FALSE;

 static void *
 _timer_thread(void *arg)
 {
@@ -1420,19 +1773,18 @@ sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - starting\n");

 pthread_mutex_lock (&sim_timer_lock);
 pthread_cond_signal (&sim_timer_startup_cond);  /* Signal we're ready to go */
-while (sim_asynch_enabled && sim_asynch_timer && sim_is_running) {
+while (sim_asynch_timer && sim_is_running) {
     struct timespec start_time, stop_time;
     struct timespec due_time;
     double wait_usec;
     int32 inst_delay;
     double inst_per_sec;
-    UNIT *uptr;
+    UNIT *uptr, *cptr, *prvptr;

     if (sim_wallclock_entry) {                  /* something to insert in queue? */
-        UNIT *cptr, *prvptr;
-
-        sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - timing %s for %d usec\n",
-                   sim_uname(sim_wallclock_entry), sim_wallclock_entry->time);
+        sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - timing %s for %s\n",
+                   sim_uname(sim_wallclock_entry), sim_fmt_secs (sim_wallclock_entry->time/1000000.0));

         uptr = sim_wallclock_entry;
         sim_wallclock_entry = NULL;
@@ -1472,7 +1824,6 @@ while (sim_asynch_timer && sim_is_running) {
         sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - waiting for %.0f usecs until %.6f for %s\n", wait_usec, sim_wallclock_queue->a_due_time, sim_uname(sim_wallclock_queue));
         if ((wait_usec <= 0.0) ||
             (0 != pthread_cond_timedwait (&sim_timer_wake, &sim_timer_lock, &due_time))) {
-            int tmr;

             if (sim_wallclock_queue == QUEUE_LIST_END)  /* queue empty? */
                 continue;                               /* wait again */
@@ -1493,26 +1844,9 @@
                 }
             sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - slept %.0fms - activating(%s,%d)\n",
                        1000.0*(_timespec_to_double (&stop_time)-_timespec_to_double (&start_time)), sim_uname(uptr), inst_delay);
-            for (tmr=0; tmr<SIM_NTIMERS; tmr++)
-                if (uptr == sim_clock_unit[tmr])
-                    break;
-            if (tmr != SIM_NTIMERS) {
-                uptr->a_next = sim_clock_cosched_queue[tmr];
-                sim_clock_cosched_queue[tmr] = QUEUE_LIST_END;
-                AIO_ACTIVATE_LIST(sim_activate, uptr, inst_delay);
-                }
-            else
-                sim_activate (uptr, inst_delay);
+            sim_activate (uptr, inst_delay);
             }
         else {/* Something wants to adjust the queue since the wait condition was signaled */
-            if (sim_timer_event_canceled)
-                sim_timer_event_canceled = FALSE;       /* reset flag and continue */
             }
         }
     }
 pthread_mutex_unlock (&sim_timer_lock);
@@ -1522,81 +1856,83 @@ sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - exiting\n");

 return NULL;
 }

-#endif /* defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) */
+#endif /* defined(SIM_ASYNCH_CLOCKS) */

 /* In the event that there are no active clock devices, no instruction
    rate calibration will be performed.  This is more likely on simpler
    simulators which don't have a full spectrum of standard devices or
    possibly when a clock device exists but its use is optional.
+
+   Additionally, when a host system has a natural clock tick (or minimal
+   sleep time) which is greater than the tick size that a simulator
+   wants to run a clock at, we run this clock at the rate implied by
+   the host system's minimal sleep time or 10Hz, whichever is less.

-   To solve this we merely run an internal clock at 50Hz.
+
+   To solve this we merely run an internal clock at 10Hz.
 */

-#define CLK_TPS 50
-#define CLK_INIT 20000
+
+#define CLK_TPS 10
+#define CLK_INIT 100000
+static int32 sim_int_clk_tps;
+
 static t_stat sim_timer_clock_tick_svc (UNIT *uptr)
 {
-sim_rtcn_calb (CLK_TPS, SIM_NTIMERS-1);
-sim_activate_after (uptr, 1000000/CLK_TPS);             /* reactivate unit */
+sim_rtcn_calb (sim_int_clk_tps, SIM_INTERNAL_CLK);
+sim_activate_after (uptr, 1000000/sim_int_clk_tps);     /* reactivate unit */
 return SCPE_OK;
 }

+/*
+  This routine exists to assure that there is a single reliably calibrated
+  clock properly counting instruction execution relative to time.  The best
+  way to assure reliable calibration is to use a clock which ticks no
+  faster than the host system's clock.  This is optimal so that accurate
+  time measurements are taken.  If the simulated system doesn't have a
+  clock with an appropriate tick rate, an internal clock is run that meets
+  this requirement.
+ */
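The rate selection described above is simply the lesser of the preferred internal rate and what the host can honor; a one-line sketch of the choice the MIN(CLK_TPS, sim_os_tick_hz) expression below makes:

    /* Sketch: never let the internal calibration clock tick faster than
       the host system can actually sleep. */
    static int32 int_clk_tps (int32 host_tick_hz)
    {
    return (host_tick_hz < CLK_TPS) ? host_tick_hz : CLK_TPS;
    }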
+static void _rtcn_configure_calibrated_clock (int32 newtmr)
+{
+int32 tmr;
+
+sim_int_clk_tps = MIN(CLK_TPS, sim_os_tick_hz);
+for (tmr=0; tmra_next) {
-    if (cptr == sim_wallclock_queue) {          /* Handle first entry */
-        struct timespec now;
-        double due_time;
-
-        clock_gettime(CLOCK_REALTIME, &now);
-        due_time = _timespec_to_double(&now) + ((double)(cptr->a_usec_delay)/1000000.0);
-        delta_due_time = due_time - cptr->a_due_time;
-        }
-    cptr->a_due_time += delta_due_time;
-    }
 sim_debug (DBG_TRC, &sim_timer_dev, "sim_start_timer_services() - starting\n");
 pthread_cond_init (&sim_timer_startup_cond, NULL);
 pthread_attr_init (&attr);
@@ -1613,8 +1949,33 @@ pthread_mutex_unlock (&sim_timer_lock);

 void sim_stop_timer_services (void)
 {
+int tmr;
+
 sim_debug (DBG_TRC, &sim_timer_dev, "sim_stop_timer_services()\n");
-#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS)
+
+for (tmr=0; tmr<=SIM_NTIMERS; tmr++) {
+    int32 accum;
+
+    if (sim_clock_unit[tmr]) {
+        /* Stop clock assist unit and make sure the clock unit has a tick queued */
+        sim_cancel (&sim_timer_units[tmr]);
+        if (rtc_hz[tmr])
+            sim_activate (sim_clock_unit[tmr], rtc_currd[tmr]);
+        /* Move coscheduled units to the standard event queue */
+        accum = 1;
+        while (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) {
+            UNIT *cptr = sim_clock_cosched_queue[tmr];
+
+            sim_clock_cosched_queue[tmr] = cptr->next;
+            cptr->next = NULL;
+            cptr->cancel = NULL;
+
+            accum += cptr->time;
+            _sim_activate (cptr, accum*rtc_currd[tmr]);
+            }
+        }
+    }
+#if defined(SIM_ASYNCH_CLOCKS)
 pthread_mutex_lock (&sim_timer_lock);
 if (sim_timer_thread_running) {
     sim_debug (DBG_TRC, &sim_timer_dev, "sim_stop_timer_services() - stopping\n");
@@ -1622,6 +1983,26 @@ if (sim_timer_thread_running) {
     pthread_mutex_unlock (&sim_timer_lock);
     pthread_join (sim_timer_thread, NULL);
     sim_timer_thread_running = FALSE;
+    /* Any wallclock queued events are now migrated to the normal event queue */
+    while (sim_wallclock_queue != QUEUE_LIST_END) {
+        UNIT *uptr = sim_wallclock_queue;
+        double d_due_delta = uptr->a_due_time - sim_timenow_double ();
+        int32 inst_delay;
+        double inst_delay_d;
+
+        uptr->cancel (uptr);
+        if (d_due_delta < 0.0)
+            d_due_delta = 0.0;
+        inst_delay_d = sim_timer_inst_per_sec () * d_due_delta;
+        /* Bound delay to avoid overflow. */
+        /* Long delays are usually canceled before they expire */
+        if (inst_delay_d > (double)0x7fffffff)
+            inst_delay_d = (double)0x7fffffff;
+        inst_delay = (int32)inst_delay_d;
+        if ((inst_delay == 0) && (inst_delay_d != 0.0))
+            inst_delay = 1;                     /* Minimum non-zero delay is 1 instruction */
+        _sim_activate (uptr, inst_delay);       /* queue it now */
+        }
     }
 else
     pthread_mutex_unlock (&sim_timer_lock);
@@ -1630,26 +2011,11 @@

 t_stat sim_timer_change_asynch (void)
 {
-#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS)
+#if defined(SIM_ASYNCH_CLOCKS)
 if (sim_asynch_enabled && sim_asynch_timer)
     sim_start_timer_services ();
-else {
-    UNIT *uptr;
-    uint32 accum = 0;
-
+else
     sim_stop_timer_services ();
-    while (1) {
-        uptr = sim_wallclock_queue;
-        if (uptr == QUEUE_LIST_END)
-            break;
-        sim_wallclock_queue = uptr->a_next;
-        accum += uptr->time;
-        uptr->a_next = NULL;
-        uptr->a_due_time = 0;
-        uptr->a_usec_delay = 0;
-        sim_activate_after (uptr, accum);
-        }
-    }
 #endif
 return SCPE_OK;
 }
@@ -1672,10 +2038,16 @@
 return inst_per_sec;
 }

 t_stat sim_timer_activate_after (UNIT *uptr, uint32 usec_delay)
 {
-int inst_delay;
+int inst_delay, tmr;
 double inst_delay_d, inst_per_sec;

 AIO_VALIDATE;
+/* If this is a clock unit, we need to schedule the related timer unit instead */
+for (tmr=0; tmr (double)0x7fffffff)
 inst_delay = (int32)inst_delay_d;
 if ((inst_delay == 0) && (usec_delay != 0))
     inst_delay = 1;                             /* Minimum non-zero delay is 1 instruction */
-#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS)
+#if defined(SIM_ASYNCH_CLOCKS)
 if ((sim_calb_tmr == -1) ||                     /* if No timer initialized */
     (inst_delay < rtc_currd[sim_calb_tmr]) ||   /* or sooner than next clock tick? */
-    (rtc_elapsed[sim_calb_tmr] < sim_idle_stable) ||    /* or not idle stable yet */
-    (!(sim_asynch_enabled && sim_asynch_timer))) {      /* or asynch disabled */
+    (rtc_calibrations[sim_calb_tmr] == 0) ||    /* or haven't calibrated yet */
+    (!sim_asynch_timer)) {                      /* or asynch disabled */
     sim_debug (DBG_TIM, &sim_timer_dev, "sim_timer_activate_after() - activating %s after %d instructions\n", sim_uname(uptr), inst_delay);
     return _sim_activate (uptr, inst_delay);    /* queue it now */
     }
 if (1) {
-    struct timespec now;
-    double d_now;
+    double d_now = sim_timenow_double ();

-    clock_gettime (CLOCK_REALTIME, &now);
-    d_now = _timespec_to_double (&now);
     /* Determine if this is a clock tick like invocation
-       or an ocaisional measured device delay */
+       or an occasional measured device delay */
     if ((uptr->a_usec_delay == usec_delay) &&
-        (uptr->a_due_time != 0.0) &&
-        (1)) {
+        (uptr->a_due_time != 0.0)) {
         double d_delay = ((double)usec_delay)/1000000.0;

         uptr->a_due_time += d_delay;
@@ -1718,7 +2086,7 @@
         uptr->a_skew = uptr->a_last_fired_time = 0.0;
         uptr->a_due_time = d_now + (double)(usec_delay)/1000000.0;
         }
-    if (uptr->a_skew > rtc_clock_skew_max[sim_calb_tmr])
+    if (fabs (uptr->a_skew) > fabs (rtc_clock_skew_max[sim_calb_tmr]))
         rtc_clock_skew_max[sim_calb_tmr] = uptr->a_skew;
     }
 else {
@@ -1740,12 +2108,19 @@
     uptr->a_due_time = d_now + (double)(usec_delay)/1000000.0;
     }
 uptr->time = usec_delay;
+uptr->cancel = &_sim_wallclock_cancel;          /* bind cleanup method */
+uptr->a_is_active = &_sim_wallclock_is_active;
+if (tmr < SIM_NTIMERS) {                        /* Timer Unit? */
+    sim_clock_unit[tmr]->cancel = &_sim_wallclock_cancel;
+    sim_clock_unit[tmr]->a_is_active = &_sim_wallclock_is_active;
+    }

-sim_debug (DBG_TIM, &sim_timer_dev, "sim_timer_activate_after() - queue addition %s at %.6f\n",
+sim_debug (DBG_TIM, &sim_timer_dev, "sim_timer_activate_after() - queue wallclock addition %s at %.6f\n",
            sim_uname(uptr), uptr->a_due_time);
 }
 pthread_mutex_lock (&sim_timer_lock);
-while (sim_wallclock_entry) {
+uptr->a_next = QUEUE_LIST_END;                  /* Temporarily mark as active */
+while (sim_wallclock_entry) {                   /* wait until any prior entry has been digested */
     sim_debug (DBG_TIM, &sim_timer_dev, "sim_timer_activate_after() - queue insert entry %s busy waiting for 1ms\n", sim_uname(sim_wallclock_entry));
     pthread_mutex_unlock (&sim_timer_lock);
@@ -1770,9 +2145,21 @@ t_stat sim_register_clock_unit_tmr (UNIT *uptr, int32 tmr)

 if (NULL == sim_clock_unit[tmr])
     sim_clock_cosched_queue[tmr] = QUEUE_LIST_END;
 sim_clock_unit[tmr] = uptr;
+sim_timer_units[tmr].flags = (sim_clock_unit[tmr] ? 0 : UNIT_DIS | UNIT_IDLE);
+sim_tick_units[tmr].flags = (sim_clock_unit[tmr] ? 0 : UNIT_DIS);
 return SCPE_OK;
 }

+static int32 _tick_size (void)
+{
+return (sim_calb_tmr != -1) ? rtc_currd[sim_calb_tmr] : 10000;
+}
+
+int32 sim_rtcn_tick_size (int32 tmr)
+{
+return (rtc_currd[tmr]) ? rtc_currd[tmr] : 10000;
+}
+
 t_stat sim_register_clock_unit (UNIT *uptr)
 {
 return sim_register_clock_unit_tmr (uptr, 0);
@@ -1780,57 +2167,204 @@ return sim_register_clock_unit_tmr (uptr, 0);

 t_stat sim_clock_coschedule (UNIT *uptr, int32 interval)
 {
-return sim_clock_coschedule_tmr (uptr, sim_calb_tmr, interval);
+int32 ticks = (interval + (_tick_size ()/2))/_tick_size ();     /* Convert to ticks */
+
+sim_debug (DBG_QUE, &sim_timer_dev, "sim_clock_coschedule(interval=%d, ticks=%d)\n", interval, ticks);
+return sim_clock_coschedule_tmr (uptr, sim_calb_tmr, ticks);
 }

 t_stat sim_clock_coschedule_abs (UNIT *uptr, int32 interval)
 {
+int32 ticks = (interval + (_tick_size ()/2))/_tick_size ();     /* Convert to ticks */
+
+sim_debug (DBG_QUE, &sim_timer_dev, "sim_clock_coschedule_abs(interval=%d, ticks=%d)\n", interval, ticks);
 sim_cancel (uptr);
-return sim_clock_coschedule_tmr (uptr, sim_calb_tmr, interval);
+return sim_clock_coschedule_tmr (uptr, sim_calb_tmr, ticks);
 }
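Callers still pass instruction-count intervals to the coschedule entry points; the conversion above rounds to the nearest whole tick. As a stand-alone expression:

    /* Sketch: round an instruction interval to the nearest whole number of
       clock ticks, where tick_size is instructions per tick. */
    static int32 interval_to_ticks (int32 interval, int32 tick_size)
    {
    return (interval + (tick_size / 2)) / tick_size;
    }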

-t_stat sim_clock_coschedule_tmr (UNIT *uptr, int32 tmr, int32 interval)
+t_stat sim_clock_coschedule_tmr (UNIT *uptr, int32 tmr, int32 ticks)
 {
-if ((tmr < 0) || (tmr >= SIM_NTIMERS) ||
-    (NULL == sim_clock_unit[tmr]))
-    return sim_activate (uptr, interval);
-else
-    if (sim_asynch_enabled && sim_asynch_timer) {
-        if (!sim_is_active (uptr)) {            /* already active? */
-#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS)
-            if ((sim_calb_tmr != -1) &&
-                (rtc_elapsed[sim_calb_tmr ] >= sim_idle_stable)) {
-                sim_debug (DBG_TIM, &sim_timer_dev, "sim_clock_coschedule() - queueing %s for clock co-schedule\n", sim_uname (uptr));
-                pthread_mutex_lock (&sim_timer_lock);
-                uptr->a_next = sim_clock_cosched_queue[tmr];
-                sim_clock_cosched_queue[tmr] = uptr;
-                pthread_mutex_unlock (&sim_timer_lock);
-                return SCPE_OK;
-                }
-            else {
-#else
-            if (1) {
-#endif
-                int32 t;
+if (ticks < 0)
+    return SCPE_ARG;
+if (sim_is_active (uptr)) {
+    sim_debug (DBG_TIM, &sim_timer_dev, "sim_clock_coschedule_tmr(tmr=%d) - %s is already active\n", tmr, sim_uname (uptr));
+    return SCPE_OK;
+    }
+if (tmr == SIM_INTERNAL_CLK)
+    tmr = SIM_NTIMERS;
+else {
+    if ((tmr < 0) || (tmr >= SIM_NTIMERS))
+        return sim_activate (uptr, ticks * 10000);
+    }
+if (NULL == sim_clock_unit[tmr])
+    return sim_activate (uptr, ticks * (rtc_currd[tmr] ? rtc_currd[tmr] : _tick_size ()));
+else {
+    UNIT *cptr, *prvptr;
+    int32 accum;

-                t = sim_activate_time (sim_clock_unit[tmr]);
-                return sim_activate (uptr, t? t - 1: interval);
-                }
-            }
-        sim_debug (DBG_TIM, &sim_timer_dev, "sim_clock_coschedule() - %s is already active\n", sim_uname (uptr));
-        return SCPE_OK;
+    sim_debug (DBG_QUE, &sim_timer_dev, "sim_clock_coschedule_tmr(tmr=%d) - queueing %s for clock co-schedule (ticks=%d)\n", tmr, sim_uname (uptr), ticks);
+    prvptr = NULL;
+    accum = 0;
+    for (cptr = sim_clock_cosched_queue[tmr]; cptr != QUEUE_LIST_END; cptr = cptr->next) {
+        if (ticks < (accum + cptr->time))
+            break;
+        accum = accum + cptr->time;
+        prvptr = cptr;
+        }
+    if (prvptr == NULL) {
+        cptr = uptr->next = sim_clock_cosched_queue[tmr];
+        sim_clock_cosched_queue[tmr] = uptr;
         }
     else {
-        int32 t;
-
-        t = sim_activate_time (sim_clock_unit[tmr]);
-        return sim_activate (uptr, t? t - 1: interval);
+        cptr = uptr->next = prvptr->next;
+        prvptr->next = uptr;
         }
+    uptr->time = ticks - accum;
+    if (cptr != QUEUE_LIST_END)
+        cptr->time = cptr->time - uptr->time;
+    uptr->cancel = &_sim_coschedule_cancel;     /* bind cleanup method */
+    sim_cosched_interval[tmr] = sim_clock_cosched_queue[tmr]->time;
+    }
+return SCPE_OK;
 }

-t_stat sim_clock_coschedule_tmr_abs (UNIT *uptr, int32 tmr, int32 interval)
+t_stat sim_clock_coschedule_tmr_abs (UNIT *uptr, int32 tmr, int32 ticks)
 {
 sim_cancel (uptr);
-return sim_clock_coschedule_tmr (uptr, tmr, interval);
+return sim_clock_coschedule_tmr (uptr, tmr, ticks);
 }

+/* Cancel a unit on the coschedule queue */
+static void _sim_coschedule_cancel (UNIT *uptr)
+{
+AIO_UPDATE_QUEUE;
+if (uptr->next) {                               /* On a queue? */
+    int tmr;
+
+    for (tmr=0; tmr<SIM_NTIMERS; tmr++) {
+        if (uptr == sim_clock_cosched_queue[tmr]) {
+            sim_clock_cosched_queue[tmr] = uptr->next;
+            uptr->next = NULL;
+            }
+        else {
+            UNIT *cptr;
+
+            for (cptr = sim_clock_cosched_queue[tmr];
+                (cptr != QUEUE_LIST_END);
+                cptr = cptr->next)
+                if (cptr->next == (uptr)) {
+                    cptr->next = (uptr)->next;
+                    uptr->next = NULL;
+                    break;
+                    }
+            }
+        if (uptr->next == NULL) {               /* found? */
+            uptr->cancel = NULL;
+            sim_debug (SIM_DBG_EVENT, &sim_timer_dev, "Canceled Clock Coscheduled Event for %s\n", sim_uname(uptr));
+            return;
+            }
+        }
+    }
+}
+
+#if defined(SIM_ASYNCH_CLOCKS)
+static void _sim_wallclock_cancel (UNIT *uptr)
+{
+int32 tmr;
+
+AIO_UPDATE_QUEUE;
+pthread_mutex_lock (&sim_timer_lock);
+/* If this is a clock unit, we need to cancel both this and the related timer unit */
+for (tmr=0; tmr<SIM_NTIMERS; tmr++)
+    if (uptr == sim_clock_unit[tmr]) {
+        uptr = &sim_timer_units[tmr];
+        break;
+        }
+if (uptr->a_next) {
+    UNIT *cptr;
+
+    if (uptr == sim_wallclock_entry) {          /* Pending on the queue? */
+        sim_wallclock_entry = NULL;
+        uptr->a_next = NULL;
+        }
+    else {
+        if (uptr == sim_wallclock_queue) {
+            sim_wallclock_queue = uptr->a_next;
+            uptr->a_next = NULL;
+            sim_debug (SIM_DBG_EVENT, &sim_timer_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));
+            pthread_cond_signal (&sim_timer_wake);
+            }
+        else {
+            for (cptr = sim_wallclock_queue;
+                (cptr != QUEUE_LIST_END);
+                cptr = cptr->a_next) {
+                if (cptr->a_next == (uptr)) {
+                    cptr->a_next = (uptr)->a_next;
+                    uptr->a_next = NULL;
+                    sim_debug (SIM_DBG_EVENT, &sim_timer_dev, "Canceled Timer Event for %s\n", sim_uname(uptr));
+                    break;
+                    }
+                }
+            }
+        }
+    if (uptr->a_next == NULL) {
+        uptr->a_due_time = uptr->a_skew = uptr->a_last_fired_time = uptr->a_usec_delay = 0;
+        uptr->cancel = NULL;
+        uptr->a_is_active = NULL;
+        if (tmr < SIM_NTIMERS) {                /* Timer Unit? */
+            sim_clock_unit[tmr]->cancel = NULL;
+            sim_clock_unit[tmr]->a_is_active = NULL;
+            }
+        }
+    }
+pthread_mutex_unlock (&sim_timer_lock);
+}
+
+int32 sim_timer_activate_time (UNIT *uptr)
+{
+UNIT *cptr;
+double inst_per_sec = sim_timer_inst_per_sec ();
+double d_result;
+
+pthread_mutex_lock (&sim_timer_lock);
+if (uptr == sim_wallclock_entry) {
+    d_result = ((uptr)->a_due_time - sim_timenow_double())*inst_per_sec;
+    if (d_result < 0.0)
+        d_result = 0.0;
+    if (d_result > (double)0x7FFFFFFE)
+        d_result = (double)0x7FFFFFFE;
+    pthread_mutex_unlock (&sim_timer_lock);
+    return ((int32)d_result) + 1;
+    }
+for (cptr = sim_wallclock_queue;
+     cptr != QUEUE_LIST_END;
+     cptr = cptr->a_next)
+    if (uptr == cptr) {
+        d_result = ((uptr)->a_due_time - sim_timenow_double())*inst_per_sec;
+        if (d_result < 0.0)
+            d_result = 0.0;
+        if (d_result > (double)0x7FFFFFFE)
+            d_result = (double)0x7FFFFFFE;
+        pthread_mutex_unlock (&sim_timer_lock);
+        return ((int32)d_result) + 1;
+        }
+pthread_mutex_unlock (&sim_timer_lock);
+if (uptr->a_next)
+    return uptr->a_event_time + 1;
+return -1;                                      /* Not found. */
+}
+
+static t_bool _sim_wallclock_is_active (UNIT *uptr)
+{
+int32 tmr;
+
+if (uptr->a_next)
+    return TRUE;
+/* If this is a clock unit, we need to examine the related timer unit instead */
+for (tmr=0; tmrtmxr;
+int32 ticks = (interval + (sim_rtcn_tick_size (tmr)/2))/sim_rtcn_tick_size (tmr);       /* Convert to ticks */
 #if defined(SIM_ASYNCH_MUX)
 if ((!(uptr->dynflags & UNIT_TM_POLL)) ||
     (!sim_asynch_enabled)) {
-    return sim_clock_coschedule (uptr, tmr, interval);
+    return sim_clock_coschedule (uptr, tmr, ticks);
     }
 return SCPE_OK;
 #else
@@ -3937,7 +3938,7 @@ if (mp) {
         }
     }
 sim_debug (TIMER_DBG_MUX, &sim_timer_dev, "scheduling %s after interval %d instructions\n", sim_uname (uptr), interval);
-return sim_clock_coschedule_tmr (uptr, tmr, interval);
+return sim_clock_coschedule_tmr (uptr, tmr, ticks);
 #endif
 }