From 39d2944ede21f1d32b588a319740c2e315753164 Mon Sep 17 00:00:00 2001 From: Mark Pizzolato Date: Wed, 16 Nov 2016 23:50:53 -0800 Subject: [PATCH] TIMER: Add support for catchup clock ticks and cleaned up asynchronous clocks Asynchronous clocks are now built for all simulators which are built with SIM_ASYNCH_IO defined. The default behavior has asynchronous clocks disabled since this is still experimental, but it can be enabled with SET TIMER ASYNC. Catchup clock ticks are now available, but since they're experimental, they aren't enabled by default. Catchup ticks are only available if the simulators clock device calls sim_rtcn_tick_ack to acknowledge processing of clock ticks. The VAX simulators have been modified to leverage this. Catchup clock ticks can be enabled with SET TIMER CATCHUP Additionally, an idle threshold is provided which can be used to influence when clock calibration may be suppressed. The default is not to suppress calibration activities. The various timer behaviors are visible with the SHOW TIMER command. The state of the operating timer facilities is visible with: SHOW CLOCK Timer events which are queued are visible with the SHOW QUEUE command. --- README.md | 14 + VAX/vax610_stddev.c | 2 + VAX/vax630_stddev.c | 2 + VAX/vax730_stddev.c | 7 +- VAX/vax750_stddev.c | 7 +- VAX/vax780_stddev.c | 7 +- VAX/vax860_stddev.c | 7 +- VAX/vax_cpu.c | 12 +- VAX/vax_stddev.c | 7 +- VAX/vax_watch.c | 7 +- doc/simh.doc | Bin 233984 -> 241152 bytes scp.c | 57 +- scp.h | 4 + sim_defs.h | 357 ++----------- sim_disk.c | 2 +- sim_tape.c | 2 +- sim_timer.c | 1214 +++++++++++++++++++++++++++++++------------ sim_timer.h | 15 +- sim_tmxr.c | 5 +- 19 files changed, 1049 insertions(+), 679 deletions(-) diff --git a/README.md b/README.md index 7528baaf..bbb63867 100644 --- a/README.md +++ b/README.md @@ -121,6 +121,20 @@ Host platforms which have libSDL available can leverage this functionality. Asynchronous support exists for console I/O and most multiplexer devices. 
(Still experimental - not currently by default) +#### Clock/Timer Enhancements + * Asynchronhous clocks ticks exist to better support modern processors + that have variable clock speeds. The initial clock calibration model + presumed a constant simulated instruction execution rate. + Modern processors have variable processor speeds which breaks this + key assumption. + * Strategies to make up for missed clock ticks are now available + (independent of asynchronous tick generation). These strategies + generate catch-up clock ticks to keep the simulator passage of + time consistent with wall clock time. Simulator time while idling + or throttling is now consistent. Reasonable idling behavior is + now possible without requiring that the host system clock tick be + 10ms or less. + #### Ethernet Transport Enhancements * UDP packet transport. Direct simulator connections to HECnet can be made without running a local packet bridge program. diff --git a/VAX/vax610_stddev.c b/VAX/vax610_stddev.c index 1436f32a..e2d7d9df 100644 --- a/VAX/vax610_stddev.c +++ b/VAX/vax610_stddev.c @@ -248,6 +248,8 @@ void iccs_wr (int32 data) { if ((data & CSR_IE) == 0) CLR_INT (CLK); +if (data & CSR_DONE) /* Interrupt Acked? */ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ clk_csr = (clk_csr & ~CLKCSR_RW) | (data & CLKCSR_RW); return; } diff --git a/VAX/vax630_stddev.c b/VAX/vax630_stddev.c index aa390704..d7476cad 100644 --- a/VAX/vax630_stddev.c +++ b/VAX/vax630_stddev.c @@ -226,6 +226,8 @@ void iccs_wr (int32 data) { if ((data & CSR_IE) == 0) CLR_INT (CLK); +if (data & CSR_DONE) /* Interrupt Acked? */ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ clk_csr = (clk_csr & ~CLKCSR_RW) | (data & CLKCSR_RW); return; } diff --git a/VAX/vax730_stddev.c b/VAX/vax730_stddev.c index b06d781e..5926e014 100644 --- a/VAX/vax730_stddev.c +++ b/VAX/vax730_stddev.c @@ -644,6 +644,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? */ if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? 
*/ tmr_icr = icr_rd (TRUE); /* update itr */ } +if (val & CSR_DONE) /* Interrupt Acked? */ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */ tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */ (val & TMR_CSR_WR); @@ -859,7 +861,7 @@ int32 todr_rd (void) TOY *toy = (TOY *)clk_unit.filebuf; struct timespec base, now, val; -clock_gettime(CLOCK_REALTIME, &now); /* get curr time */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ base.tv_sec = toy->toy_gmtbase; base.tv_nsec = toy->toy_gmtbasemsec * 1000000; sim_timespec_diff (&val, &now, &base); @@ -874,8 +876,7 @@ struct timespec now, val, base; /* Save the GMT time when set value was 0 to record the base for future read operations in "battery backed-up" state */ -if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */ - return; /* error? */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ val.tv_sec = ((uint32)data) / 100; val.tv_nsec = (((uint32)data) % 100) * 10000000; sim_timespec_diff (&base, &now, &val); /* base = now - data */ diff --git a/VAX/vax750_stddev.c b/VAX/vax750_stddev.c index a61e9a71..9e62811c 100644 --- a/VAX/vax750_stddev.c +++ b/VAX/vax750_stddev.c @@ -666,6 +666,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? */ if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? */ tmr_icr = icr_rd (TRUE); /* update itr */ } +if (val & CSR_DONE) /* Interrupt Acked? 
*/ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */ tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */ (val & TMR_CSR_WR); @@ -895,7 +897,7 @@ int32 todr_rd (void) TOY *toy = (TOY *)clk_unit.filebuf; struct timespec base, now, val; -clock_gettime(CLOCK_REALTIME, &now); /* get curr time */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ base.tv_sec = toy->toy_gmtbase; base.tv_nsec = toy->toy_gmtbasemsec * 1000000; sim_timespec_diff (&val, &now, &base); @@ -911,8 +913,7 @@ struct timespec now, val, base; /* Save the GMT time when set value was 0 to record the base for future read operations in "battery backed-up" state */ -if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */ - return; /* error? */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ val.tv_sec = ((uint32)data) / 100; val.tv_nsec = (((uint32)data) % 100) * 10000000; sim_timespec_diff (&base, &now, &val); /* base = now - data */ diff --git a/VAX/vax780_stddev.c b/VAX/vax780_stddev.c index 4694d54f..a6bf3738 100644 --- a/VAX/vax780_stddev.c +++ b/VAX/vax780_stddev.c @@ -623,6 +623,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? */ if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? */ tmr_icr = icr_rd (); /* update itr */ } +if (val & CSR_DONE) /* Interrupt Acked? 
*/ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */ tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */ (val & TMR_CSR_WR); @@ -835,7 +837,7 @@ int32 todr_rd (void) TOY *toy = (TOY *)clk_unit.filebuf; struct timespec base, now, val; -clock_gettime(CLOCK_REALTIME, &now); /* get curr time */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ base.tv_sec = toy->toy_gmtbase; base.tv_nsec = toy->toy_gmtbasemsec * 1000000; sim_timespec_diff (&val, &now, &base); @@ -851,8 +853,7 @@ struct timespec now, val, base; /* Save the GMT time when set value was 0 to record the base for future read operations in "battery backed-up" state */ -if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */ - return; /* error? */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ val.tv_sec = ((uint32)data) / 100; val.tv_nsec = (((uint32)data) % 100) * 10000000; sim_timespec_diff (&base, &now, &val); /* base = now - data */ diff --git a/VAX/vax860_stddev.c b/VAX/vax860_stddev.c index d6a1c1da..4178f036 100644 --- a/VAX/vax860_stddev.c +++ b/VAX/vax860_stddev.c @@ -747,6 +747,8 @@ if ((val & TMR_CSR_RUN) == 0) { /* clearing run? */ if (tmr_iccs & TMR_CSR_RUN) /* run 1 -> 0? */ tmr_icr = icr_rd (TRUE); /* update itr */ } +if (val & CSR_DONE) /* Interrupt Acked? 
*/ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ tmr_iccs = tmr_iccs & ~(val & TMR_CSR_W1C); /* W1C csr */ tmr_iccs = (tmr_iccs & ~TMR_CSR_WR) | /* new r/w */ (val & TMR_CSR_WR); @@ -962,7 +964,7 @@ int32 todr_rd (void) TOY *toy = (TOY *)clk_unit.filebuf; struct timespec base, now, val; -clock_gettime(CLOCK_REALTIME, &now); /* get curr time */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ base.tv_sec = toy->toy_gmtbase; base.tv_nsec = toy->toy_gmtbasemsec * 1000000; sim_timespec_diff (&val, &now, &base); @@ -977,8 +979,7 @@ struct timespec now, val, base; /* Save the GMT time when set value was 0 to record the base for future read operations in "battery backed-up" state */ -if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */ - return; /* error? */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ val.tv_sec = ((uint32)data) / 100; val.tv_nsec = (((uint32)data) % 100) * 10000000; sim_timespec_diff (&base, &now, &val); /* base = now - data */ diff --git a/VAX/vax_cpu.c b/VAX/vax_cpu.c index 0249f216..788dd9b0 100644 --- a/VAX/vax_cpu.c +++ b/VAX/vax_cpu.c @@ -428,12 +428,12 @@ MTAB cpu_mod[] = { }; DEBTAB cpu_deb[] = { - { "INTEXC", LOG_CPU_I }, - { "REI", LOG_CPU_R }, - { "CONTEXT", LOG_CPU_P }, - { "EVENT", SIM_DBG_EVENT }, - { "ACTIVATE", SIM_DBG_ACTIVATE }, - { "ASYNCH", SIM_DBG_AIO_QUEUE }, + { "INTEXC", LOG_CPU_I, "interrupt and exception activities" }, + { "REI", LOG_CPU_R, "REI activities" }, + { "CONTEXT", LOG_CPU_P, "context switching activities" }, + { "EVENT", SIM_DBG_EVENT, "event dispatch activities" }, + { "ACTIVATE", SIM_DBG_ACTIVATE, "queue insertion activities" }, + { "ASYNCH", SIM_DBG_AIO_QUEUE, "asynch queue activities" }, { NULL, 0 } }; diff --git a/VAX/vax_stddev.c b/VAX/vax_stddev.c index 97db3661..eb124cc3 100644 --- a/VAX/vax_stddev.c +++ b/VAX/vax_stddev.c @@ -299,6 +299,8 @@ void iccs_wr (int32 data) { if ((data & CSR_IE) == 0) CLR_INT (CLK); +if (data & CSR_DONE) /* Interrupt Acked? 
*/ + sim_rtcn_tick_ack (20, TMR_CLK); /* Let timers know */ clk_csr = (clk_csr & ~CLKCSR_RW) | (data & CLKCSR_RW); return; } @@ -484,7 +486,7 @@ if (0 == todr_reg) { /* clock running? */ in the 32bit TODR. This is the 33bit value 0x100000000/100 to get seconds */ #define TOY_MAX_SECS (0x40000000/25) -clock_gettime(CLOCK_REALTIME, &now); /* get curr time */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ base.tv_sec = toy->toy_gmtbase; base.tv_nsec = toy->toy_gmtbasemsec * 1000000; sim_timespec_diff (&val, &now, &base); @@ -507,8 +509,7 @@ struct timespec now, val, base; /* Save the GMT time when set value was 0 to record the base for future read operations in "battery backed-up" state */ -if (-1 == clock_gettime(CLOCK_REALTIME, &now)) /* get curr time */ - return; /* error? */ +sim_rtcn_get_time(&now, TMR_CLK); /* get curr time */ val.tv_sec = ((uint32)data) / 100; val.tv_nsec = (((uint32)data) % 100) * 10000000; sim_timespec_diff (&base, &now, &val); /* base = now - data */ diff --git a/VAX/vax_watch.c b/VAX/vax_watch.c index 03ee3207..afbb1660 100644 --- a/VAX/vax_watch.c +++ b/VAX/vax_watch.c @@ -162,11 +162,13 @@ int32 wtc_rd (int32 pa) int32 rg = (pa >> 1) & 0xF; int32 val = 0; time_t curr; +struct timespec now; static int mdays[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; struct tm *ctm = NULL; if (rg < 10) { /* time reg? */ - curr = time (NULL); /* get curr time */ + sim_rtcn_get_time (&now, TMR_CLK); + curr = now.tv_sec; /* get curr time */ if (curr == (time_t) -1) /* error? */ return 0; ctm = localtime (&curr); /* decompose */ @@ -296,7 +298,8 @@ return SCPE_OK; t_stat wtc_set (UNIT *uptr, int32 val, CONST char *cptr, void *desc) { -if (cptr != NULL) wtc_mode = strcmp(cptr, "STD"); +if (cptr != NULL) + wtc_mode = ((strcmp(cptr, "STD") != 0) ? 
WTC_MODE_VMS : WTC_MODE_STD); return SCPE_OK; } diff --git a/doc/simh.doc b/doc/simh.doc index 4eaac45a1665e15c3be217f63d68daa73679219b..89a1fadb1a4bdeffd517d0ada7272ba5b1a09cd9 100644 GIT binary patch delta 34562 zcmdVjd0qqckB`pL`W=S-^pqzMX0T{*IG+NG9+Yc7CWP*cC`#{TUB(?#Zsz- zDn(KI)}oZ!)f!5XwwB-Xb?4q>lHl`xe?PxJzTb?;%$+-T?mg#q&OPVcJ9ozTPm7E{ zU1WAZ&BGSO_2;E1KQk>)pFDl~RA%ny$pIY1A&b_zyLJ4U#HyCLYMCPAo?4V5%D^E5 zhx{|@pHbS<-fD60JSCNoE2{FISLeQB+(T_nAFH2pfSSh{s|GnPu?6{)$G+uU(=U1~ z;aWt$Cqa6O5{>sg>oJ8?UMjCBwN*tiUvD@_QEKwstD@pilJY3Z+f`j_ecofgYKn3_ zpQ5y>t0<3HVaouP=kYhYqOcz243F~npFR$ZR+RO;vDDDQit>f=MjvMvwRj;j?=1s) zTGAm~mOt~cWuWW1`Dkv@H8*z%%^aDp%k$rOa+egBy@03vi>rP5}a8k=8DYp zna?Ez?tjaJoZIr&uWP0+_sdz^d)ulgX%UJtQPl#+tFJnW8mlr_Z5HqNVTHd6YkHJ; zyZ+4i;iNHuxs9U~rM3AYk3X>Q$TRkO2Oi?|9WUrURYddDSqcgPhmB>KN0(&T`(t#&n3Y zmv?<*I@CGeyPh#!-?`5_)R=DI%wMdDG2PJlYB6&?jhy3(1-a%21~}7-NqR^_V4(AI zu^L5P(?Pbr(Kbtoz+h+D;#G`!!N$B$XP4sDjCmoM^ClNJ-y3SoYvBB$c$KEE_0>1$ z`FMuZ4Xhhnuw_DgihV>%!Q!^gVQpGQ?9c<)-d1HNz?FJk92x{P5<6FyEpP7$F&Kthf{q;Bp zoB3!Y#S%)fw6(`3Ivpi_jrW*&YUJ!zs*y3z%u^%hhoy`>1siz^CPt-d8_OAaVqU@0 zHGTB$g&27X4)E~|ts7WS^Qvg^ao+a!cD*IU$R2MQTiQr=h><RvDw@Lyhc(ggUE~HNp^TWRH3M%LW+RFtQiYz`3Zb z5!O&6dm#;-Co<<5ISOgG*1Oz>P971FDKWz$QtaUo1CzD$B`hVJ+hX@RJ>yC_Yzf2c zNl7tL_GDX1wA~h&nv`UZPl+9AOSH#F#l#P`4UdRPk;g=PQcOZrT~E)H@Z^+;6kBpk zTzJ-M2c_7PY}LDWY}d_JGc_?KskSXOCO(AM5Oan?8|NRmu&+hlI>Bpgm^t2tUWb0A|)YdZn7;hA~t4V5~+`}MaCvX4rM)I zHrIO+2H65^tTNUrgjj`moTXaG)+e?T`Ol4!qw&;jq zvQmmVW|&=ST>@`RvL`3RQb(u@naOtyr68NWYzqvCORgdDsKsv6R85c3q7H=opDG9Ea4T?yX znw=8uh;UU*SAyJ9$flpMRkqeHo@7PIn-gq_NeRO!I*NcDjGC+`%`CL!+O`PUom>^( zmK>ds8XIMcPoSJf>OmwQF%EV?qmU))?0^z_<6R^;c_ayqBc92&sF=Zao=|pd*FG#J zAvKxE4z|bJ-F6~Zq1dwR&Pgee@!?Wc*s+J&s=Ibzy%N{9ITxk)Ibz~nl}a`(iIOy> z4m38MnW#7^;v{=yX3a7}Nu;u;*%eDk@<6*iJ}cb=UD?i+Im&>NdDgZgDV@ZG6xlZ- zV##cBWVAghm5LET>yZ)}ohp?fk}4E4i1LlJCnrZFjg&GPIBxRsIGu&%Sq=sAad)o9r=Ye4rOHxHs^!?jb zy=r9aP<@j4E*w_hd3{(7M?tckt6_CpFi?h=0Ri>ot$LoqX_>=mWfS6~YBcurELg9m 
zvDYNo2gm5Oms&@TZ8huhs;oUnIR^#Q<{d-h6XfmA62m7rJi8I()Oady>_{nO*^!d6 zs^9RK*jQUcYDz*JyDhCV0Z&Se*CUy|rKXLd*3tgC7d+JsEm2E`+gbZSyR?FtU7y}Uy7sBsZaa7sdosmnH2aAOyis3WJT0lMI;BsW zNqv_+#Gb^iNjpx9k`j{`>#&bVps9^WvfJz^9 zvag%15owK*f->8k2%2&!1?e9$OUT$^rO>2?q{c@KONfbz$TyG*GSKcE(AC#Lap?Vs z-bT<3#L2EB>q(K3L3BhS!w$V;(N`K7EB%(fTj@PZ5`Ca6BblukD-5IiOO1@yv*eZo zqaDp`3Mlp%M-t71E6-WIp-~p}VuPi}8y=Gqt=EyP_P~s~gwT~niCRSbNWGZZGo)uT zqD^NsD977ft%eMpN@b0m^tFdaCb-h)3>aOqu-SXouq|-zj&0(gV$jm)D~qxZil8+0 zSW&}`)<1KkVZKxPe|B3LEJ&`XhtX0i;#1=WQkNxR`kt(}8xpyAnlhu)moDAzHkxW| z^Nwd!CH->dLeB1Ssl{bLk(ut;tRdz96UnbHgWfLxt3hw}f+GMaaY_16tvD04B~}c3 z^$0(s9J2<8u9C>CY*{a46_$Gi9B5A&E}djd{ICSN9>zv8ilYGay(5xhPm%J?8Vxt5 z2Xhr)UVRxUJKvmTM~X8K3I4B7zFv0q{>!qPuyRz|%2AebK1CB~0O*J!674pc`eZF| zwxxL5omrO1h&%;2OQw0=wM^3H-?h|o_E!sOcRbX5ugLkMd<85eqf>(tl@In%uUwUqQ2>lTN! zI}a^ot$HSSt-Lu=(qpJKD{cDQs;6^)_j#$!l|${Yw^}fBZQ9{~Ed@(IBQmb!rrrJ5 z;%muV+T(QItSz~=9qu+KXVK-}_LRq#q8*;GHTO{DTubi2^h=VbbdI>{)f(&nJ4iy1=NWJqoMkpS4h~82wM_ z(OW0)Vp$nZyHi*#>eetkzd7_L%#+igzHQp>q(RrknMj{i}L^15w9yW^?;omrf#kdBq+*2L}Cb1@C1bu6=gNHq4S%H(i`jW1L`FyN+V3eLIfl$N)vpJjN2Jn zcqO%J_>tX5Hk;NlH~WoIM3Iuw6AKJeI=!kYU7(cki9QfIhGwBe`w>}l>00G^Xo0uP z1$iHHwDEyz8Es1=HNUp6k{V)Zv%a!gsz6uC>SIV2tHI3L1G4lGUy_}#(3Whp!%D0| zb+SvgeVY_TilW4%D#|d7##l_n z-*|+_P=-;h&>np;1Vb?sjt{u|1J_Y&I4>X;aae)%*o#BBfa|!0$0I0zJcTk+QSze{ zY^Z|4NXCQo>*){9r=Lte{L}Y8eV@MiWcq{j!|5kKTQq;B+#K^4y*+=?@S(#O4PWHz z6a96RCj~3_ZUsq!XU#WDS1D04dQ8-E#aF-2oJ*m9R{KI$wcxqD*Zp;QSLXBfnJbU! 
zmM3dAN!iuxN289_&#F@_oi#smB}Uz=uYFfZEmP7}@3PkAdR1FJSS_I)j8hA0mz$}6 zku`f!a#DIjF_2P<#3Tw~GRy)ngPS^vLd3$+it-`8#%dfzV`XkK{1%=j3l-^MIx_!{YWlS7j4@FPy*0PPaQmUVb^B;wr*F} zZRM_YpUG{}XY1bAUz}~+D{g<<)@Ew?U>Tcd)-7t27V~4a=Q%65v~v9;)Y=YetyFH; zj^S2oSx2@?pL3_6Rxk6vZ|r!PTh81w+;^b*tBi>5=kWk;!H8}%C5cd)A^eswP9$>g z!0RnV@kS5yMEnHmD9%h&l;1IUlA=UU(&h}LzAmXj-E`g6X3EQ7t)_X^QbV-G!+F|& z7Eh_eey%!va=YqT_66Wy)-S{iv|iVtu}pfnKjmW>E5ZjQo{0^5+;v=BDE`nUhRj#uG?J6EFqG zCsVKSJ09R4{ENS)C`$NLdR&~qMLdE!jSF`0I%1K84cLjRNJo?DiqdL2JIxa(b#|Vj zjKM_2&sUV;7=uZein-9R6uTF2X&R=yrzmr95myoT0avu(y@iVM35I^CC?hat5jzJG zKT?!?H!j>bb>!48Y+1Ku<*6g<988^J?q`{nGWG1+(i;?CzLL?>{z_l}u;|2WwS9os z;H>AgrBa%1odfTXIqtRgKxhpa9!Sw;&gPBsJo_7&ley!#nsrHp+cHYCXt#nIKp{w> z9KvBVC*W(cg08eC_)lk$o0)i*>VbG9AY>Np23pMKLMS}JQ-o1YE%I>pa6vz{w5AUb zT&=U-M9baCH?!TU#;P$3#)zreWk5HZ6E4RK^=#6*w-Neew7aQpch&nmT6IwV6yeUaZZDSL?gC5)zNYiL?kE@~dImnd(gG!>+OUctIb9J$EYn zO%|rD@z&0Ss-+x^xk~AKwH_2};no&K@s}Dt7LS+k(o#iffR`{4Q?LbRdu+#XoW>uxg+K8S#g-92ltqJOG?Y!52tx#-5sS%Ru&dz;Zs8uDK>d;i6@4)t z?_f37;Vdqq^H++}4RfJkCDz~&e#I@^L6PN(;_Kk9HUiKQJum{}umgLcu3)H#X_$>8 zc#wW2{S4%G@>u$QMtD2Yzfb>mhw<-$JU+;FlVwciD@s20ugdhpY_!pBYi77^DNJ%WkS)9WicoW$A7=Yx^czG2S z6ThL>*R+S|i$%DK>Z{p@u@d>#uYwN`g2qV+<}63-q6uSU@|RpjL4BNor0QDe^W_6=|ewosqh~jTE*tQaRU1=~*MS z0i;k;+y;_rJ4V6*$-&2vT-=7_q_&Zpgw^b)lBIQ!EVVYWC0SaH_3&NGPL0>m3)`>@ zmv9ZQeM{lsdu+!(9ER^Y-iL~)hNC!vee2m@aRwLrad$9{OU%(`6D1A5?>Ux52)ZC? 
zGgt0n7Y-sFccE-yw1UF$K^auU6ll1-MJv}`EmibNw%zrDR-wD<+v0*gQ|>QtK=%OV zjqX*tPPZ|qTaP}aa5u9f)=o83t2)jRf%8Zs#~V>_!?ati)*)`i@exXmTjELU{l7K9KHK#=gM%9 ze!Zt!#F5oSmU8W}Wn^{nNPrpt&Xj=}`>w=)2(ccD@x=5kbohZz5`FOtj-k$0POsoP z9wGxt+i1gZ4977o&vr%mOnbAfT2otGnf8Y}k{HDl$8U^>tI_A_2X^MuYVxY5Rxg=c zu&}2?;g7wjc~xUiat(U*p{#|M%3LOS*YOAn$tr{*@PR+fq@Q8>I?}c($|jh}9z`-I zVFR|}5PrrqN%?lhqN9K02{bIiR_wqL9K#7*#IPL{F_z#{+(HH(qrgtauULQ&(SH}O z?WXGNp=HEJIJuWS7M6YVQFslV_Bq%KndpIjh`=BW!4aH8?E^HK(6AU44ze@hb@V}h z3_>!-VlvELi?bWtT=%X=*Iwd!o&BS6`jHDqSU`4|te51udE1jt;B)*LU14G$wXAj^ zO)aMF8L9ec86{OO?Ph7UctU$3t1Lq@flD!h7*C)QuneDLm=UzEc>ET(@CY6RuLz1F z0L{@C64Y?C`HAiYW`IlVah=@SGM8%S6-Mpy(z`-AAY1t<6|ig=m9j${p57|M6g?w* zkUxFjrZ?46+Krj2SAu^DO6Ulsa}V|6xgR8LhvhkhFSc?h_qdd$!)u9P&{;Sm?%+QDMIO>p2+h$Nl9qR&;Q)#pr9u0d5kG1lrx(PU+{z_&}Qr+QuiVh#;Q3>799dF7T_O#up`GJ4~EoQB^qCBM@57-NiMTA)+yCpERBtMpwJS5s$c=hmr3wHrNEFGraV zSoAlPC-yZU1yTpgAq8^*l_-lC48baxWmCi`pQ%O}X>R3Imbjbc5zO=wEXB9P|L@G* zT#IW=lfGn$RM0SKs$XM2TJjQ4%PwbX@g}zy=%@7MIx#QD3+KXAn?u&T4pPcHu?w}% zkR!Z=NX*7YTtK1U88qW{Bw-;o;{uAERg`AvixF6a?f3&8=O{Jw!sE=GDAJ!6&Fq-^ zXw7@lOg!uETnveoYG@|XgT#2DGO8mO4bTFuF`5lIunwE>2X4c5p5YN95rZZ86qj)w z8L(Vn-S9yu8e-f9x}(WVe1UJV7sqiKkD*+ouS7d^zyiF7FYpzDZsC0=8 zAQ}@f2MbW~GMPhl48XL@4*EYPcH;m#USYh3WPAtp4|aYOLn-vXPUS;zI_C}09|?FQ zCtdE{IK}PWe|7V{cjKsjleg`?@koC&_uBW})k@mfK()MfI#uJi|8CT^j3?;`^(#X2xLh#M;MYI z0e%M(Ip6y|02vHnt{zr+SSg%^pG z3{-)ci6u;bf;4=Go!Eo@IEwisejytC$siAoiQG-WZtOz_9;3lsPA4E537C(C*oDIKGVQtlP8BU~+)}wkD_Q-A+Mfp0d>ES&nnszKnLWnp&WwKWoma9D}1S)<~f> z*H-sdLmj1NW>9U3+pD;Mi?IH~$O}Ue3n%8`I&Q%BFX_c(Ou>4jL3zY_Q4Fon8u5>* z6j*_kDEX8V^RCv2du>s?blO>;Q2Z~Z%F14)pPAIB^plt7nteIy)OUaO6m}H-l&n98 zYEolTun^y%5J~gG2Uvn?W8tXBerfCkY;{k$Hi_!oCF$jb44Q}EV{=z@_7mC%QL?Q}tNJ273VFNZ|J9Z;t z4|j**k;kHVqXhh6LuF*#$++`}aXWkGMAr0)gF6qN*nDCg4xU)~*-BpHNw#}=Uxqd{ zg4XI(1TEc=KJIwqfyHQ&fU*Zg?Q zQ}f{DnH-2wy%Oj;6BB5guhG3|S0Ys(*98Y>EpJde*s8w-Q!;P^H;qi}Clhl>v}DB0 zghFETp?*P&@)BBO96A&x)94F3reFr@dQdxHM*>nX7LMcGox(&I zK32jkA25~Cz8}oNaKi{jGkqc`h_*~Q+%Gr?lX}n_Bd`fqP{J+D%b31~dW9^?aE!o7 
zoWdo-eHj5nG0=$QK_ilOqBIC2uo$~=8GkFiU%ja-!e?rvVOd+sWyx z16tTvwTc$dox{CRgVnCu>1JxB*p5;T;+VCTDaw)U)bbY+Il&Mq(b;;8#3CfImO!(n55*I0C9;fJ^Z@e2ytj43^J~IQ^X2x6r}ppCeV%$Oh*DyHFg5+R)(1aIUCtW~xh?srGjm=b-(h zx=AA6!v<`@cKnDEB+wr{(H}39s4xt|P|U;}T*3`ZC6Ti+*hY26YNX*jt|E#I6spK_ z71=>QWMX_Ja)*^zixc<_r*R&4a32|Xgr1dYAutN#FcY(phM(|i720s5U>L?>BEH8~ z?85<6tojNyu_|xIJej7)xPylnSc76fqnZ?5O|4^`TDIjM85c87 zWgNwAxNf)gd3*J_=Ddp;7k6G{nTvm1Ea>{MhU*+Sbht70f*d;dX`?!DmU&m4+9!ace=6wyG}|Is3wz3G=e*{@^tNQRRsy|-p9Iki-Y^3A zHGV~Y0$CUJ;R-4(P%WBm{EoBmt3x$JAcBy9MEKXWC}pqzd zGc#1pJwry;Qnhon)FOGMSG8)AScfF`B@)vfuOl2&F^+`1gCjJXaVShe zilZ_Du^&fq9+z+#14w8x&f+R61ki8by#V%$rA(~A3FHg3C`A#BCP>B<+{a^72(l^EK#AllkqhEQfMJ#H=jL%f2OOro8R3;l$+_IFx^b)eL6e%P-6>aJh zJ2Yy@ZjCPJirmKq9{MMG+DbW1d}tVru)gDDJx!O-H1%C$$rM^*o>r5m4Bx8JWvBO~ zN4edOj|8R0(IfCr@t5FrLImuHM-s;19ZbedoWVupC8Y&X9u*OSh6qC|#57=x1uHr5 zz)#qR!}uAu8&b{wV#1>l)eF^82kp@ro3RxgUZTlG9}L1gXjp@F*nxw11hp|k8HC{- zOu=%jMT;h^8*wi)bcSD3s#&?F)I*%bmS)sTeAb-fGV~9#DA8DhG}LXuO0cXYxxqQy zz+Rg1eQ=GJb=)N8p* ztrGbX01^zKC~Q(~RItgtety(W)JTYoxwPGlSx<(Pg1! 
zd&bfD9cSAUyUJ>48avhii$jY2FmB^7WA)~$ceCn~xQrXPhrjW#4F?ja#3n<~4sT!x z5-|pEV>Q0TIb6b3+(0#wSPS*qu>%D&5h-a#57Hcsv6zK@IE0^Z5}xG68_}`n4j?s51Y-=|h7$`QCuXi*y87GIpSkVj z(VW`LGr7r}-;C+)8}-}j?Q)Yh882}Pdb-+H`*bV^bkPxvOZ4Ms{o?uLKAa$D=ScQk z%|#`youieRN|h8=AQ2Vu9X6ptC!V86XPOfnzy(~yFI_m+#H(G21o~kdmSZpO;yxZA z?seKm{PwzD6Z|tTtY?%TL=!7#@zS2oP%RdR#9=vBp&6Tg6@A%U96rHvn8|p`bU~6( zjZM@;OSD0ML}4F}AeBu|z-%nQ25iC!oWc)1spy#Ao7yp>H#-e>y+KXDvOesZxP@AM z$pfy!uOH(W)IkuIVinHg0{%h<9#Y`e2#eBfApHaqkc!E0;%ltMUK|`q^V%ko7w{pz z!f9N>g#s%ZdwhUFT=mS|!iDw+xqR*BWrGGZ>WdYapdLCt(%J5o` zX66+}oF&yR&f%2Hu?}iEZNhXunLdz8=O?Ga9c?K*E4S>3qyp}*@go{k#Li214REcH%; zELWBwm&0N#L3@JP0rQC2d~76U-=VslVHZy04Dt-3Q$}<&oiFa-Pdr8S82VHs;|rWd zsUi9xwNo11KnsgQilQ9KV}r5!xvX|EzQGp!fZcH4P&big z4&jKz49vp@e2@J&oJeIo$HXNRdXt|)#VcrwC`4l#W??nfVHft{7Ve@%62mRjLIAp= z4^lA#Q!xWyVI}V4Z+IrtEG9dski!^^peE|Vj-mJzUtkCJp~-Mg&|oWe;2PxG*DI&4 z97Xy;PLllBx8m8};<}E`^-5EN8mk_p%=pO7QVnC{E1Sr zyaz+E45#oEL2)dPY4{e$@B~%jIgN*f_!Yhsp>fJB?frRbFN<89;3wyii#W>4i#_lj zKExty!uPm>r>H}b_Q!OjVIR(-AVt;^k(i3Fu@g5@oMNeoE|B7rqWcm*AnW{}R=Y7D zb-6By(9|#G`UyS}&aiNWZWw#jIZd@C$UC;81cmH_N@$4~%*KznhjJ;5elZgVP%xFo z3-fUSSMUe&QvUf+15x+{QZJ-VP#;|9rOeArK6G6l#aCDKF*s*K7OH^>Qf;KlG#bJ9 z6npRhbw_fV0PkQDCSwX7kD_D{HJXw~+8A~oyfoILG{zAe#XbCmvE%5xa1y7+S+qZU zFx2ZWpEB_YIv>%>lsrx2DV>xg8_7ChB<+cjyv8JF8NR~TSdUXUi<`KI zwj{L^KE^WKzyk~==_Bz2wqY|_*gHm!0hoAzFw))uAK*ind73~zX5kkc$5~v(E9|nZ z(GH!_7n3jzpWrhn;~BOfABv$TdZP~_Fl7A83^SS7gCn?#>Tgl|5Q1jtiGFw=i|{?R zVE0=Vt$cU2Y{+GOpzpe-O76wUUrrw4=6cM0{XzOq`k0@O5IN9c#J2QJb`(rKaoRnT*75Ee}@?3 zeSGi^Rdt=5LVb;jz%ImCmh33|aU5507a3^4Ive8+^u=gQf^2g(7UBkO;UWG-UbbHn zz0n6F@eU+yv#sDb){ zUu;)OQMqemR5G#>LrCXA(su|gNJ~pBCOu1VZYD!IJb9O<89}oM0#@N`w3|(kFn$jG z2z=+#f1sSxqHSxzS#7-)kxOvWmV7EzMl!kvdvO@2aRHt)=#(%Q^RO11ag~i{z?+Tx zq7rIg24-UgRznh3i3C+gZ3Lnx`eFd=7&MD26U&68z6hx`)a zQ?LLV@dNUZ!vZJ{A4rbNqb|CjCw5^kj^PBVIxR}5lPc4TiT>Dui&!>~_5{=C(*olD z0{Rk7s)KwhMoLmj)to-IKl7%~KeIb?0aTRh z2S9wMXCr zj#Z@iYs`F)BW29O1CI&R?2MeajhlFcXDWz=`wuEo`_ 
z%B8>N1H3QQtquv35B#wnyWzgkJ*;#;PT_ZqWCK&M03Tu#wqiH-;~Xxb@<$w@qBo{v zK0d`x?7=ns{SlR^*kTGFo$xw_VH74}J~m(rZs0abETNHu4MBJfT`&#rViSJA9-PKg zhM*fdE&xuep!ciPYI{rl4PdG}zql`!Q9&st3 z>kF2rZXDsZN55TQiZ5sCbDs0)ufNf?h`f;-g~mbE=VL}{i5-lV$6?k!J~c2apK(g6 zlazQ8j$tN2eHZ6(0b_{QSd=0LrLhg$;mSLOL-OCiUEIU!Pia}iXSB~azl`;uXtyue zi}5pl`GU)V=JL6L*Csyu&AK9%3)Q%gb`vK(^-F)xE$4%3iBANQu>c=pC-xv6cOV=8 z2L;*iU<}0wjK)Mv#&WEJnZON9??H1C`3kzA8~R}YCSvku)Go=%M(jpaa#9lw@Df_0 zHHIJ#*YFSC{G1AbxA6|%#T=}|2J|HeL$MT}VHMWk0FK}{+(hFqsa?&!q^96C48~9_ z!U`P6DU|$*;z1>ZU^qr24d3A=9)Sb#KQeyjcIw!v!^SP!^lklBS;93{A#2i{Wj?wV zxUGHki5lVPN|`G2Ny!5Ev!yb)gv*$_oU;j-u!6Xv&PrNo_^o1D)c%@z7{8kS3mv}U zg9!BdmPQ&EaS1Ed(O2Rw?qT$L^ROcAy?JWyY==0K+xl$tCA36aOvP+`fE9QOE9voo zH@c!Xreh(F;Uq5L4^$@2wGfIn_y8Z_bF9EM+(E@v6k7u__C6M28J6QJZlgSzt%VT0 zhF#c;pYbbv2tpaWh8~!L_hDN@B2Wi`Xo{AYfOoMIM__Rfy`pG>_UMji6j)1c;e*ns zidqbQM+!c|$GCwKAOaY-KIw2XuBe{#+LW(eVJL5xK!F@bL=^xpz&YL_7nXRPC%QzmTqJ9T1F)wtmPDL+JZB!HE86}zF3`w4?s4># zJ#Y*j;W4%n*KO!X96O;rVY1;2eiwx0EW!!H08}EIl`#+VaUD0%m|Ql&PuLC1Hb(Ba zf~*q>X&=s0>$2AyU&5#+ABNZxWSKv3*@#T79keib4a2bpw^4g1T^B}UGcKSsk$4#r zsdJFXJU~UF(i4-h1rn`6yD5d;mNY(+$!l@@2weY6kHL0`#@B}f9TL!M@L}<)Fy5xb z?xTufEZ#!t{fwJYA5HKV{)XiMqYuPj1m4FYY{mgx!A(>;NY%xw=!jm3IYv)KQM`_9M66_d+IE;aexXmx-$rn~#Us-j1c~zzlnlrOMUf*+lhki?! zse`x%qMOOAh8OB84feM;Xku4AnQCc*D04#hs(!& z`ba2M^J>Yt9|lGd4v9+_7VZc0jf0sUfd;>@rkp!G@iBJ_d- z6R-f+u;&zAAkN@-e0he}7~RejVR&5N@bp4^_7f(;@#x{B+mEhuFE^PIa(k%X-#&l) zSDAC3iLBe<+mCMV+P-V!+Fi?bDO%z-z7eON%GuLQuC^bn7IS2~xW@ewPjdjxcchJh zbC{Hx8C)}{(F8CCzu`366Sxjok2JhSYzU+tw?#Z2g2b&dF{pCVQbu`&iB^zcU4;ba z32L6=61CwR%CB2 zCRX7HJWdmDe2h=<1!ZBD$@fgJI!kAVN9U-y=PcUzjcOKDJ z-zCYjM=k9LB3jBfxQvqIt}S}N%<&|qXJ7#~AsrRSaW}k$>6ioeh@9iOM52`|_j=}! 
z7y-D=V-Oj1_1Eqr^o={!BC%!VwQ#rxCYb=FxCP@~rekh%fQ1SKMuO86{V*0EVKI*5 zB>sjk!E1nqh{UWvISclx+9GY`UiEd0gQv>JNW)Lq1BrJF;w>@nfu3mh7p*ADJ)j#y zSO&{r-M{S2*n_>e@Q8|m4P?obT%+kHj}M?fU%@N{(r(pcQ0k$ zj^90g_tD)wcekZgIH>wqw7+(%L$yuY)FNIBhIuPKl4#>?+ITs8DBrnF3pl8LXGyz# zNFApZuCFxY|Hx}3H|_8(HObR$2DASBSxwZJ3E@AYZfNf0hUQFeXwKw@=1gv=o?NfYeO(Eqk>E78wO3Nf@l(Co0jbt&mbD z^Sh^MOY&I9c@`K)lURiJYgLO`+dDo_=F6EQ`BLO~*ALvu&kf11q{z=e$k*59>#Fiq zJo)Zao?>*nWjFw-$+#?(GX}5F19o&>;UXWg%BO4cafzJe*3Z>`P#g6Sj0SiKP0$Q2 z(Hd>h30=?)z0n8#5RM4Cc_osGC=5mn;_#+6xwy4~Lw>+iehpH7B2Rt;PJX6Ke#}aK zK1zP!M}8(pex*cy_(OgJLVj;SeyO0>IoFrG<(tj&)l~UPr+nouumEEVGALgsk?(ED z7aQbCbh&t2F1?k@9p!37x%iN44INx$=(;peuKtn>d*mV-xl}@~PS8L9C;alks(f4~ zpMc1DYB|j)CmrMo>$QBm9W-p6vSi*s0d1ZkYt5aDvj|nnxZ+v&;nEzr4`y}WhYquyLV=?E69w} z9X-$!z3>M5qCW;;AnX`~XpnJbC}I&0f}kWKDQ&rr^`WX)@#3EIo~ErXWsSFJ=So@g zY32N^e>&Vd--e2(60USp5|l_f+xpxxdun#X+^?BkVZaMs3w*(AK?=W5lyigTN`qhU z#*i1h7W#tM>gReb+!$6c(rA$D#jMw!6&W+3&yP$XqnFq+)>?XyU5f`Q&ksnTLO`Aq z55@{FcGNJB~ab&jUuH1)ag(g>I7~H*J zgU^pl=nEp#GxWKkkz72#p$UCKWEdN2mnv8T9lc`t!G>WjO(wX0yz_0KyGRfHL`T@E-hxj=JKJhO1Grw@~3*)b|jGC@6n&S6gQ1h!?3A-9P%^H+fQ%5nc>#Yl{kOHaBD?%OJ`+Ubv|8T+;zMbr?nYtEu+7Gm%jci z{riEoj7BYaV2+1zD>{*8U-D2gurJvbK<{?ILT($=VsRc8074khQJVEe`#Ah?B|M1F{xI z)^?G#&t(|XijxLp?Iu~Xkgb}qGB){~tc@XSr^(uPWNijn>rU2oyRydcn_pS+VnkYk z^0}{PSF!TLUBY~=IIsDjU|Fqb73-=(OPca?a1Ui!tzK2@Ky7DLtC#m!eFhs<^5}22 zrCq6N9d2RO+5ghwsx#j4ZdIs}bWobNBMg_r1NnlpFruik#cJ*13iv-!+h5 zG?kywkl!YsQ8-pfDHN~BulASio2x9Cl65Oa3&h=}|eRB4HeHK@uvDBsQ({o7UAHvhI$g5h3iPj}~R>`~#6Rl;suV*R^ z8?X`IVH0H8%{*?w51?^VIC@mJ<44HrJ9*p%4(gTN*n_>;r~Nt6>g#iaN!mkwQCXaJ zQF$^aV)=qe*4TpQ26%iWng2ygokSp0wEH~ddxu5Z$7-V{GwX`>&t&T;%lBI96rSzS z?oY8MwDGbR(%&|ISYiE)ltW4RWodV(TBGt-ekC*{q<(0FfI2N-Y1N=kNW<1aby~dAqGg>{Edl}> T1q8j)x_)4A+NfF9N$URxb&aPE delta 29273 zcmcKD2V4|a8}RMfSqmsA7DOq6v5SSKfW7zL5@YYZ_kwGS(WtSkI?-6L#@`YH5O3wXfQ^7|M%_=EGRzD`+mQV^_rcXnVmW3K4;FHnVnrH+|D*(b+$QvC65>s z>z|9F9A{i!e*E<5lT19wi$nMwhY{(&&bZnzPxTBupI}h3DP8(>=`(7_s2$c7*bf~u zmrsR3A3cEwC#OgdTvKb 
zfAUt8LaL(Z^SgFcloGt&Qa~{&Loz6ePZ8@{(|dl;{HK`|CBBrR{L2dOm0@|FGqEDp zqd4#^fB)W_zl~qyJ+1}`CZl>nSqUZgdA20KmWXtlWKi~7SzSf`hEn263 z9zs1w`h0nPix*y`I1kV90oFNbpJn}?iV~PyCi;r>^y#l91onUN0TgYy(t7$npPjP3 z9!5p!&{0tasoF$?THiFLi*=s9YQ1=Wcc|Mki|<9^ZT-`ye;|!#nR>3XqSVx9^8A|h zNHqy0X%a3eZu!f}^jt*K9RTfh%5i3$&pPTksPkq|yDX-0*nU@9VX-=7(dZV9D zXXBOAO{46Nm7NqN&NHH+L%bnFsTSP_MU>6*BxB~F;6T6NaslQ-E=6tQf#&8eC2iwD z<{w;2+s4b9_qi0ejh8b&btz>V4>sq|5oj9^F@Kk%oNYYRydXyz>v*uA`Am+owsC)R z=A0F6;{oRSIc@6+4m6L?8DN_pWRA>fOHXiFb9~O?*)8b|F6Yy(myaR0e}LI5S7F<< zVB54nbF*AUZPP+*(}K)D=F;CAYMWNpd?;7p%9izo_}QkFb9D|X?O!_3vBtmw!@7?e z=9tT;X|+1Fn%AoB)3N2iu0iFji~H+~o4?6j*1D;XfaGaCau=~p3$#rOHvgL2mduc# z8|B$@71#yYCh+xZ&%M^sM(mOly$jK zTNVTS%vJK((h_RR2h)b;DQH{HmX82`^O`)xY}0J{2naCW&Qr)X&6W?QWp}fsFVvQg zfU@RVZnkiT+Va7)FgJa>wtNJXGjDhEwXM&VkAM*KGq++MmN zJyW6P?Vi>O7UFNqR8WBVg{QUfL;P)-3JNk8_Ofj^z?Lbdb@1}DZX>{!si1P^rCzpR z2G}wc6l}hhJk6Gmpx}rc`S&!57%8Fl0L(Fbsr!D zSyR`}1AIF78#*v$l|H>K8!PSOW8JDx@1YS<6BetJJI{#hJj3AOnoHg~kiih2VVwtc z_o>#j-q6UI;fCRzwWSx3i5Wi)G4T9SzR#yvi_9tcNxR*&Md<4&3AffSsVe_diC(2Yo>HRORWDmGbJ&;YF~^jJ ztWm9FHzjSl{%eWm_%iH zbhWFHPHVHJY}J~M;&xNgrXy{Ux;y>X@UXS7Rd`o5U-Dj;$H^^+U26RD*#)ea7oE@l zH9X#Sb%!Ry{lC?nm-*DcQoqGVbMmkYlUABnU#jEouD#TNTefQ2+C%mJ(n6ob=)a}M z(mLhHnUdkimmX?%yN2Ntr!smltfe-|Zc5rxOS@|obNZ|t|ECmXwA+UE(o21pT&J7{ zxus3eUz|zmNlcrd&jOa8dDRlpD34Q09vTg09xB5O!;TG$bFJ*zWiTf;CIjsk& z$9YrUV~M#olcxPkiT*r$auE31UQj^wwkzH5JkoBdVL|)KIDl7Q+V1~bua+Hgf#hD#&3Sb})GK!=fv(ixNRGxCj6l)|I5{DG>kR%^PN)8koEV$yw^M4sif zbxNYPBT#K*XuUf~&Fj#-8M$kY4CE>!R$(<9$c-Z=V+!sdR>sNQ572Y=2f2$PX9qEu zoD3PF8GO~^TA51fY;8~_wXxR0Uv<{JeAS}b&!ZVz5~4br+pC$i#I9;aQ!hh@DZLEY zh*l9LYwhfc^Bo+t56gTG9ikXpm_cts*9Tj%4f#haiZ^CM8AF+l(fkjoo?7@+H7I#d zO{-d%998_095pJeR4TG(a;f zz)I}LVPqyzxe<&?n1T6dG+9wv;>={OC0=4AYKo%#fit**>$rzUcnP>3d0xWP!Uy88=cV${V)~O+DANMlijqumT74pGUp=8Urfq*k-@o} zNynKI#d~)}C26zUs(G~bV=1+jX4OM$REMG)Q$x+E#ST^rnCj?JTtFlnk5iN;g1&lF zMByMNk7oyfI7QZs7X>ZuN1XO9hRjx!q4))h 
zFrF$gajueGnM9SCjKerGH@Tf~*Sva?>Q5^fY_7<_<=b?|8ig?aVtieXa;4(fT!y=-E04&EU+`}XM3uQ5lCWkoN<%H!e_B~zYWy<29`wgu8C?QN@O(`MJ8vV1>QbsI5KDL^v=UYcsf6>^Qtj0c zwW%D>Xj&p@C=ZV3$X2x(FT;KyO<_&h^WwUzj#^A4jY`~hHM?q1v0SWoZj$FjjF^Is@m7$=vNfta0J?I+=d_iJf2;&e2 zJ??dg`C%MEA>v%vF6M6-KSHdJLgE{RgLs3tI8JQ;z&nwgE=8-{jD&^nX5S$>9DaLp z#A;NAu}1CfrqKvAtw0*~PNJ3G)}TBaj(1yObE zacf0>6#FRQCiV70LM*Hgd96QACH&6#DIT#r$0kIqj?j20TINl{0l&O&*Rzw4|MSFCB3A3>g66h^hxSInC^q_Yo z;64Pg0K5sJ9!x*ROAtd}_>pISR6nR0da5<#Kw&9+DoO-n3Hzj{Q&{Rv1Or}j@aRh& zi7}}T+OzKLf{xZkG>7fnu9KYKc~obWy)X;Au^$I;6SwdWya;g()Wkpx!Zk<{co2FC z|8H1lD~1jf#YC(_H2y{TBblijl+9#q@ONqnEv}@xM4LUIq`Le@{dMsrsr0DZYR|i} zKRlkRI-3;MJehasr8}qf||Ni|;TL>+l4=$7rgt5r3itm0L{Db%K z|AQ_w_FykQz_xd4i+8Kjw75;`aO>__b)fYCoRv;}hJ}lb8Kl3Ld{pw3HOm?m){qki zaTZlhC`vnAJIM}*9;fI~U;u_-6;@+C45vA=p+6?#Bh)h-Fi;-9UDg@_*ORU% z{lRJ7jtxnrq8N`#+JLp!mnO|mTADPI$I>(p*=eR_ZK;J1W~Vt)Ty@nnxs@-k9ug+KM%5E^T&aHNT1enZA?~Eajs|w7o5=*KLt~WEb6jc2S(f zcp_fHNH8)$BB%#%4C7-l9XoIr4$;&Nw29W{?k3%O+nlELrA0nU#Z8%;(n?!;M|#`b zSbA=mO$Jr(;*bLS%F~8TO~OhX##1<*BMIn?pRo}q@EkcP%W`OoiTDj0uo0&q5E*22E4Niwr)pB5Q2YImEd1^P(&OW99BH2y}Di&R9! 
z<8L@#q7^_X3_&FJz~u_*L^pKDY|O!79KmHcUZp;w*;UODO_DapNs`*rAn0S9xydw3 zHiYT~>_T$O{h?z(YKR)_J1p zQ}j`SJmgRqYdJ{1wH+$oCPd$Sn{bTj2NHHB?9`?YR?Epg>n7c~9NN;}l)~LcYJmZI zy4#cH?bv~{h=zKN{wtbe7}nt-+^^GUV-ZfnKt6rZ49($vo5LUUyqn4MOT46!bvd6guUz4D>1(;P@`|^+5|uoVi0RRCA`;GU zM+JP3(f9?2@BmrkXbABgreYf+uoD+?32ygjDDPR@32U{rb>E6wuLxN#3;kaWU_9L`eEWzxhue>-j|65B{0T+?nxh5w zVL$HU0YV8 z-5wMlmil1jA=?;;LAZ)*$Vl8Xp$VjbI)g&8lu^`kY8c+*Uv#&X$#Pq{+)kt?j{^B# zavFGZFzvwq0 z2i%Ys-e`+X=!!lVh@lvd^8aB&SObqYBnR))Vli4!dX}uW6rzUxN z4a+h>n=@E-&&2r5to^h>HPn(?{gG-_nLwnq2_x0g{gjH52NNXyYawYbPkQzAH(-1j zmg5k9#|M=Eo4Sv6IDstWsVure&*K}$L*5Z5{P9k%SeKJewmA2#z@X}A2Au0mlG7YM z*{aLHawfzHPgd>Ba$Y8caT)LSXw}tZpfb7t!Fo=h7-1@o!1we4VEUJ|Aj?Nib&wxB zu?r`V0M}0}i-A~y)mVe0IEHr!Pz*{}tUwe#SQ`k2+LDrL2RTL+m9zD>+Rv+G&zz+8 z(&Bn@c4oPtN5k%EQms3nhm>R(Nt=kBIEbS-i^3$eI4*Ht$;xo0m;J2Pqe9$k57=i1Vi%EACjkaIF6Hu#s$CB{7?hf*j~JZCRouo~{EJK(4T=+9;1w*N zuF5B@@|o2A*rY3rM)UC#53TBS)y?yMY$6k*^{G+JKQ3>Q0@8k(s5aO3HR7`hcNNuoWu)q+8CYhp+c{}+6fxwsT)!p5DHo?r z`J|k3keWP@6n+CqWot-EmqJo|8dsK3y zF2UqT12CD9xiDi(7K0LjXk13_tOmsct?)gjVHWlv3RiI(kMJ0ooeYXIav&$Vq7T+$ zGiEv)lsVXieYo9%K^y|JktIam(Q5R=oO+{3?6a~YH( zuvBfW$#fc!^aj~vr+A$7An7Kb$SNt@yh zh?95#g@6@B1DGJelt4-lC4dq<37iBg7s1GlCTQwp(3;&+%UVC4a~UqpcNw{8Rff7X zTRyV0e1e}cXyitHi{o-87hF<4^~j(6F`FxGIxAI1V=J-{;OPj*eLR3SLH0pCEX6t8 z!+q?q1^NWf3PCQ7#%K)*{`b%efc9Ab)7szS7K`)pS+8{9z5Kr5-19%yauHdN@F{A;5a^_Xg+d>F#LdNnHWsZXVB*Qs>Q5V z<1CjlET0BjF3IUv=)B1T5P~6MB_H@LJ!6wg0K!paU9XOgsiT#AaI8#2D=(e z%5X;R;sHt%wLn|s^r#gkLM0)QtAg6lBRI_(DCJk4S7HmUBifx71U??T4}Clh$^=Zs z92`e9zW1UD>f&WkhGGId^3!U=4}l26RvbZTZ(0GoO^COgp35nEJRhpOO}N1|Z2j8x zTlt{n>^+%tOoqoS%gC7i*dHOIQg7+B8I!NU&8(q17R|`@Y>LA%`HCBh*jG! 
zmvO(TnM}!N#Rh7eOkag=Ny!$Pv9VH(+)j&Q8NP8`E!JjN5OAR8;O9y{<( zuEG?NE4;Q3<1HW6@fqT5?b8729XF;qYcU;F7xVdy0a{*-&n@(;y!NeKUJ1`~I2EU5 z!V;{-2JFHEJc3ffpkzQUc%cEBVHW1#GOpntsJ;b`mkTZ<+VnknKJ%9^ffH=x->oNWGfv15-Xi zp(xejk=YijuMM?SUKo)8WxSjS;WGfx8%n+D28_E1Xshg2&X!5$QP;1zolGdOI zEHP=u^B}T17>;B$3kG2@){xb;cG(Res|CqcAuPgTY(g~tK|Zop4K+~<2M~o4WHaTc z%fT}F9!XnAQ0|fBdb{-#=tw$)27^i7cfX4922hLlTHT z@f6z{(eJ`(oWV;JYfN_n>u~^|P_9X;L~2Xr)@S+#jiqlu%PjX)@Zs`Hla$YY;98#= ziZ}QONr)t(1RD)TZ}h=9OvFokfFB9)M-9|L?WPV@U>M~>)8vP$KOrzt~uC= zt4M;+_q1I25nFH`eaDF1I7aZB>0tQgJ-sjWP&C8?=WR z8X^#h8;HdxWMFHqa7RT{Ls#^|7?`jE8z3n>0ZHX8yu-h!*@fMyE=g{OPH5eot|&$$ z3_Gz8f8lTB>cIvv1GAC6C({su8kmMTxQ4sP+lxjXwa^G*_z7?D4j+55l8n76bd*K~ zRELITc!f78--nf8A5J1$U-nHn^`l`x4b;LMEWw|6h64RrAO45SJ96D8Q&u^8SjB4L zSE%9AIW~PK<(eH`(G5c|4|{MIIlHnQ^v5#XhG#eG5EkMg{0U%LltW9j!YD-GH6*wa zSPAOqyIwxqu4lRF&UTXz_qIh)nPRr9{sZM*QVdcA5`GE2gk3^@372sNS8)vuiDDx} zU?(yWt<0E?pWs8(^yt-K{4AnTYyhP-z@Q!Op|+EoR&9qc{c4T7HvT$QyJ`fjfdu{{ zVo{K{6-H?U!;A&^13s74jZrydy!`lS%nXRF%qLOc@S0m zXGYdy6Q1BXvJU1505^D`Fv_AD>R>83*o*ooaZfP;7s!wA|3w8FO-f?*hgaX5rCC^U+5c{CVBm1)LEdvwJw zqd9osB+lXO7*0jT(%`@wzVJsobizG6fI5y1BRGtNq7!;x2u9%+?&3W@q49X;VdQwK z%veSyU@bOcD|TWR_TvNEvs{0KgR*?g*Mee`ZeF>0-tu6IypH8r<~_Cytz-66W&Gr! z%tHcENkkcP;)g~gP;xX8 z2O$amh+jzTBE(Ff4}cyMIrgLfBuiuBVZ8%hT93%^eNq=BciT~rg^QvX0uTf!IKhlig>NFC9zmaz)I|z!S6i(tA;_(bwX3@aI4IU_iAPmJMtV1jw&m!k<8FBlW z1yKPF&<%aC0&8#*XK@vek!7~!Qm0(Gl=Ju-mg|@IF5f$U?+g!lzHEEQnB_WX%5p~& zfQv-<5-JkeO89_}xJ`ubAT~{;lZf{IpV;LQID;snC0ZerNSDX& zNP8$Pss3!eIM3OPvvtgJrL*cG4q5Vs!ze`L?;;L?L^TNK5Q9EMxG!?ep+AT%*ouww zIMSe|nIkXM`E*>x0(xr*T*!K`R->+=@GsOq*d+? 
z>gSuuQ)I6F5;qIzZSDD+FO%C{#$#;@K7Oe!JFXVimPhf)&AH-gChIpdWlyqRI?@$5fm`G#nSRQ=&2&V;W{+0T$yH?&CQ=qS>$PaOi-pSdERigBK{egu^6)5NcvD z0%71}x#N!Ilxe5u& z1oR_w{V|@*m~fEN6Wd0e3 zgi`=8VKU}m0j?kpZfvI@TA(w2#xnedefWMQC5@iwi%|$e6i(t3vXaIMsD}}ly2`}K zDS%~|SdLY=f(OXAmi`A?qX%|jFWi5l)X@puunzm+ zxQ?dJ#9%aj#9t$XZWVepfl<>+@54#X`}$pp)vVsf}Es37s9augGufXlp#ref=&BlJ9c0= zn;wBjc#Mjhh%;v4XXM*VoX}z$$4P{2*DH+W(#eudYH36AwF+((h~_XO+1Y>u{Db_g z=Nr^SQw+l>EWipJ$0;0SJ2803HeMrS2jN3K^hH0+g&BKs2p2GC7wf@FoZ7`9*7&%Y7$z%@kB)Da}v>6Ge%3SOiT8mf{qQJ83sD z9-9!2>;yIleK8vn{Qv?j!LEy8SdSZUA;3}qda&&}E!=~sg_l&{T5lrcPC{q71`wph z#HjuzDVX!$8w5280k+v%*m$-N>#WE7xuoL$Xk3^`fvM_wn3T-eR6Cj)X35!sY zP5Pk{s-hPLLr>0F#%CggBvnK`w8iAZROp$E=;=Mg_&Ka1&0C?Te=p;wG5sieFMh@n zoWLb~#3y7rM%_bQ)I(Er#SvUV+2i!d&;ebr_PB`(%!tb$9GS5UYp@+xa03Z=3*!lP zb@(F?t@m&1taYZe! zU2e-aS(0yp>*hdQ7qr7bS)xF&Yz_ zCg-`ava#wBNjo#i(36+S8c68$ut~^v*uv&V$Ruo%qf+4RbIt$|lfY2|FJdi^3ny$(dr!z?*NKf-n*5l3Vm7ZQofsn>7daqWztpJB zh*!I0wcd)=L2k#=KHlbbtAlsA-|$2VsfEGBOXWXs zB#8Oo0SWAU=)qmj_^%IW`7tP-Lk!Hsay~K>J3WS;j0X|JvPeK8^1Ps-KxLdGu6#!z z60`XAZEAzlq&7HhYJ)#dZE)Ju2B%GJaGKNxE1#t{qq)lx|+SG=mNo`2l)P{VX z+K{xV4N05YkTj_c`8>6upQko7ZE8c)rZzN9YD3eeHuUqHaFErB#E+IC0d>&Qb6#=eHgRauN34Te&YMknKPm&k{n#&OOL{KpjX({bmt zUL}kTP4Y8w@^em~R`6w;oz~wFknb4Fw=CsLX7Xhn`Fexg%pB}(y%SPykdoU@37L-P!1=2qM^vW=6tWma#Fx|b+o^i8l4SB&EMDP zCimMbS>XaVcpyItpfHM~B+4KFWf6)BsDzrRg9d1frf7~&!*oZd-iTzY)c4U;aB98={0 zCx;g)1?jHvO)Etu_l_V3C6IPE+@|LZzO{Y)J7J+kxRB7LoI=-Tzk&E*|8LsP|tL;n#Jrt=k6 zoNGTNxG++Ri!U7OJvqi_tHM=OdM3?Kl7qS2@v&zs^XPTF=W^eP%bYUb;B@!_y^lu* z?foL-2dzd$qo3&*SzAihgqf_}Bx?y|?HySwLe`d&HTgxu^<=FUS^JBubtY@2$y!^o z_K>ViAZxkET3fOdTopS!+Vp{K#4bveuMrjYe~__Li)jBWtC}n$vNv9g(%)$eOz~YjmOLHagMk z$<6N;J+x$(-)P8eq0h_3JXbh+X@e^nx0ota;kT*qClcXRm7nxLtNL_<@I5-ACwk*d z1AY<#5Ag^+zvUMf5z|dkE@MP@e)mSU&VsysS&oiAMPNx!2GSMC=hx*b8(V2r zDjS2e8I_GLIbxC*$zWN`J91NH<4A*csj@L=h7Nf;SJk2`8}mjst4cpBGOC*K&)i!0 z_r^?GbQfc&wydVHLS+7LjW=DiYxRtkG?%tUCz)7B2K^%k^flH~4c?J^`Wyc;X!{2m z#~Zx)@3^WNKYjAnrVKK+=bNwc)4}|h6C4l^`Q!Vdid;i_=8;3;e_>JnY+77t(7Fm= 
zzHO<<4`J||4GO>Epm?<#%2$hqDek6Wihsp0MZV(qSHl&G+#MsI>#xcdrPOV7R_R|h zT3KE7rovbLxC=C%FYU@12)~T@$Y3Nw+J(VJSM~g($R~r1^VGnStLp#w;-{0D>M6!d z{Dwnaep6u?7c4DjFGR^@C||KlJ#}d0=3$&?WxgKIU3c;F-~Lt7nvO7LF*roF9bxQl zaGb`*SeN3hof>I0X#oq>S-Zy=`x(^WKyAQSW6|mtUwot{D^;Q75BPqOa>`&CA2Flw zM9=fq@!vN!lK&u%t&sk1o0D8C8f{qklQAx*Bvaa_event_time); + ++migrated; + uptr = q; + q = q->a_next; + uptr->a_next = NULL; /* hygiene */ + if (uptr->a_activate_call != &sim_activate_notbefore) { + a_event_time = uptr->a_event_time-((sim_asynch_inst_latency+1)/2); + if (a_event_time < 0) + a_event_time = 0; + } + else + a_event_time = uptr->a_event_time; + uptr->a_activate_call (uptr, a_event_time); + if (uptr->a_check_completion) { + sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Calling Completion Check for asynch event on %s\n", sim_uname(uptr)); + uptr->a_check_completion (uptr); + } + } + } +return migrated; +} + +void sim_aio_activate (ACTIVATE_API caller, UNIT *uptr, int32 event_time) +{ +sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch event for %s after %d instructions\n", sim_uname(uptr), event_time); +if (uptr->a_next) { + uptr->a_activate_call = sim_activate_abs; + } +else { + UNIT *q; + uptr->a_event_time = event_time; + uptr->a_activate_call = caller; + do { + q = AIO_QUEUE_VAL; + uptr->a_next = q; /* Mark as on list */ + } while (q != AIO_QUEUE_SET(uptr, q)); + } +sim_asynch_check = 0; /* try to force check */ +if (sim_idle_wait) { + sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(uptr), event_time); + pthread_cond_signal (&sim_asynch_wake); + } +} #else t_bool sim_asynch_enabled = FALSE; #endif diff --git a/scp.h b/scp.h index d20239df..9f7d8161 100644 --- a/scp.h +++ b/scp.h @@ -281,6 +281,10 @@ extern t_addr sim_brk_match_addr; extern BRKTYPTAB *sim_brk_type_desc; /* type descriptions */ extern FILE *stdnul; extern t_bool sim_asynch_enabled; +#if defined(SIM_ASYNCH_IO) +int sim_aio_update_queue (void); +void 
sim_aio_activate (ACTIVATE_API caller, UNIT *uptr, int32 event_time); +#endif /* VM interface */ diff --git a/sim_defs.h b/sim_defs.h index f8d0f323..4c804c56 100644 --- a/sim_defs.h +++ b/sim_defs.h @@ -541,10 +541,10 @@ struct UNIT { void *up7; /* device specific */ void *up8; /* device specific */ void *tmxr; /* TMXR linkage */ + void (*cancel)(UNIT *); #ifdef SIM_ASYNCH_IO void (*a_check_completion)(UNIT *); t_bool (*a_is_active)(UNIT *); - void (*a_cancel)(UNIT *); UNIT *a_next; /* next asynch active */ int32 a_event_time; ACTIVATE_API a_activate_call; @@ -931,6 +931,8 @@ struct FILEREF { #if defined (SIM_ASYNCH_IO) #include +#define SIM_ASYNCH_CLOCKS 1 + extern pthread_mutex_t sim_asynch_lock; extern pthread_cond_t sim_asynch_wake; extern pthread_mutex_t sim_timer_lock; @@ -941,8 +943,6 @@ extern pthread_cond_t sim_tmxr_poll_cond; extern pthread_mutex_t sim_tmxr_poll_lock; extern pthread_t sim_asynch_main_threadid; extern UNIT * volatile sim_asynch_queue; -extern UNIT * volatile sim_wallclock_queue; -extern UNIT * volatile sim_wallclock_entry; extern volatile t_bool sim_idle_wait; extern int32 sim_asynch_check; extern int32 sim_asynch_latency; @@ -958,101 +958,35 @@ extern int32 sim_asynch_inst_latency; /* It is primarily used only used in debugging messages */ #define AIO_TLS #endif -#define AIO_QUEUE_CHECK(que, lock) \ - if (1) { \ - UNIT *_cptr; \ - if (lock) \ - pthread_mutex_lock (lock); \ - for (_cptr = que; \ - (_cptr != QUEUE_LIST_END); \ - _cptr = _cptr->next) \ - if (!_cptr->next) { \ - if (sim_deb) { \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Queue Corruption detected\n");\ - fclose(sim_deb); \ - } \ - sim_printf("Queue Corruption detected\n"); \ - abort(); \ - } \ - if (lock) \ - pthread_mutex_unlock (lock); \ - } else (void)0 +#define AIO_QUEUE_CHECK(que, lock) \ + do { \ + UNIT *_cptr; \ + if (lock) \ + pthread_mutex_lock (lock); \ + for (_cptr = que; \ + (_cptr != QUEUE_LIST_END); \ + _cptr = _cptr->next) \ + if (!_cptr->next) { \ + if 
(sim_deb) { \ + sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Queue Corruption detected\n");\ + fclose(sim_deb); \ + } \ + sim_printf("Queue Corruption detected\n"); \ + abort(); \ + } \ + if (lock) \ + pthread_mutex_unlock (lock); \ + } while (0) #define AIO_MAIN_THREAD (pthread_equal ( pthread_self(), sim_asynch_main_threadid )) #define AIO_LOCK \ pthread_mutex_lock(&sim_asynch_lock) #define AIO_UNLOCK \ pthread_mutex_unlock(&sim_asynch_lock) #define AIO_IS_ACTIVE(uptr) (((uptr)->a_is_active ? (uptr)->a_is_active (uptr) : FALSE) || ((uptr)->a_next)) -#if !defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) +#if defined(SIM_ASYNCH_MUX) #define AIO_CANCEL(uptr) \ - if ((uptr)->a_cancel) \ - (uptr)->a_cancel (uptr); \ - else \ - (void)0 -#endif /* !defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) */ -#if !defined(SIM_ASYNCH_MUX) && defined(SIM_ASYNCH_CLOCKS) -#define AIO_CANCEL(uptr) \ - if ((uptr)->a_cancel) \ - (uptr)->a_cancel (uptr); \ - else { \ - AIO_UPDATE_QUEUE; \ - if ((uptr)->a_next) { \ - UNIT *cptr; \ - pthread_mutex_lock (&sim_timer_lock); \ - if ((uptr) == sim_wallclock_queue) { \ - sim_wallclock_queue = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\ - sim_timer_event_canceled = TRUE; \ - pthread_cond_signal (&sim_timer_wake); \ - } \ - else \ - for (cptr = sim_wallclock_queue; \ - (cptr != QUEUE_LIST_END); \ - cptr = cptr->a_next) \ - if (cptr->a_next == (uptr)) { \ - cptr->a_next = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\ - break; \ - } \ - if ((uptr)->a_next == NULL) \ - (uptr)->a_due_time = (uptr)->a_usec_delay = 0; \ - else { \ - int tmr; \ - for (tmr=0; tmra_next; \ - (uptr)->a_next = NULL; \ - } \ - else \ - for (cptr = sim_clock_cosched_queue[tmr]; \ - (cptr != QUEUE_LIST_END); \ - cptr = cptr->a_next) \ - if (cptr->a_next == (uptr)) { \ - cptr->a_next 
= (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - break; \ - } \ - if ((uptr)->a_next == NULL) { \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Clock Coscheduling Event for %s\n", sim_uname(uptr));\ - } \ - } \ - } \ - while (sim_timer_event_canceled) { \ - pthread_mutex_unlock (&sim_timer_lock); \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Waiting for Timer Event cancelation for %s\n", sim_uname(uptr));\ - sim_os_ms_sleep (0); \ - pthread_mutex_lock (&sim_timer_lock); \ - } \ - pthread_mutex_unlock (&sim_timer_lock); \ - } \ - } -#endif -#if defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) -#define AIO_CANCEL(uptr) \ - if ((uptr)->a_cancel) \ - (uptr)->a_cancel (uptr); \ + if ((uptr)->cancel) \ + (uptr)->cancel (uptr); \ else { \ if (((uptr)->dynflags & UNIT_TM_POLL) && \ !((uptr)->next) && !((uptr)->a_next)) { \ @@ -1061,92 +995,19 @@ extern int32 sim_asynch_inst_latency; (uptr)->a_poll_waiter_count = 0; \ } \ } -#endif /* defined(SIM_ASYNCH_MUX) && !defined(SIM_ASYNCH_CLOCKS) */ -#if defined(SIM_ASYNCH_MUX) && defined(SIM_ASYNCH_CLOCKS) +#endif /* defined(SIM_ASYNCH_MUX) */ +#if !defined(AIO_CANCEL) #define AIO_CANCEL(uptr) \ - if ((uptr)->a_cancel) \ - (uptr)->a_cancel (uptr); \ - else { \ - AIO_UPDATE_QUEUE; \ - if (((uptr)->dynflags & UNIT_TM_POLL) && \ - !((uptr)->next) && !((uptr)->a_next)) { \ - (uptr)->a_polling_now = FALSE; \ - sim_tmxr_poll_count -= (uptr)->a_poll_waiter_count; \ - (uptr)->a_poll_waiter_count = 0; \ - } \ - if ((uptr)->a_next) { \ - UNIT *cptr; \ - pthread_mutex_lock (&sim_timer_lock); \ - if ((uptr) == sim_wallclock_queue) { \ - sim_wallclock_queue = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\ - sim_timer_event_canceled = TRUE; \ - pthread_cond_signal (&sim_timer_wake); \ - } \ - else \ - for (cptr = sim_wallclock_queue; \ - (cptr != QUEUE_LIST_END); \ - cptr = cptr->a_next) \ - if (cptr->a_next == (uptr)) { \ - cptr->a_next = 
(uptr)->a_next; \ - (uptr)->a_next = NULL; \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Timer Event for %s\n", sim_uname(uptr));\ - break; \ - } \ - if ((uptr)->a_next == NULL) \ - (uptr)->a_due_time = (uptr)->a_usec_delay = 0; \ - else { \ - if ((uptr) == sim_clock_cosched_queue) { \ - sim_clock_cosched_queue = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - } \ - else \ - for (cptr = sim_clock_cosched_queue; \ - (cptr != QUEUE_LIST_END); \ - cptr = cptr->a_next) \ - if (cptr->a_next == (uptr)) { \ - cptr->a_next = (uptr)->a_next; \ - (uptr)->a_next = NULL; \ - break; \ - } \ - if ((uptr)->a_next == NULL) { \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Canceling Clock Coscheduling Event for %s\n", sim_uname(uptr));\ - } \ - } \ - while (sim_timer_event_canceled) { \ - pthread_mutex_unlock (&sim_timer_lock); \ - sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Waiting for Timer Event cancelation for %s\n", sim_uname(uptr));\ - sim_os_ms_sleep (0); \ - pthread_mutex_lock (&sim_timer_lock); \ - } \ - pthread_mutex_unlock (&sim_timer_lock); \ - } \ - } -#endif + if ((uptr)->cancel) \ + (uptr)->cancel (uptr) +#endif /* !defined(AIO_CANCEL) */ #if defined(SIM_ASYNCH_CLOCKS) #define AIO_RETURN_TIME(uptr) \ - if (1) { \ - pthread_mutex_lock (&sim_timer_lock); \ - for (cptr = sim_wallclock_queue; \ - cptr != QUEUE_LIST_END; \ - cptr = cptr->a_next) \ - if ((uptr) == cptr) { \ - double inst_per_sec = sim_timer_inst_per_sec (); \ - int32 result; \ - \ - result = (int32)(((uptr)->a_due_time - sim_timenow_double())*inst_per_sec);\ - if (result < 0) \ - result = 0; \ - pthread_mutex_unlock (&sim_timer_lock); \ - return result + 1; \ - } \ - pthread_mutex_unlock (&sim_timer_lock); \ - if ((uptr)->a_next) /* On asynch queue? 
*/ \ - return (uptr)->a_event_time + 1; \ - } \ - else \ - (void)0 + do { \ + int32 rtime = sim_timer_activate_time (uptr); \ + if (rtime >= 0) \ + return rtime; \ + } while (0) #else #define AIO_RETURN_TIME(uptr) (void)0 #endif @@ -1188,31 +1049,25 @@ extern int32 sim_asynch_inst_latency; /* which avoids the potential ABA issues. */ #define AIO_QUEUE_MODE "Lock free asynchronous event queue access" #define AIO_INIT \ - if (1) { \ + do { \ int tmr; \ sim_asynch_main_threadid = pthread_self(); \ /* Empty list/list end uses the point value (void *)1. \ This allows NULL in an entry's a_next pointer to \ indicate that the entry is not currently in any list */ \ sim_asynch_queue = QUEUE_LIST_END; \ - sim_wallclock_queue = QUEUE_LIST_END; \ - sim_wallclock_entry = NULL; \ for (tmr=0; tmra_event_time);\ - uptr = q; \ - q = q->a_next; \ - uptr->a_next = NULL; /* hygiene */ \ - if (uptr->a_activate_call != &sim_activate_notbefore) { \ - a_event_time = uptr->a_event_time-((sim_asynch_inst_latency+1)/2); \ - if (a_event_time < 0) \ - a_event_time = 0; \ - } \ - else \ - a_event_time = uptr->a_event_time; \ - uptr->a_activate_call (uptr, a_event_time); \ - if (uptr->a_check_completion) { \ - sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Calling Completion Check for asynch event on %s\n", sim_uname(uptr));\ - uptr->a_check_completion (uptr); \ - } \ - } \ - } else (void)0 +#define AIO_UPDATE_QUEUE sim_aio_update_queue () #define AIO_ACTIVATE(caller, uptr, event_time) \ if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \ - UNIT *ouptr = (uptr); \ - sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch event for %s after %d instructions\n", sim_uname(ouptr), event_time);\ - if (ouptr->a_next) { \ - ouptr->a_activate_call = sim_activate_abs; \ - } else { \ - UNIT *q, *qe; \ - ouptr->a_event_time = event_time; \ - ouptr->a_activate_call = (ACTIVATE_API)&caller; \ - ouptr->a_next = QUEUE_LIST_END; /* Mark as on list */ \ - do { \ - do \ - q = AIO_QUEUE_VAL; \ 
- while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q));/* Grab current list */\ - for (qe = ouptr; qe->a_next != QUEUE_LIST_END; qe = qe->a_next); \ - qe->a_next = q; /* append current list */\ - do \ - q = AIO_QUEUE_VAL; \ - while (q != AIO_QUEUE_SET(ouptr, q)); \ - ouptr = q; \ - } while (ouptr != QUEUE_LIST_END); \ - } \ - sim_asynch_check = 0; /* try to force check */ \ - if (sim_idle_wait) { \ - sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(ouptr), event_time);\ - pthread_cond_signal (&sim_asynch_wake); \ - } \ + sim_aio_activate ((ACTIVATE_API)caller, uptr, event_time); \ return SCPE_OK; \ } else (void)0 -#define AIO_ACTIVATE_LIST(caller, list, event_time) \ - if (list) { \ - UNIT *ouptr, *q, *qe; \ - sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch events for %s after %d instructions\n", sim_uname(list), event_time);\ - for (qe=(list); qe->a_next != QUEUE_LIST_END;) { \ - qe->a_event_time = event_time; \ - qe->a_activate_call = (ACTIVATE_API)&caller; \ - qe = qe->a_next; \ - } \ - qe->a_event_time = event_time; \ - qe->a_activate_call = (ACTIVATE_API)&caller; \ - ouptr = (list); \ - do { \ - do \ - q = AIO_QUEUE_VAL; \ - while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q));/* Grab current list */ \ - for (qe = ouptr; qe->a_next != QUEUE_LIST_END; qe = qe->a_next); \ - qe->a_next = q; /* append current list */ \ - do \ - q = AIO_QUEUE_VAL; \ - while (q != AIO_QUEUE_SET(ouptr, q)); \ - ouptr = q; \ - } while (ouptr != QUEUE_LIST_END); \ - sim_asynch_check = 0; /* try to force check */ \ - if (sim_idle_wait) { \ - sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(ouptr), event_time);\ - pthread_cond_signal (&sim_asynch_wake); \ - } \ - } else (void)0 #else /* !USE_AIO_INTRINSICS */ /* This approach uses a pthread mutex to manage access to the link list */ /* head sim_asynch_queue. 
It will always work, but may be slower than the */ /* lock free approach when using USE_AIO_INTRINSICS */ #define AIO_QUEUE_MODE "Lock based asynchronous event queue access" #define AIO_INIT \ - if (1) { \ + do { \ int tmr; \ pthread_mutexattr_t attr; \ \ @@ -1327,26 +1103,20 @@ extern int32 sim_asynch_inst_latency; This allows NULL in an entry's a_next pointer to \ indicate that the entry is not currently in any list */ \ sim_asynch_queue = QUEUE_LIST_END; \ - sim_wallclock_queue = QUEUE_LIST_END; \ - sim_wallclock_entry = NULL; \ for (tmr=0; tmra_check_completion (uptr); \ } \ AIO_LOCK; \ - } \ + } \ AIO_UNLOCK; \ - } else (void)0 + } while (0) #define AIO_ACTIVATE(caller, uptr, event_time) \ if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \ sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch event for %s after %d instructions\n", sim_uname(uptr), event_time);\ @@ -1392,40 +1162,19 @@ extern int32 sim_asynch_inst_latency; sim_asynch_check = 0; \ return SCPE_OK; \ } else (void)0 -#define AIO_ACTIVATE_LIST(caller, list, event_time) \ - if (list) { \ - UNIT *qe; \ - sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch events for %s after %d instructions\n", sim_uname(list), event_time);\ - for (qe=list; qe->a_next != QUEUE_LIST_END;) { \ - qe->a_event_time = event_time; \ - qe->a_activate_call = (ACTIVATE_API)&caller; \ - qe = qe->a_next; \ - } \ - qe->a_event_time = event_time; \ - qe->a_activate_call = (ACTIVATE_API)&caller; \ - AIO_LOCK; \ - qe->a_next = sim_asynch_queue; \ - sim_asynch_queue = list; \ - sim_asynch_check = 0; /* try to force check */ \ - if (sim_idle_wait) { \ - sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(list), event_time);\ - pthread_cond_signal (&sim_asynch_wake); \ - } \ - AIO_UNLOCK; \ - } else (void)0 #endif /* USE_AIO_INTRINSICS */ #define AIO_VALIDATE if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) {sim_printf("Improper 
thread context for operation\n"); abort();} #define AIO_CHECK_EVENT \ if (0 > --sim_asynch_check) { \ AIO_UPDATE_QUEUE; \ sim_asynch_check = sim_asynch_inst_latency; \ - } else (void)0 + } else (void)0 #define AIO_SET_INTERRUPT_LATENCY(instpersec) \ - if (1) { \ + do { \ sim_asynch_inst_latency = (int32)((((double)(instpersec))*sim_asynch_latency)/1000000000);\ if (sim_asynch_inst_latency == 0) \ sim_asynch_inst_latency = 1; \ - } else (void)0 + } while (0) #else /* !SIM_ASYNCH_IO */ #define AIO_QUEUE_MODE "Asynchronous I/O is not available" #define AIO_UPDATE_QUEUE @@ -1441,7 +1190,9 @@ extern int32 sim_asynch_inst_latency; #define AIO_EVENT_BEGIN(uptr) #define AIO_EVENT_COMPLETE(uptr, reason) #define AIO_IS_ACTIVE(uptr) FALSE -#define AIO_CANCEL(uptr) +#define AIO_CANCEL(uptr) \ + if ((uptr)->cancel) \ + (uptr)->cancel (uptr) #define AIO_SET_INTERRUPT_LATENCY(instpersec) #define AIO_TLS #endif /* SIM_ASYNCH_IO */ diff --git a/sim_disk.c b/sim_disk.c index b946f22a..10bf33ec 100644 --- a/sim_disk.c +++ b/sim_disk.c @@ -467,7 +467,7 @@ if (ctx->asynch_io) { } uptr->a_check_completion = _disk_completion_dispatch; uptr->a_is_active = _disk_is_active; -uptr->a_cancel = _disk_cancel; +uptr->cancel = _disk_cancel; return SCPE_OK; #endif } diff --git a/sim_tape.c b/sim_tape.c index a1d66c65..2b769032 100644 --- a/sim_tape.c +++ b/sim_tape.c @@ -388,7 +388,7 @@ if (ctx->asynch_io) { } uptr->a_check_completion = _tape_completion_dispatch; uptr->a_is_active = _tape_is_active; -uptr->a_cancel = _tape_cancel; +uptr->cancel = _tape_cancel; return SCPE_OK; #endif } diff --git a/sim_timer.c b/sim_timer.c index 70a524da..18181773 100644 --- a/sim_timer.c +++ b/sim_timer.c @@ -63,7 +63,6 @@ sim_timer_init - initialize timing system sim_rtc_init - initialize calibration sim_rtc_calb - calibrate clock - sim_timer_init - initialize timing system sim_idle - virtual machine idle sim_os_msec - return elapsed time in msec sim_os_sleep - sleep specified number of seconds @@ -85,6 +84,15 
@@ #include #include +#define SIM_INTERNAL_CLK (SIM_NTIMERS+(1<<30)) +#define SIM_INTERNAL_UNIT sim_timer_units[SIM_NTIMERS] +#ifndef MIN +#define MIN(a,b) (((a) < (b)) ? (a) : (b)) +#endif + +//#define MS_MIN_GRANULARITY 20 +#define MS_MIN_GRANULARITY 1 + t_bool sim_idle_enab = FALSE; /* global flag */ volatile t_bool sim_idle_wait = FALSE; /* global flag */ @@ -92,9 +100,11 @@ static int32 sim_calb_tmr = -1; /* the system calibrated tim static uint32 sim_idle_rate_ms = 0; static uint32 sim_os_sleep_min_ms = 0; +static uint32 sim_os_sleep_inc_ms = 0; static uint32 sim_os_clock_resoluton_ms = 0; +static uint32 sim_os_tick_hz = 0; static uint32 sim_idle_stable = SIM_IDLE_STDFLT; -static t_bool sim_idle_idled = FALSE; +static uint32 sim_idle_calib_pct = 0; static uint32 sim_throt_ms_start = 0; static uint32 sim_throt_ms_stop = 0; static uint32 sim_throt_type = 0; @@ -104,13 +114,18 @@ static double sim_throt_cps; static double sim_throt_inst_start; static uint32 sim_throt_sleep_time = 0; static int32 sim_throt_wait = 0; -static UNIT *sim_clock_unit[SIM_NTIMERS] = {NULL}; -UNIT * volatile sim_clock_cosched_queue[SIM_NTIMERS] = {NULL}; -t_bool sim_asynch_timer = +static UNIT *sim_clock_unit[SIM_NTIMERS+1] = {NULL}; +UNIT * volatile sim_clock_cosched_queue[SIM_NTIMERS+1] = {NULL}; +static int32 sim_cosched_interval[SIM_NTIMERS+1]; +static t_bool sim_catchup_ticks = FALSE; +#if defined (SIM_ASYNCH_CLOCKS) && !defined (SIM_ASYNCH_IO) +#undef SIM_ASYNCH_CLOCKS +#endif +t_bool sim_asynch_timer = FALSE; + #if defined (SIM_ASYNCH_CLOCKS) - TRUE; -#else - FALSE; +UNIT * volatile sim_wallclock_queue = QUEUE_LIST_END; +UNIT * volatile sim_wallclock_entry = NULL; #endif t_stat sim_throt_svc (UNIT *uptr); @@ -123,12 +138,14 @@ t_stat sim_timer_tick_svc (UNIT *uptr); #define DBG_CAL 0x010 /* calibration activities */ #define DBG_TIM 0x020 /* timer thread activities */ #define DBG_THR 0x040 /* throttle activities */ +#define DBG_ACK 0x080 /* interrupt acknowledgement activities */ 
DEBTAB sim_timer_debug[] = { {"TRACE", DBG_TRC, "Trace routine calls"}, {"IDLE", DBG_IDL, "Idling activities"}, {"QUEUE", DBG_QUE, "Event queuing activities"}, + {"IACK", DBG_ACK, "interrupt acknowledgement activities"}, {"CALIB", DBG_CAL, "Calibration activities"}, - {"TIME", DBG_TIM, "Activation an scheduling activities"}, + {"TIME", DBG_TIM, "Activation and scheduling activities"}, {"THROT", DBG_THR, "Throttling activities"}, {"MUX", DBG_MUX, "Tmxr scheduling activities"}, {0} @@ -141,6 +158,10 @@ uint32 start_time = sim_os_msec(); struct timespec done_time; t_bool timedout = FALSE; +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +msec = MS_MIN_GRANULARITY*((msec+MS_MIN_GRANULARITY-1)/MS_MIN_GRANULARITY); +#endif + clock_gettime(CLOCK_REALTIME, &done_time); done_time.tv_sec += (msec/1000); done_time.tv_nsec += 1000000*(msec%1000); @@ -219,6 +240,25 @@ return SCPE_OK; #endif #endif /* defined(USE_READER_THREAD) */ +#define sleep1Samples 100 + +static uint32 _compute_minimum_sleep (void) +{ +uint32 i, tot, tim; + +SIM_IDLE_MS_SLEEP (1); /* Start sampling on a tick boundary */ +for (i = 0, tot = 0; i < sleep1Samples; i++) + tot += SIM_IDLE_MS_SLEEP (1); +tim = (tot + (sleep1Samples - 1)) / sleep1Samples; +sim_os_sleep_min_ms = tim; +SIM_IDLE_MS_SLEEP (1); /* Start sampling on a tick boundary */ +for (i = 0, tot = 0; i < sleep1Samples; i++) + tot += SIM_IDLE_MS_SLEEP (sim_os_sleep_min_ms + 1); +tim = (tot + (sleep1Samples - 1)) / sleep1Samples; +sim_os_sleep_inc_ms = tim - sim_os_sleep_min_ms; +return sim_os_sleep_min_ms; +} + /* OS-dependent timer and clock routines */ /* VMS */ @@ -263,6 +303,9 @@ for (i = 0; i < 64; i++) { /* 64b quo */ quo = quo | 1; /* set quo bit */ } } +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +quo = (quo/MS_MIN_GRANULARITY)*MS_MIN_GRANULARITY; +#endif return quo; } @@ -274,12 +317,7 @@ return; uint32 sim_os_ms_sleep_init (void) { -#if defined (__VAX) -sim_os_sleep_min_ms = 10; /* VAX/VMS is 10ms */ -#else 
-sim_os_sleep_min_ms = 1; /* Alpha/VMS is 1ms */ -#endif -return sim_os_sleep_min_ms; +return _compute_minimum_sleep (); } uint32 sim_os_ms_sleep (unsigned int msec) @@ -289,6 +327,10 @@ uint32 qtime[2]; int32 nsfactor = -10000; static int32 zero = 0; +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +msec = MS_MIN_GRANULARITY*((msec+MS_MIN_GRANULARITY-1)/MS_MIN_GRANULARITY); +#endif + lib$emul (&msec, &nsfactor, &zero, qtime); sys$setimr (2, qtime, 0, 0); sys$waitfr (2); @@ -320,9 +362,13 @@ const t_bool rtc_avail = TRUE; uint32 sim_os_msec (void) { -if (sim_idle_rate_ms) - return timeGetTime (); -else return GetTickCount (); +uint32 t = (sim_idle_rate_ms ? timeGetTime () : GetTickCount ()); + +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +t = (t/MS_MIN_GRANULARITY)*MS_MIN_GRANULARITY; +#endif + +return t; } void sim_os_sleep (unsigned int sec) @@ -343,24 +389,23 @@ TIMECAPS timers; if (timeGetDevCaps (&timers, sizeof (timers)) != TIMERR_NOERROR) return 0; -sim_os_sleep_min_ms = timers.wPeriodMin; -if ((timers.wPeriodMin == 0) || (timers.wPeriodMin > SIM_IDLE_MAX)) +if (timers.wPeriodMin == 0) return 0; if (timeBeginPeriod (timers.wPeriodMin) != TIMERR_NOERROR) return 0; atexit (sim_timer_exit); -Sleep (1); -Sleep (1); -Sleep (1); -Sleep (1); -Sleep (1); -return sim_os_sleep_min_ms; /* sim_idle_rate_ms */ +/* return measured actual minimum sleep time */ +return _compute_minimum_sleep (); } uint32 sim_os_ms_sleep (unsigned int msec) { uint32 stime = sim_os_msec(); +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +msec = MS_MIN_GRANULARITY*((msec+MS_MIN_GRANULARITY-1)/MS_MIN_GRANULARITY); +#endif + Sleep (msec); return sim_os_msec () - stime; } @@ -431,6 +476,9 @@ unsigned long millis; Microseconds (&macMicros); micros = *((unsigned long long *) &macMicros); millis = micros / 1000LL; +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +millis = (millis/MS_MIN_GRANULARITY)*MS_MIN_GRANULARITY; +#endif return (uint32) 
millis; } @@ -442,7 +490,7 @@ return; uint32 sim_os_ms_sleep_init (void) { -return sim_os_sleep_min_ms = 1; +return _compute_minimum_sleep (); } uint32 sim_os_ms_sleep (unsigned int milliseconds) @@ -450,6 +498,10 @@ uint32 sim_os_ms_sleep (unsigned int milliseconds) uint32 stime = sim_os_msec (); struct timespec treq; +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +milliseconds = MS_MIN_GRANULARITY*((milliseconds+MS_MIN_GRANULARITY-1)/MS_MIN_GRANULARITY); +#endif + treq.tv_sec = milliseconds / MILLIS_PER_SEC; treq.tv_nsec = (milliseconds % MILLIS_PER_SEC) * NANOS_PER_MILLI; (void) nanosleep (&treq, NULL); @@ -460,11 +512,10 @@ return sim_os_msec () - stime; int clock_gettime(int clk_id, struct timespec *tp) { struct timeval cur; -struct timezone foo; if (clk_id != CLOCK_REALTIME) return -1; -gettimeofday (&cur, &foo); +gettimeofday (&cur, NULL); tp->tv_sec = cur.tv_sec; tp->tv_nsec = cur.tv_usec*1000; return 0; @@ -480,7 +531,6 @@ return 0; #include #define NANOS_PER_MILLI 1000000 #define MILLIS_PER_SEC 1000 -#define sleep1Samples 100 const t_bool rtc_avail = TRUE; @@ -492,6 +542,9 @@ uint32 msec; gettimeofday (&cur, &foo); msec = (((uint32) cur.tv_sec) * 1000) + (((uint32) cur.tv_usec) / 1000); +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +msec = (msec/MS_MIN_GRANULARITY)*MS_MIN_GRANULARITY; +#endif return msec; } @@ -503,21 +556,9 @@ return; uint32 sim_os_ms_sleep_init (void) { -uint32 i, t1, t2, tot, tim; - -SIM_IDLE_MS_SLEEP (1); /* Start sampling on a tick boundary */ -for (i = 0, tot = 0; i < sleep1Samples; i++) { - t1 = sim_os_msec (); - SIM_IDLE_MS_SLEEP (1); - t2 = sim_os_msec (); - tot += (t2 - t1); - } -tim = (tot + (sleep1Samples - 1)) / sleep1Samples; -sim_os_sleep_min_ms = tim; -if (tim > SIM_IDLE_MAX) - tim = 0; -return tim; +return _compute_minimum_sleep (); } + #if !defined(_POSIX_SOURCE) #ifdef NEED_CLOCK_GETTIME typedef int clockid_t; @@ -541,6 +582,10 @@ uint32 sim_os_ms_sleep (unsigned int milliseconds) uint32 
stime = sim_os_msec (); struct timespec treq; +#if defined(MS_MIN_GRANULARITY) && (MS_MIN_GRANULARITY != 1) +milliseconds = MS_MIN_GRANULARITY*((milliseconds+MS_MIN_GRANULARITY-1)/MS_MIN_GRANULARITY); +#endif + treq.tv_sec = milliseconds / MILLIS_PER_SEC; treq.tv_nsec = (milliseconds % MILLIS_PER_SEC) * NANOS_PER_MILLI; (void) nanosleep (&treq, NULL); @@ -608,7 +653,17 @@ while (diff->tv_nsec > 1000000000) { } } -#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) +/* Forward declarations */ + +static double _timespec_to_double (struct timespec *time); +static void _double_to_timespec (struct timespec *time, double dtime); +static t_bool _rtcn_tick_catchup_check (int32 tmr, int32 time); +static void _rtcn_configure_calibrated_clock (int32 newtmr); +static void _sim_coschedule_cancel(UNIT *uptr); +static void _sim_wallclock_cancel (UNIT *uptr); +static t_bool _sim_wallclock_is_active (UNIT *uptr); + +#if defined(SIM_ASYNCH_CLOCKS) static int sim_timespec_compare (struct timespec *a, struct timespec *b) { while (a->tv_nsec > 1000000000) { @@ -630,34 +685,56 @@ if (a->tv_nsec > b->tv_nsec) else return 0; } -#endif /* defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) */ +#endif /* defined(SIM_ASYNCH_CLOCKS) */ /* OS independent clock calibration package */ -static int32 rtc_ticks[SIM_NTIMERS] = { 0 }; /* ticks */ -static int32 rtc_hz[SIM_NTIMERS] = { 0 }; /* tick rate */ -static uint32 rtc_rtime[SIM_NTIMERS] = { 0 }; /* real time */ -static uint32 rtc_vtime[SIM_NTIMERS] = { 0 }; /* virtual time */ -static double rtc_gtime[SIM_NTIMERS] = { 0 }; /* instruction time */ -static uint32 rtc_nxintv[SIM_NTIMERS] = { 0 }; /* next interval */ -static int32 rtc_based[SIM_NTIMERS] = { 0 }; /* base delay */ -static int32 rtc_currd[SIM_NTIMERS] = { 0 }; /* current delay */ -static int32 rtc_initd[SIM_NTIMERS] = { 0 }; /* initial delay */ -static uint32 rtc_elapsed[SIM_NTIMERS] = { 0 }; /* sec since init */ -static uint32 rtc_calibrations[SIM_NTIMERS] = { 0 }; /* calibration 
count */ -static double rtc_clock_skew_max[SIM_NTIMERS] = { 0 }; /* asynchronous max skew */ +static int32 rtc_ticks[SIM_NTIMERS+1] = { 0 }; /* ticks */ +static uint32 rtc_hz[SIM_NTIMERS+1] = { 0 }; /* tick rate */ +static uint32 rtc_rtime[SIM_NTIMERS+1] = { 0 }; /* real time */ +static uint32 rtc_vtime[SIM_NTIMERS+1] = { 0 }; /* virtual time */ +static double rtc_gtime[SIM_NTIMERS+1] = { 0 }; /* instruction time */ +static uint32 rtc_nxintv[SIM_NTIMERS+1] = { 0 }; /* next interval */ +static int32 rtc_based[SIM_NTIMERS+1] = { 0 }; /* base delay */ +static int32 rtc_currd[SIM_NTIMERS+1] = { 0 }; /* current delay */ +static int32 rtc_initd[SIM_NTIMERS+1] = { 0 }; /* initial delay */ +static uint32 rtc_elapsed[SIM_NTIMERS+1] = { 0 }; /* sec since init */ +static uint32 rtc_calibrations[SIM_NTIMERS+1] = { 0 }; /* calibration count */ +static double rtc_clock_skew_max[SIM_NTIMERS+1] = { 0 }; /* asynchronous max skew */ +static double rtc_clock_start_gtime[SIM_NTIMERS+1] = { 0 };/* reference instruction time for clock */ +static double rtc_clock_tick_size[SIM_NTIMERS+1] = { 0 }; /* 1/hz */ +static uint32 rtc_calib_initializations[SIM_NTIMERS+1] = { 0 };/* Initialization Count */ +static double rtc_calib_tick_time[SIM_NTIMERS+1] = { 0 }; /* ticks time */ +static double rtc_calib_tick_time_tot[SIM_NTIMERS+1] = { 0 };/* ticks time - total*/ +static uint32 rtc_calib_ticks_acked[SIM_NTIMERS+1] = { 0 };/* ticks Acked */ +static uint32 rtc_calib_ticks_acked_tot[SIM_NTIMERS+1] = { 0 };/* ticks Acked - total */ +static uint32 rtc_clock_ticks[SIM_NTIMERS+1] = { 0 };/* ticks delivered since catchup base */ +static uint32 rtc_clock_ticks_tot[SIM_NTIMERS+1] = { 0 };/* ticks delivered since catchup base - total */ +static double rtc_clock_catchup_base_time[SIM_NTIMERS+1] = { 0 };/* reference time for catchup ticks */ +static uint32 rtc_clock_catchup_ticks[SIM_NTIMERS+1] = { 0 };/* Record of catchups */ +static uint32 rtc_clock_catchup_ticks_tot[SIM_NTIMERS+1] = { 0 };/* Record of 
catchups - total */ +static t_bool rtc_clock_catchup_pending[SIM_NTIMERS+1] = { 0 };/* clock tick catchup pending */ +static t_bool rtc_clock_catchup_eligible[SIM_NTIMERS+1] = { 0 };/* clock tick catchup eligible */ +static uint32 rtc_clock_time_idled[SIM_NTIMERS+1] = { 0 };/* total time idled */ +static uint32 rtc_clock_time_idled_last[SIM_NTIMERS+1] = { 0 };/* total time idled */ -UNIT sim_timer_units[SIM_NTIMERS+2]; /* one for each timer and one for throttle */ - /* plus one for an internal clock if no clocks are registered */ +UNIT sim_timer_units[SIM_NTIMERS+1]; /* one for each timer and one for an */ + /* internal clock if no clocks are registered */ +UNIT sim_tick_units[SIM_NTIMERS]; /* one for each timer to schedule asynchronously */ +UNIT sim_throttle_unit; /* one for throttle */ + +/* Forward device declarations */ +extern DEVICE sim_timer_dev; +extern DEVICE sim_throttle_dev; void sim_rtcn_init_all (void) { -uint32 i; +int32 tmr; -for (i = 0; i < SIM_NTIMERS; i++) - if (rtc_initd[i] != 0) - sim_rtcn_init (rtc_initd[i], i); +for (tmr = 0; tmr <= SIM_NTIMERS; tmr++) + if (rtc_initd[tmr] != 0) + sim_rtcn_init (rtc_initd[tmr], tmr); return; } @@ -668,54 +745,85 @@ return sim_rtcn_init_unit (NULL, time, tmr); int32 sim_rtcn_init_unit (UNIT *uptr, int32 time, int32 tmr) { -sim_debug (DBG_CAL, &sim_timer_dev, "sim_rtcn_init(time=%d, tmr=%d)\n", time, tmr); +sim_debug (DBG_CAL, &sim_timer_dev, "_sim_rtcn_init_unit(unit=%s, time=%d, tmr=%d)\n", sim_uname(uptr), time, tmr); if (time == 0) time = 1; -if ((tmr < 0) || (tmr >= SIM_NTIMERS)) - return time; +if (tmr == SIM_INTERNAL_CLK) + tmr = SIM_NTIMERS; +else { + if ((tmr < 0) || (tmr >= SIM_NTIMERS)) + return time; + } +/* + * If we'd previously succeeded in calibrating a tick value, then use that + * delay as a better default to setup when we're re-initialized. + * Re-initializing happens on any boot or after any breakpoint/continue. 
+ */ +if (rtc_currd[tmr]) + time = rtc_currd[tmr]; +if (!uptr) + uptr = sim_clock_unit[tmr]; if (uptr) { - if (uptr != &sim_timer_units[SIM_NTIMERS+1]) { /* New unit not internal timer */ - if ((tmr == SIM_NTIMERS-1) && /* but replacing default internal timer */ - (sim_clock_unit[tmr] == &sim_timer_units[SIM_NTIMERS+1])) { /* remove internal timer */ - sim_cancel (sim_clock_unit[tmr]); - sim_clock_unit[tmr] = NULL; - rtc_initd[tmr] = rtc_currd[tmr] = 0; - if (tmr == sim_calb_tmr) - sim_calb_tmr = -1; - } - } if (!sim_clock_unit[tmr]) { sim_clock_unit[tmr] = uptr; sim_clock_cosched_queue[tmr] = QUEUE_LIST_END; } } +rtc_clock_start_gtime[tmr] = sim_gtime(); rtc_rtime[tmr] = sim_os_msec (); rtc_vtime[tmr] = rtc_rtime[tmr]; rtc_nxintv[tmr] = 1000; rtc_ticks[tmr] = 0; rtc_hz[tmr] = 0; -if (rtc_currd[tmr]) /* A previously calibrated value is better than a constant */ - time = rtc_currd[tmr]; rtc_based[tmr] = time; rtc_currd[tmr] = time; rtc_initd[tmr] = time; rtc_elapsed[tmr] = 0; rtc_calibrations[tmr] = 0; -if (sim_calb_tmr == -1) /* save first initialized clock as the system timer */ - sim_calb_tmr = tmr; +rtc_clock_ticks_tot[tmr] += rtc_clock_ticks[tmr]; +rtc_clock_ticks[tmr] = 0; +rtc_calib_tick_time_tot[tmr] += rtc_calib_tick_time[tmr]; +rtc_calib_tick_time[tmr] = 0; +rtc_clock_catchup_pending[tmr] = FALSE; +rtc_clock_catchup_eligible[tmr] = FALSE; +rtc_clock_catchup_ticks_tot[tmr] += rtc_clock_catchup_ticks[tmr]; +rtc_clock_catchup_ticks[tmr] = 0; +rtc_calib_ticks_acked_tot[tmr] += rtc_calib_ticks_acked[tmr]; +rtc_calib_ticks_acked[tmr] = 0; +++rtc_calib_initializations[tmr]; +_rtcn_configure_calibrated_clock (tmr); return time; } int32 sim_rtcn_calb (int32 ticksper, int32 tmr) { -uint32 new_rtime, delta_rtime; +uint32 new_rtime, delta_rtime, last_idle_pct; int32 delta_vtime; double new_gtime; int32 new_currd; -if ((tmr < 0) || (tmr >= SIM_NTIMERS)) - return 10000; -rtc_hz[tmr] = ticksper; +if (tmr == SIM_INTERNAL_CLK) + tmr = SIM_NTIMERS; +else { + if ((tmr < 0) 
|| (tmr >= SIM_NTIMERS)) + return 10000; + } +if (rtc_hz[tmr] != ticksper) { /* changing tick rate? */ + rtc_hz[tmr] = ticksper; + rtc_clock_tick_size[tmr] = 1.0/ticksper; + _rtcn_configure_calibrated_clock (tmr); + rtc_currd[tmr] = (int32)(sim_timer_inst_per_sec()/ticksper); + } +if (sim_clock_unit[tmr] == NULL) { /* Not using TIMER units? */ + rtc_clock_ticks[tmr] += 1; + rtc_calib_tick_time[tmr] += rtc_clock_tick_size[tmr]; + } +if (rtc_clock_catchup_pending[tmr]) { /* catchup tick? */ + ++rtc_clock_catchup_ticks[tmr]; /* accumulating which were catchups */ + rtc_clock_catchup_pending[tmr] = FALSE; + if (!sim_asynch_timer) /* non asynch timers? */ + return rtc_currd[tmr]; /* return now avoiding counting catchup tick in calibration */ + } rtc_ticks[tmr] = rtc_ticks[tmr] + 1; /* count ticks */ if (rtc_ticks[tmr] < ticksper) { /* 1 sec yet? */ return rtc_currd[tmr]; @@ -739,11 +847,12 @@ if (sim_calb_tmr != tmr) { } new_rtime = sim_os_msec (); /* wall time */ sim_debug (DBG_TRC, &sim_timer_dev, "sim_rtcn_calb(ticksper=%d, tmr=%d)\n", ticksper, tmr); -if (sim_idle_idled) { +last_idle_pct = MIN(100,(uint32)(((double)(rtc_clock_time_idled[tmr] - rtc_clock_time_idled_last[tmr])) / 10.0)); +rtc_clock_time_idled_last[tmr] = rtc_clock_time_idled[tmr]; +if (last_idle_pct > (100 - sim_idle_calib_pct)) { rtc_rtime[tmr] = new_rtime; /* save wall time */ rtc_vtime[tmr] = rtc_vtime[tmr] + 1000; /* adv sim time */ rtc_gtime[tmr] = sim_gtime(); /* save instruction time */ - sim_idle_idled = FALSE; /* reset idled flag */ sim_debug (DBG_CAL, &sim_timer_dev, "skipping calibration due to idling - result: %d\n", rtc_currd[tmr]); return rtc_currd[tmr]; /* avoid calibrating idle checks */ } @@ -770,31 +879,20 @@ if (delta_rtime > 30000) { /* gap too big? 
*/ return rtc_currd[tmr]; /* can't calibr */ } new_gtime = sim_gtime(); -if (sim_asynch_enabled && sim_asynch_timer) { - if (rtc_elapsed[tmr] > sim_idle_stable) { - /* An asynchronous clock, merely needs to divide the number of */ - /* instructions actually executed by the clock rate. */ - new_currd = (int32)((new_gtime - rtc_gtime[tmr])/ticksper); - /* avoid excessive swings in the calibrated result */ - if (new_currd > 10*rtc_currd[tmr]) /* don't swing big too fast */ - new_currd = 10*rtc_currd[tmr]; - else - if (new_currd < rtc_currd[tmr]/10) /* don't swing small too fast */ - new_currd = rtc_currd[tmr]/10; - rtc_currd[tmr] = new_currd; - rtc_gtime[tmr] = new_gtime; /* save instruction time */ - if (rtc_currd[tmr] == 127) { - sim_debug (DBG_CAL, &sim_timer_dev, "asynch calibration small: %d\n", rtc_currd[tmr]); - } - sim_debug (DBG_CAL, &sim_timer_dev, "asynch calibration result: %d\n", rtc_currd[tmr]); - return rtc_currd[tmr]; /* calibrated result */ - } - else { - rtc_currd[tmr] = rtc_initd[tmr]; - rtc_gtime[tmr] = new_gtime; /* save instruction time */ - sim_debug (DBG_CAL, &sim_timer_dev, "asynch not stable calibration result: %d\n", rtc_initd[tmr]); - return rtc_initd[tmr]; /* initial result until stable */ - } +if (sim_asynch_timer) { + /* An asynchronous clock, merely needs to divide the number of */ + /* instructions actually executed by the clock rate. 
*/ + new_currd = (int32)((new_gtime - rtc_gtime[tmr])/ticksper); + /* avoid excessive swings in the calibrated result */ + if (new_currd > 10*rtc_currd[tmr]) /* don't swing big too fast */ + new_currd = 10*rtc_currd[tmr]; + else + if (new_currd < rtc_currd[tmr]/10) /* don't swing small too fast */ + new_currd = rtc_currd[tmr]/10; + rtc_currd[tmr] = new_currd; + rtc_gtime[tmr] = new_gtime; /* save instruction time */ + sim_debug (DBG_CAL, &sim_timer_dev, "asynch calibration result: %d\n", rtc_currd[tmr]); + return rtc_currd[tmr]; /* calibrated result */ } rtc_gtime[tmr] = new_gtime; /* save instruction time */ /* This self regulating algorithm depends directly on the assumption */ @@ -837,17 +935,19 @@ return sim_rtcn_calb (ticksper, 0); t_bool sim_timer_init (void) { -int i; +int tmr; uint32 clock_start, clock_last, clock_now; sim_debug (DBG_TRC, &sim_timer_dev, "sim_timer_init()\n"); -for (i=0; ia_next) { - if ((dptr = find_dev_from_unit (uptr)) != NULL) { - fprintf (st, " %s", sim_dname (dptr)); - if (dptr->numunits > 1) - fprintf (st, " unit %d", (int32) (uptr - dptr->units)); - } - else fprintf (st, " Unknown"); - fprintf (st, " after "); - fprint_val (st, (t_value)uptr->a_usec_delay, 10, 0, PV_RCOMMA); - fprintf (st, " usec\n"); - } - } if (sim_asynch_timer) { - for (tmr=0; tmra_next) { - if ((dptr = find_dev_from_unit (uptr)) != NULL) { - fprintf (st, " %s", sim_dname (dptr)); - if (dptr->numunits > 1) - fprintf (st, " unit %d", (int32) (uptr - dptr->units)); - } - else fprintf (st, " Unknown"); - fprintf (st, "\n"); + const char *tim; + + if (sim_wallclock_queue == QUEUE_LIST_END) + fprintf (st, "%s wall clock event queue empty\n", sim_name); + else { + fprintf (st, "%s wall clock event queue status\n", sim_name); + for (uptr = sim_wallclock_queue; uptr != QUEUE_LIST_END; uptr = uptr->a_next) { + if ((dptr = find_dev_from_unit (uptr)) != NULL) { + fprintf (st, " %s", sim_dname (dptr)); + if (dptr->numunits > 1) + fprintf (st, " unit %d", (int32) (uptr - 
dptr->units)); } + else + fprintf (st, " Unknown"); + tim = sim_fmt_secs(uptr->a_usec_delay/1000000.0); + fprintf (st, " after %s\n", tim); } } } +#endif /* SIM_ASYNCH_CLOCKS */ +for (tmr=0; tmr<=SIM_NTIMERS; ++tmr) { + if (sim_clock_unit[tmr] == NULL) + continue; + if (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) { + int32 accum; + + fprintf (st, "%s clock (%s) co-schedule event queue status\n", + sim_name, sim_uname(sim_clock_unit[tmr])); + accum = 0; + for (uptr = sim_clock_cosched_queue[tmr]; uptr != QUEUE_LIST_END; uptr = uptr->next) { + if ((dptr = find_dev_from_unit (uptr)) != NULL) { + fprintf (st, " %s", sim_dname (dptr)); + if (dptr->numunits > 1) + fprintf (st, " unit %d", (int32) (uptr - dptr->units)); + } + else + fprintf (st, " Unknown"); + if (accum > 0) + fprintf (st, " after %d ticks", accum); + fprintf (st, "\n"); + accum = accum + uptr->time; + } + } + } +#if defined (SIM_ASYNCH_IO) pthread_mutex_unlock (&sim_timer_lock); #endif /* SIM_ASYNCH_IO */ return SCPE_OK; @@ -970,9 +1113,14 @@ REG sim_timer_reg[] = { { FLDATAD (IDLE_ENAB, sim_idle_enab, 0, "Idle Enabled"), REG_RO}, { DRDATAD (IDLE_RATE_MS, sim_idle_rate_ms, 32, "Idle Rate Milliseconds"), PV_RSPC|REG_RO}, { DRDATAD (OS_SLEEP_MIN_MS, sim_os_sleep_min_ms, 32, "Minimum Sleep Resolution"), PV_RSPC|REG_RO}, - { DRDATAD (IDLE_STABLE, sim_idle_stable, 32, "Idle Stable"), PV_RSPC}, - { FLDATAD (IDLE_IDLED, sim_idle_idled, 0, ""), REG_RO}, - { DRDATAD (TMR, sim_calb_tmr, 32, ""), PV_RSPC|REG_RO}, + { DRDATAD (OS_SLEEP_INC_MS, sim_os_sleep_inc_ms, 32, "Minimum Sleep Increment Resolution"), PV_RSPC|REG_RO}, + { DRDATAD (IDLE_STABLE, sim_idle_stable, 32, "Idle Stable"), PV_RSPC|REG_RO}, + { DRDATAD (IDLE_CALIB_PCT, sim_idle_calib_pct, 32, "Minimum Idled percentage allowing calibration"), PV_RSPC|REG_RO}, + { DRDATAD (TMR, sim_calb_tmr, 32, "Calibrated Timer"), PV_RSPC|REG_RO}, + { NULL } + }; + +REG sim_throttle_reg[] = { { DRDATAD (THROT_MS_START, sim_throt_ms_start, 32, ""), PV_RSPC|REG_RO}, { 
DRDATAD (THROT_MS_STOP, sim_throt_ms_stop, 32, ""), PV_RSPC|REG_RO}, { DRDATAD (THROT_TYPE, sim_throt_type, 32, ""), PV_RSPC|REG_RO}, @@ -983,11 +1131,62 @@ REG sim_timer_reg[] = { { NULL } }; +/* Clear, Set and show catchup */ + +/* Clear catchup */ + +t_stat sim_timer_clr_catchup (UNIT *uptr, int32 val, CONST char *cptr, void *desc) +{ +if (sim_catchup_ticks) + sim_catchup_ticks = FALSE; +return SCPE_OK; +} + +t_stat sim_timer_set_catchup (UNIT *uptr, int32 val, CONST char *cptr, void *desc) +{ +if (!sim_catchup_ticks) + sim_catchup_ticks = TRUE; +return SCPE_OK; +} + +t_stat sim_timer_show_catchup (FILE *st, UNIT *uptr, int32 val, CONST void *desc) +{ +fprintf (st, "Calibrated Ticks%s", sim_catchup_ticks ? " with Catchup Ticks" : ""); +return SCPE_OK; +} + +/* Set and show idle calibration threshold */ + +t_stat sim_timer_set_idle_pct (UNIT *uptr, int32 val, CONST char *cptr, void *desc) +{ +t_stat r; +int32 newpct; + +if (cptr == NULL) + return SCPE_ARG; +newpct = (int32) get_uint (cptr, 10, 100, &r); +if ((r != SCPE_OK) || (newpct == (int32)(sim_idle_calib_pct))) + return r; +if (newpct == 0) + return SCPE_ARG; +sim_idle_calib_pct = (uint32)newpct; +return SCPE_OK; +} + +t_stat sim_timer_show_idle_pct (FILE *st, UNIT *uptr, int32 val, CONST void *desc) +{ +if (sim_idle_calib_pct == 0) + fprintf (st, "Calibration Always"); +else + fprintf (st, "Calibration Skipped when Idle exceeds %d%%", sim_idle_calib_pct); +return SCPE_OK; +} + /* Clear, Set and show asynch */ /* Clear asynch */ -t_stat sim_timer_clr_async (UNIT *uptr, int32 val, char *cptr, void *desc) +t_stat sim_timer_clr_async (UNIT *uptr, int32 val, CONST char *cptr, void *desc) { if (sim_asynch_timer) { sim_asynch_timer = FALSE; @@ -996,26 +1195,29 @@ if (sim_asynch_timer) { return SCPE_OK; } -t_stat sim_timer_set_async (UNIT *uptr, int32 val, char *cptr, void *desc) +t_stat sim_timer_set_async (UNIT *uptr, int32 val, CONST char *cptr, void *desc) { -if (!sim_asynch_timer) { +if (sim_asynch_enabled && 
(!sim_asynch_timer)) { sim_asynch_timer = TRUE; sim_timer_change_asynch (); } return SCPE_OK; } -t_stat sim_timer_show_async (FILE *st, UNIT *uptr, int32 val, void *desc) +t_stat sim_timer_show_async (FILE *st, UNIT *uptr, int32 val, CONST void *desc) { -fprintf (st, "%s", (sim_asynch_enabled && sim_asynch_timer) ? "Asynchronous" : "Synchronous"); +fprintf (st, "%s", sim_asynch_timer ? "Asynchronous" : "Synchronous"); return SCPE_OK; } MTAB sim_timer_mod[] = { -#if defined (SIM_ASYNCH_IO) && defined (SIM_ASYNCH_CLOCKS) - { MTAB_VDV, MTAB_VDV, "ASYNC", "ASYNC", &sim_timer_set_async, &sim_timer_show_async, NULL, "Enables/Displays Asynchronous Timer operation mode" }, - { MTAB_VDV, 0, NULL, "NOASYNC", &sim_timer_clr_async, NULL, NULL, "Disables Asynchronous Timer operation" }, +#if defined (SIM_ASYNCH_CLOCKS) + { MTAB_VDV, MTAB_VDV, "ASYNCH", "ASYNCH", &sim_timer_set_async, &sim_timer_show_async, NULL, "Enables/Displays Asynchronous Timer mode" }, + { MTAB_VDV, 0, NULL, "NOASYNCH", &sim_timer_clr_async, NULL, NULL, "Disables Asynchronous Timer operation" }, #endif + { MTAB_VDV, MTAB_VDV, "CATCHUP", "CATCHUP", &sim_timer_set_catchup, &sim_timer_show_catchup, NULL, "Enables/Displays Clock Tick catchup mode" }, + { MTAB_VDV, 0, NULL, "NOCATCHUP", &sim_timer_clr_catchup, NULL, NULL, "Disables Clock Tick catchup mode" }, + { MTAB_XTD|MTAB_VDV|MTAB_VALR, 0, "CALIB", "CALIB=nn", &sim_timer_set_idle_pct, &sim_timer_show_idle_pct, NULL, "Configure/Display Calibration Idle Suppression %" }, { 0 }, }; @@ -1023,10 +1225,13 @@ static t_stat sim_timer_clock_reset (DEVICE *dptr); DEVICE sim_timer_dev = { "TIMER", sim_timer_units, sim_timer_reg, sim_timer_mod, - SIM_NTIMERS+2, 0, 0, 0, 0, 0, + SIM_NTIMERS+1, 0, 0, 0, 0, 0, NULL, NULL, &sim_timer_clock_reset, NULL, NULL, NULL, NULL, DEV_DEBUG | DEV_NOSAVE, 0, sim_timer_debug}; +DEVICE sim_throttle_dev = { + "THROTTLE", &sim_throttle_unit, sim_throttle_reg, NULL, 1}; + /* sim_idle - idle simulator until next event or for specified 
interval @@ -1047,12 +1252,29 @@ static uint32 cyc_ms = 0; uint32 w_ms, w_idle, act_ms; int32 act_cyc; +if (rtc_clock_catchup_pending[tmr]) { /* Catchup clock tick pending? */ + sim_debug (DBG_CAL, &sim_timer_dev, "sim_idle(tmr=%d, sin_cyc=%d) - accelerating pending catch-up tick before idling %s\n", tmr, sin_cyc, sim_uname (sim_clock_unit[tmr])); + sim_activate_abs (&sim_timer_units[tmr], 0); + if (sin_cyc) + sim_interval = sim_interval - 1; + return FALSE; + } if ((!sim_idle_enab) || /* idling disabled */ ((sim_clock_queue == QUEUE_LIST_END) && /* or clock queue empty? */ - (!(sim_asynch_enabled && sim_asynch_timer)))|| /* and not asynch? */ + (!sim_asynch_timer))|| /* and not asynch? */ ((sim_clock_queue != QUEUE_LIST_END) && /* or clock queue not empty */ ((sim_clock_queue->flags & UNIT_IDLE) == 0))|| /* and event not idle-able? */ (rtc_elapsed[tmr] < sim_idle_stable)) { /* or timer not stable? */ + sim_debug (DBG_IDL, &sim_timer_dev, "Can't idle: %s - elapsed: %d.%03d\n", !sim_idle_enab ? "idle disabled" : + ((rtc_elapsed[tmr] < sim_idle_stable) ? "not stable" : + ((sim_clock_queue != QUEUE_LIST_END) ? sim_uname (sim_clock_queue) : + "")), rtc_elapsed[tmr], rtc_ticks[tmr]); + if (sin_cyc) + sim_interval = sim_interval - 1; + return FALSE; + } +if (_rtcn_tick_catchup_check(tmr, 0)) { + sim_debug (DBG_CAL, &sim_timer_dev, "sim_idle(tmr=%d, sin_cyc=%d) - rescheduling catchup tick for %s\n", tmr, sin_cyc, sim_uname (sim_clock_unit[tmr])); if (sin_cyc) sim_interval = sim_interval - 1; return FALSE; @@ -1069,10 +1291,11 @@ if ((!sim_idle_enab) || /* idling disabled */ the actual idle time, so consistent calibrated numbers produce better adjustments. - To negate this effect, we set a flag (sim_idle_idled) here and the - sim_rtcn_calb routine checks this flag before performing an actual - calibration and skips calibration if the flag was set and then clears - the flag. Thus recalibration only happens if things didn't idle. 
+ To negate this effect, we accumulate the time actually idled here. + sim_rtcn_calb compares the accumulated idle time during the most recent + second and if it exceeds the percentage defined by sim_idle_calib_pct + calibration is suppressed. Thus recalibration only happens if things + didn't idle too much. we also check check sim_idle_enab above so that all simulators can avoid directly checking sim_idle_enab before calling sim_idle so that all of @@ -1081,8 +1304,7 @@ */ //sim_idle_idled = TRUE; /* record idle attempt */ sim_debug (DBG_TRC, &sim_timer_dev, "sim_idle(tmr=%d, sin_cyc=%d)\n", tmr, sin_cyc); -if (cyc_ms == 0) /* not computed yet? */ - cyc_ms = (rtc_currd[tmr] * rtc_hz[tmr]) / 1000; /* cycles per msec */ +cyc_ms = (rtc_currd[tmr] * rtc_hz[tmr]) / 1000; /* cycles per msec */ if ((sim_idle_rate_ms == 0) || (cyc_ms == 0)) { /* not possible? */ if (sin_cyc) sim_interval = sim_interval - 1; @@ -1090,8 +1312,11 @@ if ((sim_idle_rate_ms == 0) || (cyc_ms == 0)) { /* not possible? */ return FALSE; } w_ms = (uint32) sim_interval / cyc_ms; /* ms to wait */ -w_idle = w_ms / sim_idle_rate_ms; /* intervals to wait */ -if (w_idle == 0) { /* none? */ +if (sim_os_tick_hz < rtc_hz[tmr]) + w_idle = (w_ms * 1000); /* intervals to wait * 1000 */ +else + w_idle = (w_ms * 1000) / sim_idle_rate_ms; /* intervals to wait * 1000 */ +if (w_idle < 500) { /* shorter than 1/2 a minimum sleep? */ if (sin_cyc) sim_interval = sim_interval - 1; sim_debug (DBG_IDL, &sim_timer_dev, "no wait\n"); @@ -1102,12 +1327,14 @@ if (sim_clock_queue == QUEUE_LIST_END) else sim_debug (DBG_IDL, &sim_timer_dev, "sleeping for %d ms - pending event on %s in %d instructions\n", w_ms, sim_uname(sim_clock_queue), sim_interval); act_ms = SIM_IDLE_MS_SLEEP (w_ms); /* wait */ +rtc_clock_time_idled[tmr] += act_ms; act_cyc = act_ms * cyc_ms; if (act_ms < w_ms) /* awakened early? 
*/ act_cyc += (cyc_ms * sim_idle_rate_ms) / 2; /* account for half an interval's worth of cycles */ if (sim_interval > act_cyc) sim_interval = sim_interval - act_cyc; /* count down sim_interval */ -else sim_interval = 0; /* or fire immediately */ +else + sim_interval = 0; /* or fire immediately */ if (sim_clock_queue == QUEUE_LIST_END) sim_debug (DBG_IDL, &sim_timer_dev, "slept for %d ms - pending event in %d instructions\n", act_ms, sim_interval); else @@ -1122,10 +1349,6 @@ t_stat sim_set_idle (UNIT *uptr, int32 val, CONST char *cptr, void *desc) t_stat r; uint32 v; -if (sim_idle_rate_ms == 0) - return sim_messagef (SCPE_NOFNC, "Idling is not available, Minimum OS sleep time is %dms\n", sim_os_sleep_min_ms); -if ((val != 0) && (sim_idle_rate_ms > (uint32) val)) - return sim_messagef (SCPE_NOFNC, "Idling is not available, Minimum OS sleep time is %dms, Requied minimum OS sleep is %dms\n", sim_os_sleep_min_ms, val); if (cptr && *cptr) { v = (uint32) get_uint (cptr, 10, SIM_IDLE_STMAX, &r); if ((r != SCPE_OK) || (v < SIM_IDLE_STMIN)) @@ -1264,12 +1487,12 @@ void sim_throt_sched (void) { sim_throt_state = 0; if (sim_throt_type) - sim_activate (&sim_timer_units[SIM_NTIMERS], SIM_THROT_WINIT); + sim_activate (&sim_throttle_unit, SIM_THROT_WINIT); } void sim_throt_cancel (void) { -sim_cancel (&sim_timer_units[SIM_NTIMERS]); +sim_cancel (&sim_throttle_unit); } /* Throttle service @@ -1369,12 +1592,141 @@ sim_activate (uptr, sim_throt_wait); /* reschedule */ return SCPE_OK; } +/* Clock assist activites */ t_stat sim_timer_tick_svc (UNIT *uptr) { +int tmr = (int)(sim_timer_units-uptr); +t_stat stat; + +rtc_clock_ticks[tmr] += 1; +rtc_calib_tick_time[tmr] += rtc_clock_tick_size[tmr]; +/* + * Some devices may depend on executing during the same instruction or + * immediately after the clock tick event. To satisfy this, we directly + * run the clock event here and if it completes successfully, schedule any + * currently coschedule units to run now. 
Ticks should never return a + * non-success status, while co-schedule activities might, so they are + * queued to run from sim_process_event + */ +sim_debug (DBG_QUE, &sim_timer_dev, "sim_timer_tick_svc - scheduling %s\n", sim_uname (sim_clock_unit[tmr])); +if (sim_clock_unit[tmr]->action == NULL) + return SCPE_IERR; +stat = sim_clock_unit[tmr]->action (sim_clock_unit[tmr]); +--sim_cosched_interval[tmr]; /* Countdown ticks */ +if (stat == SCPE_OK) { + if (rtc_clock_catchup_eligible[tmr]) { /* calibration started? */ + struct timespec now; + double skew; + + clock_gettime(CLOCK_REALTIME, &now); + skew = (_timespec_to_double(&now) - (rtc_calib_tick_time[tmr]+rtc_clock_catchup_base_time[tmr])); + + if (fabs(skew) > fabs(rtc_clock_skew_max[tmr])) + rtc_clock_skew_max[tmr] = skew; + } + while ((sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) && + (sim_cosched_interval[tmr] < sim_clock_cosched_queue[tmr]->time)) { + UNIT *cptr = sim_clock_cosched_queue[tmr]; + sim_clock_cosched_queue[tmr] = cptr->next; + cptr->next = NULL; + cptr->cancel = NULL; + sim_debug (DBG_QUE, &sim_timer_dev, "sim_timer_tick_svc(tmr=%d) - coactivating %s\n", tmr, sim_uname (cptr)); + _sim_activate (cptr, 0); + } + if (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) + sim_cosched_interval[tmr] = sim_clock_cosched_queue[tmr]->time; + else + sim_cosched_interval[tmr] = 0; + } +sim_timer_activate_after (uptr, 1000000/rtc_hz[tmr]); +return stat; +} + +void sim_rtcn_get_time (struct timespec *now, int tmr) +{ +sim_debug (DBG_CAL, &sim_timer_dev, "sim_rtcn_get_time(tmr=%d)\n", tmr); +clock_gettime (CLOCK_REALTIME, now); +} + +/* + * If the host system has a relatively large clock tick (as compared to + * the desired simulated hz) ticks will naturally be scheduled late and + * these delays will accumulate. The net result will be unreasonably + * slow ticks being delivered to the simulated system. 
+ * Additionally, when a simulator is idling and/or throttling, it will + * deliberately call sim_os_ms_sleep and those sleep operations will be + * variable and subject to the host system's minimum sleep resolution + * which can exceed the desired sleep interval and add to the concept + * of slow tick delivery to the simulated system. + * We accommodate these problems and make up for lost ticks by injecting + * catch-up ticks to the simulator. + * + * We avoid excessive co-scheduled polling during these catch-up ticks + * to minimize what is likely excessive overhead, thus 'coschedule + * polling' only occurs on every fourth clock tick when processing + * catch-up ticks. + * + * When necessary, catch-up ticks are scheduled to run under one + * of two conditions: + * 1) after indicated number of instructions in a call by the simulator + * to sim_rtcn_tick_ack. sim_rtcn_tick_ack exists to provide a + * mechanism to inform the simh timer facilities when the simulated + * system has accepted the most recent clock tick interrupt. 
+ * 2) immediately when the simulator calls sim_idle + */ + +/* _rtcn_tick_catchup_check - idle simulator until next event or for specified interval + + Inputs: + tmr = calibrated timer to check/schedule + time = instruction delay for next tick + + Returns TRUE if a catchup tick has been scheduled +*/ + +static t_bool _rtcn_tick_catchup_check (int32 tmr, int32 time) +{ +double tnow; + +if ((!sim_catchup_ticks) || + ((tmr < 0) || (tmr >= SIM_NTIMERS))) + return FALSE; +tnow = sim_timenow_double(); +if (sim_catchup_ticks && + (!rtc_clock_catchup_eligible[tmr])) { + rtc_clock_catchup_base_time[tmr] = tnow; + rtc_clock_ticks_tot[tmr] += rtc_clock_ticks[tmr]; + rtc_clock_ticks[tmr] = 0; + rtc_calib_tick_time_tot[tmr] += rtc_calib_tick_time[tmr]; + rtc_calib_tick_time[tmr] = 0.0; + rtc_clock_catchup_ticks_tot[tmr] += rtc_clock_catchup_ticks[tmr]; + rtc_clock_catchup_ticks[tmr] = 0; + rtc_calib_ticks_acked_tot[tmr] += rtc_calib_ticks_acked[tmr]; + rtc_calib_ticks_acked[tmr] = 0; + rtc_clock_catchup_eligible[tmr] = TRUE; + sim_debug (DBG_QUE, &sim_timer_dev, "_rtcn_tick_catchup_check() - Enabling catchup ticks for %s\n", sim_uname (sim_clock_unit[tmr])); + return TRUE; + } +if (rtc_clock_catchup_eligible[tmr] && + (tnow > (rtc_clock_catchup_base_time[tmr] + (rtc_calib_tick_time[tmr] + rtc_clock_tick_size[tmr])))) { + sim_debug (DBG_QUE, &sim_timer_dev, "_rtcn_tick_catchup_check(%d) - scheduling catchup tick for %s which is behind %s\n", time, sim_uname (sim_clock_unit[tmr]), sim_fmt_secs (tnow > (rtc_clock_catchup_base_time[tmr] + (rtc_calib_tick_time[tmr] + rtc_clock_tick_size[tmr])))); + rtc_clock_catchup_pending[tmr] = TRUE; + sim_activate_abs (&sim_timer_units[tmr], time); + return TRUE; + } +return FALSE; +} + +t_stat sim_rtcn_tick_ack (int32 time, int32 tmr) +{ +if ((tmr < 0) || (tmr >= SIM_NTIMERS)) + return SCPE_TIMER; +sim_debug (DBG_ACK, &sim_timer_dev, "sim_rtcn_tick_ack - for %s\n", sim_uname (sim_clock_unit[tmr])); +_rtcn_tick_catchup_check (tmr, time); 
+++rtc_calib_ticks_acked[tmr]; return SCPE_OK; } -#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) static double _timespec_to_double (struct timespec *time) { @@ -1391,17 +1743,18 @@ double sim_timenow_double (void) { struct timespec now; -clock_gettime(CLOCK_REALTIME, &now); +clock_gettime (CLOCK_REALTIME, &now); return _timespec_to_double (&now); } +#if defined(SIM_ASYNCH_CLOCKS) + extern UNIT * volatile sim_wallclock_queue; extern UNIT * volatile sim_wallclock_entry; pthread_t sim_timer_thread; /* Wall Clock Timing Thread Id */ pthread_cond_t sim_timer_startup_cond; t_bool sim_timer_thread_running = FALSE; -t_bool sim_timer_event_canceled = FALSE; static void * _timer_thread(void *arg) @@ -1420,19 +1773,18 @@ sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - starting\n"); pthread_mutex_lock (&sim_timer_lock); pthread_cond_signal (&sim_timer_startup_cond); /* Signal we're ready to go */ -while (sim_asynch_enabled && sim_asynch_timer && sim_is_running) { +while (sim_asynch_timer && sim_is_running) { struct timespec start_time, stop_time; struct timespec due_time; double wait_usec; int32 inst_delay; double inst_per_sec; - UNIT *uptr; + UNIT *uptr, *cptr, *prvptr; if (sim_wallclock_entry) { /* something to insert in queue? 
*/ - UNIT *cptr, *prvptr; - sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - timing %s for %d usec\n", - sim_uname(sim_wallclock_entry), sim_wallclock_entry->time); + sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - timing %s for %s\n", + sim_uname(sim_wallclock_entry), sim_fmt_secs (sim_wallclock_entry->time/1000000.0)); uptr = sim_wallclock_entry; sim_wallclock_entry = NULL; @@ -1472,7 +1824,6 @@ while (sim_asynch_enabled && sim_asynch_timer && sim_is_running) { sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - waiting for %.0f usecs until %.6f for %s\n", wait_usec, sim_wallclock_queue->a_due_time, sim_uname(sim_wallclock_queue)); if ((wait_usec <= 0.0) || (0 != pthread_cond_timedwait (&sim_timer_wake, &sim_timer_lock, &due_time))) { - int tmr; if (sim_wallclock_queue == QUEUE_LIST_END) /* queue empty? */ continue; /* wait again */ @@ -1493,26 +1844,9 @@ while (sim_asynch_enabled && sim_asynch_timer && sim_is_running) { } sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - slept %.0fms - activating(%s,%d)\n", 1000.0*(_timespec_to_double (&stop_time)-_timespec_to_double (&start_time)), sim_uname(uptr), inst_delay); - for (tmr=0; tmra_next = sim_clock_cosched_queue[tmr]; - sim_clock_cosched_queue[tmr] = QUEUE_LIST_END; - AIO_ACTIVATE_LIST(sim_activate, uptr, inst_delay); - } - else - sim_activate (uptr, inst_delay); + sim_activate (uptr, inst_delay); } else {/* Something wants to adjust the queue since the wait condition was signaled */ - if (sim_timer_event_canceled) - sim_timer_event_canceled = FALSE; /* reset flag and continue */ } } pthread_mutex_unlock (&sim_timer_lock); @@ -1522,81 +1856,83 @@ sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - exiting\n"); return NULL; } -#endif /* defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) */ +#endif /* defined(SIM_ASYNCH_CLOCKS) */ /* In the event that there are no active clock devices, no instruction rate calibration will be performed. 
This is more likely on simpler simulators which don't have a full spectrum of standard devices or possibly when a clock device exists but its use is optional. + + Additionally, when a host system has a natural clock tick (or minimal + sleep time) which is greater than the tick size that a simulator + wants to run a clock at, we run this clock at the rate implied by + the host system's minimal sleep time or 50Hz. - To solve this we merely run an internal clock at 50Hz. + + To solve this we merely run an internal clock at 10Hz. */ -#define CLK_TPS 50 -#define CLK_INIT 20000 + +#define CLK_TPS 10 +#define CLK_INIT 100000 +static int32 sim_int_clk_tps; + static t_stat sim_timer_clock_tick_svc (UNIT *uptr) { -sim_rtcn_calb (CLK_TPS, SIM_NTIMERS-1); -sim_activate_after (uptr, 1000000/CLK_TPS); /* reactivate unit */ +sim_rtcn_calb (sim_int_clk_tps, SIM_INTERNAL_CLK); +sim_activate_after (uptr, 1000000/sim_int_clk_tps); /* reactivate unit */ return SCPE_OK; } +/* + This routine exists to assure that there is a single reliably calibrated + clock properly counting instruction execution relative to time. The best + way to assure reliable calibration is to use a clock which ticks no + faster than the host system's clock. This is optimal so that accurate + time measurements are taken. 
If the simulated system doesn't have a + clock with an appropriate tick rate, an internal clock is run that meets + this requirement, + */ +static void _rtcn_configure_calibrated_clock (int32 newtmr) +{ +int32 tmr; + +sim_int_clk_tps = MIN(CLK_TPS, sim_os_tick_hz); +for (tmr=0; tmra_next) { - if (cptr == sim_wallclock_queue) { /* Handle first entry */ - struct timespec now; - double due_time; - - clock_gettime(CLOCK_REALTIME, &now); - due_time = _timespec_to_double(&now) + ((double)(cptr->a_usec_delay)/1000000.0); - delta_due_time = due_time - cptr->a_due_time; - } - cptr->a_due_time += delta_due_time; - } sim_debug (DBG_TRC, &sim_timer_dev, "sim_start_timer_services() - starting\n"); pthread_cond_init (&sim_timer_startup_cond, NULL); pthread_attr_init (&attr); @@ -1613,8 +1949,33 @@ pthread_mutex_unlock (&sim_timer_lock); void sim_stop_timer_services (void) { +int tmr; + sim_debug (DBG_TRC, &sim_timer_dev, "sim_stop_timer_services()\n"); -#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) + +for (tmr=0; tmr<=SIM_NTIMERS; tmr++) { + int32 accum; + + if (sim_clock_unit[tmr]) { + /* Stop clock assist unit and make sure the clock unit has a tick queued */ + sim_cancel (&sim_timer_units[tmr]); + if (rtc_hz[tmr]) + sim_activate (sim_clock_unit[tmr], rtc_currd[tmr]); + /* Move coschedule'd units to the standard event queue */ + accum = 1; + while (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) { + UNIT *cptr = sim_clock_cosched_queue[tmr]; + + sim_clock_cosched_queue[tmr] = cptr->next; + cptr->next = NULL; + cptr->cancel = NULL; + + accum += cptr->time; + _sim_activate (cptr, accum*rtc_currd[tmr]); + } + } + } +#if defined(SIM_ASYNCH_CLOCKS) pthread_mutex_lock (&sim_timer_lock); if (sim_timer_thread_running) { sim_debug (DBG_TRC, &sim_timer_dev, "sim_stop_timer_services() - stopping\n"); @@ -1622,6 +1983,26 @@ if (sim_timer_thread_running) { pthread_mutex_unlock (&sim_timer_lock); pthread_join (sim_timer_thread, NULL); sim_timer_thread_running = FALSE; + /* Any 
wallclock queued events are now migrated to the normal event queue */ + while (sim_wallclock_queue != QUEUE_LIST_END) { + UNIT *uptr = sim_wallclock_queue; + double d_due_delta = uptr->a_due_time - sim_timenow_double (); + int32 inst_delay; + double inst_delay_d; + + uptr->cancel (uptr); + if (d_due_delta < 0.0) + d_due_delta = 0.0; + inst_delay_d = sim_timer_inst_per_sec () * d_due_delta; + /* Bound delay to avoid overflow. */ + /* Long delays are usually canceled before they expire */ + if (inst_delay_d > (double)0x7fffffff) + inst_delay_d = (double)0x7fffffff; + inst_delay = (int32)inst_delay_d; + if ((inst_delay == 0) && (inst_delay_d != 0.0)) + inst_delay = 1; /* Minimum non-zero delay is 1 instruction */ + _sim_activate (uptr, inst_delay); /* queue it now */ + } } else pthread_mutex_unlock (&sim_timer_lock); @@ -1630,26 +2011,11 @@ else t_stat sim_timer_change_asynch (void) { -#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) +#if defined(SIM_ASYNCH_CLOCKS) if (sim_asynch_enabled && sim_asynch_timer) sim_start_timer_services (); -else { - UNIT *uptr; - uint32 accum = 0; - +else sim_stop_timer_services (); - while (1) { - uptr = sim_wallclock_queue; - if (uptr == QUEUE_LIST_END) - break; - sim_wallclock_queue = uptr->a_next; - accum += uptr->time; - uptr->a_next = NULL; - uptr->a_due_time = 0; - uptr->a_usec_delay = 0; - sim_activate_after (uptr, accum); - } - } #endif return SCPE_OK; } @@ -1672,10 +2038,16 @@ return inst_per_sec; t_stat sim_timer_activate_after (UNIT *uptr, uint32 usec_delay) { -int inst_delay; +int inst_delay, tmr; double inst_delay_d, inst_per_sec; AIO_VALIDATE; +/* If this is a clock unit, we need to schedule the related timer unit instead */ +for (tmr=0; tmr (double)0x7fffffff) inst_delay = (int32)inst_delay_d; if ((inst_delay == 0) && (usec_delay != 0)) inst_delay = 1; /* Minimum non-zero delay is 1 instruction */ -#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) +#if defined(SIM_ASYNCH_CLOCKS) if ((sim_calb_tmr == -1) || 
/* if No timer initialized */ (inst_delay < rtc_currd[sim_calb_tmr]) || /* or sooner than next clock tick? */ - (rtc_elapsed[sim_calb_tmr] < sim_idle_stable) || /* or not idle stable yet */ - (!(sim_asynch_enabled && sim_asynch_timer))) { /* or asynch disabled */ + (rtc_calibrations[sim_calb_tmr] == 0) || /* or haven't calibrated yet */ + (!sim_asynch_timer)) { /* or asynch disabled */ sim_debug (DBG_TIM, &sim_timer_dev, "sim_timer_activate_after() - activating %s after %d instructions\n", sim_uname(uptr), inst_delay); return _sim_activate (uptr, inst_delay); /* queue it now */ } if (1) { - struct timespec now; - double d_now; + double d_now = sim_timenow_double (); - clock_gettime (CLOCK_REALTIME, &now); - d_now = _timespec_to_double (&now); /* Determine if this is a clock tick like invocation - or an ocaisional measured device delay */ + or an occasional measured device delay */ if ((uptr->a_usec_delay == usec_delay) && - (uptr->a_due_time != 0.0) && - (1)) { + (uptr->a_due_time != 0.0)) { double d_delay = ((double)usec_delay)/1000000.0; uptr->a_due_time += d_delay; @@ -1718,7 +2086,7 @@ if (1) { uptr->a_skew = uptr->a_last_fired_time = 0.0; uptr->a_due_time = d_now + (double)(usec_delay)/1000000.0; } - if (uptr->a_skew > rtc_clock_skew_max[sim_calb_tmr]) + if (fabs (uptr->a_skew) > fabs (rtc_clock_skew_max[sim_calb_tmr])) rtc_clock_skew_max[sim_calb_tmr] = uptr->a_skew; } else { @@ -1740,12 +2108,19 @@ if (1) { uptr->a_due_time = d_now + (double)(usec_delay)/1000000.0; } uptr->time = usec_delay; + uptr->cancel = &_sim_wallclock_cancel; /* bind cleanup method */ + uptr->a_is_active = &_sim_wallclock_is_active; + if (tmr < SIM_NTIMERS) { /* Timer Unit? 
*/ + sim_clock_unit[tmr]->cancel = &_sim_wallclock_cancel; + sim_clock_unit[tmr]->a_is_active = &_sim_wallclock_is_active; + } - sim_debug (DBG_TIM, &sim_timer_dev, "sim_timer_activate_after() - queue addition %s at %.6f\n", + sim_debug (DBG_TIM, &sim_timer_dev, "sim_timer_activate_after() - queue wallclock addition %s at %.6f\n", sim_uname(uptr), uptr->a_due_time); } pthread_mutex_lock (&sim_timer_lock); -while (sim_wallclock_entry) { +uptr->a_next = QUEUE_LIST_END; /* Temporarily mark as active */ +while (sim_wallclock_entry) { /* wait for any prior entry has been digested */ sim_debug (DBG_TIM, &sim_timer_dev, "sim_timer_activate_after() - queue insert entry %s busy waiting for 1ms\n", sim_uname(sim_wallclock_entry)); pthread_mutex_unlock (&sim_timer_lock); @@ -1770,9 +2145,21 @@ t_stat sim_register_clock_unit_tmr (UNIT *uptr, int32 tmr) if (NULL == sim_clock_unit[tmr]) sim_clock_cosched_queue[tmr] = QUEUE_LIST_END; sim_clock_unit[tmr] = uptr; +sim_timer_units[tmr].flags = (sim_clock_unit[tmr] ? 0 : UNIT_DIS | UNIT_IDLE); +sim_tick_units[tmr].flags = (sim_clock_unit[tmr] ? 0 : UNIT_DIS); return SCPE_OK; } +static int32 _tick_size () +{ +return (sim_calb_tmr != -1) ? rtc_currd[sim_calb_tmr] : 10000; +} + +int32 sim_rtcn_tick_size (int32 tmr) +{ +return (rtc_currd[tmr]) ? 
rtc_currd[tmr] : 10000; +} + t_stat sim_register_clock_unit (UNIT *uptr) { return sim_register_clock_unit_tmr (uptr, 0); @@ -1780,57 +2167,204 @@ return sim_register_clock_unit_tmr (uptr, 0); t_stat sim_clock_coschedule (UNIT *uptr, int32 interval) { -return sim_clock_coschedule_tmr (uptr, sim_calb_tmr, interval); +int32 ticks = (interval + (_tick_size ()/2))/_tick_size ();/* Convert to ticks */ + +sim_debug (DBG_QUE, &sim_timer_dev, "sim_clock_coschedule(interval=%d, ticks=%d)\n", interval, ticks); +return sim_clock_coschedule_tmr (uptr, sim_calb_tmr, ticks); } t_stat sim_clock_coschedule_abs (UNIT *uptr, int32 interval) { +int32 ticks = (interval + (_tick_size ()/2))/_tick_size ();/* Convert to ticks */ + +sim_debug (DBG_QUE, &sim_timer_dev, "sim_clock_coschedule_abs(interval=%d, ticks=%d)\n", interval, ticks); sim_cancel (uptr); -return sim_clock_coschedule_tmr (uptr, sim_calb_tmr, interval); +return sim_clock_coschedule_tmr (uptr, sim_calb_tmr, ticks); } -t_stat sim_clock_coschedule_tmr (UNIT *uptr, int32 tmr, int32 interval) +t_stat sim_clock_coschedule_tmr (UNIT *uptr, int32 tmr, int32 ticks) { -if ((tmr < 0) || (tmr >= SIM_NTIMERS) || - (NULL == sim_clock_unit[tmr])) - return sim_activate (uptr, interval); -else - if (sim_asynch_enabled && sim_asynch_timer) { - if (!sim_is_active (uptr)) { /* already active? 
*/ -#if defined(SIM_ASYNCH_IO) && defined(SIM_ASYNCH_CLOCKS) - if ((sim_calb_tmr != -1) && - (rtc_elapsed[sim_calb_tmr ] >= sim_idle_stable)) { - sim_debug (DBG_TIM, &sim_timer_dev, "sim_clock_coschedule() - queueing %s for clock co-schedule\n", sim_uname (uptr)); - pthread_mutex_lock (&sim_timer_lock); - uptr->a_next = sim_clock_cosched_queue[tmr]; - sim_clock_cosched_queue[tmr] = uptr; - pthread_mutex_unlock (&sim_timer_lock); - return SCPE_OK; - } - else { -#else - if (1) { -#endif - int32 t; +if (ticks < 0) + return SCPE_ARG; +if (sim_is_active (uptr)) { + sim_debug (DBG_TIM, &sim_timer_dev, "sim_clock_coschedule_tmr(tmr=%d) - %s is already active\n", tmr, sim_uname (uptr)); + return SCPE_OK; + } +if (tmr == SIM_INTERNAL_CLK) + tmr = SIM_NTIMERS; +else { + if ((tmr < 0) || (tmr >= SIM_NTIMERS)) + return sim_activate (uptr, ticks * 10000); + } +if (NULL == sim_clock_unit[tmr]) + return sim_activate (uptr, ticks * (rtc_currd[tmr] ? rtc_currd[tmr] : _tick_size ())); +else { + UNIT *cptr, *prvptr; + int32 accum; - t = sim_activate_time (sim_clock_unit[tmr]); - return sim_activate (uptr, t? t - 1: interval); - } - } - sim_debug (DBG_TIM, &sim_timer_dev, "sim_clock_coschedule() - %s is already active\n", sim_uname (uptr)); - return SCPE_OK; + sim_debug (DBG_QUE, &sim_timer_dev, "sim_clock_coschedule_tmr(tmr=%d) - queueing %s for clock co-schedule (ticks=%d)\n", tmr, sim_uname (uptr), ticks); + prvptr = NULL; + accum = 0; + for (cptr = sim_clock_cosched_queue[tmr]; cptr != QUEUE_LIST_END; cptr = cptr->next) { + if (ticks < (accum + cptr->time)) + break; + accum = accum + cptr->time; + prvptr = cptr; + } + if (prvptr == NULL) { + cptr = uptr->next = sim_clock_cosched_queue[tmr]; + sim_clock_cosched_queue[tmr] = uptr; } else { - int32 t; - - t = sim_activate_time (sim_clock_unit[tmr]); - return sim_activate (uptr, t? 
t - 1: interval); + cptr = uptr->next = prvptr->next; + prvptr->next = uptr; } + uptr->time = ticks - accum; + if (cptr != QUEUE_LIST_END) + cptr->time = cptr->time - uptr->time; + uptr->cancel = &_sim_coschedule_cancel; /* bind cleanup method */ + sim_cosched_interval[tmr] = sim_clock_cosched_queue[tmr]->time; + } +return SCPE_OK; } -t_stat sim_clock_coschedule_tmr_abs (UNIT *uptr, int32 tmr, int32 interval) +t_stat sim_clock_coschedule_tmr_abs (UNIT *uptr, int32 tmr, int32 ticks) { sim_cancel (uptr); -return sim_clock_coschedule_tmr (uptr, tmr, interval); +return sim_clock_coschedule_tmr (uptr, tmr, ticks); } +/* Cancel a unit on the coschedule queue */ +static void _sim_coschedule_cancel (UNIT *uptr) +{ +AIO_UPDATE_QUEUE; +if (uptr->next) { /* On a queue? */ + int tmr; + + for (tmr=0; tmrnext; + uptr->next = NULL; + } + else { + UNIT *cptr; + + for (cptr = sim_clock_cosched_queue[tmr]; + (cptr != QUEUE_LIST_END); + cptr = cptr->next) + if (cptr->next == (uptr)) { + cptr->next = (uptr)->next; + uptr->next = NULL; + break; + } + } + if (uptr->next == NULL) { /* found? */ + uptr->cancel = NULL; + sim_debug (SIM_DBG_EVENT, &sim_timer_dev, "Canceled Clock Coscheduled Event for %s\n", sim_uname(uptr)); + return; + } + } + } +} + +#if defined(SIM_ASYNCH_CLOCKS) +static void _sim_wallclock_cancel (UNIT *uptr) +{ +int32 tmr; + +AIO_UPDATE_QUEUE; +pthread_mutex_lock (&sim_timer_lock); +/* If this is a clock unit, we need to cancel both this and the related timer unit */ +for (tmr=0; tmra_next) { + UNIT *cptr; + + if (uptr == sim_wallclock_entry) { /* Pending on the queue? 
*/ + sim_wallclock_entry = NULL; + uptr->a_next = NULL; + } + else { + if (uptr == sim_wallclock_queue) { + sim_wallclock_queue = uptr->a_next; + uptr->a_next = NULL; + sim_debug (SIM_DBG_EVENT, &sim_timer_dev, "Canceling Timer Event for %s\n", sim_uname(uptr)); + pthread_cond_signal (&sim_timer_wake); + } + else { + for (cptr = sim_wallclock_queue; + (cptr != QUEUE_LIST_END); + cptr = cptr->a_next) { + if (cptr->a_next == (uptr)) { + cptr->a_next = (uptr)->a_next; + uptr->a_next = NULL; + sim_debug (SIM_DBG_EVENT, &sim_timer_dev, "Canceled Timer Event for %s\n", sim_uname(uptr)); + break; + } + } + } + } + if (uptr->a_next == NULL) { + uptr->a_due_time = uptr->a_skew = uptr->a_last_fired_time = uptr->a_usec_delay = 0; + uptr->cancel = NULL; + uptr->a_is_active = NULL; + if (tmr < SIM_NTIMERS) { /* Timer Unit? */ + sim_clock_unit[tmr]->cancel = NULL; + sim_clock_unit[tmr]->a_is_active = NULL; + } + } + } +pthread_mutex_unlock (&sim_timer_lock); +} + +int32 sim_timer_activate_time (UNIT *uptr) +{ +UNIT *cptr; +double inst_per_sec = sim_timer_inst_per_sec (); +double d_result; + +pthread_mutex_lock (&sim_timer_lock); +if (uptr == sim_wallclock_entry) { + d_result = ((uptr)->a_due_time - sim_timenow_double())*inst_per_sec; + if (d_result < 0.0) + d_result = 0.0; + if (d_result > (double)0x7FFFFFFE) + d_result = (double)0x7FFFFFFE; + pthread_mutex_unlock (&sim_timer_lock); + return ((int32)d_result) + 1; + } +for (cptr = sim_wallclock_queue; + cptr != QUEUE_LIST_END; + cptr = cptr->a_next) + if (uptr == cptr) { + d_result = ((uptr)->a_due_time - sim_timenow_double())*inst_per_sec; + if (d_result < 0.0) + d_result = 0.0; + if (d_result > (double)0x7FFFFFFE) + d_result = (double)0x7FFFFFFE; + pthread_mutex_unlock (&sim_timer_lock); + return ((int32)d_result) + 1; + } +pthread_mutex_unlock (&sim_timer_lock); +if (uptr->a_next) + return uptr->a_event_time + 1; +return -1; /* Not found. 
*/ +} + +static t_bool _sim_wallclock_is_active (UNIT *uptr) +{ +int32 tmr; + +if (uptr->a_next) + return TRUE; +/* If this is a clock unit, we need to examine the related timer unit instead */ +for (tmr=0; tmrtmxr; +int32 ticks = (interval + (sim_rtcn_tick_size (tmr)/2))/sim_rtcn_tick_size (tmr);/* Convert to ticks */ #if defined(SIM_ASYNCH_MUX) if ((!(uptr->dynflags & UNIT_TM_POLL)) || (!sim_asynch_enabled)) { - return sim_clock_coschedule (uptr, tmr, interval); + return sim_clock_coschedule (uptr, tmr, ticks); } return SCPE_OK; #else @@ -3937,7 +3938,7 @@ if (mp) { } } sim_debug (TIMER_DBG_MUX, &sim_timer_dev, "scheduling %s after interval %d instructions\n", sim_uname (uptr), interval); -return sim_clock_coschedule_tmr (uptr, tmr, interval); +return sim_clock_coschedule_tmr (uptr, tmr, ticks); #endif }