diff --git a/CMakeLists.txt b/CMakeLists.txt
index 120da3960..bcbb71e4a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -226,6 +226,9 @@ option(SIMH_PACKAGE_SUFFIX
 option(MAC_UNIVERSAL
     "macOS universal binary flag: TRUE -> build universal binaries, FALSE -> don't."
     ${MAC_UNIVERSAL_OPTVAL})
+option(DONT_USE_AIO_INTRINSICS
+    "Don't use compiler/platform intrinsics for AIO, revert to lock-based AIO"
+    FALSE)
 
 # Places where CMake should look for dependent package configuration fragments and artifacts:
 set(SIMH_PREFIX_PATH_LIST)
diff --git a/README-CMake.md b/README-CMake.md
index f5778fdd8..6d2d5f786 100644
--- a/README-CMake.md
+++ b/README-CMake.md
@@ -517,25 +517,22 @@ or video support.
     # List the supported command line flags:
     $ cmake/cmake-builder.sh --help
 
-    Configure and build simh simulators on Linux and *nix-like platforms.
+    ** cmake version 3.18.4
 
-    Subdirectories:
-    cmake/build-unix:   Makefile-based build simulators
-    cmake/build-ninja:  Ninja build-based simulators
+    CMake suite maintained and supported by Kitware (kitware.com/cmake).
+    Configure and build simh simulators on Linux and *nix-like platforms.
 
-    Options:
-    --------
+    Compile/Build options:
+    ----------------------
     --clean (-x)      Remove the build subdirectory
     --generate (-g)   Generate the build environment, don't compile/build
     --parallel (-p)   Enable build parallelism (parallel builds)
-    --nonetwork       Build simulators without network support
-    --novideo         Build simulators without video support
     --notest          Do not execute 'ctest' test cases
     --noinstall       Do not install SIMH simulators.
     --testonly        Do not build, execute the 'ctest' test cases
     --installonly     Do not build, install the SIMH simulators
-    --flavor (-f)     Specifies the build flavor. Valid flavors are:
+    --flavor (-f)     [Required] Specifies the build flavor. Valid flavors are:
                       unix
                       ninja
                       xcode
@@ -547,8 +544,7 @@ or video support.
     --config (-c)     Specifies the build configuration: 'Release' or 'Debug'
 
     --target          Build a specific simulator or simulators. Separate multiple
-                      targets by separating with a comma,
-                      e.g. "--target pdp8,pdp11,vax750,altairz80,3b2"
+                      targets with a comma, e.g. "--target pdp8,pdp11,vax750,altairz80,3b2"
 
     --lto             Enable Link Time Optimization (LTO) in Release builds
     --debugWall       Enable maximal warnings in Debug builds
     --cppcheck        Enable cppcheck static code analysis rules
@@ -559,6 +555,17 @@ or video support.
 
     --verbose         Turn on verbose build output
 
+    SIMH feature control options:
+    -----------------------------
+    --nonetwork       Build simulators without network support
+    --novideo         Build simulators without video support
+    --no-aio-intrinsics
+                      Do not use compiler/platform intrinsics to implement AIO
+                      functions (aka "lock-free" AIO); reverts to lock-based AIO
+                      if threading libraries are detected.
+
+    Other options:
+    --------------
     --help (-h)       Print this help.
     ```
 
@@ -575,7 +582,7 @@ or video support.
     PS C:\...\open-simh> Get-Help -detailed cmake\cmake-builder.ps1
 
     NAME
-        C:\Users\bsm21317\play\open-simh\cmake\cmake-builder.ps1
+        C:\...\play\open-simh\cmake\cmake-builder.ps1
 
     SYNOPSIS
         Configure and build SIMH's dependencies and simulators using the Microsoft Visual
    SYNTAX
-        C:\Users\bsm21317\play\open-simh\cmake\cmake-builder.ps1 [[-flavor] <String>] [[-config] <String>] [[-cpack_suffix] <String>] [[-target] <String[]>]
-        [-clean] [-help] [-nonetwork] [-novideo] [-notest] [-noinstall] [-parallel] [-generate] [-regenerate] [-testonly] [-installOnly] [-windeprecation]
-        [-package] [-lto] [-debugWall] [-cppcheck] [<CommonParameters>]
+        C:\...\play\open-simh\cmake\cmake-builder.ps1 [[-flavor] <String>] [[-config] <String>] [[-cpack_suffix] <String>] [[-target]
+        <String[]>] [-clean] [-help] [-nonetwork] [-novideo] [-noaioinstrinsics] [-notest] [-noinstall] [-parallel] [-generate] [-testonly]
+        [-installOnly] [-windeprecation] [-lto] [-debugWall] [-cppcheck] [<CommonParameters>]
 
 
    DESCRIPTION
@@ -594,9 +601,9 @@ or video support.
        1. Configure and generate the build environment selected by '-flavor' option.
        2. Build missing runtime dependencies and the simulator suite with the compiler
-          configuration selected by the '-config' option. The "Release" configuration
-          generates optimized executables; the "Debug" configuration generates
-          development executables with debugger information.
+           configuration selected by the '-config' option. The "Release" configuration
+           generates optimized executables; the "Debug" configuration generates
+           development executables with debugger information.
        3. Test the simulators
 
        There is an install phase that can be invoked separately as part of the SIMH
@@ -630,6 +637,9 @@ or video support.
            mingw-make      MinGW GCC/mingw32-make
            mingw-ninja     MinGW GCC/ninja
 
+        -config <String>
+            The target build configuration. Valid values: "Release" and "Debug"
+
    [...truncated for brevity...]
    ```
diff --git a/cmake/cmake-builder.sh b/cmake/cmake-builder.sh
index fc9015f1c..b4a3fffd2 100755
--- a/cmake/cmake-builder.sh
+++ b/cmake/cmake-builder.sh
@@ -7,8 +7,7 @@ showHelp()
    cat <<
[... the help-text heredoc and the remainder of this hunk were lost in extraction,
     along with the header and opening lines of the scp.c diff; the surviving
     fragment below is the tail of an scp.c hunk ...]
diff --git a/scp.c b/scp.c
        ->time = sim_interval; \
-       AIO_UNLOCK; \
        } \
    else \
        (void)0
@@ -378,43 +376,73 @@ pthread_mutex_t sim_tmxr_poll_lock = PTHREAD_MUTEX_INITIALIZER;
 pthread_cond_t sim_tmxr_poll_cond = PTHREAD_COND_INITIALIZER;
 int32 sim_tmxr_poll_count;
 pthread_t sim_asynch_main_threadid;
-UNIT * volatile sim_asynch_queue;
+sim_unit_atomic_t sim_asynch_queue;
 t_bool sim_asynch_enabled = TRUE;
-int32 sim_asynch_check;
 int32 sim_asynch_latency = 4000;      /* 4 usec interrupt latency */
 int32 sim_asynch_inst_latency = 20;   /* assume 5 mip simulator */
 
+/* Debug flush mutex to serialize debug output. */
+pthread_mutex_t sim_debug_io_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/* aio_queue_worklist: Grab the current queue and replace sim_asynch_queue with
+ * the empty queue.
+ *
+ * Returns the UNIT worklist to which sim_asynch_queue previously pointed.
+ */
+static SIM_INLINE sim_unit_atomic_t aio_queue_worklist()
+{
+    UNIT *q;
+
+    do {
+        /* Atomically load, wash-rinse-repeat if there is thread interference */
+        q = unit_ptr_load_atomic(&sim_asynch_queue);
+    } while (!unit_ptr_cmpxchg(&sim_asynch_queue, QUEUE_LIST_END, q));
+
+    return q;
+}
+
+/* aio_enqueue_unit: Atomically add a UNIT to the sim_asynch_queue list.
+ */ +static SIM_INLINE void aio_enqueue_unit(UNIT *unit) +{ + UNIT *q; + + do { + q = unit_ptr_load_atomic(&sim_asynch_queue); + unit->a_next = (UNIT *) q; /* Mark as on list */ + } while (!unit_ptr_cmpxchg(&sim_asynch_queue, unit, q)); +} + int sim_aio_update_queue (void) { int migrated = 0; AIO_ILOCK; -if (AIO_QUEUE_VAL != QUEUE_LIST_END) { /* List !Empty */ - UNIT *q, *uptr; +if (!AIO_QUEUE_EMPTY()) { + sim_unit_atomic_t q; + UNIT *uptr; int32 a_event_time; - do { /* Grab current queue */ - q = AIO_QUEUE_VAL; - } while (q != AIO_QUEUE_SET(QUEUE_LIST_END, q)); - while (q != QUEUE_LIST_END) { /* List !Empty */ - sim_debug (SIM_DBG_AIO_QUEUE, &sim_scp_dev, "Migrating Asynch event for %s after %d %s\n", sim_uname(q), q->a_event_time, sim_vm_interval_units); + for (q = aio_queue_worklist(); q != QUEUE_LIST_END; /* empty */) { + uptr = unit_ptr_load_atomic(&q); + sim_debug (SIM_DBG_AIO_QUEUE, &sim_scp_dev, "Migrating Asynch event for %s after %d %s\n", + sim_uname(uptr), uptr->a_event_time, sim_vm_interval_units); ++migrated; - uptr = q; q = q->a_next; - uptr->a_next = NULL; /* hygiene */ + uptr->a_next = NULL; /* hygiene */ if (uptr->a_activate_call != &sim_activate_notbefore) { - a_event_time = uptr->a_event_time-((sim_asynch_inst_latency+1)/2); + a_event_time = uptr->a_event_time - ((sim_asynch_inst_latency + 1) / 2); if (a_event_time < 0) a_event_time = 0; } else a_event_time = uptr->a_event_time; - AIO_IUNLOCK; + uptr->a_activate_call (uptr, a_event_time); + if (uptr->a_check_completion) { sim_debug (SIM_DBG_AIO_QUEUE, &sim_scp_dev, "Calling Completion Check for asynch event on %s\n", sim_uname(uptr)); uptr->a_check_completion (uptr); } - AIO_ILOCK; } } AIO_IUNLOCK; @@ -423,22 +451,19 @@ return migrated; void sim_aio_activate (ACTIVATE_API caller, UNIT *uptr, int32 event_time) { -AIO_ILOCK; sim_debug (SIM_DBG_AIO_QUEUE, &sim_scp_dev, "Queueing Asynch event for %s after %d %s\n", sim_uname(uptr), event_time, sim_vm_interval_units); -if (uptr->a_next) { + +AIO_ILOCK; +if (NULL != uptr->a_next) { uptr->a_activate_call = sim_activate_abs; } else { - UNIT *q; uptr->a_event_time = event_time; uptr->a_activate_call = caller; - do { - q = AIO_QUEUE_VAL; - uptr->a_next = q; /* Mark as on list */ - } while (q != AIO_QUEUE_SET(uptr, q)); + aio_enqueue_unit(uptr); } AIO_IUNLOCK; -sim_asynch_check = 0; /* try to force check */ + if (sim_idle_wait) { sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d %s\n", sim_uname(uptr), event_time, sim_vm_interval_units); pthread_cond_signal (&sim_asynch_wake); @@ -2808,7 +2833,7 @@ sim_quiet = sim_switches & SWMASK ('Q'); /* -q means quiet */ sim_on_inherit = sim_switches & SWMASK ('O'); /* -o means inherit on state */ sim_init_sock (); /* init socket capabilities */ -AIO_INIT; /* init Asynch I/O */ +aio_initialization(); /* init Asynch I/O */ sim_finit (); /* init fio package */ sim_disk_init (); /* init disk package */ sim_tape_init (); /* init tape package */ @@ -2982,7 +3007,7 @@ sim_set_logoff (0, NULL); /* close log */ sim_set_notelnet (0, NULL); /* close Telnet */ vid_close_all (); /* close video */ sim_ttclose (); /* close console */ -AIO_CLEANUP; /* Asynch I/O */ +aio_termination (); /* Asynch I/O */ sim_cleanup_sock (); /* cleanup sockets */ fclose (stdnul); /* close bit bucket file handle */ free (targv); /* release any argv copy that was made */ @@ -7038,17 +7063,20 @@ sim_show_clock_queues (st, dnotused, unotused, flag, cptr); pthread_mutex_lock (&sim_asynch_lock); sim_mfile = &buf; fprintf (st, "asynchronous pending 
event queue\n");
-if (sim_asynch_queue == QUEUE_LIST_END)
+if (AIO_QUEUE_EMPTY())
     fprintf (st, "  Empty\n");
 else {
-    for (uptr = sim_asynch_queue; uptr != QUEUE_LIST_END; uptr = uptr->a_next) {
-        if ((dptr = find_dev_from_unit (uptr)) != NULL) {
+    UNIT *p_unit;
+    for (p_unit = unit_ptr_load_atomic(&sim_asynch_queue);
+         p_unit != QUEUE_LIST_END;
+         p_unit = unit_ptr_load_atomic(&p_unit->a_next)) {
+        if ((dptr = find_dev_from_unit (p_unit)) != NULL) {
             fprintf (st, "  %s", sim_dname (dptr));
             if (dptr->numunits > 1)
                 fprintf (st, " unit %d",
-                    (int32) (uptr - dptr->units));
+                    (int32) (p_unit - dptr->units));
             }
         else
             fprintf (st, "  Unknown");
-        fprintf (st, " event delay %d\n", uptr->a_event_time);
+        fprintf (st, " event delay %d\n", p_unit->a_event_time);
         }
     }
 fprintf (st, "asynch latency: %d nanoseconds\n", sim_asynch_latency);
@@ -13660,7 +13688,8 @@ if (sim_deb_switches & SWMASK ('F')) {      /* filtering disabled? */
     _debug_fwrite (buf, len);               /* output now. */
     return;                                 /* done */
     }
-AIO_LOCK;
+
+AIO_DEBUG_IO_ACTIVE;
 if (debug_line_offset + len + 1 > debug_line_bufsize) {
     /* realloc(NULL, size) == malloc(size). Initialize the malloc()-ed space. Only
        need to test debug_line_buf since SIMH allocates both buffers at the same
@@ -13745,7 +13774,7 @@ while (NULL != (eol = strchr (debug_line_buf, '\n')) || flush) {
     memmove (debug_line_buf, eol + 1, debug_line_offset);
     debug_line_buf[debug_line_offset] = '\0';
     }
-AIO_UNLOCK;
+AIO_DEBUG_IO_DONE;
 }
 
 static void _sim_debug_write (const char *buf, size_t len)
@@ -14656,15 +14685,15 @@ for (hblock = astrings; (htext = *hblock) != NULL; hblock++) {
     }
 excluded = FALSE;
 if (*start == '?') {                        /* Conditional topic? */
-    size_t n = 0;
+    size_t m = 0;
     start++;
     while (sim_isdigit (*start))            /* Get param # */
-        n += (n * 10) + (*start++ - '0');
-    if (!*start || *start == '\n'|| n == 0 || n >= VSMAX)
+        m += (m * 10) + (*start++ - '0');
+    if (!*start || *start == '\n'|| m == 0 || m >= VSMAX)
         FAIL (SCPE_ARG, Invalid parameter number, start);
-    while (n > vsnum)                       /* Get arg pointer if not cached */
+    while (m > vsnum)                       /* Get arg pointer if not cached */
         vstrings[vsnum++] = va_arg (ap, char *);
-    end = vstrings[n-1];                    /* Check for True */
+    end = vstrings[m-1];                    /* Check for True */
     if (!end || !(sim_toupper (*end) == 'T' || *end == '1')) {
         excluded = TRUE;                    /* False, skip topic this time */
         if (*htext)
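
The scp.c hunks above move debug-output serialization off the recursive `sim_asynch_lock` and onto the dedicated `sim_debug_io_lock`. As an aside, here is a minimal, self-contained pthread sketch of that pattern; the names and buffer handling are invented for illustration and are not SIMH's actual implementation:

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Dedicated lock: contending debug writers serialize here instead of on
 * the busier (recursive) queue mutex. */
static pthread_mutex_t debug_io_lock = PTHREAD_MUTEX_INITIALIZER;
static char   line_buf[512];
static size_t line_off;

static void debug_write(const char *s)
{
    pthread_mutex_lock(&debug_io_lock);          /* cf. AIO_DEBUG_IO_ACTIVE */
    size_t n = strlen(s);
    if (line_off + n < sizeof(line_buf)) {       /* assemble partial lines */
        memcpy(line_buf + line_off, s, n);
        line_off += n;
    }
    if (line_off > 0 && line_buf[line_off - 1] == '\n') {
        fwrite(line_buf, 1, line_off, stderr);   /* flush completed lines */
        line_off = 0;
    }
    pthread_mutex_unlock(&debug_io_lock);        /* cf. AIO_DEBUG_IO_DONE */
}
```
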
diff --git a/sim_defs.h b/sim_defs.h
index fbb32ecd1..5bb71d398 100644
--- a/sim_defs.h
+++ b/sim_defs.h
@@ -291,6 +291,31 @@ typedef uint32 t_addr;
 #define STACKBUFSIZE 2048
 #endif
 
+/* Atomic/synchronized types:
+ *
+ * For C11/C23 and beyond, use the standard's atomic type qualifier, otherwise revert
+ * to volatile (which is about the best that can be done).
+ *
+ * Pointers: This is a macro wrapping around the pointer type. Type-specific pointers
+ * can convert to/from "void *".
+ *
+ * sim_atomic_t: The ordinary atomic/synchronized type. Microsoft nails this down to
+ * the LONG type; long seems reasonable for other platforms.
+ */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
+# define SIM_ATOMIC_PTR(TYPE) _Atomic(TYPE *)
+  typedef _Atomic(long) sim_atomic_t;
+#else
+/* No standard type, use volatile. This doesn't give the same guarantees as the
+ * standard's atomic support, but it's the best that can be done.
+ */
+# define SIM_ATOMIC_PTR(TYPE) volatile TYPE *
+# if defined(_MSC_VER)
+   typedef LONG sim_atomic_t;
+# else
+   typedef long sim_atomic_t;
+# endif
+#endif
+
 #if defined (_WIN32) /* Actually, a GCC issue */
 #define LL_TYPE long long
 #else
@@ -491,6 +516,7 @@ typedef struct MEMFILE MEMFILE;
 typedef struct BITFIELD BITFIELD;
 
 typedef t_stat (*ACTIVATE_API)(UNIT *unit, int32 interval);
+typedef SIM_ATOMIC_PTR(UNIT) sim_unit_atomic_t;
 
 /* Device data structure */
@@ -620,7 +646,7 @@ struct UNIT {
 #ifdef SIM_ASYNCH_IO
     void                (*a_check_completion)(UNIT *);
     t_bool              (*a_is_active)(UNIT *);
-    UNIT                *a_next;                        /* next asynch active */
+    sim_unit_atomic_t   a_next;                         /* next asynch active */
     int32               a_event_time;
     ACTIVATE_API        a_activate_call;
     /* Asynchronous Polling control */
@@ -1150,6 +1176,38 @@ struct MEMFILE {
     sim_printf("%s failed at %s line %d\n", #_Expression, __FILE__, __LINE__); \
     abort();}
 
+
+/*== Atomic/synchronized functions ==*/
+/* sim_ptr_cmpxchg: Wrapper function around platform-dependent atomic compare/exchange
+ * primitives that stores src into *dest iff *dest == current.
+ *
+ * Returns:
+ * 0: *dest != current (failed, need to retry due to thread interference)
+ * 1: *dest == current (success)
+ */
+static SIM_INLINE int sim_ptr_cmpxchg(SIM_ATOMIC_PTR(void) *dest, void *src, void *current);
+
+/* sim_cmpxchg: Wrapper function around platform-dependent atomic compare/exchange for
+ * sim_atomic_t values.
+ *
+ * Returns:
+ * 0: *dest != current (failed, need to retry due to thread interference)
+ * 1: *dest == current (success)
+ */
+static SIM_INLINE int sim_cmpxchg(sim_atomic_t *dest, sim_atomic_t src, sim_atomic_t current);
+
+/* sim_ptr_load_atomic: Wrapper function around platform-dependent atomic load
+ * intrinsics for pointers.
+ */
+static SIM_INLINE void *sim_ptr_load_atomic(SIM_ATOMIC_PTR(void) *src);
+
+/* sim_load_atomic: Wrapper function around platform-dependent atomic load
+ * intrinsics for sim_atomic_t values.
+ */
+static SIM_INLINE sim_atomic_t sim_load_atomic(sim_atomic_t *src);
+
+static SIM_INLINE void sim_store_atomic(sim_atomic_t *dest, sim_atomic_t src);
+
 /* Asynch/Threaded I/O support */
 
 #if defined (SIM_ASYNCH_IO)
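
To make these typedefs and forward declarations concrete: under a C11 compiler, `SIM_ATOMIC_PTR(UNIT)` expands to `_Atomic(UNIT *)`, and the wrappers reduce to `<stdatomic.h>` operations. A standalone sketch (illustrative only; no SIMH types) of the load/compare-exchange pattern these declarations support:

```c
#include <stdatomic.h>
#include <stdio.h>

typedef struct node { struct node *next; int v; } node;

/* What SIM_ATOMIC_PTR(node) would expand to under C11: */
typedef _Atomic(node *) node_atomic_t;

static node_atomic_t list_head = NULL;

/* Push using the same retry loop the sim_ptr_cmpxchg callers employ:
 * refresh the head and retry whenever another thread interferes. */
static void push(node *n)
{
    node *cur = atomic_load(&list_head);
    do {
        n->next = cur;   /* cur is refreshed by a failed CAS */
    } while (!atomic_compare_exchange_weak(&list_head, &cur, n));
}

int main(void)
{
    node a = { NULL, 1 }, b = { NULL, 2 };
    push(&a);
    push(&b);
    printf("head->v == %d\n", atomic_load(&list_head)->v);   /* prints 2 */
    return 0;
}
```
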
@@ -1166,12 +1224,16 @@ extern int32 sim_tmxr_poll_count;
 extern pthread_cond_t sim_tmxr_poll_cond;
 extern pthread_mutex_t sim_tmxr_poll_lock;
 extern pthread_t sim_asynch_main_threadid;
-extern UNIT * volatile sim_asynch_queue;
+extern sim_unit_atomic_t sim_asynch_queue;
 extern volatile t_bool sim_idle_wait;
-extern int32 sim_asynch_check;
 extern int32 sim_asynch_latency;
 extern int32 sim_asynch_inst_latency;
 
+/* Debug I/O serialization lock. (sim_asynch_lock was used for debug I/O
+ * serialization; now separate for semantic clarity. Not performance
+ * critical.)
+ */
+extern pthread_mutex_t sim_debug_io_lock;
+
 /* Thread local storage */
 #if defined(thread_local)
 #define AIO_TLS thread_local
@@ -1186,6 +1248,7 @@ extern int32 sim_asynch_inst_latency;
 /* It is primarily only used in debugging messages */
 #define AIO_TLS
 #endif
+
 #define AIO_QUEUE_CHECK(que, lock) \
     do { \
         UNIT *_cptr; \
@@ -1194,7 +1257,7 @@ extern int32 sim_asynch_inst_latency;
         for (_cptr = que; \
             (_cptr != QUEUE_LIST_END); \
             _cptr = _cptr->next) \
-            if (!_cptr->next) { \
+            if (_cptr->next == NULL) { \
                 if (sim_deb) { \
                     sim_debug (SIM_DBG_EVENT, sim_dflt_dev, "Queue Corruption detected\n");\
                     fclose(sim_deb); \
@@ -1206,12 +1269,15 @@ extern int32 sim_asynch_inst_latency;
         if (lock) \
             pthread_mutex_unlock (lock); \
         } while (0)
-#define AIO_MAIN_THREAD (pthread_equal ( pthread_self(), sim_asynch_main_threadid ))
-#define AIO_LOCK \
-    pthread_mutex_lock(&sim_asynch_lock)
-#define AIO_UNLOCK \
-    pthread_mutex_unlock(&sim_asynch_lock)
+
+#define AIO_MAIN_THREAD (pthread_equal ( pthread_self(), sim_asynch_main_threadid ))
+#define AIO_LOCK pthread_mutex_lock(&sim_asynch_lock)
+#define AIO_UNLOCK pthread_mutex_unlock(&sim_asynch_lock)
+#define AIO_DEBUG_IO_ACTIVE pthread_mutex_lock(&sim_debug_io_lock)
+#define AIO_DEBUG_IO_DONE pthread_mutex_unlock(&sim_debug_io_lock)
+
 #define AIO_IS_ACTIVE(uptr) (((uptr)->a_is_active ? (uptr)->a_is_active (uptr) : FALSE) || ((uptr)->a_next))
+
 #if defined(SIM_ASYNCH_MUX)
 #define AIO_CANCEL(uptr) \
     if (((uptr)->dynflags & UNIT_TM_POLL) && \
@@ -1224,9 +1290,11 @@ extern int32 sim_asynch_inst_latency;
 #if !defined(AIO_CANCEL)
 #define AIO_CANCEL(uptr)
 #endif /* !defined(AIO_CANCEL) */
+
 #define AIO_EVENT_BEGIN(uptr) \
     do { \
         int __was_poll = uptr->dynflags & UNIT_TM_POLL
+
 #define AIO_EVENT_COMPLETE(uptr, reason) \
     if (__was_poll) { \
         pthread_mutex_lock (&sim_tmxr_poll_lock); \
@@ -1242,154 +1310,337 @@ extern int32 sim_asynch_inst_latency;
         AIO_UPDATE_QUEUE; \
         } while (0)
 
-#if defined(__DECC_VER)
-#include <builtins.h>
-#if defined(__IA64)
-#define USE_AIO_INTRINSICS 1
-#endif
-#endif
-#if defined(_WIN32) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
-#define USE_AIO_INTRINSICS 1
-#endif
-/* Provide a way to test both Intrinsic and Lock based queue manipulations */
-/* when both are available on a particular platform */
-#if defined(DONT_USE_AIO_INTRINSICS) && defined(USE_AIO_INTRINSICS)
-#undef USE_AIO_INTRINSICS
-#endif
-#ifdef USE_AIO_INTRINSICS
-/* This approach uses intrinsics to manage access to the link list head */
-/* sim_asynch_queue. This implementation is a completely lock free design */
-/* which avoids the potential ABA issues. */
-#define AIO_QUEUE_MODE "Lock free asynchronous event queue"
-#define AIO_INIT \
-    do { \
-        sim_asynch_main_threadid = pthread_self(); \
-        /* Empty list/list end uses the point value (void *)1. \
-           This allows NULL in an entry's a_next pointer to \
-           indicate that the entry is not currently in any list */ \
-        sim_asynch_queue = QUEUE_LIST_END; \
-        } while (0)
-#define AIO_CLEANUP \
-    do { \
-        pthread_mutex_destroy(&sim_asynch_lock); \
-        pthread_cond_destroy(&sim_asynch_wake); \
-        pthread_mutex_destroy(&sim_timer_lock); \
-        pthread_cond_destroy(&sim_timer_wake); \
-        pthread_mutex_destroy(&sim_tmxr_poll_lock); \
-        pthread_cond_destroy(&sim_tmxr_poll_cond); \
-        } while (0)
-#ifdef _WIN32
-#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
-#define InterlockedCompareExchangePointer(Destination, Exchange, Comparand) __sync_val_compare_and_swap(Destination, Comparand, Exchange)
-#elif defined(__DECC_VER)
-#define InterlockedCompareExchangePointer(Destination, Exchange, Comparand) (void *)((int32)_InterlockedCompareExchange64(Destination, Exchange, Comparand))
+/* Interior AIO locking: */
+#if !defined(DONT_USE_AIO_INTRINSICS) && \
+    (defined(_WIN32) || \
+     (defined(__ATOMIC_ACQ_REL) && defined(__ATOMIC_SEQ_CST) && defined(__ATOMIC_ACQUIRE)) || \
+     (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) || \
+     (defined(__DECC_VER) && defined(__IA64)))
+/* Atomic compare/exchange exists and is sufficient to manage the AIO queue across
+ * threads, no extra mutex locking needed. */
+# define AIO_ILOCK
+# define AIO_IUNLOCK
+
+# define AIO_QUEUE_MODE "Thread-based asynchronous event queue with intrinsics"
+# define AIO_MUTEX_ONLY 0
 #else
-#error "Implementation of function InterlockedCompareExchangePointer() is needed to build with USE_AIO_INTRINSICS"
+/* Acquire the sim_asynch_lock mutex to ensure exclusion when manipulating
+ * sim_asynch_queue. */
+# define AIO_ILOCK AIO_LOCK
+# define AIO_IUNLOCK AIO_UNLOCK
+
+# define AIO_QUEUE_MODE "Thread-based asynchronous event queue, mutex-only"
+# define AIO_MUTEX_ONLY 1
 #endif
-#define AIO_ILOCK AIO_LOCK
-#define AIO_IUNLOCK AIO_UNLOCK
-#define AIO_QUEUE_VAL (UNIT *)(InterlockedCompareExchangePointer((void * volatile *)&sim_asynch_queue, (void *)sim_asynch_queue, NULL))
-#define AIO_QUEUE_SET(newval, oldval) (UNIT *)(InterlockedCompareExchangePointer((void * volatile *)&sim_asynch_queue, (void *)newval, oldval))
-#define AIO_UPDATE_QUEUE sim_aio_update_queue ()
-#define AIO_ACTIVATE(caller, uptr, event_time) \
-    if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \
-        sim_aio_activate ((ACTIVATE_API)caller, uptr, event_time); \
-        return SCPE_OK; \
-    } else (void)0
-#else /* !USE_AIO_INTRINSICS */
-/* This approach uses a pthread mutex to manage access to the link list */
-/* head sim_asynch_queue. It will always work, but may be slower than the */
-/* lock free approach when using USE_AIO_INTRINSICS */
-#define AIO_QUEUE_MODE "Lock based asynchronous event queue"
-#define AIO_INIT \
-    do { \
-        pthread_mutexattr_t attr; \
-        \
-        pthread_mutexattr_init (&attr); \
-        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); \
-        pthread_mutex_init (&sim_asynch_lock, &attr); \
-        pthread_mutexattr_destroy (&attr); \
-        sim_asynch_main_threadid = pthread_self(); \
-        /* Empty list/list end uses the point value (void *)1. \
-           This allows NULL in an entry's a_next pointer to \
-           indicate that the entry is not currently in any list */ \
-        sim_asynch_queue = QUEUE_LIST_END; \
-        } while (0)
-#define AIO_CLEANUP \
-    do { \
-        pthread_mutex_destroy(&sim_asynch_lock); \
-        pthread_cond_destroy(&sim_asynch_wake); \
-        pthread_mutex_destroy(&sim_timer_lock); \
-        pthread_cond_destroy(&sim_timer_wake); \
-        pthread_mutex_destroy(&sim_tmxr_poll_lock); \
-        pthread_cond_destroy(&sim_tmxr_poll_cond); \
-        } while (0)
-#define AIO_ILOCK AIO_LOCK
-#define AIO_IUNLOCK AIO_UNLOCK
-#define AIO_QUEUE_VAL sim_asynch_queue
-#define AIO_QUEUE_SET(newval, oldval) ((sim_asynch_queue = newval),oldval)
+
 #define AIO_UPDATE_QUEUE sim_aio_update_queue ()
-#define AIO_ACTIVATE(caller, uptr, event_time) \
-    if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \
-        sim_debug (SIM_DBG_AIO_QUEUE, sim_dflt_dev, "Queueing Asynch event for %s after %d instructions\n", sim_uname(uptr), event_time);\
-        AIO_LOCK; \
-        if (uptr->a_next) { /* already queued? */ \
-            uptr->a_activate_call = sim_activate_abs; \
-        } else { \
-            uptr->a_next = sim_asynch_queue; \
-            uptr->a_event_time = event_time; \
-            uptr->a_activate_call = (ACTIVATE_API)&caller; \
-            sim_asynch_queue = uptr; \
-        } \
-        if (sim_idle_wait) { \
-            if (sim_deb) { /* only while debug do lock/unlock overhead */ \
-                AIO_UNLOCK; \
-                sim_debug (TIMER_DBG_IDLE, &sim_timer_dev, "waking due to event on %s after %d instructions\n", sim_uname(uptr), event_time);\
-                AIO_LOCK; \
-            } \
-            pthread_cond_signal (&sim_asynch_wake); \
-        } \
-        AIO_UNLOCK; \
-        sim_asynch_check = 0; \
-        return SCPE_OK; \
-    } else (void)0
-#endif /* USE_AIO_INTRINSICS */
-#define AIO_VALIDATE(uptr) \
-    if (!pthread_equal ( pthread_self(), sim_asynch_main_threadid )) { \
-        sim_printf("Improper thread context for operation on %s in %s line %d\n", \
-                   sim_uname(uptr), __FILE__, __LINE__); \
-        abort(); \
-    } else (void)0
-#define AIO_CHECK_EVENT \
-    if (0 > --sim_asynch_check) { \
-        AIO_UPDATE_QUEUE; \
-        sim_asynch_check = sim_asynch_inst_latency; \
-    } else (void)0
-#define AIO_SET_INTERRUPT_LATENCY(instpersec) \
-    do { \
-        sim_asynch_inst_latency = (int32)((((double)(instpersec))*sim_asynch_latency)/1000000000);\
-        if (sim_asynch_inst_latency == 0) \
-            sim_asynch_inst_latency = 1; \
-    } while (0)
+
+/* AIO_ACTIVATE has to be a macro so that the enclosing function returns
+ * when not within the main thread's context.
+ *
+ * Somewhat questionable style because it's not obvious that this will cause
+ * the enclosing function to return early when not in the simulator's main
+ * thread.
+ */
+#define AIO_ACTIVATE(caller, uptr, event_time) \
+    if (!AIO_MAIN_THREAD) { \
+        return sim_do_aio_activate((caller), (uptr), (event_time)); \
+    } else (void)0
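
To illustrate the early-return behavior that comment warns about: a caller such as `sim_activate` (sketched below; the real scp.c body differs) expands the macro into a guard at the top of the function, so a call made from a worker thread queues the request and returns without touching the event queue directly:

```c
/* Sketch of a caller. AIO_ACTIVATE expands to:
 *     if (!AIO_MAIN_THREAD) {
 *         return sim_do_aio_activate((sim_activate), (uptr), (interval));
 *     } else (void)0;
 * so the function below returns early on non-main threads. */
t_stat sim_activate (UNIT *uptr, int32 interval)
{
AIO_ACTIVATE (sim_activate, uptr, interval);    /* may return early here */
return _sim_activate (uptr, interval);          /* main-thread path */
}
```
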
+
+/* AIO_CHECK_EVENT remains a macro to minimize code disruption. If it were
+ * an inline function, one would need to go through the simulators and add
+ * parentheses so that it looks like a function invocation.
+ */
+#define AIO_CHECK_EVENT sim_do_check_event ()
+
+/*=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~*/
+/* Inline code hair: */
+/*=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~*/
+
+static SIM_INLINE t_stat sim_do_aio_activate(ACTIVATE_API caller, UNIT *uptr, int32 event_time)
+{
+    AIO_LOCK;
+    sim_aio_activate (caller, uptr, event_time);
+    AIO_UNLOCK;
+    return SCPE_OK;
+}
+
+static SIM_INLINE void AIO_VALIDATE(UNIT *uptr)
+{
+    if (!AIO_MAIN_THREAD) {
+        sim_printf("Improper thread context for operation on %s in %s line %d\n",
+                   sim_uname(uptr), __FILE__, __LINE__);
+        abort();
+    }
+}
+
+/*=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~*/
+/* AIO_QUEUE_EMPTY: Return true (1) if sim_asynch_queue is empty (points to
+ * QUEUE_LIST_END).
+ */
+static SIM_INLINE int AIO_QUEUE_EMPTY(void)
+{
+    return (sim_ptr_load_atomic((SIM_ATOMIC_PTR(void) *) &sim_asynch_queue) == (void *) QUEUE_LIST_END);
+}
+
+/*=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~*/
+/* sim_do_check_event: Check the event queue for work and process work items if
+ * necessary.
+ */
+static SIM_INLINE void sim_do_check_event(void)
+{
+    AIO_ILOCK;
+    if (!AIO_QUEUE_EMPTY()) {
+        AIO_UPDATE_QUEUE;
+    }
+    AIO_IUNLOCK;
+}
+
 #else /* !SIM_ASYNCH_IO */
 
 #define AIO_QUEUE_MODE "Asynchronous I/O is not available"
 #define AIO_UPDATE_QUEUE
 #define AIO_ACTIVATE(caller, uptr, event_time)
 #define AIO_VALIDATE(uptr)
 #define AIO_CHECK_EVENT
-#define AIO_INIT
 #define AIO_MAIN_THREAD TRUE
 #define AIO_LOCK
 #define AIO_UNLOCK
-#define AIO_CLEANUP
+#define AIO_DEBUG_IO_ACTIVE
+#define AIO_DEBUG_IO_DONE
 #define AIO_EVENT_BEGIN(uptr)
 #define AIO_EVENT_COMPLETE(uptr, reason)
 #define AIO_IS_ACTIVE(uptr) FALSE
 #define AIO_CANCEL(uptr)
-#define AIO_SET_INTERRUPT_LATENCY(instpersec)
 #define AIO_TLS
 #endif /* SIM_ASYNCH_IO */
 
+/*=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
+ * Inline code hair for functions common across both SIM_ASYNCH_IO
+ * implementations:
+ *=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~*/
+/* AIO initialization:
+ * (Note: Could be moved to scp.c, which is the only place this function
+ * is referenced.) */
+static SIM_INLINE void aio_initialization(void)
+{
+#if defined(SIM_ASYNCH_IO)
+    pthread_mutexattr_t attr;
+
+    pthread_mutexattr_init (&attr);
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+    pthread_mutex_init (&sim_asynch_lock, &attr);
+    pthread_mutexattr_destroy (&attr);
+    sim_asynch_main_threadid = pthread_self();
+    /* Empty list/list end uses the pointer value (void *)1.
+       This allows NULL in an entry's a_next pointer to
+       indicate that the entry is not currently in any list */
+    sim_asynch_queue = QUEUE_LIST_END;
+#endif
+}
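
The `(void *)1` sentinel is what lets the queue distinguish membership from termination: `NULL` in `a_next` means "this unit is not on any list," while `QUEUE_LIST_END` marks the tail of a list. A sketch of the invariant, assuming SIMH's usual `#define QUEUE_LIST_END ((UNIT *)1)` (names below are illustrative, not part of the patch):

```c
/* A unit is queued iff a_next is non-NULL; the tail of the queue holds
 * QUEUE_LIST_END, never NULL, so a NULL a_next always means "idle".
 * This is why AIO_IS_ACTIVE can simply test (uptr)->a_next. */
static int unit_is_queued (UNIT *uptr)
{
    return uptr->a_next != NULL;
}

static int unit_is_tail (UNIT *uptr)
{
    return uptr->a_next == QUEUE_LIST_END;
}
```
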
+
+/*=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
+ * AIO cleanup:
+ * (Note: Could be moved to scp.c, which is the only place this function
+ * is referenced.)
+ */
+static SIM_INLINE void aio_termination(void)
+{
+#if defined(SIM_ASYNCH_IO)
+    pthread_mutex_destroy(&sim_asynch_lock);
+    pthread_cond_destroy(&sim_asynch_wake);
+    pthread_mutex_destroy(&sim_timer_lock);
+    pthread_cond_destroy(&sim_timer_wake);
+    pthread_mutex_destroy(&sim_tmxr_poll_lock);
+    pthread_cond_destroy(&sim_tmxr_poll_cond);
+    pthread_mutex_destroy(&sim_debug_io_lock);
+#endif
+}
+
+/*=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
+ * Interrupt latency delay update:
+ */
+static SIM_INLINE void AIO_SET_INTERRUPT_LATENCY(uint32 instpersec)
+{
+#if defined(SIM_ASYNCH_IO)
+    static const double NSEC_PER_SEC = 1.0e9;
+    double inst_latency = ((double) instpersec * (double) sim_asynch_latency) / NSEC_PER_SEC;
+
+    sim_asynch_inst_latency = (inst_latency >= 1.0 ? ((int32) inst_latency) : 1);
+#endif
+}
+
+/*=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
+ * Atomic primitives. Unfortunately, these have to be type-specific, which
+ * makes the code repetitive.
+ *=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~*/
+
+static SIM_INLINE int sim_ptr_cmpxchg(SIM_ATOMIC_PTR(void) *dest, void *src, void *current)
+{
+#if defined(SIM_ASYNCH_IO) && !AIO_MUTEX_ONLY
+# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
+    /* C11/C23 atomics: */
+    void *temp = current;
+
+    return atomic_compare_exchange_strong((_Atomic(void*) *) dest, &temp, src);
+# elif defined(__GNUC__)
+#  if defined(__has_builtin)
+#   if __has_builtin(__atomic_compare_exchange)
+    void *temp = current;
+
+    return __atomic_compare_exchange(dest, &temp, &src, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+#   else
+    void *temp = current;
+
+    return __sync_bool_compare_and_swap(dest, temp, src);
+#   endif
+#  else
+    void *temp = current;
+
+    return __atomic_compare_exchange(dest, &temp, &src, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+#  endif
+# elif defined(_MSC_VER)
+    return (InterlockedCompareExchangePointer((volatile PVOID *) dest, src, current) == current);
+# elif defined(__DECC_VER)
+    return (_InterlockedCompareExchange64(dest, src, current) == current);
+# endif
+#else
+    /* Fallback when not using asynchronous I/O, or when using the mutex-only version. */
+    if (*dest == current) {
+        *dest = src;
+        return 1;
+    } else
+        return 0;
+#endif
+}
+
+static SIM_INLINE void *sim_ptr_load_atomic(SIM_ATOMIC_PTR(void) *src)
+{
+#if defined(SIM_ASYNCH_IO) && !AIO_MUTEX_ONLY
+# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
+    return atomic_load((_Atomic(void *) *) src);
+# elif defined(__GNUC__)
+#  if defined(__has_builtin)
+#   if __has_builtin(__atomic_load_n)
+    /* GCC/Clang: use __atomic_load_n for an atomic read with seq_cst ordering */
+    return ((void *) __atomic_load_n(src, __ATOMIC_SEQ_CST));
+#   else
+    /* Fallback. Technically, should never encounter this branch. */
+    return __sync_val_compare_and_swap(src, NULL, NULL);
+#   endif
+#  else
+    return __sync_val_compare_and_swap(src, NULL, NULL);
+#  endif
+# elif defined(_WIN32)
+    return InterlockedCompareExchangePointer((volatile PVOID *) src, NULL, NULL);
+# elif defined(__DECC_VER)
+    return _InterlockedCompareExchange64(src, NULL, NULL);
+# endif
+#else
+    /* No asynchronous I/O, or mutex-only. */
+    return ((void *) *src);
+#endif
+}
+
+static SIM_INLINE int sim_cmpxchg(sim_atomic_t *dest, sim_atomic_t src, sim_atomic_t current)
+{
+#if defined(SIM_ASYNCH_IO) && !AIO_MUTEX_ONLY
+# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
+    /* C11/C23 atomics: */
+    return atomic_compare_exchange_strong(dest, &current, src);
+# elif defined(__GNUC__)
+#  if defined(__has_builtin)
+#   if __has_builtin(__atomic_compare_exchange)
+    return __atomic_compare_exchange(dest, &current, &src, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+#   else
+    return __sync_bool_compare_and_swap(dest, current, src);
+#   endif
+#  else
+    return __atomic_compare_exchange(dest, &current, &src, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+#  endif
+# elif defined(_MSC_VER)
+    return (InterlockedCompareExchange(dest, src, current) == current);
+# elif defined(__DECC_VER)
+    return (_InterlockedCompareExchange(dest, src, current) == current);
+# endif
+#else
+    /* Fallback when not using asynchronous I/O, or when using the mutex-only version. */
+    if (*dest == current) {
+        *dest = src;
+        return 1;
+    } else
+        return 0;
+#endif
+}
+
+static SIM_INLINE sim_atomic_t sim_load_atomic(sim_atomic_t *src)
+{
+#if defined(SIM_ASYNCH_IO) && !AIO_MUTEX_ONLY
+# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
+    return atomic_load(src);
+# elif defined(__GNUC__)
+#  if defined(__has_builtin)
+#   if __has_builtin(__atomic_load_n)
+    /* GCC/Clang: use __atomic_load_n for an atomic read with seq_cst ordering */
+    return __atomic_load_n(src, __ATOMIC_SEQ_CST);
+#   else
+    /* Fallback. Technically, should never encounter this branch. */
+    return __sync_val_compare_and_swap(src, 0, 0);
+#   endif
+#  else
+    return __sync_val_compare_and_swap(src, 0, 0);
+#  endif
+# elif defined(_WIN32)
+    return InterlockedCompareExchange(src, 0, 0);
+# elif defined(__DECC_VER)
+    return _InterlockedCompareExchange64(src, 0, 0);
+# endif
+#else
+    /* No asynchronous I/O, or mutex-only. */
+    return *src;
+#endif
+}
+
+static SIM_INLINE void sim_store_atomic(sim_atomic_t *dest, sim_atomic_t src)
+{
+#if defined(SIM_ASYNCH_IO) && !AIO_MUTEX_ONLY
+# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
+    atomic_store(dest, src);
+# elif defined(__GNUC__)
+#  if defined(__has_builtin)
+#   if __has_builtin(__atomic_store_n)
+    /* GCC/Clang: use __atomic_store_n for an atomic store with seq_cst ordering */
+    __atomic_store_n(dest, src, __ATOMIC_SEQ_CST);
+#   else
+    /* Fallback. Technically, should never encounter this branch. */
+    (void) __sync_lock_test_and_set(dest, src);
+#   endif
+#  else
+    (void) __sync_lock_test_and_set(dest, src);
+#  endif
+# elif defined(_WIN32)
+    InterlockedExchange(dest, src);
+# elif defined(__DECC_VER)
+    _InterlockedExchange(dest, src);
+# endif
+#else
+    /* No asynchronous I/O, or mutex-only. */
+    *dest = src;
+#endif
+}
+
+/*=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
+ * Type-specific atomic inlines.
+ *=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~*/ + +static SIM_INLINE UNIT *unit_ptr_load_atomic(sim_unit_atomic_t *unit) +{ + return ((UNIT *) sim_ptr_load_atomic((SIM_ATOMIC_PTR(void) *) unit)); +} + +static SIM_INLINE int unit_ptr_cmpxchg(sim_unit_atomic_t *dest, UNIT *src, UNIT *current) +{ + return sim_ptr_cmpxchg((SIM_ATOMIC_PTR(void) *) dest, (void *) src, (void *) current); +} + #ifdef __cplusplus } #endif diff --git a/sim_timer.c b/sim_timer.c index 333d26a97..473c0bc59 100644 --- a/sim_timer.c +++ b/sim_timer.c @@ -247,8 +247,8 @@ t_bool sim_asynch_timer = FALSE; #if defined (SIM_ASYNCH_CLOCKS) -UNIT * volatile sim_wallclock_queue = QUEUE_LIST_END; -UNIT * volatile sim_wallclock_entry = NULL; +sim_unit_atomic_t sim_wallclock_queue = QUEUE_LIST_END; +sim_unit_atomic_t sim_wallclock_entry = NULL; #endif #define sleep1Samples 100 @@ -311,8 +311,6 @@ pthread_mutex_lock (&sim_asynch_lock); sim_idle_wait = TRUE; if (pthread_cond_timedwait (&sim_asynch_wake, &sim_asynch_lock, &end_time)) timedout = TRUE; -else - sim_asynch_check = 0; /* force check of asynch queue now */ sim_idle_wait = FALSE; pthread_mutex_unlock (&sim_asynch_lock); clock_gettime(CLOCK_REALTIME, &done_time); @@ -1254,7 +1252,9 @@ if (sim_asynch_timer) { fprintf (st, "%s wall clock event queue empty\n", sim_name); else { fprintf (st, "%s wall clock event queue status\n", sim_name); - for (uptr = sim_wallclock_queue; uptr != QUEUE_LIST_END; uptr = uptr->a_next) { + for (uptr = unit_ptr_load_atomic(&sim_wallclock_queue); + uptr != QUEUE_LIST_END; + uptr = unit_ptr_load_atomic(&uptr->a_next)) { if ((dptr = find_dev_from_unit (uptr)) != NULL) { fprintf (st, " %s", sim_dname (dptr)); if (dptr->numunits > 1) @@ -2312,28 +2312,29 @@ while (sim_asynch_timer && sim_is_running) { double wait_usec; int32 inst_delay; double inst_per_sec; - UNIT *uptr, *cptr, *prvptr; + UNIT *uptr, *prvptr; + sim_unit_atomic_t cptr; if (sim_wallclock_entry) { /* something to insert in queue? 
*/ sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - timing %s for %s\n", - sim_uname(sim_wallclock_entry), sim_fmt_secs (sim_wallclock_entry->a_usec_delay/1000000.0)); + sim_uname((UNIT *) sim_wallclock_entry), sim_fmt_secs (sim_wallclock_entry->a_usec_delay/1000000.0)); - uptr = sim_wallclock_entry; + uptr = unit_ptr_load_atomic(&sim_wallclock_entry); sim_wallclock_entry = NULL; prvptr = NULL; for (cptr = sim_wallclock_queue; cptr != QUEUE_LIST_END; cptr = cptr->a_next) { if (uptr->a_due_time < cptr->a_due_time) break; - prvptr = cptr; + prvptr = unit_ptr_load_atomic(&cptr); } if (prvptr == NULL) { /* insert at head */ cptr = uptr->a_next = sim_wallclock_queue; sim_wallclock_queue = uptr; } else { - cptr = uptr->a_next = prvptr->a_next; /* insert at prvptr */ + cptr = uptr->a_next = unit_ptr_load_atomic(&prvptr->a_next); /* insert at prvptr */ prvptr->a_next = uptr; } } @@ -2354,7 +2355,8 @@ while (sim_asynch_timer && sim_is_running) { if (sim_wallclock_queue == QUEUE_LIST_END) sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - waiting forever\n"); else - sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - waiting for %.0f usecs until %.6f for %s\n", wait_usec, sim_wallclock_queue->a_due_time, sim_uname(sim_wallclock_queue)); + sim_debug (DBG_TIM, &sim_timer_dev, "_timer_thread() - waiting for %.0f usecs until %.6f for %s\n", + wait_usec, sim_wallclock_queue->a_due_time, sim_uname(unit_ptr_load_atomic(&sim_wallclock_queue))); if ((wait_usec <= 0.0) || (0 != pthread_cond_timedwait (&sim_timer_wake, &sim_timer_lock, &due_time))) { @@ -2362,7 +2364,7 @@ while (sim_asynch_timer && sim_is_running) { continue; /* wait again */ inst_per_sec = sim_timer_inst_per_sec (); - uptr = sim_wallclock_queue; + uptr = unit_ptr_load_atomic(&sim_wallclock_queue); sim_wallclock_queue = uptr->a_next; uptr->a_next = NULL; /* hygiene */ @@ -2651,7 +2653,7 @@ if (sim_timer_thread_running) { pthread_join (sim_timer_thread, NULL); /* Any wallclock queued events are now migrated to the normal event queue */ while (sim_wallclock_queue != QUEUE_LIST_END) { - UNIT *uptr = sim_wallclock_queue; + UNIT *uptr = unit_ptr_load_atomic(&sim_wallclock_queue); double inst_delay_d = uptr->a_due_gtime - sim_gtime (); int32 inst_delay; @@ -2826,7 +2828,9 @@ if ((sim_asynch_timer) && sim_uname(uptr), usec_delay, uptr->a_due_time); pthread_mutex_lock (&sim_timer_lock); - for (cptr = sim_wallclock_queue, prvptr = NULL; cptr != QUEUE_LIST_END; cptr = cptr->a_next) { + for (cptr = unit_ptr_load_atomic(&sim_wallclock_queue), prvptr = NULL; + cptr != QUEUE_LIST_END; + cptr = unit_ptr_load_atomic(&cptr->a_next)) { if (uptr->a_due_time < cptr->a_due_time) break; prvptr = cptr; @@ -2836,7 +2840,7 @@ if ((sim_asynch_timer) && if (sim_timer_thread_running) { while (sim_wallclock_entry) { /* wait for any prior entry has been digested */ sim_debug (DBG_TIM, &sim_timer_dev, "sim_timer_activate_after(%s, %.0f usecs) - queue insert entry %s busy waiting for 1ms\n", - sim_uname(uptr), usec_delay, sim_uname(sim_wallclock_entry)); + sim_uname(uptr), usec_delay, sim_uname((UNIT *) sim_wallclock_entry)); pthread_mutex_unlock (&sim_timer_lock); sim_os_ms_sleep (1); pthread_mutex_lock (&sim_timer_lock); @@ -2880,12 +2884,12 @@ if (NULL == uptr) { /* deregistering? 
*/ /* Migrate any coscheduled devices to the standard queue */ /* they will fire and subsequently requeue themselves */ while (rtc->clock_cosched_queue != QUEUE_LIST_END) { - UNIT *uptr = rtc->clock_cosched_queue; - double usecs_remaining = sim_timer_activate_time_usecs (uptr); + UNIT *rtc_unit = rtc->clock_cosched_queue; + double usecs_remaining = sim_timer_activate_time_usecs (rtc_unit); - _sim_coschedule_cancel (uptr); - _sim_activate (uptr, 1); - uptr->usecs_remaining = usecs_remaining; + _sim_coschedule_cancel (rtc_unit); + _sim_activate (rtc_unit, 1); + rtc_unit->usecs_remaining = usecs_remaining; } if (rtc->clock_unit) { sim_cancel (rtc->clock_unit); @@ -3106,9 +3110,9 @@ if (uptr->a_next) { pthread_cond_signal (&sim_timer_wake); } else { - for (cptr = sim_wallclock_queue; + for (cptr = unit_ptr_load_atomic(&sim_wallclock_queue); (cptr != QUEUE_LIST_END); - cptr = cptr->a_next) { + cptr = unit_ptr_load_atomic(&cptr->a_next)) { if (cptr->a_next == (uptr)) { cptr->a_next = (uptr)->a_next; uptr->a_next = NULL; @@ -3171,9 +3175,9 @@ if (uptr->a_is_active == &_sim_wallclock_is_active) { pthread_mutex_unlock (&sim_timer_lock); return ((int32)d_result) + 1; } - for (cptr = sim_wallclock_queue; + for (cptr = unit_ptr_load_atomic(&sim_wallclock_queue); cptr != QUEUE_LIST_END; - cptr = cptr->a_next) + cptr = unit_ptr_load_atomic(&cptr->a_next)) if (uptr == cptr) { d_result = uptr->a_due_gtime - sim_gtime (); if (d_result < 0.0) @@ -3247,9 +3251,9 @@ if (uptr->a_is_active == &_sim_wallclock_is_active) { sim_debug (DBG_QUE, &sim_timer_dev, "sim_timer_activate_time_usecs(%s) wallclock_entry - %.0f usecs, inst_per_sec=%.0f\n", sim_uname (uptr), result, sim_timer_inst_per_sec ()); return result; } - for (cptr = sim_wallclock_queue; + for (cptr = unit_ptr_load_atomic(&sim_wallclock_queue); cptr != QUEUE_LIST_END; - cptr = cptr->a_next) + cptr = unit_ptr_load_atomic(&cptr->a_next)) if (uptr == cptr) { result = uptr->a_due_gtime - sim_gtime (); if (result < 0.0)
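
Finally, the enqueue/drain pair at the heart of this patch can be exercised in isolation. Below is a self-contained C11 program (illustrative names only; no SIMH headers) that mirrors `aio_enqueue_unit` and `aio_queue_worklist`, including the `(void *)1` list terminator:

```c
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define LIST_END ((struct evt *)1)            /* sentinel, like QUEUE_LIST_END */

struct evt { struct evt *next; int id; };
static _Atomic(struct evt *) queue = LIST_END;

static void enqueue(struct evt *e)            /* cf. aio_enqueue_unit */
{
    struct evt *q = atomic_load(&queue);
    do {
        e->next = q;                          /* mark as on-list */
    } while (!atomic_compare_exchange_weak(&queue, &q, e));
}

static struct evt *drain(void)                /* cf. aio_queue_worklist */
{
    struct evt *q = atomic_load(&queue);
    while (!atomic_compare_exchange_weak(&queue, &q, LIST_END))
        ;                                     /* retry until we own the list */
    return q;
}

static void *producer(void *arg)
{
    enqueue((struct evt *)arg);
    return NULL;
}

int main(void)
{
    struct evt a = { NULL, 1 }, b = { NULL, 2 };
    pthread_t t1, t2;

    pthread_create(&t1, NULL, producer, &a);
    pthread_create(&t2, NULL, producer, &b);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);

    for (struct evt *e = drain(); e != LIST_END; e = e->next)
        printf("event %d\n", e->id);          /* LIFO order, as in the patch */
    return 0;
}
```
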