Merge libdispatch-743

Signed-off-by: Daniel A. Steffen <dsteffen@apple.com>
diff --git a/.gitmodules b/.gitmodules
index 009b5fb..e6068b4 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,3 @@
 [submodule "libpwq"]
 	path = libpwq
 	url = https://github.com/mheily/libpwq.git
-[submodule "libkqueue"]
-	path = libkqueue
-	url = https://github.com/mheily/libkqueue.git
diff --git a/INSTALL.md b/INSTALL.md
index fd999e7..410aefa 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -159,7 +159,7 @@
     `sudo apt-get install systemtap-sdt-dev`
 
  1c. Install additional libdispatch dependencies
-    `sudo apt-get install libblocksruntime-dev libkqueue-dev libbsd-dev`
+    `sudo apt-get install libblocksruntime-dev libbsd-dev`
 
     Note: compiling libdispatch requires clang 3.8 or better and
 the gold linker. If the default clang on your Ubuntu version is
diff --git a/config/config.h b/config/config.h
index ca3a1db..2e3cb7e 100644
--- a/config/config.h
+++ b/config/config.h
@@ -5,10 +5,18 @@
    you don't. */
 #define HAVE_DECL_CLOCK_MONOTONIC 0
 
+/* Define to 1 if you have the declaration of `CLOCK_REALTIME', and to 0 if
+   you don't. */
+#define HAVE_DECL_CLOCK_REALTIME 0
+
 /* Define to 1 if you have the declaration of `CLOCK_UPTIME', and to 0 if you
    don't. */
 #define HAVE_DECL_CLOCK_UPTIME 0
 
+/* Define to 1 if you have the declaration of `CLOCK_UPTIME_FAST', and to 0
+   if you don't. */
+#define HAVE_DECL_CLOCK_UPTIME_FAST 0
+
 /* Define to 1 if you have the declaration of `FD_COPY', and to 0 if you
    don't. */
 #define HAVE_DECL_FD_COPY 1
@@ -87,6 +95,9 @@
 /* Define to 1 if you have the `mach_absolute_time' function. */
 #define HAVE_MACH_ABSOLUTE_TIME 1
 
+/* Define to 1 if you have the `mach_approximate_time' function. */
+#define HAVE_MACH_APPROXIMATE_TIME 1
+
 /* Define to 1 if you have the `mach_port_construct' function. */
 #define HAVE_MACH_PORT_CONSTRUCT 1
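
For context on how these config.h guards are typically consumed, a minimal
sketch follows; the helper name and fallback order are illustrative, not part
of this patch, and assume config.h has already been included:

    #include <stdint.h>
    #include <time.h>
    #if HAVE_MACH_ABSOLUTE_TIME
    #include <mach/mach_time.h>
    #endif

    /* Hypothetical helper: a monotonic timestamp in nanoseconds, using
     * whichever clock the configure checks found at build time. */
    static uint64_t
    _example_uptime_nanoseconds(void)
    {
    #if HAVE_MACH_ABSOLUTE_TIME
        mach_timebase_info_data_t tbi;
        mach_timebase_info(&tbi);
        return mach_absolute_time() * tbi.numer / tbi.denom; /* overflow ignored for brevity */
    #elif HAVE_DECL_CLOCK_UPTIME_FAST
        struct timespec ts;
        clock_gettime(CLOCK_UPTIME_FAST, &ts); /* FreeBSD-style uptime clock */
        return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    #elif HAVE_DECL_CLOCK_MONOTONIC
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    #else
    #error "no monotonic clock available"
    #endif
    }
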
 
diff --git a/configure.ac b/configure.ac
index 4ea4028..62f0e03 100644
--- a/configure.ac
+++ b/configure.ac
@@ -295,19 +295,6 @@
 AC_SEARCH_LIBS(clock_gettime, rt)
 AC_SEARCH_LIBS(pthread_create, pthread)
 
-AS_IF([test -f $srcdir/libkqueue/configure.ac],
-  [AC_DEFINE(BUILD_OWN_KQUEUES, 1, [Define if building libkqueue from source])
-   ac_configure_args="--disable-libkqueue-install $ac_configure_args"
-   AC_CONFIG_SUBDIRS([libkqueue])
-   build_own_kqueues=true],
-  [build_own_kqueues=false
-   AC_CHECK_HEADER(sys/event.h, [],
-     [PKG_CHECK_MODULES(KQUEUE, libkqueue)]
-   )
-  ]
-)
-AM_CONDITIONAL(BUILD_OWN_KQUEUES, $build_own_kqueues)
-
 AC_CHECK_FUNCS([strlcpy getprogname], [],
   [PKG_CHECK_MODULES(BSD_OVERLAY, libbsd-overlay,[
     AC_DEFINE(HAVE_STRLCPY, 1, [])
@@ -423,7 +410,7 @@
 #
 # Find functions and declarations we care about.
 #
-AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC], [], [],
+AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_UPTIME_FAST], [], [],
   [[#include <time.h>]])
 AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_REVOKE, NOTE_SIGNAL, NOTE_LOWAT], [], [],
   [[#include <sys/event.h>]])
@@ -431,7 +418,7 @@
 AC_CHECK_DECLS([SIGEMT], [], [], [[#include <signal.h>]])
 AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA], [], [], [[#include <sys/mount.h>]])
 AC_CHECK_DECLS([program_invocation_short_name], [], [], [[#include <errno.h>]])
-AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf])
+AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time mach_approximate_time malloc_create_zone sysconf])
 
 AC_CHECK_DECLS([POSIX_SPAWN_START_SUSPENDED],
   [have_posix_spawn_start_suspended=true], [have_posix_spawn_start_suspended=false],
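
The new mach_approximate_time entry in AC_CHECK_FUNCS surfaces as
HAVE_MACH_APPROXIMATE_TIME in config.h. A hedged sketch of the kind of guard
this enables (the helper name is invented, not part of this patch):

    #include <mach/mach_time.h>

    /* Illustrative only: prefer the cheaper approximate clock when the
     * build found it, otherwise fall back to the exact counter. */
    static uint64_t
    _example_absolute_time(void)
    {
    #if HAVE_MACH_APPROXIMATE_TIME
        return mach_approximate_time();
    #else
        return mach_absolute_time();
    #endif
    }
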
diff --git a/dispatch/base.h b/dispatch/base.h
index 8adfb0b..f55a062 100644
--- a/dispatch/base.h
+++ b/dispatch/base.h
@@ -204,10 +204,8 @@
 #endif
 
 #if __has_feature(enumerator_attributes)
-#define DISPATCH_ENUM_AVAILABLE_STARTING __OSX_AVAILABLE_STARTING
-#define DISPATCH_ENUM_AVAILABLE(os, version) __##os##_AVAILABLE(version)
+#define DISPATCH_ENUM_AVAILABLE(...) API_AVAILABLE(__VA_ARGS__)
 #else
-#define DISPATCH_ENUM_AVAILABLE_STARTING(...)
 #define DISPATCH_ENUM_AVAILABLE(...)
 #endif
 
diff --git a/dispatch/block.h b/dispatch/block.h
index cd56b23..8a74df4 100644
--- a/dispatch/block.h
+++ b/dispatch/block.h
@@ -101,17 +101,17 @@
  */
 DISPATCH_ENUM(dispatch_block_flags, unsigned long,
 	DISPATCH_BLOCK_BARRIER
-			DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x1,
+			DISPATCH_ENUM_AVAILABLE(macos(10.10), ios(8.0)) = 0x1,
 	DISPATCH_BLOCK_DETACHED
-			DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x2,
+			DISPATCH_ENUM_AVAILABLE(macos(10.10), ios(8.0)) = 0x2,
 	DISPATCH_BLOCK_ASSIGN_CURRENT
-			DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x4,
+			DISPATCH_ENUM_AVAILABLE(macos(10.10), ios(8.0)) = 0x4,
 	DISPATCH_BLOCK_NO_QOS_CLASS
-			DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x8,
+			DISPATCH_ENUM_AVAILABLE(macos(10.10), ios(8.0)) = 0x8,
 	DISPATCH_BLOCK_INHERIT_QOS_CLASS
-			DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x10,
+			DISPATCH_ENUM_AVAILABLE(macos(10.10), ios(8.0)) = 0x10,
 	DISPATCH_BLOCK_ENFORCE_QOS_CLASS
-			DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x20,
+			DISPATCH_ENUM_AVAILABLE(macos(10.10), ios(8.0)) = 0x20,
 );
 
 /*!
@@ -164,7 +164,7 @@
  * When not building with Objective-C ARC, must be released with a -[release]
  * message or the Block_release() function.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_RETURNS_RETAINED_BLOCK
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_block_t
@@ -236,7 +236,7 @@
  * When not building with Objective-C ARC, must be released with a -[release]
  * message or the Block_release() function.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_RETURNS_RETAINED_BLOCK
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_block_t
@@ -269,7 +269,7 @@
  * @param block
  * The block to create the temporary block object from.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
 void
 dispatch_block_perform(dispatch_block_flags_t flags,
@@ -320,7 +320,7 @@
  * Returns zero on success (the dispatch block object completed within the
  * specified timeout) or non-zero on error (i.e. timed out).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 long
 dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout);
@@ -361,7 +361,7 @@
  * @param notification_block
  * The notification block to submit when the observed block object completes.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue,
@@ -393,7 +393,7 @@
  * The result of passing NULL or a block object not returned by one of the
  * dispatch_block_create* functions is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_block_cancel(dispatch_block_t block);
@@ -412,7 +412,7 @@
  * @result
  * Non-zero if canceled and zero if not canceled.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
 DISPATCH_NOTHROW
 long
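
A usage sketch of the dispatch_block_* interfaces annotated above (the queue
label and timeout are illustrative; assumes a non-ARC build, hence
Block_release):

    #include <dispatch/dispatch.h>
    #include <Block.h>
    #include <stdio.h>

    int main(void)
    {
        dispatch_queue_t q = dispatch_queue_create("com.example.worker", NULL);

        /* Wrap the work in a block object so it can be waited on. */
        dispatch_block_t work = dispatch_block_create(DISPATCH_BLOCK_BARRIER, ^{
            puts("working");
        });
        dispatch_async(q, work);

        /* Block until the work completes or five seconds elapse. */
        if (dispatch_block_wait(work,
                dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC))) {
            puts("timed out");
        }
        Block_release(work);
        dispatch_release(q);
        return 0;
    }
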
diff --git a/dispatch/data.h b/dispatch/data.h
index 7ceee06..55b32b4 100644
--- a/dispatch/data.h
+++ b/dispatch/data.h
@@ -50,7 +50,7 @@
  */
 #define dispatch_data_empty \
 		DISPATCH_GLOBAL_OBJECT(dispatch_data_t, _dispatch_data_empty)
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty;
 
 /*!
@@ -83,7 +83,7 @@
  * was allocated by the malloc() family and should be destroyed with free(3).
  */
 #define DISPATCH_DATA_DESTRUCTOR_FREE (_dispatch_data_destructor_free)
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(free);
 
 /*!
@@ -92,7 +92,7 @@
  * from buffers that require deallocation with munmap(2).
  */
 #define DISPATCH_DATA_DESTRUCTOR_MUNMAP (_dispatch_data_destructor_munmap)
-__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap);
 
 #ifdef __BLOCKS__
@@ -117,7 +117,7 @@
  *			is no longer needed.
  * @result		A newly created dispatch data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
 dispatch_data_create(const void *buffer,
@@ -134,7 +134,7 @@
  * @param data	The dispatch data object to query.
  * @result	The number of bytes represented by the data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_PURE DISPATCH_NONNULL1 DISPATCH_NOTHROW
 size_t
 dispatch_data_get_size(dispatch_data_t data);
@@ -158,7 +158,7 @@
  *			size of the mapped contiguous memory region, or NULL.
  * @result		A newly created dispatch data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
@@ -181,7 +181,7 @@
  * @result	A newly created object representing the concatenation of the
  *		data1 and data2 objects.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
@@ -202,7 +202,7 @@
  * @result		A newly created object representing the specified
  *			subrange of the data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
@@ -247,7 +247,7 @@
  * @result		A Boolean indicating whether traversal completed
  *			successfully.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 bool
 dispatch_data_apply(dispatch_data_t data, dispatch_data_applier_t applier);
@@ -267,7 +267,7 @@
  *			start of the queried data object.
  * @result		A newly created dispatch data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
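
A usage sketch of the dispatch_data interfaces annotated above (the buffer
contents are illustrative):

    #include <dispatch/dispatch.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        static const char msg[] = "hello";
        dispatch_queue_t q = dispatch_get_global_queue(
                DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

        /* DISPATCH_DATA_DESTRUCTOR_DEFAULT copies the buffer internally. */
        dispatch_data_t d1 = dispatch_data_create(msg, strlen(msg), q,
                DISPATCH_DATA_DESTRUCTOR_DEFAULT);
        dispatch_data_t d2 = dispatch_data_create_concat(d1, d1);

        printf("size: %zu\n", dispatch_data_get_size(d2)); /* prints 10 */

        /* Walk each contiguous region of the (possibly discontiguous) object. */
        dispatch_data_apply(d2, ^bool(dispatch_data_t region, size_t offset,
                const void *buffer, size_t size) {
            printf("region at %zu: %.*s\n", offset, (int)size,
                    (const char *)buffer);
            return true;
        });
        dispatch_release(d1);
        dispatch_release(d2);
        return 0;
    }
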
diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h
index e8d69f8..52cd3e7 100644
--- a/dispatch/dispatch.h
+++ b/dispatch/dispatch.h
@@ -24,19 +24,14 @@
 #ifdef __APPLE__
 #include <Availability.h>
 #include <TargetConditionals.h>
-#else
-#define __OSX_AVAILABLE_STARTING(x, y)
-#define __OSX_AVAILABLE_BUT_DEPRECATED(...)
-#define __OSX_AVAILABLE_BUT_DEPRECATED_MSG(...)
-#define __OSX_AVAILABLE(...)
-#define __IOS_AVAILABLE(...)
-#define __TVOS_AVAILABLE(...)
-#define __WATCHOS_AVAILABLE(...)
-#define __OSX_DEPRECATED(...)
-#define __IOS_DEPRECATED(...)
-#define __TVOS_DEPRECATED(...)
-#define __WATCHOS_DEPRECATED(...)
-#endif // __APPLE__
+#endif
+
+#ifndef API_AVAILABLE
+#define API_AVAILABLE(...)
+#define API_DEPRECATED(...)
+#define API_UNAVAILABLE(...)
+#define API_DEPRECATED_WITH_REPLACEMENT(...)
+#endif // !API_AVAILABLE
 
 #include <sys/cdefs.h>
 #include <sys/types.h>
@@ -55,7 +50,7 @@
 #endif
 #endif
 
-#define DISPATCH_API_VERSION 20160712
+#define DISPATCH_API_VERSION 20160831
 
 #ifndef __DISPATCH_BUILDING_DISPATCH__
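
With API_AVAILABLE, API_DEPRECATED, API_UNAVAILABLE, and
API_DEPRECATED_WITH_REPLACEMENT stubbed out on platforms that lack
Availability.h, an annotated declaration such as this one (taken from
dispatch/object.h in this patch) compiles unchanged everywhere: on Apple
platforms the macro expands to a real availability attribute, elsewhere to
nothing:

    API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
    DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
    void
    dispatch_activate(dispatch_object_t object);
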
 
diff --git a/dispatch/group.h b/dispatch/group.h
index c50ad89..8d74ada 100644
--- a/dispatch/group.h
+++ b/dispatch/group.h
@@ -51,7 +51,7 @@
  * @result
  * The newly created group, or NULL on failure.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_group_t
@@ -81,7 +81,7 @@
  * The block to perform asynchronously.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_group_async(dispatch_group_t group,
@@ -115,7 +115,7 @@
  * parameter passed to this function is the context provided to
  * dispatch_group_async_f().
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4
 DISPATCH_NOTHROW
 void
@@ -158,7 +158,7 @@
  * Returns zero on success (all blocks associated with the group completed
  * within the specified timeout) or non-zero on error (i.e. timed out).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 long
 dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout);
@@ -194,7 +194,7 @@
  * The block to submit when the group completes.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_group_notify(dispatch_group_t group,
@@ -224,7 +224,7 @@
  * parameter passed to this function is the context provided to
  * dispatch_group_notify_f().
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4
 DISPATCH_NOTHROW
 void
@@ -248,7 +248,7 @@
  * The dispatch group to update.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_group_enter(dispatch_group_t group);
@@ -267,7 +267,7 @@
  * The dispatch group to update.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_group_leave(dispatch_group_t group);
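
A usage sketch of the group interfaces annotated above (illustrative only):

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void)
    {
        dispatch_queue_t q = dispatch_get_global_queue(
                DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
        dispatch_group_t g = dispatch_group_create();

        for (int i = 0; i < 3; i++) {
            dispatch_group_async(g, q, ^{ printf("task %d\n", i); });
        }

        /* Manual enter/leave brackets work that isn't a submitted block. */
        dispatch_group_enter(g);
        dispatch_async(q, ^{
            puts("manual task");
            dispatch_group_leave(g);
        });

        /* Returns zero once every associated task has completed. */
        dispatch_group_wait(g, DISPATCH_TIME_FOREVER);
        dispatch_release(g);
        return 0;
    }
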
diff --git a/dispatch/introspection.h b/dispatch/introspection.h
index 9cfb4d1..ea7dcd8 100644
--- a/dispatch/introspection.h
+++ b/dispatch/introspection.h
@@ -49,7 +49,7 @@
  * The newly created dispatch queue.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_create(dispatch_queue_t queue);
@@ -65,7 +65,7 @@
  * The dispatch queue about to be destroyed.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_destroy(dispatch_queue_t queue);
@@ -84,7 +84,7 @@
  * The object about to be enqueued.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_item_enqueue(dispatch_queue_t queue,
@@ -104,7 +104,7 @@
  * The dequeued object.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_item_dequeue(dispatch_queue_t queue,
@@ -126,7 +126,7 @@
 * Opaque identifier for completed item. Must NOT be dereferenced.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_7_1)
+API_AVAILABLE(macos(10.10), ios(7.1))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_item_complete(dispatch_object_t item);
@@ -150,7 +150,7 @@
  * this is the block object's invoke function.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_callout_begin(dispatch_queue_t queue,
@@ -175,7 +175,7 @@
  * this is the block object's invoke function.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT
 void
 dispatch_introspection_hook_queue_callout_end(dispatch_queue_t queue,
diff --git a/dispatch/io.h b/dispatch/io.h
index 5814bc0..a9e6892 100644
--- a/dispatch/io.h
+++ b/dispatch/io.h
@@ -102,7 +102,7 @@
  *		param error	An errno condition for the read operation or
  *				zero if the read was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW
 void
 dispatch_read(dispatch_fd_t fd,
@@ -140,7 +140,7 @@
  *		param error	An errno condition for the write operation or
  *				zero if the write was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL4
 DISPATCH_NOTHROW
 void
@@ -211,7 +211,7 @@
  * @result	The newly created dispatch I/O channel or NULL if an error
  *		occurred (invalid type specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_io_t
@@ -247,7 +247,7 @@
  * @result	The newly created dispatch I/O channel or NULL if an error
  *		occurred (invalid type or non-absolute path specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_io_t
@@ -287,7 +287,7 @@
  * @result	The newly created dispatch I/O channel or NULL if an error
  *		occurred (invalid type specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_io_t
@@ -349,7 +349,7 @@
  *	param error	An errno condition for the read operation or zero if
  *			the read was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL5
 DISPATCH_NOTHROW
 void
@@ -402,7 +402,7 @@
  *	param error	An errno condition for the write operation or zero
  *			if the write was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4
 DISPATCH_NONNULL5 DISPATCH_NOTHROW
 void
@@ -441,7 +441,7 @@
  * @param channel	The dispatch I/O channel to close.
  * @param flags		The flags for the close operation.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags);
@@ -468,7 +468,7 @@
  * @param channel	The dispatch I/O channel to schedule the barrier on.
  * @param barrier	The barrier block.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier);
@@ -488,7 +488,7 @@
  * @param channel	The dispatch I/O channel to query.
  * @result		The file descriptor underlying the channel, or -1.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_fd_t
 dispatch_io_get_descriptor(dispatch_io_t channel);
@@ -509,7 +509,7 @@
  * @param channel	The dispatch I/O channel on which to set the policy.
  * @param high_water	The number of bytes to use as a high water mark.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water);
@@ -540,7 +540,7 @@
  * @param channel	The dispatch I/O channel on which to set the policy.
  * @param low_water	The number of bytes to use as a low water mark.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water);
@@ -579,7 +579,7 @@
  * @param flags		Flags indicating desired data delivery behavior at
  *					interval time.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_io_set_interval(dispatch_io_t channel,
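
A usage sketch of the convenience read interface annotated above (the path is
illustrative; a length of SIZE_MAX requests reading until EOF):

    #include <dispatch/dispatch.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/etc/hosts", O_RDONLY);
        if (fd < 0) return 1;
        dispatch_queue_t q = dispatch_get_global_queue(
                DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

        /* The handler is submitted once, with the data read (possibly
         * less than requested if an error occurred). */
        dispatch_read(fd, SIZE_MAX, q, ^(dispatch_data_t data, int error) {
            if (error == 0) {
                printf("read %zu bytes\n", dispatch_data_get_size(data));
            }
            close(fd);
            exit(error);
        });
        dispatch_main(); /* keep the process alive for the handler */
    }
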
diff --git a/dispatch/object.h b/dispatch/object.h
index 8b20301..2be1c9b 100644
--- a/dispatch/object.h
+++ b/dispatch/object.h
@@ -92,7 +92,6 @@
 	struct dispatch_source_s *_ds;
 	struct dispatch_mach_s *_dm;
 	struct dispatch_mach_msg_s *_dmsg;
-	struct dispatch_timer_aggregate_s *_dta;
 	struct dispatch_source_attr_s *_dsa;
 	struct dispatch_semaphore_s *_dsema;
 	struct dispatch_data_s *_ddata;
@@ -201,7 +200,7 @@
  * The object to retain.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC")
 void
@@ -229,7 +228,7 @@
  * The object to release.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC")
 void
@@ -253,7 +252,7 @@
  * @result
  * The context of the object; may be NULL.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 void *_Nullable
@@ -272,7 +271,7 @@
  * The new client defined context for the object. This may be NULL.
  *
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 dispatch_set_context(dispatch_object_t object, void *_Nullable context);
@@ -298,7 +297,7 @@
  * The context parameter passed to the finalizer function is the current
  * context of the dispatch object at the time the finalizer call is made.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 dispatch_set_finalizer_f(dispatch_object_t object,
@@ -326,8 +325,7 @@
  * The object to be activated.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_activate(dispatch_object_t object);
@@ -350,7 +348,7 @@
  * The object to be suspended.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_suspend(dispatch_object_t object);
@@ -379,7 +377,7 @@
  * The object to be resumed.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_resume(dispatch_object_t object);
@@ -541,13 +539,13 @@
  * @param message
  * The message to log above and beyond the introspection.
  */
-__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0)
+API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
 __attribute__((__format__(printf,2,3)))
 void
 dispatch_debug(dispatch_object_t object, const char *message, ...);
 
-__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0)
+API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
 __attribute__((__format__(printf,2,0)))
 void
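
A usage sketch of the context/finalizer interfaces annotated above (names are
invented; the sleep is a demo-only stand-in for a real shutdown path):

    #include <dispatch/dispatch.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void
    context_finalizer(void *ctx)
    {
        puts("finalizer: freeing context"); /* runs after the final release */
        free(ctx);
    }

    int main(void)
    {
        dispatch_queue_t q = dispatch_queue_create("com.example.ctx", NULL);
        dispatch_set_context(q, malloc(64));
        dispatch_set_finalizer_f(q, context_finalizer);

        dispatch_sync(q, ^{
            printf("context: %p\n", dispatch_get_context(q));
        });

        dispatch_release(q); /* last reference: finalizer is scheduled */
        sleep(1);            /* crude wait for the async finalizer (demo only) */
        return 0;
    }
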
diff --git a/dispatch/once.h b/dispatch/once.h
index a8f5644..68acfe8 100644
--- a/dispatch/once.h
+++ b/dispatch/once.h
@@ -58,7 +58,7 @@
  * initialized by the block.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead")
 void
@@ -82,7 +82,7 @@
 #define dispatch_once _dispatch_once
 #endif
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead")
 void
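
A usage sketch of dispatch_once for one-time lazy initialization (names are
invented):

    #include <dispatch/dispatch.h>
    #include <stdlib.h>

    static dispatch_once_t table_once; /* must be static/global, zero-initialized */
    static int *shared_table;

    static int *
    get_shared_table(void)
    {
        dispatch_once(&table_once, ^{
            shared_table = calloc(256, sizeof(int)); /* runs at most once */
        });
        return shared_table; /* every caller observes the initialized value */
    }
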
diff --git a/dispatch/queue.h b/dispatch/queue.h
index 264c344..111dfc7 100644
--- a/dispatch/queue.h
+++ b/dispatch/queue.h
@@ -103,7 +103,7 @@
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_async(dispatch_queue_t queue, dispatch_block_t block);
@@ -133,7 +133,7 @@
  * dispatch_async_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_async_f(dispatch_queue_t queue,
@@ -171,7 +171,7 @@
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block);
@@ -199,7 +199,7 @@
  * dispatch_sync_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_sync_f(dispatch_queue_t queue,
@@ -232,7 +232,7 @@
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_apply(size_t iterations, dispatch_queue_t queue,
@@ -265,7 +265,7 @@
  * current index of iteration.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW
 void
 dispatch_apply_f(size_t iterations, dispatch_queue_t queue,
@@ -301,12 +301,12 @@
  * @result
  * Returns the current queue.
  */
-__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0)
+API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0))
 DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_queue_t
 dispatch_get_current_queue(void);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q;
 
 /*!
@@ -415,7 +415,7 @@
  * Returns the requested global queue or NULL if the requested global queue
  * does not exist.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_queue_t
 dispatch_get_global_queue(long identifier, unsigned long flags);
@@ -454,7 +454,7 @@
 #define DISPATCH_QUEUE_CONCURRENT \
 		DISPATCH_GLOBAL_OBJECT(dispatch_queue_attr_t, \
 		_dispatch_queue_attr_concurrent)
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT
 struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent;
 
@@ -498,8 +498,7 @@
  * The new value combines the attributes specified by the 'attr' parameter with
  * the initially inactive attribute.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW
 dispatch_queue_attr_t
 dispatch_queue_attr_make_initially_inactive(
@@ -556,21 +555,9 @@
  * asynchronously. This is the behavior of the global concurrent queues.
  */
 DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long,
-	DISPATCH_AUTORELEASE_FREQUENCY_INHERIT
-			DISPATCH_ENUM_AVAILABLE(OSX, 10.12)
-			DISPATCH_ENUM_AVAILABLE(IOS, 10.0)
-			DISPATCH_ENUM_AVAILABLE(TVOS, 10.0)
-			DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 0,
-	DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM
-			DISPATCH_ENUM_AVAILABLE(OSX, 10.12)
-			DISPATCH_ENUM_AVAILABLE(IOS, 10.0)
-			DISPATCH_ENUM_AVAILABLE(TVOS, 10.0)
-			DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 1,
-	DISPATCH_AUTORELEASE_FREQUENCY_NEVER
-			DISPATCH_ENUM_AVAILABLE(OSX, 10.12)
-			DISPATCH_ENUM_AVAILABLE(IOS, 10.0)
-			DISPATCH_ENUM_AVAILABLE(TVOS, 10.0)
-			DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 2,
+	DISPATCH_AUTORELEASE_FREQUENCY_INHERIT DISPATCH_ENUM_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0,
+	DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM DISPATCH_ENUM_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 1,
+	DISPATCH_AUTORELEASE_FREQUENCY_NEVER DISPATCH_ENUM_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 2,
 );
 
 /*!
@@ -610,8 +597,7 @@
  * This new value combines the attributes specified by the 'attr' parameter and
  * the chosen autorelease frequency.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW
 dispatch_queue_attr_t
 dispatch_queue_attr_make_with_autorelease_frequency(
@@ -671,7 +657,7 @@
  * The new value combines the attributes specified by the 'attr' parameter and
  * the new QOS class and relative priority.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW
 dispatch_queue_attr_t
 dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr,
@@ -736,8 +722,7 @@
  * @result
  * The newly created dispatch queue.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_queue_t
@@ -788,7 +773,7 @@
  * @result
  * The newly created dispatch queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_queue_t
@@ -818,7 +803,7 @@
  * @result
  * The label of the queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 const char *
 dispatch_queue_get_label(dispatch_queue_t _Nullable queue);
@@ -857,7 +842,7 @@
  *	- QOS_CLASS_BACKGROUND
  *	- QOS_CLASS_UNSPECIFIED
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 dispatch_qos_class_t
 dispatch_queue_get_qos_class(dispatch_queue_t queue,
@@ -922,7 +907,7 @@
  * If queue is DISPATCH_TARGET_QUEUE_DEFAULT, set the object's target queue
  * to the default target queue for the given object type.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 dispatch_set_target_queue(dispatch_object_t object,
@@ -941,7 +926,7 @@
  * Applications that call NSApplicationMain() or CFRunLoopRun() on the
  * main thread do not need to call dispatch_main().
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NORETURN
 void
 dispatch_main(void);
@@ -969,7 +954,7 @@
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_after(dispatch_time_t when,
@@ -1002,7 +987,7 @@
  * dispatch_after_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW
 void
 dispatch_after_f(dispatch_time_t when,
@@ -1049,7 +1034,7 @@
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block);
@@ -1083,7 +1068,7 @@
  * dispatch_barrier_async_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_barrier_async_f(dispatch_queue_t queue,
@@ -1111,7 +1096,7 @@
  * The result of passing NULL in this parameter is undefined.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_barrier_sync(dispatch_queue_t queue,
@@ -1143,7 +1128,7 @@
  * dispatch_barrier_sync_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_barrier_sync_f(dispatch_queue_t queue,
@@ -1186,7 +1171,7 @@
  * The destructor function pointer. This may be NULL and is ignored if context
  * is NULL.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_queue_set_specific(dispatch_queue_t queue, const void *key,
@@ -1215,7 +1200,7 @@
  * @result
  * The context for the specified key or NULL if no context was found.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_PURE DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 void *_Nullable
@@ -1242,7 +1227,7 @@
  * @result
  * The context for the specified key or NULL if no context was found.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
+API_AVAILABLE(macos(10.7), ios(5.0))
 DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 void *_Nullable
 dispatch_get_specific(const void *key);
@@ -1296,8 +1281,7 @@
  * The dispatch queue that the current block is expected to run on.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1
 void
 dispatch_assert_queue(dispatch_queue_t queue)
@@ -1323,8 +1307,7 @@
  * The dispatch queue that the current block is expected to run as a barrier on.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1
 void
 dispatch_assert_queue_barrier(dispatch_queue_t queue);
@@ -1347,8 +1330,7 @@
  * The dispatch queue that the current block is expected not to run on.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1
 void
 dispatch_assert_queue_not(dispatch_queue_t queue)
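
A usage sketch combining the queue-attribute constructors annotated above
(the label and QOS choice are illustrative):

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void)
    {
        /* Compose attributes: concurrent + per-work-item autorelease
         * frequency + utility QOS, created inactive so the queue can be
         * configured before it runs anything. */
        dispatch_queue_attr_t attr = DISPATCH_QUEUE_CONCURRENT;
        attr = dispatch_queue_attr_make_with_autorelease_frequency(attr,
                DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM);
        attr = dispatch_queue_attr_make_with_qos_class(attr,
                QOS_CLASS_UTILITY, 0);
        attr = dispatch_queue_attr_make_initially_inactive(attr);

        dispatch_queue_t q = dispatch_queue_create("com.example.q", attr);
        dispatch_set_target_queue(q, dispatch_get_global_queue(
                DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)); /* allowed while inactive */
        dispatch_activate(q); /* configuration done; start draining */

        dispatch_sync(q, ^{ puts("queue is active"); });
        dispatch_release(q);
        return 0;
    }
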
diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h
index b6139d7..f5394b4 100644
--- a/dispatch/semaphore.h
+++ b/dispatch/semaphore.h
@@ -57,7 +57,7 @@
  * @result
  * The newly created semaphore, or NULL on failure.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_semaphore_t
@@ -83,7 +83,7 @@
  * @result
  * Returns zero on success, or non-zero if the timeout occurred.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 long
 dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout);
@@ -105,7 +105,7 @@
  * This function returns non-zero if a thread is woken. Otherwise, zero is
  * returned.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 long
 dispatch_semaphore_signal(dispatch_semaphore_t dsema);
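
A usage sketch of the semaphore interfaces annotated above, bounding
concurrency to two workers (illustrative only):

    #include <dispatch/dispatch.h>

    int main(void)
    {
        /* A counting semaphore limiting concurrent workers to 2. */
        dispatch_semaphore_t sem = dispatch_semaphore_create(2);
        dispatch_queue_t q = dispatch_get_global_queue(
                DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
        dispatch_group_t g = dispatch_group_create();

        for (int i = 0; i < 6; i++) {
            dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
            dispatch_group_async(g, q, ^{
                /* at most two iterations run here at a time */
                dispatch_semaphore_signal(sem);
            });
        }
        dispatch_group_wait(g, DISPATCH_TIME_FOREVER);
        dispatch_release(g);
        dispatch_release(sem);
        return 0;
    }
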
diff --git a/dispatch/source.h b/dispatch/source.h
index 63b3ff3..8fd6344 100644
--- a/dispatch/source.h
+++ b/dispatch/source.h
@@ -79,7 +79,7 @@
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_DATA_ADD (&_dispatch_source_type_data_add)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(data_add);
 
 /*!
@@ -90,7 +90,7 @@
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_DATA_OR (&_dispatch_source_type_data_or)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(data_or);
 
 /*!
@@ -101,7 +101,7 @@
  * The mask is a mask of desired events from dispatch_source_mach_send_flags_t.
  */
 #define DISPATCH_SOURCE_TYPE_MACH_SEND (&_dispatch_source_type_mach_send)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_SOURCE_TYPE_DECL(mach_send);
 
 /*!
@@ -111,7 +111,7 @@
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_SOURCE_TYPE_DECL(mach_recv);
 
 /*!
@@ -124,7 +124,7 @@
  */
 #define DISPATCH_SOURCE_TYPE_MEMORYPRESSURE \
 		(&_dispatch_source_type_memorypressure)
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_8_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.9), ios(8.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_SOURCE_TYPE_DECL(memorypressure);
 
 /*!
@@ -135,7 +135,7 @@
  * The mask is a mask of desired events from dispatch_source_proc_flags_t.
  */
 #define DISPATCH_SOURCE_TYPE_PROC (&_dispatch_source_type_proc)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_SOURCE_TYPE_DECL(proc);
 
 /*!
@@ -146,7 +146,7 @@
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_READ (&_dispatch_source_type_read)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(read);
 
 /*!
@@ -156,7 +156,7 @@
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_SIGNAL (&_dispatch_source_type_signal)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(signal);
 
 /*!
@@ -167,7 +167,7 @@
  * The mask specifies which flags from dispatch_source_timer_flags_t to apply.
  */
 #define DISPATCH_SOURCE_TYPE_TIMER (&_dispatch_source_type_timer)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(timer);
 
 /*!
@@ -178,7 +178,7 @@
  * The mask is a mask of desired events from dispatch_source_vnode_flags_t.
  */
 #define DISPATCH_SOURCE_TYPE_VNODE (&_dispatch_source_type_vnode)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_SOURCE_TYPE_DECL(vnode);
 
 /*!
@@ -189,7 +189,7 @@
  * The mask is unused (pass zero for now).
  */
 #define DISPATCH_SOURCE_TYPE_WRITE (&_dispatch_source_type_write)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_SOURCE_TYPE_DECL(write);
 
 /*!
@@ -361,7 +361,7 @@
  * @result
  * The newly created dispatch source. Or NULL if invalid arguments are passed.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_source_t
@@ -384,7 +384,7 @@
  * The event handler block to submit to the source's target queue.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_event_handler(dispatch_source_t source,
@@ -406,7 +406,7 @@
  * The context parameter passed to the event handler function is the context of
  * the dispatch source current at the time the event handler was set.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_event_handler_f(dispatch_source_t source,
@@ -425,12 +425,13 @@
  * the source's event handler block has returned.
  *
  * IMPORTANT:
- * A cancellation handler is required for file descriptor and mach port based
- * sources in order to safely close the descriptor or destroy the port. Closing
- * the descriptor or port before the cancellation handler may result in a race
- * condition. If a new descriptor is allocated with the same value as the
- * recently closed descriptor while the source's event handler is still running,
- * the event handler may read/write data to the wrong descriptor.
+ * Source cancellation and a cancellation handler are required for file
+ * descriptor and mach port based sources in order to safely close the
+ * descriptor or destroy the port.
+ * Closing the descriptor or port before the cancellation handler is invoked may
+ * result in a race condition. If a new descriptor is allocated with the same
+ * value as the recently closed descriptor while the source's event handler is
+ * still running, the event handler may read/write data to the wrong descriptor.
  *
  * @param source
  * The dispatch source to modify.
@@ -440,7 +441,7 @@
  * The cancellation handler block to submit to the source's target queue.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_cancel_handler(dispatch_source_t source,
@@ -465,7 +466,7 @@
  * The context parameter passed to the event handler function is the current
  * context of the dispatch source at the time the handler call is made.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_cancel_handler_f(dispatch_source_t source,
@@ -493,7 +494,7 @@
  * The dispatch source to be canceled.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_source_cancel(dispatch_source_t source);
@@ -511,7 +512,7 @@
  * @result
  * Non-zero if canceled and zero if not canceled.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
 DISPATCH_NOTHROW
 long
@@ -542,7 +543,7 @@
  *  DISPATCH_SOURCE_TYPE_VNODE:           file descriptor (int)
  *  DISPATCH_SOURCE_TYPE_WRITE:           file descriptor (int)
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
 DISPATCH_NOTHROW
 uintptr_t
@@ -573,7 +574,7 @@
  *  DISPATCH_SOURCE_TYPE_VNODE:           dispatch_source_vnode_flags_t
  *  DISPATCH_SOURCE_TYPE_WRITE:           n/a
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
 DISPATCH_NOTHROW
 unsigned long
@@ -611,7 +612,7 @@
  *  DISPATCH_SOURCE_TYPE_VNODE:           dispatch_source_vnode_flags_t
  *  DISPATCH_SOURCE_TYPE_WRITE:           estimated buffer space available
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
 DISPATCH_NOTHROW
 unsigned long
@@ -633,7 +634,7 @@
  * as specified by the dispatch source type. A value of zero has no effect
  * and will not result in the submission of the event handler block.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_source_merge_data(dispatch_source_t source, unsigned long value);
@@ -685,7 +686,7 @@
  * @param leeway
  * The nanosecond leeway for the timer.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_source_set_timer(dispatch_source_t source,
@@ -715,7 +716,7 @@
  * The registration handler block to submit to the source's target queue.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_registration_handler(dispatch_source_t source,
@@ -740,7 +741,7 @@
  * The context parameter passed to the registration handler function is the
  * current context of the dispatch source at the time the handler call is made.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+API_AVAILABLE(macos(10.7), ios(4.3))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_source_set_registration_handler_f(dispatch_source_t source,
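
A usage sketch of the cancellation pattern the reworded comment above
describes: the descriptor is closed only in the cancellation handler, after
the event handler can no longer run (the path and buffer size are
illustrative):

    #include <dispatch/dispatch.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
        if (fd < 0) return 1;
        dispatch_source_t src = dispatch_source_create(
                DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0,
                dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0));

        dispatch_source_set_event_handler(src, ^{
            char buf[64];
            ssize_t n = read(fd, buf, sizeof(buf));
            printf("readable: %zd bytes\n", n);
            dispatch_source_cancel(src);
        });
        /* Close the fd only here: the event handler cannot run anymore,
         * so a recycled descriptor value cannot be misread. */
        dispatch_source_set_cancel_handler(src, ^{
            close(fd);
            exit(0);
        });
        dispatch_resume(src); /* sources are created suspended */
        dispatch_main();
    }
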
diff --git a/dispatch/time.h b/dispatch/time.h
index c2152ea..ce99f27 100644
--- a/dispatch/time.h
+++ b/dispatch/time.h
@@ -89,7 +89,7 @@
  * @result
  * A new dispatch_time_t.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_time_t
 dispatch_time(dispatch_time_t when, int64_t delta);
@@ -113,7 +113,7 @@
  * @result
  * A new dispatch_time_t.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_time_t
 dispatch_walltime(const struct timespec *_Nullable when, int64_t delta);
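
A usage sketch contrasting the two clocks (the values are illustrative):
dispatch_time() counts on the monotonic host clock, while dispatch_walltime()
tracks the wall clock, so the two deadlines drift apart if the system clock
is adjusted:

    #include <dispatch/dispatch.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* Monotonic: 500ms from now; unaffected by wall-clock changes. */
        dispatch_time_t t = dispatch_time(DISPATCH_TIME_NOW,
                500 * NSEC_PER_MSEC);
        /* Wall clock: one minute from the current date; tracks clock resets. */
        dispatch_time_t w = dispatch_walltime(NULL, 60 * NSEC_PER_SEC);
        (void)w;

        dispatch_after(t, dispatch_get_main_queue(), ^{
            puts("fired");
            exit(0);
        });
        dispatch_main();
    }
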
diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj
index 85e8aa4..8c98e56 100644
--- a/libdispatch.xcodeproj/project.pbxproj
+++ b/libdispatch.xcodeproj/project.pbxproj
@@ -85,6 +85,26 @@
 		6E21F2E81BBB23FA0000C6A5 /* firehose_server_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */; };
 		6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */; };
 		6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; };
+		6E4BACBD1D48A41500B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+		6E4BACC21D48A42000B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+		6E4BACC31D48A42100B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+		6E4BACC41D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+		6E4BACC51D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+		6E4BACC61D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+		6E4BACC71D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+		6E4BACC81D48A42400B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+		6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; };
+		6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+		6E4BACF61D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+		6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+		6E4BACF81D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+		6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+		6E4BACFA1D49A04900B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+		6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+		6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+		6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; };
+		6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; };
+		6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; };
 		6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; settings = {ATTRIBUTES = (Server, ); }; };
 		6E9955581C3AF7710071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; };
 		6E99558A1C3AF7900071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; };
@@ -99,6 +119,25 @@
 		6E9956091C3B21B40071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; };
 		6E9B6B5F1BB4F3C8009E324D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; };
 		6EA283D71CAB93920041B2E0 /* libdispatch.codes in Copy Trace Definitions */ = {isa = PBXBuildFile; fileRef = 6EA283D01CAB93270041B2E0 /* libdispatch.codes */; };
+		6EA793891D458A5800929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; };
+		6EA7938E1D458A5C00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; };
+		6EA7938F1D458A5E00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; };
+		6EA962971D48622600759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+		6EA962981D48622700759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+		6EA962991D48622800759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+		6EA9629A1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+		6EA9629B1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+		6EA9629C1D48622A00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+		6EA9629D1D48622B00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+		6EA9629E1D48622C00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+		6EA9629F1D48625000759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+		6EA962A01D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+		6EA962A11D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+		6EA962A21D48625200759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+		6EA962A31D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+		6EA962A41D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+		6EA962A51D48625400759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+		6EA962A61D48625500759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
 		6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; };
 		6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; };
 		6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; };
@@ -142,6 +181,7 @@
 		6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; };
 		6EF2CAB41C889D65001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; };
 		6EF2CAB51C889D67001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; };
+		6EFBDA4B1D61A0D600282887 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; };
 		721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; };
 		721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; };
 		72CC94300ECCD8750031B751 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; };
@@ -463,13 +503,6 @@
 			remoteGlobalIDString = 92F3FECA1BEC69E500025962;
 			remoteInfo = darwintests;
 		};
-		C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-			proxyType = 2;
-			remoteGlobalIDString = E4D01CB9108E6C7200FAA873;
-			remoteInfo = dispatch_deadname;
-		};
 		C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */ = {
 			isa = PBXContainerItemProxy;
 			containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
@@ -588,7 +621,12 @@
 		6E326B171C239431002A6505 /* dispatch_timer_timeout.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_timeout.c; sourceTree = "<group>"; };
 		6E326B441C239B61002A6505 /* dispatch_priority.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_priority.c; sourceTree = "<group>"; };
 		6E4130C91B431697001A152D /* backward-compat.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "backward-compat.xcconfig"; sourceTree = "<group>"; };
+		6E4BACBC1D48A41500B562AE /* mach.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mach.c; sourceTree = "<group>"; };
+		6E4BACC91D48A89500B562AE /* mach_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mach_internal.h; sourceTree = "<group>"; };
 		6E4FC9D11C84123600520351 /* os_venture_basic.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = os_venture_basic.c; sourceTree = "<group>"; };
+		6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event_kevent.c; sourceTree = "<group>"; };
+		6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = event_internal.h; sourceTree = "<group>"; };
+		6E5ACCBD1D3C6719007DA2B4 /* event.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event.c; sourceTree = "<group>"; };
 		6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_trysync.c; sourceTree = "<group>"; };
 		6E67D8D31C16C20B00FC98AC /* dispatch_apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_apply.c; sourceTree = "<group>"; };
 		6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_cf_main.c; sourceTree = "<group>"; };
@@ -614,15 +652,19 @@
 		6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_internal.h; sourceTree = "<group>"; };
 		6EA283D01CAB93270041B2E0 /* libdispatch.codes */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.codes; sourceTree = "<group>"; };
 		6EA2CB841C005DEF0076794A /* dispatch_source.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_source.c; sourceTree = "<group>"; };
+		6EA7937D1D456D1300929B1B /* event_epoll.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event_epoll.c; sourceTree = "<group>"; };
+		6EA793881D458A5800929B1B /* event_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = event_config.h; sourceTree = "<group>"; };
 		6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_server.a; sourceTree = BUILT_PRODUCTS_DIR; };
 		6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose.xcconfig; sourceTree = "<group>"; };
 		6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_inline_internal.h; sourceTree = "<group>"; };
+		6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_deadname.c; sourceTree = "<group>"; };
 		6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_kevent_cancel_races.c; sourceTree = "<group>"; };
 		6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_private.h; sourceTree = "<group>"; };
 		6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_state_machine.c; sourceTree = "<group>"; };
 		6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_internal.h; sourceTree = "<group>"; };
 		6EF2CAA41C88998A001ABE83 /* lock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lock.h; sourceTree = "<group>"; };
 		6EF2CAAB1C8899D5001ABE83 /* lock.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = lock.c; path = shims/lock.c; sourceTree = "<group>"; };
+		6EFBDA4A1D61A0D600282887 /* priority.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = priority.h; sourceTree = "<group>"; };
 		721F5C5C0F15520500FF03A6 /* semaphore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore.h; sourceTree = "<group>"; };
 		721F5CCE0F15553500FF03A6 /* semaphore.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = semaphore.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; };
 		72406A031AF95DF800DF4E2B /* firehose_reply.defs */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.mig; path = firehose_reply.defs; sourceTree = "<group>"; };
@@ -701,8 +743,6 @@
 		E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
 		E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = "<group>"; };
 		E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = "<group>"; };
-		E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = "libdispatch-resolver_iphoneos.order"; sourceTree = "<group>"; };
-		E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_iphoneos.order; sourceTree = "<group>"; };
 		E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_activity_private.h; sourceTree = "<group>"; };
 		E4B515D6164B2DA300E003AF /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
 		E4B515D7164B2DFB00E003AF /* introspection_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_private.h; sourceTree = "<group>"; };
@@ -771,8 +811,6 @@
 				FC7BEDAF0E83626100161930 /* Dispatch Private Headers */,
 				FC7BEDB60E8363DC00161930 /* Dispatch Project Headers */,
 				08FB7795FE84155DC02AAC07 /* Dispatch Source */,
-				6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */,
-				6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */,
 				92F3FEC91BEC687200025962 /* Darwin Tests */,
 				C6A0FF2B0290797F04C91782 /* Documentation */,
 				1AB674ADFE9D54B511CA2CBB /* Products */,
@@ -798,6 +836,7 @@
 				E4B515DC164B32E000E003AF /* introspection.c */,
 				5A27262510F26F1900751FBC /* io.c */,
 				6EF2CAAB1C8899D5001ABE83 /* lock.c */,
+				6E4BACBC1D48A41500B562AE /* mach.c */,
 				9661E56A0F3E7DDF00749F3E /* object.c */,
 				E4FC3263145F46C9002FBDDB /* object.m */,
 				96DF70BD0F38FE3C0074BD99 /* once.c */,
@@ -811,6 +850,8 @@
 				6EA283D01CAB93270041B2E0 /* libdispatch.codes */,
 				FC7BED950E8361E600161930 /* protocol.defs */,
 				E43570B8126E93380097AB9F /* provider.d */,
+				6E5ACCAF1D3BF2A0007DA2B4 /* event */,
+				6EF0B2641BA8C3A0007FA4F6 /* firehose */,
 			);
 			name = "Dispatch Source";
 			path = src;
@@ -840,11 +881,29 @@
 				4552540519B1384900B88766 /* jsgc_bench */,
 				4552540719B1384900B88766 /* async_bench */,
 				4552540919B1384900B88766 /* apply_bench */,
-				C00B0E111C5AEBBE000330B3 /* dispatch_deadname */,
 			);
 			name = Products;
 			sourceTree = "<group>";
 		};
+		6E5ACCAE1D3BF27F007DA2B4 /* event */ = {
+			isa = PBXGroup;
+			children = (
+				6EA793881D458A5800929B1B /* event_config.h */,
+				6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */,
+			);
+			path = event;
+			sourceTree = "<group>";
+		};
+		6E5ACCAF1D3BF2A0007DA2B4 /* event */ = {
+			isa = PBXGroup;
+			children = (
+				6E5ACCBD1D3C6719007DA2B4 /* event.c */,
+				6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */,
+				6EA7937D1D456D1300929B1B /* event_epoll.c */,
+			);
+			path = event;
+			sourceTree = "<group>";
+		};
 		6E9B6AE21BB39793009E324D /* OS Public Headers */ = {
 			isa = PBXGroup;
 			children = (
@@ -854,7 +913,7 @@
 			path = os;
 			sourceTree = "<group>";
 		};
-		6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */ = {
+		6EF0B2641BA8C3A0007FA4F6 /* firehose */ = {
 			isa = PBXGroup;
 			children = (
 				72406A391AF9926000DF4E2B /* firehose_types.defs */,
@@ -864,11 +923,10 @@
 				6E21F2E51BBB23F00000C6A5 /* firehose_server.c */,
 				72DEAA9D1AE1BB7300289540 /* firehose_server_object.m */,
 			);
-			name = "Firehose Source";
-			path = src/firehose;
+			path = firehose;
 			sourceTree = "<group>";
 		};
-		6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */ = {
+		6EF0B2661BA8C43D007FA4F6 /* firehose */ = {
 			isa = PBXGroup;
 			children = (
 				6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */,
@@ -876,8 +934,7 @@
 				6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */,
 				6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */,
 			);
-			name = "Firehose Project Headers";
-			path = src/firehose;
+			path = firehose;
 			sourceTree = "<group>";
 		};
 		92F3FEC91BEC687200025962 /* Darwin Tests */ = {
@@ -894,6 +951,7 @@
 				6E326ADE1C23451A002A6505 /* dispatch_concur.c */,
 				6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */,
 				6E8E4EC71C1A61680004F5CC /* dispatch_data.m */,
+				6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */,
 				6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */,
 				6E8E4ECB1C1A72650004F5CC /* dispatch_drift.c */,
 				6E67D90F1C16CF0B00FC98AC /* dispatch_group.c */,
@@ -986,8 +1044,6 @@
 				6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */,
 				E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */,
 				E448727914C6215D00BB45C2 /* libdispatch.order */,
-				E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */,
-				E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */,
 				E421E5FD1716BEA70090DC9B /* libdispatch.interposable */,
 			);
 			path = xcodeconfig;
@@ -1008,8 +1064,8 @@
 			isa = PBXGroup;
 			children = (
 				E47D6BB5125F0F800070D91C /* resolved.h */,
-				E44EBE371251656400645D88 /* resolver.c */,
 				E44EBE331251654000645D88 /* resolver.h */,
+				E44EBE371251656400645D88 /* resolver.c */,
 			);
 			path = resolver;
 			sourceTree = "<group>";
@@ -1058,6 +1114,7 @@
 				E4128ED513BA9A1700ABB2CB /* hw_config.h */,
 				6EF2CAA41C88998A001ABE83 /* lock.h */,
 				FC1832A2109923C7003403D5 /* perfmon.h */,
+				6EFBDA4A1D61A0D600282887 /* priority.h */,
 				FC1832A3109923C7003403D5 /* time.h */,
 				FC1832A4109923C7003403D5 /* tsd.h */,
 				E48EC97B1835BADD00EAC4F1 /* yield.h */,
@@ -1113,6 +1170,7 @@
 				E44757D917F4572600B82CA1 /* inline_internal.h */,
 				E4C1ED6E1263E714000D3C8B /* data_internal.h */,
 				5A0095A110F274B0000E2A31 /* io_internal.h */,
+				6E4BACC91D48A89500B562AE /* mach_internal.h */,
 				965ECC200F3EAB71004DDD89 /* object_internal.h */,
 				96929D950F3EA2170041FF5D /* queue_internal.h */,
 				5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */,
@@ -1122,6 +1180,8 @@
 				E422A0D412A557B5005E5BDB /* trace.h */,
 				E44F9DA816543F79001DCD38 /* introspection_internal.h */,
 				96929D830F3EA1020041FF5D /* shims.h */,
+				6E5ACCAE1D3BF27F007DA2B4 /* event */,
+				6EF0B2661BA8C43D007FA4F6 /* firehose */,
 				FC1832A0109923B3003403D5 /* shims */,
 			);
 			name = "Dispatch Project Headers";
@@ -1166,6 +1226,7 @@
 				E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */,
 				721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */,
 				FC5C9C1E0EADABE3006E462D /* group.h in Headers */,
+				6EFBDA4B1D61A0D600282887 /* priority.h in Headers */,
 				96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */,
 				5AAB45C410D30CC7004407EA /* io.h in Headers */,
 				E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */,
@@ -1190,9 +1251,11 @@
 				E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */,
 				5A0095A210F274B0000E2A31 /* io_internal.h in Headers */,
 				FC1832A8109923C7003403D5 /* tsd.h in Headers */,
+				6EA793891D458A5800929B1B /* event_config.h in Headers */,
 				96929D840F3EA1020041FF5D /* atomic.h in Headers */,
 				96929D850F3EA1020041FF5D /* shims.h in Headers */,
 				FC1832A7109923C7003403D5 /* time.h in Headers */,
+				6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */,
 				6ED64B511BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */,
 				E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */,
 				2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */,
@@ -1202,6 +1265,7 @@
 				6EF2CAA51C88998A001ABE83 /* lock.h in Headers */,
 				E422A0D512A557B5005E5BDB /* trace.h in Headers */,
 				E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */,
+				6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */,
 				6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */,
 				E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */,
 				E454569314746F1B00106147 /* object_private.h in Headers */,
@@ -1219,6 +1283,7 @@
 			files = (
 				E49F24AB125D57FA0057C971 /* dispatch.h in Headers */,
 				E49F24AC125D57FA0057C971 /* base.h in Headers */,
+				6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */,
 				E49F24AD125D57FA0057C971 /* object.h in Headers */,
 				E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */,
 				E49F24AE125D57FA0057C971 /* queue.h in Headers */,
@@ -1244,6 +1309,7 @@
 				E49F24BE125D57FA0057C971 /* source_internal.h in Headers */,
 				E49F24BD125D57FA0057C971 /* semaphore_internal.h in Headers */,
 				E4C1ED701263E714000D3C8B /* data_internal.h in Headers */,
+				6EA7938F1D458A5E00929B1B /* event_config.h in Headers */,
 				6ED64B501BBD8A1400C35F4D /* firehose_internal.h in Headers */,
 				E49F24BF125D57FA0057C971 /* io_internal.h in Headers */,
 				E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */,
@@ -1285,6 +1351,7 @@
 				E44F9DB51654403F001DCD38 /* source_internal.h in Headers */,
 				E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */,
 				E44F9DB01654402B001DCD38 /* data_internal.h in Headers */,
+				6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */,
 				6E9956081C3B21B30071D40C /* venture_internal.h in Headers */,
 				E44F9DB11654402E001DCD38 /* io_internal.h in Headers */,
 				E4630251176162D200E11F4C /* atomic_sfb.h in Headers */,
@@ -1293,6 +1360,7 @@
 				6ED64B591BBD8A3F00C35F4D /* firehose_inline_internal.h in Headers */,
 				6EF2CAB51C889D67001ABE83 /* lock.h in Headers */,
 				E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */,
+				6EA7938E1D458A5C00929B1B /* event_config.h in Headers */,
 				6ED64B4F1BBD8A1400C35F4D /* firehose_internal.h in Headers */,
 				E44F9DB71654404F001DCD38 /* shims.h in Headers */,
 				E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */,
@@ -1514,7 +1582,7 @@
 			isa = PBXProject;
 			attributes = {
 				BuildIndependentTargetsInParallel = YES;
-				LastUpgradeCheck = 0800;
+				LastUpgradeCheck = 0810;
 				TargetAttributes = {
 					3F3C9326128E637B0042B1F7 = {
 						ProvisioningStyle = Manual;
@@ -1606,10 +1674,10 @@
 				6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */,
 				C927F35A10FD7F0600C5AB8B /* libdispatch_tools */,
 				4552540A19B1389700B88766 /* libdispatch_tests */,
+				92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */,
+				92F3FECA1BEC69E500025962 /* darwintests */,
 				6E040C621C499B1B00411A2E /* libfirehose_kernel */,
 				6EB4E4081BA8BCAD00D7B9D2 /* libfirehose_server */,
-				92F3FECA1BEC69E500025962 /* darwintests */,
-				92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */,
 			);
 		};
 /* End PBXProject section */
@@ -1643,13 +1711,6 @@
 			remoteRef = 4552540819B1384900B88766 /* PBXContainerItemProxy */;
 			sourceTree = BUILT_PRODUCTS_DIR;
 		};
-		C00B0E111C5AEBBE000330B3 /* dispatch_deadname */ = {
-			isa = PBXReferenceProxy;
-			fileType = "compiled.mach-o.executable";
-			path = dispatch_deadname;
-			remoteRef = C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */;
-			sourceTree = BUILT_PRODUCTS_DIR;
-		};
 		C927F36710FD7F1000C5AB8B /* ddt */ = {
 			isa = PBXReferenceProxy;
 			fileType = "compiled.mach-o.executable";
@@ -1938,6 +1999,7 @@
 			files = (
 				C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */,
 				C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */,
+				6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */,
 				6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */,
 				C00B0DF41C5AEBBE000330B3 /* init.c in Sources */,
 				C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */,
@@ -1945,6 +2007,7 @@
 				C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */,
 				C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */,
 				C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */,
+				6E4BACC81D48A42400B562AE /* mach.c in Sources */,
 				C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */,
 				C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */,
 				C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */,
@@ -1954,8 +2017,10 @@
 				C00B0E001C5AEBBE000330B3 /* source.c in Sources */,
 				C00B0E011C5AEBBE000330B3 /* time.c in Sources */,
 				C00B0E021C5AEBBE000330B3 /* data.c in Sources */,
+				6EA962A61D48625500759D53 /* event_kevent.c in Sources */,
 				C00B0E031C5AEBBE000330B3 /* io.c in Sources */,
 				C00B0E041C5AEBBE000330B3 /* transform.c in Sources */,
+				6EA9629E1D48622C00759D53 /* event.c in Sources */,
 				C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -1966,6 +2031,7 @@
 			files = (
 				C01866A61C5973210040FC07 /* protocol.defs in Sources */,
 				C01866A71C5973210040FC07 /* resolver.c in Sources */,
+				6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */,
 				6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */,
 				C01866A81C5973210040FC07 /* init.c in Sources */,
 				C01866A91C5973210040FC07 /* queue.c in Sources */,
@@ -1973,6 +2039,7 @@
 				C01866AB1C5973210040FC07 /* firehose.defs in Sources */,
 				C01866AC1C5973210040FC07 /* block.cpp in Sources */,
 				C01866AD1C5973210040FC07 /* semaphore.c in Sources */,
+				6E4BACC71D48A42300B562AE /* mach.c in Sources */,
 				C01866AE1C5973210040FC07 /* firehose_reply.defs in Sources */,
 				C01866AF1C5973210040FC07 /* once.c in Sources */,
 				C01866B01C5973210040FC07 /* voucher.c in Sources */,
@@ -1982,8 +2049,10 @@
 				C01866B41C5973210040FC07 /* source.c in Sources */,
 				C01866B51C5973210040FC07 /* time.c in Sources */,
 				C01866B61C5973210040FC07 /* data.c in Sources */,
+				6EA962A51D48625400759D53 /* event_kevent.c in Sources */,
 				C01866B71C5973210040FC07 /* io.c in Sources */,
 				C01866B81C5973210040FC07 /* transform.c in Sources */,
+				6EA9629D1D48622B00759D53 /* event.c in Sources */,
 				C01866B91C5973210040FC07 /* allocator.c in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
@@ -1997,7 +2066,9 @@
 				6E9955CF1C3B218E0071D40C /* venture.c in Sources */,
 				6ED64B471BBD89AF00C35F4D /* firehose.defs in Sources */,
 				6ED64B441BBD898700C35F4D /* firehose_buffer.c in Sources */,
+				6EA9629F1D48625000759D53 /* event_kevent.c in Sources */,
 				E49F2499125D48D80057C971 /* resolver.c in Sources */,
+				6E4BACBD1D48A41500B562AE /* mach.c in Sources */,
 				E44EBE3E1251659900645D88 /* init.c in Sources */,
 				FC7BED990E8361E600161930 /* queue.c in Sources */,
 				721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */,
@@ -2007,11 +2078,13 @@
 				9676A0E10F3E755D00713ADB /* apply.c in Sources */,
 				9661E56B0F3E7DDF00749F3E /* object.c in Sources */,
 				965CD6350F3E806200D4E28D /* benchmark.c in Sources */,
+				6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */,
 				96A8AA870F41E7A400CD570B /* source.c in Sources */,
 				96032E4B0F5CC8C700241C5F /* time.c in Sources */,
 				5AAB45C010D30B79004407EA /* data.c in Sources */,
 				5A27262610F26F1900751FBC /* io.c in Sources */,
 				E43A72501AF85BBC00BAA921 /* block.cpp in Sources */,
+				6EA962971D48622600759D53 /* event.c in Sources */,
 				C9C5F80E143C1771006DC718 /* transform.c in Sources */,
 				E4FC3264145F46C9002FBDDB /* object.m in Sources */,
 				2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */,
@@ -2024,12 +2097,15 @@
 			isa = PBXSourcesBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
+				6E4BACC61D48A42300B562AE /* mach.c in Sources */,
 				E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */,
 				E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */,
 				6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */,
 				E46DBC4214EE10C80001F9F6 /* init.c in Sources */,
 				E46DBC4314EE10C80001F9F6 /* queue.c in Sources */,
+				6EA962A41D48625300759D53 /* event_kevent.c in Sources */,
 				6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */,
+				6EA9629C1D48622A00759D53 /* event.c in Sources */,
 				6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */,
 				E43A72881AF85BE900BAA921 /* block.cpp in Sources */,
 				E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */,
@@ -2046,6 +2122,7 @@
 				E46DBC4C14EE10C80001F9F6 /* io.c in Sources */,
 				E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */,
 				2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */,
+				6E4BACFA1D49A04900B562AE /* event_epoll.c in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -2058,7 +2135,9 @@
 				6E9956051C3B219B0071D40C /* venture.c in Sources */,
 				6ED64B461BBD89AF00C35F4D /* firehose.defs in Sources */,
 				6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */,
+				6EA962A01D48625100759D53 /* event_kevent.c in Sources */,
 				E49F24C9125D57FA0057C971 /* resolver.c in Sources */,
+				6E4BACC21D48A42000B562AE /* mach.c in Sources */,
 				E49F24CA125D57FA0057C971 /* init.c in Sources */,
 				E49F24CB125D57FA0057C971 /* queue.c in Sources */,
 				E49F24CC125D57FA0057C971 /* semaphore.c in Sources */,
@@ -2068,11 +2147,13 @@
 				E49F24CE125D57FA0057C971 /* apply.c in Sources */,
 				E49F24CF125D57FA0057C971 /* object.c in Sources */,
 				E49F24D0125D57FA0057C971 /* benchmark.c in Sources */,
+				6E4BACF61D49A04700B562AE /* event_epoll.c in Sources */,
 				E49F24D1125D57FA0057C971 /* source.c in Sources */,
 				E49F24D2125D57FA0057C971 /* time.c in Sources */,
 				E49F24D3125D57FA0057C971 /* data.c in Sources */,
 				E49F24D4125D57FA0057C971 /* io.c in Sources */,
 				E43A72841AF85BCB00BAA921 /* block.cpp in Sources */,
+				6EA962981D48622700759D53 /* event.c in Sources */,
 				C93D6165143E190E00EB9023 /* transform.c in Sources */,
 				E4FC3265145F46C9002FBDDB /* object.m in Sources */,
 				2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */,
@@ -2086,11 +2167,13 @@
 			buildActionMask = 2147483647;
 			files = (
 				E4B515BD164B2DA300E003AF /* provider.d in Sources */,
+				6EA962A31D48625300759D53 /* event_kevent.c in Sources */,
 				E4B515BE164B2DA300E003AF /* protocol.defs in Sources */,
 				E4B515BF164B2DA300E003AF /* resolver.c in Sources */,
 				6ED64B4B1BBD89BE00C35F4D /* firehose_reply.defs in Sources */,
 				6ED64B481BBD89B100C35F4D /* firehose.defs in Sources */,
 				E4B515C0164B2DA300E003AF /* init.c in Sources */,
+				6EA9629B1D48622900759D53 /* event.c in Sources */,
 				E4B515C1164B2DA300E003AF /* queue.c in Sources */,
 				6E9956021C3B21990071D40C /* venture.c in Sources */,
 				E4B515C2164B2DA300E003AF /* semaphore.c in Sources */,
@@ -2102,6 +2185,7 @@
 				E4B515C6164B2DA300E003AF /* benchmark.c in Sources */,
 				E4B515C7164B2DA300E003AF /* source.c in Sources */,
 				E4B515C8164B2DA300E003AF /* time.c in Sources */,
+				6E4BACC51D48A42200B562AE /* mach.c in Sources */,
 				E4B515C9164B2DA300E003AF /* data.c in Sources */,
 				E4B515CA164B2DA300E003AF /* io.c in Sources */,
 				E44A8E6F1805C3E0009FFDB6 /* voucher.c in Sources */,
@@ -2109,6 +2193,7 @@
 				6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */,
 				E4B515CC164B2DA300E003AF /* object.m in Sources */,
 				E4B515CD164B2DA300E003AF /* allocator.c in Sources */,
+				6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */,
 				E4B515CE164B2DA300E003AF /* data.m in Sources */,
 				E4B515DD164B32E000E003AF /* introspection.c in Sources */,
 			);
@@ -2123,7 +2208,9 @@
 				6E9956031C3B219A0071D40C /* venture.c in Sources */,
 				6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */,
 				6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */,
+				6EA962A21D48625200759D53 /* event_kevent.c in Sources */,
 				E49F2424125D3C970057C971 /* resolver.c in Sources */,
+				6E4BACC41D48A42200B562AE /* mach.c in Sources */,
 				E44EBE5512517EBE00645D88 /* init.c in Sources */,
 				E4EC11AE12514302000DDBD1 /* queue.c in Sources */,
 				E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */,
@@ -2133,11 +2220,13 @@
 				E4EC11B112514302000DDBD1 /* apply.c in Sources */,
 				E4EC11B212514302000DDBD1 /* object.c in Sources */,
 				E4EC11B312514302000DDBD1 /* benchmark.c in Sources */,
+				6E4BACF81D49A04800B562AE /* event_epoll.c in Sources */,
 				E4EC11B412514302000DDBD1 /* source.c in Sources */,
 				E4EC11B512514302000DDBD1 /* time.c in Sources */,
 				E4EC11B712514302000DDBD1 /* data.c in Sources */,
 				E4EC11B812514302000DDBD1 /* io.c in Sources */,
 				E43A72861AF85BCC00BAA921 /* block.cpp in Sources */,
+				6EA9629A1D48622900759D53 /* event.c in Sources */,
 				C93D6166143E190F00EB9023 /* transform.c in Sources */,
 				E4FC3266145F46C9002FBDDB /* object.m in Sources */,
 				2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */,
@@ -2155,7 +2244,9 @@
 				6E9956041C3B219B0071D40C /* venture.c in Sources */,
 				6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */,
 				6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */,
+				6EA962A11D48625100759D53 /* event_kevent.c in Sources */,
 				E49F2423125D3C960057C971 /* resolver.c in Sources */,
+				6E4BACC31D48A42100B562AE /* mach.c in Sources */,
 				E44EBE5712517EBE00645D88 /* init.c in Sources */,
 				E4EC121A12514715000DDBD1 /* queue.c in Sources */,
 				E4EC121B12514715000DDBD1 /* semaphore.c in Sources */,
@@ -2165,11 +2256,13 @@
 				E4EC121D12514715000DDBD1 /* apply.c in Sources */,
 				E4EC121E12514715000DDBD1 /* object.c in Sources */,
 				E4EC121F12514715000DDBD1 /* benchmark.c in Sources */,
+				6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */,
 				E4EC122012514715000DDBD1 /* source.c in Sources */,
 				E4EC122112514715000DDBD1 /* time.c in Sources */,
 				E4EC122312514715000DDBD1 /* data.c in Sources */,
 				E4EC122412514715000DDBD1 /* io.c in Sources */,
 				E43A72851AF85BCC00BAA921 /* block.cpp in Sources */,
+				6EA962991D48622800759D53 /* event.c in Sources */,
 				C93D6167143E190F00EB9023 /* transform.c in Sources */,
 				E4FC3267145F46C9002FBDDB /* object.m in Sources */,
 				2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */,
@@ -2413,12 +2506,20 @@
 		E49F24D9125D57FA0057C971 /* Release */ = {
 			isa = XCBuildConfiguration;
 			buildSettings = {
+				WARNING_CFLAGS = (
+					"-Weverything",
+					"$(inherited)",
+				);
 			};
 			name = Release;
 		};
 		E49F24DA125D57FA0057C971 /* Debug */ = {
 			isa = XCBuildConfiguration;
 			buildSettings = {
+				WARNING_CFLAGS = (
+					"-Weverything",
+					"$(inherited)",
+				);
 			};
 			name = Debug;
 		};
diff --git a/libkqueue b/libkqueue
deleted file mode 160000
index b3f81ec..0000000
--- a/libkqueue
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit b3f81ecf680e826c2dc834316b5d77fc1be5a5c7
diff --git a/man/dispatch_object.3 b/man/dispatch_object.3
index 95ba1c3..cddcf32 100644
--- a/man/dispatch_object.3
+++ b/man/dispatch_object.3
@@ -23,6 +23,10 @@
 .Fo dispatch_resume
 .Fa "dispatch_object_t object"
 .Fc
+.Ft void
+.Fo dispatch_activate
+.Fa "dispatch_object_t object"
+.Fc
 .Ft "void *"
 .Fo dispatch_get_context
 .Fa "dispatch_object_t object"
@@ -40,7 +44,7 @@
 .Sh DESCRIPTION
 Dispatch objects share functions for coordinating memory management, suspension,
 cancellation and context pointers.
-.Sh MEMORY MANGEMENT
+.Sh MEMORY MANAGEMENT
 Objects returned by creation functions in the dispatch framework may be
 uniformly retained and released with the functions
 .Fn dispatch_retain
@@ -123,6 +127,17 @@
 	dispatch_release(object);
 });
 .Ed
+.Sh ACTIVATION
+Dispatch objects such as queues and sources may be created in an inactive
+state. Objects in this state must be activated before any blocks
+associated with them will be invoked. Calling
+.Fn dispatch_activate
+on an active object has no effect.
+.Pp
+Changing attributes such as the target queue or a source handler is no
+longer permitted once the object has been activated (see
+.Xr dispatch_set_target_queue 3 ,
+.Xr dispatch_source_set_event_handler 3 ).
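A minimal sketch of the activation flow described above; dispatch_queue_attr_make_initially_inactive is the stock way to obtain an inactive queue, and the label and QOS class below are illustrative:

	dispatch_queue_attr_t attr =
			dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_SERIAL);
	dispatch_queue_t q = dispatch_queue_create("com.example.inactive", attr);
	// Configuration is still permitted while the queue is inactive.
	dispatch_set_target_queue(q, dispatch_get_global_queue(QOS_CLASS_UTILITY, 0));
	dispatch_async(q, ^{ /* not invoked until the queue is activated */ });
	dispatch_activate(q); // a second dispatch_activate would be a no-op
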
 .Sh SUSPENSION
 The invocation of blocks on dispatch queues or dispatch sources may be suspended
 or resumed with the functions
@@ -148,7 +163,7 @@
 .Fn dispatch_resume
 such that the dispatch object is fully resumed when the last reference is
 released. The result of releasing all references to a dispatch object while in
-a suspended state is undefined.
+an inactive or suspended state is undefined.
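A balanced suspension sketch consistent with the rule above, assuming q is an activated queue about to lose its last reference:

	dispatch_suspend(q);  // q stops invoking blocks after the current one
	// ... mutate state that blocks on q depend on ...
	dispatch_resume(q);   // rebalance the suspend count to zero
	dispatch_release(q);  // safe: q is neither inactive nor suspended
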
 .Sh CONTEXT POINTERS
 Dispatch objects support supplemental context pointers. The value of the
 context pointer may be retrieved and updated with
diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3
index 81c2915..da26365 100644
--- a/man/dispatch_semaphore_create.3
+++ b/man/dispatch_semaphore_create.3
@@ -23,6 +23,13 @@
 .Fc
 .Sh DESCRIPTION
 Dispatch semaphores are used to synchronize threads.
+.Pp
+The
+.Fn dispatch_semaphore_wait
+function decrements the semaphore. If the resulting value is less than zero,
+it waits for a signal from a thread that increments the semaphore by calling
+.Fn dispatch_semaphore_signal
+before returning.
 The
 .Fa timeout
 parameter is creatable with the
@@ -30,6 +37,13 @@
 or
 .Xr dispatch_walltime 3
 functions.
+.Pp
+The
+.Fn dispatch_semaphore_signal
+function increments the counting semaphore. If the previous value was less
+than zero, it wakes one of the threads that are waiting in
+.Fn dispatch_semaphore_wait
+before returning.
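A minimal sketch of this wait/signal pairing; the global queue and the FOREVER timeout are illustrative choices:

	dispatch_semaphore_t sema = dispatch_semaphore_create(0);
	dispatch_async(dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^{
		// ... produce a result ...
		dispatch_semaphore_signal(sema); // previous value -1: wakes the waiter
	});
	dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER); // 0 -> -1, blocks
	dispatch_release(sema); // count is back at its initial value
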
 .Sh COMPLETION SYNCHRONIZATION
 If the
 .Fa count
diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3
index 4da708c..e9b0fb7 100644
--- a/man/dispatch_source_create.3
+++ b/man/dispatch_source_create.3
@@ -295,7 +295,7 @@
 .Pp
 The data returned by
 .Fn dispatch_source_get_data
-indicates which of the events in the
+is a bitmask that indicates which of the events in the
 .Fa mask
 were observed.  Note that because this source type will request notifications on
 the provided port, it should not be mixed with the use of
@@ -372,7 +372,7 @@
 .Pp
 The data returned by
 .Fn dispatch_source_get_data
-indicates which of the events in the
+is a bitmask that indicates which of the events in the
 .Fa mask
 were observed.
 .Pp
@@ -548,7 +548,7 @@
 .Pp
 The data returned by
 .Fn dispatch_source_get_data
-indicates which of the events in the
+is a bitmask that indicates which of the events in the
 .Fa mask
 were observed.
 .Pp
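For instance, with a hypothetical DISPATCH_SOURCE_TYPE_VNODE source (the descriptor fd and queue are assumed to exist), the returned value is tested bit by bit against the registration mask:

	dispatch_source_t src = dispatch_source_create(DISPATCH_SOURCE_TYPE_VNODE,
			(uintptr_t)fd, DISPATCH_VNODE_DELETE | DISPATCH_VNODE_WRITE, queue);
	dispatch_source_set_event_handler(src, ^{
		// Bitmask restricted to the events requested in the mask above.
		uintptr_t events = dispatch_source_get_data(src);
		if (events & DISPATCH_VNODE_DELETE) { /* file was deleted */ }
		if (events & DISPATCH_VNODE_WRITE)  { /* file was written to */ }
	});
	dispatch_activate(src);
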
diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h
index 2c6466f..e027a8c 100644
--- a/os/firehose_buffer_private.h
+++ b/os/firehose_buffer_private.h
@@ -26,6 +26,7 @@
 #include <stdint.h>
 #else
 #include <os/base.h>
+#include <os/availability.h>
 #include <os/base_private.h>
 #include <dispatch/dispatch.h>
 #endif
@@ -38,39 +39,9 @@
  * Layout of structs is subject to change without notice
  */
 
-#define FIREHOSE_BUFFER_CHUNK_SIZE				4096ul
 #define FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE	2048ul
 #define FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT		16
 
-typedef union {
-	uint64_t fbc_atomic_pos;
-#define FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC		(1ULL <<  0)
-#define FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC	(1ULL << 16)
-#define FIREHOSE_BUFFER_POS_REFCNT_INC			(1ULL << 32)
-#define FIREHOSE_BUFFER_POS_FULL_BIT			(1ULL << 56)
-#define FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(pos, stream) \
-		((((pos).fbc_atomic_pos >> 48) & 0x1ff) == (uint16_t)stream)
-	struct {
-		uint16_t fbc_next_entry_offs;
-		uint16_t fbc_private_offs;
-		uint8_t  fbc_refcnt;
-		uint8_t  fbc_qos_bits;
-		uint8_t  fbc_stream;
-		uint8_t  fbc_flag_full : 1;
-		uint8_t  fbc_flag_io : 1;
-		uint8_t  _fbc_flag_unused : 6;
-	};
-} firehose_buffer_pos_u;
-
-typedef struct firehose_buffer_chunk_s {
-	uint8_t  fbc_start[0];
-	firehose_buffer_pos_u volatile fbc_pos;
-	uint64_t fbc_timestamp;
-	uint8_t  fbc_data[FIREHOSE_BUFFER_CHUNK_SIZE
-			- sizeof(firehose_buffer_pos_u)
-			- sizeof(uint64_t)];
-} __attribute__((aligned(8))) *firehose_buffer_chunk_t;
-
 typedef struct firehose_buffer_range_s {
 	uint16_t fbr_offset; // offset from the start of the buffer
 	uint16_t fbr_length;
@@ -78,6 +49,8 @@
 
 #ifdef KERNEL
 
+typedef struct firehose_chunk_s *firehose_chunk_t;
+
 // implemented by the kernel
 extern void __firehose_buffer_push_to_logd(firehose_buffer_t fb, bool for_io);
 extern void __firehose_critical_region_enter(void);
@@ -89,19 +62,10 @@
 __firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream,
 		uint16_t pubsize, uint16_t privsize, uint8_t **privptr);
 
-firehose_tracepoint_t
-__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc,
-		uint64_t stamp, firehose_stream_t stream,
-		uint16_t pubsize, uint16_t privsize, uint8_t **privptr);
-
 void
 __firehose_buffer_tracepoint_flush(firehose_tracepoint_t vat,
 		firehose_tracepoint_id_u vatid);
 
-void
-__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc,
-		firehose_tracepoint_t vat, firehose_tracepoint_id_u vatid);
-
 firehose_buffer_t
 __firehose_buffer_create(size_t *size);
 
@@ -118,13 +82,12 @@
 
 OS_ALWAYS_INLINE
 static inline const uint8_t *
-_firehose_tracepoint_reader_init(firehose_buffer_chunk_t fbc,
-		const uint8_t **endptr)
+_firehose_tracepoint_reader_init(firehose_chunk_t fc, const uint8_t **endptr)
 {
-	const uint8_t *start = fbc->fbc_data;
-	const uint8_t *end = fbc->fbc_start + fbc->fbc_pos.fbc_next_entry_offs;
+	const uint8_t *start = fc->fc_data;
+	const uint8_t *end = fc->fc_start + fc->fc_pos.fcp_next_entry_offs;
 
-	if (end > fbc->fbc_start + FIREHOSE_BUFFER_CHUNK_SIZE) {
+	if (end > fc->fc_start + FIREHOSE_CHUNK_SIZE) {
 		end = start;
 	}
 	*endptr = end;
@@ -165,13 +128,13 @@
 
 OS_ALWAYS_INLINE
 static inline bool
-firehose_buffer_range_validate(firehose_buffer_chunk_t fbc,
-		firehose_tracepoint_t ft, firehose_buffer_range_t range)
+firehose_buffer_range_validate(firehose_chunk_t fc, firehose_tracepoint_t ft,
+		firehose_buffer_range_t range)
 {
-	if (range->fbr_offset + range->fbr_length > FIREHOSE_BUFFER_CHUNK_SIZE) {
+	if (range->fbr_offset + range->fbr_length > FIREHOSE_CHUNK_SIZE) {
 		return false;
 	}
-	if (fbc->fbc_start + range->fbr_offset < ft->ft_data + ft->ft_length) {
+	if (fc->fc_start + range->fbr_offset < ft->ft_data + ft->ft_length) {
 		return false;
 	}
 	return true;
diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h
index 4bff8ab..2ed986c 100644
--- a/os/firehose_server_private.h
+++ b/os/firehose_server_private.h
@@ -235,7 +235,7 @@
 void
 firehose_client_metadata_stream_peek(firehose_client_t client,
 		firehose_event_t context, OS_NOESCAPE bool (^peek_should_start)(void),
-		OS_NOESCAPE bool (^peek)(firehose_buffer_chunk_t fbc));
+		OS_NOESCAPE bool (^peek)(firehose_chunk_t fbc));
 
 #pragma mark - Firehose Server
 
@@ -246,7 +246,7 @@
  * Type of the handler block for firehose_server_init()
  */
 typedef void (^firehose_handler_t)(firehose_client_t client,
-		firehose_event_t event, firehose_buffer_chunk_t page);
+		firehose_event_t event, firehose_chunk_t page);
 
 /*!
  * @function firehose_server_init
@@ -289,11 +289,32 @@
 void
 firehose_server_resume(void);
 
+/*!
+ * @typedef firehose_server_queue_t
+ *
+ * @abstract
+ * Values to pass to firehose_server_copy_queue()
+ */
+OS_ENUM(firehose_server_queue, unsigned long,
+	FIREHOSE_SERVER_QUEUE_UNKNOWN,
+	FIREHOSE_SERVER_QUEUE_IO,
+	FIREHOSE_SERVER_QUEUE_MEMORY,
+);
+
+/*!
+ * @function firehose_server_copy_queue
+ *
+ * @abstract
+ * Returns one of the internal queues of the firehose server subsystem.
+ */
+OS_NOTHROW OS_OBJECT_RETURNS_RETAINED
+dispatch_queue_t
+firehose_server_copy_queue(firehose_server_queue_t which);
+
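A hedged usage sketch for this new SPI (the block body is illustrative; the release follows from the OS_OBJECT_RETURNS_RETAINED annotation above):

	dispatch_queue_t io_q = firehose_server_copy_queue(FIREHOSE_SERVER_QUEUE_IO);
	dispatch_async(io_q, ^{
		// Runs serialized with the server's I/O-side event handling.
	});
	dispatch_release(io_q); // returned retained: the caller must release
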
 #pragma mark - Firehose Snapshot
 
 /*!
  * @typedef firehose_snapshot_event
- *
  */
 OS_ENUM(firehose_snapshot_event, unsigned long,
 	FIREHOSE_SNAPSHOT_EVENT_IO_START = 1,
@@ -310,7 +331,7 @@
  * Type of the handler block for firehose_snapshot
  */
 typedef void (^firehose_snapshot_handler_t)(firehose_client_t client,
-		firehose_snapshot_event_t event, firehose_buffer_chunk_t page);
+		firehose_snapshot_event_t event, firehose_chunk_t page);
 
 /*!
  * @function firehose_snapshot
diff --git a/os/object.h b/os/object.h
index f3faa62..68a6a33 100644
--- a/os/object.h
+++ b/os/object.h
@@ -24,6 +24,7 @@
 #ifdef __APPLE__
 #include <Availability.h>
 #include <TargetConditionals.h>
+#include <os/availability.h>
 #endif
 #ifndef __linux__
 #include <os/base.h>
@@ -232,7 +233,7 @@
  * @result
  * The retained object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_EXPORT OS_SWIFT_UNAVAILABLE("Can't be used with ARC")
 void*
 os_retain(void *object);
@@ -254,7 +255,7 @@
  * @param object
  * The object to release.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_EXPORT
 void OS_SWIFT_UNAVAILABLE("Can't be used with ARC")
 os_release(void *object);
diff --git a/os/object_private.h b/os/object_private.h
index dc2af83..36a807c 100644
--- a/os/object_private.h
+++ b/os/object_private.h
@@ -31,8 +31,8 @@
 #include <stddef.h>
 #include <os/object.h>
 
-#ifndef __OSX_AVAILABLE_STARTING
-#define __OSX_AVAILABLE_STARTING(x, y)
+#ifndef API_AVAILABLE
+#define API_AVAILABLE(...)
 #endif
 
 #if __GNUC__
@@ -112,7 +112,7 @@
 #define _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) \
 		OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super)
 #elif OS_OBJECT_USE_OBJC
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT
 @interface OS_OBJECT_CLASS(object) : NSObject
 - (void)_xref_dispose;
@@ -136,48 +136,48 @@
 
 #if !_OS_OBJECT_OBJC_ARC
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_alloc(const void *cls, size_t size);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_alloc_realized(const void *cls, size_t size);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 void _os_object_dealloc(_os_object_t object);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_retain(_os_object_t object);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_retain_with_resurrect(_os_object_t obj);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 void
 _os_object_release(_os_object_t object);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_retain_internal(_os_object_t object);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 void
diff --git a/os/voucher_activity_private.h b/os/voucher_activity_private.h
index 8f233b3..6bb8a0b 100644
--- a/os/voucher_activity_private.h
+++ b/os/voucher_activity_private.h
@@ -28,11 +28,13 @@
 #endif
 #ifndef __linux__
 #include <os/base.h>
+#include <os/availability.h>
 #endif
+#include <sys/uio.h>
 #include <os/object.h>
 #include "voucher_private.h"
 
-#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20160329
+#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20161003
 
 #if OS_VOUCHER_WEAK_IMPORT
 #define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT
@@ -79,8 +81,7 @@
  * The current activity identifier, if any. When 0 is returned, parent_id will
  * also always be 0.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW
 firehose_activity_id_t
 voucher_get_activity_id(voucher_t voucher, firehose_activity_id_t *parent_id);
@@ -109,15 +110,14 @@
  * The current activity identifier, if any. When 0 is returned, parent_id will
  * also always be 0.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW
 firehose_activity_id_t
 voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid,
 		firehose_activity_id_t *parent_id);
 
 /*!
- * @function voucher_activity_create
+ * @function voucher_activity_create_with_data
  *
  * @abstract
  * Creates a voucher object with a new activity identifier.
@@ -151,22 +151,24 @@
  * @param flags
  * See voucher_activity_flag_t documentation for effect.
  *
- * @param location
- * Location identifier for the automatic tracepoint generated as part of
- * creating the new activity.
+ * @param pubdata
+ * Pointer to packed buffer of tracepoint data.
+ *
+ * @param publen
+ * Length of data at 'pubdata'.
  *
  * @result
  * A new voucher with an activity identifier.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t
-voucher_activity_create(firehose_tracepoint_id_t trace_id,
-		voucher_t base, firehose_activity_flags_t flags, uint64_t location);
+voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id,
+		voucher_t base, firehose_activity_flags_t flags,
+		const void *pubdata, size_t publen);
 
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_DEPRECATED_WITH_REPLACEMENT("voucher_activity_create_with_data",
+		macos(10.12,10.12), ios(10.0,10.0), tvos(10.0,10.0), watchos(3.0,3.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t
 voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
@@ -192,8 +194,7 @@
  * @param stream
  * The stream to flush.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW
 void
 voucher_activity_flush(firehose_stream_t stream);
@@ -219,8 +220,7 @@
  * @param publen
  * Length of data at 'pubdata'.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4
 firehose_tracepoint_id_t
 voucher_activity_trace(firehose_stream_t stream,
@@ -228,7 +228,7 @@
 		const void *pubdata, size_t publen);
 
 /*!
- * @function voucher_activity_trace_with_private_strings
+ * @function voucher_activity_trace_v
  *
  * @abstract
  * Add a tracepoint to the specified stream, with private data.
@@ -242,20 +242,29 @@
  * @param timestamp
  * The mach_approximate_time()/mach_absolute_time() value for this tracepoint.
  *
- * @param pubdata
- * Pointer to packed buffer of tracepoint data.
+ * @param iov
+ * Array of `struct iovec` pointing to the data to lay out.
+ * The iovec entries must span exactly `publen + privlen` bytes in total.
+ * The `publen` boundary must coincide with the end of an iovec (each iovec
+ * must either be pure public or pure private data).
  *
  * @param publen
- * Length of data at 'pubdata'.
- *
- * @param privdata
- * Pointer to packed buffer of private tracepoint data.
+ * Total length of data to read from the iovec for the public data.
  *
  * @param privlen
- * Length of data at 'privdata'.
+ * Length of private data to read from the iovec after the public
+ * data.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4
+firehose_tracepoint_id_t
+voucher_activity_trace_v(firehose_stream_t stream,
+		firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+		const struct iovec *iov, size_t publen, size_t privlen);
+
+
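A minimal layout sketch for voucher_activity_trace_v, assuming a valid stream and trace_id were obtained elsewhere (buffer contents and sizes are illustrative): each iovec entry is wholly public or wholly private, and the public entries end exactly at publen:

	uint8_t pub[32], priv[16]; // packed tracepoint data
	struct iovec iov[] = {
		{ .iov_base = pub,  .iov_len = sizeof(pub)  }, // pure public data
		{ .iov_base = priv, .iov_len = sizeof(priv) }, // pure private data
	};
	// publen (32) coincides with the end of iov[0], as required.
	voucher_activity_trace_v(stream, trace_id, mach_approximate_time(),
			iov, sizeof(pub), sizeof(priv));
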
+API_DEPRECATED_WITH_REPLACEMENT("voucher_activity_trace_v",
+		macos(10.12,10.12), ios(10.0,10.0), tvos(10.0,10.0), watchos(3.0,3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 OS_NONNULL6
 firehose_tracepoint_id_t
 voucher_activity_trace_with_private_strings(firehose_stream_t stream,
@@ -263,14 +272,11 @@
 		const void *pubdata, size_t publen,
 		const void *privdata, size_t privlen);
 
-typedef struct voucher_activity_hooks_s {
+typedef const struct voucher_activity_hooks_s {
 #define VOUCHER_ACTIVITY_HOOKS_VERSION     3
 	long vah_version;
-	// version 1
 	mach_port_t (*vah_get_logd_port)(void);
-	// version 2
 	dispatch_mach_handler_function_t vah_debug_channel_handler;
-	// version 3
 	kern_return_t (*vah_get_reconnect_info)(mach_vm_address_t *, mach_vm_size_t *);
 } *voucher_activity_hooks_t;
 
@@ -283,8 +289,7 @@
  * @param hooks
  * A pointer to a voucher_activity_hooks_s structure.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL_ALL
 void
 voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks);
@@ -302,7 +307,7 @@
  * @result
  * Address of metadata buffer.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL
 void*
 voucher_activity_get_metadata_buffer(size_t *length);
@@ -314,8 +319,7 @@
  * Return the current voucher activity ID. Available for the dyld client stub
  * only.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
 firehose_activity_id_t
 voucher_get_activity_id_4dyld(void);
diff --git a/os/voucher_private.h b/os/voucher_private.h
index 562a704..3f9b88c 100644
--- a/os/voucher_private.h
+++ b/os/voucher_private.h
@@ -23,6 +23,7 @@
 
 #ifndef __linux__
 #include <os/base.h>
+#include <os/availability.h>
 #endif
 #if __has_include(<mach/mach.h>)
 #include <os/object.h>
@@ -100,7 +101,7 @@
  * @result
  * The previously adopted voucher object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT_NEEDS_RELEASE
 OS_NOTHROW
 voucher_t _Nullable
@@ -116,7 +117,7 @@
  * @result
  * The currently adopted voucher object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t _Nullable
 voucher_copy(void);
@@ -135,7 +136,7 @@
  * @result
  * A copy of the currently adopted voucher object, with importance removed.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t _Nullable
 voucher_copy_without_importance(void);
@@ -161,7 +162,7 @@
  *
  * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_NOTHROW
 void
 voucher_replace_default_voucher(void);
@@ -179,7 +180,7 @@
  *
  * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_NOTHROW
 void
 voucher_decrement_importance_count4CF(voucher_t _Nullable voucher);
@@ -263,7 +264,7 @@
  * When not building with Objective-C ARC, must be released with a -[release]
  * message or the Block_release() function.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED_BLOCK
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_block_t
@@ -346,7 +347,7 @@
  * When not building with Objective-C ARC, must be released with a -[release]
  * message or the Block_release() function.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL5 DISPATCH_RETURNS_RETAINED_BLOCK
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_block_t
@@ -407,7 +408,7 @@
  * @result
  * The newly created dispatch queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+API_AVAILABLE(macos(10.11), ios(9.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_queue_t
@@ -440,7 +441,7 @@
  * The newly created voucher object or NULL if the message was not carrying a
  * mach voucher.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t _Nullable
 voucher_create_with_mach_msg(mach_msg_header_t *msg);
@@ -475,7 +476,7 @@
  * or the persona identifier of the current process
  * or PERSONA_ID_NONE
  */
-__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2)
+API_AVAILABLE(ios(9.2))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
 uid_t
 voucher_get_current_persona(void);
@@ -498,7 +499,7 @@
  * 0 on success: currently adopted voucher has a PERSONA_TOKEN
  * -1 on failure: persona_info is untouched/uninitialized
  */
-__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2)
+API_AVAILABLE(ios(9.2))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1
 int
 voucher_get_current_persona_originator_info(
@@ -522,7 +523,7 @@
  * 0 on success: currently adopted voucher has a PERSONA_TOKEN
  * -1 on failure: persona_info is untouched/uninitialized
  */
-__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2)
+API_AVAILABLE(ios(9.2))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1
 int
 voucher_get_current_persona_proximate_info(
diff --git a/private/benchmark.h b/private/benchmark.h
index ef3cdbd..ab57156 100644
--- a/private/benchmark.h
+++ b/private/benchmark.h
@@ -70,13 +70,13 @@
  *     cache-line.
  */
 #ifdef __BLOCKS__
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
 uint64_t
 dispatch_benchmark(size_t count, dispatch_block_t block);
 #endif
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW
 uint64_t
 dispatch_benchmark_f(size_t count, void *_Nullable ctxt,
diff --git a/private/data_private.h b/private/data_private.h
index 7485525..364a8ff 100644
--- a/private/data_private.h
+++ b/private/data_private.h
@@ -43,7 +43,7 @@
  * encapsulate buffers that should not be copied or freed by the system.
  */
 #define DISPATCH_DATA_DESTRUCTOR_NONE (_dispatch_data_destructor_none)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(none);
 
 /*!
@@ -53,7 +53,7 @@
  */
 #define DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE \
 		(_dispatch_data_destructor_vm_deallocate)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(vm_deallocate);
 
 /*!
@@ -77,7 +77,7 @@
  *			data buffer when it is no longer needed.
  * @result		A newly created dispatch data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
 dispatch_data_create_f(const void *buffer,
@@ -100,7 +100,7 @@
  *			location of the newly allocated memory region, or NULL.
  * @result		A newly created dispatch data object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
@@ -142,7 +142,7 @@
  * @result		A Boolean indicating whether traversal completed
  *			successfully.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 bool
 dispatch_data_apply_f(dispatch_data_t data, void *_Nullable context,
@@ -163,7 +163,7 @@
  * @result		A mach port for the newly made memory entry, or
  *			MACH_PORT_NULL if an error occurred.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 mach_port_t
 dispatch_data_make_memory_entry(dispatch_data_t data);
@@ -198,7 +198,7 @@
  * or should be, comprised of raw data bytes with no given encoding.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_NONE (&_dispatch_data_format_type_none)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(none);
 
 /*!
@@ -209,7 +209,7 @@
  * types.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_BASE32 (&_dispatch_data_format_type_base32)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(base32);
 
 /*!
@@ -221,7 +221,7 @@
  */
 #define DISPATCH_DATA_FORMAT_TYPE_BASE32HEX \
 		(&_dispatch_data_format_type_base32hex)
-__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(base32hex);
 
 /*!
@@ -232,7 +232,7 @@
  * types.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_BASE64 (&_dispatch_data_format_type_base64)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(base64);
 
 /*!
@@ -242,7 +242,7 @@
  * with other UTF format types.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_UTF8 (&_dispatch_data_format_type_utf8)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(utf8);
 
 /*!
@@ -252,7 +252,7 @@
  * conjunction with other UTF format types.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_UTF16LE (&_dispatch_data_format_type_utf16le)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(utf16le);
 
 /*!
@@ -262,7 +262,7 @@
  * conjunction with other UTF format types.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_UTF16BE (&_dispatch_data_format_type_utf16be)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(utf16be);
 
 /*!
@@ -274,7 +274,7 @@
  * format.
  */
 #define DISPATCH_DATA_FORMAT_TYPE_UTF_ANY (&_dispatch_data_format_type_utf_any)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any);
 
 /*!
@@ -295,7 +295,7 @@
  * produced, or NULL if an error occurred.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
diff --git a/private/introspection_private.h b/private/introspection_private.h
index fa8e49a..972c688 100644
--- a/private/introspection_private.h
+++ b/private/introspection_private.h
@@ -68,8 +68,8 @@
 typedef struct dispatch_source_s *dispatch_source_t;
 typedef struct dispatch_group_s *dispatch_group_t;
 typedef struct dispatch_object_s *dispatch_object_t;
-#ifndef __OSX_AVAILABLE_STARTING
-#define __OSX_AVAILABLE_STARTING(x,y)
+#ifndef API_AVAILABLE
+#define API_AVAILABLE(...)
 #endif
 #ifndef DISPATCH_EXPORT
 #define DISPATCH_EXPORT extern
@@ -135,7 +135,7 @@
  * Size of dispatch_introspection_source_s structure.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT const struct dispatch_introspection_versions_s {
 	unsigned long introspection_version;
 	unsigned long hooks_version;
@@ -716,7 +716,7 @@
  * hooks on output.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT void
 dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks);
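
The #ifndef API_AVAILABLE fallback above keeps this header compiling on toolchains that lack Apple's availability machinery; the stub must now be variadic because API_AVAILABLE() takes a variable number of platform arguments, unlike the old two-argument form. A hedged sketch of the same technique (the annotated declaration is illustrative):

    // Off-Darwin, the annotation expands to nothing and the declaration
    // below still parses; on Darwin the real definition is used instead.
    #ifndef API_AVAILABLE
    #define API_AVAILABLE(...)
    #endif

    API_AVAILABLE(macos(10.9), ios(7.0))
    extern int example_symbol; // hypothetical declaration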
 
diff --git a/private/io_private.h b/private/io_private.h
index 0bb1e3b..2932581 100644
--- a/private/io_private.h
+++ b/private/io_private.h
@@ -79,7 +79,7 @@
  *		param error	An errno condition for the read operation or
  *				zero if the read was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW
 void
 dispatch_read_f(dispatch_fd_t fd,
@@ -121,7 +121,7 @@
  *		param error	An errno condition for the write operation or
  *				zero if the write was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL5
 DISPATCH_NOTHROW
 void
@@ -160,7 +160,7 @@
  * @result	The newly created dispatch I/O channel or NULL if an error
  *		occurred (invalid type specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_io_t
@@ -200,7 +200,7 @@
  * @result	The newly created dispatch I/O channel or NULL if an error
  *		occurred (invalid type or non-absolute path specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_io_t
@@ -244,7 +244,7 @@
  * @result	The newly created dispatch I/O channel or NULL if an error
  *		occurred (invalid type specified).
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_io_t
@@ -311,7 +311,7 @@
  *	param error	An errno condition for the read operation or zero if
  *			the read was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL6
 DISPATCH_NOTHROW
 void
@@ -368,7 +368,7 @@
  *	param error	An errno condition for the write operation or zero
  *			if the write was successful.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4
 DISPATCH_NONNULL6 DISPATCH_NOTHROW
 void
@@ -402,7 +402,7 @@
  *			the barrier function.
  * @param barrier	The barrier function.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_io_barrier_f(dispatch_io_t channel,
diff --git a/private/layout_private.h b/private/layout_private.h
index bf93ee9..0c0cd94 100644
--- a/private/layout_private.h
+++ b/private/layout_private.h
@@ -29,7 +29,7 @@
 __BEGIN_DECLS
 
 #if !TARGET_OS_WIN32
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT const struct dispatch_queue_offsets_s {
 	// always add new fields at the end
 	const uint16_t dqo_version;
@@ -60,7 +60,7 @@
  * SPI intended for CoreSymbolication only
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT const struct dispatch_tsd_indexes_s {
 	// always add new fields at the end
 	const uint16_t dti_version;
diff --git a/private/mach_private.h b/private/mach_private.h
index 2228436..343a9d8 100644
--- a/private/mach_private.h
+++ b/private/mach_private.h
@@ -36,7 +36,7 @@
 
 #if DISPATCH_MACH_SPI
 
-#define DISPATCH_MACH_SPI_VERSION 20160505
+#define DISPATCH_MACH_SPI_VERSION 20160915
 
 #include <mach/mach.h>
 
@@ -109,6 +109,12 @@
  * result operation and never passed to a channel handler. Indicates that the
  * message passed to the send operation must not be disposed of until it is
  * returned via the channel handler.
+ *
+ * @const DISPATCH_MACH_SIGTERM_RECEIVED
+ * A SIGTERM signal has been received. This notification is delivered at most
+ * once during the lifetime of the channel. This event is sent only for XPC
+ * channels (i.e. channels that were created by calling
+ * dispatch_mach_create_4libxpc()).
  */
 DISPATCH_ENUM(dispatch_mach_reason, unsigned long,
 	DISPATCH_MACH_CONNECTED = 1,
@@ -121,6 +127,7 @@
 	DISPATCH_MACH_CANCELED,
 	DISPATCH_MACH_REPLY_RECEIVED,
 	DISPATCH_MACH_NEEDS_DEFERRED_SEND,
+	DISPATCH_MACH_SIGTERM_RECEIVED,
 	DISPATCH_MACH_REASON_LAST, /* unused */
 );
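
A hedged sketch of how a channel handler might react to the new reason code; the signature follows dispatch_mach_handler_function_t, and the helper names are hypothetical:

    static void
    example_channel_handler(void *context, dispatch_mach_reason_t reason,
            dispatch_mach_msg_t message, mach_error_t error)
    {
        switch (reason) {
        case DISPATCH_MACH_SIGTERM_RECEIVED:
            // Delivered at most once, and only for channels created with
            // dispatch_mach_create_4libxpc(): begin an orderly shutdown.
            example_begin_shutdown(context); // hypothetical helper
            break;
        case DISPATCH_MACH_MESSAGE_RECEIVED:
            example_handle_message(context, message); // hypothetical helper
            break;
        default:
            break;
        }
        (void)error;
    }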
 
@@ -202,7 +209,7 @@
  *						buffer, or NULL.
  * @result				A newly created dispatch mach message object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_mach_msg_t
@@ -219,7 +226,7 @@
  *					size of the message buffer, or NULL.
  * @result			Pointer to message buffer underlying the object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 mach_msg_header_t*
 dispatch_mach_msg_get_msg(dispatch_mach_msg_t message,
@@ -267,7 +274,7 @@
  * @result
  * The newly created dispatch mach channel.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 dispatch_mach_t
@@ -321,7 +328,7 @@
  * @result
  * The newly created dispatch mach channel.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NONNULL4 DISPATCH_NOTHROW
 dispatch_mach_t
@@ -354,7 +361,7 @@
  * to channel cancellation or reconnection) and the channel handler has
  * returned. May be NULL.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_mach_connect(dispatch_mach_t channel, mach_port_t receive,
@@ -385,7 +392,7 @@
 * is complete (or not performed due to channel cancellation or reconnection)
  * and the channel handler has returned. May be NULL.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_mach_reconnect(dispatch_mach_t channel, mach_port_t send,
@@ -408,7 +415,7 @@
  * @param channel
  * The mach channel to cancel.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_mach_cancel(dispatch_mach_t channel);
@@ -451,7 +458,7 @@
  * Additional send options to pass to mach_msg() when performing the send
  * operation.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW
 void
 dispatch_mach_send(dispatch_mach_t channel, dispatch_mach_msg_t message,
@@ -519,8 +526,7 @@
  * Out parameter to return the error from the immediate send attempt.
  * If a deferred send is required, returns 0. Must not be NULL.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5
 DISPATCH_NONNULL6 DISPATCH_NOTHROW
 void
@@ -580,7 +586,7 @@
  * @result
  * The received reply message object, or NULL if the channel was canceled.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+API_AVAILABLE(macos(10.11), ios(9.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW
 dispatch_mach_msg_t _Nullable
@@ -662,8 +668,7 @@
  * @result
  * The received reply message object, or NULL if the channel was canceled.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 DISPATCH_NONNULL6
 DISPATCH_NOTHROW
@@ -688,7 +693,7 @@
  * @param barrier
  * The barrier block to submit to the channel target queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_mach_send_barrier(dispatch_mach_t channel, dispatch_block_t barrier);
@@ -711,7 +716,7 @@
  * @param barrier
  * The barrier function to submit to the channel target queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *_Nullable context,
@@ -731,7 +736,7 @@
  * @param barrier
  * The barrier block to submit to the channel target queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_mach_receive_barrier(dispatch_mach_t channel,
@@ -754,7 +759,7 @@
  * @param barrier
  * The barrier function to submit to the channel target queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *_Nullable context,
@@ -781,11 +786,99 @@
  * @result
  * The most recently specified check-in port for the channel.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 mach_port_t
 dispatch_mach_get_checkin_port(dispatch_mach_t channel);
 
+// SPI for libxpc
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+typedef const struct dispatch_mach_xpc_hooks_s {
+#define DISPATCH_MACH_XPC_HOOKS_VERSION     1
+	unsigned long version;
+
+	/* Fields available in version 1. */
+
+	/*
+	 * Called to handle a Mach message event inline if possible. Returns true
+	 * if the event was handled, false if the event should be delivered to the
+	 * channel event handler. The implementation should not make any assumptions
+	 * about the thread in which the function is called and cannot assume that
+	 * invocations of this function are serialized relative to each other or
+	 * relative to the channel's event handler function. In addition, the
+	 * handler must not throw an exception or call out to any code that might
+	 * throw an exception.
+	 */
+	bool (* _Nonnull dmxh_direct_message_handler)(void *_Nullable context,
+		dispatch_mach_reason_t reason, dispatch_mach_msg_t message,
+		mach_error_t error);
+} *dispatch_mach_xpc_hooks_t;
+
+/*!
+ * @function dispatch_mach_xpc_hooks_install_4libxpc
+ *
+ * @abstract
+ * Installs XPC callbacks for dispatch Mach channels.
+ *
+ * @discussion
+ * In order to improve the performance of the XPC/dispatch interface, it is
+ * sometimes useful for dispatch to be able to call directly into XPC. The
+ * channel hooks structure should be initialized with pointers to XPC callback
+ * functions, or NULL for callbacks that XPC does not support. The version
+ * number in the structure must be set to reflect the fields that have been
+ * initialized. This function may be called only once.
+ *
+ * @param hooks
+ * A pointer to the channel hooks structure. This must remain valid once set.
+ */
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_mach_xpc_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks);
+
+/*!
+ * @function dispatch_mach_create_4libxpc
+ * Create a dispatch mach channel to asynchronously receive and send mach
+ * messages, specifically for libxpc.
+ *
+ * The specified handler will be called with the corresponding reason parameter
+ * for each message received and for each message that was successfully sent,
+ * that failed to be sent, or was not sent; as well as when a barrier block
+ * has completed, or when channel connection, reconnection or cancellation has
+ * taken effect. However, the handler will not be called for messages that
+ * were handled by the XPC hooks' dmxh_direct_message_handler function, i.e.
+ * for messages for which that function returned true.
+ *
+ * Dispatch mach channels are created in a disconnected state; they must be
+ * connected via dispatch_mach_connect() before they can begin receiving and
+ * sending messages.
+ *
+ * @param label
+ * An optional string label to attach to the channel. The string is not
+ * copied; if it is non-NULL, it must point to storage that remains valid
+ * for the lifetime of the channel object. May be NULL.
+ *
+ * @param queue
+ * The target queue of the channel, where the handler and barrier blocks will
+ * be submitted.
+ *
+ * @param context
+ * The application-defined context to pass to the handler.
+ *
+ * @param handler
+ * The handler function to submit when a message has been sent or received.
+ *
+ * @result
+ * The newly created dispatch mach channel.
+ */
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NONNULL4 DISPATCH_NOTHROW
+dispatch_mach_t
+dispatch_mach_create_4libxpc(const char *_Nullable label,
+		dispatch_queue_t _Nullable queue, void *_Nullable context,
+		dispatch_mach_handler_function_t handler);
+
 DISPATCH_ASSUME_NONNULL_END
 
 #endif // DISPATCH_MACH_SPI
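
Taken together, the two 4libxpc entry points above are intended to be used once at library initialization. A hedged sketch, reusing the hypothetical channel handler from earlier; only version-1 fields are shown because that is all the structure currently defines:

    static bool
    example_direct_handler(void *context, dispatch_mach_reason_t reason,
            dispatch_mach_msg_t message, mach_error_t error)
    {
        // Return true if handled inline, false to defer to the channel
        // event handler. Must not throw or call code that might throw.
        (void)context; (void)reason; (void)message; (void)error;
        return false;
    }

    static const struct dispatch_mach_xpc_hooks_s example_hooks = {
        .version = DISPATCH_MACH_XPC_HOOKS_VERSION,
        .dmxh_direct_message_handler = example_direct_handler,
    };

    static void
    example_init(void) // hypothetical one-time initializer
    {
        // The hooks structure must remain valid after installation.
        dispatch_mach_xpc_hooks_install_4libxpc(&example_hooks);
        dispatch_mach_t dm = dispatch_mach_create_4libxpc("example.channel",
                NULL, NULL, example_channel_handler);
        dispatch_mach_connect(dm, MACH_PORT_NULL, MACH_PORT_NULL, NULL);
    }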
diff --git a/private/private.h b/private/private.h
index 3c37bed..82da15e 100644
--- a/private/private.h
+++ b/private/private.h
@@ -66,7 +66,7 @@
 #endif /* !__DISPATCH_BUILDING_DISPATCH__ */
 
 // <rdar://problem/9627726> Check that public and private dispatch headers match
-#if DISPATCH_API_VERSION != 20160712 // Keep in sync with <dispatch/dispatch.h>
+#if DISPATCH_API_VERSION != 20160831 // Keep in sync with <dispatch/dispatch.h>
 #error "Dispatch header mismatch between /usr/include and /usr/local/include"
 #endif
 
@@ -93,7 +93,7 @@
  * Boolean indicating whether the process has used libdispatch and become
  * multithreaded.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
+API_AVAILABLE(macos(10.8), ios(6.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 bool _dispatch_is_multithreaded(void);
 
@@ -117,7 +117,7 @@
  * Boolean indicating whether the parent process had used libdispatch and
  * become multithreaded at the time of fork.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 bool _dispatch_is_fork_of_multithreaded_parent(void);
 
@@ -144,8 +144,7 @@
  * If the program already used dispatch before the guard is enabled, then
  * this function will abort immediately.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void _dispatch_prohibit_transition_to_multithreaded(bool prohibit);
 
@@ -187,31 +186,23 @@
 #endif
 
 #if TARGET_OS_MAC
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_runloop_handle_t
 _dispatch_get_main_queue_port_4CF(void);
 #endif
 
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 dispatch_runloop_handle_t
 _dispatch_get_main_queue_handle_4CF(void);
 
-#if TARGET_OS_MAC
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
-DISPATCH_EXPORT DISPATCH_NOTHROW
-void
-_dispatch_main_queue_callback_4CF(mach_msg_header_t *_Null_unspecified msg);
-#else
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 _dispatch_main_queue_callback_4CF(void *_Null_unspecified msg);
-#endif
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_queue_t
@@ -219,33 +210,33 @@
 		unsigned long flags);
 
 #if TARGET_OS_MAC
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 mach_port_t
 _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t queue);
 #endif
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t queue);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 bool
 _dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t queue);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 _dispatch_source_set_runloop_timer_4CF(dispatch_source_t source,
 		dispatch_time_t start, uint64_t interval, uint64_t leeway);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT
 void *_Nonnull (*_Nullable _dispatch_begin_NSAutoReleasePool)(void);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+API_AVAILABLE(macos(10.6), ios(4.0))
 DISPATCH_EXPORT
 void (*_Nullable _dispatch_end_NSAutoReleasePool)(void *);
 
diff --git a/private/queue_private.h b/private/queue_private.h
index 33de371..14d6477 100644
--- a/private/queue_private.h
+++ b/private/queue_private.h
@@ -79,7 +79,7 @@
  * This new value combines the attributes specified by the 'attr' parameter and
  * the overcommit flag.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW
 dispatch_queue_attr_t
 dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr,
@@ -99,6 +99,39 @@
 #define DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE INT8_MIN
 
 /*!
+ * @function dispatch_queue_set_label_nocopy
+ *
+ * @abstract
+ * Set the label for a given queue, without copying the input string.
+ *
+ * @discussion
+ * The queue must have been initially created with a NULL label; otherwise
+ * the behavior of using this function to set the queue label is undefined.
+ *
+ * The caller of this function must ensure that the label pointer remains
+ * valid while it is in use as the queue label and while any caller of
+ * dispatch_queue_get_label() may still hold it. Since the queue lifetime
+ * may extend past the last release, it is advisable to call this function
+ * with a constant string or NULL before the queue is released, or to
+ * destroy the label from a finalizer for that queue.
+ *
+ * This function should be called before any work item could call
+ * dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL), or else from the
+ * context of the queue itself.
+ *
+ * @param queue
+ * The queue to adjust. Attempts to set the label of the main queue or a global
+ * concurrent queue will be ignored.
+ *
+ * @param label
+ * The new label for the queue.
+ */
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+void
+dispatch_queue_set_label_nocopy(dispatch_queue_t queue, const char *label);
+
+/*!
  * @function dispatch_queue_set_width
  *
  * @abstract
@@ -115,8 +148,8 @@
  * with the desired concurrency width.
  *
  * @param queue
- * The queue to adjust. Passing the main queue or a global concurrent queue
- * will be ignored.
+ * The queue to adjust. Attempts to set the width of the main queue or a global
+ * concurrent queue will be ignored.
  *
  * @param width
  * The new maximum width of concurrency depending on available resources.
@@ -128,8 +161,8 @@
 #define DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS	-2
 #define DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS	-3
 
-__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_6,__MAC_10_10,__IPHONE_4_0,__IPHONE_8_0, \
-		"Use dispatch_queue_create(name, DISPATCH_QUEUE_CONCURRENT) instead")
+API_DEPRECATED("Use dispatch_queue_create(name, DISPATCH_QUEUE_CONCURRENT)",
+		macos(10.6,10.10), ios(4.0,8.0))
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_queue_set_width(dispatch_queue_t dq, long width);
@@ -189,7 +222,7 @@
  * @result
  * The newly created dispatch pthread root queue.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+API_AVAILABLE(macos(10.9), ios(6.0))
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_queue_t
@@ -238,8 +271,7 @@
  * @result
  * A new reference to a pthread root queue object or NULL.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_queue_t _Nullable
 dispatch_pthread_root_queue_copy_current(void);
@@ -284,7 +316,7 @@
  * dispatch_async_f().
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+API_AVAILABLE(macos(10.11), ios(9.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_async_enforce_qos_class_f(dispatch_queue_t queue,
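
A hedged usage sketch for the dispatch_queue_set_label_nocopy() function added above, inside some owner's setup code; the label string is illustrative, and a string literal is used because its static storage trivially outlives the queue:

    // The queue must be created with a NULL label for this to be defined.
    dispatch_queue_t q = dispatch_queue_create(NULL, DISPATCH_QUEUE_SERIAL);

    // Safe: a string literal remains valid for the queue's whole lifetime
    // and for any dispatch_queue_get_label() caller that obtained it.
    dispatch_queue_set_label_nocopy(q, "com.example.worker");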
diff --git a/private/source_private.h b/private/source_private.h
index bb13702..228a23e 100644
--- a/private/source_private.h
+++ b/private/source_private.h
@@ -37,17 +37,6 @@
 __BEGIN_DECLS
 
 /*!
- * @const DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE
- * @discussion A dispatch timer source that is part of a timer aggregate.
- * The handle is the dispatch timer aggregate object.
- * The mask specifies which flags from dispatch_source_timer_flags_t to apply.
- */
-#define DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE \
-		(&_dispatch_source_type_timer_with_aggregate)
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
-DISPATCH_SOURCE_TYPE_DECL(timer_with_aggregate);
-
-/*!
  * @const DISPATCH_SOURCE_TYPE_INTERVAL
  * @discussion A dispatch source that submits the event handler block at a
  * specified time interval, phase-aligned with all other interval sources on
@@ -69,7 +58,7 @@
  * The mask specifies which flags from dispatch_source_timer_flags_t to apply.
  */
 #define DISPATCH_SOURCE_TYPE_INTERVAL (&_dispatch_source_type_interval)
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+API_AVAILABLE(macos(10.9), ios(7.0))
 DISPATCH_SOURCE_TYPE_DECL(interval);
 
 /*!
@@ -79,8 +68,8 @@
  * The handle is a process identifier (pid_t).
  */
 #define DISPATCH_SOURCE_TYPE_VFS (&_dispatch_source_type_vfs)
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
-DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs;
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
+DISPATCH_SOURCE_TYPE_DECL(vfs);
 
 /*!
  * @const DISPATCH_SOURCE_TYPE_VM
@@ -89,10 +78,9 @@
  * This type is deprecated, use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead.
  */
 #define DISPATCH_SOURCE_TYPE_VM (&_dispatch_source_type_vm)
-__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_7, __MAC_10_10, __IPHONE_4_3,
-		__IPHONE_8_0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead")
-DISPATCH_LINUX_UNAVAILABLE()
-DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm;
+API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_SOURCE_TYPE_MEMORYPRESSURE",
+		macos(10.7,10.10), ios(4.3,8.0)) DISPATCH_LINUX_UNAVAILABLE()
+DISPATCH_SOURCE_TYPE_DECL(vm);
 
 /*!
  * @const DISPATCH_SOURCE_TYPE_MEMORYSTATUS
@@ -101,21 +89,18 @@
  * dispatch_source_memorystatus_flags_t.
  */
 #define DISPATCH_SOURCE_TYPE_MEMORYSTATUS (&_dispatch_source_type_memorystatus)
-__OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead")
-__IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead")
-__TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead")
-__WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead")
+API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_SOURCE_TYPE_MEMORYPRESSURE",
+		macos(10.9, 10.12), ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0))
 DISPATCH_LINUX_UNAVAILABLE()
-DISPATCH_EXPORT const struct dispatch_source_type_s
-		_dispatch_source_type_memorystatus;
+DISPATCH_SOURCE_TYPE_DECL(memorystatus);
 
 /*!
  * @const DISPATCH_SOURCE_TYPE_SOCK
  * @discussion A dispatch source that monitors events on socket state changes.
  */
 #define DISPATCH_SOURCE_TYPE_SOCK (&_dispatch_source_type_sock)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE()
-DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_sock;
+API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE()
+DISPATCH_SOURCE_TYPE_DECL(sock);
 
 __END_DECLS
 
@@ -271,8 +256,8 @@
  * This flag is deprecated and will be removed in a future release.
  */
 enum {
-	DISPATCH_PROC_REAP __OSX_AVAILABLE_BUT_DEPRECATED(
-			__MAC_10_6, __MAC_10_9, __IPHONE_4_0, __IPHONE_7_0) = 0x10000000,
+	DISPATCH_PROC_REAP API_DEPRECATED("unsupported flag",
+			macos(10.6,10.9), ios(4.0,7.0)) = 0x10000000,
 };
 
 /*!
@@ -283,9 +268,9 @@
  */
 
 enum {
-	DISPATCH_VM_PRESSURE __OSX_AVAILABLE_BUT_DEPRECATED_MSG(
-			__MAC_10_7, __MAC_10_10, __IPHONE_4_3, __IPHONE_8_0,
-			"Use DISPATCH_MEMORYPRESSURE_WARN instead") = 0x80000000,
+	DISPATCH_VM_PRESSURE
+			API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_WARN",
+					macos(10.7, 10.10), ios(4.3, 8.0)) = 0x80000000,
 };
 
 /*!
@@ -298,7 +283,7 @@
  */
 enum {
 	DISPATCH_MEMORYPRESSURE_LOW_SWAP
-			__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x08,
+			DISPATCH_ENUM_AVAILABLE(macos(10.10), ios(8.0)) = 0x08,
 };
 
 /*!
@@ -306,30 +291,14 @@
  * @warning Deprecated, see DISPATCH_MEMORYPRESSURE_*
  */
 enum {
-	DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL
-			__OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead")
-			__IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead")
-			__TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead")
-			__WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead")
-			= 0x01,
-	DISPATCH_MEMORYSTATUS_PRESSURE_WARN
-			__OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_WARN instead")
-			__IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead")
-			__TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead")
-			__WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead")
-			= 0x02,
-	DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL
-			__OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead")
-			__IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead")
-			__TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead")
-			__WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead")
-			= 0x04,
-	DISPATCH_MEMORYSTATUS_LOW_SWAP
-			__OSX_DEPRECATED(10.10, 10.12, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead")
-			__IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead")
-			__TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead")
-			__WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead")
-			= 0x08,
+	DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL
+			API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_NORMAL",
+			macos(10.9, 10.12), ios(6.0, 10.0),
+			tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x01,
+	DISPATCH_MEMORYSTATUS_PRESSURE_WARN
+			API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_WARN",
+			macos(10.9, 10.12), ios(6.0, 10.0),
+			tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x02,
+	DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL
+			API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_CRITICAL",
+			macos(10.9, 10.12), ios(6.0, 10.0),
+			tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x04,
+	DISPATCH_MEMORYSTATUS_LOW_SWAP
+			API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_LOW_SWAP",
+			macos(10.9, 10.12), ios(6.0, 10.0),
+			tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x08,
 };
 
 /*!
@@ -343,19 +312,87 @@
  * The memory of the process has reached 100% of its high watermark limit.
  */
 enum {
-	DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN
-			__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10)
-			__TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x10,
+	DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN
+			DISPATCH_ENUM_AVAILABLE(macos(10.12), ios(10.10),
+			tvos(10.10), watchos(3.0)) = 0x10,
 
-	DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL
-		__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10)
-		__TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x20,
+	DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL
+			DISPATCH_ENUM_AVAILABLE(macos(10.12), ios(10.10),
+			tvos(10.10), watchos(3.0)) = 0x20,
 };
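
A hedged sketch of observing the new process-limit conditions through a memory pressure source; the mask values come from the enum above, while the queue choice and the response helper are illustrative:

    dispatch_source_t mp = dispatch_source_create(
            DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
            DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN |
            DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL,
            dispatch_get_main_queue());
    dispatch_source_set_event_handler(mp, ^{
        uintptr_t flags = dispatch_source_get_data(mp);
        if (flags & DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) {
            example_release_memory(); // hypothetical response
        }
    });
    dispatch_resume(mp);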
 
 
 __BEGIN_DECLS
 
 /*!
+ * @function dispatch_source_set_mandatory_cancel_handler
+ *
+ * @abstract
+ * Sets the cancellation handler block for the given dispatch source, and
+ * indicates that calling dispatch_source_cancel() is mandatory for this
+ * source object.
+ *
+ * @discussion
+ * The cancellation handler (if specified) will be submitted to the source's
+ * target queue in response to a call to dispatch_source_cancel() once the
+ * system has released all references to the source's underlying handle and
+ * the source's event handler block has returned.
+ *
+ * When this function has been used to set a cancellation handler, any of
+ * the following results in an assertion and the process being terminated:
+ * - releasing the last reference on the dispatch source without having
+ *   cancelled it by calling dispatch_source_cancel();
+ * - changing any handler after the source has been activated;
+ * - changing the target queue of the source after it has been activated.
+ *
+ * IMPORTANT:
+ * Source cancellation and a cancellation handler are required for file
+ * descriptor and mach port based sources in order to safely close the
+ * descriptor or destroy the port. Making the cancellation handler of such
+ * sources mandatory is strongly recommended.
+ * Closing the descriptor or port before the cancellation handler is invoked may
+ * result in a race condition. If a new descriptor is allocated with the same
+ * value as the recently closed descriptor while the source's event handler is
+ * still running, the event handler may read/write data to the wrong descriptor.
+ *
+ * @param source
+ * The dispatch source to modify.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param handler
+ * The cancellation handler block to submit to the source's target queue.
+ * The result of passing NULL in this parameter is undefined.
+ */
+#ifdef __BLOCKS__
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_source_set_mandatory_cancel_handler(dispatch_source_t source,
+		dispatch_block_t handler);
+#endif /* __BLOCKS__ */
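
A hedged sketch of the intended pattern for a descriptor-based source using the mandatory variant above, assuming the usual dispatch and POSIX headers; the path and read helper are illustrative:

    int fd = open("/tmp/example", O_RDONLY); // illustrative descriptor
    dispatch_source_t ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ,
            (uintptr_t)fd, 0,
            dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0));
    dispatch_source_set_event_handler(ds, ^{
        example_drain(fd); // hypothetical helper
    });
    dispatch_source_set_mandatory_cancel_handler(ds, ^{
        // Safe to close here: the system holds no more references to fd.
        close(fd);
    });
    dispatch_resume(ds);

    // Later: releasing 'ds' without this cancel would now assert.
    dispatch_source_cancel(ds);
    dispatch_release(ds);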
+
+/*!
+ * @function dispatch_source_set_mandatory_cancel_handler_f
+ *
+ * @abstract
+ * Sets the cancellation handler function for the given dispatch source, and
+ * causes an assertion if this source is released before having been
+ * explicitly canceled.
+ *
+ * @discussion
+ * See dispatch_source_set_mandatory_cancel_handler() for more details.
+ *
+ * @param source
+ * The dispatch source to modify.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param handler
+ * The cancellation handler function to submit to the source's target queue.
+ * The context parameter passed to the cancellation handler function is the
+ * current context of the dispatch source at the time the handler call is made.
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_source_set_mandatory_cancel_handler_f(dispatch_source_t source,
+		dispatch_function_t handler);
+
+/*!
  * @function dispatch_source_cancel_and_wait
  *
  * @abstract
@@ -400,64 +437,11 @@
  * The dispatch source to be canceled.
  * The result of passing NULL in this parameter is undefined.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10)
-__TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0)
+API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 dispatch_source_cancel_and_wait(dispatch_source_t source);
 
-/*!
- * @typedef dispatch_timer_aggregate_t
- *
- * @abstract
- * Dispatch timer aggregates are sets of related timers.
- */
-DISPATCH_DECL(dispatch_timer_aggregate);
-
-/*!
- * @function dispatch_timer_aggregate_create
- *
- * @abstract
- * Creates a new dispatch timer aggregate.
- *
- * @discussion
- * A dispatch timer aggregate is a set of related timers whose overall timing
- * parameters can be queried.
- *
- * Timers are added to an aggregate when a timer source is created with type
- * DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE.
- *
- * @result
- * The newly created dispatch timer aggregate.
- */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
-DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
-DISPATCH_NOTHROW
-dispatch_timer_aggregate_t
-dispatch_timer_aggregate_create(void);
-
-/*!
- * @function dispatch_timer_aggregate_get_delay
- *
- * @abstract
- * Retrieves the delay until a timer in the given aggregate will next fire.
- *
- * @param aggregate
- * The dispatch timer aggregate to query.
- *
- * @param leeway_ptr
- * Optional pointer to a variable filled with the leeway (in ns) that will be
- * applied to the return value. May be NULL.
- *
- * @result
- * Delay in ns from now.
- */
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
-DISPATCH_EXPORT DISPATCH_NOTHROW
-uint64_t
-dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate,
-		uint64_t *_Nullable leeway_ptr);
-
 #if __has_include(<mach/mach.h>)
 /*!
  * @typedef dispatch_mig_callback_t
@@ -468,7 +452,7 @@
 typedef boolean_t (*dispatch_mig_callback_t)(mach_msg_header_t *message,
 		mach_msg_header_t *reply);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 mach_msg_return_t
 dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz,
@@ -480,7 +464,7 @@
  * @abstract
  * Extract the context pointer from a mach message trailer.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE()
+API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NONNULL_ALL
 DISPATCH_NOTHROW
 void *_Nullable
diff --git a/src/Makefile.am b/src/Makefile.am
index a574288..6517b74 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -9,38 +9,45 @@
 lib_LTLIBRARIES=libdispatch.la
 endif
 
-libdispatch_la_SOURCES=		\
-	allocator.c				\
-	apply.c					\
-	benchmark.c				\
-	data.c					\
+libdispatch_la_SOURCES=			\
+	allocator.c			\
+	apply.c				\
+	benchmark.c			\
+	data.c				\
+	init.c				\
 	introspection.c			\
-	init.c					\
-	io.c					\
-	object.c				\
-	once.c					\
-	queue.c					\
-	semaphore.c				\
-	source.c				\
-	time.c					\
-	transform.c				\
-	voucher.c				\
+	io.c				\
+	mach.c				\
+	object.c			\
+	once.c				\
+	queue.c				\
+	semaphore.c			\
+	source.c			\
+	time.c				\
+	transform.c			\
+	voucher.c			\
 	protocol.defs			\
-	provider.d				\
-	allocator_internal.h	\
+	provider.d			\
+	allocator_internal.h		\
 	data_internal.h			\
 	inline_internal.h		\
-	internal.h				\
+	internal.h			\
 	introspection_internal.h	\
 	io_internal.h			\
+	mach_internal.h			\
 	object_internal.h		\
 	queue_internal.h		\
-	semaphore_internal.h	\
-	shims.h					\
+	semaphore_internal.h		\
+	shims.h				\
 	source_internal.h		\
-	trace.h					\
+	trace.h				\
 	voucher_internal.h		\
-	firehose/firehose_internal.h \
+	event/event.c			\
+	event/event_config.h		\
+	event/event_epoll.c		\
+	event/event_internal.h		\
+	event/event_kevent.c		\
+	firehose/firehose_internal.h	\
 	shims/android_stubs.h	\
 	shims/atomic.h			\
 	shims/atomic_sfb.h		\
@@ -52,7 +59,7 @@
 	shims/lock.h			\
 	shims/perfmon.h			\
 	shims/time.h			\
-	shims/tsd.h				\
+	shims/tsd.h			\
 	shims/yield.h
 
 EXTRA_libdispatch_la_SOURCES=
@@ -65,9 +72,9 @@
 if DISPATCH_ENABLE_ASSERTS
 DISPATCH_CFLAGS+=-DDISPATCH_DEBUG=1
 endif
-AM_CFLAGS= $(KQUEUE_CFLAGS) $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS)
+AM_CFLAGS= $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS)
 AM_OBJCFLAGS=$(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS)
-AM_CXXFLAGS=$(KQUEUE_CFLAGS) $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS)
+AM_CXXFLAGS=$(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS)
 AM_OBJCXXFLAGS=$(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS)
 
 if BUILD_OWN_KQUEUES
@@ -94,7 +101,7 @@
 endif
 
 libdispatch_la_LDFLAGS=-avoid-version
-libdispatch_la_LIBADD=$(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(BLOCKS_RUNTIME_LIBS)
+libdispatch_la_LIBADD=$(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(BLOCKS_RUNTIME_LIBS)
 
 if HAVE_DARWIN_LD
 libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 \
diff --git a/src/apply.c b/src/apply.c
index e051a16..2b83c10 100644
--- a/src/apply.c
+++ b/src/apply.c
@@ -52,10 +52,10 @@
 	_dispatch_thread_context_push(&apply_ctxt);
 
 	dispatch_thread_frame_s dtf;
-	pthread_priority_t old_dp;
+	dispatch_priority_t old_dbp = 0;
 	if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) {
 		_dispatch_thread_frame_push(&dtf, dq);
-		old_dp = _dispatch_set_defaultpriority(dq->dq_priority, NULL);
+		old_dbp = _dispatch_set_basepri(dq->dq_priority);
 	}
 	dispatch_invoke_flags_t flags = da->da_flags;
 
@@ -70,7 +70,7 @@
 	} while (fastpath(idx < iter));
 
 	if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) {
-		_dispatch_reset_defaultpriority(old_dp);
+		_dispatch_reset_basepri(old_dbp);
 		_dispatch_thread_frame_pop(&dtf);
 	}
 
@@ -182,8 +182,8 @@
 
 	_dispatch_thread_event_init(&da->da_event);
 
-	_dispatch_queue_push_list(dq, head, tail, head->dc_priority,
-			continuation_cnt);
+	dispatch_qos_t qos = _dispatch_qos_from_pp(head->dc_priority);
+	_dispatch_queue_push_list(dq, head, tail, qos, continuation_cnt);
 	// Call the first element directly
 	_dispatch_apply_invoke_and_wait(da);
 }
@@ -252,7 +252,7 @@
 	}
 	if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) {
 		dq = old_dq ? old_dq : _dispatch_get_root_queue(
-				_DISPATCH_QOS_CLASS_DEFAULT, false);
+				DISPATCH_QOS_DEFAULT, false);
 		while (slowpath(dq->do_targetq)) {
 			dq = dq->do_targetq;
 		}
diff --git a/src/data.c b/src/data.c
index 6443289..adcfbb2 100644
--- a/src/data.c
+++ b/src/data.c
@@ -433,7 +433,7 @@
 
 	// find the record containing the end of the current range
 	// and optimize the case when you just remove bytes at the origin
-	size_t count, last_length;
+	size_t count, last_length = 0;
 
 	if (to_the_end) {
 		count = dd_num_records - i;
diff --git a/src/data.m b/src/data.m
index 190b1ed..9971f18 100644
--- a/src/data.m
+++ b/src/data.m
@@ -29,8 +29,8 @@
 #include <Foundation/NSString.h>
 
 @interface DISPATCH_CLASS(data) () <DISPATCH_CLASS(data)>
-@property (readonly) NSUInteger length;
-@property (readonly) const void *bytes NS_RETURNS_INNER_POINTER;
+@property (readonly,nonatomic) NSUInteger length;
+@property (readonly,nonatomic) const void *bytes NS_RETURNS_INNER_POINTER;
 
 - (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy
 		freeWhenDone:(BOOL)freeBytes bytesAreVM:(BOOL)vm;
@@ -124,9 +124,9 @@
 	if (!nsstring) return nil;
 	char buf[2048];
 	_dispatch_data_debug(self, buf, sizeof(buf));
-	return [nsstring stringWithFormat:
-			[nsstring stringWithUTF8String:"<%s: %s>"],
-			class_getName([self class]), buf];
+	NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"];
+	if (!format) return nil;
+	return [nsstring stringWithFormat:format, class_getName([self class]), buf];
 }
 
 - (NSUInteger)length {
diff --git a/src/event/event.c b/src/event/event.c
new file mode 100644
index 0000000..580c15b
--- /dev/null
+++ b/src/event/event.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "internal.h"
+
+DISPATCH_NOINLINE
+static dispatch_unote_t
+_dispatch_unote_create(dispatch_source_type_t dst,
+		uintptr_t handle, unsigned long mask)
+{
+	dispatch_unote_linkage_t dul;
+	dispatch_unote_class_t du;
+
+	if (mask & ~dst->dst_mask) {
+		return DISPATCH_UNOTE_NULL;
+	}
+
+	if (dst->dst_filter != DISPATCH_EVFILT_TIMER) {
+		if (dst->dst_mask && !mask) {
+			return DISPATCH_UNOTE_NULL;
+		}
+	}
+
+	if ((dst->dst_flags & EV_UDATA_SPECIFIC) ||
+			(dst->dst_filter == DISPATCH_EVFILT_TIMER)) {
+		du = _dispatch_calloc(1u, dst->dst_size);
+	} else {
+		dul = _dispatch_calloc(1u, sizeof(*dul) + dst->dst_size);
+		du = _dispatch_unote_linkage_get_unote(dul)._du;
+	}
+	du->du_type = dst;
+	du->du_ident = (uint32_t)handle;
+	du->du_filter = dst->dst_filter;
+	du->du_fflags = (typeof(du->du_fflags))mask;
+	if (dst->dst_flags & EV_UDATA_SPECIFIC) {
+		du->du_is_direct = true;
+	}
+	if (dst->dst_flags & (EV_DISPATCH | EV_ONESHOT)) {
+		du->du_needs_rearm = true;
+	}
+	return (dispatch_unote_t){ ._du = du };
+}
+
+DISPATCH_NOINLINE
+dispatch_unote_t
+_dispatch_unote_create_with_handle(dispatch_source_type_t dst,
+		uintptr_t handle, unsigned long mask)
+{
+	if (!handle) {
+		return DISPATCH_UNOTE_NULL;
+	}
+	return _dispatch_unote_create(dst, handle, mask);
+}
+
+DISPATCH_NOINLINE
+dispatch_unote_t
+_dispatch_unote_create_with_fd(dispatch_source_type_t dst,
+		uintptr_t handle, unsigned long mask)
+{
+#if !TARGET_OS_MAC // <rdar://problem/27756657>
+	if (handle > INT_MAX) {
+		return DISPATCH_UNOTE_NULL;
+	}
+#endif
+	dispatch_unote_t du = _dispatch_unote_create(dst, handle, mask);
+	if (du._du) {
+		int16_t filter = dst->dst_filter;
+		du._du->du_is_level = (filter == EVFILT_READ || filter == EVFILT_WRITE);
+	}
+	return du;
+}
+
+DISPATCH_NOINLINE
+dispatch_unote_t
+_dispatch_unote_create_without_handle(dispatch_source_type_t dst,
+		uintptr_t handle, unsigned long mask)
+{
+	if (handle) {
+		return DISPATCH_UNOTE_NULL;
+	}
+	return _dispatch_unote_create(dst, handle, mask);
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_unote_dispose(dispatch_unote_t du)
+{
+	void *ptr = du._du;
+#if HAVE_MACH
+	if (du._du->dmrr_handler_is_block) {
+		Block_release(du._dmrr->dmrr_handler_ctxt);
+	}
+#endif
+	if (du._du->du_is_timer) {
+		if (du._dt->dt_pending_config) {
+			free(du._dt->dt_pending_config);
+		}
+	} else if (!du._du->du_is_direct) {
+		ptr = _dispatch_unote_get_linkage(du);
+	}
+	free(ptr);
+}
+
+#pragma mark data or / add
+
+static dispatch_unote_t
+_dispatch_source_data_create(dispatch_source_type_t dst, uintptr_t handle,
+		unsigned long mask)
+{
+	if (handle || mask) {
+		return DISPATCH_UNOTE_NULL;
+	}
+
+	// bypass _dispatch_unote_create() because this is always "direct"
+	// even when EV_UDATA_SPECIFIC is 0
+	dispatch_unote_class_t du = _dispatch_calloc(1u, dst->dst_size);
+	du->du_type = dst;
+	du->du_filter = dst->dst_filter;
+	du->du_is_direct = true;
+	return (dispatch_unote_t){ ._du = du };
+}
+
+const dispatch_source_type_s _dispatch_source_type_data_add = {
+	.dst_kind       = "data-add",
+	.dst_filter     = DISPATCH_EVFILT_CUSTOM_ADD,
+	.dst_flags      = EV_UDATA_SPECIFIC|EV_CLEAR,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_source_data_create,
+	.dst_merge_evt  = NULL,
+};
+
+const dispatch_source_type_s _dispatch_source_type_data_or = {
+	.dst_kind       = "data-or",
+	.dst_filter     = DISPATCH_EVFILT_CUSTOM_OR,
+	.dst_flags      = EV_UDATA_SPECIFIC|EV_CLEAR,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_source_data_create,
+	.dst_merge_evt  = NULL,
+};
+
+#pragma mark file descriptors
+
+const dispatch_source_type_s _dispatch_source_type_read = {
+	.dst_kind       = "read",
+	.dst_filter     = EVFILT_READ,
+	.dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
+#if DISPATCH_EVENT_BACKEND_KEVENT
+#if HAVE_DECL_NOTE_LOWAT
+	.dst_fflags     = NOTE_LOWAT,
+#endif
+	.dst_data       = 1,
+#endif // DISPATCH_EVENT_BACKEND_KEVENT
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_unote_create_with_fd,
+	.dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+const dispatch_source_type_s _dispatch_source_type_write = {
+	.dst_kind       = "write",
+	.dst_filter     = EVFILT_WRITE,
+	.dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
+#if DISPATCH_EVENT_BACKEND_KEVENT
+#if HAVE_DECL_NOTE_LOWAT
+	.dst_fflags     = NOTE_LOWAT,
+#endif
+	.dst_data       = 1,
+#endif // DISPATCH_EVENT_BACKEND_KEVENT
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_unote_create_with_fd,
+	.dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+#pragma mark signals
+
+static dispatch_unote_t
+_dispatch_source_signal_create(dispatch_source_type_t dst, uintptr_t handle,
+		unsigned long mask)
+{
+	if (handle >= NSIG) {
+		return DISPATCH_UNOTE_NULL;
+	}
+	dispatch_unote_t du = _dispatch_unote_create_with_handle(dst, handle, mask);
+	if (du._du) {
+		du._du->du_is_adder = true;
+	}
+	return du;
+}
+
+const dispatch_source_type_s _dispatch_source_type_signal = {
+	.dst_kind       = "signal",
+	.dst_filter     = EVFILT_SIGNAL,
+	.dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_source_signal_create,
+	.dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+#pragma mark timers
+
+bool _dispatch_timers_reconfigure, _dispatch_timers_expired;
+uint32_t _dispatch_timers_processing_mask;
+#if DISPATCH_USE_DTRACE
+uint32_t _dispatch_timers_will_wake;
+#endif
+#define DISPATCH_TIMER_HEAP_INITIALIZER(tidx) \
+	[tidx] = { \
+		.dth_target = UINT64_MAX, \
+		.dth_deadline = UINT64_MAX, \
+	}
+#define DISPATCH_TIMER_HEAP_INIT(kind, qos) \
+		DISPATCH_TIMER_HEAP_INITIALIZER(DISPATCH_TIMER_INDEX( \
+		DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos))
+
+struct dispatch_timer_heap_s _dispatch_timers_heap[] = {
+	DISPATCH_TIMER_HEAP_INIT(WALL, NORMAL),
+	DISPATCH_TIMER_HEAP_INIT(MACH, NORMAL),
+#if DISPATCH_HAVE_TIMER_QOS
+	DISPATCH_TIMER_HEAP_INIT(WALL, CRITICAL),
+	DISPATCH_TIMER_HEAP_INIT(MACH, CRITICAL),
+	DISPATCH_TIMER_HEAP_INIT(WALL, BACKGROUND),
+	DISPATCH_TIMER_HEAP_INIT(MACH, BACKGROUND),
+#endif
+};
+
+static dispatch_unote_t
+_dispatch_source_timer_create(dispatch_source_type_t dst,
+		uintptr_t handle, unsigned long mask)
+{
+	uint32_t fflags = dst->dst_fflags;
+	dispatch_unote_t du;
+
+	// normalize flags
+	if (mask & DISPATCH_TIMER_STRICT) {
+		mask &= ~(unsigned long)DISPATCH_TIMER_BACKGROUND;
+	}
+
+	if (fflags & DISPATCH_TIMER_INTERVAL) {
+		if (!handle) return DISPATCH_UNOTE_NULL;
+		du = _dispatch_unote_create_without_handle(dst, 0, mask);
+	} else {
+		du = _dispatch_unote_create_without_handle(dst, handle, mask);
+	}
+
+	if (du._dt) {
+		du._dt->du_is_timer = true;
+		du._dt->du_is_adder = true;
+		du._dt->du_needs_rearm = true;
+		du._dt->du_fflags |= fflags;
+		du._dt->du_ident = _dispatch_source_timer_idx(du);
+		du._dt->dt_timer.target = UINT64_MAX;
+		du._dt->dt_timer.deadline = UINT64_MAX;
+		du._dt->dt_timer.interval = UINT64_MAX;
+	}
+	return du;
+}
+
+const dispatch_source_type_s _dispatch_source_type_timer = {
+	.dst_kind       = "timer",
+	.dst_filter     = DISPATCH_EVFILT_TIMER,
+	.dst_mask       = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND,
+	.dst_fflags     = 0,
+	.dst_size       = sizeof(struct dispatch_timer_source_refs_s),
+
+	.dst_create     = _dispatch_source_timer_create,
+};
+
+const dispatch_source_type_s _dispatch_source_type_after = {
+	.dst_kind       = "timer (after)",
+	.dst_filter     = DISPATCH_EVFILT_TIMER,
+	.dst_mask       = 0,
+	.dst_fflags     = DISPATCH_TIMER_AFTER,
+	.dst_size       = sizeof(struct dispatch_timer_source_refs_s),
+
+	.dst_create     = _dispatch_source_timer_create,
+};
+
+const dispatch_source_type_s _dispatch_source_type_interval = {
+	.dst_kind       = "timer (interval)",
+	.dst_filter     = DISPATCH_EVFILT_TIMER,
+	.dst_mask       = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND
+			|DISPATCH_INTERVAL_UI_ANIMATION,
+	.dst_fflags     = DISPATCH_TIMER_INTERVAL|DISPATCH_TIMER_CLOCK_MACH,
+	.dst_size       = sizeof(struct dispatch_timer_source_refs_s),
+
+	.dst_create     = _dispatch_source_timer_create,
+};
diff --git a/src/event/event_config.h b/src/event/event_config.h
new file mode 100644
index 0000000..6a82a70
--- /dev/null
+++ b/src/event/event_config.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __DISPATCH_EVENT_EVENT_CONFIG__
+#define __DISPATCH_EVENT_EVENT_CONFIG__
+
+#if defined(__linux__)
+#	include <sys/eventfd.h>
+#	define DISPATCH_EVENT_BACKEND_EPOLL 1
+#	define DISPATCH_EVENT_BACKEND_KEVENT 0
+#elif __has_include(<sys/event.h>)
+#	include <sys/event.h>
+#	define DISPATCH_EVENT_BACKEND_EPOLL 0
+#	define DISPATCH_EVENT_BACKEND_KEVENT 1
+#else
+#	error unsupported event loop
+#endif
+
+#ifndef DISPATCH_MGR_QUEUE_DEBUG
+#define DISPATCH_MGR_QUEUE_DEBUG 0
+#endif
+
+#ifndef DISPATCH_MACHPORT_DEBUG
+#define DISPATCH_MACHPORT_DEBUG 0
+#endif
+
+#ifndef EV_VANISHED
+#define EV_VANISHED 0x0200
+#endif
+
+#if DISPATCH_EVENT_BACKEND_KEVENT
+#	if defined(EV_SET_QOS)
+#		define DISPATCH_USE_KEVENT_QOS 1
+#	else
+#		define DISPATCH_USE_KEVENT_QOS 0
+#	endif
+
+#	ifdef NOTE_LEEWAY
+#		define DISPATCH_HAVE_TIMER_COALESCING 1
+#	else
+#		define NOTE_LEEWAY 0
+#		define DISPATCH_HAVE_TIMER_COALESCING 0
+#	endif // !NOTE_LEEWAY
+#	if defined(NOTE_CRITICAL) && defined(NOTE_BACKGROUND)
+#		define DISPATCH_HAVE_TIMER_QOS 1
+#	else
+#		undef  NOTE_CRITICAL
+#		define NOTE_CRITICAL 0
+#		undef  NOTE_BACKGROUND
+#		define NOTE_BACKGROUND 0
+#		define DISPATCH_HAVE_TIMER_QOS 0
+#	endif // !defined(NOTE_CRITICAL) || !defined(NOTE_BACKGROUND)
+
+#	ifndef NOTE_FUNLOCK
+#	define NOTE_FUNLOCK 0x00000100
+#	endif
+
+#	if HAVE_DECL_NOTE_REAP
+#	if defined(NOTE_REAP) && defined(__APPLE__)
+#	undef NOTE_REAP
+#	define NOTE_REAP 0x10000000 // <rdar://problem/13338526>
+#	endif
+#	endif // HAVE_DECL_NOTE_REAP
+
+#	ifndef VQ_QUOTA
+#	undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982
+#	endif // VQ_QUOTA
+
+#	ifndef NOTE_MEMORYSTATUS_LOW_SWAP
+#	define NOTE_MEMORYSTATUS_LOW_SWAP 0x8
+#	endif
+
+#	if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || \
+		!DISPATCH_HOST_SUPPORTS_OSX(101200)
+#	undef NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
+#	define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0
+#	endif // NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
+
+#	if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) || \
+		!DISPATCH_HOST_SUPPORTS_OSX(101200)
+#	undef NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL
+#	define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0
+#	endif // NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL
+
+#	ifndef DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS
+#	if TARGET_OS_MAC && !DISPATCH_HOST_SUPPORTS_OSX(101200)
+	// deferred delete can return bogus ENOENTs on older kernels
+#	define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 1
+#	else
+#	define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 0
+#	endif
+#	endif
+#else // DISPATCH_EVENT_BACKEND_KEVENT
+#	define EV_ADD					0x0001
+#	define EV_DELETE				0x0002
+#	define EV_ENABLE				0x0004
+
+#	define EV_ONESHOT				0x0010
+#	define EV_CLEAR					0x0020
+#	define EV_DISPATCH				0x0080
+#	define EV_UDATA_SPECIFIC		0x0100
+
+#	define EVFILT_READ				(-1)
+#	define EVFILT_WRITE				(-2)
+#	define EVFILT_SIGNAL			(-3)
+#	define EVFILT_TIMER				(-4)
+#	define EVFILT_SYSCOUNT			4
+
+#	define DISPATCH_HAVE_TIMER_QOS 0
+#	define DISPATCH_HAVE_TIMER_COALESCING 0
+#endif // !DISPATCH_EVENT_BACKEND_KEVENT
+
+#ifdef EV_UDATA_SPECIFIC
+#	define DISPATCH_EV_DIRECT		(EV_UDATA_SPECIFIC|EV_DISPATCH)
+#else
+#	define DISPATCH_EV_DIRECT		0x0000
+#	define EV_UDATA_SPECIFIC		0x0000
+#	undef  EV_VANISHED
+#	define EV_VANISHED				0x0000
+#endif
+
+#define DISPATCH_EV_MSG_NEEDS_FREE	0x10000 // mach message needs to be free()d
+
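+// Internal pseudo-filters, allocated just below the kernel's filter range.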
+#define DISPATCH_EVFILT_TIMER				(-EVFILT_SYSCOUNT - 1)
+#define DISPATCH_EVFILT_CUSTOM_ADD			(-EVFILT_SYSCOUNT - 2)
+#define DISPATCH_EVFILT_CUSTOM_OR			(-EVFILT_SYSCOUNT - 3)
+#define DISPATCH_EVFILT_MACH_NOTIFICATION	(-EVFILT_SYSCOUNT - 4)
+#define DISPATCH_EVFILT_SYSCOUNT			( EVFILT_SYSCOUNT + 4)
+
+#if HAVE_MACH
+#	if !EV_UDATA_SPECIFIC
+#	error mach support requires EV_UDATA_SPECIFIC
+#	endif
+
+#	ifndef MACH_RCV_VOUCHER
+#	define MACH_RCV_VOUCHER 0x00000800
+#	endif
+
+#	ifndef MACH_NOTIFY_SEND_POSSIBLE
+#	undef  MACH_NOTIFY_SEND_POSSIBLE
+#	define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME
+#	endif
+
+#	ifndef NOTE_MACH_CONTINUOUS_TIME
+#	define NOTE_MACH_CONTINUOUS_TIME 0
+#	endif // NOTE_MACH_CONTINUOUS_TIME
+
+#	ifndef HOST_NOTIFY_CALENDAR_SET
+#	define HOST_NOTIFY_CALENDAR_SET HOST_NOTIFY_CALENDAR_CHANGE
+#	endif // HOST_NOTIFY_CALENDAR_SET
+
+#	ifndef HOST_CALENDAR_SET_REPLYID
+#	define HOST_CALENDAR_SET_REPLYID 951
+#	endif // HOST_CALENDAR_SET_REPLYID
+
+#	ifndef MACH_SEND_OVERRIDE
+#	define MACH_SEND_OVERRIDE 0x00000020
+typedef unsigned int mach_msg_priority_t;
+#	define MACH_MSG_PRIORITY_UNSPECIFIED ((mach_msg_priority_t)0)
+#	endif // MACH_SEND_OVERRIDE
+
+#	define DISPATCH_MACH_TRAILER_SIZE sizeof(dispatch_mach_trailer_t)
+#	define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX
+#	define DISPATCH_MACH_RCV_OPTIONS ( \
+		MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \
+		MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \
+		MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | \
+		MACH_RCV_VOUCHER)
+#endif // HAVE_MACH
+
+#endif // __DISPATCH_EVENT_EVENT_CONFIG__
diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c
new file mode 100644
index 0000000..67c8a01
--- /dev/null
+++ b/src/event/event_epoll.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+
+#include "internal.h"
+#if DISPATCH_EVENT_BACKEND_EPOLL
+#include <linux/sockios.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
+#include <sys/signalfd.h>
+#include <sys/timerfd.h>
+
+#ifndef EPOLLFREE
+#define EPOLLFREE 0x4000
+#endif
+
+#if !DISPATCH_USE_MGR_THREAD
+#error unsupported configuration
+#endif
+
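+// Values stored in epoll_event.data.u32 for registrations that are not
+// backed by a muxnote (the wakeup eventfd and the per-clock timerfds).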
+enum {
+	DISPATCH_EPOLL_EVENTFD    = 0x0001,
+	DISPATCH_EPOLL_CLOCK_WALL = 0x0002,
+	DISPATCH_EPOLL_CLOCK_MACH = 0x0003,
+};
+
+typedef struct dispatch_muxnote_s {
+	TAILQ_ENTRY(dispatch_muxnote_s) dmn_list;
+	TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_readers_head;
+	TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_writers_head;
+	int     dmn_fd;
+	int     dmn_ident;
+	int16_t dmn_filter;
+	uint32_t dmn_events; // epoll event mask; wider than int16_t for EPOLLONESHOT
+	bool    dmn_socket_listener;
+} *dispatch_muxnote_t;
+
+typedef struct dispatch_epoll_timeout_s {
+	int       det_fd;
+	uint16_t  det_ident;
+	bool      det_registered;
+	bool      det_armed;
+} *dispatch_epoll_timeout_t;
+
+static int _dispatch_epfd, _dispatch_eventfd;
+DISPATCH_CACHELINE_ALIGN
+static TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s)
+_dispatch_sources[DSL_HASH_SIZE];
+
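+// One timerfd-backed timeout per dispatch clock; det_ident holds the
+// matching DISPATCH_EPOLL_CLOCK_* value above.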
+#define DISPATCH_EPOLL_TIMEOUT_INITIALIZER(clock) \
+	[DISPATCH_CLOCK_##clock] = { \
+		.det_fd = -1, \
+		.det_ident = DISPATCH_EPOLL_CLOCK_##clock, \
+	}
+static struct dispatch_epoll_timeout_s _dispatch_epoll_timeout[] = {
+	DISPATCH_EPOLL_TIMEOUT_INITIALIZER(WALL),
+	DISPATCH_EPOLL_TIMEOUT_INITIALIZER(MACH),
+};
+
+#pragma mark dispatch_muxnote_t
+
+DISPATCH_ALWAYS_INLINE
+static inline struct dispatch_muxnote_bucket_s *
+_dispatch_muxnote_bucket(int ident)
+{
+	return &_dispatch_sources[DSL_HASH((uint32_t)ident)];
+}
+#define _dispatch_unote_muxnote_bucket(du) \
+	_dispatch_muxnote_bucket(du._du->du_ident)
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_muxnote_t
+_dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb,
+		uint64_t ident, int16_t filter)
+{
+	dispatch_muxnote_t dmn;
+	if (filter == EVFILT_WRITE) filter = EVFILT_READ;
+	TAILQ_FOREACH(dmn, dmb, dmn_list) {
+		if (dmn->dmn_ident == ident && dmn->dmn_filter == filter) {
+			break;
+		}
+	}
+	return dmn;
+}
+#define _dispatch_unote_muxnote_find(dmb, du) \
+	_dispatch_muxnote_find(dmb, du._du->du_ident, du._du->du_filter)
+
+static void
+_dispatch_muxnote_dispose(dispatch_muxnote_t dmn)
+{
+	if (dmn->dmn_filter != EVFILT_READ || dmn->dmn_fd != dmn->dmn_ident) {
+		close(dmn->dmn_fd);
+	}
+	free(dmn);
+}
+
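+// A muxnote multiplexes every unote interested in a given (ident, filter)
+// pair over a single file descriptor registered with epoll. Signals are
+// monitored through a signalfd, and regular files (which epoll rejects)
+// are backed by an always-ready eventfd stand-in.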
+static dispatch_muxnote_t
+_dispatch_muxnote_create(dispatch_unote_t du, uint32_t events)
+{
+	dispatch_muxnote_t dmn;
+	struct stat sb;
+	int fd = du._du->du_ident;
+	int16_t filter = du._du->du_filter;
+	bool socket_listener = false;
+	sigset_t sigmask;
+
+	switch (filter) {
+	case EVFILT_SIGNAL:
+		sigemptyset(&sigmask);
+		sigaddset(&sigmask, du._du->du_ident);
+		fd = signalfd(-1, &sigmask, SFD_NONBLOCK | SFD_CLOEXEC);
+		if (fd < 0) {
+			return NULL;
+		}
+		sigprocmask(SIG_BLOCK, &sigmask, NULL);
+		break;
+
+	case EVFILT_WRITE:
+		filter = EVFILT_READ;
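+		// FALLTHROUGH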
+	case EVFILT_READ:
+		if (fstat(fd, &sb) < 0) {
+			return NULL;
+		}
+		if (S_ISREG(sb.st_mode)) {
+			// make a dummy fd that is both readable & writeable
+			fd = eventfd(1, EFD_CLOEXEC | EFD_NONBLOCK);
+			if (fd < 0) {
+				return NULL;
+			}
+		} else if (S_ISSOCK(sb.st_mode)) {
+			socklen_t vlen = sizeof(int);
+			int v;
+			if (getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &v, &vlen) == 0) {
+				socket_listener = (bool)v;
+			}
+		}
+		break;
+
+	default:
+		DISPATCH_INTERNAL_CRASH(0, "Unexpected filter");
+	}
+
+	dmn = _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s));
+	TAILQ_INIT(&dmn->dmn_readers_head);
+	TAILQ_INIT(&dmn->dmn_writers_head);
+	dmn->dmn_fd = fd;
+	dmn->dmn_ident = du._du->du_ident;
+	dmn->dmn_filter = filter;
+	dmn->dmn_events = events;
+	dmn->dmn_socket_listener = socket_listener;
+	return dmn;
+}
+
+#pragma mark dispatch_unote_t
+
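+// (Re)program the muxnote's fd in the epoll set with its current event mask.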
+static int
+_dispatch_epoll_update(dispatch_muxnote_t dmn, int op)
+{
+	struct epoll_event ev = {
+		.events = dmn->dmn_events,
+		.data = { .ptr = dmn },
+	};
+	return epoll_ctl(_dispatch_epfd, op, dmn->dmn_fd, &ev);
+}
+
+bool
+_dispatch_unote_register(dispatch_unote_t du, dispatch_priority_t pri)
+{
+	struct dispatch_muxnote_bucket_s *dmb;
+	dispatch_muxnote_t dmn;
+	uint32_t events = EPOLLFREE;
+
+	dispatch_assert(!du._du->du_registered);
+	du._du->du_priority = pri;
+
+	switch (du._du->du_filter) {
+	case DISPATCH_EVFILT_CUSTOM_OR:
+	case DISPATCH_EVFILT_CUSTOM_ADD:
+		return du._du->du_registered = true;
+	case EVFILT_WRITE:
+		events |= EPOLLOUT;
+		break;
+	default:
+		events |= EPOLLIN;
+		break;
+	}
+
+	if (du._du->du_type->dst_flags & EV_DISPATCH) {
+		events |= EPOLLONESHOT;
+	}
+
+	dmb = _dispatch_unote_muxnote_bucket(du);
+	dmn = _dispatch_unote_muxnote_find(dmb, du);
+	if (dmn) {
+		events &= ~dmn->dmn_events;
+		if (events) {
+			dmn->dmn_events |= events;
+			if (_dispatch_epoll_update(dmn, EPOLL_CTL_MOD) < 0) {
+				dmn->dmn_events &= ~events;
+				dmn = NULL;
+			}
+		}
+	} else {
+		// _dispatch_muxnote_create() can fail (signalfd(2), fstat(2), ...)
+		dmn = _dispatch_muxnote_create(du, events);
+		if (dmn) {
+			if (_dispatch_epoll_update(dmn, EPOLL_CTL_ADD) < 0) {
+				_dispatch_muxnote_dispose(dmn);
+				dmn = NULL;
+			} else {
+				TAILQ_INSERT_TAIL(dmb, dmn, dmn_list);
+			}
+		}
+	}
+
+	if (dmn) {
+		dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+		// use the filter, not `events`: for a pre-existing muxnote the
+		// requested bits may already have been masked out above
+		if (du._du->du_filter == EVFILT_WRITE) {
+			TAILQ_INSERT_TAIL(&dmn->dmn_writers_head, dul, du_link);
+		} else {
+			TAILQ_INSERT_TAIL(&dmn->dmn_readers_head, dul, du_link);
+		}
+		dul->du_muxnote = dmn;
+	}
+	return du._du->du_registered = (dmn != NULL);
+}
+
+void
+_dispatch_unote_resume(dispatch_unote_t du)
+{
+	dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(du)->du_muxnote;
+	dispatch_assert((bool)du._du->du_registered);
+
+	_dispatch_epoll_update(dmn, EPOLL_CTL_MOD);
+}
+
+bool
+_dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags)
+{
+	switch (du._du->du_filter) {
+	case DISPATCH_EVFILT_CUSTOM_OR:
+	case DISPATCH_EVFILT_CUSTOM_ADD:
+		du._du->du_registered = false;
+		return true;
+	}
+	if (du._du->du_registered) {
+		dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+		dispatch_muxnote_t dmn = dul->du_muxnote;
+		uint32_t events = dmn->dmn_events;
+
+		if (du._du->du_filter == EVFILT_WRITE) {
+			TAILQ_REMOVE(&dmn->dmn_writers_head, dul, du_link);
+		} else {
+			TAILQ_REMOVE(&dmn->dmn_readers_head, dul, du_link);
+		}
+		_TAILQ_TRASH_ENTRY(dul, du_link);
+		dul->du_muxnote = NULL;
+
+		if (TAILQ_EMPTY(&dmn->dmn_readers_head)) {
+			events &= ~EPOLLIN;
+		}
+		if (TAILQ_EMPTY(&dmn->dmn_writers_head)) {
+			events &= ~EPOLLOUT;
+		}
+
+		if (events == dmn->dmn_events) {
+			// nothing to do
+		} else if (events & (EPOLLIN | EPOLLOUT)) {
+			_dispatch_epoll_update(dmn, EPOLL_CTL_MOD);
+		} else {
+			epoll_ctl(_dispatch_epfd, EPOLL_CTL_DEL, dmn->dmn_fd, NULL);
+			TAILQ_REMOVE(_dispatch_unote_muxnote_bucket(du), dmn, dmn_list);
+			_dispatch_muxnote_dispose(dmn);
+		}
+		du._du->du_registered = false;
+	}
+	return true;
+}
+
+#pragma mark timers
+
+static void
+_dispatch_event_merge_timer(dispatch_clock_t clock)
+{
+	_dispatch_timers_expired = true;
+	_dispatch_timers_processing_mask |= 1 << DISPATCH_TIMER_INDEX(clock, 0);
+#if DISPATCH_USE_DTRACE
+	_dispatch_timers_will_wake |= 1 << 0;
+#endif
+	_dispatch_epoll_timeout[clock].det_armed = false;
+	_dispatch_timers_heap[clock].dth_flags &= ~DTH_ARMED;
+}
+
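+// Program the per-clock timerfd with an absolute deadline and keep its epoll
+// registration in sync: EPOLL_CTL_ADD on first use, EPOLL_CTL_MOD to re-arm
+// after the timer fired, EPOLL_CTL_DEL once the deadline is "never"
+// (target >= INT64_MAX).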
+static void
+_dispatch_timeout_program(uint32_t tidx, uint64_t target, uint64_t leeway)
+{
+	dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx);
+	dispatch_epoll_timeout_t timer = &_dispatch_epoll_timeout[clock];
+	struct epoll_event ev = {
+		.events = EPOLLONESHOT | EPOLLIN,
+		.data = { .u32 = timer->det_ident },
+	};
+	unsigned long op;
+
+	if (target >= INT64_MAX && !timer->det_registered) {
+		return;
+	}
+
+	if (unlikely(timer->det_fd < 0)) {
+		clockid_t clockid;
+		int fd;
+		switch (clock) {
+		case DISPATCH_CLOCK_MACH:
+			clockid = CLOCK_MONOTONIC;
+			break;
+		case DISPATCH_CLOCK_WALL:
+			clockid = CLOCK_REALTIME;
+			break;
+		}
+		fd = timerfd_create(clockid, TFD_NONBLOCK | TFD_CLOEXEC);
+		if (!dispatch_assume(fd >= 0)) {
+			return;
+		}
+		timer->det_fd = fd;
+	}
+
+	if (target < INT64_MAX) {
+		struct itimerspec its = { .it_value = {
+			.tv_sec  = target / NSEC_PER_SEC,
+			.tv_nsec = target % NSEC_PER_SEC,
+		} };
+		dispatch_assume_zero(timerfd_settime(timer->det_fd, TFD_TIMER_ABSTIME,
+				&its, NULL));
+		if (!timer->det_registered) {
+			op = EPOLL_CTL_ADD;
+		} else if (!timer->det_armed) {
+			op = EPOLL_CTL_MOD;
+		} else {
+			return;
+		}
+	} else {
+		op = EPOLL_CTL_DEL;
+	}
+	dispatch_assume_zero(epoll_ctl(_dispatch_epfd, op, timer->det_fd, &ev));
+	timer->det_armed = timer->det_registered = (op != EPOLL_CTL_DEL);
+}
+
+void
+_dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range,
+		dispatch_clock_now_cache_t nows)
+{
+	uint64_t target = range.delay;
+	target += _dispatch_time_cached_now(nows, DISPATCH_TIMER_CLOCK(tidx));
+	_dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED;
+	_dispatch_timeout_program(tidx, target, range.leeway);
+}
+
+void
+_dispatch_event_loop_timer_delete(uint32_t tidx)
+{
+	_dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
+	_dispatch_timeout_program(tidx, UINT64_MAX, UINT64_MAX);
+}
+
+#pragma mark dispatch_loop
+
+void
+_dispatch_event_loop_atfork_child(void)
+{
+}
+
+void
+_dispatch_event_loop_init(void)
+{
+	unsigned int i;
+	for (i = 0; i < DSL_HASH_SIZE; i++) {
+		TAILQ_INIT(&_dispatch_sources[i]);
+	}
+
+	_dispatch_epfd = epoll_create1(EPOLL_CLOEXEC);
+	if (_dispatch_epfd < 0) {
+		DISPATCH_INTERNAL_CRASH(errno, "epoll_create1() failed");
+	}
+
+	_dispatch_eventfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+	if (_dispatch_eventfd < 0) {
+		DISPATCH_INTERNAL_CRASH(errno, "epoll_eventfd() failed");
+	}
+
+	struct epoll_event ev = {
+		.events = EPOLLIN | EPOLLFREE,
+		.data = { .u32 = DISPATCH_EPOLL_EVENTFD, },
+	};
+	unsigned long op = EPOLL_CTL_ADD;
+	if (epoll_ctl(_dispatch_epfd, op, _dispatch_eventfd, &ev) < 0) {
+		DISPATCH_INTERNAL_CRASH(errno, "epoll_ctl() failed");
+	}
+}
+
+void
+_dispatch_event_loop_poke(void)
+{
+	dispatch_assume_zero(eventfd_write(_dispatch_eventfd, 1));
+}
+
+static void
+_dispatch_event_merge_signal(dispatch_muxnote_t dmn)
+{
+	dispatch_unote_linkage_t dul, dul_next;
+	struct signalfd_siginfo si;
+
+	dispatch_assume(read(dmn->dmn_fd, &si, sizeof(si)) == sizeof(si));
+
+	TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) {
+		dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
+		dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_CLEAR, 1, 0);
+	}
+}
+
+static uintptr_t
+_dispatch_get_buffer_size(dispatch_muxnote_t dmn, bool writer)
+{
+	unsigned long op = writer ? SIOCOUTQ : SIOCINQ;
+	int n;
+
+	if (!writer && dmn->dmn_socket_listener) {
+		// Linux doesn't support saying how many clients are ready to be
+		// accept()ed
+		return 1;
+	}
+
+	if (dispatch_assume_zero(ioctl(dmn->dmn_ident, op, &n))) {
+		return 1;
+	}
+	return (uintptr_t)n;
+}
+
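+// The buffer occupancy is delivered bitwise-complemented (~data), mirroring
+// the encoding the kevent backend uses for level-triggered sources (see
+// _dispatch_kevent_merge() in event_kevent.c).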
+static void
+_dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events)
+{
+	dispatch_unote_linkage_t dul, dul_next;
+	uintptr_t data;
+
+	if (events & EPOLLIN) {
+		data = _dispatch_get_buffer_size(dmn, false);
+		TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) {
+			dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
+			dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, ~data, 0);
+		}
+	}
+
+	if (events & EPOLLOUT) {
+		data = _dispatch_get_buffer_size(dmn, true);
+		TAILQ_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) {
+			dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
+			dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, ~data, 0);
+		}
+	}
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_event_loop_drain(dispatch_deferred_items_t ddi DISPATCH_UNUSED, bool poll)
+{
+	struct epoll_event ev[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
+	int i, r;
+
+retry:
+	r = epoll_wait(_dispatch_epfd, ev, countof(ev), poll ? 0 : -1);
+	if (unlikely(r == -1)) {
+		int err = errno;
+		switch (err) {
+		case EINTR:
+			goto retry;
+		case EBADF:
+			DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
+			break;
+		default:
+			(void)dispatch_assume_zero(err);
+			break;
+		}
+		return;
+	}
+
+	for (i = 0; i < r; i++) {
+		dispatch_muxnote_t dmn;
+		eventfd_t value;
+
+		if (ev[i].events & EPOLLFREE) {
+			DISPATCH_CLIENT_CRASH(0, "Do not close random Unix descriptors");
+		}
+
+		switch (ev[i].data.u32) {
+		case DISPATCH_EPOLL_EVENTFD:
+			dispatch_assume_zero(eventfd_read(_dispatch_eventfd, &value));
+			break;
+
+		case DISPATCH_EPOLL_CLOCK_WALL:
+			_dispatch_event_merge_timer(DISPATCH_CLOCK_WALL);
+			break;
+
+		case DISPATCH_EPOLL_CLOCK_MACH:
+			_dispatch_event_merge_timer(DISPATCH_CLOCK_MACH);
+			break;
+
+		default:
+			dmn = ev[i].data.ptr;
+			switch (dmn->dmn_filter) {
+			case EVFILT_SIGNAL:
+				_dispatch_event_merge_signal(dmn);
+				break;
+
+			case EVFILT_READ:
+				_dispatch_event_merge_fd(dmn, ev[i].events);
+				break;
+			}
+		}
+	}
+}
+
+#endif // DISPATCH_EVENT_BACKEND_EPOLL
diff --git a/src/event/event_internal.h b/src/event/event_internal.h
new file mode 100644
index 0000000..e74d0b3
--- /dev/null
+++ b/src/event/event_internal.h
@@ -0,0 +1,377 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_EVENT_EVENT_INTERNAL__
+#define __DISPATCH_EVENT_EVENT_INTERNAL__
+
+#include "event_config.h"
+
+#define DISPATCH_UNOTE_CLASS_HEADER() \
+	dispatch_source_type_t du_type; \
+	uintptr_t du_owner_wref; /* "weak" back reference to the owner object */ \
+	uint32_t  du_ident; \
+	int16_t   du_filter; \
+	uint8_t   du_is_direct : 1; \
+	uint8_t   du_registered : 1; \
+	uint8_t   du_is_level : 1; \
+	uint8_t   du_is_adder : 1; \
+	uint8_t   du_is_timer : 1; \
+	uint8_t   du_needs_rearm : 1; \
+	uint8_t   du_memorypressure_override : 1; \
+	uint8_t   du_vmpressure_override : 1; \
+	union { \
+		bool  dmrr_handler_is_block; \
+		os_atomic(bool) dmsr_notification_armed; \
+	}; \
+	uint32_t  du_fflags; \
+	dispatch_priority_t du_priority
+
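+// The owner back-reference is stored bitwise-complemented, which keeps
+// leak-detection tools such as 'leaks' from treating it as a retaining
+// pointer (cf. the note on dispatch_source_refs_s below).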
+#define _dispatch_ptr2wref(ptr) (~(uintptr_t)(ptr))
+#define _dispatch_wref2ptr(ref) ((void*)~(ref))
+#define _dispatch_source_from_refs(dr) \
+		((dispatch_source_t)_dispatch_wref2ptr((dr)->du_owner_wref))
+
+typedef struct dispatch_unote_class_s {
+	DISPATCH_UNOTE_CLASS_HEADER();
+} *dispatch_unote_class_t;
+
+enum {
+	DS_EVENT_HANDLER = 0,
+	DS_CANCEL_HANDLER,
+	DS_REGISTN_HANDLER,
+};
+
+#define DISPATCH_SOURCE_REFS_HEADER() \
+	DISPATCH_UNOTE_CLASS_HEADER(); \
+	struct dispatch_continuation_s *volatile ds_handler[3]
+
+// Source state which may contain references to the source object
+// Separately allocated so that 'leaks' can see sources <rdar://problem/9050566>
+typedef struct dispatch_source_refs_s {
+	DISPATCH_SOURCE_REFS_HEADER();
+} *dispatch_source_refs_t;
+
+typedef struct dispatch_timer_delay_s {
+	uint64_t delay, leeway;
+} dispatch_timer_delay_s;
+
+#define DTH_TARGET_ID   0u
+#define DTH_DEADLINE_ID 1u
+#define DTH_ID_COUNT    2u
+
+typedef struct dispatch_timer_source_s {
+	union {
+		struct {
+			uint64_t target;
+			uint64_t deadline;
+		};
+		uint64_t heap_key[DTH_ID_COUNT];
+	};
+	uint64_t interval;
+} *dispatch_timer_source_t;
+
+typedef struct dispatch_timer_config_s {
+	struct dispatch_timer_source_s dtc_timer;
+	dispatch_clock_t dtc_clock;
+} *dispatch_timer_config_t;
+
+typedef struct dispatch_timer_source_refs_s {
+	DISPATCH_SOURCE_REFS_HEADER();
+	struct dispatch_timer_source_s dt_timer;
+	struct dispatch_timer_config_s *dt_pending_config;
+	uint32_t dt_heap_entry[DTH_ID_COUNT];
+} *dispatch_timer_source_refs_t;
+
+typedef struct dispatch_timer_heap_s {
+	uint64_t dth_target, dth_deadline;
+	uint32_t dth_count;
+	uint16_t dth_segments;
+#define DTH_ARMED  1u
+	uint16_t dth_flags;
+	dispatch_timer_source_refs_t dth_min[DTH_ID_COUNT];
+	void **dth_heap;
+} *dispatch_timer_heap_t;
+
+#if HAVE_MACH
+#if DISPATCH_MACHPORT_DEBUG
+void dispatch_debug_machport(mach_port_t name, const char *str);
+#define _dispatch_debug_machport(name) \
+		dispatch_debug_machport((name), __func__)
+#else
+#define _dispatch_debug_machport(name) ((void)(name))
+#endif // DISPATCH_MACHPORT_DEBUG
+
+// Mach channel state which may contain references to the channel object
+// layout must match dispatch_source_refs_s
+struct dispatch_mach_recv_refs_s {
+	DISPATCH_UNOTE_CLASS_HEADER();
+	dispatch_mach_handler_function_t dmrr_handler_func;
+	void *dmrr_handler_ctxt;
+};
+typedef struct dispatch_mach_recv_refs_s *dispatch_mach_recv_refs_t;
+
+struct dispatch_mach_reply_refs_s {
+	DISPATCH_UNOTE_CLASS_HEADER();
+	dispatch_priority_t dmr_priority;
+	void *dmr_ctxt;
+	voucher_t dmr_voucher;
+	TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list;
+};
+typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t;
+
+#define _DISPATCH_MACH_STATE_UNUSED_MASK        0xffffffa000000000ull
+#define DISPATCH_MACH_STATE_DIRTY               0x0000002000000000ull
+#define DISPATCH_MACH_STATE_PENDING_BARRIER     0x0000001000000000ull
+#define DISPATCH_MACH_STATE_RECEIVED_OVERRIDE   0x0000000800000000ull
+#define DISPATCH_MACH_STATE_MAX_QOS_MASK        0x0000000700000000ull
+#define DISPATCH_MACH_STATE_MAX_QOS_SHIFT       32
+#define DISPATCH_MACH_STATE_UNLOCK_MASK         0x00000000ffffffffull
+
+struct dispatch_mach_send_refs_s {
+	DISPATCH_UNOTE_CLASS_HEADER();
+	dispatch_mach_msg_t dmsr_checkin;
+	TAILQ_HEAD(, dispatch_mach_reply_refs_s) dmsr_replies;
+	dispatch_unfair_lock_s dmsr_replies_lock;
+#define DISPATCH_MACH_DISCONNECT_MAGIC_BASE (0x80000000)
+#define DISPATCH_MACH_NEVER_INSTALLED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 0)
+#define DISPATCH_MACH_NEVER_CONNECTED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 1)
+	uint32_t volatile dmsr_disconnect_cnt;
+	DISPATCH_UNION_LE(uint64_t volatile dmsr_state,
+			dispatch_unfair_lock_s dmsr_state_lock,
+			uint32_t dmsr_state_bits
+	) DISPATCH_ATOMIC64_ALIGN;
+	struct dispatch_object_s *volatile dmsr_tail;
+	struct dispatch_object_s *volatile dmsr_head;
+	mach_port_t dmsr_send, dmsr_checkin_port;
+};
+typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t;
+
+void _dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr);
+
+struct dispatch_xpc_term_refs_s {
+	DISPATCH_UNOTE_CLASS_HEADER();
+};
+typedef struct dispatch_xpc_term_refs_s *dispatch_xpc_term_refs_t;
+#endif // HAVE_MACH
+
+typedef union dispatch_unote_u {
+	dispatch_unote_class_t _du;
+	dispatch_source_refs_t _dr;
+	dispatch_timer_source_refs_t _dt;
+#if HAVE_MACH
+	dispatch_mach_recv_refs_t _dmrr;
+	dispatch_mach_send_refs_t _dmsr;
+	dispatch_mach_reply_refs_t _dmr;
+	dispatch_xpc_term_refs_t _dxtr;
+#endif
+} __attribute__((__transparent_union__)) dispatch_unote_t;
+
+#define DISPATCH_UNOTE_NULL ((dispatch_unote_t){ ._du = NULL })
+
+#if TARGET_OS_EMBEDDED
+#define DSL_HASH_SIZE  64u // must be a power of two
+#else
+#define DSL_HASH_SIZE 256u // must be a power of two
+#endif
+#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1))
+
+typedef struct dispatch_unote_linkage_s {
+	TAILQ_ENTRY(dispatch_unote_linkage_s) du_link;
+	struct dispatch_muxnote_s *du_muxnote;
+} DISPATCH_ATOMIC64_ALIGN *dispatch_unote_linkage_t;
+
+#define DU_UNREGISTER_IMMEDIATE_DELETE 0x01
+#define DU_UNREGISTER_ALREADY_DELETED  0x02
+#define DU_UNREGISTER_DISCONNECTED     0x04
+#define DU_UNREGISTER_REPLY_REMOVE     0x08
+#define DU_UNREGISTER_WAKEUP           0x10
+
+typedef struct dispatch_source_type_s {
+	const char *dst_kind;
+	int16_t    dst_filter;
+	uint16_t   dst_flags;
+	uint32_t   dst_fflags;
+	uint32_t   dst_mask;
+	uint32_t   dst_size;
+#if DISPATCH_EVENT_BACKEND_KEVENT
+	uint32_t   dst_data;
+#endif
+
+	dispatch_unote_t (*dst_create)(dispatch_source_type_t dst,
+			uintptr_t handle, unsigned long mask);
+#if DISPATCH_EVENT_BACKEND_KEVENT
+	bool (*dst_update_mux)(struct dispatch_muxnote_s *dmn);
+#endif
+	void (*dst_merge_evt)(dispatch_unote_t du, uint32_t flags, uintptr_t data,
+			pthread_priority_t pp);
+#if HAVE_MACH
+	void (*dst_merge_msg)(dispatch_unote_t du, uint32_t flags,
+			mach_msg_header_t *msg, mach_msg_size_t sz);
+#endif
+} dispatch_source_type_s;
+
+#define dux_create(dst, handle, mask)  (dst)->dst_create(dst, handle, mask)
+#define dux_merge_evt(du, ...)   (du)->du_type->dst_merge_evt(du, __VA_ARGS__)
+#define dux_merge_msg(du, ...)   (du)->du_type->dst_merge_msg(du, __VA_ARGS__)
+
+extern const dispatch_source_type_s _dispatch_source_type_after;
+
+#if HAVE_MACH
+extern const dispatch_source_type_s _dispatch_source_type_mach_recv_pset;
+extern const dispatch_source_type_s _dispatch_source_type_mach_recv_direct;
+extern const dispatch_source_type_s _dispatch_source_type_mach_recv_direct_pset;
+extern const dispatch_source_type_s _dispatch_mach_type_send;
+extern const dispatch_source_type_s _dispatch_mach_type_recv;
+extern const dispatch_source_type_s _dispatch_mach_type_recv_pset;
+extern const dispatch_source_type_s _dispatch_mach_type_reply;
+extern const dispatch_source_type_s _dispatch_mach_type_reply_pset;
+extern const dispatch_source_type_s _dispatch_xpc_type_sigterm;
+#endif
+
+#pragma mark -
+#pragma mark deferred items
+
+#if DISPATCH_EVENT_BACKEND_KEVENT
+#if DISPATCH_USE_KEVENT_QOS
+typedef struct kevent_qos_s dispatch_kevent_s;
+#else
+typedef struct kevent dispatch_kevent_s;
+#endif
+typedef dispatch_kevent_s *dispatch_kevent_t;
+#endif // DISPATCH_EVENT_BACKEND_KEVENT
+
+#define DISPATCH_DEFERRED_ITEMS_MAGIC  0xdefe55edul /* deferred */
+#define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 8
+
+typedef struct dispatch_deferred_items_s {
+	uint32_t ddi_magic;
+	dispatch_queue_t ddi_stashed_dq;
+	struct dispatch_object_s *ddi_stashed_dou;
+#define DISPATCH_PRIORITY_NOSTASH ((dispatch_priority_t)~0u)
+	dispatch_priority_t ddi_stashed_pri;
+#if DISPATCH_EVENT_BACKEND_KEVENT
+	int ddi_nevents;
+	int ddi_maxevents;
+	dispatch_kevent_s ddi_eventlist[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
+#endif
+} dispatch_deferred_items_s, *dispatch_deferred_items_t;
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_deferred_items_set(dispatch_deferred_items_t ddi)
+{
+	_dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_deferred_items_t
+_dispatch_deferred_items_get(void)
+{
+	dispatch_deferred_items_t ddi = (dispatch_deferred_items_t)
+			_dispatch_thread_getspecific(dispatch_deferred_items_key);
+	if (ddi && ddi->ddi_magic == DISPATCH_DEFERRED_ITEMS_MAGIC) {
+		return ddi;
+	}
+	return NULL;
+}
+
+#pragma mark -
+#pragma mark inlines
+
+#if DISPATCH_PURE_C
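+// The linkage of a non-direct unote is allocated immediately in front of the
+// unote itself, so both conversions below are plain pointer arithmetic.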
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_unote_linkage_t
+_dispatch_unote_get_linkage(dispatch_unote_t du)
+{
+	dispatch_assert(!du._du->du_is_direct);
+	return (dispatch_unote_linkage_t)((char *)du._du
+			- sizeof(struct dispatch_unote_linkage_s));
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_unote_t
+_dispatch_unote_linkage_get_unote(dispatch_unote_linkage_t dul)
+{
+	return (dispatch_unote_t){ ._du = (dispatch_unote_class_t)(dul + 1) };
+}
+#endif
+
+#pragma mark -
+#pragma mark prototypes
+
+#if DISPATCH_HAVE_TIMER_QOS
+#define DISPATCH_TIMER_QOS_NORMAL       0u
+#define DISPATCH_TIMER_QOS_CRITICAL     1u
+#define DISPATCH_TIMER_QOS_BACKGROUND   2u
+#define DISPATCH_TIMER_QOS_COUNT        3u
+#else
+#define DISPATCH_TIMER_QOS_NORMAL       0u
+#define DISPATCH_TIMER_QOS_COUNT        1u
+#endif
+
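+// Timer index (tidx) encoding: bit 0 selects the clock, the bits above it
+// the timer QoS class.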
+#define DISPATCH_TIMER_QOS(tidx)   (((uintptr_t)(tidx) >> 1) & 3u)
+#define DISPATCH_TIMER_CLOCK(tidx) (dispatch_clock_t)((tidx) & 1u)
+
+#define DISPATCH_TIMER_INDEX(clock, qos) ((qos) << 1 | (clock))
+#define DISPATCH_TIMER_COUNT \
+		DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT)
+#define DISPATCH_TIMER_IDENT_CANCELED    (~0u)
+
+extern struct dispatch_timer_heap_s _dispatch_timers_heap[DISPATCH_TIMER_COUNT];
+extern bool _dispatch_timers_reconfigure, _dispatch_timers_expired;
+extern uint32_t _dispatch_timers_processing_mask;
+#if DISPATCH_USE_DTRACE
+extern uint32_t _dispatch_timers_will_wake;
+#endif
+
+dispatch_unote_t _dispatch_unote_create_with_handle(dispatch_source_type_t dst,
+		uintptr_t handle, unsigned long mask);
+
+dispatch_unote_t _dispatch_unote_create_with_fd(dispatch_source_type_t dst,
+		uintptr_t handle, unsigned long mask);
+
+dispatch_unote_t _dispatch_unote_create_without_handle(dispatch_source_type_t dst,
+		uintptr_t handle, unsigned long mask);
+
+bool _dispatch_unote_register(dispatch_unote_t du, dispatch_priority_t pri);
+void _dispatch_unote_resume(dispatch_unote_t du);
+bool _dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags);
+void _dispatch_unote_dispose(dispatch_unote_t du);
+
+void _dispatch_event_loop_atfork_child(void);
+void _dispatch_event_loop_init(void);
+void _dispatch_event_loop_poke(void);
+void _dispatch_event_loop_drain(dispatch_deferred_items_t ddi, bool poll);
+#if DISPATCH_EVENT_BACKEND_KEVENT
+void _dispatch_event_loop_merge(dispatch_kevent_t events, int nevents);
+void _dispatch_event_loop_update(dispatch_kevent_t events, int nevents);
+#endif
+void _dispatch_event_loop_timer_arm(uint32_t tidx,
+		dispatch_timer_delay_s range, dispatch_clock_now_cache_t nows);
+void _dispatch_event_loop_timer_delete(uint32_t tidx);
+
+#endif /* __DISPATCH_EVENT_EVENT_INTERNAL__ */
diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c
new file mode 100644
index 0000000..9ebf456
--- /dev/null
+++ b/src/event/event_kevent.c
@@ -0,0 +1,2339 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "internal.h"
+#if DISPATCH_EVENT_BACKEND_KEVENT
+#if HAVE_MACH
+#include "protocol.h"
+#include "protocolServer.h"
+#endif
+
+#if DISPATCH_USE_KEVENT_WORKQUEUE && !DISPATCH_USE_KEVENT_QOS
+#error unsupported configuration
+#endif
+
+#define DISPATCH_KEVENT_MUXED_MARKER  1ul
+
+typedef struct dispatch_muxnote_s {
+	TAILQ_ENTRY(dispatch_muxnote_s) dmn_list;
+	TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_unotes_head;
+	dispatch_kevent_s dmn_kev;
+} *dispatch_muxnote_t;
+
+static int _dispatch_kq = -1;
+static dispatch_once_t _dispatch_muxnotes_pred;
+DISPATCH_CACHELINE_ALIGN
+static TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s)
+_dispatch_sources[DSL_HASH_SIZE];
+
+#define DISPATCH_NOTE_CLOCK_WALL NOTE_MACH_CONTINUOUS_TIME
+#define DISPATCH_NOTE_CLOCK_MACH 0
+
+static const uint32_t _dispatch_timer_index_to_fflags[] = {
+#define DISPATCH_TIMER_FFLAGS_INIT(kind, qos, note) \
+	[DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)] = \
+			DISPATCH_NOTE_CLOCK_##kind | NOTE_ABSOLUTE | \
+			NOTE_NSECONDS | NOTE_LEEWAY | (note)
+	DISPATCH_TIMER_FFLAGS_INIT(WALL, NORMAL, 0),
+	DISPATCH_TIMER_FFLAGS_INIT(MACH, NORMAL, 0),
+#if DISPATCH_HAVE_TIMER_QOS
+	DISPATCH_TIMER_FFLAGS_INIT(WALL, CRITICAL, NOTE_CRITICAL),
+	DISPATCH_TIMER_FFLAGS_INIT(MACH, CRITICAL, NOTE_CRITICAL),
+	DISPATCH_TIMER_FFLAGS_INIT(WALL, BACKGROUND, NOTE_BACKGROUND),
+	DISPATCH_TIMER_FFLAGS_INIT(MACH, BACKGROUND, NOTE_BACKGROUND),
+#endif
+#undef DISPATCH_TIMER_FFLAGS_INIT
+};
+
+static void _dispatch_kevent_timer_drain(dispatch_kevent_t ke);
+
+#pragma mark -
+#pragma mark kevent debug
+
+DISPATCH_NOINLINE
+static const char *
+_evfiltstr(short filt)
+{
+	switch (filt) {
+#define _evfilt2(f) case (f): return #f
+	_evfilt2(EVFILT_READ);
+	_evfilt2(EVFILT_WRITE);
+	_evfilt2(EVFILT_SIGNAL);
+	_evfilt2(EVFILT_TIMER);
+
+#if DISPATCH_EVENT_BACKEND_KEVENT
+	_evfilt2(EVFILT_AIO);
+	_evfilt2(EVFILT_VNODE);
+	_evfilt2(EVFILT_PROC);
+#if HAVE_MACH
+	_evfilt2(EVFILT_MACHPORT);
+	_evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION);
+#endif
+	_evfilt2(EVFILT_FS);
+	_evfilt2(EVFILT_USER);
+#ifdef EVFILT_SOCK
+	_evfilt2(EVFILT_SOCK);
+#endif
+#ifdef EVFILT_MEMORYSTATUS
+	_evfilt2(EVFILT_MEMORYSTATUS);
+#endif
+#endif // DISPATCH_EVENT_BACKEND_KEVENT
+
+	_evfilt2(DISPATCH_EVFILT_TIMER);
+	_evfilt2(DISPATCH_EVFILT_CUSTOM_ADD);
+	_evfilt2(DISPATCH_EVFILT_CUSTOM_OR);
+	default:
+		return "EVFILT_missing";
+	}
+}
+
+#if DISPATCH_DEBUG
+static const char *
+_evflagstr2(uint16_t *flagsp)
+{
+#define _evflag2(f) \
+	if ((*flagsp & (f)) == (f) && (f)) { \
+		*flagsp &= ~(f); \
+		return #f "|"; \
+	}
+	_evflag2(EV_ADD);
+	_evflag2(EV_DELETE);
+	_evflag2(EV_ENABLE);
+	_evflag2(EV_DISABLE);
+	_evflag2(EV_ONESHOT);
+	_evflag2(EV_CLEAR);
+	_evflag2(EV_RECEIPT);
+	_evflag2(EV_DISPATCH);
+	_evflag2(EV_UDATA_SPECIFIC);
+#ifdef EV_POLL
+	_evflag2(EV_POLL);
+#endif
+#ifdef EV_OOBAND
+	_evflag2(EV_OOBAND);
+#endif
+	_evflag2(EV_ERROR);
+	_evflag2(EV_EOF);
+	_evflag2(EV_VANISHED);
+	*flagsp = 0;
+	return "EV_UNKNOWN ";
+}
+
+DISPATCH_NOINLINE
+static const char *
+_evflagstr(uint16_t flags, char *str, size_t strsize)
+{
+	str[0] = 0;
+	while (flags) {
+		strlcat(str, _evflagstr2(&flags), strsize);
+	}
+	size_t sz = strlen(str);
+	if (sz) str[sz-1] = 0;
+	return str;
+}
+
+DISPATCH_NOINLINE
+static void
+dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev,
+		int i, int n, const char *function, unsigned int line)
+{
+	char flagstr[256];
+	char i_n[31];
+
+	if (n > 1) {
+		snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n);
+	} else {
+		i_n[0] = '\0';
+	}
+	if (verb == NULL) {
+		if (kev->flags & EV_DELETE) {
+			verb = "deleting";
+		} else if (kev->flags & EV_ADD) {
+			verb = "adding";
+		} else {
+			verb = "updating";
+		}
+	}
+#if DISPATCH_USE_KEVENT_QOS
+	_dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
+			"flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, "
+			"qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, "
+			"ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident,
+			_evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
+			sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
+			kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3],
+			function, line);
+#else
+	_dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
+			"flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx}: "
+			"%s #%u", verb, kev, i_n,
+			kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
+			sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
+			function, line);
+#endif
+}
+#else
+static inline void
+dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev,
+		int i, int n, const char *function, unsigned int line)
+{
+	(void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line;
+}
+#endif // DISPATCH_DEBUG
+#define _dispatch_kevent_debug_n(verb, _kev, i, n) \
+		dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__)
+#define _dispatch_kevent_debug(verb, _kev) \
+		_dispatch_kevent_debug_n(verb, _kev, 0, 0)
+#if DISPATCH_MGR_QUEUE_DEBUG
+#define _dispatch_kevent_mgr_debug(verb, kev) _dispatch_kevent_debug(verb, kev)
+#else
+#define _dispatch_kevent_mgr_debug(verb, kev) ((void)verb, (void)kev)
+#endif
+
+#if DISPATCH_MACHPORT_DEBUG
+#ifndef MACH_PORT_TYPE_SPREQUEST
+#define MACH_PORT_TYPE_SPREQUEST 0x40000000
+#endif
+
+DISPATCH_NOINLINE
+void
+dispatch_debug_machport(mach_port_t name, const char* str)
+{
+	mach_port_type_t type;
+	mach_msg_bits_t ns = 0, nr = 0, nso = 0, nd = 0;
+	unsigned int dnreqs = 0, dnrsiz;
+	kern_return_t kr = mach_port_type(mach_task_self(), name, &type);
+	if (kr) {
+		_dispatch_log("machport[0x%08x] = { error(0x%x) \"%s\" }: %s", name,
+				kr, mach_error_string(kr), str);
+		return;
+	}
+	if (type & MACH_PORT_TYPE_SEND) {
+		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
+				MACH_PORT_RIGHT_SEND, &ns));
+	}
+	if (type & MACH_PORT_TYPE_SEND_ONCE) {
+		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
+				MACH_PORT_RIGHT_SEND_ONCE, &nso));
+	}
+	if (type & MACH_PORT_TYPE_DEAD_NAME) {
+		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
+				MACH_PORT_RIGHT_DEAD_NAME, &nd));
+	}
+	if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) {
+		kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs);
+		if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr);
+	}
+	if (type & MACH_PORT_TYPE_RECEIVE) {
+		mach_port_status_t status = { .mps_pset = 0, };
+		mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT;
+		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
+				MACH_PORT_RIGHT_RECEIVE, &nr));
+		(void)dispatch_assume_zero(mach_port_get_attributes(mach_task_self(),
+				name, MACH_PORT_RECEIVE_STATUS, (void*)&status, &cnt));
+		_dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
+				"dnreqs(%03u) spreq(%s) nsreq(%s) pdreq(%s) srights(%s) "
+				"sorights(%03u) qlim(%03u) msgcount(%03u) mkscount(%03u) "
+				"seqno(%03u) }: %s", name, nr, ns, nso, nd, dnreqs,
+				type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N",
+				status.mps_nsrequest ? "Y":"N", status.mps_pdrequest ? "Y":"N",
+				status.mps_srights ? "Y":"N", status.mps_sorights,
+				status.mps_qlimit, status.mps_msgcount, status.mps_mscount,
+				status.mps_seqno, str);
+	} else if (type & (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE|
+			MACH_PORT_TYPE_DEAD_NAME)) {
+		_dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
+				"dnreqs(%03u) spreq(%s) }: %s", name, nr, ns, nso, nd, dnreqs,
+				type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", str);
+	} else {
+		_dispatch_log("machport[0x%08x] = { type(0x%08x) }: %s", name, type,
+				str);
+	}
+}
+#endif
+
+#pragma mark dispatch_kevent_t
+
+#if HAVE_MACH
+
+static dispatch_once_t _dispatch_mach_host_port_pred;
+static mach_port_t _dispatch_mach_host_port;
+
+static inline void*
+_dispatch_kevent_mach_msg_buf(dispatch_kevent_t ke)
+{
+	return (void*)ke->ext[0];
+}
+
+static inline mach_msg_size_t
+_dispatch_kevent_mach_msg_size(dispatch_kevent_t ke)
+{
+	// buffer size in the successful receive case, but message size (like
+	// msgh_size) in the MACH_RCV_TOO_LARGE case, i.e. add trailer size.
+	return (mach_msg_size_t)ke->ext[1];
+}
+
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+static void _dispatch_mach_kevent_portset_drain(dispatch_kevent_t ke);
+#endif
+static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke);
+static inline void _dispatch_mach_host_calendar_change_register(void);
+
+// DISPATCH_MACH_NOTIFICATION_ARMED applies to muxnotes that aren't registered
+// with kevent for real, but with mach_port_request_notification().
+//
+// The kevent structure is used for bookkeeping:
+// - ident, filter, flags and fflags have their usual meaning
+// - data is used to monitor the actual state of the
+//   mach_port_request_notification()
+// - ext[0] is a boolean that tracks whether the notification is armed or not
+#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->ext[0])
+#endif
+
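+// The low bit of udata distinguishes muxed registrations (marker set, udata
+// points to a muxnote) from direct ones (udata is the unote pointer itself).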
+DISPATCH_ALWAYS_INLINE
+static dispatch_muxnote_t
+_dispatch_kevent_get_muxnote(dispatch_kevent_t ke)
+{
+	uintptr_t dmn_addr = (uintptr_t)ke->udata & ~DISPATCH_KEVENT_MUXED_MARKER;
+	return (dispatch_muxnote_t)dmn_addr;
+}
+
+DISPATCH_ALWAYS_INLINE
+static dispatch_unote_t
+_dispatch_kevent_get_unote(dispatch_kevent_t ke)
+{
+	dispatch_assert((ke->udata & DISPATCH_KEVENT_MUXED_MARKER) == 0);
+	return (dispatch_unote_t){ ._du = (dispatch_unote_class_t)ke->udata };
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_print_error(dispatch_kevent_t ke)
+{
+	dispatch_kevent_t kev = NULL;
+
+	if (ke->flags & EV_DELETE) {
+		if (ke->flags & EV_UDATA_SPECIFIC) {
+			if (ke->data == EINPROGRESS) {
+				// deferred EV_DELETE
+				return;
+			}
+#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS
+			if (ke->data == ENOENT) {
+				// deferred EV_DELETE
+				return;
+			}
+#endif
+		}
+		// for EV_DELETE if the update was deferred we may have reclaimed
+		// the udata already, and it is unsafe to dereference it now.
+	} else if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) {
+		ke->flags |= _dispatch_kevent_get_muxnote(ke)->dmn_kev.flags;
+	} else if (ke->udata) {
+		if (!_dispatch_kevent_get_unote(ke)._du->du_registered) {
+			ke->flags |= EV_ADD;
+		}
+	}
+
+#if HAVE_MACH
+	if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP &&
+			(ke->flags & EV_ADD) && _dispatch_evfilt_machport_direct_enabled &&
+			kev && (kev->fflags & MACH_RCV_MSG)) {
+		DISPATCH_INTERNAL_CRASH(ke->ident,
+				"Missing EVFILT_MACHPORT support for ports");
+	}
+#endif
+
+	if (ke->data) {
+		// log the unexpected error
+		_dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter),
+				!ke->udata ? NULL :
+				ke->flags & EV_DELETE ? "delete" :
+				ke->flags & EV_ADD ? "add" :
+				ke->flags & EV_ENABLE ? "enable" : "monitor",
+				(int)ke->data);
+	}
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_merge(dispatch_unote_t du, dispatch_kevent_t ke)
+{
+	uintptr_t data;
+	pthread_priority_t pp = 0;
+#if DISPATCH_USE_KEVENT_QOS
+	pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK;
+#endif
+	if (du._du->du_is_level) {
+		// ke->data is signed and "negative available data" makes no sense
+		// zero bytes happens when EV_EOF is set
+		dispatch_assert(ke->data >= 0l);
+		data = ~(unsigned long)ke->data;
+#if HAVE_MACH
+	} else if (du._du->du_filter == EVFILT_MACHPORT) {
+		data = DISPATCH_MACH_RECV_MESSAGE;
+#endif
+	} else if (du._du->du_is_adder) {
+		data = (unsigned long)ke->data;
+	} else {
+		data = ke->fflags & du._du->du_fflags;
+	}
+	return dux_merge_evt(du._du, ke->flags, data, pp);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_merge_muxed(dispatch_kevent_t ke)
+{
+	dispatch_muxnote_t dmn = _dispatch_kevent_get_muxnote(ke);
+	dispatch_unote_linkage_t dul, dul_next;
+
+	TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) {
+		_dispatch_kevent_merge(_dispatch_unote_linkage_get_unote(dul), ke);
+	}
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_drain(dispatch_kevent_t ke)
+{
+	if (ke->filter == EVFILT_USER) {
+		_dispatch_kevent_mgr_debug("received", ke);
+		return;
+	}
+	_dispatch_kevent_debug("received", ke);
+	if (unlikely(ke->flags & EV_ERROR)) {
+		if (ke->filter == EVFILT_PROC && ke->data == ESRCH) {
+			// EVFILT_PROC may fail with ESRCH when the process exists but is a zombie
+			// <rdar://problem/5067725>. As a workaround, we simulate an exit event for
+			// any EVFILT_PROC with an invalid pid <rdar://problem/6626350>.
+			ke->flags &= ~(EV_ERROR | EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC);
+			ke->flags |= EV_ONESHOT;
+			ke->fflags = NOTE_EXIT;
+			ke->data = 0;
+			_dispatch_kevent_debug("synthetic NOTE_EXIT", ke);
+		} else {
+			_dispatch_debug("kevent[0x%llx]: handling error",
+					(unsigned long long)ke->udata);
+			return _dispatch_kevent_print_error(ke);
+		}
+	}
+	if (ke->filter == EVFILT_TIMER) {
+		return _dispatch_kevent_timer_drain(ke);
+	}
+
+#if HAVE_MACH
+	if (ke->filter == EVFILT_MACHPORT) {
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+		if (ke->udata == 0) {
+			return _dispatch_mach_kevent_portset_drain(ke);
+		}
+#endif
+		if (_dispatch_kevent_mach_msg_size(ke)) {
+			return _dispatch_kevent_mach_msg_drain(ke);
+		}
+	}
+#endif
+
+	if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) {
+		return _dispatch_kevent_merge_muxed(ke);
+	}
+	return _dispatch_kevent_merge(_dispatch_kevent_get_unote(ke), ke);
+}
+
+#pragma mark dispatch_kq
+
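+// Where guarded fds are available, the manager kqueue is created guarded so
+// that a stray close(2) or dup2(2) on its descriptor crashes the offender
+// instead of silently breaking the event loop.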
+#if DISPATCH_USE_MGR_THREAD
+DISPATCH_NOINLINE
+static int
+_dispatch_kq_create(const void *guard_ptr)
+{
+	static const dispatch_kevent_s kev = {
+		.ident = 1,
+		.filter = EVFILT_USER,
+		.flags = EV_ADD|EV_CLEAR,
+	};
+	int kqfd;
+
+	_dispatch_fork_becomes_unsafe();
+#if DISPATCH_USE_GUARDED_FD
+	guardid_t guard = (uintptr_t)guard_ptr;
+	kqfd = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP);
+#else
+	(void)guard_ptr;
+	kqfd = kqueue();
+#endif
+	if (kqfd == -1) {
+		int err = errno;
+		switch (err) {
+		case EMFILE:
+			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
+					"process is out of file descriptors");
+			break;
+		case ENFILE:
+			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
+					"system is out of file descriptors");
+			break;
+		case ENOMEM:
+			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
+					"kernel is out of memory");
+			break;
+		default:
+			DISPATCH_INTERNAL_CRASH(err, "kqueue() failure");
+			break;
+		}
+	}
+#if DISPATCH_USE_KEVENT_QOS
+	dispatch_assume_zero(kevent_qos(kqfd, &kev, 1, NULL, 0, NULL, NULL, 0));
+#else
+	dispatch_assume_zero(kevent(kqfd, &kev, 1, NULL, 0, NULL));
+#endif
+	return kqfd;
+}
+#endif
+
+static void
+_dispatch_kq_init(void *context DISPATCH_UNUSED)
+{
+	_dispatch_fork_becomes_unsafe();
+#if DISPATCH_USE_KEVENT_WORKQUEUE
+	_dispatch_kevent_workqueue_init();
+	if (_dispatch_kevent_workqueue_enabled) {
+		int r;
+		const dispatch_kevent_s kev[] = {
+			[0] = {
+				.ident = 1,
+				.filter = EVFILT_USER,
+				.flags = EV_ADD|EV_CLEAR,
+				.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
+			},
+			[1] = {
+				.ident = 1,
+				.filter = EVFILT_USER,
+				.fflags = NOTE_TRIGGER,
+			},
+		};
+retry:
+		r = kevent_qos(-1, kev, 2, NULL, 0, NULL, NULL,
+				KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE);
+		if (unlikely(r == -1)) {
+			int err = errno;
+			switch (err) {
+			case EINTR:
+				goto retry;
+			default:
+				DISPATCH_CLIENT_CRASH(err,
+						"Failed to initalize workqueue kevent");
+				break;
+			}
+		}
+		return;
+	}
+#endif // DISPATCH_USE_KEVENT_WORKQUEUE
+#if DISPATCH_USE_MGR_THREAD
+	_dispatch_kq = _dispatch_kq_create(&_dispatch_mgr_q);
+	_dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0);
+#endif // DISPATCH_USE_MGR_THREAD
+}
+
+DISPATCH_NOINLINE
+static int
+_dispatch_kq_update(dispatch_kevent_t ke, int n)
+{
+	static dispatch_once_t pred;
+	dispatch_once_f(&pred, NULL, _dispatch_kq_init);
+
+	dispatch_kevent_s kev_error[n];
+	int i, r;
+	int kqfd = _dispatch_kq;
+
+#if DISPATCH_DEBUG
+	for (i = 0; i < n; i++) {
+		if (ke[i].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
+			_dispatch_kevent_debug_n(NULL, ke + i, i, n);
+		}
+	}
+#endif
+#if DISPATCH_USE_KEVENT_QOS
+	unsigned int flags = KEVENT_FLAG_ERROR_EVENTS;
+	if (_dispatch_kevent_workqueue_enabled) {
+		flags |= KEVENT_FLAG_WORKQ;
+	}
+#else
+	for (i = 0; i < n; i++) {
+		// emulate KEVENT_FLAG_ERROR_EVENTS
+		ke[i].flags |= EV_RECEIPT;
+	}
+#endif
+retry:
+#if DISPATCH_USE_KEVENT_QOS
+	r = kevent_qos(kqfd, ke, n, kev_error, n, NULL, NULL, flags);
+#else
+	r = kevent(kqfd, ke, n, kev_error, n, NULL);
+#endif
+	if (unlikely(r == -1)) {
+		int err = errno;
+		switch (err) {
+		case EINTR:
+			goto retry;
+		case EBADF:
+			DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
+			break;
+		default:
+			(void)dispatch_assume_zero(err);
+			break;
+		}
+		return err;
+	}
+
+	for (i = 0, n = r, r = 0; i < n; i++) {
+		if ((kev_error[i].flags & EV_ERROR) && (r = (int)kev_error[i].data)) {
+			_dispatch_kevent_drain(&kev_error[i]);
+		}
+	}
+	return r;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline int
+_dispatch_kq_update_one(dispatch_kevent_t ke)
+{
+	return _dispatch_kq_update(ke, 1);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_kq_update_all(dispatch_kevent_t ke, int n)
+{
+	(void)_dispatch_kq_update(ke, n);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk,
+		uint16_t action)
+{
+	dispatch_unote_class_t du = _du._du;
+	dispatch_source_type_t dst = du->du_type;
+	uint16_t flags = dst->dst_flags | action;
+
+	if ((flags & EV_VANISHED) && !(flags & EV_ADD)) {
+		flags &= ~EV_VANISHED;
+	}
+	pthread_priority_t pp = _dispatch_priority_to_pp(du->du_priority);
+	*dk = (dispatch_kevent_s){
+		.ident  = du->du_ident,
+		.filter = dst->dst_filter,
+		.flags  = flags,
+		.udata  = (uintptr_t)du,
+		.fflags = du->du_fflags | dst->dst_fflags,
+		.data   = (typeof(dk->data))dst->dst_data,
+#if DISPATCH_USE_KEVENT_QOS
+		.qos    = (typeof(dk->qos))pp,
+#endif
+	};
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline int
+_dispatch_kq_deferred_find_slot(dispatch_deferred_items_t ddi,
+		int16_t filter, uint64_t ident, uint64_t udata)
+{
+	dispatch_kevent_t events = ddi->ddi_eventlist;
+	int i;
+
+	for (i = 0; i < ddi->ddi_nevents; i++) {
+		if (events[i].filter == filter && events[i].ident == ident &&
+				events[i].udata == udata) {
+			break;
+		}
+	}
+	return i;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_kevent_t
+_dispatch_kq_deferred_reuse_slot(dispatch_deferred_items_t ddi, int slot)
+{
+	if (unlikely(slot == ddi->ddi_maxevents)) {
+		_dispatch_deferred_items_set(NULL);
+		_dispatch_kq_update_all(ddi->ddi_eventlist, ddi->ddi_nevents);
+		_dispatch_deferred_items_set(ddi);
+		ddi->ddi_nevents = 1;
+		slot = 0;
+	} else if (slot == ddi->ddi_nevents) {
+		ddi->ddi_nevents++;
+	}
+	return ddi->ddi_eventlist + slot;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_kq_deferred_discard_slot(dispatch_deferred_items_t ddi, int slot)
+{
+	if (slot < ddi->ddi_nevents) {
+		int last = --ddi->ddi_nevents;
+		if (slot != last) {
+			ddi->ddi_eventlist[slot] = ddi->ddi_eventlist[last];
+		}
+	}
+}
+
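+// Deferred updates are coalesced into the per-thread deferred-items list and
+// flushed with a single kevent call, either when the list fills up or on the
+// next pass through the event loop.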
+DISPATCH_NOINLINE
+static void
+_dispatch_kq_deferred_update(dispatch_kevent_t ke)
+{
+	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
+
+	if (ddi) {
+		int slot = _dispatch_kq_deferred_find_slot(ddi,
+				ke->filter, ke->ident, ke->udata);
+		*_dispatch_kq_deferred_reuse_slot(ddi, slot) = *ke;
+		if (ke->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
+			_dispatch_kevent_debug("deferred", ke);
+		}
+	} else {
+		_dispatch_kq_update_one(ke);
+	}
+}
+
+DISPATCH_NOINLINE
+static int
+_dispatch_kq_immediate_update(dispatch_kevent_t ke)
+{
+	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
+	if (ddi) {
+		int slot = _dispatch_kq_deferred_find_slot(ddi,
+				ke->filter, ke->ident, ke->udata);
+		_dispatch_kq_deferred_discard_slot(ddi, slot);
+	}
+	return _dispatch_kq_update_one(ke);
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_kq_unote_update(dispatch_unote_t _du, uint16_t action_flags)
+{
+	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
+	dispatch_unote_class_t du = _du._du;
+	int r = 0;
+
+	if (action_flags & EV_ADD) {
+		// as soon as we register we may get an event delivery and it has to
+		// see this bit already set, else it will not unregister the kevent
+		du->du_registered = true;
+	}
+
+	if (ddi) {
+		int slot = _dispatch_kq_deferred_find_slot(ddi,
+				du->du_filter, du->du_ident, (uintptr_t)du);
+		if (slot < ddi->ddi_nevents) {
+			// <rdar://problem/26202376> when deleting and an enable is pending,
+			// we must merge EV_ENABLE to do an immediate deletion
+			action_flags |= (ddi->ddi_eventlist[slot].flags & EV_ENABLE);
+		}
+
+		if (!(action_flags & EV_ADD) && (action_flags & EV_ENABLE)) {
+			// can be deferred, so do it!
+			dispatch_kevent_t ke = _dispatch_kq_deferred_reuse_slot(ddi, slot);
+			_dispatch_kq_unote_set_kevent(du, ke, action_flags);
+			_dispatch_kevent_debug("deferred", ke);
+			goto done;
+		}
+
+		// get rid of the deferred item, if any; we can't wait
+		_dispatch_kq_deferred_discard_slot(ddi, slot);
+	}
+
+	if (action_flags) {
+		dispatch_kevent_s dk;
+		_dispatch_kq_unote_set_kevent(du, &dk, action_flags);
+		r = _dispatch_kq_update_one(&dk);
+	}
+
+done:
+	if (action_flags & EV_ADD) {
+		if (unlikely(r)) {
+			du->du_registered = false;
+		}
+		return r == 0;
+	}
+
+	if (action_flags & EV_DELETE) {
+		if (r == EINPROGRESS) {
+			return false;
+#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS
+		} else if (r == ENOENT) {
+			return false;
+#endif
+		}
+		du->du_registered = false;
+	}
+
+	dispatch_assume_zero(r);
+	return true;
+}
+
+#pragma mark dispatch_muxnote_t
+
+static void
+_dispatch_muxnotes_init(void *ctxt DISPATCH_UNUSED)
+{
+	uint32_t i;
+	for (i = 0; i < DSL_HASH_SIZE; i++) {
+		TAILQ_INIT(&_dispatch_sources[i]);
+	}
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline struct dispatch_muxnote_bucket_s *
+_dispatch_muxnote_bucket(uint64_t ident, int16_t filter)
+{
+	switch (filter) {
+#if HAVE_MACH
+	case EVFILT_MACHPORT:
+	case DISPATCH_EVFILT_MACH_NOTIFICATION:
+		ident = MACH_PORT_INDEX(ident);
+		break;
+#endif
+	case EVFILT_SIGNAL: // signo
+	case EVFILT_PROC: // pid_t
+	default: // fd
+		break;
+	}
+
+	dispatch_once_f(&_dispatch_muxnotes_pred, NULL, _dispatch_muxnotes_init);
+	return &_dispatch_sources[DSL_HASH((uintptr_t)ident)];
+}
+#define _dispatch_unote_muxnote_bucket(du) \
+	_dispatch_muxnote_bucket(du._du->du_ident, du._du->du_filter)
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_muxnote_t
+_dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb,
+		uint64_t ident, int16_t filter)
+{
+	dispatch_muxnote_t dmn;
+	TAILQ_FOREACH(dmn, dmb, dmn_list) {
+		if (dmn->dmn_kev.ident == ident && dmn->dmn_kev.filter == filter) {
+			break;
+		}
+	}
+	return dmn;
+}
+#define _dispatch_unote_muxnote_find(dmb, du) \
+	_dispatch_muxnote_find(dmb, du._du->du_ident, du._du->du_filter)
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_muxnote_t
+_dispatch_mach_muxnote_find(mach_port_t name, int16_t filter)
+{
+	struct dispatch_muxnote_bucket_s *dmb;
+	dmb = _dispatch_muxnote_bucket(name, filter);
+	return _dispatch_muxnote_find(dmb, name, filter);
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_unote_register_muxed(dispatch_unote_t du)
+{
+	struct dispatch_muxnote_bucket_s *dmb = _dispatch_unote_muxnote_bucket(du);
+	dispatch_muxnote_t dmn = _dispatch_unote_muxnote_find(dmb, du);
+	bool installed = true;
+
+	if (dmn) {
+		uint32_t flags = du._du->du_fflags & ~dmn->dmn_kev.fflags;
+		if (flags) {
+			dmn->dmn_kev.fflags |= flags;
+			if (unlikely(du._du->du_type->dst_update_mux)) {
+				installed = du._du->du_type->dst_update_mux(dmn);
+			} else {
+				installed = _dispatch_kq_immediate_update(&dmn->dmn_kev) == 0;
+			}
+			if (!installed) dmn->dmn_kev.fflags &= ~flags;
+		}
+	} else {
+		dmn = _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s));
+		TAILQ_INIT(&dmn->dmn_unotes_head);
+		_dispatch_kq_unote_set_kevent(du, &dmn->dmn_kev, EV_ADD | EV_ENABLE);
+#if DISPATCH_USE_KEVENT_QOS
+		dmn->dmn_kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+#endif
+		dmn->dmn_kev.udata = (uintptr_t)dmn | DISPATCH_KEVENT_MUXED_MARKER;
+		if (unlikely(du._du->du_type->dst_update_mux)) {
+			installed = du._du->du_type->dst_update_mux(dmn);
+		} else {
+			installed = _dispatch_kq_immediate_update(&dmn->dmn_kev) == 0;
+		}
+		if (installed) {
+			TAILQ_INSERT_TAIL(dmb, dmn, dmn_list);
+			dmn->dmn_kev.flags &= ~(EV_ADD | EV_VANISHED);
+		} else {
+			free(dmn);
+		}
+	}
+
+	if (installed) {
+		dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+		TAILQ_INSERT_TAIL(&dmn->dmn_unotes_head, dul, du_link);
+		dul->du_muxnote = dmn;
+
+		if (du._du->du_filter == DISPATCH_EVFILT_MACH_NOTIFICATION) {
+			bool armed = DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev);
+			os_atomic_store2o(du._dmsr, dmsr_notification_armed, armed, relaxed);
+		}
+	}
+	return du._du->du_registered = installed;
+}
+
+bool
+_dispatch_unote_register(dispatch_unote_t du, dispatch_priority_t pri)
+{
+	dispatch_assert(!du._du->du_registered);
+	du._du->du_priority = pri;
+	switch (du._du->du_filter) {
+	case DISPATCH_EVFILT_CUSTOM_OR:
+	case DISPATCH_EVFILT_CUSTOM_ADD:
+		return du._du->du_registered = true;
+	}
+	if (!du._du->du_is_direct) {
+		return _dispatch_unote_register_muxed(du);
+	}
+	return _dispatch_kq_unote_update(du, EV_ADD | EV_ENABLE);
+}
+
+void
+_dispatch_unote_resume(dispatch_unote_t du)
+{
+	dispatch_assert(du._du->du_registered);
+
+	if (du._du->du_is_direct) {
+		_dispatch_kq_unote_update(du, EV_ENABLE);
+	} else if (unlikely(du._du->du_type->dst_update_mux)) {
+		dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+		du._du->du_type->dst_update_mux(dul->du_muxnote);
+	} else {
+		dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+		_dispatch_kq_deferred_update(&dul->du_muxnote->dmn_kev);
+	}
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_unote_unregister_muxed(dispatch_unote_t du, uint32_t flags)
+{
+	dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
+	dispatch_muxnote_t dmn = dul->du_muxnote;
+	bool update = false, dispose = false;
+
+	if (dmn->dmn_kev.filter == DISPATCH_EVFILT_MACH_NOTIFICATION) {
+		os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed);
+	}
+	du._du->du_registered = false;
+	TAILQ_REMOVE(&dmn->dmn_unotes_head, dul, du_link);
+	_TAILQ_TRASH_ENTRY(dul, du_link);
+	dul->du_muxnote = NULL;
+
+	if (TAILQ_EMPTY(&dmn->dmn_unotes_head)) {
+		dmn->dmn_kev.flags |= EV_DELETE;
+		update = dispose = true;
+	} else {
+		uint32_t fflags = du._du->du_type->dst_fflags;
+		TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) {
+			du = _dispatch_unote_linkage_get_unote(dul);
+			fflags |= du._du->du_fflags;
+		}
+		if (dmn->dmn_kev.fflags & ~fflags) {
+			dmn->dmn_kev.fflags &= fflags;
+			update = true;
+		}
+	}
+
+	if (update && !(flags & DU_UNREGISTER_ALREADY_DELETED)) {
+		if (unlikely(du._du->du_type->dst_update_mux)) {
+			dispatch_assume(du._du->du_type->dst_update_mux(dmn));
+		} else {
+			_dispatch_kq_deferred_update(&dmn->dmn_kev);
+		}
+	}
+	if (dispose) {
+		struct dispatch_muxnote_bucket_s *dmb;
+		dmb = _dispatch_muxnote_bucket(dmn->dmn_kev.ident, dmn->dmn_kev.filter);
+		TAILQ_REMOVE(dmb, dmn, dmn_list);
+		free(dmn);
+	}
+	return true;
+}
+
+bool
+_dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags)
+{
+	switch (du._du->du_filter) {
+	case DISPATCH_EVFILT_CUSTOM_OR:
+	case DISPATCH_EVFILT_CUSTOM_ADD:
+		du._du->du_registered = false;
+		return true;
+	}
+	if (du._du->du_registered) {
+		if (!du._du->du_is_direct) {
+			return _dispatch_unote_unregister_muxed(du, flags);
+		}
+		uint16_t action_flags;
+		if (flags & DU_UNREGISTER_ALREADY_DELETED) {
+			action_flags = 0;
+		} else if (flags & DU_UNREGISTER_IMMEDIATE_DELETE) {
+			action_flags = EV_DELETE | EV_ENABLE;
+		} else {
+			action_flags = EV_DELETE;
+		}
+		return _dispatch_kq_unote_update(du, action_flags);
+	}
+	return true;
+}
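+
+/*
+ * Summary of the action_flags mapping above:
+ *   DU_UNREGISTER_ALREADY_DELETED  -> 0 (the knote is already gone,
+ *                                    only bookkeeping remains)
+ *   DU_UNREGISTER_IMMEDIATE_DELETE -> EV_DELETE|EV_ENABLE (requests an
+ *                                    immediate, non-deferred delete)
+ *   default                        -> EV_DELETE (may be coalesced into a
+ *                                    deferred kqueue update)
+ */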
+
+#pragma mark -
+#pragma mark dispatch_loop
+
+#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
+static void _dispatch_memorypressure_init(void);
+#else
+#define _dispatch_memorypressure_init()
+#endif
+static bool _dispatch_timers_force_max_leeway;
+
+void
+_dispatch_event_loop_atfork_child(void)
+{
+#if HAVE_MACH
+	_dispatch_mach_host_port_pred = 0;
+	_dispatch_mach_host_port = MACH_VOUCHER_NULL;
+#endif
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_event_loop_init(void)
+{
+	if (unlikely(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) {
+		_dispatch_timers_force_max_leeway = true;
+	}
+	_dispatch_memorypressure_init();
+	_voucher_activity_debug_channel_init();
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_event_loop_poke(void)
+{
+	dispatch_kevent_s ke = {
+		.ident  = 1,
+		.filter = EVFILT_USER,
+		.fflags = NOTE_TRIGGER,
+	};
+	_dispatch_kq_deferred_update(&ke);
+}
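+
+/*
+ * Minimal standalone analogue of the poke above (illustrative sketch; assumes
+ * an EVFILT_USER knote with ident 1 is already installed on kqfd):
+ *
+ *   struct kevent ke;
+ *   EV_SET(&ke, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
+ *   (void)kevent(kqfd, &ke, 1, NULL, 0, NULL); // wakes a thread sleeping
+ *                                              // in kevent() on this kqueue
+ */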
+
+#if DISPATCH_USE_MGR_THREAD
+DISPATCH_NOINLINE
+void
+_dispatch_event_loop_drain(dispatch_deferred_items_t ddi, bool poll)
+{
+	int r, n = ddi->ddi_nevents;
+	dispatch_assert((size_t)ddi->ddi_maxevents < countof(ddi->ddi_eventlist));
+	int kqfd = _dispatch_kq;
+
+#if DISPATCH_DEBUG
+	int i;
+	for (i = 0; i < n; i++) {
+		dispatch_kevent_t ke = ddi->ddi_eventlist + i;
+		if (ke->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
+			_dispatch_kevent_debug_n(NULL, ke, i, n);
+		}
+	}
+#endif
+#if DISPATCH_USE_KEVENT_QOS
+	unsigned int flags = KEVENT_FLAG_NONE;
+	if (poll) flags |= KEVENT_FLAG_IMMEDIATE;
+#else
+	const struct timespec timeout_immediately = {}, *timeout = NULL;
+	if (poll) timeout = &timeout_immediately;
+#endif
+ retry:
+#if DISPATCH_USE_KEVENT_QOS
+	r = kevent_qos(kqfd, ddi->ddi_eventlist, n,
+			ddi->ddi_eventlist + ddi->ddi_maxevents, 1, NULL, NULL, flags);
+#else
+	r = kevent(kqfd, ddi->ddi_eventlist, n,
+			ddi->ddi_eventlist + ddi->ddi_maxevents, 1, timeout);
+#endif
+	if (unlikely(r == -1)) {
+		int err = errno;
+		switch (err) {
+		case EINTR:
+			goto retry;
+		case EBADF:
+			DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
+			break;
+		default:
+			(void)dispatch_assume_zero(err);
+			break;
+		}
+	}
+	ddi->ddi_nevents = 0;
+	if (r > 0) {
+		dispatch_kevent_t ke = ddi->ddi_eventlist + ddi->ddi_maxevents;
+		_dispatch_kevent_drain(ke);
+	}
+}
+#endif
+
+void
+_dispatch_event_loop_merge(dispatch_kevent_t ke, int n)
+{
+	while (n-- > 0) {
+		_dispatch_kevent_drain(ke++);
+	}
+}
+
+void
+_dispatch_event_loop_update(dispatch_kevent_t ke, int n)
+{
+	_dispatch_kq_update_all(ke, n);
+}
+
+#define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8)
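+// e.g. with tidx == 3: ident == 0xffffffffffffff03, and the drain below
+// recovers it via (ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) == 3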
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_timer_drain(dispatch_kevent_t ke)
+{
+	dispatch_assert(ke->data > 0);
+	dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) ==
+			DISPATCH_KEVENT_TIMEOUT_IDENT_MASK);
+	uint32_t tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK;
+
+	dispatch_assert(tidx < DISPATCH_TIMER_COUNT);
+	_dispatch_timers_expired = true;
+	_dispatch_timers_processing_mask |= 1 << tidx;
+	_dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
+#if DISPATCH_USE_DTRACE
+	_dispatch_timers_will_wake |= 1 << DISPATCH_TIMER_QOS(tidx);
+#endif
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_event_loop_timer_program(uint32_t tidx,
+		uint64_t target, uint64_t leeway, uint16_t action)
+{
+	dispatch_kevent_s ke = {
+		.ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK | tidx,
+		.filter = EVFILT_TIMER,
+		.flags = action | EV_ONESHOT,
+		.fflags = _dispatch_timer_index_to_fflags[tidx],
+		.data = (int64_t)target,
+		.udata = (uintptr_t)&_dispatch_timers_heap[tidx],
+#if DISPATCH_HAVE_TIMER_COALESCING
+		.ext[1] = leeway,
+#endif
+#if DISPATCH_USE_KEVENT_QOS
+		.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
+#endif
+	};
+
+	_dispatch_kq_deferred_update(&ke);
+}
+
+void
+_dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range,
+		dispatch_clock_now_cache_t nows)
+{
+	if (unlikely(_dispatch_timers_force_max_leeway)) {
+		range.delay += range.leeway;
+		range.leeway = 0;
+	}
+#if HAVE_MACH
+	if (DISPATCH_TIMER_CLOCK(tidx) == DISPATCH_CLOCK_WALL) {
+		_dispatch_mach_host_calendar_change_register();
+	}
+#endif
+
+	// <rdar://problem/13186331> EVFILT_TIMER NOTE_ABSOLUTE always expects
+	// a WALL deadline
+	uint64_t now = _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows);
+	_dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED;
+	_dispatch_event_loop_timer_program(tidx, now + range.delay, range.leeway,
+			EV_ADD | EV_ENABLE);
+}
+
+void
+_dispatch_event_loop_timer_delete(uint32_t tidx)
+{
+	_dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
+	_dispatch_event_loop_timer_program(tidx, 0, 0, EV_DELETE);
+}
+
+#pragma mark kevent specific sources
+
+const dispatch_source_type_s _dispatch_source_type_proc = {
+	.dst_kind       = "proc",
+	.dst_filter     = EVFILT_PROC,
+	.dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR,
+	.dst_fflags     = NOTE_EXIT, // rdar://16655831
+	.dst_mask       = NOTE_EXIT|NOTE_FORK|NOTE_EXEC
+#if HAVE_DECL_NOTE_SIGNAL
+			|NOTE_SIGNAL
+#endif
+#if HAVE_DECL_NOTE_REAP
+			|NOTE_REAP
+#endif
+			,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_unote_create_with_handle,
+	.dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+const dispatch_source_type_s _dispatch_source_type_vnode = {
+	.dst_kind       = "vnode",
+	.dst_filter     = EVFILT_VNODE,
+	.dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
+	.dst_mask       = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK
+			|NOTE_RENAME|NOTE_FUNLOCK
+#if HAVE_DECL_NOTE_REVOKE
+			|NOTE_REVOKE
+#endif
+#if HAVE_DECL_NOTE_NONE
+			|NOTE_NONE
+#endif
+			,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_unote_create_with_fd,
+	.dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+const dispatch_source_type_s _dispatch_source_type_vfs = {
+	.dst_kind       = "vfs",
+	.dst_filter     = EVFILT_FS,
+	.dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR,
+	.dst_mask       = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT
+			|VQ_DEAD|VQ_ASSIST|VQ_NOTRESPLOCK
+#if HAVE_DECL_VQ_UPDATE
+			|VQ_UPDATE
+#endif
+#if HAVE_DECL_VQ_VERYLOWDISK
+			|VQ_VERYLOWDISK
+#endif
+#if HAVE_DECL_VQ_QUOTA
+			|VQ_QUOTA
+#endif
+			,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_unote_create_without_handle,
+	.dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+#ifdef EVFILT_SOCK
+const dispatch_source_type_s _dispatch_source_type_sock = {
+	.dst_kind       = "sock",
+	.dst_filter     = EVFILT_SOCK,
+	.dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
+	.dst_mask       = NOTE_CONNRESET|NOTE_READCLOSED|NOTE_WRITECLOSED
+			|NOTE_TIMEOUT|NOTE_NOSRCADDR|NOTE_IFDENIED|NOTE_SUSPEND|NOTE_RESUME
+			|NOTE_KEEPALIVE
+#ifdef NOTE_ADAPTIVE_WTIMO
+			|NOTE_ADAPTIVE_WTIMO|NOTE_ADAPTIVE_RTIMO
+#endif
+#ifdef NOTE_CONNECTED
+			|NOTE_CONNECTED|NOTE_DISCONNECTED|NOTE_CONNINFO_UPDATED
+#endif
+#ifdef NOTE_NOTIFY_ACK
+			|NOTE_NOTIFY_ACK
+#endif
+		,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_unote_create_with_fd,
+	.dst_merge_evt  = _dispatch_source_merge_evt,
+};
+#endif // EVFILT_SOCK
+
+#if DISPATCH_USE_MEMORYSTATUS
+
+#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
+#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \
+		DISPATCH_MEMORYPRESSURE_NORMAL | \
+		DISPATCH_MEMORYPRESSURE_WARN | \
+		DISPATCH_MEMORYPRESSURE_CRITICAL | \
+		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
+		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL)
+#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \
+		DISPATCH_MEMORYPRESSURE_WARN | \
+		DISPATCH_MEMORYPRESSURE_CRITICAL | \
+		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
+		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL)
+
+static void
+_dispatch_memorypressure_handler(void *context)
+{
+	dispatch_source_t ds = context;
+	unsigned long memorypressure = dispatch_source_get_data(ds);
+
+	if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) {
+		_dispatch_memory_warn = false;
+		_dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT;
+#if VOUCHER_USE_MACH_VOUCHER
+		if (_firehose_task_buffer) {
+			firehose_buffer_clear_bank_flags(_firehose_task_buffer,
+					FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
+		}
+#endif
+	}
+	if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) {
+		_dispatch_memory_warn = true;
+		_dispatch_continuation_cache_limit =
+				DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN;
+#if VOUCHER_USE_MACH_VOUCHER
+		if (_firehose_task_buffer) {
+			firehose_buffer_set_bank_flags(_firehose_task_buffer,
+					FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
+		}
+#endif
+	}
+	memorypressure &= DISPATCH_MEMORYPRESSURE_MALLOC_MASK;
+	if (memorypressure) {
+		malloc_memory_event_handler(memorypressure);
+	}
+}
+
+static void
+_dispatch_memorypressure_init(void)
+{
+	dispatch_source_t ds = dispatch_source_create(
+			DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
+			DISPATCH_MEMORYPRESSURE_SOURCE_MASK,
+			_dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true));
+	dispatch_set_context(ds, ds);
+	dispatch_source_set_event_handler_f(ds, _dispatch_memorypressure_handler);
+	dispatch_activate(ds);
+}
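+
+/*
+ * For reference, the public-API analogue of the internal source above
+ * (client-side sketch; `q` stands for an assumed client-owned queue):
+ *
+ *   dispatch_source_t ds = dispatch_source_create(
+ *           DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
+ *           DISPATCH_MEMORYPRESSURE_WARN|DISPATCH_MEMORYPRESSURE_CRITICAL, q);
+ *   dispatch_source_set_event_handler(ds, ^{
+ *       unsigned long pressure = dispatch_source_get_data(ds);
+ *       // e.g. trim caches when (pressure & DISPATCH_MEMORYPRESSURE_WARN)
+ *   });
+ *   dispatch_activate(ds);
+ */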
+#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE
+
+#if TARGET_OS_SIMULATOR // rdar://problem/9219483
+static int _dispatch_ios_simulator_memory_warnings_fd = -1;
+static void
+_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED)
+{
+	char *e = getenv("SIMULATOR_MEMORY_WARNINGS");
+	if (!e) return;
+	_dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY);
+	if (_dispatch_ios_simulator_memory_warnings_fd == -1) {
+		(void)dispatch_assume_zero(errno);
+	}
+}
+
+static dispatch_unote_t
+_dispatch_source_memorypressure_create(dispatch_source_type_t dst,
+	uintptr_t handle, unsigned long mask)
+{
+	static dispatch_once_t pred;
+	dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init);
+
+	if (handle) {
+		return DISPATCH_UNOTE_NULL;
+	}
+
+	dst = &_dispatch_source_type_vnode;
+	handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd;
+	mask = NOTE_ATTRIB;
+
+	dispatch_unote_t du = dux_create(dst, handle, mask);
+	if (du._du) {
+		du._du->du_memorypressure_override = true;
+	}
+	return du;
+}
+#endif // TARGET_OS_SIMULATOR
+
+const dispatch_source_type_s _dispatch_source_type_memorypressure = {
+	.dst_kind       = "memorystatus",
+	.dst_filter     = EVFILT_MEMORYSTATUS,
+	.dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH,
+	.dst_mask       = NOTE_MEMORYSTATUS_PRESSURE_NORMAL
+			|NOTE_MEMORYSTATUS_PRESSURE_WARN|NOTE_MEMORYSTATUS_PRESSURE_CRITICAL
+			|NOTE_MEMORYSTATUS_LOW_SWAP|NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
+			|NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+#if TARGET_OS_SIMULATOR
+	.dst_create     = _dispatch_source_memorypressure_create,
+	// redirected to _dispatch_source_type_vnode
+#else
+	.dst_create     = _dispatch_unote_create_without_handle,
+	.dst_merge_evt  = _dispatch_source_merge_evt,
+#endif
+};
+
+static dispatch_unote_t
+_dispatch_source_vm_create(dispatch_source_type_t dst DISPATCH_UNUSED,
+		uintptr_t handle, unsigned long mask DISPATCH_UNUSED)
+{
+	// Map legacy vm pressure to memorypressure warning rdar://problem/15907505
+	dispatch_unote_t du = dux_create(&_dispatch_source_type_memorypressure,
+			handle, NOTE_MEMORYSTATUS_PRESSURE_WARN);
+	if (du._du) {
+		du._du->du_vmpressure_override = 1;
+	}
+	return du;
+}
+
+const dispatch_source_type_s _dispatch_source_type_vm = {
+	.dst_kind       = "vm (deprecated)",
+	.dst_filter     = EVFILT_MEMORYSTATUS,
+	.dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH,
+	.dst_mask       = NOTE_VM_PRESSURE,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_source_vm_create,
+	// redirected to _dispatch_source_type_memorypressure
+};
+#endif // DISPATCH_USE_MEMORYSTATUS
+
+#pragma mark mach send / notifications
+#if HAVE_MACH
+
+// Flags for all notifications that are registered/unregistered when a
+// send-possible notification is requested/delivered
+#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \
+		DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED)
+
+static void _dispatch_mach_host_notify_update(void *context);
+
+static mach_port_t _dispatch_mach_notify_port;
+static dispatch_source_t _dispatch_mach_notify_source;
+
+static void
+_dispatch_timers_calendar_change(void)
+{
+	uint32_t qos;
+
+	// calendar change may have gone past the wallclock deadline
+	_dispatch_timers_expired = true;
+	for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) {
+		_dispatch_timers_processing_mask |=
+				1 << DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, qos);
+	}
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr)
+{
+	mig_reply_error_t reply;
+	dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union
+		__ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem));
+	dispatch_assert(sizeof(mig_reply_error_t) <
+			DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE);
+	boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head);
+	if (!success && reply.RetCode == MIG_BAD_ID &&
+			(hdr->msgh_id == HOST_CALENDAR_SET_REPLYID ||
+			 hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) {
+		_dispatch_debug("calendar-change notification");
+		_dispatch_timers_calendar_change();
+		_dispatch_mach_host_notify_update(NULL);
+		success = TRUE;
+		reply.RetCode = KERN_SUCCESS;
+	}
+	if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) {
+		(void)dispatch_assume_zero(reply.RetCode);
+	}
+	if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) {
+		mach_msg_destroy(hdr);
+	}
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED)
+{
+	kern_return_t kr;
+#if HAVE_MACH_PORT_CONSTRUCT
+	mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT };
+#ifdef __LP64__
+	const mach_port_context_t guard = 0xfeed09071f1ca7edull;
+#else
+	const mach_port_context_t guard = 0xff1ca7edull;
+#endif
+	kr = mach_port_construct(mach_task_self(), &opts, guard,
+			&_dispatch_mach_notify_port);
+#else
+	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
+			&_dispatch_mach_notify_port);
+#endif
+	DISPATCH_VERIFY_MIG(kr);
+	if (unlikely(kr)) {
+		DISPATCH_CLIENT_CRASH(kr,
+				"mach_port_construct() failed: cannot create receive right");
+	}
+
+	static const struct dispatch_continuation_s dc = {
+		.dc_func = (void*)_dispatch_mach_notify_source_invoke,
+	};
+	_dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv(
+			_dispatch_mach_notify_port, &dc);
+	dispatch_assert(_dispatch_mach_notify_source);
+	dispatch_activate(_dispatch_mach_notify_source);
+}
+
+static void
+_dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED)
+{
+	kern_return_t kr;
+	mach_port_t mp, mhp = mach_host_self();
+	kr = host_get_host_port(mhp, &mp);
+	DISPATCH_VERIFY_MIG(kr);
+	if (likely(!kr)) {
+		// mach_host_self returned the HOST_PRIV port
+		kr = mach_port_deallocate(mach_task_self(), mhp);
+		DISPATCH_VERIFY_MIG(kr);
+		mhp = mp;
+	} else if (kr != KERN_INVALID_ARGUMENT) {
+		(void)dispatch_assume_zero(kr);
+	}
+	if (unlikely(!mhp)) {
+		DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port");
+	}
+	_dispatch_mach_host_port = mhp;
+}
+
+mach_port_t
+_dispatch_get_mach_host_port(void)
+{
+	dispatch_once_f(&_dispatch_mach_host_port_pred, NULL,
+			_dispatch_mach_host_port_init);
+	return _dispatch_mach_host_port;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline mach_port_t
+_dispatch_get_mach_notify_port(void)
+{
+	static dispatch_once_t pred;
+	dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init);
+	return _dispatch_mach_notify_port;
+}
+
+static void
+_dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED)
+{
+	static int notify_type = HOST_NOTIFY_CALENDAR_SET;
+	kern_return_t kr;
+	_dispatch_debug("registering for calendar-change notification");
+retry:
+	kr = host_request_notification(_dispatch_get_mach_host_port(),
+			notify_type, _dispatch_get_mach_notify_port());
+	// Fall back when the newer _SET variant is unsupported; the older
+	// CALENDAR_CHANGE notification fires strictly more often
+	if (kr == KERN_INVALID_ARGUMENT &&
+			notify_type != HOST_NOTIFY_CALENDAR_CHANGE) {
+		notify_type = HOST_NOTIFY_CALENDAR_CHANGE;
+		goto retry;
+	}
+	DISPATCH_VERIFY_MIG(kr);
+	(void)dispatch_assume_zero(kr);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mach_host_calendar_change_register(void)
+{
+	static dispatch_once_t pred;
+	dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update);
+}
+
+static kern_return_t
+_dispatch_mach_notify_update(dispatch_muxnote_t dmn, uint32_t new_flags,
+		uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid,
+		mach_port_mscount_t notify_sync)
+{
+	mach_port_t previous, port = (mach_port_t)dmn->dmn_kev.ident;
+	typeof(dmn->dmn_kev.data) prev = dmn->dmn_kev.data;
+	kern_return_t kr, krr = 0;
+
+	// Update notification registration state.
+	dmn->dmn_kev.data |= (new_flags | dmn->dmn_kev.fflags) & mask;
+	dmn->dmn_kev.data &= ~(del_flags & mask);
+
+	_dispatch_debug_machport(port);
+	if ((dmn->dmn_kev.data & mask) && !(prev & mask)) {
+		_dispatch_debug("machport[0x%08x]: registering for send-possible "
+				"notification", port);
+		previous = MACH_PORT_NULL;
+		krr = mach_port_request_notification(mach_task_self(), port,
+				notify_msgid, notify_sync, _dispatch_get_mach_notify_port(),
+				MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
+		DISPATCH_VERIFY_MIG(krr);
+
+		switch (krr) {
+		case KERN_INVALID_NAME:
+		case KERN_INVALID_RIGHT:
+			// Suppress errors & clear registration state
+			dmn->dmn_kev.data &= ~mask;
+			break;
+		default:
+			// We don't expect any other errors from Mach; log any that occur
+			if (dispatch_assume_zero(krr)) {
+				// log the error & clear registration state
+				dmn->dmn_kev.data &= ~mask;
+			} else if (dispatch_assume_zero(previous)) {
+				// Another subsystem has beat libdispatch to requesting the
+				// specified Mach notification on this port. We should
+				// technically cache the previous port and message it when the
+				// kernel messages our port. Or we can just say screw those
+				// subsystems and deallocate the previous port.
+				// They should adopt libdispatch :-P
+				kr = mach_port_deallocate(mach_task_self(), previous);
+				DISPATCH_VERIFY_MIG(kr);
+				(void)dispatch_assume_zero(kr);
+				previous = MACH_PORT_NULL;
+			}
+		}
+	} else if (!(dmn->dmn_kev.data & mask) && (prev & mask)) {
+		_dispatch_debug("machport[0x%08x]: unregistering for send-possible "
+				"notification", port);
+		previous = MACH_PORT_NULL;
+		kr = mach_port_request_notification(mach_task_self(), port,
+				notify_msgid, notify_sync, MACH_PORT_NULL,
+				MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous);
+		DISPATCH_VERIFY_MIG(kr);
+
+		switch (kr) {
+		case KERN_INVALID_NAME:
+		case KERN_INVALID_RIGHT:
+		case KERN_INVALID_ARGUMENT:
+			break;
+		default:
+			if (dispatch_assume_zero(kr)) {
+				// log the error
+			}
+		}
+	} else {
+		return 0;
+	}
+	if (unlikely(previous)) {
+		// the kernel has not consumed the send-once right yet
+		(void)dispatch_assume_zero(
+				_dispatch_send_consume_send_once_right(previous));
+	}
+	return krr;
+}
+
+static bool
+_dispatch_kevent_mach_notify_resume(dispatch_muxnote_t dmn, uint32_t new_flags,
+		uint32_t del_flags)
+{
+	kern_return_t kr = KERN_SUCCESS;
+	dispatch_assert_zero(new_flags & del_flags);
+	if ((new_flags & _DISPATCH_MACH_SP_FLAGS) ||
+			(del_flags & _DISPATCH_MACH_SP_FLAGS)) {
+		// Requesting a (delayed) non-sync send-possible notification
+		// registers for both immediate dead-name notification and delayed-arm
+		// send-possible notification for the port.
+		// The send-possible notification is armed when a mach_msg() with
+		// MACH_SEND_NOTIFY to the port times out.
+		// If send-possible is unavailable, fall back to immediate dead-name
+		// registration rdar://problem/2527840&9008724
+		kr = _dispatch_mach_notify_update(dmn, new_flags, del_flags,
+				_DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE,
+				MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME);
+	}
+	return kr == KERN_SUCCESS;
+}
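+
+/*
+ * Registration summary for _dispatch_mach_notify_update above: arming uses
+ * mach_port_request_notification() with MACH_MSG_TYPE_MAKE_SEND_ONCE aimed
+ * at the shared notify port; disarming passes MACH_PORT_NULL with
+ * MACH_MSG_TYPE_MOVE_SEND_ONCE so the kernel gives back any still-armed
+ * send-once right. On platforms where MACH_NOTIFY_SEND_POSSIBLE is not
+ * distinct from MACH_NOTIFY_DEAD_NAME, the notify_sync argument above turns
+ * the request into an immediate dead-name registration.
+ */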
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_notify_merge(mach_port_t name, uint32_t data, bool final)
+{
+	dispatch_unote_linkage_t dul, dul_next;
+	dispatch_muxnote_t dmn;
+
+	_dispatch_debug_machport(name);
+	dmn = _dispatch_mach_muxnote_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION);
+	if (!dmn) {
+		return;
+	}
+
+	dmn->dmn_kev.data &= ~_DISPATCH_MACH_SP_FLAGS;
+	if (!final) {
+		// Re-register for notification before delivery
+		final = !_dispatch_kevent_mach_notify_resume(dmn, data, 0);
+	}
+
+	uint32_t flags = final ? EV_ONESHOT : EV_ENABLE;
+	DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = 0;
+	TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) {
+		dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
+		os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed);
+		dux_merge_evt(du._du, flags, (data & du._du->du_fflags), 0);
+		if (!dul_next || DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev)) {
+			// stop: the current merge was the last in the list (dmn might
+			// have been freed), or it re-armed the notification
+			break;
+		}
+	}
+}
+
+kern_return_t
+_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED,
+		mach_port_name_t name)
+{
+#if DISPATCH_DEBUG
+	_dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x "
+			"deleted prematurely", name);
+#endif
+	_dispatch_debug_machport(name);
+	_dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true);
+	return KERN_SUCCESS;
+}
+
+kern_return_t
+_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED,
+		mach_port_name_t name)
+{
+	kern_return_t kr;
+
+	_dispatch_debug("machport[0x%08x]: dead-name notification", name);
+	_dispatch_debug_machport(name);
+	_dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, true);
+
+	// the act of receiving a dead name notification allocates a dead-name
+	// right that must be deallocated
+	kr = mach_port_deallocate(mach_task_self(), name);
+	DISPATCH_VERIFY_MIG(kr);
+	//(void)dispatch_assume_zero(kr);
+	return KERN_SUCCESS;
+}
+
+kern_return_t
+_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED,
+		mach_port_name_t name)
+{
+	_dispatch_debug("machport[0x%08x]: send-possible notification", name);
+	_dispatch_debug_machport(name);
+	_dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false);
+	return KERN_SUCCESS;
+}
+
+void
+_dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr)
+{
+	dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(dmsr)->du_muxnote;
+	dispatch_unote_linkage_t dul;
+	dispatch_unote_t du;
+
+	if (!dmsr->du_registered) {
+		return;
+	}
+
+	DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = true;
+	TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) {
+		du = _dispatch_unote_linkage_get_unote(dul);
+		os_atomic_store2o(du._dmsr, dmsr_notification_armed, true, relaxed);
+	}
+}
+
+static dispatch_unote_t
+_dispatch_source_mach_send_create(dispatch_source_type_t dst,
+	uintptr_t handle, unsigned long mask)
+{
+	if (!mask) {
+		// Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD
+		mask = DISPATCH_MACH_SEND_DEAD;
+	}
+	if (!handle) {
+		handle = MACH_PORT_DEAD; // <rdar://problem/27651332>
+	}
+	return _dispatch_unote_create_with_handle(dst, handle, mask);
+}
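+
+/*
+ * Client-side sketch (public API, illustrative; `send_right` and `q` are
+ * assumed client values): watching a send right for dead-name delivery,
+ * relying on the (mask == 0) default preserved above:
+ *
+ *   dispatch_source_t ds = dispatch_source_create(
+ *           DISPATCH_SOURCE_TYPE_MACH_SEND, send_right, 0, q);
+ *   dispatch_source_set_event_handler(ds, ^{
+ *       // DISPATCH_MACH_SEND_DEAD: the receive right was destroyed
+ *   });
+ *   dispatch_activate(ds);
+ */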
+
+static bool
+_dispatch_mach_send_update(dispatch_muxnote_t dmn)
+{
+	if (dmn->dmn_kev.flags & EV_DELETE) {
+		return _dispatch_kevent_mach_notify_resume(dmn, 0, dmn->dmn_kev.fflags);
+	} else {
+		return _dispatch_kevent_mach_notify_resume(dmn, dmn->dmn_kev.fflags, 0);
+	}
+}
+
+const dispatch_source_type_s _dispatch_source_type_mach_send = {
+	.dst_kind       = "mach_send",
+	.dst_filter     = DISPATCH_EVFILT_MACH_NOTIFICATION,
+	.dst_flags      = EV_CLEAR,
+	.dst_mask       = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_source_mach_send_create,
+	.dst_update_mux = _dispatch_mach_send_update,
+	.dst_merge_evt  = _dispatch_source_merge_evt,
+};
+
+static dispatch_unote_t
+_dispatch_mach_send_create(dispatch_source_type_t dst,
+	uintptr_t handle, unsigned long mask)
+{
+	// created without a handle because the mach code will set the ident later
+	dispatch_unote_t du =
+			_dispatch_unote_create_without_handle(dst, handle, mask);
+	if (du._dmsr) {
+		du._dmsr->dmsr_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED;
+		TAILQ_INIT(&du._dmsr->dmsr_replies);
+	}
+	return du;
+}
+
+const dispatch_source_type_s _dispatch_mach_type_send = {
+	.dst_kind       = "mach_send (mach)",
+	.dst_filter     = DISPATCH_EVFILT_MACH_NOTIFICATION,
+	.dst_flags      = EV_CLEAR,
+	.dst_mask       = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE,
+	.dst_size       = sizeof(struct dispatch_mach_send_refs_s),
+
+	.dst_create     = _dispatch_mach_send_create,
+	.dst_update_mux = _dispatch_mach_send_update,
+	.dst_merge_evt  = _dispatch_mach_merge_notification,
+};
+
+#endif // HAVE_MACH
+#pragma mark mach recv / reply
+#if HAVE_MACH
+
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+static mach_port_t _dispatch_mach_portset,  _dispatch_mach_recv_portset;
+static dispatch_kevent_s _dispatch_mach_recv_kevent;
+
+static void
+_dispatch_mach_portset_init(void *context DISPATCH_UNUSED)
+{
+	kern_return_t kr = mach_port_allocate(mach_task_self(),
+			MACH_PORT_RIGHT_PORT_SET, &_dispatch_mach_portset);
+	DISPATCH_VERIFY_MIG(kr);
+	if (unlikely(kr)) {
+		DISPATCH_CLIENT_CRASH(kr,
+				"mach_port_allocate() failed: cannot create port set");
+	}
+
+	dispatch_kevent_s kev = {
+		.filter = EVFILT_MACHPORT,
+		.flags  = EV_ADD|EV_ENABLE,
+		.ident  = _dispatch_mach_portset,
+		.qos    = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
+	};
+	_dispatch_kq_deferred_update(&kev);
+}
+
+static bool
+_dispatch_mach_portset_update(mach_port_t mp, mach_port_t mps)
+{
+	kern_return_t kr;
+
+	_dispatch_debug_machport(mp);
+	kr = mach_port_move_member(mach_task_self(), mp, mps);
+	if (unlikely(kr)) {
+		DISPATCH_VERIFY_MIG(kr);
+		switch (kr) {
+		case KERN_INVALID_RIGHT:
+			if (mps) {
+				_dispatch_bug_mach_client("_dispatch_kevent_machport_enable: "
+						"mach_port_move_member() failed ", kr);
+				break;
+			}
+			//fall through
+		case KERN_INVALID_NAME:
+#if DISPATCH_DEBUG
+			_dispatch_log("Corruption: Mach receive right 0x%x destroyed "
+					"prematurely", mp);
+#endif
+			break;
+		default:
+			(void)dispatch_assume_zero(kr);
+			break;
+		}
+	}
+	if (mps) {
+		return kr == KERN_SUCCESS;
+	}
+	return true;
+}
+
+static mach_port_t
+_dispatch_mach_get_portset(void)
+{
+	static dispatch_once_t pred;
+	dispatch_once_f(&pred, NULL, _dispatch_mach_portset_init);
+	return _dispatch_mach_portset;
+}
+
+static bool
+_dispatch_mach_recv_update_portset_mux(dispatch_muxnote_t dmn)
+{
+	mach_port_t mp = (mach_port_t)dmn->dmn_kev.ident;
+	mach_port_t mps = MACH_PORT_NULL;
+	if (!(dmn->dmn_kev.flags & EV_DELETE)) {
+		mps = _dispatch_mach_get_portset();
+	}
+	return _dispatch_mach_portset_update(mp, mps);
+}
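+
+/*
+ * Fallback rationale: kernels that cannot attach one EVFILT_MACHPORT knote
+ * per receive right instead move each right into (or out of) a shared port
+ * set, and a single kevent on that set stands in for all of them.
+ */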
+
+static void
+_dispatch_mach_recv_msg_buf_init(dispatch_kevent_t ke)
+{
+	mach_vm_size_t vm_size = mach_vm_round_page(
+			DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE +
+			DISPATCH_MACH_TRAILER_SIZE);
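+	// seed the allocation hint above page zero so VM_FLAGS_ANYWHERE can
+	// never hand back an address that could be mistaken for NULL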
+	mach_vm_address_t vm_addr = vm_page_size;
+	kern_return_t kr;
+
+	while (unlikely(kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size,
+			VM_FLAGS_ANYWHERE))) {
+		if (kr != KERN_NO_SPACE) {
+			DISPATCH_CLIENT_CRASH(kr,
+					"Could not allocate mach msg receive buffer");
+		}
+		_dispatch_temporary_resource_shortage();
+		vm_addr = vm_page_size;
+	}
+	ke->ext[0] = (uintptr_t)vm_addr;
+	ke->ext[1] = vm_size;
+}
+
+static void
+_dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED)
+{
+	kern_return_t kr = mach_port_allocate(mach_task_self(),
+			MACH_PORT_RIGHT_PORT_SET, &_dispatch_mach_recv_portset);
+	DISPATCH_VERIFY_MIG(kr);
+	if (unlikely(kr)) {
+		DISPATCH_CLIENT_CRASH(kr,
+				"mach_port_allocate() failed: cannot create port set");
+	}
+
+	dispatch_assert(DISPATCH_MACH_TRAILER_SIZE ==
+			REQUESTED_TRAILER_SIZE_NATIVE(MACH_RCV_TRAILER_ELEMENTS(
+			DISPATCH_MACH_RCV_TRAILER)));
+
+	_dispatch_mach_recv_kevent = (dispatch_kevent_s){
+		.filter = EVFILT_MACHPORT,
+		.ident  = _dispatch_mach_recv_portset,
+		.flags  = EV_ADD|EV_ENABLE|EV_DISPATCH,
+		.fflags = DISPATCH_MACH_RCV_OPTIONS,
+		.qos    = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
+	};
+	if (!_dispatch_kevent_workqueue_enabled) {
+		_dispatch_mach_recv_msg_buf_init(&_dispatch_mach_recv_kevent);
+	}
+	_dispatch_kq_deferred_update(&_dispatch_mach_recv_kevent);
+}
+
+static mach_port_t
+_dispatch_mach_get_recv_portset(void)
+{
+	static dispatch_once_t pred;
+	dispatch_once_f(&pred, NULL, _dispatch_mach_recv_portset_init);
+	return _dispatch_mach_recv_portset;
+}
+
+static bool
+_dispatch_mach_recv_direct_update_portset_mux(dispatch_muxnote_t dmn)
+{
+	mach_port_t mp = (mach_port_t)dmn->dmn_kev.ident;
+	mach_port_t mps = MACH_PORT_NULL;
+	if (!(dmn->dmn_kev.flags & EV_DELETE)) {
+		mps = _dispatch_mach_get_recv_portset();
+	}
+	return _dispatch_mach_portset_update(mp, mps);
+}
+
+static dispatch_unote_t
+_dispatch_mach_kevent_mach_recv_direct_find(mach_port_t name)
+{
+	dispatch_muxnote_t dmn = _dispatch_mach_muxnote_find(name, EVFILT_MACHPORT);
+	dispatch_unote_linkage_t dul;
+
+	TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) {
+		dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
+		if (du._du->du_type->dst_fflags & MACH_RCV_MSG) {
+			return du;
+		}
+	}
+	return DISPATCH_UNOTE_NULL;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_kevent_portset_merge(dispatch_kevent_t ke)
+{
+	mach_port_t name = (mach_port_name_t)ke->data;
+	dispatch_muxnote_t dmn = _dispatch_mach_muxnote_find(name, EVFILT_MACHPORT);
+	dispatch_unote_linkage_t dul, dul_next;
+
+	_dispatch_debug_machport(name);
+	if (!dispatch_assume(dmn)) {
+		return;
+	}
+	_dispatch_mach_portset_update(name, MACH_PORT_NULL); // emulate EV_DISPATCH
+
+	TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) {
+		dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul);
+		dux_merge_evt(du._du, EV_ENABLE | EV_DISPATCH,
+				DISPATCH_MACH_RECV_MESSAGE, 0);
+	}
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_kevent_portset_drain(dispatch_kevent_t ke)
+{
+	if (ke->ident == _dispatch_mach_recv_portset) {
+		return _dispatch_kevent_mach_msg_drain(ke);
+	} else {
+		dispatch_assert(ke->ident == _dispatch_mach_portset);
+		return _dispatch_mach_kevent_portset_merge(ke);
+	}
+}
+#endif // DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+
+static void
+_dispatch_kevent_mach_msg_recv(dispatch_unote_t du, uint32_t flags,
+		mach_msg_header_t *hdr)
+{
+	mach_msg_size_t siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
+	mach_port_t name = hdr->msgh_local_port;
+
+	if (!dispatch_assume(hdr->msgh_size <= UINT_MAX -
+			DISPATCH_MACH_TRAILER_SIZE)) {
+		_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
+				"received overlarge message");
+	} else if (!dispatch_assume(name)) {
+		_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
+				"received message with MACH_PORT_NULL port");
+	} else {
+		_dispatch_debug_machport(name);
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+		if (du._du == NULL) {
+			du = _dispatch_mach_kevent_mach_recv_direct_find(name);
+		}
+#endif
+		if (likely(du._du)) {
+			return dux_merge_msg(du._du, flags, hdr, siz);
+		}
+		_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
+				"received message with no listeners");
+	}
+
+	mach_msg_destroy(hdr);
+	if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
+		free(hdr);
+	}
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke)
+{
+	mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke);
+	mach_msg_size_t siz;
+	mach_msg_return_t kr = (mach_msg_return_t)ke->fflags;
+	uint32_t flags = ke->flags;
+	dispatch_unote_t du = _dispatch_kevent_get_unote(ke);
+
+	if (unlikely(!hdr)) {
+		DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message");
+	}
+	if (likely(!kr)) {
+		_dispatch_kevent_mach_msg_recv(du, flags, hdr);
+		goto out;
+	} else if (kr != MACH_RCV_TOO_LARGE) {
+		goto out;
+	} else if (!ke->data) {
+		DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity");
+	}
+	if (unlikely(ke->ext[1] > (UINT_MAX - DISPATCH_MACH_TRAILER_SIZE))) {
+		DISPATCH_INTERNAL_CRASH(ke->ext[1],
+				"EVFILT_MACHPORT with overlarge message");
+	}
+	siz = _dispatch_kevent_mach_msg_size(ke) + DISPATCH_MACH_TRAILER_SIZE;
+	hdr = malloc(siz);
+	if (dispatch_assume(hdr)) {
+		flags |= DISPATCH_EV_MSG_NEEDS_FREE;
+	} else {
+		// The kernel will discard a message too large to fit
+		hdr = NULL;
+		siz = 0;
+	}
+	mach_port_t name = (mach_port_name_t)ke->data;
+	const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS |
+			MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE);
+	kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE,
+			MACH_PORT_NULL);
+	if (likely(!kr)) {
+		_dispatch_kevent_mach_msg_recv(du, flags, hdr);
+		goto out;
+	} else if (kr == MACH_RCV_TOO_LARGE) {
+		_dispatch_log("BUG in libdispatch client: "
+				"_dispatch_kevent_mach_msg_drain: dropped message too "
+				"large to fit in memory: id = 0x%x, size = %u",
+				hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke));
+		kr = MACH_MSG_SUCCESS;
+	}
+	if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
+		free(hdr);
+	}
+out:
+	if (unlikely(kr)) {
+		_dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: "
+				"message reception failed", kr);
+	}
+
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+	if (!(flags & EV_UDATA_SPECIFIC)) {
+		_dispatch_kq_deferred_update(&_dispatch_mach_recv_kevent);
+	}
+#endif
+}
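+
+/*
+ * MACH_RCV_TOO_LARGE path summary: the kqueue receive uses
+ * MACH_RCV_LARGE_IDENTITY, so on overflow ke->data carries the port name and
+ * ke->ext[1] the real message size; the message is then re-received above
+ * into a heap buffer with MACH_RCV_LARGE stripped, so anything that still
+ * does not fit is dropped by the kernel rather than retried forever.
+ */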
+
+static dispatch_unote_t
+_dispatch_source_mach_recv_create(dispatch_source_type_t dst,
+		uintptr_t handle, unsigned long mask)
+{
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+	if (!_dispatch_evfilt_machport_direct_enabled) {
+		dst = &_dispatch_source_type_mach_recv_pset;
+	}
+#endif
+	return _dispatch_unote_create_with_handle(dst, handle, mask);
+}
+
+const dispatch_source_type_s _dispatch_source_type_mach_recv = {
+	.dst_kind       = "mach_recv",
+	.dst_filter     = EVFILT_MACHPORT,
+	.dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
+	.dst_fflags     = 0,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_source_mach_recv_create,
+	.dst_merge_evt  = _dispatch_source_merge_evt,
+	.dst_merge_msg  = NULL, // never receives messages directly
+};
+
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+const dispatch_source_type_s _dispatch_source_type_mach_recv_pset = {
+	.dst_kind       = "mach_recv (portset)",
+	.dst_filter     = EVFILT_MACHPORT,
+	.dst_flags      = EV_DISPATCH,
+	.dst_fflags     = 0,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = NULL, // never created directly
+	.dst_update_mux = _dispatch_mach_recv_update_portset_mux,
+	.dst_merge_evt  = _dispatch_source_merge_evt,
+	.dst_merge_msg  = NULL, // never receives messages directly
+};
+#endif
+
+static void
+_dispatch_source_mach_recv_direct_merge_msg(dispatch_unote_t du, uint32_t flags,
+		mach_msg_header_t *msg, mach_msg_size_t msgsz DISPATCH_UNUSED)
+{
+	dispatch_continuation_t dc = du._dr->ds_handler[DS_EVENT_HANDLER];
+	dispatch_source_t ds = _dispatch_source_from_refs(du._dr);
+	dispatch_queue_t cq = _dispatch_queue_get_current();
+
+	// see firehose_client_push_notify_async
+	_dispatch_queue_set_current(ds->_as_dq);
+	dc->dc_func(msg);
+	_dispatch_queue_set_current(cq);
+	if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
+		free(msg);
+	}
+	if ((ds->dq_atomic_flags & DSF_CANCELED) ||
+			(flags & (EV_ONESHOT | EV_DELETE))) {
+		return _dispatch_source_merge_evt(du, flags, 0, 0);
+	}
+	if (du._du->du_needs_rearm) {
+		return _dispatch_unote_resume(du);
+	}
+}
+
+static dispatch_unote_t
+_dispatch_source_mach_recv_direct_create(dispatch_source_type_t dst,
+	uintptr_t handle, unsigned long mask)
+{
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+	if (!_dispatch_evfilt_machport_direct_enabled) {
+		dst = &_dispatch_source_type_mach_recv_direct_pset;
+	}
+#endif
+	return _dispatch_unote_create_with_handle(dst, handle, mask);
+}
+
+static void
+_dispatch_mach_recv_direct_merge(dispatch_unote_t du,
+		uint32_t flags, uintptr_t data, pthread_priority_t pp)
+{
+	if (flags & EV_VANISHED) {
+		DISPATCH_CLIENT_CRASH(du._du->du_ident,
+				"Unexpected EV_VANISHED (do not destroy random mach ports)");
+	}
+	return _dispatch_source_merge_evt(du, flags, data, pp);
+}
+
+const dispatch_source_type_s _dispatch_source_type_mach_recv_direct = {
+	.dst_kind       = "direct mach_recv",
+	.dst_filter     = EVFILT_MACHPORT,
+	.dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
+	.dst_fflags     = DISPATCH_MACH_RCV_OPTIONS,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = _dispatch_source_mach_recv_direct_create,
+	.dst_merge_evt  = _dispatch_mach_recv_direct_merge,
+	.dst_merge_msg  = _dispatch_source_mach_recv_direct_merge_msg,
+};
+
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+const dispatch_source_type_s _dispatch_source_type_mach_recv_direct_pset = {
+	.dst_kind       = "direct mach_recv (portset)",
+	.dst_filter     = EVFILT_MACHPORT,
+	.dst_flags      = 0,
+	.dst_fflags     = DISPATCH_MACH_RCV_OPTIONS,
+	.dst_size       = sizeof(struct dispatch_source_refs_s),
+
+	.dst_create     = NULL, // never created directly
+	.dst_update_mux = _dispatch_mach_recv_direct_update_portset_mux,
+	.dst_merge_evt  = _dispatch_mach_recv_direct_merge,
+	.dst_merge_msg  = _dispatch_source_mach_recv_direct_merge_msg,
+};
+#endif
+
+static dispatch_unote_t
+_dispatch_mach_recv_create(dispatch_source_type_t dst,
+	uintptr_t handle, unsigned long mask)
+{
+	// mach channels pass MACH_PORT_NULL until they are connected
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+	if (!_dispatch_evfilt_machport_direct_enabled) {
+		dst = &_dispatch_mach_type_recv_pset;
+	}
+#endif
+	// created without a handle because the mach code will set the ident later
+	return _dispatch_unote_create_without_handle(dst, handle, mask);
+}
+
+const dispatch_source_type_s _dispatch_mach_type_recv = {
+	.dst_kind       = "mach_recv (channel)",
+	.dst_filter     = EVFILT_MACHPORT,
+	.dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
+	.dst_fflags     = DISPATCH_MACH_RCV_OPTIONS,
+	.dst_size       = sizeof(struct dispatch_mach_recv_refs_s),
+
+	.dst_create     = _dispatch_mach_recv_create,
+	.dst_merge_evt  = _dispatch_mach_recv_direct_merge,
+	.dst_merge_msg  = _dispatch_mach_merge_msg,
+};
+
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+const dispatch_source_type_s _dispatch_mach_type_recv_pset = {
+	.dst_kind       = "mach_recv (channel, portset)",
+	.dst_filter     = EVFILT_MACHPORT,
+	.dst_flags      = 0,
+	.dst_fflags     = DISPATCH_MACH_RCV_OPTIONS,
+	.dst_size       = sizeof(struct dispatch_mach_recv_refs_s),
+
+	.dst_create     = NULL, // never created directly
+	.dst_update_mux = _dispatch_mach_recv_direct_update_portset_mux,
+	.dst_merge_evt  = _dispatch_mach_recv_direct_merge,
+	.dst_merge_msg  = _dispatch_mach_merge_msg,
+};
+#endif
+
+static dispatch_unote_t
+_dispatch_mach_reply_create(dispatch_source_type_t dst,
+	uintptr_t handle, unsigned long mask)
+{
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+	if (!_dispatch_evfilt_machport_direct_enabled) {
+		dst = &_dispatch_mach_type_reply_pset;
+	}
+#endif
+	return _dispatch_unote_create_with_handle(dst, handle, mask);
+}
+
+DISPATCH_NORETURN
+static void
+_dispatch_mach_reply_merge_evt(dispatch_unote_t du,
+		uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED,
+		pthread_priority_t pp DISPATCH_UNUSED)
+{
+	DISPATCH_INTERNAL_CRASH(du._du->du_ident, "Unexpected event");
+}
+
+const dispatch_source_type_s _dispatch_mach_type_reply = {
+	.dst_kind       = "mach reply",
+	.dst_filter     = EVFILT_MACHPORT,
+	.dst_flags      = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_ONESHOT|EV_VANISHED,
+	.dst_fflags     = DISPATCH_MACH_RCV_OPTIONS,
+	.dst_size       = sizeof(struct dispatch_mach_reply_refs_s),
+
+	.dst_create     = _dispatch_mach_reply_create,
+	.dst_merge_evt  = _dispatch_mach_reply_merge_evt,
+	.dst_merge_msg  = _dispatch_mach_reply_merge_msg,
+};
+
+#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
+const dispatch_source_type_s _dispatch_mach_type_reply_pset = {
+	.dst_kind       = "mach reply (portset)",
+	.dst_filter     = EVFILT_MACHPORT,
+	.dst_flags      = EV_ONESHOT,
+	.dst_fflags     = DISPATCH_MACH_RCV_OPTIONS,
+	.dst_size       = sizeof(struct dispatch_mach_reply_refs_s),
+
+	.dst_create     = NULL, // never created directly
+	.dst_update_mux = _dispatch_mach_recv_direct_update_portset_mux,
+	.dst_merge_evt  = _dispatch_mach_reply_merge_evt,
+	.dst_merge_msg  = _dispatch_mach_reply_merge_msg,
+};
+#endif
+
+#pragma mark Mach channel SIGTERM notification (for XPC channels only)
+
+const dispatch_source_type_s _dispatch_xpc_type_sigterm = {
+	.dst_kind       = "sigterm (xpc)",
+	.dst_filter     = EVFILT_SIGNAL,
+	.dst_flags      = DISPATCH_EV_DIRECT|EV_CLEAR|EV_ONESHOT,
+	.dst_fflags     = 0,
+	.dst_size       = sizeof(struct dispatch_xpc_term_refs_s),
+
+	.dst_create     = _dispatch_unote_create_with_handle,
+	.dst_merge_evt  = _dispatch_xpc_sigterm_merge,
+};
+
+#endif // HAVE_MACH
+
+#endif // DISPATCH_EVENT_BACKEND_KEVENT
diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c
index 1305bde..c69503d 100644
--- a/src/firehose/firehose_buffer.c
+++ b/src/firehose/firehose_buffer.c
@@ -49,10 +49,13 @@
 #define dispatch_hardware_pause() __asm__("")
 #endif
 
-#define _dispatch_wait_until(c) do { \
-		while (!fastpath(c)) { \
+#define _dispatch_wait_until(c) ({ \
+		typeof(c) _c; \
+		for (;;) { \
+			if (likely(_c = (c))) break; \
 			dispatch_hardware_pause(); \
-		} } while (0)
+		} \
+		_c; })
 #define dispatch_compiler_barrier()  __asm__ __volatile__("" ::: "memory")
 
 typedef uint32_t dispatch_lock;
@@ -71,9 +74,10 @@
 #include <sys/param.h>
 #include <sys/types.h>
 #include <vm/vm_kern.h>
+#include <internal/atomic.h> // os/internal/atomic.h
 #include <firehose_types_private.h> // <firehose/firehose_types_private.h>
 #include <tracepoint_private.h> // <firehose/tracepoint_private.h>
-#include <internal/atomic.h> // os/internal/atomic.h
+#include <chunk_private.h> // <firehose/chunk_private.h>
 #include "os/firehose_buffer_private.h"
 #include "firehose_buffer_internal.h"
 #include "firehose_inline_internal.h"
@@ -93,14 +97,11 @@
 		offsetof(firehose_stream_state_u, fss_allocator),
 		"fss_gate and fss_allocator alias");
 _Static_assert(sizeof(struct firehose_buffer_header_s) ==
-				FIREHOSE_BUFFER_CHUNK_SIZE,
+				FIREHOSE_CHUNK_SIZE,
 		"firehose buffer header must be 4k");
 _Static_assert(offsetof(struct firehose_buffer_header_s, fbh_unused) <=
-				FIREHOSE_BUFFER_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE,
+				FIREHOSE_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE,
 		"we must have enough space for the libtrace header");
-_Static_assert(sizeof(struct firehose_buffer_chunk_s) ==
-				FIREHOSE_BUFFER_CHUNK_SIZE,
-		"firehose buffer chunks must be 4k");
 _Static_assert(powerof2(FIREHOSE_BUFFER_CHUNK_COUNT),
 		"CHUNK_COUNT Must be a power of two");
 _Static_assert(FIREHOSE_BUFFER_CHUNK_COUNT <= 64,
@@ -109,14 +110,8 @@
 _Static_assert(powerof2(FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT),
 		"madvise chunk count must be a power of two");
 #endif
-_Static_assert(howmany(sizeof(struct firehose_tracepoint_s),
-		sizeof(struct firehose_buffer_chunk_s)) < 255,
-		"refcount assumes that you cannot have more than 255 tracepoints");
-// FIXME: we should have an event-count instead here
 _Static_assert(sizeof(struct firehose_buffer_stream_s) == 128,
 		"firehose buffer stream must be small (single cacheline if possible)");
-_Static_assert(offsetof(struct firehose_buffer_chunk_s, fbc_data) % 8 == 0,
-		"Page header is 8 byte aligned");
 _Static_assert(sizeof(struct firehose_tracepoint_s) == 24,
 		"tracepoint header should be exactly 24 bytes");
 #endif
@@ -177,21 +172,19 @@
 	uint32_t opts = MPO_CONTEXT_AS_GUARD | MPO_TEMPOWNER | MPO_INSERT_SEND_RIGHT;
 	sendp = firehose_mach_port_allocate(opts, fb);
 
-	if (oldsendp && _voucher_libtrace_hooks->vah_version >= 3) {
-		if (_voucher_libtrace_hooks->vah_get_reconnect_info) {
-			kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size);
-			if (likely(kr == KERN_SUCCESS) && addr && size) {
-				extra_info_size = size;
-				kr = mach_make_memory_entry_64(mach_task_self(), &size, addr,
-						flags, &extra_info_port, MACH_PORT_NULL);
-				if (unlikely(kr)) {
-					// the client probably has some form of memory corruption
-					// and/or a port leak
-					DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port");
-				}
-				kr = mach_vm_deallocate(mach_task_self(), addr, size);
-				(void)dispatch_assume_zero(kr);
+	if (oldsendp && _voucher_libtrace_hooks->vah_get_reconnect_info) {
+		kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size);
+		if (likely(kr == KERN_SUCCESS) && addr && size) {
+			extra_info_size = size;
+			kr = mach_make_memory_entry_64(mach_task_self(), &size, addr,
+					flags, &extra_info_port, MACH_PORT_NULL);
+			if (unlikely(kr)) {
+				// the client probably has some form of memory corruption
+				// and/or a port leak
+				DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port");
 			}
+			kr = mach_vm_deallocate(mach_task_self(), addr, size);
+			(void)dispatch_assume_zero(kr);
 		}
 	}
 
@@ -261,7 +254,7 @@
 		}
 	}
 
-	uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_BUFFER_CHUNK_SIZE);
+	uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_CHUNK_SIZE);
 	if (ratio > 1) {
 		total = roundup(total, ratio);
 	}
@@ -299,7 +292,7 @@
 
 	vm_addr = vm_page_size;
 	const size_t madvise_bytes = FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT *
-			FIREHOSE_BUFFER_CHUNK_SIZE;
+			FIREHOSE_CHUNK_SIZE;
 	if (slowpath(madvise_bytes % PAGE_SIZE)) {
 		DISPATCH_INTERNAL_CRASH(madvise_bytes,
 				"Invalid values for MADVISE_CHUNK_COUNT / CHUNK_SIZE");
@@ -320,7 +313,7 @@
 	vm_offset_t vm_addr = 0;
 	vm_size_t size;
 
-	size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE;
+	size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE;
 	__firehose_allocate(&vm_addr, size);
 
 	(void)logd_port; (void)unique_pid;
@@ -487,12 +480,7 @@
 		return;
 	}
 
-	bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) |
-			((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1));
-	state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header,
-			fbh_bank.fbb_state.fbs_atomic_state, bank_updates, relaxed);
-	if (state_out) *state_out = state;
-
+	__firehose_critical_region_enter();
 	os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail,
 			otail.frp_atomic_tail, ntail.frp_atomic_tail, relaxed, {
 		ntail = otail;
@@ -500,6 +488,15 @@
 		ntail.frp_io_flushed += io_delta;
 		ntail.frp_mem_flushed += mem_delta;
 	});
+
+	bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) |
+			((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1));
+	state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header,
+			fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release);
+	__firehose_critical_region_leave();
+
+	if (state_out) *state_out = state;
+
 	if (async_notif) {
 		if (io_delta) {
 			os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_notifs, relaxed);
@@ -611,18 +608,18 @@
 
 OS_ALWAYS_INLINE
 static inline firehose_tracepoint_t
-firehose_buffer_chunk_init(firehose_buffer_chunk_t fbc,
+firehose_buffer_chunk_init(firehose_chunk_t fc,
 		firehose_tracepoint_query_t ask, uint8_t **privptr)
 {
 	const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
 
-	uint16_t pub_offs = offsetof(struct firehose_buffer_chunk_s, fbc_data);
-	uint16_t priv_offs = FIREHOSE_BUFFER_CHUNK_SIZE;
+	uint16_t pub_offs = offsetof(struct firehose_chunk_s, fc_data);
+	uint16_t priv_offs = FIREHOSE_CHUNK_SIZE;
 
 	pub_offs += roundup(ft_size + ask->pubsize, 8);
 	priv_offs -= ask->privsize;
 
-	if (fbc->fbc_pos.fbc_atomic_pos) {
+	if (fc->fc_pos.fcp_atomic_pos) {
 		// Needed for process death handling (recycle-reuse):
 		// No atomic fences required, we merely want to make sure the observers
 		// will see memory effects in program (asm) order.
@@ -632,32 +629,32 @@
 		// and it is dirty, when crawling the chunk, we don't see remnants of
 		// other tracepoints
 		//
-		// We only do that when the fbc_pos is non zero, because zero means
+		// We only do that when the fc_pos is non zero, because zero means
 		// we just faulted the chunk, and the kernel already bzero-ed it.
-		bzero(fbc->fbc_data, sizeof(fbc->fbc_data));
+		bzero(fc->fc_data, sizeof(fc->fc_data));
 	}
 	dispatch_compiler_barrier();
 	// <rdar://problem/23562733> boot starts mach absolute time at 0, and
 	// wrapping around to values above UINT64_MAX - FIREHOSE_STAMP_SLOP
 	// breaks firehose_buffer_stream_flush() assumptions
 	if (ask->stamp > FIREHOSE_STAMP_SLOP) {
-		fbc->fbc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP;
+		fc->fc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP;
 	} else {
-		fbc->fbc_timestamp = 0;
+		fc->fc_timestamp = 0;
 	}
-	fbc->fbc_pos = (firehose_buffer_pos_u){
-		.fbc_next_entry_offs = pub_offs,
-		.fbc_private_offs = priv_offs,
-		.fbc_refcnt = 1,
-		.fbc_qos_bits = firehose_buffer_qos_bits_propagate(),
-		.fbc_stream = ask->stream,
-		.fbc_flag_io = ask->for_io,
+	fc->fc_pos = (firehose_chunk_pos_u){
+		.fcp_next_entry_offs = pub_offs,
+		.fcp_private_offs = priv_offs,
+		.fcp_refcnt = 1,
+		.fcp_qos = firehose_buffer_qos_bits_propagate(),
+		.fcp_stream = ask->stream,
+		.fcp_flag_io = ask->for_io,
 	};
 
 	if (privptr) {
-		*privptr = fbc->fbc_start + priv_offs;
+		*privptr = fc->fc_start + priv_offs;
 	}
-	return (firehose_tracepoint_t)fbc->fbc_data;
+	return (firehose_tracepoint_t)fc->fc_data;
 }
 
 OS_NOINLINE
@@ -671,14 +668,18 @@
 	uint64_t stamp_and_len;
 
 	if (fastpath(ref)) {
-		firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
-		ft = firehose_buffer_chunk_init(fbc, ask, privptr);
+		firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref);
+		ft = firehose_buffer_chunk_init(fc, ask, privptr);
 		// Needed for process death handling (tracepoint-begin):
 		// write the length before making the chunk visible
-		stamp_and_len  = ask->stamp - fbc->fbc_timestamp;
+		stamp_and_len  = ask->stamp - fc->fc_timestamp;
 		stamp_and_len |= (uint64_t)ask->pubsize << 48;
 		os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed);
-
+#ifdef KERNEL
+		ft->ft_thread = thread_tid(current_thread());
+#else
+		ft->ft_thread = _pthread_threadid_self_np_direct();
+#endif
 		if (ask->stream == firehose_stream_metadata) {
 			os_atomic_or2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap,
 					1ULL << ref, relaxed);
@@ -750,7 +751,7 @@
 firehose_buffer_ring_shrink(firehose_buffer_t fb, uint16_t ref)
 {
 	const size_t madv_size =
-			FIREHOSE_BUFFER_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT;
+			FIREHOSE_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT;
 	const size_t madv_mask =
 			(1ULL << FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT) - 1;
 
@@ -779,12 +780,12 @@
 void
 firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref)
 {
-	firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
+	firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref);
 	uint16_t volatile *fbh_ring;
 	uint16_t volatile *fbh_ring_head;
 	uint16_t head, gen, dummy, idx;
-	firehose_buffer_pos_u fbc_pos = fbc->fbc_pos;
-	bool for_io = fbc_pos.fbc_flag_io;
+	firehose_chunk_pos_u fc_pos = fc->fc_pos;
+	bool for_io = fc_pos.fcp_flag_io;
 
 	if (for_io) {
 		fbh_ring = fb->fb_header.fbh_io_ring;
@@ -871,7 +872,7 @@
 		}));
 	}
 
-	pthread_priority_t pp = fbc_pos.fbc_qos_bits;
+	pthread_priority_t pp = fc_pos.fcp_qos;
 	pp <<= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT;
 	firehose_client_send_push_async(fb, _pthread_qos_class_decode(pp, NULL, NULL),
 			for_io);
@@ -885,7 +886,7 @@
 	firehose_ring_tail_u pos, old;
 	uint16_t volatile *fbh_ring;
 	uint16_t gen, ref, entry, tail;
-	firehose_buffer_chunk_t fbc;
+	firehose_chunk_t fc;
 	bool for_io;
 
 	os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail,
@@ -923,14 +924,14 @@
 	// and it is dirty, it is a chunk being written to that needs a flush
 	gen = (entry & FIREHOSE_RING_POS_GEN_MASK) + FIREHOSE_RING_POS_GEN_INC;
 	ref = entry & FIREHOSE_RING_POS_IDX_MASK;
-	fbc = firehose_buffer_ref_to_chunk(fb, ref);
+	fc = firehose_buffer_ref_to_chunk(fb, ref);
 
-	if (!for_io && fbc->fbc_pos.fbc_stream == firehose_stream_metadata) {
+	if (!for_io && fc->fc_pos.fcp_stream == firehose_stream_metadata) {
 		os_atomic_and2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap,
 				~(1ULL << ref), relaxed);
 	}
-	os_atomic_store2o(fbc, fbc_pos.fbc_atomic_pos,
-			FIREHOSE_BUFFER_POS_FULL_BIT, relaxed);
+	os_atomic_store2o(fc, fc_pos.fcp_atomic_pos,
+			FIREHOSE_CHUNK_POS_FULL_BIT, relaxed);
 	dispatch_compiler_barrier();
 	os_atomic_store(&fbh_ring[tail], gen | 0, relaxed);
 	return ref;
@@ -1020,7 +1021,7 @@
 	uint64_t unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(for_io);
 #ifndef KERNEL
 	state.fbs_atomic_state = os_atomic_add_orig2o(fbb,
-			fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), relaxed);
+			fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), acquire);
 	if (fastpath(!(state.fbs_atomic_state & unavail_mask))) {
 		ask->is_bank_ok = true;
 		if (fastpath(ref = firehose_buffer_ring_try_recycle(fb))) {
@@ -1034,7 +1035,7 @@
 #else
 	firehose_bank_state_u value;
 	ask->is_bank_ok = os_atomic_rmw_loop2o(fbb, fbb_state.fbs_atomic_state,
-			state.fbs_atomic_state, value.fbs_atomic_state, relaxed, {
+			state.fbs_atomic_state, value.fbs_atomic_state, acquire, {
 		value = state;
 		if (slowpath((value.fbs_atomic_state & unavail_mask) != 0)) {
 			os_atomic_rmw_loop_give_up(break);
@@ -1067,32 +1068,6 @@
 			privsize, privptr);
 }
 
-firehose_tracepoint_t
-__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc,
-		uint64_t stamp, firehose_stream_t stream,
-		uint16_t pubsize, uint16_t privsize, uint8_t **privptr)
-{
-
-	firehose_tracepoint_t ft;
-	long result;
-
-	result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream,
-			  pubsize, privsize, privptr);
-	if (fastpath(result > 0)) {
-		ft = (firehose_tracepoint_t)(fbc->fbc_start + result);
-		stamp -= fbc->fbc_timestamp;
-		stamp |= (uint64_t)pubsize << 48;
-		// Needed for process death handling (tracepoint-begin)
-		// see firehose_buffer_stream_chunk_install
-		os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed);
-		dispatch_compiler_barrier();
-		return ft;
-	}
-	else {
-		return NULL;
-	}
-}
-
 firehose_buffer_t
 __firehose_buffer_create(size_t *size)
 {
@@ -1101,7 +1076,7 @@
 	}
 
 	if (size) {
-		*size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE;
+		*size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE;
 	}
 	return kernel_firehose_buffer;
 }
@@ -1114,27 +1089,6 @@
 }
 
 void
-__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc,
-		firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
-{
-	firehose_buffer_pos_u pos;
-
-	// Needed for process death handling (tracepoint-flush):
-	// We want to make sure the observers
-	// will see memory effects in program (asm) order.
-	// 1. write all the data to the tracepoint
-	// 2. write the tracepoint ID, so that seeing it means the tracepoint
-	//    is valid
-	ft->ft_thread = thread_tid(current_thread());
-
-	// release barrier makes the log writes visible
-	os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release);
-	pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos,
-			FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed);
-	return;
-}
-
-void
 __firehose_merge_updates(firehose_push_reply_t update)
 {
 	firehose_buffer_t fb = kernel_firehose_buffer;
diff --git a/src/firehose/firehose_buffer_internal.h b/src/firehose/firehose_buffer_internal.h
index db8e026..f93cf14 100644
--- a/src/firehose/firehose_buffer_internal.h
+++ b/src/firehose/firehose_buffer_internal.h
@@ -173,11 +173,11 @@
 	dispatch_unfair_lock_s			fbh_logd_lock;
 #endif
 	uint64_t						fbh_unused[0];
-} OS_ALIGNED(FIREHOSE_BUFFER_CHUNK_SIZE) *firehose_buffer_header_t;
+} OS_ALIGNED(FIREHOSE_CHUNK_SIZE) *firehose_buffer_header_t;
 
 union firehose_buffer_u {
 	struct firehose_buffer_header_s fb_header;
-	struct firehose_buffer_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT];
+	struct firehose_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT];
 };
 
 // used to let the compiler pack these values in 1 or 2 registers
diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h
index 9576882..4bd3e40 100644
--- a/src/firehose/firehose_inline_internal.h
+++ b/src/firehose/firehose_inline_internal.h
@@ -55,17 +55,11 @@
 	mach_port_options_t opts = {
 		.flags = flags,
 	};
-	kern_return_t kr;
-
-	for (;;) {
-		kr = mach_port_construct(mach_task_self(), &opts,
-				(mach_port_context_t)ctx, &port);
-		if (fastpath(kr == KERN_SUCCESS)) {
-			break;
-		}
+	kern_return_t kr = mach_port_construct(mach_task_self(), &opts,
+			(mach_port_context_t)ctx, &port);
+	if (unlikely(kr)) {
 		DISPATCH_VERIFY_MIG(kr);
-		dispatch_assume_zero(kr);
-		_dispatch_temporary_resource_shortage();
+		DISPATCH_CLIENT_CRASH(kr, "Unable to allocate mach port");
 	}
 	return port;
 }
@@ -142,36 +136,28 @@
 #pragma mark firehose buffer
 
 OS_ALWAYS_INLINE
-static inline firehose_buffer_chunk_t
+static inline firehose_chunk_t
 firehose_buffer_chunk_for_address(void *addr)
 {
-	uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_BUFFER_CHUNK_SIZE - 1);
-	return (firehose_buffer_chunk_t)chunk_addr;
+	uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_CHUNK_SIZE - 1);
+	return (firehose_chunk_t)chunk_addr;
 }
 
 OS_ALWAYS_INLINE
 static inline uint16_t
-firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_buffer_chunk_t fbc)
+firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_chunk_t fbc)
 {
 	return (uint16_t)(fbc - fb->fb_chunks);
 }
 
 OS_ALWAYS_INLINE
-static inline firehose_buffer_chunk_t
+static inline firehose_chunk_t
 firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref)
 {
 	return fb->fb_chunks + ref;
 }
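These helpers are plain array arithmetic plus power-of-two masking: chunks are contiguous and FIREHOSE_CHUNK_SIZE-aligned (see the OS_ALIGNED attribute in firehose_buffer_internal.h above), so clearing the low bits of any interior pointer yields the chunk base, and chunk/ref conversion is just pointer-difference math. A self-contained sketch under assumed sizes:

    #include <stdint.h>

    #define CHUNK_SIZE  4096u  /* stand-in; must be a power of two */
    #define CHUNK_COUNT 64u

    struct chunk  { uint8_t bytes[CHUNK_SIZE]; };
    struct buffer { struct chunk chunks[CHUNK_COUNT]; };

    /* round any interior pointer down to its chunk's base address */
    static inline struct chunk *chunk_for_address(void *addr)
    {
        return (struct chunk *)((uintptr_t)addr & ~(uintptr_t)(CHUNK_SIZE - 1));
    }

    /* chunk <-> small index: pointer-difference arithmetic on the array */
    static inline uint16_t chunk_to_ref(struct buffer *b, struct chunk *c)
    {
        return (uint16_t)(c - b->chunks);
    }

    static inline struct chunk *ref_to_chunk(struct buffer *b, uint16_t ref)
    {
        return &b->chunks[ref];
    }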
 
 #ifndef FIREHOSE_SERVER
-
-OS_ALWAYS_INLINE
-static inline bool
-firehose_buffer_pos_fits(firehose_buffer_pos_u pos, uint16_t size)
-{
-	return pos.fbc_next_entry_offs + size <= pos.fbc_private_offs;
-}
-
 #if DISPATCH_PURE_C
 
 OS_ALWAYS_INLINE
@@ -189,83 +175,12 @@
 }
 
 OS_ALWAYS_INLINE
-static inline long
-firehose_buffer_chunk_try_reserve(firehose_buffer_chunk_t fbc, uint64_t stamp,
-		firehose_stream_t stream, uint16_t pubsize,
-		uint16_t privsize, uint8_t **privptr)
-{
-	const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
-	firehose_buffer_pos_u orig, pos;
-	uint8_t qos_bits = firehose_buffer_qos_bits_propagate();
-	bool reservation_failed, stamp_delta_fits;
-
-	stamp_delta_fits = ((stamp - fbc->fbc_timestamp) >> 48) == 0;
-
-	// no acquire barrier because the returned space is written to only
-	os_atomic_rmw_loop2o(fbc, fbc_pos.fbc_atomic_pos,
-			orig.fbc_atomic_pos, pos.fbc_atomic_pos, relaxed, {
-		if (unlikely(orig.fbc_atomic_pos == 0)) {
-			// we acquired a really really old reference, and we probably
-			// just faulted in a new page
-			// FIXME: if/when we hit this we should try to madvise it back FREE
-			os_atomic_rmw_loop_give_up(return 0);
-		}
-		if (unlikely(!FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(orig, stream))) {
-			// nothing to do if the chunk is full, or the stream doesn't match,
-			// in which case the thread probably:
-			// - loaded the chunk ref
-			// - been suspended a long while
-			// - read the chunk to find a very old thing
-			os_atomic_rmw_loop_give_up(return 0);
-		}
-		pos = orig;
-		pos.fbc_qos_bits |= qos_bits;
-		if (unlikely(!firehose_buffer_pos_fits(orig,
-				ft_size + pubsize + privsize) || !stamp_delta_fits)) {
-			pos.fbc_flag_full = true;
-			reservation_failed = true;
-		} else {
-			// using these *_INC macros is so that the compiler generates better
-			// assembly: using the struct individual fields forces the compiler
-			// to handle carry propagations, and we know it won't happen
-			pos.fbc_atomic_pos += roundup(ft_size + pubsize, 8) *
-					FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC;
-			pos.fbc_atomic_pos -= privsize *
-					FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC;
-			pos.fbc_atomic_pos += FIREHOSE_BUFFER_POS_REFCNT_INC;
-			const uint16_t minimum_payload_size = 16;
-			if (!firehose_buffer_pos_fits(pos,
-					roundup(ft_size + minimum_payload_size , 8))) {
-				// if we can't even have minimum_payload_size bytes of payload
-				// for the next tracepoint, just flush right away
-				pos.fbc_flag_full = true;
-			}
-			reservation_failed = false;
-		}
-	});
-
-	if (reservation_failed) {
-		if (pos.fbc_refcnt) {
-			// nothing to do, there is a thread writing that will pick up
-			// the "FULL" flag on flush and push as a consequence
-			return 0;
-		}
-		// caller must enqueue chunk
-		return -1;
-	}
-	if (privptr) {
-		*privptr = fbc->fbc_start + pos.fbc_private_offs;
-	}
-	return orig.fbc_next_entry_offs;
-}
-
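The deleted loop's comment about the *_INC macros describes a trick that survives in the new firehose_chunk_tracepoint_try_reserve: several bitfields of one 64-bit position word are updated with a single wide add, using per-field increment constants, so the compiler never has to extract each bitfield and handle carry propagation by hand. A reduced sketch with an assumed little-endian field layout:

    #include <stdint.h>

    typedef union {
        uint64_t atomic_pos;
        struct {                      /* little-endian layout assumed */
            uint16_t next_entry_offs;
            uint16_t private_offs;
            uint8_t  refcnt;
            uint8_t  qos_bits;
            uint16_t flags;
        };
    } pos_u;

    #define POS_ENTRY_OFFS_INC   (1ull << 0)   /* +1 in next_entry_offs */
    #define POS_PRIVATE_OFFS_INC (1ull << 16)  /* +1 in private_offs */
    #define POS_REFCNT_INC       (1ull << 32)  /* +1 in refcnt */

    static pos_u pos_reserve(pos_u pos, uint16_t pubsize, uint16_t privsize)
    {
        /* one 64-bit add/sub per field; valid only because the caller
         * already checked that no field can overflow into its neighbour */
        pos.atomic_pos += pubsize  * POS_ENTRY_OFFS_INC;
        pos.atomic_pos -= privsize * POS_PRIVATE_OFFS_INC;
        pos.atomic_pos += POS_REFCNT_INC;
        return pos;
    }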
-OS_ALWAYS_INLINE
 static inline void
 firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream)
 {
 	firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream];
 	firehose_stream_state_u old_state, new_state;
-	firehose_buffer_chunk_t fbc;
+	firehose_chunk_t fc;
 	uint64_t stamp = UINT64_MAX; // will cause the reservation to fail
 	uint16_t ref;
 	long result;
@@ -278,8 +193,9 @@
 		return;
 	}
 
-	fbc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current);
-	result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, 1, 0, NULL);
+	fc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current);
+	result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream,
+			firehose_buffer_qos_bits_propagate(), 1, 0, NULL);
 	if (likely(result < 0)) {
 		firehose_buffer_ring_enqueue(fb, old_state.fss_current);
 	}
@@ -339,8 +255,7 @@
 {
 	firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream];
 	firehose_stream_state_u old_state, new_state;
-	firehose_tracepoint_t ft;
-	firehose_buffer_chunk_t fbc;
+	firehose_chunk_t fc;
 #if KERNEL
 	bool failable = false;
 #endif
@@ -356,18 +271,19 @@
 
 		ref = old_state.fss_current;
 		if (likely(ref && ref != FIREHOSE_STREAM_STATE_PRISTINE)) {
-			fbc = firehose_buffer_ref_to_chunk(fb, ref);
-			result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream,
+			fc = firehose_buffer_ref_to_chunk(fb, ref);
+			result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream,
+					firehose_buffer_qos_bits_propagate(),
 					pubsize, privsize, privptr);
 			if (likely(result > 0)) {
-				ft = (firehose_tracepoint_t)(fbc->fbc_start + result);
-				stamp -= fbc->fbc_timestamp;
-				stamp |= (uint64_t)pubsize << 48;
-				// Needed for process death handling (tracepoint-begin)
-				// see firehose_buffer_stream_chunk_install
-				os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed);
-				dispatch_compiler_barrier();
-				return ft;
+				uint64_t thread;
+#ifdef KERNEL
+				thread = thread_tid(current_thread());
+#else
+				thread = _pthread_threadid_self_np_direct();
+#endif
+				return firehose_chunk_tracepoint_begin(fc,
+						stamp, pubsize, thread, result);
 			}
 			if (likely(result < 0)) {
 				firehose_buffer_ring_enqueue(fb, old_state.fss_current);
@@ -444,8 +360,7 @@
 firehose_buffer_tracepoint_flush(firehose_buffer_t fb,
 		firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
 {
-	firehose_buffer_chunk_t fbc = firehose_buffer_chunk_for_address(ft);
-	firehose_buffer_pos_u pos;
+	firehose_chunk_t fc = firehose_buffer_chunk_for_address(ft);
 
 	// Needed for process death handling (tracepoint-flush):
 	// We want to make sure the observers
@@ -453,17 +368,8 @@
 	// 1. write all the data to the tracepoint
 	// 2. write the tracepoint ID, so that seeing it means the tracepoint
 	//    is valid
-#ifdef KERNEL
-	ft->ft_thread = thread_tid(current_thread());
-#else
-	ft->ft_thread = _pthread_threadid_self_np_direct();
-#endif
-	// release barrier makes the log writes visible
-	os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release);
-	pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos,
-			FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed);
-	if (pos.fbc_refcnt == 0 && pos.fbc_flag_full) {
-		firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fbc));
+	if (firehose_chunk_tracepoint_end(fc, ft, ftid)) {
+		firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fc));
 	}
 }
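The flush path above, now centralized in firehose_chunk_tracepoint_end, is the publish-with-release pattern the comment spells out: write the payload first, then release-store the ID so any observer that reads a valid ID is guaranteed to also see the payload. A minimal C11 sketch of that pattern:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct tracepoint {
        _Atomic uint64_t id;   /* 0 means "not yet valid" */
        unsigned char data[64];
    };

    static void tracepoint_publish(struct tracepoint *tp,
            const void *payload, size_t len, uint64_t id)
    {
        memcpy(tp->data, payload, len);  /* 1. write all the data */
        /* 2. release-store the ID: a reader that observes a nonzero id
         * with an acquire load also observes the payload bytes */
        atomic_store_explicit(&tp->id, id, memory_order_release);
    }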
 
diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c
index a6be2fa..b2b01c5 100644
--- a/src/firehose/firehose_server.c
+++ b/src/firehose/firehose_server.c
@@ -157,7 +157,7 @@
 firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags)
 {
 	firehose_buffer_t fb = fc->fc_buffer;
-	firehose_buffer_chunk_t fbc;
+	firehose_chunk_t fbc;
 	firehose_event_t evt;
 	uint16_t volatile *fbh_ring;
 	uint16_t flushed, ref, count = 0;
@@ -383,21 +383,21 @@
 	// Then look at all the allocated pages not seen in the ring
 	while (bitmap) {
 		uint16_t ref = firehose_bitmap_first_set(bitmap);
-		firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
-		uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs;
+		firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
+		uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;
 
 		bitmap &= ~(1ULL << ref);
-		if (fbc->fbc_start + fbc_length <= fbc->fbc_data) {
+		if (fbc->fc_start + fbc_length <= fbc->fc_data) {
 			// this page has its "recycle-requeue" done but hasn't gone
 			// through "recycle-reuse", or it has no data; either way, ditch it
 			continue;
 		}
-		if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) {
+		if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
 			// this thing has data, but the first tracepoint is unreadable
 			// so also just ditch it
 			continue;
 		}
-		if (!fbc->fbc_pos.fbc_flag_io) {
+		if (!fbc->fc_pos.fcp_flag_io) {
 			mem_bitmap |= 1ULL << ref;
 			continue;
 		}
@@ -416,7 +416,7 @@
 
 		while (mem_bitmap_copy) {
 			uint16_t ref = firehose_bitmap_first_set(mem_bitmap_copy);
-			firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
+			firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
 
 			mem_bitmap_copy &= ~(1ULL << ref);
 			server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, fbc);
@@ -434,9 +434,9 @@
 firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason,
 		dispatch_mach_msg_t dmsg, mach_error_t error OS_UNUSED)
 {
-	mach_msg_header_t *msg_hdr;
+	mach_msg_header_t *msg_hdr = NULL;
 	firehose_client_t fc = ctx;
-	mach_port_t oldsendp, oldrecvp;
+	mach_port_t oldsendp = 0, oldrecvp = 0;
 
 	if (dmsg) {
 		msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL);
@@ -617,7 +617,7 @@
 		DISPATCH_INTERNAL_CRASH(errno, "Unable to map kernel buffer");
 	}
 	if (fb_map.fbmi_size !=
-			FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE) {
+			FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE) {
 		DISPATCH_INTERNAL_CRASH(fb_map.fbmi_size, "Unexpected kernel buffer size");
 	}
 
@@ -777,13 +777,31 @@
 			MACH_PORT_NULL, NULL);
 }
 
+dispatch_queue_t
+firehose_server_copy_queue(firehose_server_queue_t which)
+{
+	dispatch_queue_t dq;
+	switch (which) {
+	case FIREHOSE_SERVER_QUEUE_IO:
+		dq = server_config.fs_io_drain_queue;
+		break;
+	case FIREHOSE_SERVER_QUEUE_MEMORY:
+		dq = server_config.fs_mem_drain_queue;
+		break;
+	default:
+		DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type");
+	}
+	dispatch_retain(dq);
+	return dq;
+}
+
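The new accessor hands back a +1 reference, so callers own the queue they receive and must release it; hypothetical usage:

    dispatch_queue_t q = firehose_server_copy_queue(FIREHOSE_SERVER_QUEUE_IO);
    dispatch_async(q, ^{
        // runs on the server's I/O drain queue
    });
    dispatch_release(q);  // balance the retain taken by the copy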
 #pragma mark -
 #pragma mark firehose snapshot and peeking
 
 void
 firehose_client_metadata_stream_peek(firehose_client_t fc,
 		firehose_event_t context, bool (^peek_should_start)(void),
-		bool (^peek)(firehose_buffer_chunk_t fbc))
+		bool (^peek)(firehose_chunk_t fbc))
 {
 	if (context != FIREHOSE_EVENT_MEM_BUFFER_RECEIVED) {
 		return dispatch_sync(server_config.fs_mem_drain_queue, ^{
@@ -802,21 +820,21 @@
 
 	while (bitmap) {
 		uint16_t ref = firehose_bitmap_first_set(bitmap);
-		firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
-		uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs;
+		firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
+		uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;
 
 		bitmap &= ~(1ULL << ref);
-		if (fbc->fbc_start + fbc_length <= fbc->fbc_data) {
+		if (fbc->fc_start + fbc_length <= fbc->fc_data) {
 			// this page has its "recycle-requeue" done but hasn't gone
 			// through "recycle-reuse", or it has no data; either way, ditch it
 			continue;
 		}
-		if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) {
+		if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
 			// this thing has data, but the first tracepoint is unreadable
 			// so also just ditch it
 			continue;
 		}
-		if (fbc->fbc_pos.fbc_stream != firehose_stream_metadata) {
+		if (fbc->fc_pos.fcp_stream != firehose_stream_metadata) {
 			continue;
 		}
 		if (!peek(fbc)) {
@@ -872,21 +890,21 @@
 	// Then look at all the allocated pages not seen in the ring
 	while (bitmap) {
 		uint16_t ref = firehose_bitmap_first_set(bitmap);
-		firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
-		uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs;
+		firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
+		uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;
 
 		bitmap &= ~(1ULL << ref);
-		if (fbc->fbc_start + fbc_length <= fbc->fbc_data) {
+		if (fbc->fc_start + fbc_length <= fbc->fc_data) {
 			// this page has its "recycle-requeue" done but hasn't gone
 			// through "recycle-reuse", or it has no data; either way, ditch it
 			continue;
 		}
-		if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) {
+		if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
 			// this thing has data, but the first tracepoint is unreadable
 			// so also just ditch it
 			continue;
 		}
-		if (fbc->fbc_pos.fbc_flag_io != for_io) {
+		if (fbc->fc_pos.fcp_flag_io != for_io) {
 			continue;
 		}
 		snapshot->handler(fc, evt, fbc);
diff --git a/src/init.c b/src/init.c
index 87be596..e47eafc 100644
--- a/src/init.c
+++ b/src/init.c
@@ -76,13 +76,12 @@
 pthread_key_t dispatch_cache_key;
 pthread_key_t dispatch_context_key;
 pthread_key_t dispatch_pthread_root_queue_observer_hooks_key;
-pthread_key_t dispatch_defaultpriority_key;
+pthread_key_t dispatch_basepri_key;
 #if DISPATCH_INTROSPECTION
 pthread_key_t dispatch_introspection_key;
 #elif DISPATCH_PERF_MON
 pthread_key_t dispatch_bcounter_key;
 #endif
-pthread_key_t dispatch_sema4_key;
 pthread_key_t dispatch_voucher_key;
 pthread_key_t dispatch_deferred_items_key;
 #endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE
@@ -176,8 +175,8 @@
 	.dqo_suspend_cnt_size = 0,
 	.dqo_target_queue = offsetof(struct dispatch_queue_s, do_targetq),
 	.dqo_target_queue_size = sizeof(((dispatch_queue_t)NULL)->do_targetq),
-	.dqo_priority = offsetof(struct dispatch_queue_s, dq_priority),
-	.dqo_priority_size = sizeof(((dispatch_queue_t)NULL)->dq_priority),
+	.dqo_priority = 0,
+	.dqo_priority_size = 0,
 };
 
 #if DISPATCH_USE_DIRECT_TSD
@@ -200,8 +199,7 @@
 #endif
 	.dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1),
 	.dq_label = "com.apple.main-thread",
-	.dq_width = 1,
-	.dq_atomic_bits = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC,
+	.dq_atomic_flags = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC | DQF_WIDTH(1),
 	.dq_override_voucher = DISPATCH_NO_VOUCHER,
 	.dq_serialnum = 1,
 };
@@ -212,8 +210,8 @@
 #define DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, inactive) \
 		{ \
 			DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \
-			.dqa_qos_class = (qos), \
-			.dqa_relative_priority = (qos) ? (prio) : 0, \
+			.dqa_qos_and_relpri = (_dispatch_priority_make(qos, prio) & \
+					DISPATCH_PRIORITY_REQUESTED_MASK), \
 			.dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \
 			.dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \
 			.dqa_concurrent = (concurrent), \
@@ -276,7 +274,7 @@
 
 #define DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(qos) \
 		[DQA_INDEX_QOS_CLASS_##qos] = \
-				DISPATCH_QUEUE_ATTR_PRIO_INIT(_DISPATCH_QOS_CLASS_##qos)
+				DISPATCH_QUEUE_ATTR_PRIO_INIT(DISPATCH_QOS_##qos)
 
 // DISPATCH_QUEUE_CONCURRENT resp. _dispatch_queue_attr_concurrent is aliased
 // to array member [0][0][0][0][0][0] and their properties must match!
@@ -298,7 +296,7 @@
 #if DISPATCH_VARIANT_STATIC
 // <rdar://problem/16778703>
 struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent =
-	DISPATCH_QUEUE_ATTR_INIT(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0,
+	DISPATCH_QUEUE_ATTR_INIT(QOS_CLASS_UNSPECIFIED, 0,
 			unspecified, INHERIT, 1, false);
 #endif // DISPATCH_VARIANT_STATIC
 
@@ -481,31 +479,6 @@
 );
 
 
-const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = {
-	DC_VTABLE_ENTRY(ASYNC_REDIRECT,
-		.do_kind = "dc-redirect",
-		.do_invoke = _dispatch_async_redirect_invoke),
-#if HAVE_MACH
-	DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN,
-		.do_kind = "dc-mach-send-drain",
-		.do_invoke = _dispatch_mach_send_barrier_drain_invoke),
-	DC_VTABLE_ENTRY(MACH_SEND_BARRIER,
-		.do_kind = "dc-mach-send-barrier",
-		.do_invoke = _dispatch_mach_barrier_invoke),
-	DC_VTABLE_ENTRY(MACH_RECV_BARRIER,
-		.do_kind = "dc-mach-recv-barrier",
-		.do_invoke = _dispatch_mach_barrier_invoke),
-#endif
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-	DC_VTABLE_ENTRY(OVERRIDE_STEALING,
-		.do_kind = "dc-override-stealing",
-		.do_invoke = _dispatch_queue_override_invoke),
-	DC_VTABLE_ENTRY(OVERRIDE_OWNING,
-		.do_kind = "dc-override-owning",
-		.do_invoke = _dispatch_queue_override_invoke),
-#endif
-};
-
 void
 _dispatch_vtable_init(void)
 {
@@ -1096,396 +1069,8 @@
 #endif // !USE_OBJC
 
 #pragma mark -
-#pragma mark dispatch_source_types
-
-static void
-dispatch_source_type_timer_init(dispatch_source_t ds,
-	dispatch_source_type_t type DISPATCH_UNUSED,
-	uintptr_t handle DISPATCH_UNUSED,
-	unsigned long mask,
-	dispatch_queue_t q)
-{
-	if (fastpath(!ds->ds_refs)) {
-		ds->ds_refs = _dispatch_calloc(1ul,
-				sizeof(struct dispatch_timer_source_refs_s));
-	}
-	ds->ds_needs_rearm = true;
-	ds->ds_is_timer = true;
-	if (q == dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_BACKGROUND, 0)
-			|| q == dispatch_get_global_queue(
-			DISPATCH_QUEUE_PRIORITY_BACKGROUND, DISPATCH_QUEUE_OVERCOMMIT)){
-		mask |= DISPATCH_TIMER_BACKGROUND; // <rdar://problem/12200216>
-	}
-	ds_timer(ds->ds_refs).flags = mask;
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_timer = {
-	.ke = {
-		.filter = DISPATCH_EVFILT_TIMER,
-	},
-	.mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND|
-			DISPATCH_TIMER_WALL_CLOCK,
-	.init = dispatch_source_type_timer_init,
-};
-
-static void
-dispatch_source_type_after_init(dispatch_source_t ds,
-	dispatch_source_type_t type, uintptr_t handle, unsigned long mask,
-	dispatch_queue_t q)
-{
-	dispatch_source_type_timer_init(ds, type, handle, mask, q);
-	ds->ds_needs_rearm = false;
-	ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_AFTER;
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_after = {
-	.ke = {
-		.filter = DISPATCH_EVFILT_TIMER,
-	},
-	.init = dispatch_source_type_after_init,
-};
-
-static void
-dispatch_source_type_timer_with_aggregate_init(dispatch_source_t ds,
-	dispatch_source_type_t type, uintptr_t handle, unsigned long mask,
-	dispatch_queue_t q)
-{
-	ds->ds_refs = _dispatch_calloc(1ul,
-			sizeof(struct dispatch_timer_source_aggregate_refs_s));
-	dispatch_source_type_timer_init(ds, type, handle, mask, q);
-	ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_WITH_AGGREGATE;
-	ds->dq_specific_q = (void*)handle;
-	_dispatch_retain(ds->dq_specific_q);
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_timer_with_aggregate={
-	.ke = {
-		.filter = DISPATCH_EVFILT_TIMER,
-		.ident = ~0ull,
-	},
-	.mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND,
-	.init = dispatch_source_type_timer_with_aggregate_init,
-};
-
-static void
-dispatch_source_type_interval_init(dispatch_source_t ds,
-	dispatch_source_type_t type, uintptr_t handle, unsigned long mask,
-	dispatch_queue_t q)
-{
-	dispatch_source_type_timer_init(ds, type, handle, mask, q);
-	ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_INTERVAL;
-	unsigned long ident = _dispatch_source_timer_idx(ds->ds_refs);
-	ds->ds_dkev->dk_kevent.ident = ds->ds_ident_hack = ident;
-	_dispatch_source_set_interval(ds, handle);
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_interval = {
-	.ke = {
-		.filter = DISPATCH_EVFILT_TIMER,
-		.ident = ~0ull,
-	},
-	.mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND|
-			DISPATCH_INTERVAL_UI_ANIMATION,
-	.init = dispatch_source_type_interval_init,
-};
-
-static void
-dispatch_source_type_readwrite_init(dispatch_source_t ds,
-	dispatch_source_type_t type DISPATCH_UNUSED,
-	uintptr_t handle DISPATCH_UNUSED,
-	unsigned long mask DISPATCH_UNUSED,
-	dispatch_queue_t q DISPATCH_UNUSED)
-{
-	ds->ds_is_level = true;
-#if HAVE_DECL_NOTE_LOWAT
-	// bypass kernel check for device kqueue support rdar://19004921
-	ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT;
-#endif
-	ds->ds_dkev->dk_kevent.data = 1;
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_read = {
-	.ke = {
-		.filter = EVFILT_READ,
-		.flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC,
-	},
-	.init = dispatch_source_type_readwrite_init,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_write = {
-	.ke = {
-		.filter = EVFILT_WRITE,
-		.flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC,
-	},
-	.init = dispatch_source_type_readwrite_init,
-};
-
-#if DISPATCH_USE_MEMORYSTATUS
-
-#if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483
-static int _dispatch_ios_simulator_memory_warnings_fd = -1;
-static void
-_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED)
-{
-	char *e = getenv("SIMULATOR_MEMORY_WARNINGS");
-	if (!e) return;
-	_dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY);
-	if (_dispatch_ios_simulator_memory_warnings_fd == -1) {
-		(void)dispatch_assume_zero(errno);
-	}
-}
-#endif
-
-#if TARGET_IPHONE_SIMULATOR
-static void
-dispatch_source_type_memorypressure_init(dispatch_source_t ds,
-	dispatch_source_type_t type DISPATCH_UNUSED,
-	uintptr_t handle DISPATCH_UNUSED,
-	unsigned long mask DISPATCH_UNUSED,
-	dispatch_queue_t q DISPATCH_UNUSED)
-{
-	static dispatch_once_t pred;
-	dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init);
-	handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd;
-	mask = NOTE_ATTRIB;
-	ds->ds_dkev->dk_kevent.filter = EVFILT_VNODE;
-	ds->ds_dkev->dk_kevent.ident = handle;
-	ds->ds_dkev->dk_kevent.flags |= EV_CLEAR;
-	ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask;
-	ds->ds_ident_hack = handle;
-	ds->ds_pending_data_mask = mask;
-	ds->ds_memorypressure_override = 1;
-}
-#else
-#define dispatch_source_type_memorypressure_init NULL
-#endif
-
-#ifndef NOTE_MEMORYSTATUS_LOW_SWAP
-#define NOTE_MEMORYSTATUS_LOW_SWAP 0x8
-#endif
-
-const struct dispatch_source_type_s _dispatch_source_type_memorypressure = {
-	.ke = {
-		.filter = EVFILT_MEMORYSTATUS,
-		.flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
-	},
-	.mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN
-			|NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP
-			|NOTE_MEMORYSTATUS_PROC_LIMIT_WARN|NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL,
-	.init = dispatch_source_type_memorypressure_init,
-};
-
-static void
-dispatch_source_type_vm_init(dispatch_source_t ds,
-	dispatch_source_type_t type DISPATCH_UNUSED,
-	uintptr_t handle DISPATCH_UNUSED,
-	unsigned long mask DISPATCH_UNUSED,
-	dispatch_queue_t q DISPATCH_UNUSED)
-{
-	// Map legacy vm pressure to memorypressure warning rdar://problem/15907505
-	mask = NOTE_MEMORYSTATUS_PRESSURE_WARN;
-	ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask;
-	ds->ds_pending_data_mask = mask;
-	ds->ds_vmpressure_override = 1;
-#if TARGET_IPHONE_SIMULATOR
-	dispatch_source_type_memorypressure_init(ds, type, handle, mask, q);
-#endif
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_vm = {
-	.ke = {
-		.filter = EVFILT_MEMORYSTATUS,
-		.flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
-	},
-	.mask = NOTE_VM_PRESSURE,
-	.init = dispatch_source_type_vm_init,
-};
-
-#elif DISPATCH_USE_VM_PRESSURE
-
-const struct dispatch_source_type_s _dispatch_source_type_vm = {
-	.ke = {
-		.filter = EVFILT_VM,
-		.flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
-	},
-	.mask = NOTE_VM_PRESSURE,
-};
-
-#endif // DISPATCH_USE_VM_PRESSURE
-
-const struct dispatch_source_type_s _dispatch_source_type_signal = {
-	.ke = {
-		.filter = EVFILT_SIGNAL,
-		.flags = EV_UDATA_SPECIFIC,
-	},
-};
-
-#if !defined(__linux__)
-static void
-dispatch_source_type_proc_init(dispatch_source_t ds,
-	dispatch_source_type_t type DISPATCH_UNUSED,
-	uintptr_t handle DISPATCH_UNUSED,
-	unsigned long mask DISPATCH_UNUSED,
-	dispatch_queue_t q DISPATCH_UNUSED)
-{
-	ds->ds_dkev->dk_kevent.fflags |= NOTE_EXIT; // rdar://16655831
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_proc = {
-	.ke = {
-		.filter = EVFILT_PROC,
-		.flags = EV_CLEAR|EV_UDATA_SPECIFIC,
-	},
-	.mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC
-#if HAVE_DECL_NOTE_SIGNAL
-			|NOTE_SIGNAL
-#endif
-#if HAVE_DECL_NOTE_REAP
-			|NOTE_REAP
-#endif
-			,
-	.init = dispatch_source_type_proc_init,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_vnode = {
-	.ke = {
-		.filter = EVFILT_VNODE,
-		.flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC,
-	},
-	.mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK|
-			NOTE_RENAME|NOTE_FUNLOCK
-#if HAVE_DECL_NOTE_REVOKE
-			|NOTE_REVOKE
-#endif
-#if HAVE_DECL_NOTE_NONE
-			|NOTE_NONE
-#endif
-			,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_vfs = {
-	.ke = {
-		.filter = EVFILT_FS,
-		.flags = EV_CLEAR|EV_UDATA_SPECIFIC,
-	},
-	.mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD|
-			VQ_ASSIST|VQ_NOTRESPLOCK
-#if HAVE_DECL_VQ_UPDATE
-			|VQ_UPDATE
-#endif
-#if HAVE_DECL_VQ_VERYLOWDISK
-			|VQ_VERYLOWDISK
-#endif
-#if HAVE_DECL_VQ_QUOTA
-			|VQ_QUOTA
-#endif
-			,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_sock = {
-#ifdef EVFILT_SOCK
-	.ke = {
-		.filter = EVFILT_SOCK,
-		.flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC,
-	},
-	.mask = NOTE_CONNRESET |  NOTE_READCLOSED | NOTE_WRITECLOSED |
-		NOTE_TIMEOUT | NOTE_NOSRCADDR |  NOTE_IFDENIED | NOTE_SUSPEND |
-		NOTE_RESUME | NOTE_KEEPALIVE
-#ifdef NOTE_ADAPTIVE_WTIMO
-		| NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO
-#endif
-#ifdef NOTE_CONNECTED
-		| NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED
-#endif
-#ifdef NOTE_NOTIFY_ACK
-		| NOTE_NOTIFY_ACK
-#endif
-		,
-#endif // EVFILT_SOCK
-};
-#endif // !defined(__linux__)
-
-static void
-dispatch_source_type_data_init(dispatch_source_t ds,
-	dispatch_source_type_t type DISPATCH_UNUSED,
-	uintptr_t handle DISPATCH_UNUSED,
-	unsigned long mask DISPATCH_UNUSED,
-	dispatch_queue_t q DISPATCH_UNUSED)
-{
-	ds->ds_is_installed = true;
-	ds->ds_is_custom_source = true;
-	ds->ds_is_direct_kevent = true;
-	ds->ds_pending_data_mask = ~0ul;
-	ds->ds_needs_rearm = false; // not registered with kevent
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_data_add = {
-	.ke = {
-		.filter = DISPATCH_EVFILT_CUSTOM_ADD,
-		.flags = EV_UDATA_SPECIFIC,
-	},
-	.init = dispatch_source_type_data_init,
-};
-
-const struct dispatch_source_type_s _dispatch_source_type_data_or = {
-	.ke = {
-		.filter = DISPATCH_EVFILT_CUSTOM_OR,
-		.flags = EV_CLEAR|EV_UDATA_SPECIFIC,
-		.fflags = ~0u,
-	},
-	.init = dispatch_source_type_data_init,
-};
-
-#if HAVE_MACH
-
-static void
-dispatch_source_type_mach_send_init(dispatch_source_t ds,
-	dispatch_source_type_t type DISPATCH_UNUSED,
-	uintptr_t handle DISPATCH_UNUSED, unsigned long mask,
-	dispatch_queue_t q DISPATCH_UNUSED)
-{
-	if (!mask) {
-		// Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD
-		ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_SEND_DEAD;
-		ds->ds_pending_data_mask = DISPATCH_MACH_SEND_DEAD;
-	}
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_mach_send = {
-	.ke = {
-		.filter = DISPATCH_EVFILT_MACH_NOTIFICATION,
-		.flags = EV_CLEAR,
-	},
-	.mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE,
-	.init = dispatch_source_type_mach_send_init,
-};
-
-static void
-dispatch_source_type_mach_recv_init(dispatch_source_t ds,
-	dispatch_source_type_t type DISPATCH_UNUSED,
-	uintptr_t handle DISPATCH_UNUSED,
-	unsigned long mask DISPATCH_UNUSED,
-	dispatch_queue_t q DISPATCH_UNUSED)
-{
-	ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE;
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-	if (_dispatch_evfilt_machport_direct_enabled) return;
-	ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE;
-	ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED);
-	ds->ds_is_direct_kevent = false;
-#endif
-}
-
-const struct dispatch_source_type_s _dispatch_source_type_mach_recv = {
-	.ke = {
-		.filter = EVFILT_MACHPORT,
-		.flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC,
-	},
-	.init = dispatch_source_type_mach_recv_init,
-};
-
-#pragma mark -
 #pragma mark dispatch_mig
+#if HAVE_MACH
 
 void *
 dispatch_mach_msg_get_context(mach_msg_header_t *msg)
diff --git a/src/inline_internal.h b/src/inline_internal.h
index d1c73dd..0c515f9 100644
--- a/src/inline_internal.h
+++ b/src/inline_internal.h
@@ -250,48 +250,6 @@
 #pragma mark dispatch_thread
 #if DISPATCH_PURE_C
 
-#define DISPATCH_DEFERRED_ITEMS_MAGIC  0xdefe55edul /* deferred */
-#define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 8
-#ifdef WORKQ_KEVENT_EVENT_BUFFER_LEN
-_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >=
-		DISPATCH_DEFERRED_ITEMS_EVENT_COUNT,
-		"our list should not be longer than the kernel's");
-#endif
-
-typedef struct dispatch_deferred_items_s {
-	uint32_t ddi_magic;
-	dispatch_queue_t ddi_stashed_dq;
-	struct dispatch_object_s *ddi_stashed_dou;
-	dispatch_priority_t ddi_stashed_pp;
-	int ddi_nevents;
-	int ddi_maxevents;
-	_dispatch_kevent_qos_s ddi_eventlist[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
-} dispatch_deferred_items_s, *dispatch_deferred_items_t;
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_deferred_items_set(dispatch_deferred_items_t ddi)
-{
-	_dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_deferred_items_t
-_dispatch_deferred_items_get(void)
-{
-	dispatch_deferred_items_t ddi = (dispatch_deferred_items_t)
-			_dispatch_thread_getspecific(dispatch_deferred_items_key);
-	if (ddi && ddi->ddi_magic == DISPATCH_DEFERRED_ITEMS_MAGIC) {
-		return ddi;
-	}
-	return NULL;
-}
-
-#endif // DISPATCH_PURE_C
-#pragma mark -
-#pragma mark dispatch_thread
-#if DISPATCH_PURE_C
-
 DISPATCH_ALWAYS_INLINE
 static inline dispatch_thread_context_t
 _dispatch_thread_context_find(const void *key)
@@ -450,28 +408,28 @@
 DISPATCH_ALWAYS_INLINE
 static inline int
 _dispatch_wqthread_override_start_check_owner(mach_port_t thread,
-		pthread_priority_t pp, mach_port_t *ulock_addr)
+		dispatch_qos_t qos, mach_port_t *ulock_addr)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
 	if (!_dispatch_set_qos_class_enabled) return 0;
 	return _pthread_workqueue_override_start_direct_check_owner(thread,
-			pp, ulock_addr);
+			_dispatch_qos_to_pp(qos), ulock_addr);
 #else
-	(void)thread; (void)pp; (void)ulock_addr;
+	(void)thread; (void)qos; (void)ulock_addr;
 	return 0;
 #endif
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_wqthread_override_start(mach_port_t thread,
-		pthread_priority_t pp)
+_dispatch_wqthread_override_start(mach_port_t thread, dispatch_qos_t qos)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
 	if (!_dispatch_set_qos_class_enabled) return;
-	(void)_pthread_workqueue_override_start_direct(thread, pp);
+	(void)_pthread_workqueue_override_start_direct(thread,
+			_dispatch_qos_to_pp(qos));
 #else
-	(void)thread; (void)pp;
+	(void)thread; (void)qos;
 #endif
 }
 
@@ -510,43 +468,6 @@
 #endif
 }
 
-#if DISPATCH_DEBUG_QOS && HAVE_PTHREAD_WORKQUEUE_QOS
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_qos_class_is_valid(pthread_priority_t pp)
-{
-	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	if (pp > (1UL << (DISPATCH_QUEUE_QOS_COUNT +
-			_PTHREAD_PRIORITY_QOS_CLASS_SHIFT))) {
-		return false;
-	}
-	return true;
-}
-#define _dispatch_assert_is_valid_qos_class(pp)  ({ typeof(pp) _pp = (pp); \
-		if (unlikely(!_dispatch_qos_class_is_valid(_pp))) { \
-			DISPATCH_INTERNAL_CRASH(_pp, "Invalid qos class"); \
-		} \
-	})
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_qos_override_is_valid(pthread_priority_t pp)
-{
-	if (pp & (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK) {
-		return false;
-	}
-	return _dispatch_qos_class_is_valid(pp);
-}
-#define _dispatch_assert_is_valid_qos_override(pp)  ({ typeof(pp) _pp = (pp); \
-		if (unlikely(!_dispatch_qos_override_is_valid(_pp))) { \
-			DISPATCH_INTERNAL_CRASH(_pp, "Invalid override"); \
-		} \
-	})
-#else
-#define _dispatch_assert_is_valid_qos_override(pp) (void)(pp)
-#define _dispatch_assert_is_valid_qos_class(pp) (void)(pp)
-#endif
-
 #endif // DISPATCH_PURE_C
 #pragma mark -
 #pragma mark dispatch_queue_t state accessors
@@ -658,12 +579,9 @@
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_queue_has_immutable_target(dispatch_queue_t dq)
+_dispatch_queue_is_legacy(dispatch_queue_t dq)
 {
-	if (dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) {
-		return false;
-	}
-	return dx_type(dq) != DISPATCH_QUEUE_LEGACY_TYPE;
+	return _dispatch_queue_atomic_flags(dq) & DQF_LEGACY;
 }
 
 #endif // DISPATCH_PURE_C
@@ -723,7 +641,8 @@
 {
 	return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION;
 }
-#define DISPATCH_QUEUE_IS_SUSPENDED(x)  _dq_state_is_suspended((x)->dq_state)
+#define DISPATCH_QUEUE_IS_SUSPENDED(x) \
+		_dq_state_is_suspended(os_atomic_load2o(x, dq_state, relaxed))
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
@@ -776,9 +695,36 @@
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dq_state_has_override(uint64_t dq_state)
+_dq_state_received_override(uint64_t dq_state)
 {
-	return dq_state & DISPATCH_QUEUE_HAS_OVERRIDE;
+	return dq_state & DISPATCH_QUEUE_RECEIVED_OVERRIDE;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dq_state_max_qos(uint64_t dq_state)
+{
+	dq_state &= DISPATCH_QUEUE_MAX_QOS_MASK;
+	return (dispatch_qos_t)(dq_state >> DISPATCH_QUEUE_MAX_QOS_SHIFT);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_dq_state_from_qos(dispatch_qos_t qos)
+{
+	return (uint64_t)(qos) << DISPATCH_QUEUE_MAX_QOS_SHIFT;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_dq_state_merge_qos(uint64_t dq_state, dispatch_qos_t qos)
+{
+	uint64_t qos_bits = _dq_state_from_qos(qos);
+	if ((dq_state & DISPATCH_QUEUE_MAX_QOS_MASK) < qos_bits) {
+		dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+		dq_state |= qos_bits | DISPATCH_QUEUE_RECEIVED_OVERRIDE;
+	}
+	return dq_state;
 }
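_dq_state_merge_qos is a pure function over the state word; raising a live queue's max QoS applies it inside a CAS loop so a concurrent raise is never lost and the stored QoS only ever goes up. A self-contained sketch of that loop with an assumed bit layout:

    #include <stdatomic.h>
    #include <stdint.h>

    #define MAX_QOS_SHIFT 56                        /* assumed layout */
    #define MAX_QOS_MASK  (0xfull << MAX_QOS_SHIFT)

    /* raise the QoS recorded in a 64-bit state word, never lower it */
    static void state_merge_qos(_Atomic uint64_t *state, uint64_t qos)
    {
        uint64_t old = atomic_load_explicit(state, memory_order_relaxed);
        uint64_t new;
        do {
            if ((old & MAX_QOS_MASK) >= (qos << MAX_QOS_SHIFT)) {
                return;  /* already at or above this QoS */
            }
            new = (old & ~MAX_QOS_MASK) | (qos << MAX_QOS_SHIFT);
        } while (!atomic_compare_exchange_weak_explicit(state, &old, new,
                memory_order_relaxed, memory_order_relaxed));
    }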
 
 DISPATCH_ALWAYS_INLINE
@@ -849,20 +795,15 @@
 #pragma mark dispatch_queue_t state machine
 #ifndef __cplusplus
 
-static inline bool _dispatch_queue_need_override(dispatch_queue_class_t dqu,
-		pthread_priority_t pp);
-static inline bool _dispatch_queue_need_override_retain(
-		dispatch_queue_class_t dqu, pthread_priority_t pp);
-static inline dispatch_priority_t _dispatch_queue_reset_override_priority(
-		dispatch_queue_class_t dqu, bool qp_is_floor);
-static inline bool _dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu,
-		dispatch_priority_t new_op);
-static inline pthread_priority_t _dispatch_get_defaultpriority(void);
-static inline void _dispatch_set_defaultpriority_override(void);
-static inline void _dispatch_reset_defaultpriority(pthread_priority_t pp);
 static inline pthread_priority_t _dispatch_get_priority(void);
-static inline pthread_priority_t _dispatch_set_defaultpriority(
-		pthread_priority_t pp, pthread_priority_t *new_pp);
+static inline dispatch_priority_t _dispatch_get_basepri(void);
+static inline dispatch_qos_t _dispatch_get_basepri_override_qos_floor(void);
+static inline void _dispatch_set_basepri_override_qos(dispatch_qos_t qos);
+static inline void _dispatch_reset_basepri(dispatch_priority_t dbp);
+static inline dispatch_priority_t _dispatch_set_basepri(dispatch_priority_t dbp);
+
+static inline bool _dispatch_queue_need_override_retain(
+		dispatch_queue_class_t dqu, dispatch_qos_t qos);
 
 DISPATCH_ALWAYS_INLINE
 static inline void
@@ -891,7 +832,7 @@
 		dq->do_ref_cnt++; // rdar://8181908 see _dispatch_queue_resume
 	}
 	dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;
-	dqf |= (dispatch_queue_flags_t)width << DQF_WIDTH_SHIFT;
+	dqf |= DQF_WIDTH(width);
 	os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
 	dq->dq_state = dq_state;
 	dq->dq_override_voucher = DISPATCH_NO_VOUCHER;
@@ -946,7 +887,10 @@
 _dispatch_queue_try_wakeup(dispatch_queue_t dq, uint64_t dq_state,
 		dispatch_wakeup_flags_t flags)
 {
-	if (_dq_state_should_wakeup(dq_state)) {
+	if (_dq_state_is_runnable(dq_state) &&
+			!_dq_state_drain_locked(dq_state) &&
+			(!_dq_state_is_enqueued(dq_state) ||
+			(flags & DISPATCH_WAKEUP_WAITER_HANDOFF))) {
 		if (slowpath(_dq_state_is_dirty(dq_state))) {
 			// <rdar://problem/14637483>
 			// seq_cst wrt state changes that were flushed and not acted upon
@@ -959,6 +903,21 @@
 	}
 }
 
+#define _dispatch_queue_should_override_self(dq_state, qos) \
+	unlikely(qos < _dq_state_max_qos(dq_state))
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_queue_override_self(uint64_t dq_state)
+{
+	dispatch_qos_t qos = _dq_state_max_qos(dq_state);
+	_dispatch_wqthread_override_start(_dispatch_tid_self(), qos);
+	// ensure that the root queue sees
+	// that this thread was overridden.
+	_dispatch_set_basepri_override_qos(qos);
+	return qos;
+}
+
 /* Used by:
  * - _dispatch_queue_class_invoke (normal path)
  * - _dispatch_queue_override_invoke (stealer)
@@ -984,14 +943,23 @@
 		clear_enqueued_bit = DISPATCH_QUEUE_ENQUEUED;
 	}
 
+	dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor();
+retry:
 	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
 		new_state = old_state;
 		new_state ^= clear_enqueued_bit;
 		if (likely(_dq_state_is_runnable(old_state) &&
 				!_dq_state_drain_locked(old_state))) {
+			if (_dispatch_queue_should_override_self(old_state, oq_floor)) {
+				os_atomic_rmw_loop_give_up({
+					oq_floor = _dispatch_queue_override_self(old_state);
+					goto retry;
+				});
+			}
 			//
-			// Only keep the HAS_WAITER bit (and ENQUEUED if stealing).
-			// In particular acquiring the drain lock clears the DIRTY bit
+			// Only keep the HAS_WAITER, MAX_QOS and ENQUEUED (if stealing) bits
+			// In particular acquiring the drain lock clears the DIRTY and
+			// RECEIVED_OVERRIDE
 			//
 			new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
 			//
@@ -1176,44 +1144,41 @@
 {
 	uint64_t old_state = os_atomic_load2o(dq, dq_state, relaxed);
 	uint64_t new_state;
-	dispatch_priority_t pp = 0, op;
 
-	do {
+	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+		new_state = old_state;
 		if (unlikely(_dq_state_is_dirty(old_state) &&
 				!_dq_state_is_suspended(old_state))) {
-			// just renew the drain lock with an acquire barrier, to see
-			// what the enqueuer that set DIRTY has done.
-			os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DIRTY, acquire);
-			_dispatch_queue_reinstate_override_priority(dq, pp);
-			return false;
-		}
-		new_state = old_state - owned;
-		if ((new_state & DISPATCH_QUEUE_WIDTH_FULL_BIT) ||
-				_dq_state_is_suspended(old_state)) {
-			// the test for the WIDTH_FULL_BIT is about narrow concurrent queues
-			// releasing the drain lock while being at the width limit
+			new_state &= ~DISPATCH_QUEUE_DIRTY;
+		} else {
+			new_state -= owned;
+			// the test for the WIDTH_FULL_BIT is about narrow concurrent
+			// queues releasing the drain lock while being at the width limit
 			//
 			// _non_barrier_complete() will set the DIRTY bit when going back
 			// under the limit which will cause the try_unlock to fail
-			new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
-		} else {
-			new_state &= ~DISPATCH_QUEUE_DIRTY;
-			new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
-			// This current owner is the only one that can clear HAS_OVERRIDE,
-			// so accumulating reset overrides here is valid.
-			if (unlikely(_dq_state_has_override(new_state))) {
-				new_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE;
-				dispatch_assert(!_dispatch_queue_is_thread_bound(dq));
-				op = _dispatch_queue_reset_override_priority(dq, false);
-				if (op > pp) pp = op;
+			if ((new_state & DISPATCH_QUEUE_WIDTH_FULL_BIT) ||
+					_dq_state_is_suspended(old_state)) {
+				new_state =
+					DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
+			} else {
+				new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+				new_state &= ~DISPATCH_QUEUE_RECEIVED_OVERRIDE;
+				new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
 			}
 		}
-	} while (!fastpath(os_atomic_cmpxchgvw2o(dq, dq_state,
-			old_state, new_state, &old_state, release)));
+	});
 
-	if (_dq_state_has_override(old_state)) {
+	if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_DIRTY)) {
+		// just renew the drain lock with an acquire barrier, to see
+		// what the enqueuer that set DIRTY has done.
+		os_atomic_thread_fence(acquire);
+		return false;
+	}
+
+	if (_dq_state_received_override(old_state)) {
 		// Ensure that the root queue sees that this thread was overridden.
-		_dispatch_set_defaultpriority_override();
+		_dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state));
 	}
 	return true;
 }
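The rewritten unlock folds the dirty check into the CAS loop itself: when DIRTY is set, the loop consumes the bit and keeps the drain lock instead of releasing ownership, and the acquire fence afterwards pairs with the enqueuer's release so its pushed work becomes visible. A reduced sketch of that shape, names hypothetical:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define LOCK_OWNED 0x1ull
    #define DIRTY      0x2ull

    /* try to release a drain lock, but keep it if new work arrived */
    static bool try_unlock(_Atomic uint64_t *state)
    {
        uint64_t old = atomic_load_explicit(state, memory_order_relaxed);
        uint64_t new;
        do {
            if (old & DIRTY) {
                new = old & ~DIRTY;       /* consume DIRTY, keep the lock */
            } else {
                new = old & ~LOCK_OWNED;  /* actually release */
            }
        } while (!atomic_compare_exchange_weak_explicit(state, &old, new,
                memory_order_release, memory_order_relaxed));

        if ((old ^ new) & DIRTY) {
            /* kept the lock: acquire pairs with the enqueuer's release */
            atomic_thread_fence(memory_order_acquire);
            return false;
        }
        return true;
    }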
@@ -1221,7 +1186,7 @@
 /* Used at the end of Drainers when the next work item is known
  * and that the dirty-head check isn't needed.
  *
- * This releases `owned`, clears DIRTY, and handles HAS_OVERRIDE when seen.
+ * This releases `owned`, clears DIRTY, and handles overrides when seen.
  */
 DISPATCH_ALWAYS_INLINE
 static inline uint64_t
@@ -1248,12 +1213,13 @@
 		value &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK;
 		value &= ~DISPATCH_QUEUE_DRAIN_PENDED;
 		value &= ~DISPATCH_QUEUE_DIRTY;
+		value &= ~DISPATCH_QUEUE_RECEIVED_OVERRIDE;
 		value ^= next_owner;
 	});
 
-	if (_dq_state_has_override(dq_state)) {
+	if (_dq_state_received_override(dq_state)) {
 		// Ensure that the root queue sees that this thread was overridden.
-		_dispatch_set_defaultpriority_override();
+		_dispatch_set_basepri_override_qos(_dq_state_max_qos(dq_state));
 	}
 	if (orig_state) *orig_state = dq_state;
 	return value;
@@ -1294,7 +1260,7 @@
 		os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \
 		_tail->_o_next = NULL; \
 		_prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \
-		if (fastpath(_prev)) { \
+		if (likely(_prev)) { \
 			os_atomic_store2o(_prev, _o_next, _head, relaxed); \
 		} \
 		(_prev == NULL); \
@@ -1314,20 +1280,22 @@
 // Single Consumer calls, can NOT be used safely concurrently
 //
 
-#define os_mpsc_get_head(q, _ns)  ({ \
-		os_mpsc_node_type(q, _ns) _head; \
-		_dispatch_wait_until(_head = (q)->_ns##_head); \
-		_head; \
-	})
+#define os_mpsc_get_head(q, _ns) \
+		_dispatch_wait_until(os_atomic_load2o(q, _ns##_head, dependency))
+
+#define os_mpsc_get_next(_n, _o_next) \
+		_dispatch_wait_until(os_atomic_load2o(_n, _o_next, dependency))
 
 #define os_mpsc_pop_head(q, _ns, head, _o_next)  ({ \
 		typeof(q) _q = (q); \
-		os_mpsc_node_type(_q, _ns) _head = (head), _n = fastpath(_head->_o_next); \
+		os_mpsc_node_type(_q, _ns) _head = (head), _n; \
+		_n = os_atomic_load2o(_head, _o_next, dependency); \
 		os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
 		/* 22708742: set tail to NULL with release, so that NULL write */ \
 		/* to head above doesn't clobber head from concurrent enqueuer */ \
-		if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release)) { \
-			_dispatch_wait_until(_n = fastpath(_head->_o_next)); \
+		if (unlikely(!_n && \
+				!os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release))) { \
+			_n = os_mpsc_get_next(_head, _o_next); \
 			os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
 		} \
 		_n; \
@@ -1336,17 +1304,17 @@
 #define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next)  ({ \
 		typeof(q) _q = (q); \
 		os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \
-		if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed)) { \
-			_dispatch_wait_until(_n = _q->_ns##_head); \
-			_head->_o_next = _n; \
+		if (unlikely(!_n && \
+				!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed))) { \
+			_n = os_mpsc_get_head(q, _ns); \
+			os_atomic_store2o(_head, _o_next, _n, relaxed); \
 		} \
 		os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
 	})
 
 #define os_mpsc_capture_snapshot(q, _ns, tail)  ({ \
 		typeof(q) _q = (q); \
-		os_mpsc_node_type(_q, _ns) _head; \
-		_dispatch_wait_until(_head = _q->_ns##_head); \
+		os_mpsc_node_type(_q, _ns) _head = os_mpsc_get_head(q, _ns); \
 		os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \
 		/* 22708742: set tail to NULL with release, so that NULL write */ \
 		/* to head above doesn't clobber head from concurrent enqueuer */ \
@@ -1357,17 +1325,17 @@
 #define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \
 		os_unqualified_pointer_type(head) _head = (head), _n = NULL; \
 		if (_head != (tail)) { \
-			_dispatch_wait_until(_n = _head->_o_next); \
+			_n = os_mpsc_get_next(_head, _o_next); \
 		}; \
 		_n; })
 
 #define os_mpsc_prepend(q, _ns, head, tail, _o_next)  ({ \
 		typeof(q) _q = (q); \
 		os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \
-		_tail->_o_next = NULL; \
-		if (!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release)) { \
-			_dispatch_wait_until(_n = _q->_ns##_head); \
-			_tail->_o_next = _n; \
+		os_atomic_store2o(_tail, _o_next, NULL, relaxed); \
+		if (unlikely(!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release))) { \
+			_n = os_mpsc_get_head(q, _ns); \
+			os_atomic_store2o(_tail, _o_next, _n, relaxed); \
 		} \
 		os_atomic_store2o(_q, _ns##_head, _head, relaxed); \
 	})
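These macros implement the intrusive multi-producer single-consumer queue libdispatch hangs off every queue: a producer swaps the tail and only then links the previous node, so a consumer can transiently observe a swapped tail whose node is not linked yet, which is exactly the window the new os_mpsc_get_head/os_mpsc_get_next dependency-ordered waits cover. A self-contained C11 sketch of the same shape (acquire loads stand in for the dependency ordering):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct node { _Atomic(struct node *) next; };
    struct mpsc { _Atomic(struct node *) head, tail; };

    /* many producers; returns true when the queue was empty */
    static bool mpsc_push(struct mpsc *q, struct node *n)
    {
        atomic_store_explicit(&n->next, NULL, memory_order_relaxed);
        struct node *prev = atomic_exchange_explicit(&q->tail, n,
                memory_order_release);
        if (prev) {  /* link after the swap: the gap a consumer may see */
            atomic_store_explicit(&prev->next, n, memory_order_relaxed);
            return false;
        }
        atomic_store_explicit(&q->head, n, memory_order_relaxed);
        return true;
    }

    /* single consumer only */
    static struct node *mpsc_pop(struct mpsc *q)
    {
        struct node *head = atomic_load_explicit(&q->head,
                memory_order_acquire);
        if (!head) return NULL;
        struct node *next = atomic_load_explicit(&head->next,
                memory_order_acquire);
        /* store head first, CAS tail with release second, so this write
         * cannot clobber a head installed by a racing producer (22708742) */
        atomic_store_explicit(&q->head, next, memory_order_relaxed);
        if (!next) {
            struct node *expected = head;
            if (!atomic_compare_exchange_strong_explicit(&q->tail, &expected,
                    NULL, memory_order_release, memory_order_relaxed)) {
                /* a producer swapped the tail but hasn't linked yet: wait */
                while (!(next = atomic_load_explicit(&head->next,
                        memory_order_acquire))) { /* spin */ }
                atomic_store_explicit(&q->head, next, memory_order_relaxed);
            }
        }
        return head;
    }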
@@ -1377,13 +1345,13 @@
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, pthread_priority_t pp)
+_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, dispatch_qos_t qos)
 {
 	dispatch_lock_owner owner;
 	if (_dispatch_unfair_lock_trylock(&dq->dq_sidelock, &owner)) {
 		return true;
 	}
-	_dispatch_wqthread_override_start_check_owner(owner, pp,
+	_dispatch_wqthread_override_start_check_owner(owner, qos,
 			&dq->dq_sidelock.dul_lock);
 	return false;
 }
@@ -1403,7 +1371,9 @@
 		return true;
 	}
 	// Ensure that the root queue sees that this thread was overridden.
-	_dispatch_set_defaultpriority_override();
+	// Since we don't know which override QoS was used, use MAINTENANCE
+	// as a marker for _dispatch_reset_basepri_override()
+	_dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE);
 	return false;
 }
 
@@ -1413,7 +1383,9 @@
 {
 	if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq->dq_sidelock)) {
 		// Ensure that the root queue sees that this thread was overridden.
-		_dispatch_set_defaultpriority_override();
+		// Since we don't know which override QoS was used, use MAINTENANCE
+		// as a marker for _dispatch_reset_basepri_override()
+		_dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE);
 	}
 }
 
@@ -1500,96 +1472,69 @@
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
-		dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
+		dispatch_object_t _tail, dispatch_qos_t qos, unsigned int n)
 {
 	struct dispatch_object_s *head = _head._do, *tail = _tail._do;
-	bool override = _dispatch_queue_need_override_retain(dq, pp);
+	bool overriding = _dispatch_queue_need_override_retain(dq, qos);
 	dispatch_queue_flags_t flags;
 	if (slowpath(_dispatch_queue_push_update_tail_list(dq, head, tail))) {
-		_dispatch_queue_push_update_head(dq, head, override);
+		_dispatch_queue_push_update_head(dq, head, overriding);
 		if (fastpath(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) {
 			return _dispatch_queue_push_list_slow(dq, n);
 		}
 		flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH;
-	} else if (override) {
+	} else if (overriding) {
 		flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING;
 	} else {
 		return;
 	}
-	dx_wakeup(dq, pp, flags);
+	dx_wakeup(dq, qos, flags);
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail,
-		pthread_priority_t pp, dispatch_wakeup_flags_t flags)
+		dispatch_qos_t qos, dispatch_wakeup_flags_t flags)
 {
 	struct dispatch_object_s *tail = _tail._do;
-	bool override = _dispatch_queue_need_override(dq, pp);
-	if (flags & DISPATCH_WAKEUP_SLOW_WAITER) {
-		// when SLOW_WAITER is set, we borrow the reference of the caller
-		if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
-			_dispatch_queue_push_update_head(dq, tail, true);
-			flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_FLUSH;
-		} else if (override) {
-			flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_OVERRIDING;
-		} else {
-			flags = DISPATCH_WAKEUP_SLOW_WAITER;
-		}
+	bool overriding = _dispatch_queue_need_override_retain(dq, qos);
+	if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
+		_dispatch_queue_push_update_head(dq, tail, overriding);
+		flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH;
+	} else if (overriding) {
+		flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING;
 	} else {
-		if (override) _dispatch_retain(dq);
-		if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) {
-			_dispatch_queue_push_update_head(dq, tail, override);
-			flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH;
-		} else if (override) {
-			flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING;
-		} else {
-			return;
-		}
+		return;
 	}
-	return dx_wakeup(dq, pp, flags);
+	return dx_wakeup(dq, qos, flags);
 }
 
-struct _dispatch_identity_s {
-	pthread_priority_t old_pp;
-};
-
 DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di,
-		pthread_priority_t pp)
+static inline dispatch_priority_t
+_dispatch_root_queue_identity_assume(pthread_priority_t pp,
+		dispatch_queue_t invoking_dq)
 {
 	// assumed_rq was set by the caller, we need to fake the priorities
 	dispatch_queue_t assumed_rq = _dispatch_queue_get_current();
+	dispatch_priority_t old_dbp = _dispatch_get_basepri();
 
 	dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE);
 
-	di->old_pp = _dispatch_get_defaultpriority();
-
-	if (!(assumed_rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) {
-		if (!pp) {
-			pp = _dispatch_get_priority();
-			// _dispatch_root_queue_drain_deferred_item() may turn a manager
-			// thread into a regular root queue, and we must never try to
-			// restore the manager flag once we became a regular work queue
-			// thread.
-			pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-		}
-		if ((pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >
-				(assumed_rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
-			_dispatch_wqthread_override_start(_dispatch_tid_self(), pp);
+	if (!(assumed_rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE)) {
+		dispatch_qos_t qos = _dispatch_priority_qos(assumed_rq->dq_priority);
+		if (!pp) pp = _dispatch_get_priority();
+		if (_dispatch_qos_less_than_pp(qos, pp)) {
+			qos = _dispatch_qos_from_pp(pp);
+			_dispatch_wqthread_override_start(_dispatch_tid_self(), qos);
 			// Ensure that the root queue sees that this thread was overridden.
-			_dispatch_set_defaultpriority_override();
+			_dispatch_set_basepri_override_qos(qos);
 		}
 	}
-	_dispatch_reset_defaultpriority(assumed_rq->dq_priority);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di)
-{
-	_dispatch_reset_defaultpriority(di->old_pp);
+	_dispatch_reset_basepri(assumed_rq->dq_priority);
+	if (invoking_dq) {
+		_dispatch_set_basepri(invoking_dq->dq_priority);
+	}
+	return old_dbp;
 }
 
 typedef dispatch_queue_t
@@ -1623,28 +1568,19 @@
 	}
 	to_unlock = _dispatch_queue_drain_try_lock(dq, flags, &dq_state);
 	if (likely(to_unlock)) {
-		struct _dispatch_identity_s di;
-		pthread_priority_t old_dp;
-
+		dispatch_priority_t old_dbp;
 drain_pending_barrier:
-		if (overriding) {
-			_dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx",
-					_dispatch_tid_self(), _dispatch_get_defaultpriority());
-			_dispatch_root_queue_identity_assume(&di, 0);
-		}
-
 		if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
-			pthread_priority_t op, dp;
-
-			old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp);
-			op = dq->dq_override;
-			if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
-				_dispatch_wqthread_override_start(_dispatch_tid_self(), op);
-				// Ensure that the root queue sees that this thread was overridden.
-				_dispatch_set_defaultpriority_override();
+			if (overriding) {
+				_dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%x",
+						_dispatch_tid_self(), _dispatch_get_basepri());
+				old_dbp = _dispatch_root_queue_identity_assume(0, dq);
+			} else {
+				old_dbp = _dispatch_set_basepri(dq->dq_priority);
 			}
+		} else {
+			old_dbp = 0;
 		}
-
 		flags = _dispatch_queue_merge_autorelease_frequency(dq, flags);
 attempt_running_slow_head:
 		tq = invoke(dq, flags, &to_unlock, &dc);
@@ -1663,23 +1599,10 @@
 			}
 			to_unlock = 0;
 		}
-		if (overriding) {
-			_dispatch_root_queue_identity_restore(&di);
-		}
 		if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
-			_dispatch_reset_defaultpriority(old_dp);
-		}
-	} else if (overriding) {
-		uint32_t owner = _dq_state_drain_owner(dq_state);
-		pthread_priority_t p = dq->dq_override;
-		if (owner && p) {
-			_dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx",
-					owner, p);
-			_dispatch_wqthread_override_start_check_owner(owner, p,
-					&dq->dq_state_lock);
+			_dispatch_reset_basepri(old_dbp);
 		}
 	}
-
 	if (owning) {
 		_dispatch_introspection_queue_item_complete(dq);
 	}
@@ -1699,9 +1622,11 @@
 				new_state += DISPATCH_QUEUE_IN_BARRIER;
 				new_state += DISPATCH_QUEUE_WIDTH_INTERVAL;
 				new_state -= DISPATCH_QUEUE_PENDING_BARRIER;
-				new_state += to_unlock & DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
+				new_state +=
+					to_unlock & DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
 			} else {
-				new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
+				new_state =
+					DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
 				if (_dq_state_should_wakeup(new_state)) {
 					// drain was not interrupted for suspension
 					// we will reenqueue right away, just put ENQUEUED back
@@ -1718,13 +1643,12 @@
 			to_unlock += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
 			goto drain_pending_barrier;
 		}
-		if (_dq_state_has_override(old_state)) {
+		if (_dq_state_received_override(old_state)) {
 			// Ensure that the root queue sees that this thread was overridden.
-			_dispatch_set_defaultpriority_override();
+			_dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state));
 		}
-
 		if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) {
-			return _dispatch_queue_push(tq, dq, 0);
+			return _dispatch_queue_push(tq, dq, _dq_state_max_qos(old_state));
 		}
 	}
 
@@ -1752,87 +1676,12 @@
 
 DISPATCH_ALWAYS_INLINE DISPATCH_CONST
 static inline dispatch_queue_t
-_dispatch_get_root_queue(qos_class_t priority, bool overcommit)
+_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
 {
-	if (overcommit) switch (priority) {
-	case _DISPATCH_QOS_CLASS_MAINTENANCE:
-		return &_dispatch_root_queues[
-				DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT];
-	case _DISPATCH_QOS_CLASS_BACKGROUND:
-		return &_dispatch_root_queues[
-				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT];
-	case _DISPATCH_QOS_CLASS_UTILITY:
-		return &_dispatch_root_queues[
-				DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT];
-	case _DISPATCH_QOS_CLASS_DEFAULT:
-		return &_dispatch_root_queues[
-				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT];
-	case _DISPATCH_QOS_CLASS_USER_INITIATED:
-		return &_dispatch_root_queues[
-				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT];
-	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
-		return &_dispatch_root_queues[
-				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT];
-	} else switch (priority) {
-	case _DISPATCH_QOS_CLASS_MAINTENANCE:
-		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS];
-	case _DISPATCH_QOS_CLASS_BACKGROUND:
-		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS];
-	case _DISPATCH_QOS_CLASS_UTILITY:
-		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS];
-	case _DISPATCH_QOS_CLASS_DEFAULT:
-		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS];
-	case _DISPATCH_QOS_CLASS_USER_INITIATED:
-		return &_dispatch_root_queues[
-				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS];
-	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
-		return &_dispatch_root_queues[
-				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS];
+	if (unlikely(qos == DISPATCH_QOS_UNSPECIFIED || qos > DISPATCH_QOS_MAX)) {
+		DISPATCH_CLIENT_CRASH(qos, "Corrupted priority");
 	}
-	return NULL;
-}
-
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-DISPATCH_ALWAYS_INLINE DISPATCH_CONST
-static inline dispatch_queue_t
-_dispatch_get_root_queue_for_priority(pthread_priority_t pp, bool overcommit)
-{
-	uint32_t idx;
-
-	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	idx = (uint32_t)__builtin_ffs((int)pp);
-	if (unlikely(!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]
-			.dq_priority)) {
-		// If kernel doesn't support maintenance, bottom bit is background.
-		// Shift to our idea of where background bit is.
-		idx++;
-	}
-	// ffs starts at 1, and account for the QOS_CLASS_SHIFT
-	// if pp is 0, idx is 0 or 1 and this will wrap to a value larger than
-	// DISPATCH_QOS_COUNT
-	idx -= (_PTHREAD_PRIORITY_QOS_CLASS_SHIFT + 1);
-	if (unlikely(idx >= DISPATCH_QUEUE_QOS_COUNT)) {
-		DISPATCH_CLIENT_CRASH(pp, "Corrupted priority");
-	}
-	return &_dispatch_root_queues[2 * idx + overcommit];
-}
-#endif
-
-DISPATCH_ALWAYS_INLINE DISPATCH_CONST
-static inline dispatch_queue_t
-_dispatch_get_root_queue_with_overcommit(dispatch_queue_t rq, bool overcommit)
-{
-	bool rq_overcommit = (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
-	// root queues in _dispatch_root_queues are not overcommit for even indices
-	// and overcommit for odd ones, so fixing overcommit is either returning
-	// the same queue, or picking its neighbour in _dispatch_root_queues
-	if (overcommit && !rq_overcommit) {
-		return rq + 1;
-	}
-	if (!overcommit && rq_overcommit) {
-		return rq - 1;
-	}
-	return rq;
+	return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
 }
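The flattened indexing above relies on the layout invariant that the removed
_dispatch_get_root_queue_with_overcommit helper used to spell out: even slots
of _dispatch_root_queues hold the wide (non-overcommit) queues and odd slots
their overcommit siblings. A minimal sketch of that invariant, assuming only
the formula in this function (qos values start at 1 for the lowest supported
class):

	size_t idx = 2 * (qos - 1) + overcommit; // as computed above
	size_t sibling = idx ^ 1; // same QoS class, overcommit flag flipped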
 
 DISPATCH_ALWAYS_INLINE
@@ -1881,13 +1730,12 @@
 #pragma mark dispatch_priority
 
 DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dispatch_get_defaultpriority(void)
+static inline dispatch_priority_t
+_dispatch_get_basepri(void)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	pthread_priority_t pp = (uintptr_t)_dispatch_thread_getspecific(
-			dispatch_defaultpriority_key);
-	return pp;
+	return (dispatch_priority_t)(uintptr_t)_dispatch_thread_getspecific(
+			dispatch_basepri_key);
 #else
 	return 0;
 #endif
@@ -1895,97 +1743,90 @@
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_reset_defaultpriority(pthread_priority_t pp)
+_dispatch_reset_basepri(dispatch_priority_t dbp)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	pthread_priority_t old_pp = _dispatch_get_defaultpriority();
+	dispatch_priority_t old_dbp = _dispatch_get_basepri();
 	// If an inner-loop or'd an override QoS into the per-thread base
 	// priority, it needs to be propagated up the chain.
-	pp |= old_pp & _PTHREAD_PRIORITY_OVERRIDE_FLAG;
-	_dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
+	dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
+	dbp |= (old_dbp & DISPATCH_PRIORITY_OVERRIDE_MASK);
+	_dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
 #else
-	(void)pp;
+	(void)dbp;
 #endif
 }
 
 DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_get_basepri_override_qos_floor(void)
+{
+	dispatch_priority_t dbp = _dispatch_get_basepri();
+	dispatch_qos_t qos = _dispatch_priority_qos(dbp);
+	dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp);
+	dispatch_qos_t qos_floor = MAX(qos, oqos);
+	return qos_floor ? qos_floor : DISPATCH_QOS_SATURATED;
+}
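A worked example of the floor above (values chosen purely for illustration):
with a base priority encoding QoS 3 and a recorded override of QoS 5, the
floor is 5; when neither is set the function returns DISPATCH_QOS_SATURATED,
so a thread with no base priority never looks eligible for further overrides:

	// qos == 3, oqos == 5  ->  qos_floor == 5
	// qos == 0, oqos == 0  ->  qos_floor == DISPATCH_QOS_SATURATED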
+
+DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_set_defaultpriority_override(void)
+_dispatch_set_basepri_override_qos(dispatch_qos_t qos)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	pthread_priority_t old_pp = _dispatch_get_defaultpriority();
-	pthread_priority_t pp = old_pp | _PTHREAD_PRIORITY_OVERRIDE_FLAG;
-
-	_dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
+	dispatch_priority_t dbp = _dispatch_get_basepri();
+	if (_dispatch_priority_override_qos(dbp) >= qos) return;
+	dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
+	dbp |= qos << DISPATCH_PRIORITY_OVERRIDE_SHIFT;
+	_dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
+#else
+	(void)qos;
 #endif
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_reset_defaultpriority_override(void)
+_dispatch_reset_basepri_override(void)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	pthread_priority_t old_pp = _dispatch_get_defaultpriority();
-	pthread_priority_t pp = old_pp &
-			~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG);
-
-	_dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
-	return unlikely(pp != old_pp);
+	dispatch_priority_t dbp = _dispatch_get_basepri();
+	dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp);
+	if (oqos) {
+		dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
+		_dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
+		return oqos != DISPATCH_QOS_SATURATED;
+	}
 #endif
 	return false;
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
-		dispatch_queue_t tq)
+static inline dispatch_priority_t
+_dispatch_set_basepri(dispatch_priority_t dbp)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	const dispatch_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
-	const dispatch_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG;
-	const dispatch_priority_t defaultqueue_flag =
-			_PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
-	dispatch_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority;
-	if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) &&
-			(tqp & rootqueue_flag)) {
-		if (tqp & defaultqueue_flag) {
-			dq->dq_priority = 0;
-		} else {
-			dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag;
+	const dispatch_priority_t preserved_mask =
+			DISPATCH_PRIORITY_OVERRIDE_MASK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+	dispatch_priority_t old_dbp = _dispatch_get_basepri();
+	if (old_dbp) {
+		dispatch_priority_t flags, defaultqueue, basepri;
+		flags = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
+		defaultqueue = (old_dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
+		basepri = old_dbp & DISPATCH_PRIORITY_REQUESTED_MASK;
+		dbp &= DISPATCH_PRIORITY_REQUESTED_MASK;
+		if (!dbp) {
+			flags = DISPATCH_PRIORITY_FLAG_INHERIT | defaultqueue;
+			dbp = basepri;
+		} else if (dbp < basepri && !defaultqueue) { // rdar://16349734
+			dbp = basepri;
 		}
+		dbp |= flags | (old_dbp & preserved_mask);
+	} else {
+		dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
 	}
+	_dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp);
+	return old_dbp;
 #else
-	(void)dq; (void)tq;
-#endif
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dispatch_set_defaultpriority(pthread_priority_t pp, pthread_priority_t *new_pp)
-{
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-	const pthread_priority_t default_priority_preserved_flags =
-			_PTHREAD_PRIORITY_OVERRIDE_FLAG|_PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
-	pthread_priority_t old_pp = _dispatch_get_defaultpriority();
-	if (old_pp) {
-		pthread_priority_t flags, defaultqueue, basepri;
-		flags = (pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
-		defaultqueue = (old_pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
-		basepri = (old_pp & ~_PTHREAD_PRIORITY_FLAGS_MASK);
-		pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
-		if (!pp) {
-			flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue;
-			pp = basepri;
-		} else if (pp < basepri && !defaultqueue) { // rdar://16349734
-			pp = basepri;
-		}
-		pp |= flags | (old_pp & default_priority_preserved_flags);
-	}
-	_dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp);
-	if (new_pp) *new_pp = pp;
-	return old_pp;
-#else
-	(void)pp; (void)new_pp;
+	(void)dbp;
 	return 0;
 #endif
 }
@@ -1995,25 +1836,24 @@
 _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	pthread_priority_t defaultpri = _dispatch_get_defaultpriority();
-	bool enforce, inherited, defaultqueue;
-	enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
+	dispatch_priority_t inherited, defaultqueue, dbp = _dispatch_get_basepri();
+	pthread_priority_t basepp = _dispatch_priority_to_pp_strip_flags(dbp);
+	bool enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
 			(pp & _PTHREAD_PRIORITY_ENFORCE_FLAG);
-	inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG);
-	defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
-	defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
+	inherited = (dbp & DISPATCH_PRIORITY_FLAG_INHERIT);
+	defaultqueue = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE);
 	pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
 
 	if (!pp) {
-		return defaultpri;
+		return basepp;
 	} else if (defaultqueue) { // rdar://16349734
 		return pp;
-	} else if (pp < defaultpri) {
-		return defaultpri;
+	} else if (pp < basepp) {
+		return basepp;
 	} else if (enforce || inherited) {
 		return pp;
 	} else {
-		return defaultpri;
+		return basepp;
 	}
 #else
 	(void)pp; (void)flags;
@@ -2022,22 +1862,47 @@
 }
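The branch ladder above reduces to a small decision table: the work item's pp
wins only when it is nonzero and either the base is a defaultqueue, or it is
at least the base priority and enforcement or inheritance applies; every
other case floors to the base:

	// pp == 0               -> basepp
	// defaultqueue base     -> pp      (rdar://16349734)
	// pp <  basepp          -> basepp
	// enforce || inherited  -> pp
	// otherwise             -> basepp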
 
 DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dispatch_priority_inherit_from_root_queue(pthread_priority_t pp,
+static inline void
+_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
+		dispatch_queue_t tq)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	const dispatch_priority_t rootqueue_flag = DISPATCH_PRIORITY_FLAG_ROOTQUEUE;
+	const dispatch_priority_t inherited_flag = DISPATCH_PRIORITY_FLAG_INHERIT;
+	const dispatch_priority_t defaultqueue_flag =
+			DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
+	dispatch_priority_t pri = dq->dq_priority, tpri = tq->dq_priority;
+
+	if ((!_dispatch_priority_qos(pri) || (pri & inherited_flag)) &&
+			(tpri & rootqueue_flag)) {
+		if (tpri & defaultqueue_flag) {
+			dq->dq_priority = 0;
+		} else {
+			dq->dq_priority = (tpri & ~rootqueue_flag) | inherited_flag;
+		}
+	}
+#else
+	(void)dq; (void)tq;
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_priority_t
+_dispatch_priority_inherit_from_root_queue(dispatch_priority_t pri,
 		dispatch_queue_t rq)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	pthread_priority_t p = pp & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-	pthread_priority_t rqp = rq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-	pthread_priority_t defaultqueue =
-			rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
+	dispatch_priority_t p = pri & DISPATCH_PRIORITY_REQUESTED_MASK;
+	dispatch_priority_t rqp = rq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK;
+	dispatch_priority_t defaultqueue =
+			rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
 
 	if (!p || (!defaultqueue && p < rqp)) {
 		p = rqp | defaultqueue;
 	}
-	return p | (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
+	return p | (rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT);
 #else
-	(void)rq; (void)pp;
+	(void)rq; (void)pri;
 	return 0;
 #endif
 }
@@ -2146,28 +2011,23 @@
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_queue_need_override(dispatch_queue_class_t dqu, pthread_priority_t pp)
+_dispatch_queue_need_override(dispatch_queue_class_t dqu, dispatch_qos_t qos)
 {
-	// global queues have their override set to DISPATCH_SATURATED_OVERRIDE
-	// which makes this test always return false for them.
-	return dqu._oq->oq_override < (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_queue_received_override(dispatch_queue_class_t dqu,
-		pthread_priority_t pp)
-{
-	dispatch_assert(dqu._oq->oq_override != DISPATCH_SATURATED_OVERRIDE);
-	return dqu._oq->oq_override > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
+	uint64_t dq_state = os_atomic_load2o(dqu._dq, dq_state, relaxed);
+	// For thread-bound queues, the dq_priority "override qos" field holds
+	// the priority at which the queue is already running.
+	// For non-thread-bound queues, the qos of the queue may not have been
+	// observed yet when the first work item is dispatched synchronously.
+	return _dq_state_max_qos(dq_state) < qos &&
+			_dispatch_priority_override_qos(dqu._dq->dq_priority) < qos;
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
 _dispatch_queue_need_override_retain(dispatch_queue_class_t dqu,
-		pthread_priority_t pp)
+		dispatch_qos_t qos)
 {
-	if (_dispatch_queue_need_override(dqu, pp)) {
+	if (_dispatch_queue_need_override(dqu, qos)) {
 		_os_object_retain_internal_inline(dqu._oq->_as_os_obj);
 		return true;
 	}
@@ -2175,76 +2035,34 @@
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu,
-		dispatch_priority_t new_op)
+static inline dispatch_qos_t
+_dispatch_queue_override_qos(dispatch_queue_class_t dqu, dispatch_qos_t qos)
 {
-	dispatch_priority_t old_op;
-	new_op &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	if (!new_op) return false;
-	os_atomic_rmw_loop2o(dqu._oq, oq_override, old_op, new_op, relaxed, {
-		if (new_op <= old_op) {
-			os_atomic_rmw_loop_give_up(return false);
+	if (dqu._oq->oq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) {
+		return qos;
+	}
+	// for asynchronous workitems, queue priority is the floor for overrides
+	return MAX(qos, _dispatch_priority_qos(dqu._oq->oq_priority));
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_queue_reset_max_qos(dispatch_queue_class_t dqu)
+{
+	uint64_t old_state, new_state;
+	os_atomic_rmw_loop2o(dqu._dq, dq_state, old_state, new_state, relaxed, {
+		new_state = old_state;
+		new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+		new_state &= ~DISPATCH_QUEUE_RECEIVED_OVERRIDE;
+		if (old_state == new_state) {
+			os_atomic_rmw_loop_give_up(return DISPATCH_QOS_UNSPECIFIED);
 		}
 	});
-	return true;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_override_priority(dispatch_queue_class_t dqu,
-		pthread_priority_t *pp, dispatch_wakeup_flags_t *flags)
-{
-	os_mpsc_queue_t oq = dqu._oq;
-	dispatch_priority_t qp = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	dispatch_priority_t np = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
-	dispatch_priority_t o;
-
-	_dispatch_assert_is_valid_qos_override(np);
-	if (oq->oq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG) {
-		qp = 0;
-	} else if (*flags & DISPATCH_WAKEUP_SLOW_WAITER) {
-		// when a queue is used as a lock its priority doesn't count
-	} else if (np < qp) {
-		// for asynchronous workitems, queue priority is the floor for overrides
-		np = qp;
+	dispatch_qos_t qos = _dq_state_max_qos(old_state);
+	if (_dq_state_received_override(old_state)) {
+		_dispatch_set_basepri_override_qos(qos);
 	}
-	*flags &= ~_DISPATCH_WAKEUP_OVERRIDE_BITS;
-
-	// this optimizes for the case when no update of the override is required
-	// os_atomic_rmw_loop2o optimizes for the case when the update happens,
-	// and can't be used.
-	o = os_atomic_load2o(oq, oq_override, relaxed);
-	do {
-		if (likely(np <= o)) break;
-	} while (unlikely(!os_atomic_cmpxchgvw2o(oq, oq_override, o, np, &o, relaxed)));
-
-	if (np <= o) {
-		*pp = o;
-	} else {
-		*flags |= DISPATCH_WAKEUP_OVERRIDING;
-		*pp = np;
-	}
-	if (o > qp) {
-		*flags |= DISPATCH_WAKEUP_WAS_OVERRIDDEN;
-	}
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_priority_t
-_dispatch_queue_reset_override_priority(dispatch_queue_class_t dqu,
-		bool qp_is_floor)
-{
-	os_mpsc_queue_t oq = dqu._oq;
-	dispatch_priority_t p = 0;
-	if (qp_is_floor) {
-		// thread bound queues floor their dq_override to their
-		// priority to avoid receiving useless overrides
-		p = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	}
-	dispatch_priority_t o = os_atomic_xchg2o(oq, oq_override, p, relaxed);
-	dispatch_assert(o != DISPATCH_SATURATED_OVERRIDE);
-	return (o > p) ? o : 0;
+	return qos;
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -2254,9 +2072,9 @@
 #if HAVE_PTHREAD_WORKQUEUE_QOS
 	pthread_priority_t pp = _dispatch_get_priority();
 	pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
-	if (pp > _dispatch_user_initiated_priority) {
+	if (_dispatch_qos_less_than_pp(DISPATCH_QOS_USER_INITIATED, pp)) {
 		// Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
-		pp = _dispatch_user_initiated_priority;
+		return _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED);
 	}
 	return pp;
 #else
@@ -2271,8 +2089,7 @@
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
 	pthread_priority_t pp = _dispatch_get_priority();
-	pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
-	return pp && (pp <= _dispatch_background_priority);
+	return _dispatch_qos_is_background(_dispatch_qos_from_pp(pp));
 #else
 	return false;
 #endif
@@ -2442,6 +2259,7 @@
 			_dispatch_continuation_free_to_cache_limit(dc1);
 		}
 	});
+	_dispatch_perfmon_workitem_inc();
 }
 
 DISPATCH_ALWAYS_INLINE_NDEBUG
@@ -2501,21 +2319,21 @@
 }
 
 DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dispatch_continuation_get_override_priority(dispatch_queue_t dq,
+static inline dispatch_qos_t
+_dispatch_continuation_override_qos(dispatch_queue_t dq,
 		dispatch_continuation_t dc)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	pthread_priority_t p = dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+	dispatch_qos_t dc_qos = _dispatch_qos_from_pp(dc->dc_priority);
 	bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG;
-	pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
+	dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority);
+	bool defaultqueue = dq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
 
 	dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY);
-	if (p && (enforce || !dqp || defaultqueue)) {
-		return p;
+	if (dc_qos && (enforce || !dq_qos || defaultqueue)) {
+		return dc_qos;
 	}
-	return dqp;
+	return dq_qos;
 #else
 	(void)dq; (void)dc;
 	return 0;
diff --git a/src/internal.h b/src/internal.h
index 8934b2c..a514f50 100644
--- a/src/internal.h
+++ b/src/internal.h
@@ -48,19 +48,19 @@
 #if TARGET_OS_MAC_DESKTOP
 #  define DISPATCH_HOST_SUPPORTS_OSX(x) \
 		(__MAC_OS_X_VERSION_MIN_REQUIRED >= (x))
-#  if !DISPATCH_HOST_SUPPORTS_OSX(101000)
-#    error "OS X hosts older than OS X 10.10 aren't supported anymore"
+#  if !DISPATCH_HOST_SUPPORTS_OSX(101100)
+#    error "OS X hosts older than OS X 10.11 aren't supported anymore"
-#  endif // !DISPATCH_HOST_SUPPORTS_OSX(101000)
+#  endif // !DISPATCH_HOST_SUPPORTS_OSX(101100)
 #elif TARGET_OS_SIMULATOR
 #  define DISPATCH_HOST_SUPPORTS_OSX(x) \
 		(IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x))
-#  if !DISPATCH_HOST_SUPPORTS_OSX(101000)
-#    error "Simulator hosts older than OS X 10.10 aren't supported anymore"
+#  if !DISPATCH_HOST_SUPPORTS_OSX(101100)
+#    error "Simulator hosts older than OS X 10.11 aren't supported anymore"
-#  endif // !DISPATCH_HOST_SUPPORTS_OSX(101000)
+#  endif // !DISPATCH_HOST_SUPPORTS_OSX(101100)
 #else
 #  define DISPATCH_HOST_SUPPORTS_OSX(x) 1
-#  if __IPHONE_OS_VERSION_MIN_REQUIRED < 70000
-#    error "iOS hosts older than iOS 7.0 aren't supported anymore"
+#  if __IPHONE_OS_VERSION_MIN_REQUIRED < 90000
+#    error "iOS hosts older than iOS 9.0 aren't supported anymore"
 #  endif
 #endif
 
@@ -243,7 +243,6 @@
 #include <sys/stat.h>
 
 #if !TARGET_OS_WIN32
-#include <sys/event.h>
 #include <sys/mount.h>
 #include <sys/queue.h>
 #ifdef __ANDROID__
@@ -256,9 +255,6 @@
 #include <sys/mman.h>
 #include <netinet/in.h>
 #endif
-#if defined(__linux__)
-#include <sys/eventfd.h>
-#endif
 
 #ifdef __BLOCKS__
 #include <Block_private.h>
@@ -309,6 +305,31 @@
 #define DISPATCH_CONCAT(x,y) DISPATCH_CONCAT1(x,y)
 #define DISPATCH_CONCAT1(x,y) x ## y
 
+#define DISPATCH_COUNT_ARGS(...) DISPATCH_COUNT_ARGS1(, ## __VA_ARGS__, \
+		_8, _7, _6, _5, _4, _3, _2, _1, _0)
+#define DISPATCH_COUNT_ARGS1(z, a, b, c, d, e, f, g, h, cnt, ...) cnt
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define DISPATCH_STRUCT_LE_2(a, b)        struct { a; b; }
+#define DISPATCH_STRUCT_LE_3(a, b, c)     struct { a; b; c; }
+#define DISPATCH_STRUCT_LE_4(a, b, c, d)  struct { a; b; c; d; }
+#else
+#define DISPATCH_STRUCT_LE_2(a, b)        struct { b; a; }
+#define DISPATCH_STRUCT_LE_3(a, b, c)     struct { c; b; a; }
+#define DISPATCH_STRUCT_LE_4(a, b, c, d)  struct { d; c; b; a; }
+#endif
+#if __has_feature(c_static_assert)
+#define DISPATCH_UNION_ASSERT(alias, st) \
+		_Static_assert(sizeof(struct { alias; }) == sizeof(st), "bogus union");
+#else
+#define DISPATCH_UNION_ASSERT(alias, st)
+#endif
+#define DISPATCH_UNION_LE(alias, ...) \
+		DISPATCH_UNION_ASSERT(alias, DISPATCH_CONCAT(DISPATCH_STRUCT_LE, \
+				DISPATCH_COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)) \
+		union { alias; DISPATCH_CONCAT(DISPATCH_STRUCT_LE, \
+				DISPATCH_COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); }
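A hypothetical use of DISPATCH_UNION_LE (the ex_* names are illustrative, not
part of this patch): alias a 64-bit word with its two 32-bit halves so that
the first-listed member always overlays the low-order bits on either
endianness, with DISPATCH_UNION_ASSERT verifying the struct introduces no
padding wherever _Static_assert is available:

	DISPATCH_UNION_LE(uint64_t ex_state,
		uint32_t ex_lock_bits, // low-order half on LE and BE hosts alike
		uint32_t ex_flags      // high-order half
	) ex_u;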
+
 // workaround 6368156
 #ifdef NSEC_PER_SEC
 #undef NSEC_PER_SEC
@@ -340,16 +361,6 @@
 #define unlikely(x) (!!(x))
 #endif // __GNUC__
 
-#if BYTE_ORDER == LITTLE_ENDIAN
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b)        struct { a; b; }
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c)     struct { a; b; c; }
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d)  struct { a; b; c; d; }
-#else
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b)        struct { b; a; }
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c)     struct { c; b; a; }
-#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d)  struct { d; c; b; a; }
-#endif
-
 #define _TAILQ_IS_ENQUEUED(elm, field) \
 		((elm)->field.tqe_prev != NULL)
 #define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \
@@ -558,13 +569,6 @@
 } while (0)
 
 #if DISPATCH_DEBUG
-#if HAVE_MACH
-DISPATCH_NOINLINE DISPATCH_USED
-void dispatch_debug_machport(mach_port_t name, const char* str);
-#endif
-#endif
-
-#if DISPATCH_DEBUG
 /* This is the private version of the deprecated dispatch_debug() */
 DISPATCH_NONNULL2 DISPATCH_NOTHROW
 __attribute__((__format__(printf,2,3)))
@@ -612,8 +616,14 @@
 	}
 }
 
+#if DISPATCH_INTROSPECTION
+#undef DISPATCH_PERF_MON
+#define DISPATCH_PERF_MON 0
+#endif
+
 /* #includes dependent on internal.h */
 #include "shims.h"
+#include "event/event_internal.h"
 
 // Older Mac OS X and iOS Simulator fallbacks
 
@@ -645,124 +655,24 @@
 #endif
 #endif // PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK
 
-#if HAVE_MACH
-#if !defined(MACH_NOTIFY_SEND_POSSIBLE)
-#undef MACH_NOTIFY_SEND_POSSIBLE
-#define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME
-#endif
-#endif // HAVE_MACH
-
 #ifdef EVFILT_MEMORYSTATUS
 #ifndef DISPATCH_USE_MEMORYSTATUS
 #define DISPATCH_USE_MEMORYSTATUS 1
 #endif
 #endif // EVFILT_MEMORYSTATUS
 
-#if defined(EVFILT_VM) && !DISPATCH_USE_MEMORYSTATUS
-#ifndef DISPATCH_USE_VM_PRESSURE
-#define DISPATCH_USE_VM_PRESSURE 1
-#endif
-#endif // EVFILT_VM
-
 #if TARGET_OS_SIMULATOR
 #undef DISPATCH_USE_MEMORYPRESSURE_SOURCE
 #define DISPATCH_USE_MEMORYPRESSURE_SOURCE 0
-#undef DISPATCH_USE_VM_PRESSURE_SOURCE
-#define DISPATCH_USE_VM_PRESSURE_SOURCE 0
 #endif // TARGET_OS_SIMULATOR
 #if !defined(DISPATCH_USE_MEMORYPRESSURE_SOURCE) && DISPATCH_USE_MEMORYSTATUS
 #define DISPATCH_USE_MEMORYPRESSURE_SOURCE 1
-#elif !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE
-#define DISPATCH_USE_VM_PRESSURE_SOURCE 1
 #endif
 #if DISPATCH_USE_MEMORYPRESSURE_SOURCE
 extern bool _dispatch_memory_warn;
 #endif
 
-#if !defined(NOTE_LEEWAY)
-#undef NOTE_LEEWAY
-#define NOTE_LEEWAY 0
-#undef NOTE_CRITICAL
-#define NOTE_CRITICAL 0
-#undef NOTE_BACKGROUND
-#define NOTE_BACKGROUND 0
-#endif // NOTE_LEEWAY
-
-#if !defined(NOTE_FUNLOCK)
-#define NOTE_FUNLOCK 0x00000100
-#endif
-
-#if !defined(NOTE_MACH_CONTINUOUS_TIME)
-#define NOTE_MACH_CONTINUOUS_TIME 0
-#endif // NOTE_MACH_CONTINUOUS_TIME
-
-#if !defined(HOST_NOTIFY_CALENDAR_SET)
-#define HOST_NOTIFY_CALENDAR_SET HOST_NOTIFY_CALENDAR_CHANGE
-#endif // HOST_NOTIFY_CALENDAR_SET
-
-#if !defined(HOST_CALENDAR_SET_REPLYID)
-#define HOST_CALENDAR_SET_REPLYID 951
-#endif // HOST_CALENDAR_SET_REPLYID
-
-#if HAVE_DECL_NOTE_REAP
-#if defined(NOTE_REAP) && defined(__APPLE__)
-#undef NOTE_REAP
-#define NOTE_REAP 0x10000000 // <rdar://problem/13338526>
-#endif
-#endif // HAVE_DECL_NOTE_REAP
-
-#ifndef VQ_QUOTA
-#undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982
-#endif // VQ_QUOTA
-
-#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || \
-		!DISPATCH_HOST_SUPPORTS_OSX(101200)
-#undef NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
-#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0
-#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
-
-#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) || \
-		!DISPATCH_HOST_SUPPORTS_OSX(101200)
-#undef NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL
-#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0
-#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL
-
-#if !defined(EV_UDATA_SPECIFIC) || !DISPATCH_HOST_SUPPORTS_OSX(101100)
-#undef DISPATCH_USE_EV_UDATA_SPECIFIC
-#define DISPATCH_USE_EV_UDATA_SPECIFIC 0
-#elif !defined(DISPATCH_USE_EV_UDATA_SPECIFIC)
-#define DISPATCH_USE_EV_UDATA_SPECIFIC 1
-#endif // EV_UDATA_SPECIFIC
-
-#if !DISPATCH_USE_EV_UDATA_SPECIFIC
-#undef EV_UDATA_SPECIFIC
-#define EV_UDATA_SPECIFIC 0
-#undef EV_VANISHED
-#define EV_VANISHED 0
-#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC
-
-#ifndef EV_VANISHED
-#define EV_VANISHED 0x0200
-#endif
-
-#ifndef DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS
-#if TARGET_OS_MAC && !DISPATCH_HOST_SUPPORTS_OSX(101200)
-// deferred delete can return bogus ENOENTs on older kernels
-#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 1
-#else
-#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 0
-#endif
-#endif
-
-#if !defined(EV_SET_QOS) || !DISPATCH_HOST_SUPPORTS_OSX(101100)
-#undef DISPATCH_USE_KEVENT_QOS
-#define DISPATCH_USE_KEVENT_QOS 0
-#elif !defined(DISPATCH_USE_KEVENT_QOS)
-#define DISPATCH_USE_KEVENT_QOS 1
-#endif // EV_SET_QOS
-
 #if HAVE_PTHREAD_WORKQUEUE_KEVENT && defined(KEVENT_FLAG_WORKQ) && \
-		DISPATCH_USE_EV_UDATA_SPECIFIC && DISPATCH_USE_KEVENT_QOS && \
 		DISPATCH_HOST_SUPPORTS_OSX(101200) && \
 		!defined(DISPATCH_USE_KEVENT_WORKQUEUE)
 #define DISPATCH_USE_KEVENT_WORKQUEUE 1
@@ -774,48 +684,17 @@
 #define DISPATCH_USE_MGR_THREAD 1
 #endif
 
-#if DISPATCH_USE_KEVENT_WORKQUEUE && DISPATCH_USE_EV_UDATA_SPECIFIC && \
+#if DISPATCH_USE_KEVENT_WORKQUEUE && \
 		DISPATCH_HOST_SUPPORTS_OSX(101200) && \
 		!defined(DISPATCH_USE_EVFILT_MACHPORT_DIRECT)
 #define DISPATCH_USE_EVFILT_MACHPORT_DIRECT 1
 #endif
 
-#ifndef MACH_SEND_OVERRIDE
-#define MACH_SEND_OVERRIDE 0x00000020
-typedef unsigned int mach_msg_priority_t;
-#define MACH_MSG_PRIORITY_UNSPECIFIED ((mach_msg_priority_t)0)
-#endif // MACH_SEND_OVERRIDE
-
-
 #if (!DISPATCH_USE_EVFILT_MACHPORT_DIRECT || DISPATCH_DEBUG) && \
 		!defined(DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK)
 #define DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK 1
 #endif
 
-#if DISPATCH_USE_KEVENT_QOS
-typedef struct kevent_qos_s _dispatch_kevent_qos_s;
-typedef typeof(((struct kevent_qos_s*)NULL)->qos) _dispatch_kevent_priority_t;
-#else // DISPATCH_USE_KEVENT_QOS
-#ifndef KEVENT_FLAG_IMMEDIATE
-#define KEVENT_FLAG_NONE 0x00
-#define KEVENT_FLAG_IMMEDIATE 0x01
-#define KEVENT_FLAG_ERROR_EVENTS 0x02
-#endif // KEVENT_FLAG_IMMEDIATE
-typedef struct kevent64_s _dispatch_kevent_qos_s;
-#define kevent_qos(_kq, _changelist, _nchanges, _eventlist, _nevents, \
-		_data_out, _data_available, _flags) \
-		({ unsigned int _f = (_flags); _dispatch_kevent_qos_s _kev_copy; \
-		const _dispatch_kevent_qos_s *_cl = (_changelist); \
-		int _n = (_nchanges); const struct timespec _timeout_immediately = {}; \
-		dispatch_static_assert(!(_data_out) && !(_data_available)); \
-		if (_f & KEVENT_FLAG_ERROR_EVENTS) { \
-			dispatch_static_assert(_n == 1); \
-			_kev_copy = *_cl; _kev_copy.flags |= EV_RECEIPT; } \
-		kevent64((_kq), _f & KEVENT_FLAG_ERROR_EVENTS ? &_kev_copy : _cl, _n, \
-			(_eventlist), (_nevents), 0, \
-			_f & KEVENT_FLAG_IMMEDIATE ? &_timeout_immediately : NULL); })
-#endif // DISPATCH_USE_KEVENT_QOS
-
 #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE)
 #ifndef DISPATCH_USE_SETNOSIGPIPE
 #define DISPATCH_USE_SETNOSIGPIPE 1
@@ -845,10 +724,6 @@
 #ifndef DISPATCH_USE_GUARDED_FD
 #define DISPATCH_USE_GUARDED_FD 1
 #endif
-// change_fdguard_np() requires GUARD_DUP <rdar://problem/11814513>
-#if DISPATCH_USE_GUARDED_FD && RDAR_11814513
-#define DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD 1
-#endif
 #endif // HAVE_SYS_GUARDED_H
 
 
@@ -859,9 +734,15 @@
 #endif
 #ifndef KDBG_CODE
 #define KDBG_CODE(...) 0
+#define DBG_FUNC_START 0
+#define DBG_FUNC_END 0
 #endif
 #define DISPATCH_CODE(subclass, code) \
 		KDBG_CODE(DBG_DISPATCH, DISPATCH_TRACE_SUBCLASS_##subclass, code)
+#define DISPATCH_CODE_START(subclass, code) \
+		(DISPATCH_CODE(subclass, code) | DBG_FUNC_START)
+#define DISPATCH_CODE_END(subclass, code) \
+		(DISPATCH_CODE(subclass, code) | DBG_FUNC_END)
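A sketch of the intended pairing (hypothetical call sites; the
_dispatch_ktrace0 convenience wrapper around _dispatch_ktrace_impl is assumed
here):

	_dispatch_ktrace0(DISPATCH_PERF_MON_worker_thread_start);
	// ... the work in between shows up as one interval in the trace ...
	_dispatch_ktrace0(DISPATCH_PERF_MON_worker_thread_end);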
 #ifdef ARIADNEDBG_CODE
 #define ARIADNE_ENTER_DISPATCH_MAIN_CODE ARIADNEDBG_CODE(220, 2)
 #else
@@ -875,15 +756,21 @@
 #define DISPATCH_TRACE_SUBCLASS_VOUCHER 1
 #define DISPATCH_TRACE_SUBCLASS_PERF 2
 #define DISPATCH_TRACE_SUBCLASS_MACH_MSG 3
+#define DISPATCH_TRACE_SUBCLASS_PERF_MON 4
 
 #define DISPATCH_PERF_non_leaf_retarget DISPATCH_CODE(PERF, 1)
 #define DISPATCH_PERF_post_activate_retarget DISPATCH_CODE(PERF, 2)
 #define DISPATCH_PERF_post_activate_mutation DISPATCH_CODE(PERF, 3)
 #define DISPATCH_PERF_delayed_registration DISPATCH_CODE(PERF, 4)
 #define DISPATCH_PERF_mutable_target DISPATCH_CODE(PERF, 5)
+#define DISPATCH_PERF_strict_bg_timer DISPATCH_CODE(PERF, 6)
 
 #define DISPATCH_MACH_MSG_hdr_move DISPATCH_CODE(MACH_MSG, 1)
 
+#define DISPATCH_PERF_MON_worker_thread_start DISPATCH_CODE_START(PERF_MON, 1)
+#define DISPATCH_PERF_MON_worker_thread_end DISPATCH_CODE_END(PERF_MON, 1)
+#define DISPATCH_PERF_MON_worker_useless DISPATCH_CODE(PERF_MON, 2)
+
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b,
@@ -930,15 +817,11 @@
 #define MACH_SEND_INVALID_VOUCHER 0x10000005
 #endif
 
-#if TARGET_OS_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100
-#undef VOUCHER_USE_MACH_VOUCHER
-#define VOUCHER_USE_MACH_VOUCHER 0
-#endif
 #ifndef VOUCHER_USE_MACH_VOUCHER
 #if __has_include(<mach/mach_voucher.h>)
 #define VOUCHER_USE_MACH_VOUCHER 1
 #endif
-#endif
+#endif // VOUCHER_USE_MACH_VOUCHER
 
 #if RDAR_24272659 // FIXME: <rdar://problem/24272659>
 #if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_HOST_SUPPORTS_OSX(101200)
@@ -1066,8 +949,7 @@
 #endif
 #endif // HAVE_PTHREAD_WORKQUEUE_QOS
 #if DISPATCH_USE_KEVENT_WORKQUEUE
-#if !HAVE_PTHREAD_WORKQUEUE_QOS || !DISPATCH_USE_KEVENT_QOS || \
-		!DISPATCH_USE_EV_UDATA_SPECIFIC
+#if !HAVE_PTHREAD_WORKQUEUE_QOS || !EV_UDATA_SPECIFIC
 #error Invalid build configuration
 #endif
 #if DISPATCH_USE_MGR_THREAD
@@ -1075,10 +957,12 @@
 #else
 #define _dispatch_kevent_workqueue_enabled (1)
 #endif
+#else
+#define _dispatch_kevent_workqueue_enabled (0)
 #endif // DISPATCH_USE_KEVENT_WORKQUEUE
 
 #if DISPATCH_USE_EVFILT_MACHPORT_DIRECT
-#if !DISPATCH_USE_KEVENT_WORKQUEUE || !DISPATCH_USE_EV_UDATA_SPECIFIC
+#if !DISPATCH_USE_KEVENT_WORKQUEUE || !EV_UDATA_SPECIFIC
 #error Invalid build configuration
 #endif
 #if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
@@ -1097,6 +981,7 @@
 #include "introspection_internal.h"
 #include "queue_internal.h"
 #include "source_internal.h"
+#include "mach_internal.h"
 #include "voucher_internal.h"
 #include "data_internal.h"
 #if !TARGET_OS_WIN32
diff --git a/src/introspection.c b/src/introspection.c
index d847cb9..feeeb97 100644
--- a/src/introspection.c
+++ b/src/introspection.c
@@ -300,16 +300,11 @@
 		.suspend_count = _dq_state_suspend_cnt(dq_state) + ds->dq_side_suspend_cnt,
 		.enqueued = _dq_state_is_enqueued(dq_state),
 		.handler_is_block = hdlr_is_block,
-		.timer = ds->ds_is_timer,
-		.after = ds->ds_is_timer && (bool)(ds_timer(ds).flags & DISPATCH_TIMER_AFTER),
+		.timer = dr->du_is_timer,
+		.after = dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_AFTER),
+		.type = (unsigned long)dr->du_filter,
+		.handle = (unsigned long)dr->du_ident,
 	};
-	dispatch_kevent_t dk = ds->ds_dkev;
-	if (ds->ds_is_custom_source) {
-		dis.type = (unsigned long)dk;
-	} else if (dk) {
-		dis.type = (unsigned long)dk->dk_kevent.filter;
-		dis.handle = (unsigned long)dk->dk_kevent.ident;
-	}
 	return dis;
 }
 
@@ -739,7 +734,7 @@
 	dispatch_queue_order_entry_t dof_e;
 };
 
-DISPATCH_NOINLINE
+DISPATCH_NOINLINE DISPATCH_NORETURN
 static void
 _dispatch_introspection_lock_inversion_fail(dispatch_order_frame_t dof,
 		dispatch_queue_t top_q, dispatch_queue_t bottom_q)
diff --git a/src/introspection_internal.h b/src/introspection_internal.h
index 06504a8..af54842 100644
--- a/src/introspection_internal.h
+++ b/src/introspection_internal.h
@@ -129,7 +129,6 @@
 
 #define _dispatch_introspection_init()
 #define _dispatch_introspection_thread_add()
-#define _dispatch_introspection_thread_remove()
 
 DISPATCH_ALWAYS_INLINE
 static inline dispatch_queue_t
diff --git a/src/io.c b/src/io.c
index 0a00e6e..f538862 100644
--- a/src/io.c
+++ b/src/io.c
@@ -236,8 +236,7 @@
 	dispatch_io_t channel = _dispatch_alloc(DISPATCH_VTABLE(io),
 			sizeof(struct dispatch_io_s));
 	channel->do_next = DISPATCH_OBJECT_LISTLESS;
-	channel->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-			true);
+	channel->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
 	channel->params.type = type;
 	channel->params.high = SIZE_MAX;
 	channel->params.low = dispatch_io_defaults.low_water_chunks *
@@ -889,7 +888,7 @@
 		dispatch_operation_t op =
 			_dispatch_operation_create(DOP_DIR_READ, channel, 0,
 					length, dispatch_data_empty,
-					_dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false),
+					_dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false),
 					^(bool done, dispatch_data_t data, int error) {
 				if (data) {
 					data = dispatch_data_create_concat(deliver_data, data);
@@ -960,7 +959,7 @@
 		dispatch_operation_t op =
 			_dispatch_operation_create(DOP_DIR_WRITE, channel, 0,
 					dispatch_data_get_size(data), data,
-					_dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false),
+					_dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false),
 					^(bool done, dispatch_data_t d, int error) {
 				if (done) {
 					if (d) {
@@ -1155,8 +1154,9 @@
 	}
 	dispatch_source_t timer = dispatch_source_create(
 			DISPATCH_SOURCE_TYPE_TIMER, 0, 0, tq);
-	dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW,
-			(int64_t)op->params.interval), op->params.interval, 0);
+	dispatch_source_set_timer(timer,
+			dispatch_time(DISPATCH_TIME_NOW, (int64_t)op->params.interval),
+			op->params.interval, 0);
 	dispatch_source_set_event_handler(timer, ^{
 		// On stream queue or pick queue
 		if (dispatch_source_testcancel(timer)) {
@@ -1236,9 +1236,10 @@
 		return fd;
 	}
 	errno = 0;
+#else
+	(void)fd_entry;
 #endif
 	return open(path, oflag, mode);
-	(void)fd_entry;
 }
 
 static inline int
@@ -1248,11 +1249,12 @@
 		guardid_t guard = (uintptr_t)fd_entry;
 		return guarded_close_np(fd, &guard);
 	} else
+#else
+	(void)fd_entry;
 #endif
 	{
 		return close(fd);
 	}
-	(void)fd_entry;
 }
 
 static inline void
@@ -1388,8 +1390,9 @@
 						break;
 				);
 			}
-			_dispatch_stream_init(fd_entry, _dispatch_get_root_queue(
-					_DISPATCH_QOS_CLASS_DEFAULT, false));
+			_dispatch_stream_init(fd_entry,
+					_dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false));
 		}
 		fd_entry->orig_flags = orig_flags;
 		fd_entry->orig_nosigpipe = orig_nosigpipe;
@@ -1456,8 +1459,8 @@
 	if (S_ISREG(mode)) {
 		_dispatch_disk_init(fd_entry, major(dev));
 	} else {
-		_dispatch_stream_init(fd_entry, _dispatch_get_root_queue(
-				_DISPATCH_QOS_CLASS_DEFAULT, false));
+		_dispatch_stream_init(fd_entry,
+				_dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false));
 	}
 	fd_entry->fd = -1;
 	fd_entry->orig_flags = -1;
@@ -1636,8 +1639,7 @@
 	disk->do_next = DISPATCH_OBJECT_LISTLESS;
 	disk->do_xref_cnt = -1;
 	disk->advise_list_depth = pending_reqs_depth;
-	disk->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-			false);
+	disk->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
 	disk->dev = dev;
 	TAILQ_INIT(&disk->operations);
 	disk->cur_rq = TAILQ_FIRST(&disk->operations);
@@ -1897,7 +1899,7 @@
 	// Close queue must not run user cleanup handlers until sources are fully
 	// unregistered
 	dispatch_queue_t close_queue = op->fd_entry->close_queue;
-	dispatch_source_set_cancel_handler(source, ^{
+	dispatch_source_set_mandatory_cancel_handler(source, ^{
 		_dispatch_op_debug("stream source cancel", op);
 		dispatch_resume(close_queue);
 	});
diff --git a/src/libdispatch.codes b/src/libdispatch.codes
index 9aca7e1..0ecc333 100644
--- a/src/libdispatch.codes
+++ b/src/libdispatch.codes
@@ -11,3 +11,9 @@
 0x2e02000c	DISPATCH_PERF_post_activate_mutation
 0x2e020010	DISPATCH_PERF_delayed_registration
 0x2e020014	DISPATCH_PERF_mutable_target
+0x2e020018	DISPATCH_PERF_strict_bg_timer
+
+0x2e030004	DISPATCH_MACH_MSG_hdr_move
+
+0x2e040004	DISPATCH_PERF_MON_worker_thread
+0x2e040008	DISPATCH_PERF_MON_worker_useless
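These values follow the usual kdebug packing, class << 24 | subclass << 16 |
code << 2, with DBG_DISPATCH == 0x2e and the DBG_FUNC_START/END qualifiers
occupying the low two bits, which is why the PERF_MON worker-thread start/end
pair shares the single 0x2e040004 entry. A worked check:

	// (0x2e << 24) | (0x02 << 16) | (6 << 2) == 0x2e020018  // strict_bg_timer
	// (0x2e << 24) | (0x04 << 16) | (1 << 2) == 0x2e040004  // PERF_MON worker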
diff --git a/src/mach.c b/src/mach.c
new file mode 100644
index 0000000..aa57dea
--- /dev/null
+++ b/src/mach.c
@@ -0,0 +1,2551 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "internal.h"
+#if HAVE_MACH
+
+#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1
+#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2
+#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4
+#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8
+#define DISPATCH_MACH_OPTIONS_MASK 0xffff
+
+#define DM_SEND_STATUS_SUCCESS 0x1
+#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2
+
+DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t,
+	DM_SEND_INVOKE_NONE            = 0x0,
+	DM_SEND_INVOKE_FLUSH           = 0x1,
+	DM_SEND_INVOKE_NEEDS_BARRIER   = 0x2,
+	DM_SEND_INVOKE_CANCEL          = 0x4,
+	DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8,
+	DM_SEND_INVOKE_IMMEDIATE_SEND  = 0x10,
+);
+#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \
+		((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND)
+
+static inline mach_msg_option_t _dispatch_mach_checkin_options(void);
+static inline pthread_priority_t _dispatch_mach_priority_propagate(
+		mach_msg_option_t options);
+static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou);
+static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou);
+static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm,
+		mach_port_t local_port, mach_port_t remote_port);
+static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm,
+		dispatch_mach_reply_refs_t dmr, mach_port_t local_port);
+static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected(
+		dispatch_object_t dou, dispatch_mach_reply_refs_t dmr);
+static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm,
+		dispatch_object_t dou);
+static inline mach_msg_header_t* _dispatch_mach_msg_get_msg(
+		dispatch_mach_msg_t dmsg);
+static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou,
+		dispatch_qos_t qos);
+static void _dispatch_mach_cancel(dispatch_mach_t dm);
+static void _dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm,
+		dispatch_qos_t qos);
+static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm,
+		dispatch_mach_msg_t dmsg);
+
+dispatch_source_t
+_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp,
+		const struct dispatch_continuation_s *dc)
+{
+	dispatch_source_t ds;
+	ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct,
+			recvp, 0, &_dispatch_mgr_q);
+	os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER],
+			(dispatch_continuation_t)dc, relaxed);
+	return ds;
+}
+
+
+#pragma mark -
+#pragma mark dispatch to XPC callbacks
+
+// Default dmxh_direct_message_handler callback that does not handle
+// messages inline.
+static bool
+_dispatch_mach_xpc_no_handle_message(
+		void *_Nullable context DISPATCH_UNUSED,
+		dispatch_mach_reason_t reason DISPATCH_UNUSED,
+		dispatch_mach_msg_t message DISPATCH_UNUSED,
+		mach_error_t error DISPATCH_UNUSED)
+{
+	return false;
+}
+
+// Callbacks from dispatch to XPC. The default is to not support any callbacks.
+static const struct dispatch_mach_xpc_hooks_s _dispatch_mach_xpc_hooks_default
+		= {
+	.version = DISPATCH_MACH_XPC_HOOKS_VERSION,
+	.dmxh_direct_message_handler = &_dispatch_mach_xpc_no_handle_message
+};
+
+static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks
+		= &_dispatch_mach_xpc_hooks_default;
+
+void
+dispatch_mach_xpc_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks)
+{
+	if (!os_atomic_cmpxchg(&_dispatch_mach_xpc_hooks,
+			&_dispatch_mach_xpc_hooks_default, hooks, relaxed)) {
+		DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks,
+				"dispatch_mach_xpc_hooks_install_4libxpc called twice");
+	}
+}
+
+#pragma mark -
+#pragma mark dispatch_mach_t
+
+static dispatch_mach_t
+_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context,
+		dispatch_mach_handler_function_t handler, bool handler_is_block,
+		bool is_xpc)
+{
+	dispatch_mach_recv_refs_t dmrr;
+	dispatch_mach_send_refs_t dmsr;
+	dispatch_mach_t dm;
+	// ensure _dispatch_evfilt_machport_direct_enabled is initialized
+	_dispatch_root_queues_init();
+	dm = _dispatch_alloc(DISPATCH_VTABLE(mach),
+			sizeof(struct dispatch_mach_s));
+	_dispatch_queue_init(dm->_as_dq, DQF_LEGACY, 1, true);
+
+	dm->dq_label = label;
+	dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds
+	dm->dm_is_xpc = is_xpc;
+
+	dmrr = dux_create(&_dispatch_mach_type_recv, 0, 0)._dmrr;
+	dmrr->du_owner_wref = _dispatch_ptr2wref(dm);
+	dmrr->dmrr_handler_func = handler;
+	dmrr->dmrr_handler_ctxt = context;
+	dmrr->dmrr_handler_is_block = handler_is_block;
+	dm->dm_recv_refs = dmrr;
+
+	dmsr = dux_create(&_dispatch_mach_type_send, 0,
+			DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD)._dmsr;
+	dmsr->du_owner_wref = _dispatch_ptr2wref(dm);
+	dm->dm_send_refs = dmsr;
+
+	if (is_xpc) {
+		dispatch_xpc_term_refs_t _dxtr =
+				dux_create(&_dispatch_xpc_type_sigterm, SIGTERM, 0)._dxtr;
+		_dxtr->du_owner_wref = _dispatch_ptr2wref(dm);
+		dm->dm_xpc_term_refs = _dxtr;
+	}
+
+	if (slowpath(!q)) {
+		q = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
+	} else {
+		_dispatch_retain(q);
+	}
+	dm->do_targetq = q;
+	_dispatch_object_debug(dm, "%s", __func__);
+	return dm;
+}
+
+dispatch_mach_t
+dispatch_mach_create(const char *label, dispatch_queue_t q,
+		dispatch_mach_handler_t handler)
+{
+	dispatch_block_t bb = _dispatch_Block_copy((void*)handler);
+	return _dispatch_mach_create(label, q, bb,
+			(dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true,
+			false);
+}
+
+dispatch_mach_t
+dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context,
+		dispatch_mach_handler_function_t handler)
+{
+	return _dispatch_mach_create(label, q, context, handler, false, false);
+}
+
+dispatch_mach_t
+dispatch_mach_create_4libxpc(const char *label, dispatch_queue_t q,
+		void *context, dispatch_mach_handler_function_t handler)
+{
+	return _dispatch_mach_create(label, q, context, handler, false, true);
+}
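A minimal usage sketch for the block-based constructor (illustrative only:
recv_port and send_port are hypothetical names, and the handler signature is
the one dmrr_handler_func is invoked with):

	dispatch_mach_t dm = dispatch_mach_create("com.example.channel",
			dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0),
			^(dispatch_mach_reason_t reason, dispatch_mach_msg_t msg,
					mach_error_t error) {
		if (reason == DISPATCH_MACH_MESSAGE_RECEIVED) {
			// consume msg; other reasons report connection lifecycle
		}
	});
	dispatch_mach_connect(dm, recv_port, send_port, NULL);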
+
+void
+_dispatch_mach_dispose(dispatch_mach_t dm)
+{
+	_dispatch_object_debug(dm, "%s", __func__);
+	_dispatch_unote_dispose(dm->dm_recv_refs);
+	dm->dm_recv_refs = NULL;
+	_dispatch_unote_dispose(dm->dm_send_refs);
+	dm->dm_send_refs = NULL;
+	if (dm->dm_xpc_term_refs) {
+		_dispatch_unote_dispose(dm->dm_xpc_term_refs);
+		dm->dm_xpc_term_refs = NULL;
+	}
+	_dispatch_queue_destroy(dm->_as_dq);
+}
+
+void
+dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive,
+		mach_port_t send, dispatch_mach_msg_t checkin)
+{
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	uint32_t disconnect_cnt;
+
+	if (MACH_PORT_VALID(receive)) {
+		dm->dm_recv_refs->du_ident = receive;
+		_dispatch_retain(dm); // the reference the manager queue holds
+	}
+	dmsr->dmsr_send = send;
+	if (MACH_PORT_VALID(send)) {
+		if (checkin) {
+			dispatch_mach_msg_t dmsg = checkin;
+			dispatch_retain(dmsg);
+			dmsg->dmsg_options = _dispatch_mach_checkin_options();
+			dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg);
+		}
+		dmsr->dmsr_checkin = checkin;
+	}
+	dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 ==
+			DISPATCH_MACH_NEVER_INSTALLED);
+	disconnect_cnt = os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, release);
+	if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) {
+		DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected");
+	}
+	_dispatch_object_debug(dm, "%s", __func__);
+	return dispatch_activate(dm);
+}
+
+// assumes low bit of mach port names is always set
+#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u
+
+static inline void
+_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr)
+{
+	dmr->du_ident &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED;
+}
+
+static inline bool
+_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr)
+{
+	mach_port_t reply_port = (mach_port_t)dmr->du_ident;
+	if (!reply_port) return false;
+	return !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED);
+}
+
+static inline mach_port_t
+_dispatch_mach_reply_get_reply_port(dispatch_mach_reply_refs_t dmr)
+{
+	mach_port_t reply_port = (mach_port_t)dmr->du_ident;
+	return reply_port ? (reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0;
+}
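A worked round trip of the tag (port name chosen for illustration, relying on
the stated assumption that real port names have the low bit set):

	// du_ident holds 0x1403: low bit set, so the port reads as unowned.
	// mark_reply_port_owned() stores 0x1402 (bit cleared), after which
	// is_reply_port_owned() returns true, and get_reply_port() re-ors the
	// bit to hand back 0x1403 in either state. A du_ident of 0 uniformly
	// means "no reply port".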
+
+static inline bool
+_dispatch_mach_reply_tryremove(dispatch_mach_t dm,
+		dispatch_mach_reply_refs_t dmr)
+{
+	bool removed;
+	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+	if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
+		TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
+	}
+	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+	return removed;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm,
+		dispatch_mach_reply_refs_t dmr, uint32_t options)
+{
+	dispatch_mach_msg_t dmsgr = NULL;
+	bool disconnected = (options & DU_UNREGISTER_DISCONNECTED);
+	if (options & DU_UNREGISTER_REPLY_REMOVE) {
+		_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+		if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
+			DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration");
+		}
+		TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
+		_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+	}
+	if (disconnected) {
+		dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr);
+	} else if (dmr->dmr_voucher) {
+		_voucher_release(dmr->dmr_voucher);
+		dmr->dmr_voucher = NULL;
+	}
+	_dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p",
+			_dispatch_mach_reply_get_reply_port(dmr),
+			disconnected ? " (disconnected)" : "", dmr->dmr_ctxt);
+	if (dmsgr) {
+		return _dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
+	}
+	dispatch_assert(!(options & DU_UNREGISTER_WAKEUP));
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm,
+		dispatch_mach_reply_refs_t dmr, uint32_t options)
+{
+	dispatch_mach_msg_t dmsgr = NULL;
+	bool replies_empty = false;
+	bool disconnected = (options & DU_UNREGISTER_DISCONNECTED);
+	if (options & DU_UNREGISTER_REPLY_REMOVE) {
+		_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+		if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
+			DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration");
+		}
+		TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
+		replies_empty = TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies);
+		_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+	}
+	if (disconnected) {
+		dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr);
+	} else if (dmr->dmr_voucher) {
+		_voucher_release(dmr->dmr_voucher);
+		dmr->dmr_voucher = NULL;
+	}
+	_dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p",
+			(mach_port_t)dmr->du_ident, disconnected ? " (disconnected)" : "",
+			dmr->dmr_ctxt);
+	if (!_dispatch_unote_unregister(dmr, options)) {
+		_dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]",
+				(mach_port_t)dmr->du_ident, dmr);
+		dispatch_assert(options == DU_UNREGISTER_DISCONNECTED);
+		// dmr must be put back so that the event delivery finds it; the
+		// replies lock is held by the caller.
+		TAILQ_INSERT_HEAD(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+		if (dmsgr) {
+			dmr->dmr_voucher = dmsgr->dmsg_voucher;
+			dmsgr->dmsg_voucher = NULL;
+			dispatch_release(dmsgr);
+		}
+		return; // deferred unregistration
+	}
+	_dispatch_unote_dispose(dmr);
+	if (dmsgr) {
+		return _dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
+	}
+	if ((options & DU_UNREGISTER_WAKEUP) && replies_empty &&
+			(dm->dm_send_refs->dmsr_disconnect_cnt ||
+			(dm->dq_atomic_flags & DSF_CANCELED))) {
+		dx_wakeup(dm, 0, DISPATCH_WAKEUP_FLUSH);
+	}
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_reply_waiter_register(dispatch_mach_t dm,
+		dispatch_mach_reply_refs_t dmr, mach_port_t reply_port,
+		dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts)
+{
+	dmr->du_owner_wref = _dispatch_ptr2wref(dm);
+	dmr->du_registered = false;
+	dmr->du_ident = reply_port;
+	if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
+		_dispatch_mach_reply_mark_reply_port_owned(dmr);
+	} else {
+		if (dmsg->dmsg_voucher) {
+			dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
+		}
+		dmr->dmr_priority =
+				_dispatch_priority_from_pp(dmsg->dmsg_priority);
+		// make reply context visible to leaks rdar://11777199
+		dmr->dmr_ctxt = dmsg->do_ctxt;
+	}
+
+	_dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p",
+			reply_port, dmsg->do_ctxt);
+	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+	if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
+		DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
+				"Reply already registered");
+	}
+	TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port,
+		dispatch_mach_msg_t dmsg)
+{
+	dispatch_mach_reply_refs_t dmr;
+	dispatch_priority_t mpri, pri;
+
+	dmr = dux_create(&_dispatch_mach_type_reply, reply_port, 0)._dmr;
+	dmr->du_owner_wref = _dispatch_ptr2wref(dm);
+	if (dmsg->dmsg_voucher) {
+		dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
+	}
+	dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority);
+	// make reply context visible to leaks rdar://11777199
+	dmr->dmr_ctxt = dmsg->do_ctxt;
+
+	pri = (dm->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK);
+	if (pri && dmr->du_is_direct) {
+		mpri = _dispatch_priority_from_pp_strip_flags(dmsg->dmsg_priority);
+		if (pri < mpri) pri = mpri;
+		pri |= dm->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+	} else {
+		pri = DISPATCH_PRIORITY_FLAG_MANAGER;
+	}
+
+	_dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p",
+			reply_port, dmsg->do_ctxt);
+	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+	if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
+		DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
+				"Reply already registered");
+	}
+	TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+
+	if (!_dispatch_unote_register(dmr, pri)) {
+		_dispatch_mach_reply_kevent_unregister(dm, dmr,
+				DU_UNREGISTER_DISCONNECTED|DU_UNREGISTER_REPLY_REMOVE);
+	}
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm)
+{
+	DISPATCH_ASSERT_ON_MANAGER_QUEUE();
+	if (dm->dm_send_refs->du_registered) {
+		dispatch_assume(_dispatch_unote_unregister(dm->dm_send_refs, 0));
+	}
+	dm->dm_send_refs->du_ident = 0;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mach_notification_kevent_register(dispatch_mach_t dm,mach_port_t send)
+{
+	DISPATCH_ASSERT_ON_MANAGER_QUEUE();
+	dm->dm_send_refs->du_ident = send;
+	dispatch_assume(_dispatch_unote_register(dm->dm_send_refs, 0));
+}
+
+static mach_port_t
+_dispatch_get_thread_reply_port(void)
+{
+	mach_port_t reply_port, mrp = _dispatch_get_thread_mig_reply_port();
+	if (mrp) {
+		reply_port = mrp;
+		_dispatch_debug("machport[0x%08x]: borrowed thread sync reply port",
+				reply_port);
+	} else {
+		reply_port = mach_reply_port();
+		_dispatch_set_thread_mig_reply_port(reply_port);
+		_dispatch_debug("machport[0x%08x]: allocated thread sync reply port",
+				reply_port);
+	}
+	_dispatch_debug_machport(reply_port);
+	return reply_port;
+}
+
+static void
+_dispatch_clear_thread_reply_port(mach_port_t reply_port)
+{
+	mach_port_t mrp = _dispatch_get_thread_mig_reply_port();
+	if (reply_port != mrp) {
+		if (mrp) {
+			_dispatch_debug("machport[0x%08x]: did not clear thread sync reply "
+					"port (found 0x%08x)", reply_port, mrp);
+		}
+		return;
+	}
+	_dispatch_set_thread_mig_reply_port(MACH_PORT_NULL);
+	_dispatch_debug_machport(reply_port);
+	_dispatch_debug("machport[0x%08x]: cleared thread sync reply port",
+			reply_port);
+}
+
+static void
+_dispatch_set_thread_reply_port(mach_port_t reply_port)
+{
+	_dispatch_debug_machport(reply_port);
+	mach_port_t mrp = _dispatch_get_thread_mig_reply_port();
+	if (mrp) {
+		kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
+				MACH_PORT_RIGHT_RECEIVE, -1);
+		DISPATCH_VERIFY_MIG(kr);
+		dispatch_assume_zero(kr);
+		_dispatch_debug("machport[0x%08x]: deallocated sync reply port "
+				"(found 0x%08x)", reply_port, mrp);
+	} else {
+		_dispatch_set_thread_mig_reply_port(reply_port);
+		_dispatch_debug("machport[0x%08x]: restored thread sync reply port",
+				reply_port);
+	}
+}
+
+static inline mach_port_t
+_dispatch_mach_msg_get_remote_port(dispatch_object_t dou)
+{
+	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
+	mach_port_t remote = hdr->msgh_remote_port;
+	return remote;
+}
+
+static inline mach_port_t
+_dispatch_mach_msg_get_reply_port(dispatch_object_t dou)
+{
+	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
+	mach_port_t local = hdr->msgh_local_port;
+	if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) !=
+			MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL;
+	return local;
+}
+
+static inline void
+_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err,
+		unsigned long reason)
+{
+	dispatch_assert_zero(reason & ~(unsigned long)code_emask);
+	dmsg->dmsg_error = ((err || !reason) ? err :
+			 err_local|err_sub(0x3e0)|(mach_error_t)reason);
+}
+
+static inline unsigned long
+_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr)
+{
+	mach_error_t err = dmsg->dmsg_error;
+
+	if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) {
+		*err_ptr = 0;
+		return err_get_code(err);
+	}
+	*err_ptr = err;
+	return err ? DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT;
+}
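A round trip of the encoding above, written out (the err_sub(0x3e0) sub-code
is what lets a packed reason be told apart from a genuine Mach error):

	// set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED)
	//   => dmsg_error == err_local | err_sub(0x3e0) | MESSAGE_RECEIVED
	// get_reason(dmsg, &err)
	//   => err == 0, returns DISPATCH_MACH_MESSAGE_RECEIVED
	// set_reason(dmsg, kr, 0) with kr != 0, then get_reason(dmsg, &err)
	//   => err == kr, returns DISPATCH_MACH_MESSAGE_SEND_FAILED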
+
+static void
+_dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr,
+		uint32_t flags, mach_msg_header_t *hdr, mach_msg_size_t siz)
+{
+	_dispatch_debug_machport(hdr->msgh_remote_port);
+	_dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x",
+			hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);
+	bool canceled = (dm->dq_atomic_flags & DSF_CANCELED);
+	if (!dmr && canceled) {
+		// message received after cancellation; _dispatch_mach_kevent_merge is
+		// responsible for mach channel source state (e.g. deferred deletion)
+		if (hdr) {
+			mach_msg_destroy(hdr);
+			if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
+				free(hdr);
+			}
+		}
+		return;
+	}
+
+	dispatch_mach_msg_t dmsg;
+	voucher_t voucher;
+	pthread_priority_t pp;
+	void *ctxt = NULL;
+	if (dmr) {
+		_voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher
+		voucher = dmr->dmr_voucher;
+		dmr->dmr_voucher = NULL; // transfer reference
+		pp = _dispatch_priority_to_pp(dmr->dmr_priority);
+		ctxt = dmr->dmr_ctxt;
+		uint32_t options = DU_UNREGISTER_IMMEDIATE_DELETE;
+		options |= DU_UNREGISTER_REPLY_REMOVE;
+		options |= DU_UNREGISTER_WAKEUP;
+		if (canceled) options |= DU_UNREGISTER_DISCONNECTED;
+		_dispatch_mach_reply_kevent_unregister(dm, dmr, options);
+		if (canceled) return;
+	} else {
+		voucher = voucher_create_with_mach_msg(hdr);
+		pp = _voucher_get_priority(voucher);
+	}
+	dispatch_mach_msg_destructor_t destructor;
+	destructor = (flags & DISPATCH_EV_MSG_NEEDS_FREE) ?
+			DISPATCH_MACH_MSG_DESTRUCTOR_FREE :
+			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT;
+	dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
+	if (!(flags & DISPATCH_EV_MSG_NEEDS_FREE)) {
+		_dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move,
+				(uint64_t)hdr, (uint64_t)dmsg->dmsg_buf);
+	}
+	dmsg->dmsg_voucher = voucher;
+	dmsg->dmsg_priority = pp;
+	dmsg->do_ctxt = ctxt;
+	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED);
+	_dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg);
+	_dispatch_voucher_ktrace_dmsg_push(dmsg);
+	return _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
+}
+
+void
+_dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags,
+		mach_msg_header_t *msg, mach_msg_size_t msgsz)
+{
+	dispatch_mach_recv_refs_t dmrr = du._dmrr;
+	dispatch_mach_t dm = _dispatch_wref2ptr(dmrr->du_owner_wref);
+
+	_dispatch_mach_msg_recv(dm, NULL, flags, msg, msgsz);
+
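+	// cancellation or a terminal event (EV_ONESHOT/EV_DELETE) ends the
+	// registration with a final source event; otherwise re-arm if needed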
+	if ((dm->dq_atomic_flags & DSF_CANCELED) ||
+			(flags & (EV_ONESHOT | EV_DELETE))) {
+		return _dispatch_source_merge_evt(du, flags, 0, 0);
+	}
+	if (dmrr->du_needs_rearm) {
+		return _dispatch_unote_resume(du);
+	}
+}
+
+void
+_dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags,
+		mach_msg_header_t *msg, mach_msg_size_t msgsz)
+{
+	dispatch_mach_reply_refs_t dmr = du._dmr;
+	dispatch_mach_t dm = _dispatch_wref2ptr(dmr->du_owner_wref);
+
+	_dispatch_mach_msg_recv(dm, dmr, flags, msg, msgsz);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_mach_msg_t
+_dispatch_mach_msg_reply_recv(dispatch_mach_t dm,
+		dispatch_mach_reply_refs_t dmr, mach_port_t reply_port)
+{
+	if (slowpath(!MACH_PORT_VALID(reply_port))) {
+		DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port");
+	}
+	void *ctxt = dmr->dmr_ctxt;
+	mach_msg_header_t *hdr, *hdr2 = NULL;
+	void *hdr_copyout_addr;
+	mach_msg_size_t siz, msgsiz = 0;
+	mach_msg_return_t kr;
+	mach_msg_option_t options;
+	siz = mach_vm_round_page(DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE +
+			DISPATCH_MACH_TRAILER_SIZE);
+	hdr = alloca(siz);
+	for (mach_vm_address_t p = mach_vm_trunc_page((char *)hdr + vm_page_size);
+			p < (mach_vm_address_t)hdr + siz; p += vm_page_size) {
+		*(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
+	}
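+	// this synchronous receive path never adopts the reply message's
+	// voucher, so strip MACH_RCV_VOUCHER from the standard receive options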
+	options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER);
+retry:
+	_dispatch_debug_machport(reply_port);
+	_dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port,
+			(options & MACH_RCV_TIMEOUT) ? "poll" : "wait");
+	kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE,
+			MACH_PORT_NULL);
+	hdr_copyout_addr = hdr;
+	_dispatch_debug_machport(reply_port);
+	_dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) "
+			"returned: %s - 0x%x", reply_port, siz, options,
+			mach_error_string(kr), kr);
+	switch (kr) {
+	case MACH_RCV_TOO_LARGE:
+		if (!fastpath(hdr->msgh_size <= UINT_MAX -
+				DISPATCH_MACH_TRAILER_SIZE)) {
+			DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message");
+		}
+		if (options & MACH_RCV_LARGE) {
+			msgsiz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
+			hdr2 = malloc(msgsiz);
+			if (dispatch_assume(hdr2)) {
+				hdr = hdr2;
+				siz = msgsiz;
+			}
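+			// the too-large message is still queued on the port: poll for it
+			// again immediately with the larger buffer; without MACH_RCV_LARGE
+			// a second overflow dequeues and drops it (logged below)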
+			options |= MACH_RCV_TIMEOUT;
+			options &= ~MACH_RCV_LARGE;
+			goto retry;
+		}
+		_dispatch_log("BUG in libdispatch client: "
+				"dispatch_mach_send_and_wait_for_reply: dropped message too "
+				"large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id,
+				hdr->msgh_size);
+		break;
+	case MACH_RCV_INVALID_NAME: // rdar://problem/21963848
+	case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327
+	case MACH_RCV_PORT_DIED:
+		// channel was disconnected/canceled and reply port destroyed
+		_dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: "
+				"%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr);
+		goto out;
+	case MACH_MSG_SUCCESS:
+		if (hdr->msgh_remote_port) {
+			_dispatch_debug_machport(hdr->msgh_remote_port);
+		}
+		_dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, "
+				"reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id,
+				hdr->msgh_size, hdr->msgh_remote_port);
+		siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE;
+		if (hdr2 && siz < msgsiz) {
+			void *shrink = realloc(hdr2, siz); // trim to the received size
+			if (shrink) hdr = hdr2 = shrink;
+		}
+		break;
+	default:
+		dispatch_assume_zero(kr);
+		break;
+	}
+	_dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port);
+	hdr->msgh_local_port = MACH_PORT_NULL;
+	if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) {
+		if (!kr) mach_msg_destroy(hdr);
+		goto out;
+	}
+	dispatch_mach_msg_t dmsg;
+	dispatch_mach_msg_destructor_t destructor = (!hdr2) ?
+			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT :
+			DISPATCH_MACH_MSG_DESTRUCTOR_FREE;
+	dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
+	if (!hdr2 || hdr != hdr_copyout_addr) {
+		_dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move,
+				(uint64_t)hdr_copyout_addr,
+				(uint64_t)_dispatch_mach_msg_get_msg(dmsg));
+	}
+	dmsg->do_ctxt = ctxt;
+	return dmsg;
+out:
+	free(hdr2);
+	return NULL;
+}
+
+static inline void
+_dispatch_mach_msg_reply_received(dispatch_mach_t dm,
+		dispatch_mach_reply_refs_t dmr, mach_port_t local_port)
+{
+	bool removed = _dispatch_mach_reply_tryremove(dm, dmr);
+	if (!MACH_PORT_VALID(local_port) || !removed) {
+		// port moved/destroyed during receive, or reply waiter was never
+		// registered or already removed (disconnected)
+		return;
+	}
+	mach_port_t reply_port = _dispatch_mach_reply_get_reply_port(dmr);
+	_dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p",
+			reply_port, dmr->dmr_ctxt);
+	if (_dispatch_mach_reply_is_reply_port_owned(dmr)) {
+		_dispatch_set_thread_reply_port(reply_port);
+		if (local_port != reply_port) {
+			DISPATCH_CLIENT_CRASH(local_port,
+					"Reply received on unexpected port");
+		}
+		return;
+	}
+	mach_msg_header_t *hdr;
+	dispatch_mach_msg_t dmsg;
+	dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
+			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
+	hdr->msgh_local_port = local_port;
+	dmsg->dmsg_voucher = dmr->dmr_voucher;
+	dmr->dmr_voucher = NULL;  // transfer reference
+	dmsg->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority);
+	dmsg->do_ctxt = dmr->dmr_ctxt;
+	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED);
+	return _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
+}
+
+static inline void
+_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port,
+		mach_port_t remote_port)
+{
+	mach_msg_header_t *hdr;
+	dispatch_mach_msg_t dmsg;
+	dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
+			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
+	if (local_port) hdr->msgh_local_port = local_port;
+	if (remote_port) hdr->msgh_remote_port = remote_port;
+	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED);
+	_dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ?
+			local_port : remote_port, local_port ? "receive" : "send");
+	return _dispatch_mach_handle_or_push_received_msg(dm, dmsg);
+}
+
+static inline dispatch_mach_msg_t
+_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou,
+		dispatch_mach_reply_refs_t dmr)
+{
+	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
+	mach_port_t reply_port = dmsg ? dmsg->dmsg_reply :
+			_dispatch_mach_reply_get_reply_port(dmr);
+	voucher_t v;
+
+	if (!reply_port) {
+		if (!dmsg) {
+			v = dmr->dmr_voucher;
+			dmr->dmr_voucher = NULL; // transfer reference
+			if (v) _voucher_release(v);
+		}
+		return NULL;
+	}
+
+	if (dmsg) {
+		v = dmsg->dmsg_voucher;
+		if (v) _voucher_retain(v);
+	} else {
+		v = dmr->dmr_voucher;
+		dmr->dmr_voucher = NULL; // transfer reference
+	}
+
+	if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) &&
+			(dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) ||
+			(dmr && !dmr->du_registered &&
+			_dispatch_mach_reply_is_reply_port_owned(dmr))) {
+		if (v) _voucher_release(v);
+		// deallocate owned reply port to break _dispatch_mach_msg_reply_recv
+		// out of waiting in mach_msg(MACH_RCV_MSG)
+		kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
+				MACH_PORT_RIGHT_RECEIVE, -1);
+		DISPATCH_VERIFY_MIG(kr);
+		dispatch_assume_zero(kr);
+		return NULL;
+	}
+
+	mach_msg_header_t *hdr;
+	dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
+			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
+	dmsgr->dmsg_voucher = v;
+	hdr->msgh_local_port = reply_port;
+	if (dmsg) {
+		dmsgr->dmsg_priority = dmsg->dmsg_priority;
+		dmsgr->do_ctxt = dmsg->do_ctxt;
+	} else {
+		dmsgr->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority);
+		dmsgr->do_ctxt = dmr->dmr_ctxt;
+	}
+	_dispatch_mach_msg_set_reason(dmsgr, 0, DISPATCH_MACH_DISCONNECTED);
+	_dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p",
+			hdr->msgh_local_port, dmsgr->do_ctxt);
+	return dmsgr;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou)
+{
+	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
+	mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
+	mach_msg_option_t msg_opts = dmsg->dmsg_options;
+	_dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, "
+			"msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x",
+			msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
+			msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply);
+	unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ?
+			0 : DISPATCH_MACH_MESSAGE_NOT_SENT;
+	dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL);
+	_dispatch_mach_msg_set_reason(dmsg, 0, reason);
+	_dispatch_mach_handle_or_push_received_msg(dm, dmsg);
+	if (dmsgr) _dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
+}
+
+DISPATCH_NOINLINE
+static uint32_t
+_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou,
+		dispatch_mach_reply_refs_t dmr, dispatch_qos_t qos,
+		dispatch_mach_send_invoke_flags_t send_flags)
+{
+	dispatch_mach_send_refs_t dsrr = dm->dm_send_refs;
+	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL;
+	voucher_t voucher = dmsg->dmsg_voucher;
+	mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL;
+	uint32_t send_status = 0;
+	bool clear_voucher = false, kvoucher_move_send = false;
+	mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
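+	// a message whose remote right is a send-once right is a reply: replies
+	// bypass the checkin protocol and the channel send right below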
+	bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
+			MACH_MSG_TYPE_MOVE_SEND_ONCE);
+	mach_port_t reply_port = dmsg->dmsg_reply;
+	if (!is_reply) {
+		dm->dm_needs_mgr = 0;
+		if (unlikely(dsrr->dmsr_checkin && dmsg != dsrr->dmsr_checkin)) {
+			// send initial checkin message
+			if (slowpath(dsrr->du_registered &&
+					_dispatch_queue_get_current() != &_dispatch_mgr_q)) {
+				// send kevent must be uninstalled on the manager queue
+				dm->dm_needs_mgr = 1;
+				goto out;
+			}
+			if (unlikely(!_dispatch_mach_msg_send(dm,
+					dsrr->dmsr_checkin, NULL, qos, DM_SEND_INVOKE_NONE))) {
+				goto out;
+			}
+			dsrr->dmsr_checkin = NULL;
+		}
+	}
+	mach_msg_return_t kr = 0;
+	mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options;
+	if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) {
+		mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED;
+		opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK);
+		if (!is_reply) {
+			if (dmsg != dsrr->dmsr_checkin) {
+				msg->msgh_remote_port = dsrr->dmsr_send;
+			}
+			if (_dispatch_queue_get_current() == &_dispatch_mgr_q) {
+				if (slowpath(!dsrr->du_registered)) {
+					_dispatch_mach_notification_kevent_register(dm,
+							msg->msgh_remote_port);
+				}
+				if (fastpath(dsrr->du_registered)) {
+					if (os_atomic_load2o(dsrr, dmsr_notification_armed,
+							relaxed)) {
+						goto out;
+					}
+					opts |= MACH_SEND_NOTIFY;
+				}
+			}
+			opts |= MACH_SEND_TIMEOUT;
+			if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) {
+				ipc_kvoucher = _voucher_create_mach_voucher_with_priority(
+						voucher, dmsg->dmsg_priority);
+			}
+			_dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg);
+			if (ipc_kvoucher) {
+				kvoucher_move_send = true;
+				clear_voucher = _voucher_mach_msg_set_mach_voucher(msg,
+						ipc_kvoucher, kvoucher_move_send);
+			} else {
+				clear_voucher = _voucher_mach_msg_set(msg, voucher);
+			}
+			if (qos && _dispatch_evfilt_machport_direct_enabled) {
+				opts |= MACH_SEND_OVERRIDE;
+				msg_priority = (mach_msg_priority_t)_dispatch_qos_to_pp(qos);
+			}
+		}
+		_dispatch_debug_machport(msg->msgh_remote_port);
+		if (reply_port) _dispatch_debug_machport(reply_port);
+		if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) {
+			if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
+				_dispatch_clear_thread_reply_port(reply_port);
+			}
+			_dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg,
+					msg_opts);
+		}
+		kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0,
+				msg_priority);
+		_dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, "
+				"opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: "
+				"%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
+				opts, msg_opts, msg->msgh_voucher_port, reply_port,
+				mach_error_string(kr), kr);
+		if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) {
+			_dispatch_mach_reply_waiter_unregister(dm, dmr,
+					DU_UNREGISTER_REPLY_REMOVE);
+		}
+		if (clear_voucher) {
+			if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) {
+				DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption");
+			}
+			mach_voucher_t kv;
+			kv = _voucher_mach_msg_clear(msg, kvoucher_move_send);
+			if (kvoucher_move_send) ipc_kvoucher = kv;
+		}
+	}
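+	// MACH_SEND_TIMEOUT with a zero timeout makes the send non-blocking:
+	// MACH_SEND_TIMED_OUT means the destination port's queue was full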
+	if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) {
+		if (opts & MACH_SEND_NOTIFY) {
+			_dispatch_debug("machport[0x%08x]: send-possible notification "
+					"armed", (mach_port_t)dsrr->du_ident);
+			_dispatch_mach_notification_set_armed(dsrr);
+		} else {
+			// send kevent must be installed on the manager queue
+			dm->dm_needs_mgr = 1;
+		}
+		if (ipc_kvoucher) {
+			_dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher);
+			voucher_t ipc_voucher;
+			ipc_voucher = _voucher_create_with_priority_and_mach_voucher(
+					voucher, dmsg->dmsg_priority, ipc_kvoucher);
+			_dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]",
+					ipc_voucher, dmsg, voucher);
+			if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
+			dmsg->dmsg_voucher = ipc_voucher;
+		}
+		goto out;
+	} else if (ipc_kvoucher && (kr || !kvoucher_move_send)) {
+		_voucher_dealloc_mach_voucher(ipc_kvoucher);
+	}
+	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+	if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port &&
+			!(dmrr->du_registered && dmrr->du_ident == reply_port)) {
+		if (!dmrr->du_is_direct &&
+				_dispatch_queue_get_current() != &_dispatch_mgr_q) {
+			// reply receive kevent must be installed on the manager queue
+			dm->dm_needs_mgr = 1;
+			dmsg->dmsg_options = msg_opts | DISPATCH_MACH_REGISTER_FOR_REPLY;
+			goto out;
+		}
+		_dispatch_mach_reply_kevent_register(dm, reply_port, dmsg);
+	}
+	if (unlikely(!is_reply && dmsg == dsrr->dmsr_checkin &&
+			dsrr->du_registered)) {
+		_dispatch_mach_notification_kevent_unregister(dm);
+	}
+	if (slowpath(kr)) {
+		// Send failed, so reply was never registered <rdar://problem/14309159>
+		dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL);
+	}
+	_dispatch_mach_msg_set_reason(dmsg, kr, 0);
+	if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) &&
+			(msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) {
+		// Return sent message synchronously <rdar://problem/25947334>
+		send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT;
+	} else {
+		_dispatch_mach_handle_or_push_received_msg(dm, dmsg);
+	}
+	if (dmsgr) _dispatch_mach_handle_or_push_received_msg(dm, dmsgr);
+	send_status |= DM_SEND_STATUS_SUCCESS;
+out:
+	return send_status;
+}
+
+#pragma mark -
+#pragma mark dispatch_mach_send_refs_t
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dmsr_state_max_qos(uint64_t dmsr_state)
+{
+	return _dq_state_max_qos(dmsr_state);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dmsr_state_needs_override(uint64_t dmsr_state, dispatch_qos_t qos)
+{
+	dmsr_state &= DISPATCH_MACH_STATE_MAX_QOS_MASK;
+	return dmsr_state < _dq_state_from_qos(qos);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_dmsr_state_merge_override(uint64_t dmsr_state, dispatch_qos_t qos)
+{
+	if (_dmsr_state_needs_override(dmsr_state, qos)) {
+		dmsr_state &= ~DISPATCH_MACH_STATE_MAX_QOS_MASK;
+		dmsr_state |= _dq_state_from_qos(qos);
+		dmsr_state |= DISPATCH_MACH_STATE_DIRTY;
+		dmsr_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+	}
+	return dmsr_state;
+}
+
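+// the send queue is a multi-producer, single-consumer (MPSC) list: any
+// thread may push, but only the holder of the send lock pops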
+#define _dispatch_mach_send_push_update_tail(dmsr, tail) \
+		os_mpsc_push_update_tail(dmsr, dmsr, tail, do_next)
+#define _dispatch_mach_send_push_update_head(dmsr, head) \
+		os_mpsc_push_update_head(dmsr, dmsr, head)
+#define _dispatch_mach_send_get_head(dmsr) \
+		os_mpsc_get_head(dmsr, dmsr)
+#define _dispatch_mach_send_unpop_head(dmsr, dc, dc_next) \
+		os_mpsc_undo_pop_head(dmsr, dmsr, dc, dc_next, do_next)
+#define _dispatch_mach_send_pop_head(dmsr, head) \
+		os_mpsc_pop_head(dmsr, dmsr, head, do_next)
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dmsr,
+		dispatch_object_t dou)
+{
+	if (_dispatch_mach_send_push_update_tail(dmsr, dou._do)) {
+		_dispatch_mach_send_push_update_head(dmsr, dou._do);
+		return true;
+	}
+	return false;
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags,
+		dispatch_mach_send_invoke_flags_t send_flags)
+{
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	dispatch_mach_reply_refs_t dmr;
+	dispatch_mach_msg_t dmsg;
+	struct dispatch_object_s *dc = NULL, *next_dc = NULL;
+	dispatch_qos_t qos = _dmsr_state_max_qos(dmsr->dmsr_state);
+	uint64_t old_state, new_state;
+	uint32_t send_status;
+	bool needs_mgr, disconnecting, returning_send_result = false;
+
+again:
+	needs_mgr = false; disconnecting = false;
+	while (dmsr->dmsr_tail) {
+		dc = _dispatch_mach_send_get_head(dmsr);
+		do {
+			dispatch_mach_send_invoke_flags_t sf = send_flags;
+			// Only request immediate send result for the first message
+			send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
+			next_dc = _dispatch_mach_send_pop_head(dmsr, dc);
+			if (_dispatch_object_has_type(dc,
+					DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
+				if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
+					goto partial_drain;
+				}
+				_dispatch_continuation_pop(dc, dm->_as_dq, flags);
+				continue;
+			}
+			if (_dispatch_object_is_slow_item(dc)) {
+				dmsg = ((dispatch_continuation_t)dc)->dc_data;
+				dmr = ((dispatch_continuation_t)dc)->dc_other;
+			} else if (_dispatch_object_has_vtable(dc)) {
+				dmsg = (dispatch_mach_msg_t)dc;
+				dmr = NULL;
+			} else {
+				if ((dmsr->du_registered || !dm->dm_recv_refs->du_is_direct) &&
+						(_dispatch_queue_get_current() != &_dispatch_mgr_q)) {
+					// send kevent must be uninstalled on the manager queue
+					needs_mgr = true;
+					goto partial_drain;
+				}
+				if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) {
+					disconnecting = true;
+					goto partial_drain;
+				}
+				_dispatch_perfmon_workitem_inc();
+				continue;
+			}
+			_dispatch_voucher_ktrace_dmsg_pop(dmsg);
+			if (unlikely(dmsr->dmsr_disconnect_cnt ||
+					(dm->dq_atomic_flags & DSF_CANCELED))) {
+				_dispatch_mach_msg_not_sent(dm, dmsg);
+				_dispatch_perfmon_workitem_inc();
+				continue;
+			}
+			send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, qos, sf);
+			if (unlikely(!send_status)) {
+				goto partial_drain;
+			}
+			if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) {
+				returning_send_result = true;
+			}
+			_dispatch_perfmon_workitem_inc();
+		} while ((dc = next_dc));
+	}
+
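+	// drop the send lock; if the state was marked dirty while draining,
+	// keep ownership and take another pass through the queue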
+	os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
+		if (old_state & DISPATCH_MACH_STATE_DIRTY) {
+			new_state = old_state;
+			new_state &= ~DISPATCH_MACH_STATE_DIRTY;
+			new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+			new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
+		} else {
+			// unlock
+			new_state = 0;
+		}
+	});
+	goto out;
+
+partial_drain:
+	// if this is not a complete drain, we must undo some things
+	_dispatch_mach_send_unpop_head(dmsr, dc, next_dc);
+
+	if (_dispatch_object_has_type(dc,
+			DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
+		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
+			new_state = old_state;
+			new_state |= DISPATCH_MACH_STATE_DIRTY;
+			new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER;
+			new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
+			new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+		});
+	} else {
+		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
+			new_state = old_state;
+			if (old_state & (DISPATCH_MACH_STATE_DIRTY |
+					DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) {
+				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
+				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
+			} else {
+				new_state |= DISPATCH_MACH_STATE_DIRTY;
+				new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
+			}
+		});
+	}
+
+out:
+	if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) {
+		// Ensure that the root queue sees that this thread was overridden.
+		_dispatch_set_basepri_override_qos(_dmsr_state_max_qos(old_state));
+	}
+
+	if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) {
+		qos = _dmsr_state_max_qos(new_state);
+		os_atomic_thread_fence(dependency);
+		dmsr = os_atomic_force_dependency_on(dmsr, new_state);
+		goto again;
+	}
+
+	if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
+		qos = _dmsr_state_max_qos(new_state);
+		_dispatch_mach_send_barrier_drain_push(dm, qos);
+	} else {
+		if (needs_mgr || dm->dm_needs_mgr) {
+			qos = _dmsr_state_max_qos(new_state);
+		} else {
+			qos = 0;
+		}
+		if (!disconnecting) dx_wakeup(dm, qos, DISPATCH_WAKEUP_FLUSH);
+	}
+	return returning_send_result;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_send_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags,
+		dispatch_mach_send_invoke_flags_t send_flags)
+{
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	dispatch_lock_owner tid_self = _dispatch_tid_self();
+	uint64_t old_state, new_state;
+
+	uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK;
+	uint64_t canlock_state = 0;
+
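+	// the send lock can only be taken when no other thread owns it and,
+	// unless this invoke may run barriers, when no barrier is pending;
+	// a NEEDS_BARRIER invoke conversely requires a pending barrier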
+	if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) {
+		canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
+		canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER;
+	} else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
+		canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
+	}
+
+	dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor();
+retry:
+	os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, {
+		new_state = old_state;
+		if (unlikely((old_state & canlock_mask) != canlock_state)) {
+			if (!(send_flags & DM_SEND_INVOKE_FLUSH)) {
+				os_atomic_rmw_loop_give_up(break);
+			}
+			new_state |= DISPATCH_MACH_STATE_DIRTY;
+		} else {
+			if (_dispatch_queue_should_override_self(old_state, oq_floor)) {
+				os_atomic_rmw_loop_give_up({
+					oq_floor = _dispatch_queue_override_self(old_state);
+					goto retry;
+				});
+			}
+			new_state |= tid_self;
+			new_state &= ~DISPATCH_MACH_STATE_DIRTY;
+			new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+			new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
+		}
+	});
+
+	if (unlikely((old_state & canlock_mask) != canlock_state)) {
+		return;
+	}
+	if (send_flags & DM_SEND_INVOKE_CANCEL) {
+		_dispatch_mach_cancel(dm);
+	}
+	_dispatch_mach_send_drain(dm, flags, send_flags);
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc,
+		dispatch_invoke_flags_t flags)
+{
+	dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current();
+	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
+	dispatch_thread_frame_s dtf;
+
+	DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY);
+	DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER);
+	// hide the mach channel (see _dispatch_mach_barrier_invoke comment)
+	_dispatch_thread_frame_stash(&dtf);
+	_dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags,{
+		_dispatch_mach_send_invoke(dm, flags,
+				DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER);
+	});
+	_dispatch_thread_frame_unstash(&dtf);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, dispatch_qos_t qos)
+{
+	dispatch_continuation_t dc = _dispatch_continuation_alloc();
+
+	dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN);
+	dc->dc_func = NULL;
+	dc->dc_ctxt = NULL;
+	dc->dc_voucher = DISPATCH_NO_VOUCHER;
+	dc->dc_priority = DISPATCH_NO_PRIORITY;
+	return _dispatch_queue_push(dm->_as_dq, dc, qos);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc,
+		dispatch_qos_t qos)
+{
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	uint64_t old_state, new_state, state_flags = 0;
+	dispatch_lock_owner owner;
+	bool wakeup;
+
+	// <rdar://problem/25896179> when pushing a send barrier that destroys
+	// the last reference to this channel, and the send queue is already
+	// draining on another thread, the send barrier may run as soon as
+	// _dispatch_mach_send_push_inline() returns.
+	_dispatch_retain(dm);
+
+	wakeup = _dispatch_mach_send_push_inline(dmsr, dc);
+	if (wakeup) {
+		state_flags = DISPATCH_MACH_STATE_DIRTY;
+		if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) {
+			state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER;
+		}
+	}
+
+	if (state_flags) {
+		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
+			new_state = _dmsr_state_merge_override(old_state, qos);
+			new_state |= state_flags;
+		});
+	} else {
+		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, relaxed, {
+			new_state = _dmsr_state_merge_override(old_state, qos);
+			if (old_state == new_state) {
+				os_atomic_rmw_loop_give_up(break);
+			}
+		});
+	}
+
+	qos = _dmsr_state_max_qos(new_state);
+	owner = _dispatch_lock_owner((dispatch_lock)old_state);
+	if (owner) {
+		if (_dmsr_state_needs_override(old_state, qos)) {
+			_dispatch_wqthread_override_start_check_owner(owner, qos,
+					&dmsr->dmsr_state_lock.dul_lock);
+		}
+		return _dispatch_release_tailcall(dm);
+	}
+
+	dispatch_wakeup_flags_t wflags = 0;
+	if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) {
+		_dispatch_mach_send_barrier_drain_push(dm, qos);
+	} else if (wakeup || dmsr->dmsr_disconnect_cnt ||
+			(dm->dq_atomic_flags & DSF_CANCELED)) {
+		wflags = DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME;
+	} else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
+		wflags = DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_CONSUME;
+	}
+	if (wflags) {
+		return dx_wakeup(dm, qos, wflags);
+	}
+	return _dispatch_release_tailcall(dm);
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm,
+		dispatch_object_t dou, dispatch_qos_t qos,
+		dispatch_mach_send_invoke_flags_t send_flags)
+{
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	dispatch_lock_owner tid_self = _dispatch_tid_self();
+	uint64_t old_state, new_state, canlock_mask, state_flags = 0;
+	dispatch_lock_owner owner;
+
+	bool wakeup = _dispatch_mach_send_push_inline(dmsr, dou);
+	if (wakeup) {
+		state_flags = DISPATCH_MACH_STATE_DIRTY;
+	}
+
+	if (unlikely(dmsr->dmsr_disconnect_cnt ||
+			(dm->dq_atomic_flags & DSF_CANCELED))) {
+		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, {
+			new_state = _dmsr_state_merge_override(old_state, qos);
+			new_state |= state_flags;
+		});
+		dx_wakeup(dm, qos, DISPATCH_WAKEUP_FLUSH);
+		return false;
+	}
+
+	canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK |
+			DISPATCH_MACH_STATE_PENDING_BARRIER;
+	if (state_flags) {
+		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, seq_cst, {
+			new_state = _dmsr_state_merge_override(old_state, qos);
+			new_state |= state_flags;
+			if (likely((old_state & canlock_mask) == 0)) {
+				new_state |= tid_self;
+				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
+				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
+			}
+		});
+	} else {
+		os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, {
+			new_state = _dmsr_state_merge_override(old_state, qos);
+			if (new_state == old_state) {
+				os_atomic_rmw_loop_give_up(return false);
+			}
+			if (likely((old_state & canlock_mask) == 0)) {
+				new_state |= tid_self;
+				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
+				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
+				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
+			}
+		});
+	}
+
+	owner = _dispatch_lock_owner((dispatch_lock)old_state);
+	if (owner) {
+		if (_dmsr_state_needs_override(old_state, qos)) {
+			_dispatch_wqthread_override_start_check_owner(owner, qos,
+					&dmsr->dmsr_state_lock.dul_lock);
+		}
+		return false;
+	}
+
+	if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
+		dx_wakeup(dm, qos, DISPATCH_WAKEUP_OVERRIDING);
+		return false;
+	}
+
+	// Ensure our message is still at the head of the queue and has not already
+	// been dequeued by another thread that raced us to the send queue lock.
+	// A plain load of the head and comparison against our object pointer is
+	// sufficient.
+	if (unlikely(!(wakeup && dou._do == dmsr->dmsr_head))) {
+		// Don't request immediate send result for messages we don't own
+		send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
+	}
+	return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags);
+}
+
+void
+_dispatch_mach_merge_notification(dispatch_unote_t du,
+		uint32_t flags DISPATCH_UNUSED, uintptr_t data,
+		pthread_priority_t pp DISPATCH_UNUSED)
+{
+	dispatch_mach_send_refs_t dmsr = du._dmsr;
+	dispatch_mach_t dm = _dispatch_wref2ptr(dmsr->du_owner_wref);
+
+	if (data & dmsr->du_fflags) {
+		_dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN,
+				DM_SEND_INVOKE_FLUSH);
+	}
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm,
+		dispatch_mach_msg_t dmsg)
+{
+	mach_error_t error;
+	dispatch_mach_reason_t reason = _dispatch_mach_msg_get_reason(dmsg, &error);
+	if (!dm->dm_is_xpc ||
+			!_dispatch_mach_xpc_hooks->dmxh_direct_message_handler(
+			dm->dm_recv_refs->dmrr_handler_ctxt, reason, dmsg, error)) {
+		// Not XPC client or not a message that XPC can handle inline - push
+		// it onto the channel queue.
+		dispatch_qos_t qos = _dispatch_qos_from_pp(dmsg->dmsg_priority);
+		_dispatch_queue_push(dm->_as_dq, dmsg, qos);
+	} else {
+		// XPC handled the message inline. Do the cleanup that would otherwise
+		// have happened in _dispatch_mach_msg_invoke(), leaving out steps that
+		// are not required in this context.
+		dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
+		dispatch_release(dmsg);
+	}
+}
+
+#pragma mark -
+#pragma mark dispatch_mach_t
+
+static inline mach_msg_option_t
+_dispatch_mach_checkin_options(void)
+{
+	mach_msg_option_t options = 0;
+#if DISPATCH_USE_CHECKIN_NOIMPORTANCE
+	options = MACH_SEND_NOIMPORTANCE; // <rdar://problem/16996737>
+#endif
+	return options;
+}
+
+static inline mach_msg_option_t
+_dispatch_mach_send_options(void)
+{
+	mach_msg_option_t options = 0;
+	return options;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_mach_priority_propagate(mach_msg_option_t options)
+{
+#if DISPATCH_USE_NOIMPORTANCE_QOS
+	if (options & MACH_SEND_NOIMPORTANCE) return 0;
+#else
+	(void)options;
+#endif
+	return _dispatch_priority_propagate();
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
+		dispatch_continuation_t dc_wait, mach_msg_option_t options)
+{
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) {
+		DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued");
+	}
+	dispatch_retain(dmsg);
+	pthread_priority_t priority = _dispatch_mach_priority_propagate(options);
+	options |= _dispatch_mach_send_options();
+	dmsg->dmsg_options = options;
+	mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
+	dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg);
+	bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
+			MACH_MSG_TYPE_MOVE_SEND_ONCE);
+	dmsg->dmsg_priority = priority;
+	dmsg->dmsg_voucher = _voucher_copy();
+	_dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg);
+
+	uint32_t send_status;
+	bool returning_send_result = false;
+	dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
+	if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) {
+		send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND;
+	}
+	if (is_reply && !dmsg->dmsg_reply && !dmsr->dmsr_disconnect_cnt &&
+			!(dm->dq_atomic_flags & DSF_CANCELED)) {
+		// replies are sent to a send-once right and don't need the send queue
+		dispatch_assert(!dc_wait);
+		send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags);
+		dispatch_assert(send_status);
+		returning_send_result = !!(send_status &
+				DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT);
+	} else {
+		_dispatch_voucher_ktrace_dmsg_push(dmsg);
+		dispatch_object_t dou = { ._dmsg = dmsg };
+		if (dc_wait) dou._dc = dc_wait;
+		returning_send_result = _dispatch_mach_send_push_and_trydrain(dm, dou,
+				_dispatch_qos_from_pp(priority), send_flags);
+	}
+	if (returning_send_result) {
+		_dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg);
+		if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
+		dmsg->dmsg_voucher = NULL;
+		dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
+		dispatch_release(dmsg);
+	}
+	return returning_send_result;
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
+		mach_msg_option_t options)
+{
+	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
+	options &= ~DISPATCH_MACH_OPTIONS_MASK;
+	bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,
+			options);
+	dispatch_assert(!returned_send_result);
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
+		mach_msg_option_t options, dispatch_mach_send_flags_t send_flags,
+		dispatch_mach_reason_t *send_result, mach_error_t *send_error)
+{
+	if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
+		DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
+	}
+	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
+	options &= ~DISPATCH_MACH_OPTIONS_MASK;
+	options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
+	bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,
+			options);
+	unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
+	mach_error_t err = 0;
+	if (returned_send_result) {
+		reason = _dispatch_mach_msg_get_reason(dmsg, &err);
+	}
+	*send_result = reason;
+	*send_error = err;
+}
+
+static inline dispatch_mach_msg_t
+_dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
+		dispatch_mach_msg_t dmsg, mach_msg_option_t options,
+		bool *returned_send_result)
+{
+	mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg);
+	if (!reply_port) {
+		// use per-thread mach reply port <rdar://24597802>
+		reply_port = _dispatch_get_thread_reply_port();
+		mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
+		dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) ==
+				MACH_MSG_TYPE_MAKE_SEND_ONCE);
+		hdr->msgh_local_port = reply_port;
+		options |= DISPATCH_MACH_OWNED_REPLY_PORT;
+	}
+
+	dispatch_mach_reply_refs_t dmr;
+#if DISPATCH_DEBUG
+	dmr = _dispatch_calloc(1, sizeof(*dmr));
+#else
+	struct dispatch_mach_reply_refs_s dmr_buf = { };
+	dmr = &dmr_buf;
+#endif
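+	// the sync-waiter continuation carries the message and its reply refs so
+	// that the send drain can register the waiter before the message is sent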
+	struct dispatch_continuation_s dc_wait = {
+		.dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT,
+		.dc_data = dmsg,
+		.dc_other = dmr,
+		.dc_priority = DISPATCH_NO_PRIORITY,
+		.dc_voucher = DISPATCH_NO_VOUCHER,
+	};
+	dmr->dmr_ctxt = dmsg->do_ctxt;
+	*returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,
+			options);
+	if (options & DISPATCH_MACH_OWNED_REPLY_PORT) {
+		_dispatch_clear_thread_reply_port(reply_port);
+	}
+	dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port);
+#if DISPATCH_DEBUG
+	free(dmr);
+#endif
+	return dmsg;
+}
+
+DISPATCH_NOINLINE
+dispatch_mach_msg_t
+dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
+		dispatch_mach_msg_t dmsg, mach_msg_option_t options)
+{
+	bool returned_send_result;
+	dispatch_mach_msg_t reply;
+	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
+	options &= ~DISPATCH_MACH_OPTIONS_MASK;
+	options |= DISPATCH_MACH_WAIT_FOR_REPLY;
+	reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
+			&returned_send_result);
+	dispatch_assert(!returned_send_result);
+	return reply;
+}
+
+DISPATCH_NOINLINE
+dispatch_mach_msg_t
+dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm,
+		dispatch_mach_msg_t dmsg, mach_msg_option_t options,
+		dispatch_mach_send_flags_t send_flags,
+		dispatch_mach_reason_t *send_result, mach_error_t *send_error)
+{
+	if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
+		DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
+	}
+	bool returned_send_result;
+	dispatch_mach_msg_t reply;
+	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
+	options &= ~DISPATCH_MACH_OPTIONS_MASK;
+	options |= DISPATCH_MACH_WAIT_FOR_REPLY;
+	options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
+	reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
+			&returned_send_result);
+	unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
+	mach_error_t err = 0;
+	if (returned_send_result) {
+		reason = _dispatch_mach_msg_get_reason(dmsg, &err);
+	}
+	*send_result = reason;
+	*send_error = err;
+	return reply;
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_disconnect(dispatch_mach_t dm)
+{
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	bool disconnected;
+	if (dmsr->du_registered) {
+		_dispatch_mach_notification_kevent_unregister(dm);
+	}
+	if (MACH_PORT_VALID(dmsr->dmsr_send)) {
+		_dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dmsr->dmsr_send);
+	}
+	dmsr->dmsr_send = MACH_PORT_NULL;
+	if (dmsr->dmsr_checkin) {
+		_dispatch_mach_msg_not_sent(dm, dmsr->dmsr_checkin);
+		dmsr->dmsr_checkin = NULL;
+	}
+	_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock);
+	dispatch_mach_reply_refs_t dmr, tmp;
+	TAILQ_FOREACH_SAFE(dmr, &dm->dm_send_refs->dmsr_replies, dmr_list, tmp) {
+		TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list);
+		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
+		if (dmr->du_registered) {
+			_dispatch_mach_reply_kevent_unregister(dm, dmr,
+					DU_UNREGISTER_DISCONNECTED);
+		} else {
+			_dispatch_mach_reply_waiter_unregister(dm, dmr,
+					DU_UNREGISTER_DISCONNECTED);
+		}
+	}
+	disconnected = TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies);
+	_dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock);
+
+	// The SIGTERM unote is registered until the channel is canceled.
+	if ((dm->dq_atomic_flags & DSF_CANCELED) && dm->dm_xpc_term_refs &&
+			!_dispatch_unote_unregister(dm->dm_xpc_term_refs, 0)) {
+		disconnected = false;
+	}
+	return disconnected;
+}
+
+static void
+_dispatch_mach_cancel(dispatch_mach_t dm)
+{
+	_dispatch_object_debug(dm, "%s", __func__);
+	if (!_dispatch_mach_disconnect(dm)) return;
+
+	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+	mach_port_t local_port = (mach_port_t)dmrr->du_ident;
+	if (local_port) {
+		_dispatch_source_refs_unregister(dm->_as_ds, 0);
+		if ((dm->dq_atomic_flags & DSF_STATE_MASK) == DSF_DELETED) {
+			_dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL);
+		}
+	} else {
+		_dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED,
+				DSF_ARMED | DSF_DEFERRED_DELETE);
+	}
+}
+
+DISPATCH_NOINLINE
+static bool
+_dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou)
+{
+	if (!_dispatch_mach_disconnect(dm)) return false;
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	dmsr->dmsr_checkin = dou._dc->dc_data;
+	dmsr->dmsr_send = (mach_port_t)dou._dc->dc_other;
+	_dispatch_continuation_free(dou._dc);
+	(void)os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, relaxed);
+	_dispatch_object_debug(dm, "%s", __func__);
+	_dispatch_release(dm); // <rdar://problem/26266265>
+	return true;
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send,
+		dispatch_mach_msg_t checkin)
+{
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
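+	// while the disconnect count is non-zero, the send drain reports queued
+	// messages as not sent instead of sending them; the count drops once the
+	// reconnect continuation below runs on the send queue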
+	(void)os_atomic_inc2o(dmsr, dmsr_disconnect_cnt, relaxed);
+	if (MACH_PORT_VALID(send) && checkin) {
+		dispatch_mach_msg_t dmsg = checkin;
+		dispatch_retain(dmsg);
+		dmsg->dmsg_options = _dispatch_mach_checkin_options();
+		dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg);
+	} else {
+		checkin = NULL;
+		dmsr->dmsr_checkin_port = MACH_PORT_NULL;
+	}
+	dispatch_continuation_t dc = _dispatch_continuation_alloc();
+	dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT;
+	// actually called manually in _dispatch_mach_send_drain
+	dc->dc_func = (void*)_dispatch_mach_reconnect_invoke;
+	dc->dc_ctxt = dc;
+	dc->dc_data = checkin;
+	dc->dc_other = (void*)(uintptr_t)send;
+	dc->dc_voucher = DISPATCH_NO_VOUCHER;
+	dc->dc_priority = DISPATCH_NO_PRIORITY;
+	_dispatch_retain(dm); // <rdar://problem/26266265>
+	return _dispatch_mach_send_push(dm, dc, 0);
+}
+
+DISPATCH_NOINLINE
+mach_port_t
+dispatch_mach_get_checkin_port(dispatch_mach_t dm)
+{
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) {
+		return MACH_PORT_DEAD;
+	}
+	return dmsr->dmsr_checkin_port;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_connect_invoke(dispatch_mach_t dm)
+{
+	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+	_dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
+			DISPATCH_MACH_CONNECTED, NULL, 0, dmrr->dmrr_handler_func);
+	dm->dm_connect_handler_called = 1;
+	_dispatch_perfmon_workitem_inc();
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg,
+		dispatch_invoke_flags_t flags)
+{
+	dispatch_thread_frame_s dtf;
+	dispatch_mach_recv_refs_t dmrr;
+	dispatch_mach_t dm;
+	mach_error_t err;
+	unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err);
+	_dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE|
+			DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE;
+
+	// hide mach channel
+	dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf);
+	dmrr = dm->dm_recv_refs;
+	dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
+	_dispatch_voucher_ktrace_dmsg_pop(dmsg);
+	_dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg);
+	(void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority,
+			dmsg->dmsg_voucher, adopt_flags);
+	dmsg->dmsg_voucher = NULL;
+	dispatch_invoke_with_autoreleasepool(flags, {
+		if (slowpath(!dm->dm_connect_handler_called)) {
+			_dispatch_mach_connect_invoke(dm);
+		}
+		_dispatch_client_callout4(dmrr->dmrr_handler_ctxt, reason, dmsg, err,
+				dmrr->dmrr_handler_func);
+		_dispatch_perfmon_workitem_inc();
+	});
+	_dispatch_thread_frame_unstash(&dtf);
+	_dispatch_introspection_queue_item_complete(dmsg);
+	dispatch_release(dmsg);
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_mach_barrier_invoke(dispatch_continuation_t dc,
+		dispatch_invoke_flags_t flags)
+{
+	dispatch_thread_frame_s dtf;
+	dispatch_mach_t dm = dc->dc_other;
+	dispatch_mach_recv_refs_t dmrr;
+	uintptr_t dc_flags = (uintptr_t)dc->dc_data;
+	unsigned long type = dc_type(dc);
+
+	// hide mach channel from clients
+	if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
+		// on the send queue, the mach channel isn't the current queue;
+		// its target queue is already the current one
+		_dispatch_thread_frame_stash(&dtf);
+	}
+	dmrr = dm->dm_recv_refs;
+	DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT);
+	_dispatch_continuation_pop_forwarded(dc, dm->dq_override_voucher, dc_flags,{
+		dispatch_invoke_with_autoreleasepool(flags, {
+			if (slowpath(!dm->dm_connect_handler_called)) {
+				_dispatch_mach_connect_invoke(dm);
+			}
+			_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
+			_dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
+					DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0,
+					dmrr->dmrr_handler_func);
+		});
+	});
+	if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
+		_dispatch_thread_frame_unstash(&dtf);
+	}
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context,
+		dispatch_function_t func)
+{
+	dispatch_continuation_t dc = _dispatch_continuation_alloc();
+	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
+	dispatch_qos_t qos;
+
+	_dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
+	dc->dc_data = (void *)dc->dc_flags;
+	dc->dc_other = dm;
+	dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER);
+	_dispatch_trace_continuation_push(dm->_as_dq, dc);
+	qos = _dispatch_continuation_override_qos(dm->_as_dq, dc);
+	return _dispatch_mach_send_push(dm, dc, qos);
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
+{
+	dispatch_continuation_t dc = _dispatch_continuation_alloc();
+	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
+	dispatch_qos_t qos;
+
+	_dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
+	dc->dc_data = (void *)dc->dc_flags;
+	dc->dc_other = dm;
+	dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER);
+	_dispatch_trace_continuation_push(dm->_as_dq, dc);
+	qos = _dispatch_continuation_override_qos(dm->_as_dq, dc);
+	return _dispatch_mach_send_push(dm, dc, qos);
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context,
+		dispatch_function_t func)
+{
+	dispatch_continuation_t dc = _dispatch_continuation_alloc();
+	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
+
+	_dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
+	dc->dc_data = (void *)dc->dc_flags;
+	dc->dc_other = dm;
+	dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER);
+	return _dispatch_continuation_async(dm->_as_dq, dc);
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
+{
+	dispatch_continuation_t dc = _dispatch_continuation_alloc();
+	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
+
+	_dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
+	dc->dc_data = (void *)dc->dc_flags;
+	dc->dc_other = dm;
+	dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER);
+	return _dispatch_continuation_async(dm->_as_dq, dc);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags)
+{
+	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+
+	dispatch_invoke_with_autoreleasepool(flags, {
+		if (slowpath(!dm->dm_connect_handler_called)) {
+			_dispatch_mach_connect_invoke(dm);
+		}
+		_dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
+				DISPATCH_MACH_CANCELED, NULL, 0, dmrr->dmrr_handler_func);
+		_dispatch_perfmon_workitem_inc();
+	});
+	dm->dm_cancel_handler_called = 1;
+	_dispatch_release(dm); // the retain is done at creation time
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_mach_cancel(dispatch_mach_t dm)
+{
+	dispatch_source_cancel(dm->_as_ds);
+}
+
+static void
+_dispatch_mach_install(dispatch_mach_t dm, dispatch_priority_t pri)
+{
+	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+	uint32_t disconnect_cnt;
+
+	if (dmrr->du_ident) {
+		_dispatch_source_refs_register(dm->_as_ds, pri);
+	}
+	if (dm->dm_xpc_term_refs) {
+		_dispatch_unote_register(dm->dm_xpc_term_refs, pri);
+	}
+
+	if (dmrr->du_is_direct) {
+		// _dispatch_mach_reply_kevent_register assumes this has been done.
+		// Unlike regular sources or queues, the DEFAULTQUEUE flag is used so
+		// that the priority of the channel doesn't act as a QoS floor for
+		// incoming messages (26761457)
+		dm->dq_priority = pri;
+	}
+	dm->ds_is_installed = true;
+	if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_send_refs, dmsr_disconnect_cnt,
+			DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) {
+		DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed");
+	}
+}
+
+void
+_dispatch_mach_finalize_activation(dispatch_mach_t dm)
+{
+	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+	if (dmrr->du_is_direct && !dm->ds_is_installed) {
+		dispatch_source_t ds = dm->_as_ds;
+		dispatch_priority_t pri = _dispatch_source_compute_kevent_priority(ds);
+		if (pri) _dispatch_mach_install(dm, pri);
+	}
+
+	// call "super"
+	_dispatch_queue_finalize_activation(dm->_as_dq);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_queue_t
+_dispatch_mach_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags,
+		uint64_t *owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED)
+{
+	dispatch_mach_t dm = dou._dm;
+	dispatch_queue_t retq = NULL;
+	dispatch_queue_t dq = _dispatch_queue_get_current();
+
+	// This function performs all mach channel actions. Each action is
+	// responsible for verifying that it takes place on the appropriate queue.
+	// If the current queue is not the correct queue for this action, the
+	// correct queue will be returned and the invoke will be re-driven on that
+	// queue.
+
+	// The order of tests here in invoke and in wakeup should be consistent.
+
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+	dispatch_queue_t dkq = &_dispatch_mgr_q;
+
+	if (dmrr->du_is_direct) {
+		dkq = dm->do_targetq;
+	}
+
+	if (unlikely(!dm->ds_is_installed)) {
+		// The channel needs to be installed on the kevent queue.
+		if (dq != dkq) {
+			return dkq;
+		}
+		_dispatch_mach_install(dm, _dispatch_get_basepri());
+		_dispatch_perfmon_workitem_inc();
+	}
+
+	if (_dispatch_queue_class_probe(dm)) {
+		if (dq == dm->do_targetq) {
+			retq = _dispatch_queue_serial_drain(dm->_as_dq, flags, owned, NULL);
+		} else {
+			retq = dm->do_targetq;
+		}
+	}
+
+	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
+
+	if (dmsr->dmsr_tail) {
+		bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt &&
+				(dmsr->du_registered || !dmrr->du_is_direct));
+		if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) ||
+				(dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) {
+			// The channel has pending messages to send.
+			if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) {
+				return retq ? retq : &_dispatch_mgr_q;
+			}
+			dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
+			if (dq != &_dispatch_mgr_q) {
+				send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER;
+			}
+			_dispatch_mach_send_invoke(dm, flags, send_flags);
+		}
+	} else if (dqf & DSF_CANCELED) {
+		// The channel has been cancelled and needs to be uninstalled from the
+		// manager queue. After uninstallation, the cancellation handler needs
+		// to be delivered to the target queue.
+		if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
+			// waiting for the delivery of a deferred delete event
+			return retq;
+		}
+		if ((dqf & DSF_STATE_MASK) != DSF_DELETED) {
+			if (dq != &_dispatch_mgr_q) {
+				return retq ? retq : &_dispatch_mgr_q;
+			}
+			_dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL);
+			dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
+			if (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) {
+				// waiting for the delivery of a deferred delete event
+				// or deletion didn't happen because send_invoke couldn't
+				// acquire the send lock
+				return retq;
+			}
+		}
+		if (!dm->dm_cancel_handler_called) {
+			if (dq != dm->do_targetq) {
+				return retq ? retq : dm->do_targetq;
+			}
+			_dispatch_mach_cancel_invoke(dm, flags);
+		}
+	}
+
+	return retq;
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags)
+{
+	_dispatch_queue_class_invoke(dm, flags, _dispatch_mach_invoke2);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_mach_reinstate_max_qos(dispatch_mach_t dm, dispatch_qos_t qos,
+		dispatch_wakeup_flags_t flags)
+{
+	uint64_t old_state, new_state;
+
+	if (!qos) return false;
+	os_atomic_rmw_loop2o(dm, dq_state, old_state, new_state, relaxed, {
+		new_state = _dq_state_merge_qos(old_state, qos);
+		if (new_state == old_state) {
+			os_atomic_rmw_loop_give_up(return false);
+		}
+	});
+	if (_dq_state_drain_locked(new_state)) {
+		_dispatch_queue_class_wakeup_with_override(dm->_as_dq, qos,
+				flags | DISPATCH_WAKEUP_OVERRIDING, new_state);
+		return true;
+	}
+	return false;
+}
+
+void
+_dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos,
+		dispatch_wakeup_flags_t flags)
+{
+	// This function determines whether the mach channel needs to be invoked.
+	// The order of tests here in probe and in invoke should be consistent.
+
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+	dispatch_queue_wakeup_target_t dkq = DISPATCH_QUEUE_WAKEUP_MGR;
+	dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE;
+	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
+
+	if (dmrr->du_is_direct) {
+		dkq = DISPATCH_QUEUE_WAKEUP_TARGET;
+	}
+
+	if (!dm->ds_is_installed) {
+		// The channel needs to be installed on the kevent queue.
+		tq = dkq;
+		goto done;
+	}
+
+	if (_dispatch_queue_class_probe(dm)) {
+		tq = DISPATCH_QUEUE_WAKEUP_TARGET;
+		goto done;
+	}
+
+	if (_dispatch_lock_is_locked(dmsr->dmsr_state_lock.dul_lock)) {
+		// Sending and uninstallation below require the send lock, the channel
+		// will be woken up when the lock is dropped <rdar://15132939&15203957>
+		if (_dispatch_mach_reinstate_max_qos(dm, qos, flags)) {
+			return;
+		}
+		goto done;
+	}
+
+	if (dmsr->dmsr_tail) {
+		bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt &&
+				(dmsr->du_registered || !dmrr->du_is_direct));
+		if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) ||
+				(dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) {
+			if (unlikely(requires_mgr)) {
+				tq = DISPATCH_QUEUE_WAKEUP_MGR;
+			} else {
+				tq = DISPATCH_QUEUE_WAKEUP_TARGET;
+			}
+		} else if (_dispatch_mach_reinstate_max_qos(dm, qos, flags)) {
+			// can happen when we can't send because the port is full
+			// but we should not lose the override
+			return;
+		}
+	} else if (dqf & DSF_CANCELED) {
+		if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
+			// waiting for the delivery of a deferred delete event
+		} else if ((dqf & DSF_STATE_MASK) != DSF_DELETED) {
+			// The channel needs to be uninstalled from the manager queue
+			tq = DISPATCH_QUEUE_WAKEUP_MGR;
+		} else if (!dm->dm_cancel_handler_called) {
+			// the cancellation handler needs to be delivered to the target
+			// queue.
+			tq = DISPATCH_QUEUE_WAKEUP_TARGET;
+		}
+	}
+
+done:
+	if (tq) {
+		return _dispatch_queue_class_wakeup(dm->_as_dq, qos, flags, tq);
+	} else if (qos) {
+		return _dispatch_queue_class_override_drainer(dm->_as_dq, qos, flags);
+	} else if (flags & DISPATCH_WAKEUP_CONSUME) {
+		return _dispatch_release_tailcall(dm);
+	}
+}
+
+static void
+_dispatch_mach_sigterm_invoke(void *ctx)
+{
+	dispatch_mach_t dm = ctx;
+	if (!(dm->dq_atomic_flags & DSF_CANCELED)) {
+		dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+		_dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
+				DISPATCH_MACH_SIGTERM_RECEIVED, NULL, 0,
+				dmrr->dmrr_handler_func);
+	}
+}
+
+void
+_dispatch_xpc_sigterm_merge(dispatch_unote_t du, uint32_t flags,
+		uintptr_t data DISPATCH_UNUSED, pthread_priority_t pp)
+{
+	uint32_t options = 0;
+	if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) &&
+			!(flags & EV_DELETE)) {
+		options = DU_UNREGISTER_IMMEDIATE_DELETE;
+	} else {
+		dispatch_assert((flags & EV_ONESHOT) && (flags & EV_DELETE));
+		options = DU_UNREGISTER_ALREADY_DELETED;
+	}
+	_dispatch_unote_unregister(du, options);
+
+	dispatch_mach_t dm = _dispatch_wref2ptr(du._du->du_owner_wref);
+	if (!(dm->dq_atomic_flags & DSF_CANCELED)) {
+		_dispatch_barrier_async_detached_f(dm->_as_dq, dm,
+				_dispatch_mach_sigterm_invoke);
+	} else {
+		dx_wakeup(dm, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_FLUSH);
+	}
+}
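+// Note on the SIGTERM path above: the unote fires once and is unregistered
+// with the option matching its kevent state; the event is then either
+// delivered as DISPATCH_MACH_SIGTERM_RECEIVED via a detached barrier on the
+// channel's queue, or, for an already-canceled channel, converted into a
+// wakeup at the event's QoS so teardown can proceed.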
+
+
+#pragma mark -
+#pragma mark dispatch_mach_msg_t
+
+dispatch_mach_msg_t
+dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size,
+		dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr)
+{
+	if (slowpath(size < sizeof(mach_msg_header_t)) ||
+			slowpath(destructor && !msg)) {
+		DISPATCH_CLIENT_CRASH(size, "Empty message");
+	}
+	dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg),
+			sizeof(struct dispatch_mach_msg_s) +
+			(destructor ? 0 : size - sizeof(dmsg->dmsg_msg)));
+	if (destructor) {
+		dmsg->dmsg_msg = msg;
+	} else if (msg) {
+		memcpy(dmsg->dmsg_buf, msg, size);
+	}
+	dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
+	dmsg->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
+	dmsg->dmsg_destructor = destructor;
+	dmsg->dmsg_size = size;
+	if (msg_ptr) {
+		*msg_ptr = _dispatch_mach_msg_get_msg(dmsg);
+	}
+	return dmsg;
+}
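+#if 0
+// Illustrative sketch only, not part of this patch: with the default
+// destructor the message bytes are stored inline and a pointer to the
+// header comes back through the last argument. The message id below is a
+// hypothetical example value.
+	mach_msg_header_t *hdr;
+	dispatch_mach_msg_t dmsg = dispatch_mach_msg_create(NULL,
+			sizeof(mach_msg_header_t), DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT,
+			&hdr);
+	hdr->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
+	hdr->msgh_size = sizeof(mach_msg_header_t);
+	hdr->msgh_id = 0x12345678; // hypothetical
+	// ... pass dmsg to dispatch_mach_send(), then balance the create:
+	dispatch_release(dmsg);
+#endif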
+
+void
+_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg)
+{
+	if (dmsg->dmsg_voucher) {
+		_voucher_release(dmsg->dmsg_voucher);
+		dmsg->dmsg_voucher = NULL;
+	}
+	switch (dmsg->dmsg_destructor) {
+	case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT:
+		break;
+	case DISPATCH_MACH_MSG_DESTRUCTOR_FREE:
+		free(dmsg->dmsg_msg);
+		break;
+	case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: {
+		mach_vm_size_t vm_size = dmsg->dmsg_size;
+		mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg;
+		(void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(),
+				vm_addr, vm_size));
+		break;
+	}}
+}
+
+static inline mach_msg_header_t*
+_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg)
+{
+	return dmsg->dmsg_destructor ? dmsg->dmsg_msg :
+			(mach_msg_header_t*)dmsg->dmsg_buf;
+}
+
+mach_msg_header_t*
+dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr)
+{
+	if (size_ptr) {
+		*size_ptr = dmsg->dmsg_size;
+	}
+	return _dispatch_mach_msg_get_msg(dmsg);
+}
+
+size_t
+_dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz)
+{
+	size_t offset = 0;
+	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
+			dx_kind(dmsg), dmsg);
+	offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, "
+			"refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1);
+	offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, "
+			"msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf);
+	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
+	if (hdr->msgh_id) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ",
+				hdr->msgh_id);
+	}
+	if (hdr->msgh_size) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ",
+				hdr->msgh_size);
+	}
+	if (hdr->msgh_bits) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "bits <l %u, r %u",
+				MACH_MSGH_BITS_LOCAL(hdr->msgh_bits),
+				MACH_MSGH_BITS_REMOTE(hdr->msgh_bits));
+		if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) {
+			offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x",
+					MACH_MSGH_BITS_OTHER(hdr->msgh_bits));
+		}
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ">, ");
+	}
+	if (hdr->msgh_local_port && hdr->msgh_remote_port) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, "
+				"remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port);
+	} else if (hdr->msgh_local_port) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x",
+				hdr->msgh_local_port);
+	} else if (hdr->msgh_remote_port) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x",
+				hdr->msgh_remote_port);
+	} else {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports");
+	}
+	offset += dsnprintf(&buf[offset], bufsiz - offset, " } }");
+	return offset;
+}
+
+#pragma mark -
+#pragma mark dispatch_mig_server
+
+mach_msg_return_t
+dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz,
+		dispatch_mig_callback_t callback)
+{
+	mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT
+		| MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX)
+		| MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER;
+	mach_msg_options_t tmp_options;
+	mig_reply_error_t *bufTemp, *bufRequest, *bufReply;
+	mach_msg_return_t kr = 0;
+	uint64_t assertion_token = 0;
+	uint32_t cnt = 1000; // do not stall out serial queues
+	boolean_t demux_success;
+	bool received = false;
+	size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE;
+	dispatch_source_refs_t dr = ds->ds_refs;
+
+	bufRequest = alloca(rcv_size);
+	bufRequest->RetCode = 0;
+	for (mach_vm_address_t p = mach_vm_trunc_page(
+			(mach_vm_address_t)bufRequest + vm_page_size);
+			p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) {
+		*(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
+	}
+
+	bufReply = alloca(rcv_size);
+	bufReply->Head.msgh_size = 0;
+	for (mach_vm_address_t p = mach_vm_trunc_page(
+			(mach_vm_address_t)bufReply + vm_page_size);
+			p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) {
+		*(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
+	}
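+	// Both probe loops above touch one byte per page so that, if the alloca
+	// region extends into a guard page, the fault happens here rather than
+	// deep inside mach_msg() while a message is being copied out.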
+
+#if DISPATCH_DEBUG
+	options |= MACH_RCV_LARGE; // rdar://problem/8422992
+#endif
+	tmp_options = options;
+	// XXX FIXME -- change this to not starve out the target queue
+	for (;;) {
+		if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) {
+			options &= ~MACH_RCV_MSG;
+			tmp_options &= ~MACH_RCV_MSG;
+
+			if (!(tmp_options & MACH_SEND_MSG)) {
+				goto out;
+			}
+		}
+		kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size,
+				(mach_msg_size_t)rcv_size, (mach_port_t)dr->du_ident, 0, 0);
+
+		tmp_options = options;
+
+		if (slowpath(kr)) {
+			switch (kr) {
+			case MACH_SEND_INVALID_DEST:
+			case MACH_SEND_TIMED_OUT:
+				if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
+					mach_msg_destroy(&bufReply->Head);
+				}
+				break;
+			case MACH_RCV_TIMED_OUT:
+				// Don't return an error if a message was sent this time or
+				// a message was successfully received previously
+				// rdar://problems/7363620&7791738
+				if (bufReply->Head.msgh_remote_port || received) {
+					kr = MACH_MSG_SUCCESS;
+				}
+				break;
+			case MACH_RCV_INVALID_NAME:
+				break;
+#if DISPATCH_DEBUG
+			case MACH_RCV_TOO_LARGE:
+				// receive messages that are too large and log their id and size
+				// rdar://problem/8422992
+				tmp_options &= ~MACH_RCV_LARGE;
+				size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE;
+				void *large_buf = malloc(large_size);
+				if (large_buf) {
+					rcv_size = large_size;
+					bufReply = large_buf;
+				}
+				if (!mach_msg(&bufReply->Head, tmp_options, 0,
+						(mach_msg_size_t)rcv_size,
+						(mach_port_t)dr->du_ident, 0, 0)) {
+					_dispatch_log("BUG in libdispatch client: "
+							"dispatch_mig_server received message larger than "
+							"requested size %zd: id = 0x%x, size = %d",
+							maxmsgsz, bufReply->Head.msgh_id,
+							bufReply->Head.msgh_size);
+				}
+				if (large_buf) {
+					free(large_buf);
+				}
+				// fall through
+#endif
+			default:
+				_dispatch_bug_mach_client(
+						"dispatch_mig_server: mach_msg() failed", kr);
+				break;
+			}
+			goto out;
+		}
+
+		if (!(tmp_options & MACH_RCV_MSG)) {
+			goto out;
+		}
+
+		if (assertion_token) {
+#if DISPATCH_USE_IMPORTANCE_ASSERTION
+			int r = proc_importance_assertion_complete(assertion_token);
+			(void)dispatch_assume_zero(r);
+#endif
+			assertion_token = 0;
+		}
+		received = true;
+
+		bufTemp = bufRequest;
+		bufRequest = bufReply;
+		bufReply = bufTemp;
+
+#if DISPATCH_USE_IMPORTANCE_ASSERTION
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+		int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head,
+				NULL, &assertion_token);
+		if (r && slowpath(r != EIO)) {
+			(void)dispatch_assume_zero(r);
+		}
+#pragma clang diagnostic pop
+#endif
+		_voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head));
+		demux_success = callback(&bufRequest->Head, &bufReply->Head);
+
+		if (!demux_success) {
+			// destroy the request - but not the reply port
+			bufRequest->Head.msgh_remote_port = 0;
+			mach_msg_destroy(&bufRequest->Head);
+		} else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
+			// if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode
+			// is present
+			if (slowpath(bufReply->RetCode)) {
+				if (bufReply->RetCode == MIG_NO_REPLY) {
+					continue;
+				}
+
+				// destroy the request - but not the reply port
+				bufRequest->Head.msgh_remote_port = 0;
+				mach_msg_destroy(&bufRequest->Head);
+			}
+		}
+
+		if (bufReply->Head.msgh_remote_port) {
+			tmp_options |= MACH_SEND_MSG;
+			if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) !=
+					MACH_MSG_TYPE_MOVE_SEND_ONCE) {
+				tmp_options |= MACH_SEND_TIMEOUT;
+			}
+		}
+	}
+
+out:
+	if (assertion_token) {
+#if DISPATCH_USE_IMPORTANCE_ASSERTION
+		int r = proc_importance_assertion_complete(assertion_token);
+		(void)dispatch_assume_zero(r);
+#endif
+	}
+
+	return kr;
+}
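+#if 0
+// Illustrative usage sketch only, not part of this patch: driving
+// dispatch_mig_server() from a MACH_RECV source. my_service_server stands in
+// for a mig(1)-generated demux routine, and 2048 is a hypothetical maximum
+// request size for the service.
+extern boolean_t my_service_server(mach_msg_header_t *request,
+		mach_msg_header_t *reply);
+
+static dispatch_source_t
+my_mig_source_create(mach_port_t recv_port)
+{
+	dispatch_source_t ds = dispatch_source_create(
+			DISPATCH_SOURCE_TYPE_MACH_RECV, recv_port, 0,
+			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0));
+	dispatch_source_set_event_handler(ds, ^{
+		// drain all messages currently queued on the receive right
+		(void)dispatch_mig_server(ds, 2048, my_service_server);
+	});
+	dispatch_resume(ds);
+	return ds;
+}
+#endif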
+
+#pragma mark -
+#pragma mark dispatch_mach_debug
+
+static size_t
+_dispatch_mach_debug_attr(dispatch_mach_t dm, char *buf, size_t bufsiz)
+{
+	dispatch_queue_t target = dm->do_targetq;
+	dispatch_mach_send_refs_t dmsr = dm->dm_send_refs;
+	dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+
+	return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, "
+			"send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, "
+			"send state = %016llx, disconnected = %d, canceled = %d ",
+			target && target->dq_label ? target->dq_label : "", target,
+			(mach_port_t)dmrr->du_ident, dmsr->dmsr_send,
+			(mach_port_t)dmsr->du_ident,
+			dmsr->dmsr_notification_armed ? " (armed)" : "",
+			dmsr->dmsr_checkin_port, dmsr->dmsr_checkin ? " (pending)" : "",
+			dmsr->dmsr_state, dmsr->dmsr_disconnect_cnt,
+			(bool)(dm->dq_atomic_flags & DSF_CANCELED));
+}
+
+size_t
+_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz)
+{
+	size_t offset = 0;
+	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
+			dm->dq_label && !dm->dm_cancel_handler_called ? dm->dq_label :
+			dx_kind(dm), dm);
+	offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset);
+	offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset);
+	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
+	return offset;
+}
+
+#endif /* HAVE_MACH */
diff --git a/src/mach_internal.h b/src/mach_internal.h
new file mode 100644
index 0000000..507dd2a
--- /dev/null
+++ b/src/mach_internal.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_MACH_INTERNAL__
+#define __DISPATCH_MACH_INTERNAL__
+#if HAVE_MACH
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include <dispatch/dispatch.h> instead of this file directly."
+#include <dispatch/base.h> // for HeaderDoc
+#endif
+
+// NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t
+//       bit values must not overlap, as they share the same kevent fflags!
+
+/*!
+ * @enum dispatch_source_mach_send_flags_t
+ *
+ * @constant DISPATCH_MACH_SEND_DELETED
+ * Port-deleted notification. Disabled for source registration.
+ */
+enum {
+	DISPATCH_MACH_SEND_DELETED = 0x4,
+};
+/*!
+ * @enum dispatch_source_mach_recv_flags_t
+ *
+ * @constant DISPATCH_MACH_RECV_MESSAGE
+ * Receive right has pending messages
+ */
+enum {
+	DISPATCH_MACH_RECV_MESSAGE = 0x2,
+};
+
+
+DISPATCH_CLASS_DECL(mach);
+DISPATCH_CLASS_DECL(mach_msg);
+
+#if DISPATCH_PURE_C
+struct dispatch_mach_s {
+	DISPATCH_SOURCE_HEADER(mach);
+	dispatch_mach_send_refs_t dm_send_refs;
+	dispatch_xpc_term_refs_t dm_xpc_term_refs;
+} DISPATCH_ATOMIC64_ALIGN;
+
+struct dispatch_mach_msg_s {
+	DISPATCH_OBJECT_HEADER(mach_msg);
+	union {
+		mach_msg_option_t dmsg_options;
+		mach_error_t dmsg_error;
+	};
+	mach_port_t dmsg_reply;
+	pthread_priority_t dmsg_priority;
+	voucher_t dmsg_voucher;
+	dispatch_mach_msg_destructor_t dmsg_destructor;
+	size_t dmsg_size;
+	union {
+		mach_msg_header_t *dmsg_msg;
+		char dmsg_buf[0];
+	};
+};
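+// Storage rule for the trailing union above: with the default (zero)
+// destructor the message bytes live inline in dmsg_buf and dmsg_size counts
+// them; with any other destructor, dmsg_msg points at caller-provided
+// storage that _dispatch_mach_msg_dispose() later free()s or deallocates.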
+#endif // DISPATCH_PURE_C
+
+dispatch_source_t
+_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp,
+		const struct dispatch_continuation_s *dc);
+
+void _dispatch_mach_dispose(dispatch_mach_t dm);
+void _dispatch_mach_finalize_activation(dispatch_mach_t dm);
+void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags);
+void _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos,
+		dispatch_wakeup_flags_t flags);
+size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz);
+void _dispatch_mach_merge_notification(dispatch_unote_t du,
+		uint32_t flags, uintptr_t data, pthread_priority_t pp);
+void _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags,
+		mach_msg_header_t *msg, mach_msg_size_t msgsz);
+void _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags,
+		mach_msg_header_t *msg, mach_msg_size_t msgsz);
+void _dispatch_xpc_sigterm_merge(dispatch_unote_t du, uint32_t flags,
+		uintptr_t data, pthread_priority_t pp);
+
+void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg);
+void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg,
+		dispatch_invoke_flags_t flags);
+size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf,
+		size_t bufsiz);
+
+void _dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc,
+		dispatch_invoke_flags_t flags);
+void _dispatch_mach_barrier_invoke(dispatch_continuation_t dc,
+		dispatch_invoke_flags_t flags);
+
+#endif // HAVE_MACH
+#endif /* __DISPATCH_MACH_INTERNAL__ */
diff --git a/src/object.c b/src/object.c
index 1928df5..faca98b 100644
--- a/src/object.c
+++ b/src/object.c
@@ -240,7 +240,7 @@
 	} else if (dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT &&
 			!slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) {
 		if (slowpath(!tq)) {
-			tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false);
+			tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
 		}
 		_dispatch_object_set_target_queue_inline(dou._do, tq);
 	}
diff --git a/src/object.m b/src/object.m
index 323c98b..59758a1 100644
--- a/src/object.m
+++ b/src/object.m
@@ -233,7 +233,7 @@
 	NSUInteger offset = 0;
 	NSString *desc = [dou debugDescription];
 	[desc getBytes:buf maxLength:bufsiz-1 usedLength:&offset
-			encoding:NSUTF8StringEncoding options:0
+			encoding:NSUTF8StringEncoding options:(NSStringEncodingConversionOptions)0
 			range:NSMakeRange(0, [desc length]) remainingRange:NULL];
 	if (offset) buf[offset] = 0;
 	return offset;
@@ -263,9 +263,9 @@
 	} else {
 		strlcpy(buf, dx_kind(obj), sizeof(buf));
 	}
-	return [nsstring stringWithFormat:
-			[nsstring stringWithUTF8String:"<%s: %s>"],
-			class_getName([self class]), buf];
+	NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"];
+	if (!format) return nil;
+	return [nsstring stringWithFormat:format, class_getName([self class]), buf];
 }
 
 @end
@@ -277,9 +277,10 @@
 - (NSString *)description {
 	Class nsstring = objc_lookUpClass("NSString");
 	if (!nsstring) return nil;
-	return [nsstring stringWithFormat:
-			[nsstring stringWithUTF8String:"<%s: %s[%p]>"],
-			class_getName([self class]), dispatch_queue_get_label(self), self];
+	NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"];
+	if (!format) return nil;
+	return [nsstring stringWithFormat:format, class_getName([self class]),
+			dispatch_queue_get_label(self), self];
 }
 
 - (void)_xref_dispose {
@@ -364,9 +365,9 @@
 	if (!nsstring) return nil;
 	char buf[2048];
 	_voucher_debug(self, buf, sizeof(buf));
-	return [nsstring stringWithFormat:
-			[nsstring stringWithUTF8String:"<%s: %s>"],
-			class_getName([self class]), buf];
+	NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"];
+	if (!format) return nil;
+	return [nsstring stringWithFormat:format, class_getName([self class]), buf];
 }
 
 @end
diff --git a/src/object_internal.h b/src/object_internal.h
index 80bb102..3f936da 100644
--- a/src/object_internal.h
+++ b/src/object_internal.h
@@ -184,7 +184,7 @@
 #define DISPATCH_QUEUEABLE_VTABLE_HEADER(x) \
 	DISPATCH_INVOKABLE_VTABLE_HEADER(x); \
 	void (*const do_wakeup)(struct x##_s *, \
-			pthread_priority_t, dispatch_wakeup_flags_t); \
+			dispatch_qos_t, dispatch_wakeup_flags_t); \
 	void (*const do_dispose)(struct x##_s *)
 
 #define DISPATCH_OBJECT_VTABLE_HEADER(x) \
@@ -241,21 +241,12 @@
 	// involved before dx_wakeup returns
 	DISPATCH_WAKEUP_FLUSH					= 0x00000002,
 
-	// A slow waiter was just enqueued
-	DISPATCH_WAKEUP_SLOW_WAITER				= 0x00000004,
+	// The caller desires to apply an override on the object being woken up.
+	// When this flag is passed, the qos passed to dx_wakeup() should not be 0
+	DISPATCH_WAKEUP_OVERRIDING              = 0x00000004,
 
-	// The caller desires to apply an override on the object being woken up
-	// and has already adjusted the `oq_override` field. When this flag is
-	// passed, the priority passed to dx_wakeup() should not be 0
-	DISPATCH_WAKEUP_OVERRIDING              = 0x00000008,
-
-	// At the time this queue was woken up it had an override that must be
-	// preserved (used to solve a race with _dispatch_queue_drain_try_unlock())
-	DISPATCH_WAKEUP_WAS_OVERRIDDEN          = 0x00000010,
-
-#define _DISPATCH_WAKEUP_OVERRIDE_BITS \
-		((dispatch_wakeup_flags_t)(DISPATCH_WAKEUP_OVERRIDING | \
-		DISPATCH_WAKEUP_WAS_OVERRIDDEN))
+	// This wakeup is caused by a handoff from a slow waiter.
+	DISPATCH_WAKEUP_WAITER_HANDOFF          = 0x00000008,
 );
 
 DISPATCH_ENUM(dispatch_invoke_flags, uint32_t,
@@ -410,38 +401,30 @@
 
 #if OS_OBJECT_HAVE_OBJC1
 #define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \
-	struct dispatch_object_s *volatile ns##_items_head; \
-	unsigned long ns##_serialnum; \
-	union { \
-		uint64_t volatile __state_field__; \
-		DISPATCH_STRUCT_LITTLE_ENDIAN_2( \
+	DISPATCH_UNION_LE(uint64_t volatile __state_field__, \
 			dispatch_lock __state_field__##_lock, \
 			uint32_t __state_field__##_bits \
-		); \
-	}; /* needs to be 64-bit aligned */ \
-	/* LP64 global queue cacheline boundary */ \
+	) DISPATCH_ATOMIC64_ALIGN; \
+	struct dispatch_object_s *volatile ns##_items_head; \
+	unsigned long ns##_serialnum; \
 	const char *ns##_label; \
 	voucher_t ns##_override_voucher; \
-	dispatch_priority_t ns##_priority; \
-	dispatch_priority_t volatile ns##_override; \
-	struct dispatch_object_s *volatile ns##_items_tail
+	struct dispatch_object_s *volatile ns##_items_tail; \
+	dispatch_priority_t ns##_priority
 #else
 #define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \
 	struct dispatch_object_s *volatile ns##_items_head; \
-	union { \
-		uint64_t volatile __state_field__; \
-		DISPATCH_STRUCT_LITTLE_ENDIAN_2( \
+	DISPATCH_UNION_LE(uint64_t volatile __state_field__, \
 			dispatch_lock __state_field__##_lock, \
 			uint32_t __state_field__##_bits \
-		); \
-	}; /* needs to be 64-bit aligned */ \
+	) DISPATCH_ATOMIC64_ALIGN; \
 	/* LP64 global queue cacheline boundary */ \
 	unsigned long ns##_serialnum; \
 	const char *ns##_label; \
 	voucher_t ns##_override_voucher; \
-	dispatch_priority_t ns##_priority; \
-	dispatch_priority_t volatile ns##_override; \
-	struct dispatch_object_s *volatile ns##_items_tail
+	struct dispatch_object_s *volatile ns##_items_tail; \
+	dispatch_priority_t ns##_priority
+	/* LP64: 32bit hole */
 #endif
 
 OS_OBJECT_INTERNAL_CLASS_DECL(os_mpsc_queue, object,
diff --git a/src/once.c b/src/once.c
index d7d6a8e..b2e34ec 100644
--- a/src/once.c
+++ b/src/once.c
@@ -63,61 +63,9 @@
 		dow.dow_thread = _dispatch_tid_self();
 		_dispatch_client_callout(ctxt, func);
 
-		// The next barrier must be long and strong.
-		//
-		// The scenario: SMP systems with weakly ordered memory models
-		// and aggressive out-of-order instruction execution.
-		//
-		// The problem:
-		//
-		// The dispatch_once*() wrapper macro causes the callee's
-		// instruction stream to look like this (pseudo-RISC):
-		//
-		//      load r5, pred-addr
-		//      cmpi r5, -1
-		//      beq  1f
-		//      call dispatch_once*()
-		//      1f:
-		//      load r6, data-addr
-		//
-		// May be re-ordered like so:
-		//
-		//      load r6, data-addr
-		//      load r5, pred-addr
-		//      cmpi r5, -1
-		//      beq  1f
-		//      call dispatch_once*()
-		//      1f:
-		//
-		// Normally, a barrier on the read side is used to workaround
-		// the weakly ordered memory model. But barriers are expensive
-		// and we only need to synchronize once! After func(ctxt)
-		// completes, the predicate will be marked as "done" and the
-		// branch predictor will correctly skip the call to
-		// dispatch_once*().
-		//
-		// A far faster alternative solution: Defeat the speculative
-		// read-ahead of peer CPUs.
-		//
-		// Modern architectures will throw away speculative results
-		// once a branch mis-prediction occurs. Therefore, if we can
-		// ensure that the predicate is not marked as being complete
-		// until long after the last store by func(ctxt), then we have
-		// defeated the read-ahead of peer CPUs.
-		//
-		// In other words, the last "store" by func(ctxt) must complete
-		// and then N cycles must elapse before ~0l is stored to *val.
-		// The value of N is whatever is sufficient to defeat the
-		// read-ahead mechanism of peer CPUs.
-		//
-		// On some CPUs, the most fully synchronizing instruction might
-		// need to be issued.
-
-		os_atomic_maximally_synchronizing_barrier();
-		// above assumed to contain release barrier
-		next = os_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed);
+		next = (_dispatch_once_waiter_t)_dispatch_once_xchg_done(val);
 		while (next != tail) {
-			_dispatch_wait_until(tmp = (_dispatch_once_waiter_t)next->dow_next);
+			tmp = (_dispatch_once_waiter_t)_dispatch_wait_until(next->dow_next);
 			event = &next->dow_event;
 			next = tmp;
 			_dispatch_thread_event_signal(event);
diff --git a/src/queue.c b/src/queue.c
index a08f21b..ad3d979 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -20,7 +20,7 @@
 
 #include "internal.h"
 #if HAVE_MACH
-#include "protocol.h"
+#include "protocol.h" // _dispatch_send_wakeup_runloop_thread
 #endif
 
 #if (!HAVE_PTHREAD_WORKQUEUES || DISPATCH_DEBUG) && \
@@ -30,19 +30,11 @@
 #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_ENABLE_THREAD_POOL
 #define DISPATCH_USE_PTHREAD_POOL 1
 #endif
-#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || DISPATCH_DEBUG) \
-		&& !defined(DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK)
-#define DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK 1
-#endif
-#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK && \
+#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || DISPATCH_DEBUG) && \
 		!HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \
 		!defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK)
 #define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1
 #endif
-#if HAVE_PTHREAD_WORKQUEUE_QOS && !DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-#undef HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
-#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 0
-#endif
 #if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \
 		!DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
 #define pthread_workqueue_t void*
@@ -59,6 +51,8 @@
 static void _dispatch_context_cleanup(void *ctxt);
 static void _dispatch_non_barrier_complete(dispatch_queue_t dq);
 static inline void _dispatch_global_queue_poke(dispatch_queue_t dq);
+static void _dispatch_queue_push_slow_waiter(dispatch_queue_t dq,
+		dispatch_object_t dou);
 #if HAVE_PTHREAD_WORKQUEUES
 static void _dispatch_worker_thread4(void *context);
 #if HAVE_PTHREAD_WORKQUEUE_QOS
@@ -76,7 +70,7 @@
 #if DISPATCH_COCOA_COMPAT
 static dispatch_once_t _dispatch_main_q_handle_pred;
 static void _dispatch_runloop_queue_poke(dispatch_queue_t dq,
-		pthread_priority_t pp, dispatch_wakeup_flags_t flags);
+		dispatch_qos_t qos, dispatch_wakeup_flags_t flags);
 static void _dispatch_runloop_queue_handle_init(void *ctxt);
 static void _dispatch_runloop_queue_handle_dispose(dispatch_queue_t dq);
 #endif
@@ -185,7 +179,7 @@
 static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = {
 	[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE,
+		.dgq_qos = QOS_CLASS_MAINTENANCE,
 		.dgq_wq_priority = WORKQ_BG_PRIOQUEUE,
 		.dgq_wq_options = 0,
 #endif
@@ -196,7 +190,7 @@
 	}}},
 	[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE,
+		.dgq_qos = QOS_CLASS_MAINTENANCE,
 		.dgq_wq_priority = WORKQ_BG_PRIOQUEUE,
 		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
@@ -207,7 +201,7 @@
 	}}},
 	[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND,
+		.dgq_qos = QOS_CLASS_BACKGROUND,
 		.dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL,
 		.dgq_wq_options = 0,
 #endif
@@ -218,7 +212,7 @@
 	}}},
 	[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND,
+		.dgq_qos = QOS_CLASS_BACKGROUND,
 		.dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL,
 		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
@@ -229,7 +223,7 @@
 	}}},
 	[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_UTILITY,
+		.dgq_qos = QOS_CLASS_UTILITY,
 		.dgq_wq_priority = WORKQ_LOW_PRIOQUEUE,
 		.dgq_wq_options = 0,
 #endif
@@ -240,7 +234,7 @@
 	}}},
 	[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_UTILITY,
+		.dgq_qos = QOS_CLASS_UTILITY,
 		.dgq_wq_priority = WORKQ_LOW_PRIOQUEUE,
 		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
@@ -251,7 +245,7 @@
 	}}},
 	[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT,
+		.dgq_qos = QOS_CLASS_DEFAULT,
 		.dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE,
 		.dgq_wq_options = 0,
 #endif
@@ -262,7 +256,7 @@
 	}}},
 	[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT,
+		.dgq_qos = QOS_CLASS_DEFAULT,
 		.dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE,
 		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
@@ -273,7 +267,7 @@
 	}}},
 	[DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED,
+		.dgq_qos = QOS_CLASS_USER_INITIATED,
 		.dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE,
 		.dgq_wq_options = 0,
 #endif
@@ -284,7 +278,7 @@
 	}}},
 	[DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED,
+		.dgq_qos = QOS_CLASS_USER_INITIATED,
 		.dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE,
 		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
@@ -295,7 +289,7 @@
 	}}},
 	[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE,
+		.dgq_qos = QOS_CLASS_USER_INTERACTIVE,
 		.dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL,
 		.dgq_wq_options = 0,
 #endif
@@ -306,7 +300,7 @@
 	}}},
 	[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {{{
 #if HAVE_PTHREAD_WORKQUEUES
-		.dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE,
+		.dgq_qos = QOS_CLASS_USER_INTERACTIVE,
 		.dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL,
 		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
 #endif
@@ -321,62 +315,69 @@
 //         renaming this symbol
 DISPATCH_CACHELINE_ALIGN
 struct dispatch_queue_s _dispatch_root_queues[] = {
-#define _DISPATCH_ROOT_QUEUE_ENTRY(n, ...) \
-	[DISPATCH_ROOT_QUEUE_IDX_##n] = { \
+#define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \
+	((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \
+		DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \
+		DISPATCH_ROOT_QUEUE_IDX_##n##_QOS)
+#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \
+	[_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \
 		DISPATCH_GLOBAL_OBJECT_HEADER(queue_root), \
 		.dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \
 		.do_ctxt = &_dispatch_root_queue_contexts[ \
-				DISPATCH_ROOT_QUEUE_IDX_##n], \
-		.dq_width = DISPATCH_QUEUE_WIDTH_POOL, \
+				_DISPATCH_ROOT_QUEUE_IDX(n, flags)], \
+		.dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \
 		.dq_override_voucher = DISPATCH_NO_VOUCHER, \
-		.dq_override = DISPATCH_SATURATED_OVERRIDE, \
+		.dq_priority = _dispatch_priority_make(DISPATCH_QOS_##n, 0) | flags | \
+				DISPATCH_PRIORITY_FLAG_ROOTQUEUE | \
+				DISPATCH_PRIORITY_SATURATED_OVERRIDE, \
 		__VA_ARGS__ \
 	}
-	_DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS,
+	_DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0,
 		.dq_label = "com.apple.root.maintenance-qos",
 		.dq_serialnum = 4,
 	),
-	_DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS_OVERCOMMIT,
+	_DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
 		.dq_label = "com.apple.root.maintenance-qos.overcommit",
 		.dq_serialnum = 5,
 	),
-	_DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS,
+	_DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0,
 		.dq_label = "com.apple.root.background-qos",
 		.dq_serialnum = 6,
 	),
-	_DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS_OVERCOMMIT,
+	_DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
 		.dq_label = "com.apple.root.background-qos.overcommit",
 		.dq_serialnum = 7,
 	),
-	_DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS,
+	_DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0,
 		.dq_label = "com.apple.root.utility-qos",
 		.dq_serialnum = 8,
 	),
-	_DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS_OVERCOMMIT,
+	_DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
 		.dq_label = "com.apple.root.utility-qos.overcommit",
 		.dq_serialnum = 9,
 	),
-	_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS,
+	_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE,
 		.dq_label = "com.apple.root.default-qos",
 		.dq_serialnum = 10,
 	),
-	_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS_OVERCOMMIT,
+	_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT,
+			DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE | DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
 		.dq_label = "com.apple.root.default-qos.overcommit",
 		.dq_serialnum = 11,
 	),
-	_DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS,
+	_DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0,
 		.dq_label = "com.apple.root.user-initiated-qos",
 		.dq_serialnum = 12,
 	),
-	_DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS_OVERCOMMIT,
+	_DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
 		.dq_label = "com.apple.root.user-initiated-qos.overcommit",
 		.dq_serialnum = 13,
 	),
-	_DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS,
+	_DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0,
 		.dq_label = "com.apple.root.user-interactive-qos",
 		.dq_serialnum = 14,
 	),
-	_DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS_OVERCOMMIT,
+	_DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
 		.dq_label = "com.apple.root.user-interactive-qos.overcommit",
 		.dq_serialnum = 15,
 	),
@@ -407,36 +408,6 @@
 };
 #endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
 
-#define DISPATCH_PRIORITY_COUNT 5
-
-enum {
-	// No DISPATCH_PRIORITY_IDX_MAINTENANCE define because there is no legacy
-	// maintenance priority
-	DISPATCH_PRIORITY_IDX_BACKGROUND = 0,
-	DISPATCH_PRIORITY_IDX_NON_INTERACTIVE,
-	DISPATCH_PRIORITY_IDX_LOW,
-	DISPATCH_PRIORITY_IDX_DEFAULT,
-	DISPATCH_PRIORITY_IDX_HIGH,
-};
-
-static qos_class_t _dispatch_priority2qos[] = {
-	[DISPATCH_PRIORITY_IDX_BACKGROUND] = _DISPATCH_QOS_CLASS_BACKGROUND,
-	[DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = _DISPATCH_QOS_CLASS_UTILITY,
-	[DISPATCH_PRIORITY_IDX_LOW] = _DISPATCH_QOS_CLASS_UTILITY,
-	[DISPATCH_PRIORITY_IDX_DEFAULT] = _DISPATCH_QOS_CLASS_DEFAULT,
-	[DISPATCH_PRIORITY_IDX_HIGH] = _DISPATCH_QOS_CLASS_USER_INITIATED,
-};
-
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-static const int _dispatch_priority2wq[] = {
-	[DISPATCH_PRIORITY_IDX_BACKGROUND] = WORKQ_BG_PRIOQUEUE,
-	[DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = WORKQ_NON_INTERACTIVE_PRIOQUEUE,
-	[DISPATCH_PRIORITY_IDX_LOW] = WORKQ_LOW_PRIOQUEUE,
-	[DISPATCH_PRIORITY_IDX_DEFAULT] = WORKQ_DEFAULT_PRIOQUEUE,
-	[DISPATCH_PRIORITY_IDX_HIGH] = WORKQ_HIGH_PRIOQUEUE,
-};
-#endif
-
 #if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
 static struct dispatch_queue_s _dispatch_mgr_root_queue;
 #else
@@ -452,9 +423,10 @@
 	.dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1),
 	.do_targetq = &_dispatch_mgr_root_queue,
 	.dq_label = "com.apple.libdispatch-manager",
-	.dq_width = 1,
+	.dq_atomic_flags = DQF_WIDTH(1),
+	.dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER |
+			DISPATCH_PRIORITY_SATURATED_OVERRIDE,
 	.dq_override_voucher = DISPATCH_NO_VOUCHER,
-	.dq_override = DISPATCH_SATURATED_OVERRIDE,
 	.dq_serialnum = 2,
 };
 
@@ -466,46 +438,16 @@
 	}
 	dispatch_once_f(&_dispatch_root_queues_pred, NULL,
 			_dispatch_root_queues_init_once);
-	qos_class_t qos;
-	switch (priority) {
-#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-	case _DISPATCH_QOS_CLASS_MAINTENANCE:
-		if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]
-				.dq_priority) {
-			// map maintenance to background on old kernel
-			qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND];
-		} else {
-			qos = (qos_class_t)priority;
-		}
-		break;
-#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-	case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
-		qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND];
-		break;
-	case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE:
-		qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_NON_INTERACTIVE];
-		break;
-	case DISPATCH_QUEUE_PRIORITY_LOW:
-		qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_LOW];
-		break;
-	case DISPATCH_QUEUE_PRIORITY_DEFAULT:
-		qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_DEFAULT];
-		break;
-	case DISPATCH_QUEUE_PRIORITY_HIGH:
-		qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH];
-		break;
-	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
-#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-		if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS]
-				.dq_priority) {
-			qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH];
-			break;
-		}
+	dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority);
+#if !HAVE_PTHREAD_WORKQUEUE_QOS
+	if (qos == DISPATCH_QOS_MAINTENANCE) {
+		qos = DISPATCH_QOS_BACKGROUND;
+	} else if (qos == DISPATCH_QOS_USER_INTERACTIVE) {
+		qos = DISPATCH_QOS_USER_INITIATED;
+	}
 #endif
-		// fallthrough
-	default:
-		qos = (qos_class_t)priority;
-		break;
+	if (qos == DISPATCH_QOS_UNSPECIFIED) {
+		return DISPATCH_BAD_INPUT;
 	}
 	return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT);
 }
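+#if 0
+// Illustrative sketch only, not part of this patch: legacy priority
+// constants and QoS classes now funnel through the same dispatch_qos_t
+// mapping, so both calls below return entries of the static root queue
+// table above.
+	dispatch_queue_t a = dispatch_get_global_queue(
+			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
+	dispatch_queue_t b = dispatch_get_global_queue(QOS_CLASS_UTILITY, 0);
+#endif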
@@ -515,7 +457,7 @@
 _dispatch_get_current_queue(void)
 {
 	return _dispatch_queue_get_current() ?:
-			_dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true);
+			_dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
 }
 
 dispatch_queue_t
@@ -625,40 +567,6 @@
 #pragma mark -
 #pragma mark dispatch_init
 
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-pthread_priority_t _dispatch_background_priority;
-pthread_priority_t _dispatch_user_initiated_priority;
-
-static void
-_dispatch_root_queues_init_qos(int supported)
-{
-	pthread_priority_t p;
-	qos_class_t qos;
-	unsigned int i;
-	for (i = 0; i < DISPATCH_PRIORITY_COUNT; i++) {
-		p = _pthread_qos_class_encode_workqueue(_dispatch_priority2wq[i], 0);
-		qos = _pthread_qos_class_decode(p, NULL, NULL);
-		dispatch_assert(qos != _DISPATCH_QOS_CLASS_UNSPECIFIED);
-		_dispatch_priority2qos[i] = qos;
-	}
-	for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
-		qos = _dispatch_root_queue_contexts[i].dgq_qos;
-		if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE &&
-				!(supported & WORKQ_FEATURE_MAINTENANCE)) {
-			continue;
-		}
-		unsigned long flags = i & 1 ? _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0;
-		flags |= _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
-		if (i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS ||
-				i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT) {
-			flags |= _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
-		}
-		p = _pthread_qos_class_encode(qos, 0, flags);
-		_dispatch_root_queues[i].dq_priority = (dispatch_priority_t)p;
-	}
-}
-#endif // HAVE_PTHREAD_WORKQUEUE_QOS
-
 static inline bool
 _dispatch_root_queues_init_workq(int *wq_supported)
 {
@@ -705,7 +613,10 @@
 			result = !r;
 #endif
 		}
-		if (result) _dispatch_root_queues_init_qos(*wq_supported);
+		if (!(*wq_supported & WORKQ_FEATURE_MAINTENANCE)) {
+			DISPATCH_INTERNAL_CRASH(*wq_supported,
+					"QoS Maintenance support required");
+		}
 	}
 #endif // DISPATCH_USE_KEVENT_WORKQUEUE || HAVE_PTHREAD_WORKQUEUE_QOS
 #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
@@ -728,7 +639,7 @@
 			(void)dispatch_assume_zero(r);
 		}
 #endif
-		int i;
+		size_t i;
 		for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
 			pthread_workqueue_t pwq = NULL;
 			dispatch_root_queue_context_t qc;
@@ -784,18 +695,9 @@
 #endif
 	}
 #endif // HAVE_PTHREAD_WORKQUEUES
-#if USE_MACH_SEM
-	// override the default FIFO behavior for the pool semaphores
-	kern_return_t kr = semaphore_create(mach_task_self(),
-			&pqc->dpq_thread_mediator.dsema_port, SYNC_POLICY_LIFO, 0);
-	DISPATCH_VERIFY_MIG(kr);
-	(void)dispatch_assume_zero(kr);
-	(void)dispatch_assume(pqc->dpq_thread_mediator.dsema_port);
-#elif USE_POSIX_SEM
-	/* XXXRW: POSIX semaphores don't support LIFO? */
-	int ret = sem_init(&(pqc->dpq_thread_mediator.dsema_sem), 0, 0);
-	(void)dispatch_assume_zero(ret);
-#endif
+	_dispatch_sema4_t *sema = &pqc->dpq_thread_mediator.dsema_sema;
+	_dispatch_sema4_init(sema, _DSEMA4_POLICY_LIFO);
+	_dispatch_sema4_create(sema, _DSEMA4_POLICY_LIFO);
 }
 #endif // DISPATCH_USE_PTHREAD_POOL
 
@@ -815,7 +717,7 @@
 	_dispatch_fork_becomes_unsafe();
 	if (!_dispatch_root_queues_init_workq(&wq_supported)) {
 #if DISPATCH_ENABLE_THREAD_POOL
-		int i;
+		size_t i;
 		for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
 			bool overcommit = true;
 #if TARGET_OS_EMBEDDED
@@ -840,8 +742,7 @@
 void
 libdispatch_init(void)
 {
-	dispatch_assert(DISPATCH_QUEUE_QOS_COUNT == 6);
-	dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 12);
+	dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 2 * DISPATCH_QOS_MAX);
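+	// i.e. six QoS buckets (maintenance through user-interactive), each
+	// contributing a non-overcommit and an overcommit root queue.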
 
 	dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW ==
 			-DISPATCH_QUEUE_PRIORITY_HIGH);
@@ -849,12 +750,6 @@
 			DISPATCH_ROOT_QUEUE_COUNT);
 	dispatch_assert(countof(_dispatch_root_queue_contexts) ==
 			DISPATCH_ROOT_QUEUE_COUNT);
-	dispatch_assert(countof(_dispatch_priority2qos) ==
-			DISPATCH_PRIORITY_COUNT);
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-	dispatch_assert(countof(_dispatch_priority2wq) ==
-			DISPATCH_PRIORITY_COUNT);
-#endif
 #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
 	dispatch_assert(sizeof(_dispatch_wq2root_queues) /
 			sizeof(_dispatch_wq2root_queues[0][0]) ==
@@ -879,15 +774,9 @@
 
 
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	// 26497968 _dispatch_user_initiated_priority should be set for qos
-	//          propagation to work properly
-	pthread_priority_t p = _pthread_qos_class_encode(qos_class_main(), 0, 0);
-	_dispatch_main_q.dq_priority = (dispatch_priority_t)p;
-	_dispatch_main_q.dq_override = p & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_USER_INITIATED, 0, 0);
-	_dispatch_user_initiated_priority = p;
-	p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_BACKGROUND, 0, 0);
-	_dispatch_background_priority = p;
+	dispatch_qos_t qos = _dispatch_qos_from_qos_class(qos_class_main());
+	dispatch_priority_t pri = _dispatch_priority_make(qos, 0);
+	_dispatch_main_q.dq_priority = _dispatch_priority_with_override_qos(pri, qos);
 #if DISPATCH_DEBUG
 	if (!slowpath(getenv("LIBDISPATCH_DISABLE_SET_QOS"))) {
 		_dispatch_set_qos_class_enabled = 1;
@@ -899,24 +788,20 @@
 	_dispatch_thread_key_create(&__dispatch_tsd_key, _libdispatch_tsd_cleanup);
 #else
 	_dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup);
-	_dispatch_thread_key_create(&dispatch_deferred_items_key,
-			_dispatch_deferred_items_cleanup);
 	_dispatch_thread_key_create(&dispatch_frame_key, _dispatch_frame_cleanup);
-	_dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup);
 	_dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup);
 	_dispatch_thread_key_create(&dispatch_context_key, _dispatch_context_cleanup);
-	_dispatch_thread_key_create(&dispatch_defaultpriority_key, NULL);
 	_dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key,
 			NULL);
-#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
+	_dispatch_thread_key_create(&dispatch_basepri_key, NULL);
+#if DISPATCH_INTROSPECTION
+	_dispatch_thread_key_create(&dispatch_introspection_key , NULL);
+#elif DISPATCH_PERF_MON
 	_dispatch_thread_key_create(&dispatch_bcounter_key, NULL);
 #endif
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-		_dispatch_thread_key_create(&dispatch_sema4_key,
-				_dispatch_thread_semaphore_dispose);
-	}
-#endif
+	_dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup);
+	_dispatch_thread_key_create(&dispatch_deferred_items_key,
+			_dispatch_deferred_items_cleanup);
 #endif
 
 #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707
@@ -938,40 +823,6 @@
 	_dispatch_introspection_init();
 }
 
-#if HAVE_MACH
-static dispatch_once_t _dispatch_mach_host_port_pred;
-static mach_port_t _dispatch_mach_host_port;
-
-static void
-_dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED)
-{
-	kern_return_t kr;
-	mach_port_t mp, mhp = mach_host_self();
-	kr = host_get_host_port(mhp, &mp);
-	DISPATCH_VERIFY_MIG(kr);
-	if (fastpath(!kr)) {
-		// mach_host_self returned the HOST_PRIV port
-		kr = mach_port_deallocate(mach_task_self(), mhp);
-		DISPATCH_VERIFY_MIG(kr);
-		mhp = mp;
-	} else if (kr != KERN_INVALID_ARGUMENT) {
-		(void)dispatch_assume_zero(kr);
-	}
-	if (!fastpath(mhp)) {
-		DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port");
-	}
-	_dispatch_mach_host_port = mhp;
-}
-
-mach_port_t
-_dispatch_get_mach_host_port(void)
-{
-	dispatch_once_f(&_dispatch_mach_host_port_pred, NULL,
-			_dispatch_mach_host_port_init);
-	return _dispatch_mach_host_port;
-}
-#endif
-
 #if DISPATCH_USE_THREAD_LOCAL_STORAGE
 #include <unistd.h>
 #include <sys/syscall.h>
@@ -998,20 +849,20 @@
 {
 	struct dispatch_tsd *tsd = (struct dispatch_tsd*) ctx;
 
+	_tsd_call_cleanup(dispatch_priority_key, NULL);
+
 	_tsd_call_cleanup(dispatch_queue_key, _dispatch_queue_cleanup);
 	_tsd_call_cleanup(dispatch_frame_key, _dispatch_frame_cleanup);
 	_tsd_call_cleanup(dispatch_cache_key, _dispatch_cache_cleanup);
 	_tsd_call_cleanup(dispatch_context_key, _dispatch_context_cleanup);
 	_tsd_call_cleanup(dispatch_pthread_root_queue_observer_hooks_key,
 			NULL);
-	_tsd_call_cleanup(dispatch_defaultpriority_key, NULL);
-#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
+	_tsd_call_cleanup(dispatch_basepri_key, NULL);
+#if DISPATCH_INTROSPECTION
+	_tsd_call_cleanup(dispatch_introspection_key, NULL);
+#elif DISPATCH_PERF_MON
 	_tsd_call_cleanup(dispatch_bcounter_key, NULL);
 #endif
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-	_tsd_call_cleanup(dispatch_sema4_key, _dispatch_thread_semaphore_dispose);
-#endif
-	_tsd_call_cleanup(dispatch_priority_key, NULL);
 	_tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup);
 	_tsd_call_cleanup(dispatch_deferred_items_key,
 			_dispatch_deferred_items_cleanup);
@@ -1034,11 +885,8 @@
 	void *crash = (void *)0x100;
 	size_t i;
 
-#if HAVE_MACH
-	_dispatch_mach_host_port_pred = 0;
-	_dispatch_mach_host_port = MACH_VOUCHER_NULL;
-#endif
 	_voucher_atfork_child();
+	_dispatch_event_loop_atfork_child();
 	if (!_dispatch_is_multithreaded_inline()) {
 		// clear the _PROHIBIT bit if set
 		_dispatch_unsafe_fork = 0;
@@ -1068,13 +916,13 @@
 {
 	qos_class_t qos = (qos_class_t)qos_class;
 	switch (qos) {
-	case _DISPATCH_QOS_CLASS_MAINTENANCE:
-	case _DISPATCH_QOS_CLASS_BACKGROUND:
-	case _DISPATCH_QOS_CLASS_UTILITY:
-	case _DISPATCH_QOS_CLASS_DEFAULT:
-	case _DISPATCH_QOS_CLASS_USER_INITIATED:
-	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
-	case _DISPATCH_QOS_CLASS_UNSPECIFIED:
+	case QOS_CLASS_MAINTENANCE:
+	case QOS_CLASS_BACKGROUND:
+	case QOS_CLASS_UTILITY:
+	case QOS_CLASS_DEFAULT:
+	case QOS_CLASS_USER_INITIATED:
+	case QOS_CLASS_USER_INTERACTIVE:
+	case QOS_CLASS_UNSPECIFIED:
 		break;
 	default:
 		return false;
@@ -1085,20 +933,6 @@
 	return true;
 }
 
-#define DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(qos) \
-		[_DISPATCH_QOS_CLASS_##qos] = DQA_INDEX_QOS_CLASS_##qos
-
-static const
-_dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = {
-	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UNSPECIFIED),
-	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(MAINTENANCE),
-	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(BACKGROUND),
-	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UTILITY),
-	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(DEFAULT),
-	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INITIATED),
-	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INTERACTIVE),
-};
-
 #define DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit) \
 		((overcommit) == _dispatch_queue_attr_overcommit_disabled ? \
 		DQA_INDEX_NON_OVERCOMMIT : \
@@ -1116,10 +950,10 @@
 
 #define DISPATCH_QUEUE_ATTR_PRIO2IDX(prio) (-(prio))
 
-#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (_dispatch_queue_attr_qos2idx[(qos)])
+#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (qos)
 
 static inline dispatch_queue_attr_t
-_dispatch_get_queue_attr(qos_class_t qos, int prio,
+_dispatch_get_queue_attr(dispatch_qos_t qos, int prio,
 		_dispatch_queue_attr_overcommit_t overcommit,
 		dispatch_autorelease_frequency_t frequency,
 		bool concurrent, bool inactive)
@@ -1136,16 +970,16 @@
 dispatch_queue_attr_t
 _dispatch_get_default_queue_attr(void)
 {
-	return _dispatch_get_queue_attr(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0,
+	return _dispatch_get_queue_attr(DISPATCH_QOS_UNSPECIFIED, 0,
 				_dispatch_queue_attr_overcommit_unspecified,
 				DISPATCH_AUTORELEASE_FREQUENCY_INHERIT, false, false);
 }
 
 dispatch_queue_attr_t
 dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa,
-		dispatch_qos_class_t qos_class, int relative_priority)
+		dispatch_qos_class_t qos_class, int relpri)
 {
-	if (!_dispatch_qos_class_valid(qos_class, relative_priority)) {
+	if (!_dispatch_qos_class_valid(qos_class, relpri)) {
 		return DISPATCH_BAD_INPUT;
 	}
 	if (!slowpath(dqa)) {
@@ -1153,8 +987,8 @@
 	} else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
 		DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
 	}
-	return _dispatch_get_queue_attr(qos_class, relative_priority,
-			dqa->dqa_overcommit, dqa->dqa_autorelease_frequency,
+	return _dispatch_get_queue_attr(_dispatch_qos_from_qos_class(qos_class),
+			relpri, dqa->dqa_overcommit, dqa->dqa_autorelease_frequency,
 			dqa->dqa_concurrent, dqa->dqa_inactive);
 }
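+#if 0
+// Illustrative sketch only, not part of this patch: a serial queue at
+// utility QoS, one relative-priority step below the class default; the
+// label is a hypothetical example.
+	dispatch_queue_attr_t attr = dispatch_queue_attr_make_with_qos_class(
+			DISPATCH_QUEUE_SERIAL, QOS_CLASS_UTILITY, -1);
+	dispatch_queue_t q = dispatch_queue_create("com.example.worker", attr);
+#endif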
 
@@ -1166,8 +1000,9 @@
 	} else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
 		DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
 	}
-	return _dispatch_get_queue_attr(dqa->dqa_qos_class,
-			dqa->dqa_relative_priority, dqa->dqa_overcommit,
+	dispatch_priority_t pri = dqa->dqa_qos_and_relpri;
+	return _dispatch_get_queue_attr(_dispatch_priority_qos(pri),
+			_dispatch_priority_relpri(pri), dqa->dqa_overcommit,
 			dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, true);
 }
 
@@ -1180,8 +1015,9 @@
 	} else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
 		DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
 	}
-	return _dispatch_get_queue_attr(dqa->dqa_qos_class,
-			dqa->dqa_relative_priority, overcommit ?
+	dispatch_priority_t pri = dqa->dqa_qos_and_relpri;
+	return _dispatch_get_queue_attr(_dispatch_priority_qos(pri),
+			_dispatch_priority_relpri(pri), overcommit ?
 			_dispatch_queue_attr_overcommit_enabled :
 			_dispatch_queue_attr_overcommit_disabled,
 			dqa->dqa_autorelease_frequency, dqa->dqa_concurrent,
@@ -1205,14 +1041,28 @@
 	} else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
 		DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
 	}
-	return _dispatch_get_queue_attr(dqa->dqa_qos_class,
-			dqa->dqa_relative_priority, dqa->dqa_overcommit,
+	dispatch_priority_t pri = dqa->dqa_qos_and_relpri;
+	return _dispatch_get_queue_attr(_dispatch_priority_qos(pri),
+			_dispatch_priority_relpri(pri), dqa->dqa_overcommit,
 			frequency, dqa->dqa_concurrent, dqa->dqa_inactive);
 }
 
 #pragma mark -
 #pragma mark dispatch_queue_t
 
+void
+dispatch_queue_set_label_nocopy(dispatch_queue_t dq, const char *label)
+{
+	if (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) {
+		return;
+	}
+	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dq);
+	if (unlikely(dqf & DQF_LABEL_NEEDS_FREE)) {
+		DISPATCH_CLIENT_CRASH(dq, "Cannot change label for this queue");
+	}
+	dq->dq_label = label;
+}
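+// Unlike dispatch_queue_create(), nothing is copied here: callers must pass
+// a label that stays valid for the lifetime of the queue (a string literal
+// or other storage that is never freed).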
+
 // skip zero
 // 1 - main_q
 // 2 - mgr_q
@@ -1226,7 +1076,7 @@
 _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
 		dispatch_queue_t tq, bool legacy)
 {
-#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+#if !HAVE_PTHREAD_WORKQUEUE_QOS
 	// Be sure the root queue priorities are set
 	dispatch_once_f(&_dispatch_root_queues_pred, NULL,
 			_dispatch_root_queues_init_once);
@@ -1241,25 +1091,15 @@
 	// Step 1: Normalize arguments (qos, overcommit, tq)
 	//
 
-	qos_class_t qos = dqa->dqa_qos_class;
-#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-	if (qos == _DISPATCH_QOS_CLASS_USER_INTERACTIVE &&
-			!_dispatch_root_queues[
-			DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS].dq_priority) {
-		qos = _DISPATCH_QOS_CLASS_USER_INITIATED;
+	dispatch_qos_t qos = _dispatch_priority_qos(dqa->dqa_qos_and_relpri);
+#if !HAVE_PTHREAD_WORKQUEUE_QOS
+	if (qos == DISPATCH_QOS_USER_INTERACTIVE) {
+		qos = DISPATCH_QOS_USER_INITIATED;
 	}
-#endif
-	bool maintenance_fallback = false;
-#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-	maintenance_fallback = true;
-#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
-	if (maintenance_fallback) {
-		if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE &&
-				!_dispatch_root_queues[
-				DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS].dq_priority) {
-			qos = _DISPATCH_QOS_CLASS_BACKGROUND;
-		}
+	if (qos == DISPATCH_QOS_MAINTENANCE) {
+		qos = DISPATCH_QOS_BACKGROUND;
 	}
+#endif // !HAVE_PTHREAD_WORKQUEUE_QOS
 
 	_dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit;
 	if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) {
@@ -1273,14 +1113,15 @@
 			tq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) {
 		// Handle discrepancies between attr and target queue, attributes win
 		if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
-			if (tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
+			if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) {
 				overcommit = _dispatch_queue_attr_overcommit_enabled;
 			} else {
 				overcommit = _dispatch_queue_attr_overcommit_disabled;
 			}
 		}
-		if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) {
-			tq = _dispatch_get_root_queue_with_overcommit(tq,
+		if (qos == DISPATCH_QOS_UNSPECIFIED) {
+			dispatch_qos_t tq_qos = _dispatch_priority_qos(tq->dq_priority);
+			tq = _dispatch_get_root_queue(tq_qos,
 					overcommit == _dispatch_queue_attr_overcommit_enabled);
 		} else {
 			tq = NULL;
@@ -1292,7 +1133,7 @@
 			DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute "
 					"and use this kind of target queue");
 		}
-		if (qos != _DISPATCH_QOS_CLASS_UNSPECIFIED) {
+		if (qos != DISPATCH_QOS_UNSPECIFIED) {
 			DISPATCH_CLIENT_CRASH(tq, "Cannot specify a QoS attribute "
 					"and use this kind of target queue");
 		}
@@ -1305,10 +1146,9 @@
 		}
 	}
 	if (!tq) {
-		qos_class_t tq_qos = qos == _DISPATCH_QOS_CLASS_UNSPECIFIED ?
-				_DISPATCH_QOS_CLASS_DEFAULT : qos;
-		tq = _dispatch_get_root_queue(tq_qos, overcommit ==
-				_dispatch_queue_attr_overcommit_enabled);
+		tq = _dispatch_get_root_queue(
+				qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos,
+				overcommit == _dispatch_queue_attr_overcommit_enabled);
 		if (slowpath(!tq)) {
 			DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute");
 		}
@@ -1342,6 +1182,9 @@
 		dqf |= DQF_AUTORELEASE_ALWAYS;
 		break;
 	}
+	if (legacy) {
+		dqf |= DQF_LEGACY;
+	}
 	if (label) {
 		const char *tmp = _dispatch_strdup_if_mutable(label);
 		if (tmp != label) {
@@ -1358,13 +1201,13 @@
 	dq->dq_label = label;
 
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	dq->dq_priority = (dispatch_priority_t)_pthread_qos_class_encode(qos,
-			dqa->dqa_relative_priority,
-			overcommit == _dispatch_queue_attr_overcommit_enabled ?
-			_PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0);
+	dq->dq_priority = dqa->dqa_qos_and_relpri;
+	if (overcommit == _dispatch_queue_attr_overcommit_enabled) {
+		dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+	}
 #endif
 	_dispatch_retain(tq);
-	if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) {
+	if (qos == DISPATCH_QOS_UNSPECIFIED) {
 		// legacy way of inheriting the QoS from the target
 		_dispatch_queue_priority_inherit_from_target(dq, tq);
 	}
@@ -1413,7 +1256,7 @@
 		// dispatch_cancel_and_wait may apply overrides in a racy way with
 		// the source cancellation finishing. This race is expensive and not
 		// really worthwhile to resolve since the source becomes dead anyway.
-		dq_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE;
+		dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
 	}
 	if (slowpath(dq_state != initial_state)) {
 		if (_dq_state_drain_locked(dq_state)) {
@@ -1688,18 +1531,19 @@
 	}
 
 	if ((dq_state ^ value) & DISPATCH_QUEUE_IN_BARRIER) {
-		_dispatch_release(dq);
-		return _dispatch_try_lock_transfer_or_wakeup(dq);
+		_dispatch_try_lock_transfer_or_wakeup(dq);
+	} else if (_dq_state_should_wakeup(value)) {
+		// <rdar://problem/14637483>
+		// dependency ordering for dq state changes that were flushed
+		// and not acted upon
+		os_atomic_thread_fence(dependency);
+		dq = os_atomic_force_dependency_on(dq, value);
+		dispatch_qos_t qos = _dispatch_queue_reset_max_qos(dq);
+		// Balancing the retain() done in suspend() for rdar://8181908
+		return dx_wakeup(dq, qos, DISPATCH_WAKEUP_CONSUME);
 	}
 
-	if (_dq_state_should_wakeup(value)) {
-		// <rdar://problem/14637483>
-		// seq_cst wrt state changes that were flushed and not acted upon
-		os_atomic_thread_fence(acquire);
-		pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq,
-				_dispatch_queue_is_thread_bound(dq));
-		return dx_wakeup(dq, pp, DISPATCH_WAKEUP_CONSUME);
-	}
+	// Balancing the retain() done in suspend() for rdar://8181908
 	return _dispatch_release_tailcall(dq);
 
 over_resume:
@@ -1719,19 +1563,13 @@
 }
 
 qos_class_t
-dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relative_priority_ptr)
+dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relpri_ptr)
 {
-	qos_class_t qos = _DISPATCH_QOS_CLASS_UNSPECIFIED;
-	int relative_priority = 0;
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-	pthread_priority_t dqp = dq->dq_priority;
-	if (dqp & _PTHREAD_PRIORITY_INHERIT_FLAG) dqp = 0;
-	qos = _pthread_qos_class_decode(dqp, &relative_priority, NULL);
-#else
-	(void)dq;
-#endif
-	if (relative_priority_ptr) *relative_priority_ptr = relative_priority;
-	return qos;
+	dispatch_qos_class_t qos = _dispatch_priority_qos(dq->dq_priority);
+	if (relpri_ptr) {
+		*relpri_ptr = qos ? _dispatch_priority_relpri(dq->dq_priority) : 0;
+	}
+	return _dispatch_qos_to_qos_class(qos);
 }
 
 static void
@@ -1765,8 +1603,7 @@
 
 	dispatch_queue_flags_t old_dqf, new_dqf;
 	os_atomic_rmw_loop2o(dq, dq_atomic_flags, old_dqf, new_dqf, relaxed, {
-		new_dqf = old_dqf & ~DQF_WIDTH_MASK;
-		new_dqf |= (tmp << DQF_WIDTH_SHIFT);
+		new_dqf = (old_dqf & DQF_FLAGS_MASK) | DQF_WIDTH(tmp);
 	});
 	_dispatch_object_debug(dq, "%s", __func__);
 }
@@ -1830,10 +1667,9 @@
 	dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT &&
 			dq->do_targetq);
 
-	if (slowpath(!tq)) {
+	if (unlikely(!tq)) {
 		bool is_concurrent_q = (dq->dq_width > 1);
-		tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-				!is_concurrent_q);
+		tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, !is_concurrent_q);
 	}
 
 	if (_dispatch_queue_try_inactive_suspend(dq)) {
@@ -1847,6 +1683,11 @@
 				"after it has been activated");
 	}
 
+	if (unlikely(!_dispatch_queue_is_legacy(dq))) {
+		DISPATCH_CLIENT_CRASH(dq, "Cannot change the target of this object "
+				"after it has been activated");
+	}
+
 	unsigned long type = dx_type(dq);
 	switch (type) {
 	case DISPATCH_QUEUE_LEGACY_TYPE:
@@ -1861,11 +1702,6 @@
 		_dispatch_bug_deprecated("Changing the target of a source "
 				"after it has been activated");
 		break;
-
-	case DISPATCH_QUEUE_SERIAL_TYPE:
-	case DISPATCH_QUEUE_CONCURRENT_TYPE:
-		DISPATCH_CLIENT_CRASH(type, "Cannot change the target of this queue "
-				"after it has been activated");
 	default:
 		DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type");
 	}
@@ -1895,9 +1731,10 @@
 	.dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE,
 	.do_ctxt = &_dispatch_mgr_root_queue_context,
 	.dq_label = "com.apple.root.libdispatch-manager",
-	.dq_width = DISPATCH_QUEUE_WIDTH_POOL,
-	.dq_override = DISPATCH_SATURATED_OVERRIDE,
+	.dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL),
 	.dq_override_voucher = DISPATCH_NO_VOUCHER,
+	.dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER |
+			DISPATCH_PRIORITY_SATURATED_OVERRIDE,
 	.dq_serialnum = 3,
 };
 #endif // DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
@@ -1918,12 +1755,12 @@
 #if HAVE_PTHREAD_WORKQUEUE_QOS
 // Must be kept in sync with list of qos classes in sys/qos.h
 static const int _dispatch_mgr_sched_qos2prio[] = {
-	[_DISPATCH_QOS_CLASS_MAINTENANCE] = 4,
-	[_DISPATCH_QOS_CLASS_BACKGROUND] = 4,
-	[_DISPATCH_QOS_CLASS_UTILITY] = 20,
-	[_DISPATCH_QOS_CLASS_DEFAULT] = 31,
-	[_DISPATCH_QOS_CLASS_USER_INITIATED] = 37,
-	[_DISPATCH_QOS_CLASS_USER_INTERACTIVE] = 47,
+	[QOS_CLASS_MAINTENANCE] = 4,
+	[QOS_CLASS_BACKGROUND] = 4,
+	[QOS_CLASS_UTILITY] = 20,
+	[QOS_CLASS_DEFAULT] = 31,
+	[QOS_CLASS_USER_INITIATED] = 37,
+	[QOS_CLASS_USER_INTERACTIVE] = 47,
 };
 #endif // HAVE_PTHREAD_WORKQUEUE_QOS
 
@@ -1943,8 +1780,8 @@
 	(void)dispatch_assume_zero(pthread_attr_getschedparam(attr, &param));
 #if HAVE_PTHREAD_WORKQUEUE_QOS
 	qos_class_t qos = qos_class_main();
-	if (qos == _DISPATCH_QOS_CLASS_DEFAULT) {
-		qos = _DISPATCH_QOS_CLASS_USER_INITIATED; // rdar://problem/17279292
+	if (qos == QOS_CLASS_DEFAULT) {
+		qos = QOS_CLASS_USER_INITIATED; // rdar://problem/17279292
 	}
 	if (qos) {
 		_dispatch_mgr_sched.qos = qos;
@@ -1977,8 +1814,6 @@
 			(void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr,
 					qos, 0));
 		}
-		_dispatch_mgr_q.dq_priority =
-				(dispatch_priority_t)_pthread_qos_class_encode(qos, 0, 0);
 	}
 #endif
 	param.sched_priority = _dispatch_mgr_sched.prio;
@@ -2094,7 +1929,6 @@
 	pthread_priority_t pp = 0;
 	if (qos) {
 		pp = _pthread_qos_class_encode(qos, 0, 0);
-		_dispatch_mgr_q.dq_priority = (dispatch_priority_t)pp;
 	}
 	if (prio > _dispatch_mgr_sched.default_prio) {
 		pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG;
@@ -2142,10 +1976,10 @@
 
 	_dispatch_queue_init(dq, dqf, DISPATCH_QUEUE_WIDTH_POOL, false);
 	dq->dq_label = label;
-	dq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE,
-	dq->dq_override = DISPATCH_SATURATED_OVERRIDE;
+	dq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE;
 	dq->do_ctxt = qc;
 	dq->do_targetq = NULL;
+	dq->dq_priority = DISPATCH_PRIORITY_SATURATED_OVERRIDE;
 
 	pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore);
 	qc->dgq_ctxt = pqc;
@@ -2230,8 +2064,7 @@
 	if (pqc->dpq_thread_configure) {
 		Block_release(pqc->dpq_thread_configure);
 	}
-	dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-			false);
+	dq->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
 #endif
 	if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) {
 		free((void*)dq->dq_label);
@@ -2246,7 +2079,7 @@
 	DISPATCH_QUEUE_HEADER(queue_specific_queue);
 	TAILQ_HEAD(dispatch_queue_specific_head_s,
 			dispatch_queue_specific_s) dqsq_contexts;
-} DISPATCH_QUEUE_ALIGN;
+} DISPATCH_ATOMIC64_ALIGN;
 
 struct dispatch_queue_specific_s {
 	const void *dqs_key;
@@ -2260,12 +2093,11 @@
 _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq)
 {
 	dispatch_queue_specific_t dqs, tmp;
+	dispatch_queue_t rq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
 
 	TAILQ_FOREACH_SAFE(dqs, &dqsq->dqsq_contexts, dqs_list, tmp) {
 		if (dqs->dqs_destructor) {
-			dispatch_async_f(_dispatch_get_root_queue(
-					_DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt,
-					dqs->dqs_destructor);
+			dispatch_async_f(rq, dqs->dqs_ctxt, dqs->dqs_destructor);
 		}
 		free(dqs);
 	}
@@ -2282,8 +2114,7 @@
 	_dispatch_queue_init(dqsq->_as_dq, DQF_NONE,
 			DISPATCH_QUEUE_WIDTH_MAX, false);
 	dqsq->do_xref_cnt = -1;
-	dqsq->do_targetq = _dispatch_get_root_queue(
-			_DISPATCH_QOS_CLASS_USER_INITIATED, true);
+	dqsq->do_targetq = _dispatch_get_root_queue(
+			DISPATCH_QOS_USER_INITIATED, true);
 	dqsq->dq_label = "queue-specific";
 	TAILQ_INIT(&dqsq->dqsq_contexts);
 	if (slowpath(!os_atomic_cmpxchg2o(dq, dq_specific_q, NULL,
@@ -2304,7 +2135,7 @@
 			// Destroy previous context for existing key
 			if (dqs->dqs_destructor) {
 				dispatch_async_f(_dispatch_get_root_queue(
-						_DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt,
+						DISPATCH_QOS_DEFAULT, false), dqs->dqs_ctxt,
 						dqs->dqs_destructor);
 			}
 			if (dqsn->dqs_ctxt) {
@@ -2362,6 +2193,18 @@
 	*ctxtp = NULL;
 }
 
+DISPATCH_ALWAYS_INLINE
+static inline void *
+_dispatch_queue_get_specific_inline(dispatch_queue_t dq, const void *key)
+{
+	void *ctxt = NULL;
+	if (fastpath(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE && dq->dq_specific_q)){
+		ctxt = (void *)key;
+		dispatch_sync_f(dq->dq_specific_q, &ctxt, _dispatch_queue_get_specific);
+	}
+	return ctxt;
+}
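This new inline is shared by dispatch_queue_get_specific() and dispatch_get_specific() below; the public behavior it implements is unchanged. For reference, the API it serves (key and context illustrative):

    #include <dispatch/dispatch.h>

    static char example_key; // only the address of the key matters

    dispatch_queue_t q = dispatch_queue_create("com.example.q", NULL);
    dispatch_queue_set_specific(q, &example_key, (void *)"ctxt", NULL);

    // asks `q` (via its specific queue) directly:
    void *v = dispatch_queue_get_specific(q, &example_key);

    // from a block running on `q`, or on a queue targeting it,
    // dispatch_get_specific(&example_key) walks the target-queue chain.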
+
 DISPATCH_NOINLINE
 void *
 dispatch_queue_get_specific(dispatch_queue_t dq, const void *key)
@@ -2369,13 +2212,7 @@
 	if (slowpath(!key)) {
 		return NULL;
 	}
-	void *ctxt = NULL;
-
-	if (fastpath(dq->dq_specific_q)) {
-		ctxt = (void *)key;
-		dispatch_sync_f(dq->dq_specific_q, &ctxt, _dispatch_queue_get_specific);
-	}
-	return ctxt;
+	return _dispatch_queue_get_specific_inline(dq, key);
 }
 
 DISPATCH_NOINLINE
@@ -2389,12 +2226,8 @@
 	dispatch_queue_t dq = _dispatch_queue_get_current();
 
 	while (slowpath(dq)) {
-		if (slowpath(dq->dq_specific_q)) {
-			ctxt = (void *)key;
-			dispatch_sync_f(dq->dq_specific_q, &ctxt,
-					_dispatch_queue_get_specific);
-			if (ctxt) break;
-		}
+		ctxt = _dispatch_queue_get_specific_inline(dq, key);
+		if (ctxt) break;
 		dq = dq->do_targetq;
 	}
 	return ctxt;
@@ -2442,8 +2275,9 @@
 	if (_dq_state_is_dirty(dq_state)) {
 		offset += dsnprintf(&buf[offset], bufsiz - offset, ", dirty");
 	}
-	if (_dq_state_has_override(dq_state)) {
-		offset += dsnprintf(&buf[offset], bufsiz - offset, ", async-override");
+	dispatch_qos_t qos = _dq_state_max_qos(dq_state);
+	if (qos) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ", max qos %d", qos);
 	}
 	mach_port_t owner = _dq_state_drain_owner(dq_state);
 	if (!_dispatch_queue_is_thread_bound(dq) && owner) {
@@ -2489,34 +2323,37 @@
 }
 #endif
 
-#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
-static OSSpinLock _dispatch_stats_lock;
+#if DISPATCH_PERF_MON
 static struct {
-	uint64_t time_total;
-	uint64_t count_total;
-	uint64_t thread_total;
-} _dispatch_stats[65]; // ffs*/fls*() returns zero when no bits are set
+	uint64_t volatile time_total;
+	uint64_t volatile count_total;
+	uint64_t volatile thread_total;
+} _dispatch_stats[65];
 
-static void
-_dispatch_queue_merge_stats(uint64_t start)
+void
+_dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type)
 {
 	uint64_t delta = _dispatch_absolute_time() - start;
 	unsigned long count;
+	int bucket = 0;
 
 	count = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key);
 	_dispatch_thread_setspecific(dispatch_bcounter_key, NULL);
 
-	int bucket = flsl((long)count);
-
-	// 64-bit counters on 32-bit require a lock or a queue
-	OSSpinLockLock(&_dispatch_stats_lock);
-
-	_dispatch_stats[bucket].time_total += delta;
-	_dispatch_stats[bucket].count_total += count;
-	_dispatch_stats[bucket].thread_total++;
-
-	OSSpinLockUnlock(&_dispatch_stats_lock);
+	if (count == 0) {
+		bucket = 0;
+		if (trace) _dispatch_ktrace1(DISPATCH_PERF_MON_worker_useless, type);
+	} else {
+		bucket = (int)sizeof(count) * CHAR_BIT - __builtin_clzl(count);
+		os_atomic_add(&_dispatch_stats[bucket].count_total, count, relaxed);
+	}
+	os_atomic_add(&_dispatch_stats[bucket].time_total, delta, relaxed);
+	os_atomic_inc(&_dispatch_stats[bucket].thread_total, relaxed);
+	if (trace) {
+		_dispatch_ktrace3(DISPATCH_PERF_MON_worker_thread_end, count, delta,
+				type);
+	}
 }
+
 #endif
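The open-coded bucket expression above replaces flsl(): for a nonzero count it yields the 1-based index of the highest set bit, so work-item counts are histogrammed by power of two, with bucket 0 reserved for threads that found no work. A self-contained check of that equivalence (assumes the same clang/gcc __builtin_clzl used above):

    #include <limits.h>

    // For count > 0, equivalent to flsl(count).
    static int
    perfmon_bucket(unsigned long count)
    {
        return (int)(sizeof(count) * CHAR_BIT) - __builtin_clzl(count);
    }
    // perfmon_bucket(1) == 1, perfmon_bucket(7) == 3, perfmon_bucket(8) == 4;
    // on LP64 the max bucket is 64, which with bucket 0 gives the 65 slots.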
 
 #pragma mark -
@@ -2536,8 +2373,8 @@
 				pflags |= _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND;
 				// when we unbind, overcommitness can flip, so we need to learn
 				// it from the defaultpri, see _dispatch_priority_compute_update
-				pp |= (_dispatch_get_defaultpriority() &
-						_PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
+				pp |= (_dispatch_get_basepri() &
+						DISPATCH_PRIORITY_FLAG_OVERCOMMIT);
 			} else {
 				// else we need to keep the one that is set in the current pri
 				pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
@@ -2606,6 +2443,31 @@
 #pragma mark -
 #pragma mark dispatch_continuation_t
 
+const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = {
+	DC_VTABLE_ENTRY(ASYNC_REDIRECT,
+		.do_kind = "dc-redirect",
+		.do_invoke = _dispatch_async_redirect_invoke),
+#if HAVE_MACH
+	DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN,
+		.do_kind = "dc-mach-send-drain",
+		.do_invoke = _dispatch_mach_send_barrier_drain_invoke),
+	DC_VTABLE_ENTRY(MACH_SEND_BARRIER,
+		.do_kind = "dc-mach-send-barrier",
+		.do_invoke = _dispatch_mach_barrier_invoke),
+	DC_VTABLE_ENTRY(MACH_RECV_BARRIER,
+		.do_kind = "dc-mach-recv-barrier",
+		.do_invoke = _dispatch_mach_barrier_invoke),
+#endif
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	DC_VTABLE_ENTRY(OVERRIDE_STEALING,
+		.do_kind = "dc-override-stealing",
+		.do_invoke = _dispatch_queue_override_invoke),
+	DC_VTABLE_ENTRY(OVERRIDE_OWNING,
+		.do_kind = "dc-override-owning",
+		.do_invoke = _dispatch_queue_override_invoke),
+#endif
+};
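These vtables use the C99 designated-initializer-by-type-constant pattern; a reduced sketch of the same shape (all names here are illustrative, not the DC_VTABLE_ENTRY internals):

    struct ex_vtable {
        const char *kind;
        void (*invoke)(void *obj);
    };

    static void ex_invoke_redirect(void *obj) { (void)obj; /* ... */ }
    static void ex_invoke_override(void *obj) { (void)obj; /* ... */ }

    enum { EX_REDIRECT_TYPE, EX_OVERRIDE_TYPE };

    static const struct ex_vtable ex_vtables[] = {
        [EX_REDIRECT_TYPE] = { .kind = "redirect", .invoke = ex_invoke_redirect },
        [EX_OVERRIDE_TYPE] = { .kind = "override", .invoke = ex_invoke_override },
    };

    // dispatch site: ex_vtables[type].invoke(obj);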
+
 static void
 _dispatch_force_cache_cleanup(void)
 {
@@ -2639,7 +2501,7 @@
 	dc = _dispatch_thread_getspecific(dispatch_cache_key);
 	int cnt;
 	if (!dc || (cnt = dc->dc_cache_cnt -
-			_dispatch_continuation_cache_limit) <= 0){
+			_dispatch_continuation_cache_limit) <= 0) {
 		return;
 	}
 	do {
@@ -2656,11 +2518,12 @@
 		dispatch_object_t dou)
 {
 	dispatch_continuation_t dc = dou._dc;
-	pthread_priority_t pp = dq->dq_override;
+	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+	dispatch_qos_t qos = _dq_state_max_qos(dq_state);
 
 	_dispatch_trace_continuation_pop(dq, dc);
-	if (pp > (dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
-		_dispatch_wqthread_override_start((mach_port_t)dc->dc_data, pp);
+	if (_dispatch_qos_greater_than_pp(qos, dc->dc_priority)) {
+		_dispatch_wqthread_override_start((mach_port_t)dc->dc_data, qos);
 	}
 	_dispatch_thread_event_signal((dispatch_thread_event_t)dc->dc_other);
 	_dispatch_introspection_queue_item_complete(dc);
@@ -2670,18 +2533,7 @@
 static void
 _dispatch_continuation_push(dispatch_queue_t dq, dispatch_continuation_t dc)
 {
-	_dispatch_queue_push(dq, dc,
-			_dispatch_continuation_get_override_priority(dq, dc));
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_continuation_push_sync_slow(dispatch_queue_t dq,
-		dispatch_continuation_t dc)
-{
-	_dispatch_queue_push_inline(dq, dc,
-			_dispatch_continuation_get_override_priority(dq, dc),
-			DISPATCH_WAKEUP_SLOW_WAITER);
+	_dispatch_queue_push(dq, dc, _dispatch_continuation_override_qos(dq, dc));
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -3002,8 +2854,8 @@
 		// neither of us would ever release. Side effect: After a _wait
 		// that times out, subsequent waits will not boost the qos of the
 		// still-running block.
-		dx_wakeup(boost_oq, pp, DISPATCH_WAKEUP_OVERRIDING |
-				DISPATCH_WAKEUP_CONSUME);
+		dx_wakeup(boost_oq, _dispatch_qos_from_pp(pp),
+				DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_CONSUME);
 	}
 
 	mach_port_t boost_th = dbpd->dbpd_thread;
@@ -3199,9 +3051,8 @@
 	// the "right" root queue was stuffed into dc_func
 	dispatch_queue_t assumed_rq = (dispatch_queue_t)dc->dc_func;
 	dispatch_queue_t dq = dc->dc_data, rq, old_dq;
-	struct _dispatch_identity_s di;
+	dispatch_priority_t old_dbp;
 
-	pthread_priority_t op, dp, old_dp;
 
 	if (ctxt_flags) {
 		flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK;
@@ -3210,15 +3061,9 @@
 	old_dq = _dispatch_get_current_queue();
 	if (assumed_rq) {
 		_dispatch_queue_set_current(assumed_rq);
-		_dispatch_root_queue_identity_assume(&di, 0);
-	}
-
-	old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp);
-	op = dq->dq_override;
-	if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
-		_dispatch_wqthread_override_start(_dispatch_tid_self(), op);
-		// Ensure that the root queue sees that this thread was overridden.
-		_dispatch_set_defaultpriority_override();
+		old_dbp = _dispatch_root_queue_identity_assume(0, dq);
+	} else {
+		old_dbp = _dispatch_set_basepri(dq->dq_priority);
 	}
 
 	_dispatch_thread_frame_push(&dtf, dq);
@@ -3227,11 +3072,8 @@
 		_dispatch_continuation_pop(other_dc, dq, flags);
 	});
 	_dispatch_thread_frame_pop(&dtf);
-	if (assumed_rq) {
-		_dispatch_root_queue_identity_restore(&di);
-		_dispatch_queue_set_current(old_dq);
-	}
-	_dispatch_reset_defaultpriority(old_dp);
+	if (assumed_rq) _dispatch_queue_set_current(old_dq);
+	_dispatch_reset_basepri(old_dbp);
 
 	rq = dq->do_targetq;
 	while (slowpath(rq->do_targetq) && rq != old_dq) {
@@ -3270,7 +3112,7 @@
 DISPATCH_NOINLINE
 static void
 _dispatch_async_f_redirect(dispatch_queue_t dq,
-		dispatch_object_t dou, pthread_priority_t pp)
+		dispatch_object_t dou, dispatch_qos_t qos)
 {
 	if (!slowpath(_dispatch_object_is_redirection(dou))) {
 		dou._dc = _dispatch_async_redirect_wrap(dq, dou);
@@ -3292,7 +3134,7 @@
 		dq = dq->do_targetq;
 	}
 
-	_dispatch_queue_push(dq, dou, pp);
+	_dispatch_queue_push(dq, dou, qos);
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -3305,7 +3147,8 @@
 	// by _dispatch_async_f2.
 	// However we want to end up on the root queue matching `dc` qos, so pick up
 	// the current override of `dq` which includes dc's overrde (and maybe more)
-	_dispatch_async_f_redirect(dq, dc, dq->dq_override);
+	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+	_dispatch_async_f_redirect(dq, dc, _dq_state_max_qos(dq_state));
 	_dispatch_introspection_queue_item_complete(dc);
 }
 
@@ -3326,7 +3169,7 @@
 	}
 
 	return _dispatch_async_f_redirect(dq, dc,
-			_dispatch_continuation_get_override_priority(dq, dc));
+			_dispatch_continuation_override_qos(dq, dc));
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -3622,7 +3465,7 @@
 		_dispatch_continuation_voucher_set(&dbsc.dbsc_dc, dq, 0);
 		// save frame linkage for _dispatch_barrier_sync_f_slow_invoke
 		_dispatch_thread_frame_save_state(&dbsc.dbsc_dtf);
-		// thread bound queues cannot mutate their target queue hierarchy
+		// thread-bound queues cannot mutate their target queue hierarchy
 		// so it's fine to look now
 		_dispatch_introspection_barrier_sync_begin(dq, func);
 	}
@@ -3644,14 +3487,17 @@
 				"already owned by current thread");
 	}
 
-	_dispatch_continuation_push_sync_slow(dq, &dbss);
+	_dispatch_queue_push_slow_waiter(dq, &dbss);
 	_dispatch_thread_event_wait(&event); // acquire
 	_dispatch_thread_event_destroy(&event);
-	if (_dispatch_queue_received_override(dq, pp)) {
+	dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+	dispatch_qos_t qos = _dq_state_max_qos(dq_state);
+	if (_dispatch_qos_greater_than_pp(qos, pp)) {
 		// Ensure that the root queue sees that this thread was overridden.
 		// pairs with the _dispatch_wqthread_override_start in
-		// _dispatch_continuation_slow_item_signal
-		_dispatch_set_defaultpriority_override();
+		// _dispatch_continuation_slow_item_signal, which is why it doesn't use
+		// the usual _dq_state_received_override() test.
+		_dispatch_set_basepri_override_qos(qos);
 	}
 
 #if DISPATCH_COCOA_COMPAT
@@ -3728,7 +3574,7 @@
 	}
 	// balanced in d_block_sync_invoke or d_block_wait
 	if (os_atomic_cmpxchg2o(_dispatch_block_get_data(work),
-			dbpd_queue, NULL, dq, relaxed)) {
+			dbpd_queue, NULL, dq->_as_oq, relaxed)) {
 		_dispatch_retain(dq);
 	}
 	if (flags & DISPATCH_BLOCK_BARRIER) {
@@ -3803,7 +3649,8 @@
 		return _dispatch_try_lock_transfer_or_wakeup(dq);
 	}
 	if (!_dq_state_is_runnable(old_state)) {
-		_dispatch_queue_try_wakeup(dq, new_state, 0);
+		_dispatch_queue_try_wakeup(dq, new_state,
+				DISPATCH_WAKEUP_WAITER_HANDOFF);
 	}
 }
 
@@ -3839,14 +3686,17 @@
 				"already owned by current thread");
 	}
 
-	_dispatch_continuation_push_sync_slow(dq, &dc);
+	_dispatch_queue_push_slow_waiter(dq, &dc);
 	_dispatch_thread_event_wait(&event); // acquire
 	_dispatch_thread_event_destroy(&event);
-	if (_dispatch_queue_received_override(dq, pp)) {
+	dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+	dispatch_qos_t qos = _dq_state_max_qos(dq_state);
+	if (_dispatch_qos_greater_than_pp(qos, pp)) {
 		// Ensure that the root queue sees that this thread was overridden.
 		// pairs with the _dispatch_wqthread_override_start in
-		// _dispatch_continuation_slow_item_signal
-		_dispatch_set_defaultpriority_override();
+		// _dispatch_continuation_slow_item_signal, which is why it doesn't use
+		// the usual _dq_state_received_override() test.
+		_dispatch_set_basepri_override_qos(qos);
 	}
 	_dispatch_non_barrier_sync_f_invoke_inline(dq, ctxt, func, pp);
 }
@@ -3999,75 +3849,11 @@
 }
 
 #pragma mark -
-#pragma mark dispatch_after
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
-		void *ctxt, void *handler, bool block)
-{
-	dispatch_source_t ds;
-	uint64_t leeway, delta;
-
-	if (when == DISPATCH_TIME_FOREVER) {
-#if DISPATCH_DEBUG
-		DISPATCH_CLIENT_CRASH(0, "dispatch_after called with 'when' == infinity");
-#endif
-		return;
-	}
-
-	delta = _dispatch_timeout(when);
-	if (delta == 0) {
-		if (block) {
-			return dispatch_async(queue, handler);
-		}
-		return dispatch_async_f(queue, ctxt, handler);
-	}
-	leeway = delta / 10; // <rdar://problem/13447496>
-
-	if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC;
-	if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC;
-
-	// this function can and should be optimized to not use a dispatch source
-	ds = dispatch_source_create(&_dispatch_source_type_after, 0, 0, queue);
-	dispatch_assert(ds);
-
-	dispatch_continuation_t dc = _dispatch_continuation_alloc();
-	if (block) {
-		_dispatch_continuation_init(dc, ds, handler, 0, 0, 0);
-	} else {
-		_dispatch_continuation_init_f(dc, ds, ctxt, handler, 0, 0, 0);
-	}
-	// reference `ds` so that it doesn't show up as a leak
-	dc->dc_data = ds;
-	_dispatch_source_set_event_handler_continuation(ds, dc);
-	dispatch_source_set_timer(ds, when, DISPATCH_TIME_FOREVER, leeway);
-	dispatch_activate(ds);
-}
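Worth recording from the removed implementation: the timer leeway policy was 10% of the remaining delta, clamped to [1ms, 60s]. Worked examples of that clamp (taken from the code deleted above):

    uint64_t leeway = delta / 10; // <rdar://problem/13447496>
    if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC;
    if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC;
    // delta = 5ms   -> raw 0.5ms leeway, clamped up to 1ms
    // delta = 2s    -> 200ms leeway, within bounds
    // delta = 20min -> raw 2min leeway, clamped down to 60s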
-
-DISPATCH_NOINLINE
-void
-dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt,
-		dispatch_function_t func)
-{
-	_dispatch_after(when, queue, ctxt, func, false);
-}
-
-#ifdef __BLOCKS__
-void
-dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
-		dispatch_block_t work)
-{
-	_dispatch_after(when, queue, NULL, work, true);
-}
-#endif
-
-#pragma mark -
 #pragma mark dispatch_queue_wakeup
 
 DISPATCH_NOINLINE
 void
-_dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+_dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags)
 {
 	dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
@@ -4076,9 +3862,9 @@
 		target = DISPATCH_QUEUE_WAKEUP_TARGET;
 	}
 	if (target) {
-		return _dispatch_queue_class_wakeup(dq, pp, flags, target);
-	} else if (pp) {
-		return _dispatch_queue_class_override_drainer(dq, pp, flags);
+		return _dispatch_queue_class_wakeup(dq, qos, flags, target);
+	} else if (qos) {
+		return _dispatch_queue_class_override_drainer(dq, qos, flags);
 	} else if (flags & DISPATCH_WAKEUP_CONSUME) {
 		return _dispatch_release_tailcall(dq);
 	}
@@ -4128,24 +3914,27 @@
 #endif // DISPATCH_COCOA_COMPAT
 
 void
-_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags)
 {
 #if DISPATCH_COCOA_COMPAT
 	if (slowpath(_dispatch_queue_atomic_flags(dq) & DQF_RELEASED)) {
 		// <rdar://problem/14026816>
-		return _dispatch_queue_wakeup(dq, pp, flags);
+		return _dispatch_queue_wakeup(dq, qos, flags);
 	}
 
+	if (flags & DISPATCH_WAKEUP_FLUSH) {
+		os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release);
+	}
 	if (_dispatch_queue_class_probe(dq)) {
-		return _dispatch_runloop_queue_poke(dq, pp, flags);
+		return _dispatch_runloop_queue_poke(dq, qos, flags);
 	}
 
-	pp = _dispatch_queue_reset_override_priority(dq, true);
-	if (pp) {
+	qos = _dispatch_queue_reset_max_qos(dq);
+	if (qos) {
 		mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq);
 		if (_dispatch_queue_class_probe(dq)) {
-			_dispatch_runloop_queue_poke(dq, pp, flags);
+			_dispatch_runloop_queue_poke(dq, qos, flags);
 		}
 		_dispatch_thread_override_end(owner, dq);
 		return;
@@ -4154,26 +3943,25 @@
 		return _dispatch_release_tailcall(dq);
 	}
 #else
-	return _dispatch_queue_wakeup(dq, pp, flags);
+	return _dispatch_queue_wakeup(dq, qos, flags);
 #endif
 }
 
 void
-_dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+_dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags)
 {
 #if DISPATCH_COCOA_COMPAT
 	if (_dispatch_queue_is_thread_bound(dq)) {
-		return _dispatch_runloop_queue_wakeup(dq, pp, flags);
+		return _dispatch_runloop_queue_wakeup(dq, qos, flags);
 	}
 #endif
-	return _dispatch_queue_wakeup(dq, pp, flags);
+	return _dispatch_queue_wakeup(dq, qos, flags);
 }
 
 void
 _dispatch_root_queue_wakeup(dispatch_queue_t dq,
-		pthread_priority_t pp DISPATCH_UNUSED,
-		dispatch_wakeup_flags_t flags)
+		dispatch_qos_t qos DISPATCH_UNUSED, dispatch_wakeup_flags_t flags)
 {
 	if (flags & DISPATCH_WAKEUP_CONSUME) {
 		// see _dispatch_queue_push_set_head
@@ -4194,7 +3982,7 @@
 		return;
 	}
 
-#if TARGET_OS_MAC
+#if HAVE_MACH
 	mach_port_t mp = handle;
 	kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0);
 	switch (kr) {
@@ -4219,27 +4007,38 @@
 
 DISPATCH_NOINLINE
 static void
-_dispatch_runloop_queue_poke(dispatch_queue_t dq,
-		pthread_priority_t pp, dispatch_wakeup_flags_t flags)
+_dispatch_runloop_queue_poke(dispatch_queue_t dq, dispatch_qos_t qos,
+		dispatch_wakeup_flags_t flags)
 {
 	// it's not useful to handle WAKEUP_FLUSH because mach_msg() will have
-	// a release barrier and that when runloop queues stop being thread bound
+	// a release barrier and that when runloop queues stop being thread-bound
 	// they have a non-optional wake-up to start being a "normal" queue
 	// either in _dispatch_runloop_queue_xref_dispose,
 	// or in _dispatch_queue_cleanup2() for the main thread.
+	uint64_t old_state, new_state;
 
 	if (dq == &_dispatch_main_q) {
 		dispatch_once_f(&_dispatch_main_q_handle_pred, dq,
 				_dispatch_runloop_queue_handle_init);
 	}
-	_dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags);
-	if (flags & DISPATCH_WAKEUP_OVERRIDING) {
-		mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq);
+
+	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+		new_state = _dq_state_merge_qos(old_state, qos);
+		if (old_state == new_state) {
+			os_atomic_rmw_loop_give_up(goto no_change);
+		}
+	});
+
+	dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority);
+	if (qos > dq_qos) {
+		mach_port_t owner = _dq_state_drain_owner(new_state);
+		pthread_priority_t pp = _dispatch_qos_to_pp(qos);
 		_dispatch_thread_override_start(owner, pp, dq);
-		if (flags & DISPATCH_WAKEUP_WAS_OVERRIDDEN) {
+		if (_dq_state_max_qos(old_state) > dq_qos) {
 			_dispatch_thread_override_end(owner, dq);
 		}
 	}
+no_change:
 	_dispatch_runloop_queue_class_poke(dq);
 	if (flags & DISPATCH_WAKEUP_CONSUME) {
 		return _dispatch_release_tailcall(dq);
@@ -4253,7 +4052,10 @@
 {
 	dispatch_root_queue_context_t qc = dq->do_ctxt;
 	uint32_t i = n;
-	int r;
+	int r = ENOSYS;
+
+	dispatch_once_f(&_dispatch_root_queues_pred, NULL,
+			_dispatch_root_queues_init_once);
 
 	_dispatch_debug_root_queue(dq, __func__);
 #if HAVE_PTHREAD_WORKQUEUES
@@ -4275,18 +4077,14 @@
 			return;
 		}
 #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
-#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
-		if (!dq->dq_priority) {
-			r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority,
-					qc->dgq_wq_options, (int)i);
-			(void)dispatch_assume_zero(r);
-			return;
-		}
-#endif
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-		r = _pthread_workqueue_addthreads((int)i, dq->dq_priority);
-		(void)dispatch_assume_zero(r);
+		r = _pthread_workqueue_addthreads((int)i,
+				_dispatch_priority_to_pp(dq->dq_priority));
+#elif HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+		r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority,
+				qc->dgq_wq_options, (int)i);
 #endif
+		(void)dispatch_assume_zero(r);
 		return;
 	}
 #endif // HAVE_PTHREAD_WORKQUEUES
@@ -4349,7 +4147,7 @@
 		return;
 	}
 #endif // HAVE_PTHREAD_WORKQUEUES
-	return 	_dispatch_global_queue_poke_slow(dq, n);
+	return _dispatch_global_queue_poke_slow(dq, n);
 }
 
 static inline void
@@ -4431,7 +4229,8 @@
 	while (dq->dq_items_tail) {
 		dc = _dispatch_queue_head(dq);
 		do {
-			if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(dq))) {
+			uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed);
+			if (unlikely(_dq_state_is_suspended(dq_state))) {
 				goto out;
 			}
 			if (unlikely(orig_tq != dq->do_targetq)) {
@@ -4482,7 +4281,6 @@
 			}
 
 			_dispatch_continuation_pop_inline(dc, dq, flags);
-			_dispatch_perfmon_workitem_inc();
 			if (unlikely(dtf.dtf_deferred)) {
 				goto out_with_deferred_compute_owned;
 			}
@@ -4549,6 +4347,41 @@
 }
 
 #if DISPATCH_COCOA_COMPAT
+DISPATCH_NOINLINE
+static void
+_dispatch_main_queue_update_priority_from_thread(void)
+{
+	dispatch_queue_t dq = &_dispatch_main_q;
+	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+	mach_port_t owner = _dq_state_drain_owner(dq_state);
+
+	dispatch_priority_t main_pri =
+			_dispatch_priority_from_pp_strip_flags(_dispatch_get_priority());
+	dispatch_qos_t main_qos = _dispatch_priority_qos(main_pri);
+	dispatch_qos_t max_qos = _dq_state_max_qos(dq_state);
+	dispatch_qos_t old_qos = _dispatch_priority_qos(dq->dq_priority);
+
+	// the main thread QoS was adjusted by someone else, learn the new QoS
+	// and reinitialize _dispatch_main_q.dq_priority
+	dq->dq_priority = _dispatch_priority_with_override_qos(main_pri, main_qos);
+
+	if (old_qos < max_qos && main_qos == DISPATCH_QOS_UNSPECIFIED) {
+		// main thread is opted out of QoS and we had an override
+		return _dispatch_thread_override_end(owner, dq);
+	}
+
+	if (old_qos < max_qos && max_qos <= main_qos) {
+		// main QoS was raised, and we had an override which is now useless
+		return _dispatch_thread_override_end(owner, dq);
+	}
+
+	if (main_qos < max_qos && max_qos <= old_qos) {
+		// main thread QoS was lowered, and we actually need an override
+		pthread_priority_t pp = _dispatch_qos_to_pp(max_qos);
+		return _dispatch_thread_override_start(owner, pp, dq);
+	}
+}
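To summarize the three branches above (with max = _dq_state_max_qos(dq_state), old = the previously stored queue QoS, main = the freshly read thread QoS):
- main unspecified and old < max: the thread opted out of QoS while an override was in place, so the override ends;
- old < max <= main: the thread QoS was raised past the pending max, the override is now useless, so it ends;
- main < max <= old: the thread QoS was lowered below the pending max, so an override at max is started.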
+
 static void
 _dispatch_main_queue_drain(void)
 {
@@ -4559,6 +4392,7 @@
 		return;
 	}
 
+	_dispatch_perfmon_start_notrace();
 	if (!fastpath(_dispatch_queue_is_thread_bound(dq))) {
 		DISPATCH_CLIENT_CRASH(0, "_dispatch_main_queue_callback_4CF called"
 				" after dispatch_main()");
@@ -4572,32 +4406,38 @@
 	dispatch_once_f(&_dispatch_main_q_handle_pred, dq,
 			_dispatch_runloop_queue_handle_init);
 
-	_dispatch_perfmon_start();
 	// <rdar://problem/23256682> hide the frame chaining when CFRunLoop
 	// drains the main runloop, as this should not be observable that way
 	_dispatch_thread_frame_push_and_rebase(&dtf, dq, NULL);
 
-	pthread_priority_t old_pri = _dispatch_get_priority();
-	pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL);
+	pthread_priority_t pp = _dispatch_get_priority();
+	dispatch_priority_t pri = _dispatch_priority_from_pp(pp);
+	dispatch_qos_t qos = _dispatch_priority_qos(pri);
 	voucher_t voucher = _voucher_copy();
 
+	if (unlikely(qos != _dispatch_priority_qos(dq->dq_priority))) {
+		_dispatch_main_queue_update_priority_from_thread();
+	}
+	dispatch_priority_t old_dbp = _dispatch_set_basepri(pri);
+	_dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED);
+
 	struct dispatch_object_s *dc, *next_dc, *tail;
 	dc = os_mpsc_capture_snapshot(dq, dq_items, &tail);
 	do {
 		next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next);
 		_dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE);
-		_dispatch_perfmon_workitem_inc();
 	} while ((dc = next_dc));
 
 	// runloop based queues use their port for the queue PUBLISH pattern
 	// so this raw call to dx_wakeup(0) is valid
 	dx_wakeup(dq, 0, 0);
 	_dispatch_voucher_debug("main queue restore", voucher);
-	_dispatch_reset_defaultpriority(old_dp);
-	_dispatch_reset_priority_and_voucher(old_pri, voucher);
+	_dispatch_reset_basepri(old_dbp);
+	_dispatch_reset_basepri_override();
+	_dispatch_reset_priority_and_voucher(pp, voucher);
 	_dispatch_thread_frame_pop(&dtf);
-	_dispatch_perfmon_end();
 	_dispatch_force_cache_cleanup();
+	_dispatch_perfmon_end_notrace();
 }
 
 static bool
@@ -4606,18 +4446,19 @@
 	if (!dq->dq_items_tail) {
 		return false;
 	}
+	_dispatch_perfmon_start_notrace();
 	dispatch_thread_frame_s dtf;
-	_dispatch_perfmon_start();
 	_dispatch_thread_frame_push(&dtf, dq);
-	pthread_priority_t old_pri = _dispatch_get_priority();
-	pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL);
+	pthread_priority_t pp = _dispatch_get_priority();
+	dispatch_priority_t pri = _dispatch_priority_from_pp(pp);
 	voucher_t voucher = _voucher_copy();
+	dispatch_priority_t old_dbp = _dispatch_set_basepri(pri);
+	_dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED);
 
 	struct dispatch_object_s *dc, *next_dc;
 	dc = _dispatch_queue_head(dq);
 	next_dc = _dispatch_queue_next(dq, dc);
 	_dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE);
-	_dispatch_perfmon_workitem_inc();
 
 	if (!next_dc) {
 		// runloop based queues use their port for the queue PUBLISH pattern
@@ -4626,11 +4467,12 @@
 	}
 
 	_dispatch_voucher_debug("runloop queue restore", voucher);
-	_dispatch_reset_defaultpriority(old_dp);
-	_dispatch_reset_priority_and_voucher(old_pri, voucher);
+	_dispatch_reset_basepri(old_dbp);
+	_dispatch_reset_basepri_override();
+	_dispatch_reset_priority_and_voucher(pp, voucher);
 	_dispatch_thread_frame_pop(&dtf);
-	_dispatch_perfmon_end();
 	_dispatch_force_cache_cleanup();
+	_dispatch_perfmon_end_notrace();
 	return next_dc;
 }
 #endif
@@ -4639,7 +4481,7 @@
 void
 _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq)
 {
-	dispatch_continuation_t dc_tmp, dc_start, dc_end;
+	dispatch_continuation_t dc_tmp, dc_start = NULL, dc_end = NULL;
 	struct dispatch_object_s *dc = NULL;
 	uint64_t dq_state, owned;
 	size_t count = 0;
@@ -4695,7 +4537,8 @@
 		// for sources and mach channels in the first place.
 		owned = _dispatch_queue_adjust_owned(dq, owned, dc);
 		dq_state = _dispatch_queue_drain_unlock(dq, owned, NULL);
-		return _dispatch_queue_try_wakeup(dq, dq_state, 0);
+		return _dispatch_queue_try_wakeup(dq, dq_state,
+				DISPATCH_WAKEUP_WAITER_HANDOFF);
 	} else if (!fastpath(_dispatch_queue_drain_try_unlock(dq, owned))) {
 		// someone enqueued a slow item at the head
 		// looping may be its last chance
@@ -4712,13 +4555,14 @@
 
 	if (dq->dq_items_tail) {
 		_dispatch_perfmon_start();
+		_dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED);
 		if (slowpath(_dispatch_queue_serial_drain(dq, flags, &owned, NULL))) {
 			DISPATCH_INTERNAL_CRASH(0, "Interrupted drain on manager queue");
 		}
 		_dispatch_voucher_debug("mgr queue clear", NULL);
 		_voucher_clear();
-		_dispatch_reset_defaultpriority_override();
-		_dispatch_perfmon_end();
+		_dispatch_reset_basepri_override();
+		_dispatch_perfmon_end(perfmon_thread_manager);
 	}
 
 #if DISPATCH_USE_KEVENT_WORKQUEUE
@@ -4796,15 +4640,17 @@
 					_dq_state_drain_owner(old_state) != self) {
 				os_atomic_rmw_loop_give_up({
 					// We may have been overridden, so inform the root queue
-					_dispatch_set_defaultpriority_override();
+					_dispatch_set_basepri_override_qos(
+							_dq_state_max_qos(old_state));
 					return _dispatch_release_tailcall(dq);
 				});
 			}
-			new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
+			new_state =
+				DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
 		});
-		if (_dq_state_has_override(old_state)) {
+		if (_dq_state_received_override(old_state)) {
 			// Ensure that the root queue sees that this thread was overridden.
-			_dispatch_set_defaultpriority_override();
+			_dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state));
 		}
 		return dx_invoke(dq, flags | DISPATCH_INVOKE_STEALING);
 	}
@@ -4886,39 +4732,37 @@
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_need_global_root_queue_push_override(dispatch_queue_t rq,
-		pthread_priority_t pp)
+_dispatch_need_global_root_queue_override(dispatch_queue_t rq,
+		dispatch_qos_t qos)
 {
-	pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	bool defaultqueue = rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
+	dispatch_qos_t rqos = _dispatch_priority_qos(rq->dq_priority);
+	bool defaultqueue = rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
 
-	if (unlikely(!rqp)) return false;
+	if (unlikely(!rqos)) return false;
 
-	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	return defaultqueue ? pp && pp != rqp : pp > rqp;
+	return defaultqueue ? qos && qos != rqos : qos > rqos;
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline bool
-_dispatch_need_global_root_queue_push_override_stealer(dispatch_queue_t rq,
-		pthread_priority_t pp)
+_dispatch_need_global_root_queue_override_stealer(dispatch_queue_t rq,
+		dispatch_qos_t qos)
 {
-	pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	bool defaultqueue = rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
+	dispatch_qos_t rqos = _dispatch_priority_qos(rq->dq_priority);
+	bool defaultqueue = rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE;
 
-	if (unlikely(!rqp)) return false;
+	if (unlikely(!rqos)) return false;
 
-	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	return defaultqueue || pp > rqp;
+	return defaultqueue ? qos != 0 : qos > rqos;
 }
 
 DISPATCH_NOINLINE
 static void
 _dispatch_root_queue_push_override(dispatch_queue_t orig_rq,
-		dispatch_object_t dou, pthread_priority_t pp)
+		dispatch_object_t dou, dispatch_qos_t qos)
 {
-	bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
-	dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit);
+	bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+	dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit);
 	dispatch_continuation_t dc = dou._dc;
 
 	if (_dispatch_object_is_redirection(dc)) {
@@ -4944,10 +4788,10 @@
 DISPATCH_NOINLINE
 static void
 _dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq,
-		dispatch_queue_t dq, pthread_priority_t pp)
+		dispatch_queue_t dq, dispatch_qos_t qos)
 {
-	bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
-	dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit);
+	bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+	dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit);
 	dispatch_continuation_t dc = _dispatch_continuation_alloc();
 
 	dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING);
@@ -4964,20 +4808,20 @@
 }
 
 DISPATCH_NOINLINE
-static void
+void
 _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq,
-		pthread_priority_t pp, dispatch_wakeup_flags_t flags, uint64_t dq_state)
+		dispatch_qos_t qos, dispatch_wakeup_flags_t flags, uint64_t dq_state)
 {
 	mach_port_t owner = _dq_state_drain_owner(dq_state);
-	pthread_priority_t pp2;
 	dispatch_queue_t tq;
+	dispatch_qos_t oqos;
 	bool locked;
 
 	if (owner) {
-		int rc = _dispatch_wqthread_override_start_check_owner(owner, pp,
+		int rc = _dispatch_wqthread_override_start_check_owner(owner, qos,
 				&dq->dq_state_lock);
 		// EPERM means the target of the override is not a work queue thread
-		// and could be a thread bound queue such as the main queue.
+		// and could be a thread-bound queue such as the main queue.
 		// When that happens we must get to that queue and wake it up if we
 		// want the override to be applied and take effect.
 		if (rc != EPERM) {
@@ -4991,14 +4835,14 @@
 
 	tq = dq->do_targetq;
 
-	if (_dispatch_queue_has_immutable_target(dq)) {
+	if (likely(!_dispatch_queue_is_legacy(dq))) {
 		locked = false;
 	} else if (_dispatch_is_in_root_queues_array(tq)) {
 		// avoid locking when we recognize the target queue as a global root
 		// queue it is gross, but is a very common case. The locking isn't
 		// needed because these target queues cannot go away.
 		locked = false;
-	} else if (_dispatch_queue_sidelock_trylock(dq, pp)) {
+	} else if (_dispatch_queue_sidelock_trylock(dq, qos)) {
 		// <rdar://problem/17735825> to traverse the tq chain safely we must
 		// lock it to ensure it cannot change
 		locked = true;
@@ -5008,10 +4852,9 @@
 		//
 		// Leading to being there, the current thread has:
 		// 1. enqueued an object on `dq`
-		// 2. raised the dq_override value of `dq`
-		// 3. set the HAS_OVERRIDE bit and not seen an owner
-		// 4. tried and failed to acquire the side lock
-		//
+		// 2. raised the dq_override value and set RECEIVED_OVERRIDE on `dq`
+		//    and not seen an owner
+		// 3. tried and failed to acquire the side lock
 		//
 		// The side lock owner can only be one of three things:
 		//
@@ -5021,20 +4864,19 @@
 		//   the eventual dispatch_resume().
 		//
 		// - A dispatch_set_target_queue() call. The fact that we saw no `owner`
-		//   means that the trysync it does wasn't being drained when (3)
+		//   means that the trysync it does wasn't being drained when (2)
 		//   happened which can only be explained by one of these interleavings:
 		//
 		//    o `dq` became idle between when the object queued in (1) ran and
 		//      the set_target_queue call and we were unlucky enough that our
-		//      step (3) happened while this queue was idle. There is no reason
+		//      step (2) happened while this queue was idle. There is no reason
 		//      to override anything anymore, the queue drained to completion
 		//      while we were preempted, our job is done.
 		//
-		//    o `dq` is queued but not draining during (1-3), then when we try
-		//      to lock at (4) the queue is now draining a set_target_queue.
-		//      Since we set HAS_OVERRIDE with a release barrier, the effect of
-		//      (2) was visible to the drainer when he acquired the drain lock,
-		//      and that guy has applied our override. Our job is done.
+		//    o `dq` is queued but not draining during (1-2), then when we try
+		//      to lock at (3) the queue is now draining a set_target_queue.
+		//      This drainer must have seen the effects of (2) and that guy has
+		//      applied our override. Our job is done.
 		//
 		// - Another instance of _dispatch_queue_class_wakeup_with_override(),
 		//   which is fine because trylock leaves a hint that we failed our
@@ -5047,11 +4889,13 @@
 
 apply_again:
 	if (dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
-		if (_dispatch_need_global_root_queue_push_override_stealer(tq, pp)) {
-			_dispatch_root_queue_push_override_stealer(tq, dq, pp);
+		if (_dispatch_need_global_root_queue_override_stealer(tq, qos)) {
+			_dispatch_root_queue_push_override_stealer(tq, dq, qos);
 		}
-	} else if (_dispatch_queue_need_override(tq, pp)) {
-		dx_wakeup(tq, pp, DISPATCH_WAKEUP_OVERRIDING);
+	} else if (flags & DISPATCH_WAKEUP_WAITER_HANDOFF) {
+		dx_wakeup(tq, qos, flags);
+	} else if (_dispatch_queue_need_override(tq, qos)) {
+		dx_wakeup(tq, qos, DISPATCH_WAKEUP_OVERRIDING);
 	}
 	while (unlikely(locked && !_dispatch_queue_sidelock_tryunlock(dq))) {
 		// rdar://problem/24081326
@@ -5060,9 +4904,9 @@
 		// tried to acquire the side lock while we were running, and could have
 		// had a better override than ours to apply.
 		//
-		pp2 = dq->dq_override;
-		if (pp2 > pp) {
-			pp = pp2;
+		oqos = _dq_state_max_qos(os_atomic_load2o(dq, dq_state, relaxed));
+		if (oqos > qos) {
+			qos = oqos;
 			// The other instance had a better priority than ours, override
 			// our thread, and apply the override that wasn't applied to `dq`
 			// because of us.
@@ -5080,32 +4924,26 @@
 DISPATCH_NOINLINE
 void
 _dispatch_queue_class_override_drainer(dispatch_queue_t dq,
-		pthread_priority_t pp, dispatch_wakeup_flags_t flags)
+		dispatch_qos_t qos, dispatch_wakeup_flags_t flags)
 {
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	uint64_t dq_state, value;
+	uint64_t old_state, new_state;
 
 	//
 	// Someone is trying to override the last work item of the queue.
-	// Do not remember this override on the queue because we know the precise
-	// duration the override is required for: until the current drain unlocks.
 	//
-	// That is why this function only tries to set HAS_OVERRIDE if we can
-	// still observe a drainer, and doesn't need to set the DIRTY bit
-	// because oq_override wasn't touched and there is no race to resolve
-	//
-	os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
-		if (!_dq_state_drain_locked(dq_state)) {
-			os_atomic_rmw_loop_give_up(break);
+	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+		new_state = _dq_state_merge_qos(old_state, qos);
+		if (!_dq_state_drain_locked(new_state) || new_state == old_state) {
+			os_atomic_rmw_loop_give_up(goto done);
 		}
-		value = dq_state | DISPATCH_QUEUE_HAS_OVERRIDE;
 	});
-	if (_dq_state_drain_locked(dq_state)) {
-		return _dispatch_queue_class_wakeup_with_override(dq, pp,
-				flags, dq_state);
-	}
+	return _dispatch_queue_class_wakeup_with_override(dq, qos,
+			flags, new_state);
+
+done:
 #else
-	(void)pp;
+	(void)qos;
 #endif // HAVE_PTHREAD_WORKQUEUE_QOS
 	if (flags & DISPATCH_WAKEUP_CONSUME) {
 		return _dispatch_release_tailcall(dq);
@@ -5116,31 +4954,30 @@
 DISPATCH_NOINLINE
 static void
 _dispatch_trystash_to_deferred_items(dispatch_queue_t dq, dispatch_object_t dou,
-		pthread_priority_t pp, dispatch_deferred_items_t ddi)
+		dispatch_qos_t qos, dispatch_deferred_items_t ddi)
 {
-	dispatch_priority_t old_pp = ddi->ddi_stashed_pp;
+	dispatch_priority_t old_pri = ddi->ddi_stashed_pri;
 	dispatch_queue_t old_dq = ddi->ddi_stashed_dq;
 	struct dispatch_object_s *old_dou = ddi->ddi_stashed_dou;
 	dispatch_priority_t rq_overcommit;
 
-	rq_overcommit = dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
-	if (likely(!old_pp || rq_overcommit)) {
+	rq_overcommit = dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
+	if (likely(!old_pri || rq_overcommit)) {
 		ddi->ddi_stashed_dq = dq;
 		ddi->ddi_stashed_dou = dou._do;
-		ddi->ddi_stashed_pp = (dispatch_priority_t)pp | rq_overcommit |
-				_PTHREAD_PRIORITY_PRIORITY_MASK;
-		if (likely(!old_pp)) {
+		ddi->ddi_stashed_pri = _dispatch_priority_make(qos, 0) | rq_overcommit;
+		if (likely(!old_pri)) {
 			return;
 		}
 		// push the previously stashed item
-		pp = old_pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+		qos = _dispatch_priority_qos(old_pri);
 		dq = old_dq;
 		dou._do = old_dou;
 	}
-	if (_dispatch_need_global_root_queue_push_override(dq, pp)) {
-		return _dispatch_root_queue_push_override(dq, dou, pp);
+	if (_dispatch_need_global_root_queue_override(dq, qos)) {
+		return _dispatch_root_queue_push_override(dq, dou, qos);
 	}
-	// bit of cheating: we should really pass `pp` but we know that we are
+	// bit of cheating: we should really pass `qos` but we know that we are
 	// pushing onto a global queue at this point, and we just checked that
 	// `qos` doesn't matter.
 	DISPATCH_COMPILER_CAN_ASSUME(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE);
@@ -5152,54 +4989,48 @@
 DISPATCH_NOINLINE
 static void
 _dispatch_queue_push_slow(dispatch_queue_t dq, dispatch_object_t dou,
-		pthread_priority_t pp)
+		dispatch_qos_t qos)
 {
 	dispatch_once_f(&_dispatch_root_queues_pred, NULL,
 			_dispatch_root_queues_init_once);
-	_dispatch_queue_push(dq, dou, pp);
+	_dispatch_queue_push(dq, dou, qos);
 }
 #endif
 
 DISPATCH_NOINLINE
 void
 _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
-		pthread_priority_t pp)
+		dispatch_qos_t qos)
 {
-	_dispatch_assert_is_valid_qos_override(pp);
 	if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
 #if DISPATCH_USE_KEVENT_WORKQUEUE
 		dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
-		if (unlikely(ddi && !(ddi->ddi_stashed_pp &
-				(dispatch_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK))) {
+		if (unlikely(ddi &&
+				(ddi->ddi_stashed_pri != DISPATCH_PRIORITY_NOSTASH))) {
 			dispatch_assert(_dispatch_root_queues_pred == DLOCK_ONCE_DONE);
-			return _dispatch_trystash_to_deferred_items(dq, dou, pp, ddi);
+			return _dispatch_trystash_to_deferred_items(dq, dou, qos, ddi);
 		}
 #endif
 #if HAVE_PTHREAD_WORKQUEUE_QOS
 		// can't use dispatch_once_f() as it would create a frame
 		if (unlikely(_dispatch_root_queues_pred != DLOCK_ONCE_DONE)) {
-			return _dispatch_queue_push_slow(dq, dou, pp);
+			return _dispatch_queue_push_slow(dq, dou, qos);
 		}
-		if (_dispatch_need_global_root_queue_push_override(dq, pp)) {
-			return _dispatch_root_queue_push_override(dq, dou, pp);
+		if (_dispatch_need_global_root_queue_override(dq, qos)) {
+			return _dispatch_root_queue_push_override(dq, dou, qos);
 		}
 #endif
 	}
-	_dispatch_queue_push_inline(dq, dou, pp, 0);
+	_dispatch_queue_push_inline(dq, dou, qos, 0);
 }
 
 DISPATCH_NOINLINE
 static void
-_dispatch_queue_class_wakeup_enqueue(dispatch_queue_t dq, pthread_priority_t pp,
+_dispatch_queue_class_wakeup_enqueue(dispatch_queue_t dq, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
 {
 	dispatch_queue_t tq;
 
-	if (flags & (DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_WAS_OVERRIDDEN)) {
-		// _dispatch_queue_drain_try_unlock may have reset the override while
-		// we were becoming the enqueuer
-		_dispatch_queue_reinstate_override_priority(dq, (dispatch_priority_t)pp);
-	}
 	if (!(flags & DISPATCH_WAKEUP_CONSUME)) {
 		_dispatch_retain(dq);
 	}
@@ -5208,123 +5039,36 @@
 		// of a queue asyncing to that queue is not an uncommon pattern
 		// and in that case the acquire is completely useless
 		//
-		// so instead use a thread fence here when we will read the targetq
-		// pointer because that is the only thing that really requires
-		// that barrier.
-		os_atomic_thread_fence(acquire);
-		tq = dq->do_targetq;
+		// so instead use dependency ordering to read the targetq pointer.
+		os_atomic_thread_fence(dependency);
+		tq = os_atomic_load_with_dependency_on2o(dq, do_targetq, (long)qos);
 	} else {
 		dispatch_assert(target == DISPATCH_QUEUE_WAKEUP_MGR);
 		tq = &_dispatch_mgr_q;
 	}
-	return _dispatch_queue_push(tq, dq, pp);
+	return _dispatch_queue_push(tq, dq, qos);
 }
 
-DISPATCH_NOINLINE
-void
-_dispatch_queue_class_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
-		dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
+DISPATCH_ALWAYS_INLINE
+static void
+_dispatch_queue_class_wakeup_finish(dispatch_queue_t dq, dispatch_qos_t qos,
+		dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target,
+		uint64_t old_state, uint64_t new_state)
 {
-	uint64_t old_state, new_state, bits = 0;
-
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-	_dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags);
-#endif
-
-	if (flags & DISPATCH_WAKEUP_FLUSH) {
-		bits = DISPATCH_QUEUE_DIRTY;
-	}
-	if (flags & DISPATCH_WAKEUP_OVERRIDING) {
-		//
-		// Setting the dirty bit here is about forcing callers of
-		// _dispatch_queue_drain_try_unlock() to loop again when an override
-		// has just been set to close the following race:
-		//
-		// Drainer (in drain_try_unlokc():
-		//    override_reset();
-		//    preempted....
-		//
-		// Enqueuer:
-		//    atomic_or(oq_override, override, relaxed);
-		//    atomic_or(dq_state, HAS_OVERRIDE, release);
-		//
-		// Drainer:
-		//    ... resumes
-		//    successful drain_unlock() and leaks `oq_override`
-		//
-		bits = DISPATCH_QUEUE_DIRTY | DISPATCH_QUEUE_HAS_OVERRIDE;
-	}
-
-	if (flags & DISPATCH_WAKEUP_SLOW_WAITER) {
-		uint64_t pending_barrier_width =
-				(dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
-		uint64_t xor_owner_and_set_full_width_and_in_barrier =
-				_dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT |
-				DISPATCH_QUEUE_IN_BARRIER;
-
-#ifdef DLOCK_NOWAITERS_BIT
-		bits  |= DLOCK_NOWAITERS_BIT;
-#else
-		bits  |= DLOCK_WAITERS_BIT;
-#endif
-		flags ^= DISPATCH_WAKEUP_SLOW_WAITER;
-		dispatch_assert(!(flags & DISPATCH_WAKEUP_CONSUME));
-
-		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
-			new_state = old_state | bits;
-			if (_dq_state_drain_pended(old_state)) {
-				// same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT
-				// but we want to be more efficient wrt the WAITERS_BIT
-				new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK;
-				new_state &= ~DISPATCH_QUEUE_DRAIN_PENDED;
-			}
-			if (unlikely(_dq_state_drain_locked(new_state))) {
-#ifdef DLOCK_NOWAITERS_BIT
-				new_state &= ~(uint64_t)DLOCK_NOWAITERS_BIT;
-#endif
-			} else if (unlikely(!_dq_state_is_runnable(new_state) ||
-					!(flags & DISPATCH_WAKEUP_FLUSH))) {
-				// either not runnable, or was not for the first item (26700358)
-				// so we should not try to lock and handle overrides instead
-			} else if (_dq_state_has_pending_barrier(old_state) ||
-					new_state + pending_barrier_width <
-					DISPATCH_QUEUE_WIDTH_FULL_BIT) {
-				// see _dispatch_queue_drain_try_lock
-				new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
-				new_state ^= xor_owner_and_set_full_width_and_in_barrier;
-			} else {
-				new_state |= DISPATCH_QUEUE_ENQUEUED;
-			}
-		});
-		if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) {
-			return _dispatch_try_lock_transfer_or_wakeup(dq);
-		}
-	} else if (bits) {
-		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{
-			new_state = old_state | bits;
-			if (likely(_dq_state_should_wakeup(old_state))) {
-				new_state |= DISPATCH_QUEUE_ENQUEUED;
-			}
-		});
+	if ((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK) {
+		flags |= DISPATCH_WAKEUP_OVERRIDING;
 	} else {
-		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed,{
-			new_state = old_state;
-			if (likely(_dq_state_should_wakeup(old_state))) {
-				new_state |= DISPATCH_QUEUE_ENQUEUED;
-			} else {
-				os_atomic_rmw_loop_give_up(break);
-			}
-		});
+		flags &= ~(dispatch_wakeup_flags_t)DISPATCH_WAKEUP_OVERRIDING;
+		qos = _dq_state_max_qos(new_state);
 	}
-
 	if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) {
-		return _dispatch_queue_class_wakeup_enqueue(dq, pp, flags, target);
+		return _dispatch_queue_class_wakeup_enqueue(dq, qos, flags, target);
 	}
 
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-	if ((flags & DISPATCH_WAKEUP_OVERRIDING)
+	if ((flags & (DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_WAITER_HANDOFF))
 			&& target == DISPATCH_QUEUE_WAKEUP_TARGET) {
-		return _dispatch_queue_class_wakeup_with_override(dq, pp,
+		return _dispatch_queue_class_wakeup_with_override(dq, qos,
 				flags, new_state);
 	}
 #endif
@@ -5334,6 +5078,97 @@
 	}
 }
 
+DISPATCH_NOINLINE
+void
+_dispatch_queue_class_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
+		dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
+{
+	uint64_t old_state, new_state;
+
+	qos = _dispatch_queue_override_qos(dq, qos);
+	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+		new_state = _dq_state_merge_qos(old_state, qos);
+		if (likely(_dq_state_should_wakeup(old_state))) {
+			new_state |= DISPATCH_QUEUE_ENQUEUED;
+		}
+		if (flags & DISPATCH_WAKEUP_FLUSH) {
+			new_state |= DISPATCH_QUEUE_DIRTY;
+		} else if (new_state == old_state) {
+			os_atomic_rmw_loop_give_up(break);
+		}
+	});
+
+	return _dispatch_queue_class_wakeup_finish(dq, qos, flags, target,
+			old_state, new_state);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_queue_push_slow_waiter(dispatch_queue_t dq, dispatch_object_t dou)
+{
+	uint64_t pending_barrier_width =
+			(dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
+	uint64_t xor_owner_and_set_full_width_and_in_barrier =
+			_dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT |
+			DISPATCH_QUEUE_IN_BARRIER;
+	dispatch_qos_t qos = _dispatch_continuation_override_qos(dq, dou._dc);
+	uint64_t old_state, new_state;
+	dispatch_wakeup_flags_t flags = 0;
+
+	_dispatch_trace_continuation_push(dq, dou);
+	if (unlikely(_dispatch_queue_push_update_tail(dq, dou._do))) {
+		// for slow waiters, we borrow the reference of the caller
+		// so we don't need to protect the wakeup with a temporary retain
+		_dispatch_queue_push_update_head(dq, dou._do, true);
+		flags = DISPATCH_WAKEUP_FLUSH;
+		if (unlikely(_dispatch_queue_is_thread_bound(dq))) {
+			return dx_wakeup(dq, qos, flags);
+		}
+	}
+
+	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+		new_state = _dq_state_merge_qos(old_state, qos);
+#ifdef DLOCK_NOWAITERS_BIT
+		new_state |= DLOCK_NOWAITERS_BIT;
+#else
+		new_state |= DLOCK_WAITERS_BIT;
+#endif
+		if (flags & DISPATCH_WAKEUP_FLUSH) {
+			new_state |= DISPATCH_QUEUE_DIRTY;
+		}
+		if (_dq_state_drain_pended(old_state)) {
+			// same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT
+			// but we want to be more efficient wrt the WAITERS_BIT
+			new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK;
+			new_state &= ~DISPATCH_QUEUE_DRAIN_PENDED;
+		}
+		if (unlikely(_dq_state_drain_locked(new_state))) {
+#ifdef DLOCK_NOWAITERS_BIT
+			new_state &= ~(uint64_t)DLOCK_NOWAITERS_BIT;
+#endif
+		} else if (unlikely(!_dq_state_is_runnable(new_state) ||
+				!(flags & DISPATCH_WAKEUP_FLUSH))) {
+			// either not runnable, or was not for the first item (26700358)
+			// so we should not try to lock and handle overrides instead
+		} else if (_dq_state_has_pending_barrier(old_state) ||
+				new_state + pending_barrier_width <
+				DISPATCH_QUEUE_WIDTH_FULL_BIT) {
+			// see _dispatch_queue_drain_try_lock
+			new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
+			new_state ^= xor_owner_and_set_full_width_and_in_barrier;
+		} else {
+			new_state |= DISPATCH_QUEUE_ENQUEUED;
+		}
+	});
+
+	if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) {
+		return _dispatch_try_lock_transfer_or_wakeup(dq);
+	}
+
+	return _dispatch_queue_class_wakeup_finish(dq, qos, flags,
+			DISPATCH_QUEUE_WAKEUP_TARGET, old_state, new_state);
+}
+
 #pragma mark -
 #pragma mark dispatch_root_queue_drain
 
@@ -5438,7 +5273,7 @@
 			goto out;
 		}
 		// There must be a next item now.
-		_dispatch_wait_until(next = head->do_next);
+		next = os_mpsc_get_next(head, do_next);
 	}
 
 	os_atomic_store2o(dq, dq_items_head, next, relaxed);
@@ -5449,30 +5284,30 @@
 
 void
 _dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq,
-		struct dispatch_object_s *dou, pthread_priority_t pp)
+		struct dispatch_object_s *dou, dispatch_qos_t qos
+		DISPATCH_PERF_MON_ARGS_PROTO)
 {
-	struct _dispatch_identity_s di;
-
 	// fake that we queued `dou` on `dq` for introspection purposes
 	_dispatch_trace_continuation_push(dq, dou);
 
-	pp = _dispatch_priority_inherit_from_root_queue(pp, dq);
+	dispatch_priority_t old_pri, pri = _dispatch_priority_make(qos, 0);
+	pri = _dispatch_priority_inherit_from_root_queue(pri, dq);
 	_dispatch_queue_set_current(dq);
-	_dispatch_root_queue_identity_assume(&di, pp);
+	pthread_priority_t pp = _dispatch_priority_to_pp(pri);
+	old_pri = _dispatch_root_queue_identity_assume(pp, NULL);
 #if DISPATCH_COCOA_COMPAT
 	void *pool = _dispatch_last_resort_autorelease_pool_push();
 #endif // DISPATCH_COCOA_COMPAT
 
-	_dispatch_perfmon_start();
 	_dispatch_continuation_pop_inline(dou, dq,
 			DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN);
-	_dispatch_perfmon_workitem_inc();
-	_dispatch_perfmon_end();
+	// event thread that could steal
+	_dispatch_perfmon_end(perfmon_thread_event_steal);
 
 #if DISPATCH_COCOA_COMPAT
 	_dispatch_last_resort_autorelease_pool_pop(pool);
 #endif // DISPATCH_COCOA_COMPAT
-	_dispatch_reset_defaultpriority(di.old_pp);
+	_dispatch_reset_basepri(old_pri);
 	_dispatch_queue_set_current(NULL);
 
 	_dispatch_voucher_debug("root queue clear", NULL);
@@ -5481,7 +5316,7 @@
 
 DISPATCH_NOT_TAIL_CALLED // prevent tailcall (for Instrument DTrace probe)
 static void
-_dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pri)
+_dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pp)
 {
 #if DISPATCH_DEBUG
 	dispatch_queue_t cq;
@@ -5490,28 +5325,34 @@
 	}
 #endif
 	_dispatch_queue_set_current(dq);
-	if (dq->dq_priority) pri = dq->dq_priority;
-	pthread_priority_t old_dp = _dispatch_set_defaultpriority(pri, NULL);
+	dispatch_priority_t pri = dq->dq_priority;
+	if (!pri) pri = _dispatch_priority_from_pp(pp);
+	dispatch_priority_t old_dbp = _dispatch_set_basepri(pri);
 #if DISPATCH_COCOA_COMPAT
 	void *pool = _dispatch_last_resort_autorelease_pool_push();
 #endif // DISPATCH_COCOA_COMPAT
 
-	_dispatch_perfmon_start();
 	struct dispatch_object_s *item;
 	bool reset = false;
+	_dispatch_perfmon_start();
 	while ((item = fastpath(_dispatch_root_queue_drain_one(dq)))) {
 		if (reset) _dispatch_wqthread_override_reset();
 		_dispatch_continuation_pop_inline(item, dq,
 				DISPATCH_INVOKE_WORKER_DRAIN|DISPATCH_INVOKE_REDIRECTING_DRAIN);
-		_dispatch_perfmon_workitem_inc();
-		reset = _dispatch_reset_defaultpriority_override();
+		reset = _dispatch_reset_basepri_override();
 	}
-	_dispatch_perfmon_end();
+
+	// worker thread, overcommit or not
+	if (pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
+		_dispatch_perfmon_end(perfmon_thread_worker_oc);
+	} else {
+		_dispatch_perfmon_end(perfmon_thread_worker_non_oc);
+	}
 
 #if DISPATCH_COCOA_COMPAT
 	_dispatch_last_resort_autorelease_pool_pop(pool);
 #endif // DISPATCH_COCOA_COMPAT
-	_dispatch_reset_defaultpriority(old_dp);
+	_dispatch_reset_basepri(old_dbp);
 	_dispatch_queue_set_current(NULL);
 }
 
@@ -5541,7 +5382,7 @@
 	dispatch_queue_t dq;
 	pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
 	_dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp);
-	dq = _dispatch_get_root_queue_for_priority(pp, overcommit);
+	dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit);
 	return _dispatch_worker_thread4(dq);
 }
 #endif // HAVE_PTHREAD_WORKQUEUE_QOS
@@ -5653,7 +5494,7 @@
 	dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD;
 	dq = _dispatch_alloc(DISPATCH_VTABLE(queue_runloop), dqs);
 	_dispatch_queue_init(dq, DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, 1, false);
-	dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,true);
+	dq->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
 	dq->dq_label = label ? label : "runloop-queue"; // no-copy contract
 	_dispatch_runloop_queue_handle_init(dq);
 	_dispatch_queue_set_bound_thread(dq);
@@ -5666,10 +5507,10 @@
 {
 	_dispatch_object_debug(dq, "%s", __func__);
 
-	pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq, true);
+	dispatch_qos_t qos = _dispatch_queue_reset_max_qos(dq);
 	_dispatch_queue_clear_bound_thread(dq);
-	dx_wakeup(dq, pp, DISPATCH_WAKEUP_FLUSH);
-	if (pp) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq);
+	dx_wakeup(dq, qos, DISPATCH_WAKEUP_FLUSH);
+	if (qos) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq);
 }
 
 void
@@ -5828,12 +5669,7 @@
 
 void
 _dispatch_main_queue_callback_4CF(
-#if TARGET_OS_MAC
-		mach_msg_header_t *_Null_unspecified msg
-#else
-		void *ignored
-#endif
-		DISPATCH_UNUSED)
+		void *ignored DISPATCH_UNUSED)
 {
 	if (main_q_is_draining) {
 		return;
@@ -5901,52 +5737,25 @@
 _dispatch_queue_cleanup2(void)
 {
 	dispatch_queue_t dq = &_dispatch_main_q;
-	_dispatch_queue_clear_bound_thread(dq);
+	uint64_t old_state, new_state;
 
-	// <rdar://problem/22623242>
-	// Here is what happens when both this cleanup happens because of
-	// dispatch_main() being called, and a concurrent enqueuer makes the queue
-	// non empty.
+	// Turning the main queue from a runloop queue into an ordinary serial queue
+	// is a three-step operation:
+	// 1. finish taking the main queue lock the usual way
+	// 2. clear the THREAD_BOUND flag
+	// 3. do a handoff
 	//
-	// _dispatch_queue_cleanup2:
-	//     atomic_and(dq_is_thread_bound, ~DQF_THREAD_BOUND, relaxed);
-	//     maximal_barrier();
-	//     if (load(dq_items_tail, seq_cst)) {
-	//         // do the wake up the normal serial queue way
-	//     } else {
-	//         // do no wake up  <----
-	//     }
-	//
-	// enqueuer:
-	//     store(dq_items_tail, new_tail, release);
-	//     if (load(dq_is_thread_bound, relaxed)) {
-	//         // do the wake up the runloop way <----
-	//     } else {
-	//         // do the wake up the normal serial way
-	//     }
-	//
-	// what would be bad is to take both paths marked <---- because the queue
-	// wouldn't be woken up until the next time it's used (which may never
-	// happen)
-	//
-	// An enqueuer that speculates the load of the old value of thread_bound
-	// and then does the store may wake up the main queue the runloop way.
-	// But then, the cleanup thread will see that store because the load
-	// of dq_items_tail is sequentially consistent, and we have just thrown away
-	// our pipeline.
-	//
-	// By the time cleanup2() is out of the maximally synchronizing barrier,
-	// no other thread can speculate the wrong load anymore, and both cleanup2()
-	// and a concurrent enqueuer would treat the queue in the standard non
-	// thread bound way
-
-	_dispatch_queue_atomic_flags_clear(dq,
-			DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC);
-	os_atomic_maximally_synchronizing_barrier();
-	// no need to drop the override, the thread will die anyway
-	// the barrier above includes an acquire, so it's ok to do this raw
-	// call to dx_wakeup(0)
-	dx_wakeup(dq, 0, 0);
+	// If an enqueuer executes concurrently, it may do the wakeup the runloop
+	// way, because it still believes the queue to be thread-bound, but the
+	// dirty bit will force this codepath to notice the enqueue, and the usual
+	// lock transfer will do the proper wakeup.
+	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
+		new_state = old_state & ~DISPATCH_QUEUE_DIRTY;
+		new_state += DISPATCH_QUEUE_WIDTH_INTERVAL;
+		new_state += DISPATCH_QUEUE_IN_BARRIER;
+	});
+	_dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND|DQF_CANNOT_TRYSYNC);
+	_dispatch_try_lock_transfer_or_wakeup(dq);
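
With the widened dq_state layout (see src/queue_internal.h below), step 1 above is plain 64-bit arithmetic: adding one width interval plus the barrier bit takes the lock in barrier mode. A worked example of that arithmetic, assuming the masks this patch defines later:

	uint64_t state = 0;                      // empty, unlocked queue
	state += DISPATCH_QUEUE_WIDTH_INTERVAL;  // one work item in flight
	state += DISPATCH_QUEUE_IN_BARRIER;      // ...running as a barrier
	// (state & DISPATCH_QUEUE_WIDTH_MASK) >> DISPATCH_QUEUE_WIDTH_SHIFT == 1
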
 
 	// overload the "probably" variable to mean that dispatch_main() or
 	// similar non-POSIX API was called
@@ -5955,7 +5764,7 @@
 #ifndef __linux__
 	if (_dispatch_program_is_probably_callback_driven) {
 		_dispatch_barrier_async_detached_f(_dispatch_get_root_queue(
-				_DISPATCH_QOS_CLASS_DEFAULT, true), NULL, _dispatch_sig_thread);
+				DISPATCH_QOS_DEFAULT, true), NULL, _dispatch_sig_thread);
 		sleep(1); // workaround 6778970
 	}
 #endif
@@ -5978,6 +5787,7 @@
 			"Premature thread exit while a dispatch queue is running");
 }
 
+DISPATCH_NORETURN
 static void
 _dispatch_deferred_items_cleanup(void *ctxt)
 {
@@ -5986,6 +5796,7 @@
 			"Premature thread exit with unhandled deferred items");
 }
 
+DISPATCH_NORETURN
 static void
 _dispatch_frame_cleanup(void *ctxt)
 {
@@ -5994,6 +5805,7 @@
 			"Premature thread exit while a dispatch frame is active");
 }
 
+DISPATCH_NORETURN
 static void
 _dispatch_context_cleanup(void *ctxt)
 {
diff --git a/src/queue_internal.h b/src/queue_internal.h
index 1bff7b0..a499c51 100644
--- a/src/queue_internal.h
+++ b/src/queue_internal.h
@@ -49,16 +49,17 @@
 #pragma mark dispatch_queue_t
 
 DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
-	DQF_NONE				= 0x0000,
-	DQF_AUTORELEASE_ALWAYS	= 0x0001,
-	DQF_AUTORELEASE_NEVER	= 0x0002,
-#define _DQF_AUTORELEASE_MASK 0x0003
-	DQF_THREAD_BOUND		= 0x0004, // queue is bound to a thread
-	DQF_BARRIER_BIT			= 0x0008, // queue is a barrier on its target
-	DQF_TARGETED			= 0x0010, // queue is targeted by another object
-	DQF_LABEL_NEEDS_FREE	= 0x0020, // queue label was strduped; need to free it
-	DQF_CANNOT_TRYSYNC		= 0x0040,
-	DQF_RELEASED			= 0x0080, // xref_cnt == -1
+	DQF_NONE                = 0x00000000,
+	DQF_AUTORELEASE_ALWAYS  = 0x00010000,
+	DQF_AUTORELEASE_NEVER   = 0x00020000,
+#define _DQF_AUTORELEASE_MASK 0x00030000
+	DQF_THREAD_BOUND        = 0x00040000, // queue is bound to a thread
+	DQF_BARRIER_BIT         = 0x00080000, // queue is a barrier on its target
+	DQF_TARGETED            = 0x00100000, // queue is targeted by another object
+	DQF_LABEL_NEEDS_FREE    = 0x00200000, // queue label was strduped; need to free it
+	DQF_CANNOT_TRYSYNC      = 0x00400000,
+	DQF_RELEASED            = 0x00800000, // xref_cnt == -1
+	DQF_LEGACY              = 0x01000000,
 
 	// only applies to sources
 	//
@@ -77,66 +78,60 @@
 	//    will be -p-.
 	//
 	// -pd
-	//    Received EV_DELETE (from ap-), needs to free `ds_dkev`, the knote is
-	//    gone from the kernel, but ds_dkev lives. Next state will be --d.
+	//    Received EV_DELETE (from ap-), needs to unregister ds_refs, the muxnote
+	//    is gone from the kernel. Next state will be --d.
 	//
 	// -p-
 	//    Received an EV_ONESHOT event (from a--), or the event whose pending
 	//    delivery made the cancellation fail with EINPROGRESS was delivered
-	//    (from ap-). The knote still lives, next state will be --d.
+	//    (from ap-). The muxnote still lives, next state will be --d.
 	//
 	// --d
-	//    Final state of the source, the knote is gone from the kernel and
-	//    ds_dkev is freed. The source can safely be released.
+	//    Final state of the source, the muxnote is gone from the kernel and
+	//    ds_refs is unregistered. The source can safely be released.
 	//
 	// a-d (INVALID)
 	// apd (INVALID)
 	//    Setting DSF_DELETED should also always atomically clear DSF_ARMED. If
-	//    the knote is gone from the kernel, it makes no sense whatsoever to
+	//    the muxnote is gone from the kernel, it makes no sense whatsoever to
 	//    have it armed. And generally speaking, once `d` or `p` has been set,
 	//    `a` cannot do a cleared -> set transition anymore
 	//    (see _dispatch_source_try_set_armed).
 	//
-	DSF_CANCEL_WAITER		= 0x0800, // synchronous waiters for cancel
-	DSF_CANCELED			= 0x1000, // cancellation has been requested
-	DSF_ARMED				= 0x2000, // source is armed
-	DSF_DEFERRED_DELETE		= 0x4000, // source is pending delete
-	DSF_DELETED				= 0x8000, // source knote is deleted
+	DSF_CANCEL_WAITER       = 0x08000000, // synchronous waiters for cancel
+	DSF_CANCELED            = 0x10000000, // cancellation has been requested
+	DSF_ARMED               = 0x20000000, // source is armed
+	DSF_DEFERRED_DELETE     = 0x40000000, // source is pending delete
+	DSF_DELETED             = 0x80000000, // source muxnote is deleted
 #define DSF_STATE_MASK (DSF_ARMED | DSF_DEFERRED_DELETE | DSF_DELETED)
 
-	DQF_WIDTH_MASK			= 0xffff0000,
-#define DQF_WIDTH_SHIFT		16
+#define DQF_FLAGS_MASK        ((dispatch_queue_flags_t)0xffff0000)
+#define DQF_WIDTH_MASK        ((dispatch_queue_flags_t)0x0000ffff)
+#define DQF_WIDTH(n)          ((dispatch_queue_flags_t)(uint16_t)(n))
 );
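
The width now lives in the low 16 bits of dq_atomic_flags instead of the high 16, so reading it is a mask with no shift. A sketch of the accessor this enables (helper name illustrative):

	static inline uint16_t
	sketch_dqf_width(uint32_t dq_atomic_flags)
	{
		// DQF_WIDTH_MASK is 0x0000ffff, so no DQF_WIDTH_SHIFT is needed
		return (uint16_t)(dq_atomic_flags & DQF_WIDTH_MASK);
	}
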
 
 #define _DISPATCH_QUEUE_HEADER(x) \
 	struct os_mpsc_queue_s _as_oq[0]; \
 	DISPATCH_OBJECT_HEADER(x); \
 	_OS_MPSC_QUEUE_FIELDS(dq, dq_state); \
-	dispatch_queue_t dq_specific_q; \
-	union {	\
-		uint32_t volatile dq_atomic_flags; \
-		DISPATCH_STRUCT_LITTLE_ENDIAN_2( \
-			uint16_t dq_atomic_bits, \
-			uint16_t dq_width \
-		); \
-	}; \
+	DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, \
+		const uint16_t dq_width, \
+		const uint16_t __dq_unused \
+	); \
 	uint32_t dq_side_suspend_cnt; \
-	DISPATCH_INTROSPECTION_QUEUE_HEADER; \
-	dispatch_unfair_lock_s dq_sidelock
-	/* LP64: 32bit hole on LP64 */
+	dispatch_unfair_lock_s dq_sidelock; \
+	union { \
+		dispatch_queue_t dq_specific_q; \
+		struct dispatch_source_refs_s *ds_refs; \
+		struct dispatch_timer_source_refs_s *ds_timer_refs; \
+		struct dispatch_mach_recv_refs_s *dm_recv_refs; \
+	}; \
+	DISPATCH_INTROSPECTION_QUEUE_HEADER
 
 #define DISPATCH_QUEUE_HEADER(x) \
 	struct dispatch_queue_s _as_dq[0]; \
 	_DISPATCH_QUEUE_HEADER(x)
 
-#define DISPATCH_QUEUE_ALIGN  __attribute__((aligned(8)))
-
-#define DISPATCH_QUEUE_WIDTH_POOL 0x7fff
-#define DISPATCH_QUEUE_WIDTH_MAX  0x7ffe
-#define DISPATCH_QUEUE_USES_REDIRECTION(width) \
-		({ uint16_t _width = (width); \
-		_width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
-
 #define DISPATCH_QUEUE_CACHELINE_PADDING \
 		char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
 #ifdef __LP64__
@@ -206,10 +201,15 @@
  *    the full width of the queue is used or reserved (depending on the context)
  *    In other words, the queue has reached or overflowed its capacity.
  */
-#define DISPATCH_QUEUE_WIDTH_FULL_BIT			0x0010000000000000ull
-#define DISPATCH_QUEUE_WIDTH_FULL				0x8000ull
+#define DISPATCH_QUEUE_WIDTH_FULL_BIT		0x0010000000000000ull
+#define DISPATCH_QUEUE_WIDTH_FULL			0x1000ull
+#define DISPATCH_QUEUE_WIDTH_POOL (DISPATCH_QUEUE_WIDTH_FULL - 1)
+#define DISPATCH_QUEUE_WIDTH_MAX  (DISPATCH_QUEUE_WIDTH_FULL - 2)
+#define DISPATCH_QUEUE_USES_REDIRECTION(width) \
+		({ uint16_t _width = (width); \
+		_width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
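
Worked example: with DISPATCH_QUEUE_WIDTH_FULL now 0x1000, a width-8 concurrent queue satisfies 1 < 8 < 0xfff and uses redirection, while a serial queue (width 1) and the global pool (width DISPATCH_QUEUE_WIDTH_POOL, i.e. 0xfff) do not.
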
 /*
- * w:  width (bits 51 - 37)
+ * w:  width (bits 51 - 40)
  *    This encodes how many work items are in flight. Barriers hold `dq_width`
  *    of them while they run. This is encoded as a signed offset with respect
  *    to full use, where the negative values represent how many available slots
@@ -218,19 +218,29 @@
  *
  *    When this value is positive, then `wo` is always set to 1.
  */
-#define DISPATCH_QUEUE_WIDTH_INTERVAL		0x0000002000000000ull
-#define DISPATCH_QUEUE_WIDTH_MASK			0x001fffe000000000ull
-#define DISPATCH_QUEUE_WIDTH_SHIFT			37
+#define DISPATCH_QUEUE_WIDTH_INTERVAL		0x0000010000000000ull
+#define DISPATCH_QUEUE_WIDTH_MASK			0x001fff0000000000ull
+#define DISPATCH_QUEUE_WIDTH_SHIFT			40
 /*
- * pb: pending barrier (bit 36)
+ * pb: pending barrier (bit 39)
  *    Drainers set this bit when they couldn't run the next work item and it is
  *    a barrier. When this bit is set, `dq_width - 1` work item slots are
  *    reserved so that no wakeup happens until the last work item in flight
  *    completes.
  */
-#define DISPATCH_QUEUE_PENDING_BARRIER		0x0000001000000000ull
+#define DISPATCH_QUEUE_PENDING_BARRIER		0x0000008000000000ull
 /*
- * d: dirty bit (bit 35)
+ * p: pended bit (bit 38)
+ *    Set when a drain lock has been pended. When this bit is set,
+ *    the drain lock is taken and ENQUEUED is never set.
+ *
+ *    This bit marks a queue that needs further processing but was kept pended
+ *    by an async drainer (not reenqueued) in the hope of being able to drain
+ *    it further later.
+ */
+#define DISPATCH_QUEUE_DRAIN_PENDED			0x0000004000000000ull
+/*
+ * d: dirty bit (bit 37)
  *    This bit is set when a queue transitions from empty to not empty.
  *    This bit is set before dq_items_head is set, with appropriate barriers.
  *    Any thread looking at a queue head is responsible for unblocking any
@@ -342,50 +352,31 @@
  *
  *    So on the async "acquire" side, there is no subtlety at all.
  */
-#define DISPATCH_QUEUE_DIRTY				0x0000000800000000ull
+#define DISPATCH_QUEUE_DIRTY				0x0000002000000000ull
 /*
- * qo: (bit 34)
- *    Set when a queue has a useful override set.
- *    This bit is only cleared when the final drain_try_unlock() succeeds.
- *
- *    When the queue dq_override is touched (overrides or-ed in), usually with
- *    _dispatch_queue_override_priority(), then the HAS_OVERRIDE bit is set
- *    with a release barrier and one of these three things happen next:
- *
- *    - the queue is enqueued, which will cause it to be drained, and the
- *      override to be handled by _dispatch_queue_drain_try_unlock().
- *      In rare cases it could cause the queue to be queued while empty though.
- *
- *    - the DIRTY bit is also set with a release barrier, which pairs with
- *      the handling of these bits by _dispatch_queue_drain_try_unlock(),
- *      so that dq_override is reset properly.
- *
- *    - the queue was suspended, and _dispatch_queue_resume() will handle the
- *      override as part of its wakeup sequence.
- */
-#define DISPATCH_QUEUE_HAS_OVERRIDE			0x0000000400000000ull
-/*
- * p: pended bit (bit 33)
- *    Set when a drain lock has been pended. When this bit is set,
- *    the drain lock is taken and ENQUEUED is never set.
- *
- *    This bit marks a queue that needs further processing but was kept pended
- *    by an async drainer (not reenqueued) in the hope of being able to drain
- *    it further later.
- */
-#define DISPATCH_QUEUE_DRAIN_PENDED			0x0000000200000000ull
-/*
- * e: enqueued bit (bit 32)
+ * e: enqueued bit (bit 36)
  *    Set when a queue is enqueued on its target queue
  */
-#define DISPATCH_QUEUE_ENQUEUED				0x0000000100000000ull
+#define DISPATCH_QUEUE_ENQUEUED				0x0000001000000000ull
+/*
+ * o: has override (bit 35)
+ *    Set when a queue has received a QOS override and needs to reset it.
+ *    This bit is only cleared when the final drain_try_unlock() succeeds.
+ */
+#define DISPATCH_QUEUE_RECEIVED_OVERRIDE	0x0000000800000000ull
+/*
+ * max_qos: max qos (bits 34 - 32)
+ *   This is the maximum qos that has been enqueued on the queue
+ */
+#define DISPATCH_QUEUE_MAX_QOS_MASK			0x0000000700000000ull
+#define DISPATCH_QUEUE_MAX_QOS_SHIFT		32
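
Decoding the max QoS back out of dq_state is a mask-and-shift with the two macros above; the tree wraps this in an inline helper, sketched here under an illustrative name:

	static inline uint32_t
	sketch_dq_state_max_qos(uint64_t dq_state)
	{
		return (uint32_t)((dq_state & DISPATCH_QUEUE_MAX_QOS_MASK) >>
				DISPATCH_QUEUE_MAX_QOS_SHIFT);
	}
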
 /*
  * dl: drain lock (bits 31-0)
  *    This is used by the normal drain to drain exclusively relative to other
  *    drain stealers (like the QoS Override codepath). It holds the identity
  *    (thread port) of the current drainer.
  */
-#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK	0x00000002ffffffffull
+#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK	(DISPATCH_QUEUE_DRAIN_PENDED | ~0u)
 #ifdef DLOCK_NOWAITERS_BIT
 #define DISPATCH_QUEUE_DRAIN_OWNER_MASK \
 		((uint64_t)(DLOCK_OWNER_MASK | DLOCK_NOFAILED_TRYLOCK_BIT))
@@ -393,7 +384,7 @@
 		(((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))\
 				^ DLOCK_NOWAITERS_BIT)
 #define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
-		(DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \
+		(DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_MAX_QOS_MASK | \
 				DLOCK_NOWAITERS_BIT)
 #else
 #define DISPATCH_QUEUE_DRAIN_OWNER_MASK \
@@ -401,7 +392,7 @@
 #define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \
 		((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))
 #define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
-		(DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \
+		(DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_MAX_QOS_MASK | \
 				DLOCK_WAITERS_BIT)
 #endif
 /*
@@ -497,12 +488,12 @@
 		(DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL)
 
 DISPATCH_CLASS_DECL(queue);
-#if !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
+#if !defined(__cplusplus) || !DISPATCH_INTROSPECTION
 struct dispatch_queue_s {
 	_DISPATCH_QUEUE_HEADER(queue);
 	DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only
-} DISPATCH_QUEUE_ALIGN;
-#endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
+} DISPATCH_ATOMIC64_ALIGN;
+#endif // !defined(__cplusplus) || !DISPATCH_INTROSPECTION
 
 DISPATCH_INTERNAL_SUBCLASS_DECL(queue_serial, queue);
 DISPATCH_INTERNAL_SUBCLASS_DECL(queue_concurrent, queue);
@@ -520,14 +511,12 @@
 	struct dispatch_source_s *_ds;
 	struct dispatch_mach_s *_dm;
 	struct dispatch_queue_specific_queue_s *_dqsq;
-	struct dispatch_timer_aggregate_s *_dta;
 #if USE_OBJC
 	os_mpsc_queue_t _ojbc_oq;
 	dispatch_queue_t _objc_dq;
 	dispatch_source_t _objc_ds;
 	dispatch_mach_t _objc_dm;
 	dispatch_queue_specific_queue_t _objc_dqsq;
-	dispatch_timer_aggregate_t _objc_dta;
 #endif
 } dispatch_queue_class_t __attribute__((__transparent_union__));
 
@@ -555,9 +544,11 @@
 	DISPATCH_QUEUE_WAKEUP_MGR,
 );
 
+void _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq,
+		dispatch_qos_t qos, dispatch_wakeup_flags_t flags, uint64_t dq_state);
 void _dispatch_queue_class_override_drainer(dispatch_queue_t dqu,
-		pthread_priority_t pp, dispatch_wakeup_flags_t flags);
-void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, pthread_priority_t pp,
+		dispatch_qos_t qos, dispatch_wakeup_flags_t flags);
+void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target);
 
 void _dispatch_queue_destroy(dispatch_queue_t dq);
@@ -569,9 +560,9 @@
 void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags);
 void _dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n);
 void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
-		pthread_priority_t pp);
+		dispatch_qos_t qos);
 void _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq);
-void _dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+void _dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags);
 dispatch_queue_t _dispatch_queue_serial_drain(dispatch_queue_t dq,
 		dispatch_invoke_flags_t flags, uint64_t *owned,
@@ -581,14 +572,15 @@
 		struct dispatch_object_s *dc);
 void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
 		dqsq);
-void _dispatch_root_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+void _dispatch_root_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags);
 void _dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq,
-		struct dispatch_object_s *dou, pthread_priority_t pp);
+		struct dispatch_object_s *dou, dispatch_qos_t qos
+		DISPATCH_PERF_MON_ARGS_PROTO);
 void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq);
-void _dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+void _dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags);
-void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags);
 void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq);
 void _dispatch_runloop_queue_dispose(dispatch_queue_t dq);
@@ -622,10 +614,9 @@
 size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf,
 		size_t bufsiz);
 
-#define DISPATCH_QUEUE_QOS_COUNT 6
-#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_QOS_COUNT * 2)
+#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_MAX * 2)
 
-// must be in lowest to highest qos order (as encoded in pthread_priority_t)
+// must be in lowest to highest qos order (as encoded in dispatch_qos_t)
 // overcommit qos index values need bit 1 set
 enum {
 	DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
@@ -648,13 +639,13 @@
 extern struct dispatch_queue_s _dispatch_mgr_q;
 void _dispatch_root_queues_init(void);
 
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-extern pthread_priority_t _dispatch_background_priority;
-extern pthread_priority_t _dispatch_user_initiated_priority;
+#if DISPATCH_DEBUG
+#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \
+		dispatch_assert_queue(&_dispatch_mgr_q)
+#else
+#define DISPATCH_ASSERT_ON_MANAGER_QUEUE()
 #endif
 
-typedef uint8_t _dispatch_qos_class_t;
-
 #pragma mark -
 #pragma mark dispatch_queue_attr_t
 
@@ -667,8 +658,7 @@
 DISPATCH_CLASS_DECL(queue_attr);
 struct dispatch_queue_attr_s {
 	OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr);
-	_dispatch_qos_class_t dqa_qos_class;
-	int8_t   dqa_relative_priority;
+	dispatch_priority_requested_t dqa_qos_and_relpri;
 	uint16_t dqa_overcommit:2;
 	uint16_t dqa_autorelease_frequency:2;
 	uint16_t dqa_concurrent:1;
@@ -920,7 +910,13 @@
 		mach_voucher_t kv);
 voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t pri,
 		voucher_t voucher, _dispatch_thread_set_self_t flags);
-
+#else
+static inline void
+_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri,
+		mach_voucher_t kv)
+{
+	(void)pri; (void)kv;
+}
 #endif
 #pragma mark -
 #pragma mark dispatch_apply_t
diff --git a/src/semaphore.c b/src/semaphore.c
index 4d232b7..fa6d21a 100644
--- a/src/semaphore.c
+++ b/src/semaphore.c
@@ -20,53 +20,6 @@
 
 #include "internal.h"
 
-// semaphores are too fundamental to use the dispatch_assume*() macros
-#if USE_WIN32_SEM
-// rdar://problem/8428132
-static DWORD best_resolution = 1; // 1ms
-
-DWORD
-_push_timer_resolution(DWORD ms)
-{
-	MMRESULT res;
-	static dispatch_once_t once;
-
-	if (ms > 16) {
-		// only update timer resolution if smaller than default 15.6ms
-		// zero means not updated
-		return 0;
-	}
-
-	// aim for the best resolution we can accomplish
-	dispatch_once(&once, ^{
-		TIMECAPS tc;
-		MMRESULT res;
-		res = timeGetDevCaps(&tc, sizeof(tc));
-		if (res == MMSYSERR_NOERROR) {
-			best_resolution = min(max(tc.wPeriodMin, best_resolution),
-					tc.wPeriodMax);
-		}
-	});
-
-	res = timeBeginPeriod(best_resolution);
-	if (res == TIMERR_NOERROR) {
-		return best_resolution;
-	}
-	// zero means not updated
-	return 0;
-}
-
-// match ms parameter to result from _push_timer_resolution
-void
-_pop_timer_resolution(DWORD ms)
-{
-	if (ms) {
-		timeEndPeriod(ms);
-	}
-}
-#endif	/* USE_WIN32_SEM */
-
-
 DISPATCH_WEAK // rdar://problem/8503746
 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema);
 
@@ -79,36 +32,9 @@
 	struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr;
 
 	dsema->do_next = DISPATCH_OBJECT_LISTLESS;
-	dsema->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-			false);
+	dsema->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
 	dsema->dsema_value = value;
-#if USE_POSIX_SEM
-	int ret = sem_init(&dsema->dsema_sem, 0, 0);
-	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-#endif
-}
-
-static void
-_dispatch_semaphore_class_dispose(dispatch_semaphore_class_t dsemau)
-{
-	struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr;
-
-#if USE_MACH_SEM
-	kern_return_t kr;
-	if (dsema->dsema_port) {
-		kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
-		DISPATCH_VERIFY_MIG(kr);
-		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
-	}
-	dsema->dsema_port = MACH_PORT_DEAD;
-#elif USE_POSIX_SEM
-	int ret = sem_destroy(&dsema->dsema_sem);
-	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-#elif USE_WIN32_SEM
-	if (dsema->dsema_handle) {
-		CloseHandle(dsema->dsema_handle);
-	}
-#endif
+	_dispatch_sema4_init(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
 }
 
 #pragma mark -
@@ -133,59 +59,6 @@
 	return dsema;
 }
 
-#if USE_MACH_SEM
-static void
-_dispatch_semaphore_create_port(semaphore_t *s4)
-{
-	kern_return_t kr;
-	semaphore_t tmp;
-
-	if (*s4) {
-		return;
-	}
-	_dispatch_fork_becomes_unsafe();
-
-	// lazily allocate the semaphore port
-
-	// Someday:
-	// 1) Switch to a doubly-linked FIFO in user-space.
-	// 2) User-space timers for the timeout.
-	// 3) Use the per-thread semaphore port.
-
-	while ((kr = semaphore_create(mach_task_self(), &tmp,
-			SYNC_POLICY_FIFO, 0))) {
-		DISPATCH_VERIFY_MIG(kr);
-		_dispatch_temporary_resource_shortage();
-	}
-
-	if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
-		kr = semaphore_destroy(mach_task_self(), tmp);
-		DISPATCH_VERIFY_MIG(kr);
-		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
-	}
-}
-#elif USE_WIN32_SEM
-static void
-_dispatch_semaphore_create_handle(HANDLE *s4)
-{
-	HANDLE tmp;
-
-	if (*s4) {
-		return;
-	}
-
-	// lazily allocate the semaphore port
-
-	while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) {
-		_dispatch_temporary_resource_shortage();
-	}
-
-	if (!os_atomic_cmpxchg(s4, 0, tmp)) {
-		CloseHandle(tmp);
-	}
-}
-#endif
-
 void
 _dispatch_semaphore_dispose(dispatch_object_t dou)
 {
@@ -196,7 +69,7 @@
 				"Semaphore object deallocated while in use");
 	}
 
-	_dispatch_semaphore_class_dispose(dsema);
+	_dispatch_sema4_dispose(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
 }
 
 size_t
@@ -210,7 +83,7 @@
 	offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset);
 #if USE_MACH_SEM
 	offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%x, ",
-			dsema->dsema_port);
+			dsema->dsema_sema);
 #endif
 	offset += dsnprintf(&buf[offset], bufsiz - offset,
 			"value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig);
@@ -221,18 +94,8 @@
 long
 _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
 {
-#if USE_MACH_SEM
-	_dispatch_semaphore_create_port(&dsema->dsema_port);
-	kern_return_t kr = semaphore_signal(dsema->dsema_port);
-	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
-#elif USE_POSIX_SEM
-	int ret = sem_post(&dsema->dsema_sem);
-	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-#elif USE_WIN32_SEM
-	_dispatch_semaphore_create_handle(&dsema->dsema_handle);
-	int ret = ReleaseSemaphore(dsema->dsema_handle, 1, NULL);
-	dispatch_assume(ret);
-#endif
+	_dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
+	_dispatch_sema4_signal(&dsema->dsema_sema, 1);
 	return 1;
 }
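
The whole slow path now reduces to the portable _dispatch_sema4_* shim. A minimal signal/wait pair built on the same calls, relying on the lazy-create contract the call sites above use (the sketch_* names are illustrative):

	static _dispatch_sema4_t sketch_sema; // zero-initialized: not yet created

	static void
	sketch_signal_one(void)
	{
		_dispatch_sema4_create(&sketch_sema, _DSEMA4_POLICY_FIFO); // idempotent
		_dispatch_sema4_signal(&sketch_sema, 1);
	}

	static void
	sketch_wait_one(void)
	{
		_dispatch_sema4_create(&sketch_sema, _DSEMA4_POLICY_FIFO);
		_dispatch_sema4_wait(&sketch_sema);
	}
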
 
@@ -257,61 +120,12 @@
 {
 	long orig;
 
-#if USE_MACH_SEM
-	mach_timespec_t _timeout;
-	kern_return_t kr;
-#elif USE_POSIX_SEM
-	struct timespec _timeout;
-	int ret;
-#elif USE_WIN32_SEM
-	uint64_t nsec;
-	DWORD msec;
-	DWORD resolution;
-	DWORD wait_result;
-#endif
-
-#if USE_MACH_SEM
-	_dispatch_semaphore_create_port(&dsema->dsema_port);
-#elif USE_WIN32_SEM
-	_dispatch_semaphore_create_handle(&dsema->dsema_handle);
-#endif
-
+	_dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
 	switch (timeout) {
 	default:
-#if USE_MACH_SEM
-		do {
-			uint64_t nsec = _dispatch_timeout(timeout);
-			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
-			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
-			kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout));
-		} while (kr == KERN_ABORTED);
-
-		if (kr != KERN_OPERATION_TIMED_OUT) {
-			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+		if (!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) {
 			break;
 		}
-#elif USE_POSIX_SEM
-		do {
-			uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout);
-			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
-			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
-			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
-		} while (ret == -1 && errno == EINTR);
-
-		if (!(ret == -1 && errno == ETIMEDOUT)) {
-			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-			break;
-		}
-#elif USE_WIN32_SEM
-		nsec = _dispatch_timeout(timeout);
-		msec = (DWORD)(nsec / (uint64_t)1000000);
-		resolution = _push_timer_resolution(msec);
-		wait_result = WaitForSingleObject(dsema->dsema_handle, msec);
-		_pop_timer_resolution(resolution);
-		if (wait_result != WAIT_TIMEOUT) {
-			break;
-		}
-#endif
 		// Fall through and try to undo what the fast path did to
 		// dsema->dsema_value
 	case DISPATCH_TIME_NOW:
@@ -319,30 +133,13 @@
 		while (orig < 0) {
 			if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1,
 					&orig, relaxed)) {
-#if USE_MACH_SEM
-				return KERN_OPERATION_TIMED_OUT;
-#elif USE_POSIX_SEM || USE_WIN32_SEM
-				errno = ETIMEDOUT;
-				return -1;
-#endif
+				return _DSEMA4_TIMEOUT();
 			}
 		}
 		// Another thread called semaphore_signal().
 		// Fall through and drain the wakeup.
 	case DISPATCH_TIME_FOREVER:
-#if USE_MACH_SEM
-		do {
-			kr = semaphore_wait(dsema->dsema_port);
-		} while (kr == KERN_ABORTED);
-		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
-#elif USE_POSIX_SEM
-		do {
-			ret = sem_wait(&dsema->dsema_sem);
-		} while (ret != 0);
-		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-#elif USE_WIN32_SEM
-		WaitForSingleObject(dsema->dsema_handle, INFINITE);
-#endif
+		_dispatch_sema4_wait(&dsema->dsema_sema);
 		break;
 	}
 	return 0;
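
The DISPATCH_TIME_NOW arm above hands the fast path's decrement back with a CAS loop before reporting the timeout. The same pattern in isolation, as a standalone C11 sketch:

	#include <stdatomic.h>
	#include <stdbool.h>

	// returns true if the waiter handed its decrement back (i.e. timed out)
	static bool
	sketch_undo_decrement(_Atomic long *value)
	{
		long orig = atomic_load_explicit(value, memory_order_relaxed);
		while (orig < 0) {
			if (atomic_compare_exchange_weak_explicit(value, &orig, orig + 1,
					memory_order_relaxed, memory_order_relaxed)) {
				return true;  // undone: report the timeout
			}
			// orig was refreshed by the failed CAS; if it went >= 0, another
			// thread signaled and the waiter must consume that wakeup instead
		}
		return false;
	}
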
@@ -416,25 +213,8 @@
 	rval = (long)os_atomic_xchg2o(dg, dg_waiters, 0, relaxed);
 	if (rval) {
 		// wake group waiters
-#if USE_MACH_SEM
-		_dispatch_semaphore_create_port(&dg->dg_port);
-		do {
-			kern_return_t kr = semaphore_signal(dg->dg_port);
-			DISPATCH_GROUP_VERIFY_KR(kr);
-		} while (--rval);
-#elif USE_POSIX_SEM
-		do {
-			int ret = sem_post(&dg->dg_sem);
-			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-		} while (--rval);
-#elif USE_WIN32_SEM
-		_dispatch_semaphore_create_handle(&dg->dg_handle);
-		int ret;
-		ret = ReleaseSemaphore(dg->dg_handle, rval, NULL);
-		dispatch_assume(ret);
-#else
-#error "No supported semaphore type"
-#endif
+		_dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO);
+		_dispatch_sema4_signal(&dg->dg_sema, rval);
 	}
 	if (head) {
 		// async group notify blocks
@@ -475,7 +255,7 @@
 				"Group object deallocated while in use");
 	}
 
-	_dispatch_semaphore_class_dispose(dg);
+	_dispatch_sema4_dispose(&dg->dg_sema, _DSEMA4_POLICY_FIFO);
 }
 
 size_t
@@ -489,7 +269,7 @@
 	offset += _dispatch_object_debug_attr(dg, &buf[offset], bufsiz - offset);
 #if USE_MACH_SEM
 	offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%x, ",
-			dg->dg_port);
+			dg->dg_sema);
 #endif
 	offset += dsnprintf(&buf[offset], bufsiz - offset,
 			"count = %ld, waiters = %d }", dg->dg_value, dg->dg_waiters);
@@ -503,19 +283,6 @@
 	long value;
 	int orig_waiters;
 
-#if USE_MACH_SEM
-	mach_timespec_t _timeout;
-	kern_return_t kr;
-#elif USE_POSIX_SEM // KVV
-	struct timespec _timeout;
-	int ret;
-#elif USE_WIN32_SEM // KVV
-	uint64_t nsec;
-	DWORD msec;
-	DWORD resolution;
-	DWORD wait_result;
-#endif
-
 	// check before we cause another signal to be sent by incrementing
 	// dg->dg_waiters
 	value = os_atomic_load2o(dg, dg_value, ordered); // 19296565
@@ -533,48 +300,12 @@
 		timeout = DISPATCH_TIME_FOREVER;
 	}
 
-#if USE_MACH_SEM
-	_dispatch_semaphore_create_port(&dg->dg_port);
-#elif USE_WIN32_SEM
-	_dispatch_semaphore_create_handle(&dg->dg_handle);
-#endif
-
+	_dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO);
 	switch (timeout) {
 	default:
-#if USE_MACH_SEM
-		do {
-			uint64_t nsec = _dispatch_timeout(timeout);
-			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
-			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
-			kr = slowpath(semaphore_timedwait(dg->dg_port, _timeout));
-		} while (kr == KERN_ABORTED);
-
-		if (kr != KERN_OPERATION_TIMED_OUT) {
-			DISPATCH_GROUP_VERIFY_KR(kr);
+		if (!_dispatch_sema4_timedwait(&dg->dg_sema, timeout)) {
 			break;
 		}
-#elif USE_POSIX_SEM
-		do {
-			uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout);
-			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
-			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
-			ret = slowpath(sem_timedwait(&dg->dg_sem, &_timeout));
-		} while (ret == -1 && errno == EINTR);
-
-		if (!(ret == -1 && errno == ETIMEDOUT)) {
-			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-			break;
-		}
-#elif USE_WIN32_SEM
-		nsec = _dispatch_timeout(timeout);
-		msec = (DWORD)(nsec / (uint64_t)1000000);
-		resolution = _push_timer_resolution(msec);
-		wait_result = WaitForSingleObject(dg->dg_handle, msec);
-		_pop_timer_resolution(resolution);
-		if (wait_result != WAIT_TIMEOUT) {
-			break;
-		}
-#endif
 		// Fall through and try to undo the earlier change to
 		// dg->dg_waiters
 	case DISPATCH_TIME_NOW:
@@ -582,30 +313,13 @@
 		while (orig_waiters) {
 			if (os_atomic_cmpxchgvw2o(dg, dg_waiters, orig_waiters,
 					orig_waiters - 1, &orig_waiters, relaxed)) {
-#if USE_MACH_SEM
-				return KERN_OPERATION_TIMED_OUT;
-#elif USE_POSIX_SEM || USE_WIN32_SEM
-				errno = ETIMEDOUT;
-				return -1;
-#endif
+				return _DSEMA4_TIMEOUT();
 			}
 		}
-		// Another thread called semaphore_signal().
+		// Another thread is running _dispatch_group_wake()
 		// Fall through and drain the wakeup.
 	case DISPATCH_TIME_FOREVER:
-#if USE_MACH_SEM
-		do {
-			kr = semaphore_wait(dg->dg_port);
-		} while (kr == KERN_ABORTED);
-		DISPATCH_GROUP_VERIFY_KR(kr);
-#elif USE_POSIX_SEM
-		do {
-			ret = sem_wait(&dg->dg_sem);
-		} while (ret == -1 && errno == EINTR);
-		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-#elif USE_WIN32_SEM
-		WaitForSingleObject(dg->dg_handle, INFINITE);
-#endif
+		_dispatch_sema4_wait(&dg->dg_sema);
 		break;
 	}
 	return 0;
@@ -618,12 +332,7 @@
 		return 0;
 	}
 	if (timeout == 0) {
-#if USE_MACH_SEM
-		return KERN_OPERATION_TIMED_OUT;
-#elif USE_POSIX_SEM || USE_WIN32_SEM
-		errno = ETIMEDOUT;
-		return (-1);
-#endif
+		return _DSEMA4_TIMEOUT();
 	}
 	return _dispatch_group_wait_slow(dg, timeout);
 }
diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h
index dceda6d..2abda38 100644
--- a/src/semaphore_internal.h
+++ b/src/semaphore_internal.h
@@ -29,20 +29,10 @@
 
 struct dispatch_queue_s;
 
-#if USE_MACH_SEM
-#define DISPATCH_OS_SEMA_FIELD(base)	semaphore_t base##_port
-#elif USE_POSIX_SEM
-#define DISPATCH_OS_SEMA_FIELD(base)	sem_t base##_sem
-#elif USE_WIN32_SEM
-#define DISPATCH_OS_SEMA_FIELD(base)	HANDLE base##_handle
-#else
-#error "No supported semaphore type"
-#endif
-
 #define DISPATCH_SEMAPHORE_HEADER(cls, ns) \
 	DISPATCH_OBJECT_HEADER(cls); \
 	long volatile ns##_value; \
-	DISPATCH_OS_SEMA_FIELD(ns)
+	_dispatch_sema4_t ns##_sema
 
 struct dispatch_semaphore_header_s {
 	DISPATCH_SEMAPHORE_HEADER(semaphore, dsema);
diff --git a/src/shims.h b/src/shims.h
index 30d8929..ebf75f5 100644
--- a/src/shims.h
+++ b/src/shims.h
@@ -28,71 +28,6 @@
 #define __DISPATCH_OS_SHIMS__
 
 #include <pthread.h>
-#if HAVE_PTHREAD_QOS_H && __has_include(<pthread/qos.h>)
-#include <pthread/qos.h>
-#if __has_include(<pthread/qos_private.h>)
-#include <pthread/qos_private.h>
-#define _DISPATCH_QOS_CLASS_USER_INTERACTIVE QOS_CLASS_USER_INTERACTIVE
-#define _DISPATCH_QOS_CLASS_USER_INITIATED QOS_CLASS_USER_INITIATED
-#define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_DEFAULT
-#define _DISPATCH_QOS_CLASS_UTILITY QOS_CLASS_UTILITY
-#define _DISPATCH_QOS_CLASS_BACKGROUND QOS_CLASS_BACKGROUND
-#define _DISPATCH_QOS_CLASS_UNSPECIFIED QOS_CLASS_UNSPECIFIED
-#else // pthread/qos_private.h
-typedef unsigned long pthread_priority_t;
-#endif // pthread/qos_private.h
-#if __has_include(<sys/qos_private.h>)
-#include <sys/qos_private.h>
-#define _DISPATCH_QOS_CLASS_MAINTENANCE QOS_CLASS_MAINTENANCE
-#else // sys/qos_private.h
-#define _DISPATCH_QOS_CLASS_MAINTENANCE	0x05
-#endif // sys/qos_private.h
-#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
-#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000
-#endif
-#ifndef _PTHREAD_PRIORITY_INHERIT_FLAG
-#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000
-#endif
-#ifndef _PTHREAD_PRIORITY_ROOTQUEUE_FLAG
-#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000
-#endif
-#ifndef _PTHREAD_PRIORITY_SCHED_PRI_FLAG
-#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000
-#endif
-#ifndef _PTHREAD_PRIORITY_ENFORCE_FLAG
-#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000
-#endif
-#ifndef _PTHREAD_PRIORITY_OVERRIDE_FLAG
-#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000
-#endif
-#ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
-#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000
-#endif
-#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
-#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000
-#endif
-#ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG
-#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000
-#endif
-
-#else // HAVE_PTHREAD_QOS_H
-typedef unsigned int qos_class_t;
-typedef unsigned long pthread_priority_t;
-#define QOS_MIN_RELATIVE_PRIORITY (-15)
-#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff)
-#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00
-#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull)
-#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff
-#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000
-#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000
-#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000
-#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000
-#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000
-#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000
-#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000
-#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000
-#endif // HAVE_PTHREAD_QOS_H
-
 #ifdef __linux__
 #include "shims/linux_stubs.h"
 #endif
@@ -101,20 +36,8 @@
 #include "shims/android_stubs.h"
 #endif
 
-typedef uint32_t dispatch_priority_t;
-#define DISPATCH_SATURATED_OVERRIDE ((dispatch_priority_t)UINT32_MAX)
+#include "shims/priority.h"
 
-#ifndef _DISPATCH_QOS_CLASS_USER_INTERACTIVE
-enum {
-	_DISPATCH_QOS_CLASS_USER_INTERACTIVE = 0x21,
-	_DISPATCH_QOS_CLASS_USER_INITIATED = 0x19,
-	_DISPATCH_QOS_CLASS_DEFAULT = 0x15,
-	_DISPATCH_QOS_CLASS_UTILITY = 0x11,
-	_DISPATCH_QOS_CLASS_BACKGROUND = 0x09,
-	_DISPATCH_QOS_CLASS_MAINTENANCE = 0x05,
-	_DISPATCH_QOS_CLASS_UNSPECIFIED = 0x00,
-};
-#endif // _DISPATCH_QOS_CLASS_USER_INTERACTIVE
 #if HAVE_PTHREAD_WORKQUEUES
 #if __has_include(<pthread/workqueue_private.h>)
 #include <pthread/workqueue_private.h>
@@ -231,6 +154,8 @@
 #ifndef __OS_INTERNAL_ATOMIC__
 #include "shims/atomic.h"
 #endif
+#define DISPATCH_ATOMIC64_ALIGN  __attribute__((aligned(8)))
+
 #include "shims/atomic_sfb.h"
 #include "shims/tsd.h"
 #include "shims/yield.h"
diff --git a/src/shims/android_stubs.h b/src/shims/android_stubs.h
index f9c164c..c8032a3 100644
--- a/src/shims/android_stubs.h
+++ b/src/shims/android_stubs.h
@@ -16,4 +16,8 @@
 #ifndef __DISPATCH__ANDROID__STUBS__INTERNAL
 #define __DISPATCH__ANDROID__STUBS__INTERNAL
 
+#if !__has_feature(c_static_assert)
+#define _Static_assert(...)
+#endif
+
 #endif /* __DISPATCH__ANDROID__STUBS__INTERNAL */
diff --git a/src/shims/atomic.h b/src/shims/atomic.h
index 5199477..fae05b7 100644
--- a/src/shims/atomic.h
+++ b/src/shims/atomic.h
@@ -27,114 +27,50 @@
 #ifndef __DISPATCH_SHIMS_ATOMIC__
 #define __DISPATCH_SHIMS_ATOMIC__
 
-#if !__has_extension(c_atomic) || \
-		!__has_extension(c_generic_selections) || \
-		!__has_include(<stdatomic.h>)
-#error libdispatch requires C11 with <stdatomic.h> and generic selections
+#if !__has_extension(c_atomic) || !__has_include(<stdatomic.h>)
+#error libdispatch requires C11 with <stdatomic.h>
 #endif
 
 #include <stdatomic.h>
 
-#define memory_order_ordered memory_order_seq_cst
+#define memory_order_ordered    memory_order_seq_cst
+#define memory_order_dependency memory_order_acquire
 
-#define _os_atomic_basetypeof(p) \
-		typeof(*_Generic((p), \
-		char*: (char*)(p), \
-		volatile char*: (char*)(p), \
-		signed char*: (signed char*)(p), \
-		volatile signed char*: (signed char*)(p), \
-		unsigned char*: (unsigned char*)(p), \
-		volatile unsigned char*: (unsigned char*)(p), \
-		short*: (short*)(p), \
-		volatile short*: (short*)(p), \
-		unsigned short*: (unsigned short*)(p), \
-		volatile unsigned short*: (unsigned short*)(p), \
-		int*: (int*)(p), \
-		volatile int*: (int*)(p), \
-		unsigned int*: (unsigned int*)(p), \
-		volatile unsigned int*: (unsigned int*)(p), \
-		long*: (long*)(p), \
-		volatile long*: (long*)(p), \
-		unsigned long*: (unsigned long*)(p), \
-		volatile unsigned long*: (unsigned long*)(p), \
-		long long*: (long long*)(p), \
-		volatile long long*: (long long*)(p), \
-		unsigned long long*: (unsigned long long*)(p), \
-		volatile unsigned long long*: (unsigned long long*)(p), \
-		const void**: (const void**)(p), \
-		const void*volatile*: (const void**)(p), \
-		default: (void**)(p)))
+#define os_atomic(type) type _Atomic
 
 #define _os_atomic_c11_atomic(p) \
-		_Generic((p), \
-		char*: (_Atomic(char)*)(p), \
-		volatile char*: (volatile _Atomic(char)*)(p), \
-		signed char*: (_Atomic(signed char)*)(p), \
-		volatile signed char*: (volatile _Atomic(signed char)*)(p), \
-		unsigned char*: (_Atomic(unsigned char)*)(p), \
-		volatile unsigned char*: (volatile _Atomic(unsigned char)*)(p), \
-		short*: (_Atomic(short)*)(p), \
-		volatile short*: (volatile _Atomic(short)*)(p), \
-		unsigned short*: (_Atomic(unsigned short)*)(p), \
-		volatile unsigned short*: (volatile _Atomic(unsigned short)*)(p), \
-		int*: (_Atomic(int)*)(p), \
-		volatile int*: (volatile _Atomic(int)*)(p), \
-		unsigned int*: (_Atomic(unsigned int)*)(p), \
-		volatile unsigned int*: (volatile _Atomic(unsigned int)*)(p), \
-		long*: (_Atomic(long)*)(p), \
-		volatile long*: (volatile _Atomic(long)*)(p), \
-		unsigned long*: (_Atomic(unsigned long)*)(p), \
-		volatile unsigned long*: (volatile _Atomic(unsigned long)*)(p), \
-		long long*: (_Atomic(long long)*)(p), \
-		volatile long long*: (volatile _Atomic(long long)*)(p), \
-		unsigned long long*: (_Atomic(unsigned long long)*)(p), \
-		volatile unsigned long long*: \
-				(volatile _Atomic(unsigned long long)*)(p), \
-		const void**: (_Atomic(const void*)*)(p), \
-		const void*volatile*: (volatile _Atomic(const void*)*)(p), \
-		default: (volatile _Atomic(void*)*)(p))
+		((typeof(*(p)) _Atomic *)(p))
 
-#define os_atomic_thread_fence(m)  atomic_thread_fence(memory_order_##m)
-// see comment in dispatch_once.c
-#define os_atomic_maximally_synchronizing_barrier() \
-		atomic_thread_fence(memory_order_seq_cst)
+// This removes the _Atomic and volatile qualifiers on the type of *p
+#define _os_atomic_basetypeof(p) \
+		typeof(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed))
 
 #define os_atomic_load(p, m) \
-		({ _os_atomic_basetypeof(p) _r = \
-		atomic_load_explicit(_os_atomic_c11_atomic(p), \
-		memory_order_##m); (typeof(*(p)))_r; })
+		atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m)
 #define os_atomic_store(p, v, m) \
-		({ _os_atomic_basetypeof(p) _v = (v); \
-		atomic_store_explicit(_os_atomic_c11_atomic(p), _v, \
-		memory_order_##m); })
+		atomic_store_explicit(_os_atomic_c11_atomic(p), v, memory_order_##m)
 #define os_atomic_xchg(p, v, m) \
-		({ _os_atomic_basetypeof(p) _v = (v), _r = \
-		atomic_exchange_explicit(_os_atomic_c11_atomic(p), _v, \
-		memory_order_##m); (typeof(*(p)))_r; })
+		atomic_exchange_explicit(_os_atomic_c11_atomic(p), v, memory_order_##m)
 #define os_atomic_cmpxchg(p, e, v, m) \
-		({ _os_atomic_basetypeof(p) _v = (v), _r = (e); \
+		({ _os_atomic_basetypeof(p) _r = (e); \
 		atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \
-		&_r, _v, memory_order_##m, \
-		memory_order_relaxed); })
+		&_r, v, memory_order_##m, memory_order_relaxed); })
 #define os_atomic_cmpxchgv(p, e, v, g, m) \
-		({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
+		({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \
 		atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \
-		&_r, _v, memory_order_##m, \
-		memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; })
+		&_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; })
 #define os_atomic_cmpxchgvw(p, e, v, g, m) \
-		({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
+		({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \
 		atomic_compare_exchange_weak_explicit(_os_atomic_c11_atomic(p), \
-		&_r, _v, memory_order_##m, \
-		memory_order_relaxed); *(g) = (typeof(*(p)))_r;  _b; })
+		&_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r;  _b; })
 
 #define _os_atomic_c11_op(p, v, m, o, op) \
 		({ _os_atomic_basetypeof(p) _v = (v), _r = \
 		atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \
 		memory_order_##m); (typeof(*(p)))(_r op _v); })
 #define _os_atomic_c11_op_orig(p, v, m, o, op) \
-		({ _os_atomic_basetypeof(p) _v = (v), _r = \
-		atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \
-		memory_order_##m); (typeof(*(p)))_r; })
+		atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), v, \
+		memory_order_##m)
 #define os_atomic_add(p, v, m) \
 		_os_atomic_c11_op((p), (v), m, add, +)
 #define os_atomic_add_orig(p, v, m) \
@@ -156,22 +92,13 @@
 #define os_atomic_xor_orig(p, v, m) \
 		_os_atomic_c11_op_orig((p), (v), m, xor, ^)
 
-#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
-		bool _result = false; \
-		typeof(p) _p = (p); \
-		ov = os_atomic_load(_p, relaxed); \
-		do { \
-			__VA_ARGS__; \
-			_result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \
-		} while (os_unlikely(!_result)); \
-		_result; \
-	})
-#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \
-		os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__)
-#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \
-		({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); })
-#define os_atomic_rmw_loop_give_up(expr) \
-		os_atomic_rmw_loop_give_up_with_fence(relaxed, expr)
+#define os_atomic_force_dependency_on(p, e) (p)
+#define os_atomic_load_with_dependency_on(p, e) \
+		os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed)
+#define os_atomic_load_with_dependency_on2o(p, f, e) \
+		os_atomic_load_with_dependency_on(&(p)->f, e)
+
+#define os_atomic_thread_fence(m)  atomic_thread_fence(memory_order_##m)
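
In this fallback the dependency argument is erased and the load degrades to a plain relaxed load; a consumer would still write it as below so that ports with real dependency ordering can honor it (struct and helper names are illustrative):

	struct sketch_node { void *payload; };

	static void *
	sketch_read_payload(struct sketch_node *node, uintptr_t tail)
	{
		// `tail` is a value obtained with acquire ordering; threading it
		// through the macro documents that the payload load is ordered
		// after it, even though this fallback simply discards it
		return os_atomic_load_with_dependency_on(&node->payload, tail);
	}
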
 
 #define os_atomic_load2o(p, f, m) \
 		os_atomic_load(&(p)->f, m)
@@ -223,28 +150,21 @@
 #define os_atomic_dec_orig2o(p, f, m) \
 		os_atomic_sub_orig2o(p, f, 1, m)
 
-#if defined(__x86_64__) || defined(__i386__)
-#undef os_atomic_maximally_synchronizing_barrier
-#ifdef __LP64__
-#define os_atomic_maximally_synchronizing_barrier() \
-		({ unsigned long _clbr; __asm__ __volatile__( \
-		"cpuid" \
-		: "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); })
-#else
-#ifdef __llvm__
-#define os_atomic_maximally_synchronizing_barrier() \
-		({ unsigned long _clbr; __asm__ __volatile__( \
-		"cpuid" \
-		: "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); })
-#else // gcc does not allow inline i386 asm to clobber ebx
-#define os_atomic_maximally_synchronizing_barrier() \
-		({ unsigned long _clbr; __asm__ __volatile__( \
-		"pushl	%%ebx\n\t" \
-		"cpuid\n\t" \
-		"popl	%%ebx" \
-		: "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); })
-#endif
-#endif
-#endif // defined(__x86_64__) || defined(__i386__)
+#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
+		bool _result = false; \
+		typeof(p) _p = (p); \
+		ov = os_atomic_load(_p, relaxed); \
+		do { \
+			__VA_ARGS__; \
+			_result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \
+		} while (os_unlikely(!_result)); \
+		_result; \
+	})
+#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \
+		os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__)
+#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \
+		({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); })
+#define os_atomic_rmw_loop_give_up(expr) \
+		os_atomic_rmw_loop_give_up_with_fence(relaxed, expr)
 
 #endif // __DISPATCH_SHIMS_ATOMIC__
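
With the declared-atomic model above, a field is spelled os_atomic(type) and every macro forwards straight to C11 instead of going through the old _Generic tables. An illustrative use (struct and helper are not from the tree):

	#include <stdint.h>

	struct sketch_refcount {
		os_atomic(uint32_t) ref_cnt;
	};

	static uint32_t
	sketch_retain(struct sketch_refcount *obj)
	{
		// expands to atomic_fetch_add_explicit(...) + 1: the updated value
		return os_atomic_add(&obj->ref_cnt, 1u, relaxed);
	}
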
diff --git a/src/shims/linux_stubs.h b/src/shims/linux_stubs.h
index 4b05be9..ec68417 100644
--- a/src/shims/linux_stubs.h
+++ b/src/shims/linux_stubs.h
@@ -39,35 +39,15 @@
 
 typedef uint32_t mach_error_t;
 
-typedef uint32_t mach_vm_size_t;
-
 typedef uint32_t mach_msg_return_t;
 
 typedef uint32_t mach_msg_bits_t;
 
-typedef uintptr_t mach_vm_address_t;
+typedef void *dispatch_mach_msg_t;
 
-typedef uint32_t dispatch_mach_msg_t;
+typedef uint64_t firehose_activity_id_t;
 
-typedef uint32_t dispatch_mach_t;
-
-typedef uint32_t dispatch_mach_reason_t;
-
-typedef uint32_t voucher_activity_mode_t;
-
-typedef uint32_t voucher_activity_trace_id_t;
-
-typedef uint32_t voucher_activity_id_t;
-
-typedef uint32_t voucher_activity_flag_t;
-
-typedef struct { } mach_msg_header_t;
-
-
-typedef void (*dispatch_mach_handler_function_t)(void*, dispatch_mach_reason_t,
-						 dispatch_mach_msg_t, mach_error_t);
-
-typedef void (*dispatch_mach_msg_destructor_t)(void*);
+typedef void *mach_msg_header_t;
 
 // Print a warning when an unported code path executes.
 #define LINUX_PORT_ERROR()  do { \
@@ -78,38 +58,9 @@
  * Stub out defines for other missing types
  */
 
-#if __linux__
-// we fall back to use kevent
-#define kevent64_s kevent
-#define kevent64(kq,cl,nc,el,ne,f,to)  kevent(kq,cl,nc,el,ne,to)
-#endif
-
 // SIZE_T_MAX should not be hardcoded like this here.
 #ifndef SIZE_T_MAX
 #define SIZE_T_MAX (~(size_t)0)
 #endif
 
-// Define to 0 the NOTE_ values that are not present on Linux.
-// Revisit this...would it be better to ifdef out the uses instead??
-
-// The following values are passed as part of the EVFILT_TIMER requests
-
-#define IGNORE_KEVENT64_EXT   /* will force the kevent64_s.ext[] to not be used -> leeway ignored */
-
-#ifndef NOTE_SECONDS
-#define NOTE_SECONDS	0x01
-#define NOTE_USECONDS	0x02
-#define NOTE_NSECONDS	0x04
-#define NOTE_ABSOLUTE	0x08
-#define KEVENT_NSEC_NOT_SUPPORTED
-#endif
-#define NOTE_CRITICAL	0x10
-#define NOTE_BACKGROUND	0x20
-#define NOTE_LEEWAY	0x40
-
-// need to catch the following usage if it happens ..
-// we simply return '0' as a value probably not correct
-
-#define NOTE_VM_PRESSURE ({LINUX_PORT_ERROR(); 0;})
-
 #endif
diff --git a/src/shims/lock.c b/src/shims/lock.c
index 983fe47..7aa94db 100644
--- a/src/shims/lock.c
+++ b/src/shims/lock.c
@@ -49,6 +49,264 @@
 }
 #endif
 
+#pragma mark - semaphores
+
+#if USE_MACH_SEM
+#if __has_include(<os/semaphore_private.h>)
+#include <os/semaphore_private.h>
+#define DISPATCH_USE_OS_SEMAPHORE_CACHE 1
+#else
+#define DISPATCH_USE_OS_SEMAPHORE_CACHE 0
+#endif
+
+#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
+		DISPATCH_VERIFY_MIG(x); \
+		if (unlikely((x) == KERN_INVALID_NAME)) { \
+			DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \
+		} else if (unlikely(x)) { \
+			DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
+		} \
+	} while (0)
+
+void
+_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy)
+{
+	semaphore_t tmp = MACH_PORT_NULL;
+
+	_dispatch_fork_becomes_unsafe();
+
+	// lazily allocate the semaphore port
+
+	// Someday:
+	// 1) Switch to a doubly-linked FIFO in user-space.
+	// 2) User-space timers for the timeout.
+
+#if DISPATCH_USE_OS_SEMAPHORE_CACHE
+	if (policy == _DSEMA4_POLICY_FIFO) {
+		tmp = (_dispatch_sema4_t)os_get_cached_semaphore();
+		if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) {
+			os_put_cached_semaphore((os_semaphore_t)tmp);
+		}
+		return;
+	}
+#endif
+
+	kern_return_t kr = semaphore_create(mach_task_self(), &tmp, policy, 0);
+	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+
+	if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) {
+		kr = semaphore_destroy(mach_task_self(), tmp);
+		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+	}
+}
+
+void
+_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy)
+{
+	semaphore_t sema_port = *sema;
+	*sema = MACH_PORT_DEAD;
+#if DISPATCH_USE_OS_SEMAPHORE_CACHE
+	if (policy == _DSEMA4_POLICY_FIFO) {
+		return os_put_cached_semaphore((os_semaphore_t)sema_port);
+	}
+#endif
+	kern_return_t kr = semaphore_destroy(mach_task_self(), sema_port);
+	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+}
+
+void
+_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count)
+{
+	do {
+		kern_return_t kr = semaphore_signal(*sema);
+		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+	} while (--count);
+}
+
+void
+_dispatch_sema4_wait(_dispatch_sema4_t *sema)
+{
+	kern_return_t kr;
+	do {
+		kr = semaphore_wait(*sema);
+	} while (kr == KERN_ABORTED);
+	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+}
+
+bool
+_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout)
+{
+	mach_timespec_t _timeout;
+	kern_return_t kr;
+
+	do {
+		uint64_t nsec = _dispatch_timeout(timeout);
+		_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
+		_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
+		kr = slowpath(semaphore_timedwait(*sema, _timeout));
+	} while (kr == KERN_ABORTED);
+
+	if (kr == KERN_OPERATION_TIMED_OUT) {
+		return true;
+	}
+	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+	return false;
+}
+#elif USE_POSIX_SEM
+#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
+		if (unlikely((x) == -1)) { \
+			DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \
+		} \
+	} while (0)
+
+void
+_dispatch_sema4_init(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED)
+{
+	int rc = sem_init(sema, 0, 0);
+	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
+}
+
+void
+_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED)
+{
+	int rc = sem_destroy(sema);
+	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
+}
+
+void
+_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count)
+{
+	do {
+		int ret = sem_post(sema);
+		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+	} while (--count);
+}
+
+void
+_dispatch_sema4_wait(_dispatch_sema4_t *sema)
+{
+	int ret = sem_wait(sema);
+	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+}
+
+bool
+_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout)
+{
+	struct timespec _timeout;
+	int ret;
+
+	do {
+		uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout);
+		_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
+		_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
+		ret = slowpath(sem_timedwait(sema, &_timeout));
+	} while (ret == -1 && errno == EINTR);
+
+	if (ret == -1 && errno == ETIMEDOUT) {
+		return true;
+	}
+	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+	return false;
+}
+#elif USE_WIN32_SEM
+// rdar://problem/8428132
+static DWORD best_resolution = 1; // 1ms
+
+static DWORD
+_push_timer_resolution(DWORD ms)
+{
+	MMRESULT res;
+	static dispatch_once_t once;
+
+	if (ms > 16) {
+		// only update timer resolution if smaller than default 15.6ms
+		// zero means not updated
+		return 0;
+	}
+
+	// aim for the best resolution we can accomplish
+	dispatch_once(&once, ^{
+		TIMECAPS tc;
+		MMRESULT res;
+		res = timeGetDevCaps(&tc, sizeof(tc));
+		if (res == MMSYSERR_NOERROR) {
+			best_resolution = min(max(tc.wPeriodMin, best_resolution),
+					tc.wPeriodMax);
+		}
+	});
+
+	res = timeBeginPeriod(best_resolution);
+	if (res == TIMERR_NOERROR) {
+		return best_resolution;
+	}
+	// zero means not updated
+	return 0;
+}
+
+// match ms parameter to result from _push_timer_resolution
+DISPATCH_ALWAYS_INLINE
+static inline void
+_pop_timer_resolution(DWORD ms)
+{
+	if (ms) timeEndPeriod(ms);
+}
+
+void
+_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy DISPATCH_UNUSED)
+{
+	HANDLE tmp;
+
+	// lazily allocate the semaphore port
+
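+	// CreateSemaphore() can fail transiently under resource pressure;
+	// back off and retry rather than failing the object outright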
+	while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) {
+		_dispatch_temporary_resource_shortage();
+	}
+
+	if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
+		CloseHandle(tmp);
+	}
+}
+
+void
+_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED)
+{
+	HANDLE sema_handle = *sema;
+	CloseHandle(sema_handle);
+	*sema = 0;
+}
+
+void
+_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count)
+{
+	int ret = ReleaseSemaphore(*sema, count, NULL);
+	dispatch_assume(ret);
+}
+
+void
+_dispatch_sema4_wait(_dispatch_sema4_t *sema)
+{
+	WaitForSingleObject(*sema, INFINITE);
+}
+
+bool
+_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout)
+{
+	uint64_t nsec;
+	DWORD msec;
+	DWORD resolution;
+	DWORD wait_result;
+
+	nsec = _dispatch_timeout(timeout);
+	msec = (DWORD)(nsec / (uint64_t)1000000);
+	resolution = _push_timer_resolution(msec);
+	wait_result = WaitForSingleObject(*sema, msec);
+	_pop_timer_resolution(resolution);
+	return wait_result == WAIT_TIMEOUT;
+}
+#else
+#error "port has to implement _dispatch_sema4_t"
+#endif
+
 #pragma mark - ulock wrappers
 #if HAVE_UL_COMPARE_AND_WAIT
 
@@ -210,36 +468,12 @@
 
 #pragma mark - thread event
 
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-semaphore_t
-_dispatch_thread_semaphore_create(void)
-{
-	semaphore_t s4;
-	kern_return_t kr;
-	while (unlikely(kr = semaphore_create(mach_task_self(), &s4,
-			SYNC_POLICY_FIFO, 0))) {
-		DISPATCH_VERIFY_MIG(kr);
-		_dispatch_temporary_resource_shortage();
-	}
-	return s4;
-}
-
-void
-_dispatch_thread_semaphore_dispose(void *ctxt)
-{
-	semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt;
-	kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
-	DISPATCH_VERIFY_MIG(kr);
-	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
-}
-#endif
-
 void
 _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte)
 {
 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
 	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-		kern_return_t kr = semaphore_signal(dte->dte_semaphore);
+		kern_return_t kr = semaphore_signal(dte->dte_sema);
 		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
 		return;
 	}
@@ -248,9 +482,8 @@
 	_dispatch_ulock_wake(&dte->dte_value, 0);
 #elif HAVE_FUTEX
 	_dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG);
-#elif USE_POSIX_SEM
-	int rc = sem_post(&dte->dte_sem);
-	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+#else
+	_dispatch_sema4_signal(&dte->dte_sema, 1);
 #endif
 }
 
@@ -261,7 +494,7 @@
 	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
 		kern_return_t kr;
 		do {
-			kr = semaphore_wait(dte->dte_semaphore);
+			kr = semaphore_wait(dte->dte_sema);
 		} while (unlikely(kr == KERN_ABORTED));
 		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
 		return;
@@ -282,12 +515,8 @@
 				NULL, FUTEX_PRIVATE_FLAG);
 #endif
 	}
-#elif USE_POSIX_SEM
-	int rc;
-	do {
-		rc = sem_wait(&dte->dte_sem);
-	} while (unlikely(rc != 0));
-	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
+#else
+	_dispatch_sema4_wait(&dte->dte_sema);
 #endif
 }
 
diff --git a/src/shims/lock.h b/src/shims/lock.h
index 246c807..80f01b9 100644
--- a/src/shims/lock.h
+++ b/src/shims/lock.h
@@ -30,7 +30,7 @@
 #pragma mark - platform macros
 
 DISPATCH_ENUM(dispatch_lock_options, uint32_t,
-		DLOCK_LOCK_NONE 			= 0x00000000,
+		DLOCK_LOCK_NONE				= 0x00000000,
 		DLOCK_LOCK_DATA_CONTENTION  = 0x00010000,
 );
 
@@ -88,6 +88,7 @@
 
 #elif defined(__linux__)
 #include <linux/futex.h>
+#include <linux/membarrier.h>
 #include <unistd.h>
 #include <sys/syscall.h>   /* For SYS_xxx definitions */
 
@@ -160,10 +161,6 @@
 #endif
 #endif // HAVE_UL_UNFAIR_LOCK
 
-#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT && !HAVE_FUTEX)
-#endif
-
 #ifndef HAVE_FUTEX
 #ifdef __linux__
 #define HAVE_FUTEX 1
@@ -172,28 +169,75 @@
 #endif
 #endif // HAVE_FUTEX
 
-#if USE_MACH_SEM
-#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
-		if (unlikely((x) == KERN_INVALID_NAME)) { \
-			DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \
-		} else if (unlikely(x)) { \
-			DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
-		} \
-	} while (0)
-#define DISPATCH_GROUP_VERIFY_KR(x) do { \
-		if (unlikely((x) == KERN_INVALID_NAME)) { \
-			DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_group_t"); \
-		} else if (unlikely(x)) { \
-			DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
-		} \
-	} while (0)
-#elif USE_POSIX_SEM
-#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
-		if (unlikely((x) == -1)) { \
-			DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \
-		} \
-	} while (0)
+#pragma mark - semaphores
+
+#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
+#if TARGET_OS_MAC
+#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT)
+#else
+#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK 0
 #endif
+#endif
+
+#if USE_MACH_SEM
+
+typedef semaphore_t _dispatch_sema4_t;
+#define _DSEMA4_POLICY_FIFO  SYNC_POLICY_FIFO
+#define _DSEMA4_POLICY_LIFO  SYNC_POLICY_LIFO
+#define _DSEMA4_TIMEOUT() KERN_OPERATION_TIMED_OUT
+
+#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = MACH_PORT_NULL)
+#define _dispatch_sema4_is_created(sema)   (*(sema) != MACH_PORT_NULL)
+void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy);
+
+#elif USE_POSIX_SEM
+
+typedef sem_t _dispatch_sema4_t;
+#define _DSEMA4_POLICY_FIFO 0
+#define _DSEMA4_POLICY_LIFO 0
+#define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1)
+
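+// sem_t has no "uninitialized" sentinel value, so POSIX semaphores are
+// created eagerly in _dispatch_sema4_init() and never take the slow path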
+void _dispatch_sema4_init(_dispatch_sema4_t *sema, int policy);
+#define _dispatch_sema4_is_created(sema) 1
+#define _dispatch_sema4_create_slow(sema, policy) ((void)0)
+
+#elif USE_WIN32_SEM
+
+typedef HANDLE _dispatch_sema4_t;
+#define _DSEMA4_POLICY_FIFO 0
+#define _DSEMA4_POLICY_LIFO 0
+#define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1)
+
+#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = 0)
+#define _dispatch_sema4_is_created(sema)   (*(sema) != 0)
+void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy);
+
+#else
+#error "port has to implement _dispatch_sema4_t"
+#endif
+
+void _dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy);
+void _dispatch_sema4_signal(_dispatch_sema4_t *sema, long count);
+void _dispatch_sema4_wait(_dispatch_sema4_t *sema);
+bool _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout);
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_sema4_create(_dispatch_sema4_t *sema, int policy)
+{
+	if (!_dispatch_sema4_is_created(sema)) {
+		_dispatch_sema4_create_slow(sema, policy);
+	}
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_sema4_dispose(_dispatch_sema4_t *sema, int policy)
+{
+	if (_dispatch_sema4_is_created(sema)) {
+		_dispatch_sema4_dispose_slow(sema, policy);
+	}
+}
 
 #pragma mark - compare and wait
 
@@ -224,7 +268,7 @@
 typedef struct dispatch_thread_event_s {
 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
 	union {
-		semaphore_t dte_semaphore;
+		_dispatch_sema4_t dte_sema;
 		uint32_t dte_value;
 	};
 #elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
@@ -232,43 +276,11 @@
 	// UINT32_MAX means waited on, but not signalled yet
 	// 0 is the initial and final state
 	uint32_t dte_value;
-#elif USE_POSIX_SEM
-	sem_t dte_sem;
 #else
-#  error define dispatch_thread_event_s for your platform
+	_dispatch_sema4_t dte_sema;
 #endif
 } dispatch_thread_event_s, *dispatch_thread_event_t;
 
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-semaphore_t _dispatch_thread_semaphore_create(void);
-void _dispatch_thread_semaphore_dispose(void *);
-
-DISPATCH_ALWAYS_INLINE
-static inline semaphore_t
-_dispatch_get_thread_semaphore(void)
-{
-	semaphore_t sema = (semaphore_t)(uintptr_t)
-			_dispatch_thread_getspecific(dispatch_sema4_key);
-	if (unlikely(!sema)) {
-		return _dispatch_thread_semaphore_create();
-	}
-	_dispatch_thread_setspecific(dispatch_sema4_key, NULL);
-	return sema;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_put_thread_semaphore(semaphore_t sema)
-{
-	semaphore_t old_sema = (semaphore_t)(uintptr_t)
-			_dispatch_thread_getspecific(dispatch_sema4_key);
-	_dispatch_thread_setspecific(dispatch_sema4_key, (void*)(uintptr_t)sema);
-	if (unlikely(old_sema)) {
-		return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema);
-	}
-}
-#endif
-
 DISPATCH_NOT_TAIL_CALLED
 void _dispatch_thread_event_wait_slow(dispatch_thread_event_t);
 void _dispatch_thread_event_signal_slow(dispatch_thread_event_t);
@@ -279,15 +291,15 @@
 {
 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
 	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-		dte->dte_semaphore = _dispatch_get_thread_semaphore();
+		_dispatch_sema4_init(&dte->dte_sema, _DSEMA4_POLICY_FIFO);
+		_dispatch_sema4_create(&dte->dte_sema, _DSEMA4_POLICY_FIFO);
 		return;
 	}
 #endif
 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
 	dte->dte_value = 0;
-#elif USE_POSIX_SEM
-	int rc = sem_init(&dte->dte_sem, 0, 0);
-	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
+#else
+	_dispatch_sema4_init(&dte->dte_sema, _DSEMA4_POLICY_FIFO);
 #endif
 }
 
@@ -308,7 +320,7 @@
 		// waiters do the validation
 		return;
 	}
-#elif USE_POSIX_SEM
+#else
 	// fallthrough
 #endif
 	_dispatch_thread_event_signal_slow(dte);
@@ -331,7 +343,7 @@
 		// for any other value, go to the slowpath which checks it's not corrupt
 		return;
 	}
-#elif USE_POSIX_SEM
+#else
 	// fallthrough
 #endif
 	_dispatch_thread_event_wait_slow(dte);
@@ -343,16 +355,15 @@
 {
 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
 	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
-		_dispatch_put_thread_semaphore(dte->dte_semaphore);
+		_dispatch_sema4_dispose(&dte->dte_sema, _DSEMA4_POLICY_FIFO);
 		return;
 	}
 #endif
 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
 	// nothing to do
 	dispatch_assert(dte->dte_value == 0);
-#elif USE_POSIX_SEM
-	int rc = sem_destroy(&dte->dte_sem);
-	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
+#else
+	_dispatch_sema4_dispose(&dte->dte_sema, _DSEMA4_POLICY_FIFO);
 #endif
 }
 
@@ -524,14 +535,29 @@
 			DLOCK_LOCK_NONE)
 
 DISPATCH_ALWAYS_INLINE
+static inline dispatch_once_t
+_dispatch_once_xchg_done(dispatch_once_t *pred)
+{
+#if defined(__i386__) || defined(__x86_64__)
+	// On Intel, any load is a load-acquire, so we don't need to be fancy
+	return os_atomic_xchg(pred, DLOCK_ONCE_DONE, release);
+#elif defined(__linux__)
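+	// sys_membarrier(MEMBARRIER_CMD_SHARED) executes a full barrier on
+	// every running thread, which lets the xchg below stay relaxed while
+	// still ordering the initialization before DLOCK_ONCE_DONE is visible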
+	if (unlikely(syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0) < 0)) {
+		DISPATCH_INTERNAL_CRASH(errno, "sys_membarrier not supported");
+	}
+	return os_atomic_xchg(pred, DLOCK_ONCE_DONE, relaxed);
+#else
+#  error dispatch_once algorithm not available for this port
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_once_gate_broadcast(dispatch_once_gate_t l)
 {
 	dispatch_once_t tid_cur, tid_self = (dispatch_once_t)_dispatch_tid_self();
-	// see once.c for explanation about this trick
-	os_atomic_maximally_synchronizing_barrier();
-	// above assumed to contain release barrier
-	tid_cur = os_atomic_xchg(&l->dgo_once, DLOCK_ONCE_DONE, relaxed);
+
+	tid_cur = _dispatch_once_xchg_done(&l->dgo_once);
 	if (likely(tid_cur == tid_self)) return;
 	_dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)tid_cur);
 }
diff --git a/src/shims/perfmon.h b/src/shims/perfmon.h
index 8af33ea..fe23a1d 100644
--- a/src/shims/perfmon.h
+++ b/src/shims/perfmon.h
@@ -27,26 +27,22 @@
 #ifndef __DISPATCH_SHIMS_PERFMON__
 #define __DISPATCH_SHIMS_PERFMON__
 
-#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
-
-#if defined (USE_APPLE_TSD_OPTIMIZATIONS) && defined(SIMULATE_5491082) && \
-		(defined(__i386__) || defined(__x86_64__))
-#ifdef __LP64__
-#define _dispatch_perfmon_workitem_inc() asm("incq %%gs:%0" : "+m" \
-		(*(void **)(dispatch_bcounter_key * sizeof(void *) + \
-		_PTHREAD_TSD_OFFSET)) :: "cc")
-#define _dispatch_perfmon_workitem_dec() asm("decq %%gs:%0" : "+m" \
-		(*(void **)(dispatch_bcounter_key * sizeof(void *) + \
-		_PTHREAD_TSD_OFFSET)) :: "cc")
-#else
-#define _dispatch_perfmon_workitem_inc() asm("incl %%gs:%0" : "+m" \
-		(*(void **)(dispatch_bcounter_key * sizeof(void *) + \
-		_PTHREAD_TSD_OFFSET)) :: "cc")
-#define _dispatch_perfmon_workitem_dec() asm("decl %%gs:%0" : "+m" \
-		(*(void **)(dispatch_bcounter_key * sizeof(void *) + \
-		_PTHREAD_TSD_OFFSET)) :: "cc")
+#if DISPATCH_PERF_MON
+#if DISPATCH_INTROSPECTION
+#error invalid configuration
 #endif
-#else /* !USE_APPLE_TSD_OPTIMIZATIONS */
+
+typedef enum {
+	perfmon_thread_no_trace = 0,
+	perfmon_thread_event_no_steal,	// 1) Event threads that couldn't steal
+	perfmon_thread_event_steal,		// 2) Event threads failing to steal very late
+	perfmon_thread_worker_non_oc,	// 3) Non overcommit threads finding
+									//		nothing on the root queues
+	perfmon_thread_worker_oc,		// 4) Overcommit thread finding nothing to do
+	perfmon_thread_manager,
+} perfmon_thread_type;
+
+DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_perfmon_workitem_inc(void)
 {
@@ -54,6 +50,8 @@
 	cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key);
 	_dispatch_thread_setspecific(dispatch_bcounter_key, (void *)++cnt);
 }
+
+DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_perfmon_workitem_dec(void)
 {
@@ -61,18 +59,38 @@
 	cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key);
 	_dispatch_thread_setspecific(dispatch_bcounter_key, (void *)--cnt);
 }
-#endif /* USE_APPLE_TSD_OPTIMIZATIONS */
 
+#define DISPATCH_PERF_MON_ARGS_PROTO  , uint64_t perfmon_start
+#define DISPATCH_PERF_MON_ARGS        , perfmon_start
+#define DISPATCH_PERF_MON_VAR         uint64_t perfmon_start;
+
+#define _dispatch_perfmon_start_impl(trace) ({ \
+		if (trace) _dispatch_ktrace0(DISPATCH_PERF_MON_worker_thread_start); \
+		perfmon_start = _dispatch_absolute_time(); \
+	})
 #define _dispatch_perfmon_start() \
-		uint64_t start = _dispatch_absolute_time()
-#define _dispatch_perfmon_end() \
-		_dispatch_queue_merge_stats(start)
+		DISPATCH_PERF_MON_VAR _dispatch_perfmon_start_impl(true)
+#define _dispatch_perfmon_start_notrace() \
+		DISPATCH_PERF_MON_VAR _dispatch_perfmon_start_impl(false)
+#define _dispatch_perfmon_end(thread_type) \
+		_dispatch_queue_merge_stats(perfmon_start, true, thread_type)
+#define _dispatch_perfmon_end_notrace() \
+		_dispatch_queue_merge_stats(perfmon_start, false, perfmon_thread_no_trace)
+
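+// The DISPATCH_PERF_MON_ARGS* macros thread perfmon_start through the
+// intermediate call frames so that _dispatch_queue_merge_stats() can
+// compute the elapsed time from the recorded start timestamp.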
+void _dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type);
+
 #else
 
+#define DISPATCH_PERF_MON_ARGS_PROTO
+#define DISPATCH_PERF_MON_ARGS
+#define DISPATCH_PERF_MON_VAR
 #define _dispatch_perfmon_workitem_inc()
 #define _dispatch_perfmon_workitem_dec()
+#define _dispatch_perfmon_start_impl(trace)
 #define _dispatch_perfmon_start()
-#define _dispatch_perfmon_end()
+#define _dispatch_perfmon_end(thread_type)
+#define _dispatch_perfmon_start_notrace()
+#define _dispatch_perfmon_end_notrace()
 
 #endif // DISPATCH_PERF_MON
 
diff --git a/src/shims/priority.h b/src/shims/priority.h
new file mode 100644
index 0000000..70c2638
--- /dev/null
+++ b/src/shims/priority.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_SHIMS_PRIORITY__
+#define __DISPATCH_SHIMS_PRIORITY__
+
+#if HAVE_PTHREAD_QOS_H && __has_include(<pthread/qos_private.h>)
+#include <pthread/qos.h>
+#include <pthread/qos_private.h>
+#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
+#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000
+#endif
+#ifndef _PTHREAD_PRIORITY_SCHED_PRI_FLAG
+#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000
+#endif
+#ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
+#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000
+#endif
+#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
+#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000
+#endif
+#ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG
+#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000
+#endif
+#else // HAVE_PTHREAD_QOS_H
+OS_ENUM(qos_class, unsigned int,
+	QOS_CLASS_USER_INTERACTIVE = 0x21,
+	QOS_CLASS_USER_INITIATED = 0x19,
+	QOS_CLASS_DEFAULT = 0x15,
+	QOS_CLASS_UTILITY = 0x11,
+	QOS_CLASS_BACKGROUND = 0x09,
+	QOS_CLASS_MAINTENANCE = 0x05,
+	QOS_CLASS_UNSPECIFIED = 0x00,
+);
+typedef unsigned long pthread_priority_t;
+#define QOS_MIN_RELATIVE_PRIORITY (-15)
+#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff)
+#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00
+#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull)
+#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff
+#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000
+#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000
+#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000
+#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000
+#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000
+
+#endif // HAVE_PTHREAD_QOS_H
+
+typedef uint32_t dispatch_qos_t;
+typedef uint32_t dispatch_priority_t;
+typedef uint16_t dispatch_priority_requested_t;
+
+#define DISPATCH_QOS_UNSPECIFIED            ((dispatch_qos_t)0)
+#define DISPATCH_QOS_MAINTENANCE            ((dispatch_qos_t)1)
+#define DISPATCH_QOS_BACKGROUND             ((dispatch_qos_t)2)
+#define DISPATCH_QOS_UTILITY                ((dispatch_qos_t)3)
+#define DISPATCH_QOS_DEFAULT                ((dispatch_qos_t)4)
+#define DISPATCH_QOS_USER_INITIATED         ((dispatch_qos_t)5)
+#define DISPATCH_QOS_USER_INTERACTIVE       ((dispatch_qos_t)6)
+#define DISPATCH_QOS_MAX                    DISPATCH_QOS_USER_INTERACTIVE
+#define DISPATCH_QOS_SATURATED              ((dispatch_qos_t)15)
+
+#define DISPATCH_PRIORITY_RELPRI_MASK        ((dispatch_priority_t)0x000000ff)
+#define DISPATCH_PRIORITY_RELPRI_SHIFT       0
+#define DISPATCH_PRIORITY_QOS_MASK           ((dispatch_priority_t)0x0000ff00)
+#define DISPATCH_PRIORITY_QOS_SHIFT          8
+#define DISPATCH_PRIORITY_REQUESTED_MASK     ((dispatch_priority_t)0x0000ffff)
+#define DISPATCH_PRIORITY_OVERRIDE_MASK      ((dispatch_priority_t)0x00ff0000)
+#define DISPATCH_PRIORITY_OVERRIDE_SHIFT     16
+#define DISPATCH_PRIORITY_FLAGS_MASK         ((dispatch_priority_t)0xff000000)
+
+#define DISPATCH_PRIORITY_SATURATED_OVERRIDE DISPATCH_PRIORITY_OVERRIDE_MASK
+
+#define DISPATCH_PRIORITY_FLAG_OVERCOMMIT    ((dispatch_priority_t)0x80000000) // _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
+#define DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE  ((dispatch_priority_t)0x04000000) // _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
+#define DISPATCH_PRIORITY_FLAG_MANAGER       ((dispatch_priority_t)0x02000000) // _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
+#define DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK \
+		(DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE | \
+		DISPATCH_PRIORITY_FLAG_MANAGER)
+
+// not passed to pthread
+#define DISPATCH_PRIORITY_FLAG_INHERIT       ((dispatch_priority_t)0x40000000) // _PTHREAD_PRIORITY_INHERIT_FLAG
+#define DISPATCH_PRIORITY_FLAG_ENFORCE       ((dispatch_priority_t)0x10000000) // _PTHREAD_PRIORITY_ENFORCE_FLAG
+#define DISPATCH_PRIORITY_FLAG_ROOTQUEUE     ((dispatch_priority_t)0x20000000) // _PTHREAD_PRIORITY_ROOTQUEUE_FLAG
+
+#pragma mark dispatch_qos
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_qos_from_qos_class(qos_class_t cls)
+{
+	switch ((unsigned int)cls) {
+	case QOS_CLASS_USER_INTERACTIVE: return DISPATCH_QOS_USER_INTERACTIVE;
+	case QOS_CLASS_USER_INITIATED:   return DISPATCH_QOS_USER_INITIATED;
+	case QOS_CLASS_DEFAULT:          return DISPATCH_QOS_DEFAULT;
+	case QOS_CLASS_UTILITY:          return DISPATCH_QOS_UTILITY;
+	case QOS_CLASS_BACKGROUND:       return DISPATCH_QOS_BACKGROUND;
+	case QOS_CLASS_MAINTENANCE:      return DISPATCH_QOS_MAINTENANCE;
+	default: return DISPATCH_QOS_UNSPECIFIED;
+	}
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline qos_class_t
+_dispatch_qos_to_qos_class(dispatch_qos_t qos)
+{
+	switch (qos) {
+	case DISPATCH_QOS_USER_INTERACTIVE: return QOS_CLASS_USER_INTERACTIVE;
+	case DISPATCH_QOS_USER_INITIATED:   return QOS_CLASS_USER_INITIATED;
+	case DISPATCH_QOS_DEFAULT:          return QOS_CLASS_DEFAULT;
+	case DISPATCH_QOS_UTILITY:          return QOS_CLASS_UTILITY;
+	case DISPATCH_QOS_BACKGROUND:       return QOS_CLASS_BACKGROUND;
+	case DISPATCH_QOS_MAINTENANCE:      return (qos_class_t)QOS_CLASS_MAINTENANCE;
+	default: return QOS_CLASS_UNSPECIFIED;
+	}
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_qos_from_queue_priority(long priority)
+{
+	switch (priority) {
+	case DISPATCH_QUEUE_PRIORITY_BACKGROUND:      return DISPATCH_QOS_BACKGROUND;
+	case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: return DISPATCH_QOS_UTILITY;
+	case DISPATCH_QUEUE_PRIORITY_LOW:             return DISPATCH_QOS_UTILITY;
+	case DISPATCH_QUEUE_PRIORITY_DEFAULT:         return DISPATCH_QOS_DEFAULT;
+	case DISPATCH_QUEUE_PRIORITY_HIGH:            return DISPATCH_QOS_USER_INITIATED;
+	default: return _dispatch_qos_from_qos_class((qos_class_t)priority);
+	}
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_qos_from_pp(pthread_priority_t pp)
+{
+	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+	pp >>= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT;
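+	// the QoS class field is a one-bit-per-class bitmask, so ffs() maps
+	// the set bit to the matching 1-based dispatch_qos_t value
+	// (0, i.e. DISPATCH_QOS_UNSPECIFIED, when no class bit is set)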
+	return (dispatch_qos_t)__builtin_ffs((int)pp);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_qos_to_pp(dispatch_qos_t qos)
+{
+	pthread_priority_t pp;
+	pp = 1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT);
+	return pp | _PTHREAD_PRIORITY_PRIORITY_MASK;
+}
+
+// including maintenance
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_qos_is_background(dispatch_qos_t qos)
+{
+	return qos && qos <= DISPATCH_QOS_BACKGROUND;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_qos_greater_than_pp(dispatch_qos_t qos, pthread_priority_t pp)
+{
+	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+	return (pp >> ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT - 1)) == 0;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_qos_less_than_pp(dispatch_qos_t qos, pthread_priority_t pp)
+{
+	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+	return (pp >> ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)) != 0;
+}
+
+#pragma mark dispatch_priority
+
+#define _dispatch_priority_make(qos, relpri) \
+	(qos ? ((((qos) << DISPATCH_PRIORITY_QOS_SHIFT) & DISPATCH_PRIORITY_QOS_MASK) | \
+	 ((dispatch_priority_t)(relpri - 1) & DISPATCH_PRIORITY_RELPRI_MASK)) : 0)
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_priority_t
+_dispatch_priority_with_override_qos(dispatch_priority_t pri,
+		dispatch_qos_t oqos)
+{
+	pri &= ~DISPATCH_PRIORITY_OVERRIDE_MASK;
+	pri |= oqos << DISPATCH_PRIORITY_OVERRIDE_SHIFT;
+	return pri;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline int
+_dispatch_priority_relpri(dispatch_priority_t dbp)
+{
+	if (dbp & DISPATCH_PRIORITY_QOS_MASK) {
+		return (int8_t)(dbp & DISPATCH_PRIORITY_RELPRI_MASK) + 1;
+	}
+	return 0;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_priority_qos(dispatch_priority_t dbp)
+{
+	dbp &= DISPATCH_PRIORITY_QOS_MASK;
+	return dbp >> DISPATCH_PRIORITY_QOS_SHIFT;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_qos_t
+_dispatch_priority_override_qos(dispatch_priority_t dbp)
+{
+	dbp &= DISPATCH_PRIORITY_OVERRIDE_MASK;
+	return dbp >> DISPATCH_PRIORITY_OVERRIDE_SHIFT;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_priority_t
+_dispatch_priority_from_pp_impl(pthread_priority_t pp, bool keep_flags)
+{
+	dispatch_assert(!(pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG));
+
+	dispatch_priority_t dbp;
+	if (keep_flags) {
+		dbp = pp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK |
+				DISPATCH_PRIORITY_RELPRI_MASK);
+	} else {
+		dbp = pp & DISPATCH_PRIORITY_RELPRI_MASK;
+	}
+
+	dbp |= _dispatch_qos_from_pp(pp) << DISPATCH_PRIORITY_QOS_SHIFT;
+	return dbp;
+}
+#define _dispatch_priority_from_pp(pp) \
+		_dispatch_priority_from_pp_impl(pp, true)
+#define _dispatch_priority_from_pp_strip_flags(pp) \
+		_dispatch_priority_from_pp_impl(pp, false)
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_priority_to_pp_impl(dispatch_priority_t dbp, bool keep_flags)
+{
+	pthread_priority_t pp;
+	if (keep_flags) {
+		pp = dbp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK |
+				DISPATCH_PRIORITY_RELPRI_MASK);
+	} else {
+		pp = dbp & DISPATCH_PRIORITY_RELPRI_MASK;
+	}
+	dispatch_qos_t qos = _dispatch_priority_qos(dbp);
+	if (qos) {
+		pp |= (1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT));
+	}
+	return pp;
+}
+#define _dispatch_priority_to_pp(pp) \
+		_dispatch_priority_to_pp_impl(pp, true)
+#define _dispatch_priority_to_pp_strip_flags(pp) \
+		_dispatch_priority_to_pp_impl(pp, false)
+
+#endif // __DISPATCH_SHIMS_PRIORITY__
diff --git a/src/shims/time.h b/src/shims/time.h
index 7b29771..f05795a 100644
--- a/src/shims/time.h
+++ b/src/shims/time.h
@@ -40,7 +40,11 @@
 }
 #endif
 
-uint64_t _dispatch_get_nanoseconds(void);
+typedef enum {
+	DISPATCH_CLOCK_WALL,
+	DISPATCH_CLOCK_MACH,
+#define DISPATCH_CLOCK_COUNT  (DISPATCH_CLOCK_MACH + 1)
+} dispatch_clock_t;
 
 #if defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME
 // x86 currently implements mach time in nanoseconds
@@ -73,14 +77,14 @@
 	_dispatch_host_time_data_s *const data = &_dispatch_host_time_data;
 	dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init);
 
-	if (!machtime || slowpath(data->ratio_1_to_1)) {
+	if (unlikely(!machtime || data->ratio_1_to_1)) {
 		return machtime;
 	}
 	if (machtime >= INT64_MAX) {
 		return INT64_MAX;
 	}
-	long double big_tmp = ((long double)machtime * data->frac) + .5;
-	if (slowpath(big_tmp >= INT64_MAX)) {
+	long double big_tmp = ((long double)machtime * data->frac) + .5L;
+	if (unlikely(big_tmp >= INT64_MAX)) {
 		return INT64_MAX;
 	}
 	return (uint64_t)big_tmp;
@@ -92,50 +96,120 @@
 	_dispatch_host_time_data_s *const data = &_dispatch_host_time_data;
 	dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init);
 
-	if (!nsec || slowpath(data->ratio_1_to_1)) {
+	if (unlikely(!nsec || data->ratio_1_to_1)) {
 		return nsec;
 	}
 	if (nsec >= INT64_MAX) {
 		return INT64_MAX;
 	}
-	long double big_tmp = ((long double)nsec / data->frac) + .5;
-	if (slowpath(big_tmp >= INT64_MAX)) {
+	long double big_tmp = ((long double)nsec / data->frac) + .5L;
+	if (unlikely(big_tmp >= INT64_MAX)) {
 		return INT64_MAX;
 	}
 	return (uint64_t)big_tmp;
 }
 #endif
 
+/* XXXRW: Some kind of overflow detection needed? */
+#define _dispatch_timespec_to_nano(ts) \
+		((uint64_t)(ts).tv_sec * NSEC_PER_SEC + (uint64_t)(ts).tv_nsec)
+#define _dispatch_timeval_to_nano(tv) \
+		((uint64_t)(tv).tv_sec * NSEC_PER_SEC + \
+				(uint64_t)(tv).tv_usec * NSEC_PER_USEC)
+
+static inline uint64_t
+_dispatch_get_nanoseconds(void)
+{
+	dispatch_static_assert(sizeof(NSEC_PER_SEC) == 8);
+	dispatch_static_assert(sizeof(USEC_PER_SEC) == 8);
+
+#if TARGET_OS_MAC && DISPATCH_HOST_SUPPORTS_OSX(101200)
+	return clock_gettime_nsec_np(CLOCK_REALTIME);
+#elif HAVE_DECL_CLOCK_REALTIME
+	struct timespec ts;
+	dispatch_assume_zero(clock_gettime(CLOCK_REALTIME, &ts));
+	return _dispatch_timespec_to_nano(ts);
+#elif TARGET_OS_WIN32
+	// FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC).
+	FILETIME ft;
+	ULARGE_INTEGER li;
+	GetSystemTimeAsFileTime(&ft);
+	li.LowPart = ft.dwLowDateTime;
+	li.HighPart = ft.dwHighDateTime;
+	return li.QuadPart * 100ull;
+#else
+	struct timeval tv;
+	dispatch_assert_zero(gettimeofday(&tv, NULL));
+	return _dispatch_timeval_to_nano(tv);
+#endif
+}
+
 static inline uint64_t
 _dispatch_absolute_time(void)
 {
 #if HAVE_MACH_ABSOLUTE_TIME
 	return mach_absolute_time();
+#elif HAVE_DECL_CLOCK_UPTIME && !defined(__linux__)
+	struct timespec ts;
+	dispatch_assume_zero(clock_gettime(CLOCK_UPTIME, &ts));
+	return _dispatch_timespec_to_nano(ts);
+#elif HAVE_DECL_CLOCK_MONOTONIC
+	struct timespec ts;
+	dispatch_assume_zero(clock_gettime(CLOCK_MONOTONIC, &ts));
+	return _dispatch_timespec_to_nano(ts);
 #elif TARGET_OS_WIN32
 	LARGE_INTEGER now;
 	return QueryPerformanceCounter(&now) ? now.QuadPart : 0;
 #else
-	struct timespec ts;
-	int ret;
-
-#if HAVE_DECL_CLOCK_UPTIME
-	ret = clock_gettime(CLOCK_UPTIME, &ts);
-#elif HAVE_DECL_CLOCK_MONOTONIC
-	ret = clock_gettime(CLOCK_MONOTONIC, &ts);
-#else
-#error "clock_gettime: no supported absolute time clock"
+#error platform needs to implement _dispatch_absolute_time()
 #endif
-	(void)dispatch_assume_zero(ret);
-
-	/* XXXRW: Some kind of overflow detection needed? */
-	return (ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec);
-#endif // HAVE_MACH_ABSOLUTE_TIME
 }
 
+DISPATCH_ALWAYS_INLINE
 static inline uint64_t
 _dispatch_approximate_time(void)
 {
+#if HAVE_MACH_APPROXIMATE_TIME
+	return mach_approximate_time();
+#elif HAVE_DECL_CLOCK_UPTIME_FAST && !defined(__linux__)
+	struct timespec ts;
+	dispatch_assume_zero(clock_gettime(CLOCK_UPTIME_FAST, &ts));
+	return _dispatch_timespec_to_nano(ts);
+#elif defined(__linux__)
+	struct timespec ts;
+	dispatch_assume_zero(clock_gettime(CLOCK_REALTIME_COARSE, &ts));
+	return _dispatch_timespec_to_nano(ts);
+#else
 	return _dispatch_absolute_time();
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_dispatch_time_now(dispatch_clock_t clock)
+{
+	switch (clock) {
+	case DISPATCH_CLOCK_MACH:
+		return _dispatch_absolute_time();
+	case DISPATCH_CLOCK_WALL:
+		return _dispatch_get_nanoseconds();
+	}
+	__builtin_unreachable();
+}
+
+typedef struct {
+	uint64_t nows[DISPATCH_CLOCK_COUNT];
+} dispatch_clock_now_cache_s, *dispatch_clock_now_cache_t;
+
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_dispatch_time_now_cached(dispatch_clock_t clock,
+		dispatch_clock_now_cache_t cache)
+{
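+	// a zero slot means "not sampled yet"; both clocks return nonzero
+	// values in practice, so 0 can double as the empty marker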
+	if (likely(cache->nows[clock])) {
+		return cache->nows[clock];
+	}
+	return cache->nows[clock] = _dispatch_time_now(clock);
 }
 
 #endif // __DISPATCH_SHIMS_TIME__
diff --git a/src/shims/tsd.h b/src/shims/tsd.h
index 2e3ece8..f1f177c 100644
--- a/src/shims/tsd.h
+++ b/src/shims/tsd.h
@@ -59,6 +59,11 @@
 #endif
 
 #if DISPATCH_USE_DIRECT_TSD
+#ifndef __TSD_THREAD_QOS_CLASS
+#define __TSD_THREAD_QOS_CLASS 4
+#endif
+static const unsigned long dispatch_priority_key	= __TSD_THREAD_QOS_CLASS;
+
 // dispatch_queue_key & dispatch_frame_key need to be contiguous
 // in that order, and queue_key to be an even number
 static const unsigned long dispatch_queue_key		= __PTK_LIBDISPATCH_KEY0;
@@ -67,21 +72,12 @@
 static const unsigned long dispatch_context_key		= __PTK_LIBDISPATCH_KEY3;
 static const unsigned long dispatch_pthread_root_queue_observer_hooks_key =
 		__PTK_LIBDISPATCH_KEY4;
-static const unsigned long dispatch_defaultpriority_key =__PTK_LIBDISPATCH_KEY5;
+static const unsigned long dispatch_basepri_key     = __PTK_LIBDISPATCH_KEY5;
 #if DISPATCH_INTROSPECTION
 static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY6;
 #elif DISPATCH_PERF_MON
 static const unsigned long dispatch_bcounter_key	= __PTK_LIBDISPATCH_KEY6;
 #endif
-static const unsigned long dispatch_sema4_key		= __PTK_LIBDISPATCH_KEY7;
-
-#ifndef __TSD_THREAD_QOS_CLASS
-#define __TSD_THREAD_QOS_CLASS 4
-#endif
-#ifndef __TSD_THREAD_VOUCHER
-#define __TSD_THREAD_VOUCHER 6
-#endif
-static const unsigned long dispatch_priority_key	= __TSD_THREAD_QOS_CLASS;
 static const unsigned long dispatch_voucher_key		= __PTK_LIBDISPATCH_KEY8;
 static const unsigned long dispatch_deferred_items_key = __PTK_LIBDISPATCH_KEY9;
 
@@ -108,15 +104,12 @@
 	void *dispatch_cache_key;
 	void *dispatch_context_key;
 	void *dispatch_pthread_root_queue_observer_hooks_key;
-	void *dispatch_defaultpriority_key;
+	void *dispatch_basepri_key;
 #if DISPATCH_INTROSPECTION
 	void *dispatch_introspection_key;
 #elif DISPATCH_PERF_MON
 	void *dispatch_bcounter_key;
 #endif
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-	void *dispatch_sema4_key;
-#endif
 	void *dispatch_priority_key;
 	void *dispatch_voucher_key;
 	void *dispatch_deferred_items_key;
@@ -160,19 +153,18 @@
 	  _dispatch_thread_setspecific(k2,(p)[1]) )
 
 #else
+extern pthread_key_t dispatch_priority_key;
 extern pthread_key_t dispatch_queue_key;
 extern pthread_key_t dispatch_frame_key;
 extern pthread_key_t dispatch_cache_key;
 extern pthread_key_t dispatch_context_key;
 extern pthread_key_t dispatch_pthread_root_queue_observer_hooks_key;
-extern pthread_key_t dispatch_defaultpriority_key;
+extern pthread_key_t dispatch_basepri_key;
 #if DISPATCH_INTROSPECTION
 extern pthread_key_t dispatch_introspection_key;
 #elif DISPATCH_PERF_MON
 extern pthread_key_t dispatch_bcounter_key;
 #endif
-extern pthread_key_t dispatch_sema4_key;
-extern pthread_key_t dispatch_priority_key;
 extern pthread_key_t dispatch_voucher_key;
 extern pthread_key_t dispatch_deferred_items_key;
 
diff --git a/src/shims/yield.h b/src/shims/yield.h
index 1850aee..67f8679 100644
--- a/src/shims/yield.h
+++ b/src/shims/yield.h
@@ -31,31 +31,40 @@
 #pragma mark _dispatch_wait_until
 
 #if DISPATCH_HW_CONFIG_UP
-#define _dispatch_wait_until(c) do { \
+#define _dispatch_wait_until(c) ({ \
+		typeof(c) _c; \
 		int _spins = 0; \
-		while (!fastpath(c)) { \
+		for (;;) { \
+			if (likely(_c = (c))) break; \
 			_spins++; \
 			_dispatch_preemption_yield(_spins); \
-		} } while (0)
+		} \
+		_c; })
 #elif TARGET_OS_EMBEDDED
 // <rdar://problem/15440575>
 #ifndef DISPATCH_WAIT_SPINS
 #define DISPATCH_WAIT_SPINS 1024
 #endif
-#define _dispatch_wait_until(c) do { \
+#define _dispatch_wait_until(c) ({ \
+		typeof(c) _c; \
 		int _spins = -(DISPATCH_WAIT_SPINS); \
-		while (!fastpath(c)) { \
+		for (;;) { \
+			if (likely(_c = (c))) break; \
 			if (slowpath(_spins++ >= 0)) { \
 				_dispatch_preemption_yield(_spins); \
 			} else { \
 				dispatch_hardware_pause(); \
 			} \
-		} } while (0)
+		} \
+		_c; })
 #else
-#define _dispatch_wait_until(c) do { \
-		while (!fastpath(c)) { \
+#define _dispatch_wait_until(c) ({ \
+		typeof(c) _c; \
+		for (;;) { \
+			if (likely(_c = (c))) break; \
 			dispatch_hardware_pause(); \
-		} } while (0)
+		} \
+		_c; })
 #endif
 
 #pragma mark -
diff --git a/src/source.c b/src/source.c
index 7537f32..f558d8d 100644
--- a/src/source.c
+++ b/src/source.c
@@ -19,216 +19,54 @@
  */
 
 #include "internal.h"
-#if HAVE_MACH
-#include "protocol.h"
-#include "protocolServer.h"
-#endif
-#include <sys/mount.h>
-
-#define DKEV_DISPOSE_IMMEDIATE_DELETE 0x1
-#define DKEV_UNREGISTER_DISCONNECTED 0x2
-#define DKEV_UNREGISTER_REPLY_REMOVE 0x4
-#define DKEV_UNREGISTER_WAKEUP 0x8
 
 static void _dispatch_source_handler_free(dispatch_source_t ds, long kind);
-static void _dispatch_source_merge_kevent(dispatch_source_t ds,
-		const _dispatch_kevent_qos_s *ke);
-static bool _dispatch_kevent_register(dispatch_kevent_t *dkp,
-		pthread_priority_t pp, uint32_t *flgp);
-static long _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg,
-		unsigned int options);
-static long _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags,
-		uint32_t del_flags);
-static void _dispatch_kevent_drain(_dispatch_kevent_qos_s *ke);
-static void _dispatch_kevent_merge(_dispatch_kevent_qos_s *ke);
-static void _dispatch_timers_kevent(_dispatch_kevent_qos_s *ke);
-static void _dispatch_timers_unregister(dispatch_source_t ds,
-		dispatch_kevent_t dk);
-static void _dispatch_timers_update(dispatch_source_t ds);
-static void _dispatch_timer_aggregates_check(void);
-static void _dispatch_timer_aggregates_register(dispatch_source_t ds);
-static void _dispatch_timer_aggregates_update(dispatch_source_t ds,
-		unsigned int tidx);
-static void _dispatch_timer_aggregates_unregister(dispatch_source_t ds,
-		unsigned int tidx);
+static void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval);
+
+#define DISPATCH_TIMERS_RETAIN_ON_DISARM  1u
+static void _dispatch_timers_update(dispatch_unote_t du, uint32_t flags);
+static void _dispatch_timers_unregister(dispatch_timer_source_refs_t dt);
+
+static void _dispatch_source_timer_configure(dispatch_source_t ds);
 static inline unsigned long _dispatch_source_timer_data(
-		dispatch_source_refs_t dr, unsigned long prev);
-static void _dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke);
-static long _dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke);
-static void _dispatch_memorypressure_init(void);
-#if HAVE_MACH
-static void _dispatch_mach_host_calendar_change_register(void);
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-static void _dispatch_mach_recv_msg_buf_init(void);
-static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk,
-		uint32_t new_flags, uint32_t del_flags);
-#endif
-static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk,
-		uint32_t new_flags, uint32_t del_flags);
-static void _dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke);
-static mach_msg_size_t _dispatch_kevent_mach_msg_size(
-		_dispatch_kevent_qos_s *ke);
-#else
-static inline void _dispatch_mach_host_calendar_change_register(void) {}
-static inline void _dispatch_mach_recv_msg_buf_init(void) {}
-#endif
-static const char * _evfiltstr(short filt);
-#if DISPATCH_DEBUG
-static void dispatch_kevent_debug(const char *verb,
-		const _dispatch_kevent_qos_s *kev, int i, int n,
-		const char *function, unsigned int line);
-static void _dispatch_kevent_debugger(void *context);
-#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \
-	dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q)
-#else
-static inline void
-dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev,
-		int i, int n, const char *function, unsigned int line)
-{
-	(void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line;
-}
-#define DISPATCH_ASSERT_ON_MANAGER_QUEUE()
-#endif
-#define _dispatch_kevent_debug(verb, _kev) \
-		dispatch_kevent_debug(verb, _kev, 0, 1, __FUNCTION__, __LINE__)
-#define _dispatch_kevent_debug_n(verb, _kev, i, n) \
-		dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__)
-#ifndef DISPATCH_MGR_QUEUE_DEBUG
-#define DISPATCH_MGR_QUEUE_DEBUG 0
-#endif
-#if DISPATCH_MGR_QUEUE_DEBUG
-#define _dispatch_kevent_mgr_debug _dispatch_kevent_debug
-#else
-static inline void
-_dispatch_kevent_mgr_debug(_dispatch_kevent_qos_s* kev DISPATCH_UNUSED) {}
-#endif
+		dispatch_source_t ds, dispatch_unote_t du);
 
 #pragma mark -
 #pragma mark dispatch_source_t
 
 dispatch_source_t
-dispatch_source_create(dispatch_source_type_t type, uintptr_t handle,
+dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle,
 		unsigned long mask, dispatch_queue_t dq)
 {
+	dispatch_source_refs_t dr;
+	dispatch_source_t ds;
+
 	// ensure _dispatch_evfilt_machport_direct_enabled is initialized
 	_dispatch_root_queues_init();
-	const _dispatch_kevent_qos_s *proto_kev = &type->ke;
-	dispatch_source_t ds;
-	dispatch_kevent_t dk;
 
-	// input validation
-	if (type == NULL || (mask & ~type->mask)) {
+	dr = dux_create(dst, handle, mask)._dr;
+	if (unlikely(!dr)) {
 		return DISPATCH_BAD_INPUT;
 	}
-	if (type->mask && !mask) {
-		// expect a non-zero mask when the type declares one ... except
-		switch (type->ke.filter) {
-		case DISPATCH_EVFILT_TIMER:
-			break; // timers don't need masks
-#if DISPATCH_USE_VM_PRESSURE
-		case EVFILT_VM:
-			break; // type->init forces the only acceptable mask
-#endif
-		case DISPATCH_EVFILT_MACH_NOTIFICATION:
-			break; // type->init handles zero mask as a legacy case
-		default:
-			// otherwise reject as invalid input
-			return DISPATCH_BAD_INPUT;
-		}
-	}
-
-	switch (type->ke.filter) {
-	case EVFILT_SIGNAL:
-		if (handle >= NSIG) {
-			return DISPATCH_BAD_INPUT;
-		}
-		break;
-	case EVFILT_FS:
-#if DISPATCH_USE_VM_PRESSURE
-	case EVFILT_VM:
-#endif
-#if DISPATCH_USE_MEMORYSTATUS
-	case EVFILT_MEMORYSTATUS:
-#endif
-	case DISPATCH_EVFILT_CUSTOM_ADD:
-	case DISPATCH_EVFILT_CUSTOM_OR:
-		if (handle) {
-			return DISPATCH_BAD_INPUT;
-		}
-		break;
-	case DISPATCH_EVFILT_TIMER:
-		if ((handle == 0) != (type->ke.ident == 0)) {
-			return DISPATCH_BAD_INPUT;
-		}
-		break;
-	default:
-		break;
-	}
 
 	ds = _dispatch_alloc(DISPATCH_VTABLE(source),
 			sizeof(struct dispatch_source_s));
 	// Initialize as a queue first, then override some settings below.
-	_dispatch_queue_init(ds->_as_dq, DQF_NONE, 1, true);
+	_dispatch_queue_init(ds->_as_dq, DQF_LEGACY, 1, true);
 	ds->dq_label = "source";
 	ds->do_ref_cnt++; // the reference the manager queue holds
-
-	switch (type->ke.filter) {
-	case DISPATCH_EVFILT_CUSTOM_OR:
-		dk = DISPATCH_KEV_CUSTOM_OR;
-		break;
-	case DISPATCH_EVFILT_CUSTOM_ADD:
-		dk = DISPATCH_KEV_CUSTOM_ADD;
-		break;
-	default:
-		dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s));
-		dk->dk_kevent = *proto_kev;
-		dk->dk_kevent.ident = handle;
-		dk->dk_kevent.flags |= EV_ADD|EV_ENABLE;
-		dk->dk_kevent.fflags |= (uint32_t)mask;
-		dk->dk_kevent.udata = (_dispatch_kevent_qos_udata_t)dk;
-		TAILQ_INIT(&dk->dk_sources);
-
-		ds->ds_pending_data_mask = dk->dk_kevent.fflags;
-		ds->ds_ident_hack = (uintptr_t)dk->dk_kevent.ident;
-		if (EV_UDATA_SPECIFIC & proto_kev->flags) {
-			dk->dk_kevent.flags |= EV_DISPATCH;
-			ds->ds_is_direct_kevent = true;
-			ds->ds_needs_rearm = true;
-		}
-		break;
-	}
-	ds->ds_dkev = dk;
-
-	if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) {
-		ds->ds_needs_rearm = true;
-	} else if (!(EV_CLEAR & proto_kev->flags)) {
-		// we cheat and use EV_CLEAR to mean a "flag thingy"
-		ds->ds_is_adder = true;
-	}
-	// Some sources require special processing
-	if (type->init != NULL) {
-		type->init(ds, type, handle, mask, dq);
-	}
-	dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder));
-	if (!ds->ds_is_custom_source && (dk->dk_kevent.flags & EV_VANISHED)) {
-		// see _dispatch_source_merge_kevent
-		dispatch_assert(!(dk->dk_kevent.flags & EV_ONESHOT));
-		dispatch_assert(dk->dk_kevent.flags & EV_DISPATCH);
-		dispatch_assert(dk->dk_kevent.flags & EV_UDATA_SPECIFIC);
-	}
-
-	if (fastpath(!ds->ds_refs)) {
-		ds->ds_refs = _dispatch_calloc(1ul,
-				sizeof(struct dispatch_source_refs_s));
-	}
-	ds->ds_refs->dr_source_wref = _dispatch_ptr2wref(ds);
+	ds->ds_refs = dr;
+	dr->du_owner_wref = _dispatch_ptr2wref(ds);
 
 	if (slowpath(!dq)) {
-		dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true);
+		dq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true);
 	} else {
-		_dispatch_retain(dq);
+		_dispatch_retain((dispatch_queue_t _Nonnull)dq);
 	}
 	ds->do_targetq = dq;
+	if (dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_INTERVAL)) {
+		_dispatch_source_set_interval(ds, handle);
+	}
 	_dispatch_object_debug(ds, "%s", __func__);
 	return ds;
 }
@@ -240,13 +78,19 @@
 	_dispatch_source_handler_free(ds, DS_REGISTN_HANDLER);
 	_dispatch_source_handler_free(ds, DS_EVENT_HANDLER);
 	_dispatch_source_handler_free(ds, DS_CANCEL_HANDLER);
-	free(ds->ds_refs);
+	_dispatch_unote_dispose(ds->ds_refs);
+	ds->ds_refs = NULL;
 	_dispatch_queue_destroy(ds->_as_dq);
 }
 
 void
 _dispatch_source_xref_dispose(dispatch_source_t ds)
 {
+	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+	if (unlikely(!(dqf & (DQF_LEGACY|DSF_CANCELED)))) {
+		DISPATCH_CLIENT_CRASH(ds, "Release of a source that has not been "
+				"cancelled, but has a mandatory cancel handler");
+	}
 	dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH);
 }
 
@@ -259,78 +103,79 @@
 unsigned long
 dispatch_source_get_mask(dispatch_source_t ds)
 {
-	unsigned long mask = ds->ds_pending_data_mask;
-	if (ds->ds_vmpressure_override) {
-		mask = NOTE_VM_PRESSURE;
+	dispatch_source_refs_t dr = ds->ds_refs;
+	if (ds->dq_atomic_flags & DSF_CANCELED) {
+		return 0;
+	}
+#if DISPATCH_USE_MEMORYSTATUS
+	if (dr->du_vmpressure_override) {
+		return NOTE_VM_PRESSURE;
 	}
 #if TARGET_IPHONE_SIMULATOR
-	else if (ds->ds_memorypressure_override) {
-		mask = NOTE_MEMORYSTATUS_PRESSURE_WARN;
+	if (dr->du_memorypressure_override) {
+		return NOTE_MEMORYSTATUS_PRESSURE_WARN;
 	}
 #endif
-	return mask;
+#endif // DISPATCH_USE_MEMORYSTATUS
+	return dr->du_fflags;
 }
 
 uintptr_t
 dispatch_source_get_handle(dispatch_source_t ds)
 {
-	unsigned int handle = (unsigned int)ds->ds_ident_hack;
+	dispatch_source_refs_t dr = ds->ds_refs;
 #if TARGET_IPHONE_SIMULATOR
-	if (ds->ds_memorypressure_override) {
-		handle = 0;
+	if (dr->du_memorypressure_override) {
+		return 0;
 	}
 #endif
-	return handle;
+	return dr->du_ident;
 }
 
 unsigned long
 dispatch_source_get_data(dispatch_source_t ds)
 {
-	unsigned long data = ds->ds_data;
-	if (ds->ds_vmpressure_override) {
-		data = NOTE_VM_PRESSURE;
+#if DISPATCH_USE_MEMORYSTATUS
+	dispatch_source_refs_t dr = ds->ds_refs;
+	if (dr->du_vmpressure_override) {
+		return NOTE_VM_PRESSURE;
 	}
 #if TARGET_IPHONE_SIMULATOR
-	else if (ds->ds_memorypressure_override) {
-		data = NOTE_MEMORYSTATUS_PRESSURE_WARN;
+	if (dr->du_memorypressure_override) {
+		return NOTE_MEMORYSTATUS_PRESSURE_WARN;
 	}
 #endif
-	return data;
+#endif // DISPATCH_USE_MEMORYSTATUS
+	return ds->ds_data;
 }
 
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_source_merge_data2(dispatch_source_t ds,
-		pthread_priority_t pp, unsigned long val)
+DISPATCH_NOINLINE
+void
+_dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp,
+		unsigned long val)
 {
-	_dispatch_kevent_qos_s kev = {
-		.fflags = (typeof(kev.fflags))val,
-		.data = (typeof(kev.data))val,
-#if DISPATCH_USE_KEVENT_QOS
-		.qos = (_dispatch_kevent_priority_t)pp,
-#endif
-	};
-#if !DISPATCH_USE_KEVENT_QOS
-	(void)pp;
-#endif
+	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+	int filter = ds->ds_refs->du_filter;
 
-	dispatch_assert(ds->ds_dkev == DISPATCH_KEV_CUSTOM_OR ||
-			ds->ds_dkev == DISPATCH_KEV_CUSTOM_ADD);
-	_dispatch_kevent_debug("synthetic data", &kev);
-	_dispatch_source_merge_kevent(ds, &kev);
+	if (unlikely(dqf & (DSF_CANCELED | DSF_DELETED))) {
+		return;
+	}
+
+	if (filter == DISPATCH_EVFILT_CUSTOM_OR) {
+		os_atomic_or2o(ds, ds_pending_data, val, relaxed);
+	} else if (filter == DISPATCH_EVFILT_CUSTOM_ADD) {
+		os_atomic_add2o(ds, ds_pending_data, val, relaxed);
+	} else {
+		DISPATCH_CLIENT_CRASH(filter, "Invalid source type");
+	}
+
+	dx_wakeup(ds, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_FLUSH);
 }
 
 void
 dispatch_source_merge_data(dispatch_source_t ds, unsigned long val)
 {
-	_dispatch_source_merge_data2(ds, 0, val);
-}
-
-void
-_dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp,
-		unsigned long val)
-{
-	_dispatch_source_merge_data2(ds, pp, val);
+	_dispatch_source_merge_data(ds, 0, val);
 }
 
 #pragma mark -
@@ -450,6 +295,10 @@
 		_dispatch_source_handler_replace(ds, kind, dc);
 		return dx_vtable(ds)->do_resume(ds, false);
 	}
+	if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) {
+		DISPATCH_CLIENT_CRASH(kind, "Cannot change a handler of this source "
+				"after it has been activated");
+	}
 	_dispatch_ktrace1(DISPATCH_PERF_post_activate_mutation, ds);
 	if (kind == DS_REGISTN_HANDLER) {
 		_dispatch_bug_deprecated("Setting registration handler after "
@@ -480,27 +329,40 @@
 	_dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc);
 }
 
-void
-_dispatch_source_set_event_handler_continuation(dispatch_source_t ds,
-		dispatch_continuation_t dc)
-{
-	_dispatch_trace_continuation_push(ds->_as_dq, dc);
-	_dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc);
-}
-
 #ifdef __BLOCKS__
-void
-dispatch_source_set_cancel_handler(dispatch_source_t ds,
+DISPATCH_NOINLINE
+static void
+_dispatch_source_set_cancel_handler(dispatch_source_t ds,
 		dispatch_block_t handler)
 {
 	dispatch_continuation_t dc;
 	dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true);
 	_dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc);
 }
-#endif /* __BLOCKS__ */
 
 void
-dispatch_source_set_cancel_handler_f(dispatch_source_t ds,
+dispatch_source_set_cancel_handler(dispatch_source_t ds,
+		dispatch_block_t handler)
+{
+	if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) {
+		DISPATCH_CLIENT_CRASH(0, "Cannot set a non mandatory handler on "
+				"this source");
+	}
+	return _dispatch_source_set_cancel_handler(ds, handler);
+}
+
+void
+dispatch_source_set_mandatory_cancel_handler(dispatch_source_t ds,
+		dispatch_block_t handler)
+{
+	_dispatch_queue_atomic_flags_clear(ds->_as_dq, DQF_LEGACY);
+	return _dispatch_source_set_cancel_handler(ds, handler);
+}
+#endif /* __BLOCKS__ */
+
+DISPATCH_NOINLINE
+static void
+_dispatch_source_set_cancel_handler_f(dispatch_source_t ds,
 		dispatch_function_t handler)
 {
 	dispatch_continuation_t dc;
@@ -508,6 +370,25 @@
 	_dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc);
 }
 
+void
+dispatch_source_set_cancel_handler_f(dispatch_source_t ds,
+		dispatch_function_t handler)
+{
+	if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) {
+		DISPATCH_CLIENT_CRASH(0, "Cannot set a non mandatory handler on "
+				"this source");
+	}
+	return _dispatch_source_set_cancel_handler_f(ds, handler);
+}
+
+void
+dispatch_source_set_mandatory_cancel_handler_f(dispatch_source_t ds,
+		dispatch_function_t handler)
+{
+	_dispatch_queue_atomic_flags_clear(ds->_as_dq, DQF_LEGACY);
+	return _dispatch_source_set_cancel_handler_f(ds, handler);
+}
+
 #ifdef __BLOCKS__
 void
 dispatch_source_set_registration_handler(dispatch_source_t ds,
@@ -555,7 +436,6 @@
 	dispatch_continuation_t dc;
 
 	dc = _dispatch_source_handler_take(ds, DS_CANCEL_HANDLER);
-	ds->ds_pending_data_mask = 0;
 	ds->ds_pending_data = 0;
 	ds->ds_data = 0;
 	_dispatch_source_handler_free(ds, DS_EVENT_HANDLER);
@@ -576,15 +456,17 @@
 _dispatch_source_latch_and_call(dispatch_source_t ds, dispatch_queue_t cq,
 		dispatch_invoke_flags_t flags)
 {
-	unsigned long prev;
-
 	dispatch_source_refs_t dr = ds->ds_refs;
 	dispatch_continuation_t dc = _dispatch_source_get_handler(dr, DS_EVENT_HANDLER);
-	prev = os_atomic_xchg2o(ds, ds_pending_data, 0, relaxed);
-	if (ds->ds_is_level) {
+	unsigned long prev;
+
+	if (dr->du_is_timer && !(dr->du_fflags & DISPATCH_TIMER_AFTER)) {
+		prev = _dispatch_source_timer_data(ds, dr);
+	} else {
+		prev = os_atomic_xchg2o(ds, ds_pending_data, 0, relaxed);
+	}
+	if (dr->du_is_level) {
 		ds->ds_data = ~prev;
-	} else if (ds->ds_is_timer && ds_timer(dr).target && prev) {
-		ds->ds_data = _dispatch_source_timer_data(dr, prev);
 	} else {
 		ds->ds_data = prev;
 	}
@@ -592,81 +474,48 @@
 		return;
 	}
 	_dispatch_continuation_pop(dc, cq, flags);
-	if (ds->ds_is_timer && (ds_timer(dr).flags & DISPATCH_TIMER_AFTER)) {
+	if (dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_AFTER)) {
 		_dispatch_source_handler_free(ds, DS_EVENT_HANDLER);
 		dispatch_release(ds); // dispatch_after sources are one-shot
 	}
 }
 
-static void
-_dispatch_source_kevent_unregister(dispatch_source_t ds)
+void
+_dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options)
 {
 	_dispatch_object_debug(ds, "%s", __func__);
-	uint32_t flags = (uint32_t)ds->ds_pending_data_mask;
-	dispatch_kevent_t dk = ds->ds_dkev;
 	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
-	if (ds->ds_is_custom_source) {
-		ds->ds_dkev = NULL;
-		goto done;
-	}
+	dispatch_source_refs_t dr = ds->ds_refs;
 
-	if (ds->ds_is_direct_kevent &&
-			((dqf & DSF_DELETED) || !(ds->ds_is_installed))) {
-		dk->dk_kevent.flags |= EV_DELETE; // already deleted
-		dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED);
-	}
-	if (dk->dk_kevent.filter == DISPATCH_EVFILT_TIMER) {
-		ds->ds_dkev = NULL;
-		if (ds->ds_is_installed) {
-			_dispatch_timers_unregister(ds, dk);
+	if (dr->du_filter == DISPATCH_EVFILT_TIMER) {
+		if (dr->du_registered) {
+			_dispatch_timers_unregister(ds->ds_timer_refs);
 		}
-	} else if (!ds->ds_is_direct_kevent) {
-		ds->ds_dkev = NULL;
-		dispatch_assert((bool)ds->ds_is_installed);
-		TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list);
-		_dispatch_kevent_unregister(dk, flags, 0);
+		dr->du_ident = DISPATCH_TIMER_IDENT_CANCELED;
 	} else {
-		unsigned int dkev_dispose_options = 0;
-		if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) {
-			dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE;
-		} else if (dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) {
-			if (!ds->ds_is_direct_kevent) {
-				dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE;
-			}
+		if (dr->du_needs_rearm && !(dqf & DSF_ARMED)) {
+			options |= DU_UNREGISTER_IMMEDIATE_DELETE;
 		}
-		long r = _dispatch_kevent_unregister(dk, flags, dkev_dispose_options);
-		if (r == EINPROGRESS) {
+		if (!_dispatch_unote_unregister(dr, options)) {
 			_dispatch_debug("kevent-source[%p]: deferred delete kevent[%p]",
-					ds, dk);
+					ds, dr);
 			_dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE);
 			return; // deferred unregistration
-#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS
-		} else if (r == ENOENT) {
-			_dispatch_debug("kevent-source[%p]: ENOENT delete kevent[%p]",
-					ds, dk);
-			_dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE);
-			return; // potential concurrent EV_DELETE delivery rdar://22047283
-#endif
-		} else {
-			dispatch_assume_zero(r);
 		}
-		ds->ds_dkev = NULL;
-		_TAILQ_TRASH_ENTRY(ds->ds_refs, dr_list);
 	}
-done:
+
 	dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds->_as_dq,
 			DSF_DELETED, DSF_ARMED | DSF_DEFERRED_DELETE | DSF_CANCEL_WAITER);
 	if (dqf & DSF_CANCEL_WAITER) {
 		_dispatch_wake_by_address(&ds->dq_atomic_flags);
 	}
 	ds->ds_is_installed = true;
-	ds->ds_needs_rearm = false; // re-arm is pointless and bad now
-	_dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dk);
+	_dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr);
 	_dispatch_release(ds); // the retain is done at creation time
 }
 
 DISPATCH_ALWAYS_INLINE
-static bool
+static inline bool
 _dispatch_source_tryarm(dispatch_source_t ds)
 {
 	dispatch_queue_flags_t oqf, nqf;
@@ -680,58 +529,53 @@
 	});
 }
 
-static bool
-_dispatch_source_kevent_resume(dispatch_source_t ds, uint32_t new_flags)
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_source_refs_resume(dispatch_source_t ds)
 {
-	switch (ds->ds_dkev->dk_kevent.filter) {
-	case DISPATCH_EVFILT_TIMER:
-		_dispatch_timers_update(ds);
-		_dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED);
-		_dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds,
-				ds->ds_dkev);
+	dispatch_source_refs_t dr = ds->ds_refs;
+	if (dr->du_filter == DISPATCH_EVFILT_TIMER) {
+		_dispatch_timers_update(dr, 0);
 		return true;
-#if HAVE_MACH
-	case EVFILT_MACHPORT:
-		if ((ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) &&
-				!ds->ds_is_direct_kevent) {
-			new_flags |= DISPATCH_MACH_RECV_MESSAGE; // emulate EV_DISPATCH
-		}
-		break;
-#endif
 	}
 	if (unlikely(!_dispatch_source_tryarm(ds))) {
 		return false;
 	}
-	if (unlikely(_dispatch_kevent_resume(ds->ds_dkev, new_flags, 0))) {
-		_dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, DSF_DELETED,
-				DSF_ARMED);
-		return false;
-	}
-	_dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev);
+	_dispatch_unote_resume(dr);
+	_dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, dr);
 	return true;
 }
 
-static void
-_dispatch_source_kevent_register(dispatch_source_t ds, pthread_priority_t pp)
+void
+_dispatch_source_refs_register(dispatch_source_t ds, dispatch_priority_t pri)
 {
-	dispatch_assert_zero((bool)ds->ds_is_installed);
-	switch (ds->ds_dkev->dk_kevent.filter) {
-	case DISPATCH_EVFILT_TIMER:
-		_dispatch_timers_update(ds);
-		_dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED);
-		_dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev);
+	dispatch_source_refs_t dr = ds->ds_refs;
+
+	dispatch_assert(!ds->ds_is_installed);
+	ds->ds_is_installed = true;
+
+	if (dr->du_filter == DISPATCH_EVFILT_TIMER) {
+		dispatch_priority_t kbp = _dispatch_source_compute_kevent_priority(ds);
+		// aggressively coalesce background/maintenance QoS timers
+		// <rdar://problem/12200216&27342536>
+		if (_dispatch_qos_is_background(_dispatch_priority_qos(kbp))) {
+			if (dr->du_fflags & DISPATCH_TIMER_STRICT) {
+				_dispatch_ktrace1(DISPATCH_PERF_strict_bg_timer, ds);
+			} else {
+				dr->du_fflags |= DISPATCH_TIMER_BACKGROUND;
+				dr->du_ident = _dispatch_source_timer_idx(dr);
+			}
+		}
+		_dispatch_timers_update(dr, 0);
 		return;
 	}
-	uint32_t flags;
-	bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, pp, &flags);
-	TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds->ds_refs, dr_list);
-	ds->ds_is_installed = true;
-	if (do_resume || ds->ds_needs_rearm) {
-		if (unlikely(!_dispatch_source_kevent_resume(ds, flags))) {
-			_dispatch_source_kevent_unregister(ds);
-		}
+
+	if (unlikely(!_dispatch_source_tryarm(ds) ||
+			!_dispatch_unote_register(dr, pri))) {
+		_dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, DSF_DELETED,
+				DSF_ARMED | DSF_DEFERRED_DELETE);
 	} else {
-		_dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED);
+		_dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, dr);
 	}
 	_dispatch_object_debug(ds, "%s", __func__);
 }
@@ -747,19 +591,19 @@
 	}
 }
 
-static pthread_priority_t
+dispatch_priority_t
 _dispatch_source_compute_kevent_priority(dispatch_source_t ds)
 {
-	pthread_priority_t p = ds->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
+	dispatch_priority_t p = ds->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK;
 	dispatch_queue_t tq = ds->do_targetq;
-	pthread_priority_t tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
+	dispatch_priority_t tqp = tq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK;
 
 	while (unlikely(tq->do_targetq)) {
 		if (unlikely(tq == &_dispatch_mgr_q)) {
-			return _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+			return DISPATCH_PRIORITY_FLAG_MANAGER;
 		}
 		if (unlikely(_dispatch_queue_is_thread_bound(tq))) {
-			// thread bound hierarchies are weird, we need to install
+			// thread-bound hierarchies are weird, we need to install
 			// from the context of the thread this hierarchy is bound to
 			return 0;
 		}
@@ -769,18 +613,18 @@
 			_dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds);
 			return 0;
 		}
-		if (unlikely(!_dispatch_queue_has_immutable_target(tq))) {
+		if (unlikely(_dispatch_queue_is_legacy(tq))) {
 			if (!_dispatch_is_in_root_queues_array(tq->do_targetq)) {
 				// we're not allowed to dereference tq->do_targetq
 				_dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds);
 				return 0;
 			}
 		}
-		if (!(tq->dq_priority & _PTHREAD_PRIORITY_INHERIT_FLAG)) {
+		if (!(tq->dq_priority & DISPATCH_PRIORITY_FLAG_INHERIT)) {
 			if (p < tqp) p = tqp;
 		}
 		tq = tq->do_targetq;
-		tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
+		tqp = tq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK;
 	}
 
 	if (unlikely(!tqp)) {
@@ -794,18 +638,19 @@
 _dispatch_source_finalize_activation(dispatch_source_t ds)
 {
 	dispatch_continuation_t dc;
+	dispatch_source_refs_t dr = ds->ds_refs;
 
-	if (unlikely(ds->ds_is_direct_kevent &&
+	if (unlikely(dr->du_is_direct &&
 			(_dispatch_queue_atomic_flags(ds->_as_dq) & DSF_CANCELED))) {
-		return _dispatch_source_kevent_unregister(ds);
+		return _dispatch_source_refs_unregister(ds, 0);
 	}
 
-	dc = _dispatch_source_get_event_handler(ds->ds_refs);
+	dc = _dispatch_source_get_event_handler(dr);
 	if (dc) {
 		if (_dispatch_object_is_barrier(dc)) {
 			_dispatch_queue_atomic_flags_set(ds->_as_dq, DQF_BARRIER_BIT);
 		}
-		ds->dq_priority = dc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
+		ds->dq_priority = _dispatch_priority_from_pp_strip_flags(dc->dc_priority);
 		if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) {
 			_dispatch_barrier_async_detached_f(ds->_as_dq, ds,
 					_dispatch_source_set_event_handler_context);
@@ -815,9 +660,9 @@
 	// call "super"
 	_dispatch_queue_finalize_activation(ds->_as_dq);
 
-	if (ds->ds_is_direct_kevent && !ds->ds_is_installed) {
-		pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds);
-		if (pp) _dispatch_source_kevent_register(ds, pp);
+	if (dr->du_is_direct && !ds->ds_is_installed) {
+		dispatch_priority_t bp = _dispatch_source_compute_kevent_priority(ds);
+		if (bp) _dispatch_source_refs_register(ds, bp);
 	}
 }
 
@@ -846,17 +691,31 @@
 
 	dispatch_source_refs_t dr = ds->ds_refs;
 	dispatch_queue_t dkq = &_dispatch_mgr_q;
+	dispatch_queue_flags_t dqf;
+	bool prevent_starvation = false;
 
-	if (ds->ds_is_direct_kevent) {
+	if (dr->du_is_direct) {
 		dkq = ds->do_targetq;
 	}
 
+	if (dr->du_is_timer &&
+			os_atomic_load2o(ds, ds_timer_refs->dt_pending_config, relaxed)) {
+		dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+		if (!(dqf & (DSF_CANCELED | DQF_RELEASED))) {
+			// timer has to be configured on the kevent queue
+			if (dq != dkq) {
+				return dkq;
+			}
+			_dispatch_source_timer_configure(ds);
+		}
+	}
+
 	if (!ds->ds_is_installed) {
 		// The source needs to be installed on the kevent queue.
 		if (dq != dkq) {
 			return dkq;
 		}
-		_dispatch_source_kevent_register(ds, _dispatch_get_defaultpriority());
+		_dispatch_source_refs_register(ds, _dispatch_get_basepri());
 	}
 
 	if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) {
@@ -874,18 +733,15 @@
 		_dispatch_source_registration_callout(ds, dq, flags);
 	}
 
-	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
-	bool prevent_starvation = false;
-
-	if ((dqf & DSF_DEFERRED_DELETE) &&
-			((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) {
+	dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+	if ((dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_ARMED)) {
 unregister_event:
 		// DSF_DELETED: Pending source kevent unregistration has been completed
 		// !DSF_ARMED: event was delivered and can safely be unregistered
 		if (dq != dkq) {
 			return dkq;
 		}
-		_dispatch_source_kevent_unregister(ds);
+		_dispatch_source_refs_unregister(ds, DU_UNREGISTER_IMMEDIATE_DELETE);
 		dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
 	}
 
@@ -905,7 +761,7 @@
 			// queue, this would requeue the source and ask for a new overcommit
 			// thread right away.
 			prevent_starvation = dq->do_targetq ||
-					!(dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
+					!(dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT);
 			if (prevent_starvation && ds->ds_pending_data) {
 				retq = ds->do_targetq;
 			}
@@ -921,10 +777,14 @@
 		// kevent queue. After uninstallation, the cancellation handler needs
 		// to be delivered to the target queue.
 		if (!(dqf & DSF_DELETED)) {
-			if (dq != dkq) {
+			if (dr->du_is_timer && !(dqf & DSF_ARMED)) {
+				// timers can cheat if not armed because there's nothing left
+				// to do on the manager queue and unregistration can happen
+				// on the regular target queue
+			} else if (dq != dkq) {
 				return dkq;
 			}
-			_dispatch_source_kevent_unregister(ds);
+			_dispatch_source_refs_unregister(ds, 0);
 			dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
 			if (unlikely(dqf & DSF_DEFERRED_DELETE)) {
 				if (!(dqf & DSF_ARMED)) {
@@ -945,7 +805,8 @@
 		prevent_starvation = false;
 	}
 
-	if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) {
+	if (dr->du_needs_rearm &&
+			!(dqf & (DSF_ARMED|DSF_DELETED|DSF_CANCELED|DQF_RELEASED))) {
 		// The source needs to be rearmed on the kevent queue.
 		if (dq != dkq) {
 			return dkq;
@@ -964,8 +825,7 @@
 			// not a concern and we can rearm right away.
 			return ds->do_targetq;
 		}
-		if (unlikely(!_dispatch_source_kevent_resume(ds, 0))) {
-			dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+		if (unlikely(!_dispatch_source_refs_resume(ds))) {
 			goto unregister_event;
 		}
 	}
@@ -981,7 +841,7 @@
 }
 
 void
-_dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp,
+_dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags)
 {
 	// This function determines whether the source needs to be invoked.
@@ -993,17 +853,21 @@
 	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
 	bool deferred_delete = (dqf & DSF_DEFERRED_DELETE);
 
-	if (ds->ds_is_direct_kevent) {
+	if (dr->du_is_direct) {
 		dkq = DISPATCH_QUEUE_WAKEUP_TARGET;
 	}
 
-	if (!ds->ds_is_installed) {
+	if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && dr->du_is_timer &&
+			os_atomic_load2o(ds, ds_timer_refs->dt_pending_config, relaxed)) {
+		// timer has to be configured on the kevent queue
+		tq = dkq;
+	} else if (!ds->ds_is_installed) {
 		// The source needs to be installed on the kevent queue.
 		tq = dkq;
 	} else if (_dispatch_source_get_registration_handler(dr)) {
 		// The registration handler needs to be delivered to the target queue.
 		tq = DISPATCH_QUEUE_WAKEUP_TARGET;
-	} else if (deferred_delete && ((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) {
+	} else if (deferred_delete && !(dqf & DSF_ARMED)) {
 		// Pending source kevent unregistration has been completed
 		// or EV_ONESHOT event can be acknowledged
 		tq = dkq;
@@ -1015,13 +879,21 @@
 		// cancellation handler needs to be delivered to the target queue.
 		// Note: cancellation assumes installation.
 		if (!(dqf & DSF_DELETED)) {
-			tq = dkq;
+			if (dr->du_is_timer && !(dqf & DSF_ARMED)) {
+				// timers can cheat if not armed because there's nothing left
+				// to do on the manager queue and unregistration can happen
+				// on the regular target queue
+				tq = DISPATCH_QUEUE_WAKEUP_TARGET;
+			} else {
+				tq = dkq;
+			}
 		} else if (_dispatch_source_get_event_handler(dr) ||
 				_dispatch_source_get_cancel_handler(dr) ||
 				_dispatch_source_get_registration_handler(dr)) {
 			tq = DISPATCH_QUEUE_WAKEUP_TARGET;
 		}
-	} else if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) {
+	} else if (dr->du_needs_rearm &&
+			!(dqf & (DSF_ARMED|DSF_DELETED|DSF_CANCELED|DQF_RELEASED))) {
 		// The source needs to be rearmed on the kevent queue.
 		tq = dkq;
 	}
@@ -1030,9 +902,9 @@
 	}
 
 	if (tq) {
-		return _dispatch_queue_class_wakeup(ds->_as_dq, pp, flags, tq);
-	} else if (pp) {
-		return _dispatch_queue_class_override_drainer(ds->_as_dq, pp, flags);
+		return _dispatch_queue_class_wakeup(ds->_as_dq, qos, flags, tq);
+	} else if (qos) {
+		return _dispatch_queue_class_override_drainer(ds->_as_dq, qos, flags);
 	} else if (flags & DISPATCH_WAKEUP_CONSUME) {
 		return _dispatch_release_tailcall(ds);
 	}
@@ -1060,9 +932,9 @@
 dispatch_source_cancel_and_wait(dispatch_source_t ds)
 {
 	dispatch_queue_flags_t old_dqf, dqf, new_dqf;
-	pthread_priority_t pp;
+	dispatch_source_refs_t dr = ds->ds_refs;
 
-	if (unlikely(_dispatch_source_get_cancel_handler(ds->ds_refs))) {
+	if (unlikely(_dispatch_source_get_cancel_handler(dr))) {
 		DISPATCH_CLIENT_CRASH(ds, "Source has a cancel handler");
 	}
 
@@ -1074,7 +946,7 @@
 		}
 		if ((old_dqf & DSF_STATE_MASK) == DSF_DELETED) {
 			// just add DSF_CANCELED
-		} else if ((old_dqf & DSF_DEFERRED_DELETE) || !ds->ds_is_direct_kevent){
+		} else if ((old_dqf & DSF_DEFERRED_DELETE) || !dr->du_is_direct) {
 			new_dqf |= DSF_CANCEL_WAITER;
 		}
 	});
@@ -1126,7 +998,7 @@
 		// same thing _dispatch_source_invoke2() does when handling cancellation
 		dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
 		if (!(dqf & (DSF_DEFERRED_DELETE | DSF_DELETED))) {
-			_dispatch_source_kevent_unregister(ds);
+			_dispatch_source_refs_unregister(ds, 0);
 			dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
 			if (likely((dqf & DSF_STATE_MASK) == DSF_DELETED)) {
 				_dispatch_source_cancel_callout(ds, NULL, DISPATCH_INVOKE_NONE);
@@ -1137,9 +1009,10 @@
 		DISPATCH_CLIENT_CRASH(ds, "dispatch_source_cancel_and_wait "
 				"called from a source handler");
 	} else {
+		dispatch_qos_t qos;
 override:
-		pp = _dispatch_get_priority() & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-		if (pp) dx_wakeup(ds, pp, DISPATCH_WAKEUP_OVERRIDING);
+		qos = _dispatch_qos_from_pp(_dispatch_get_priority());
+		if (qos) dx_wakeup(ds, qos, DISPATCH_WAKEUP_OVERRIDING);
 		dispatch_activate(ds);
 	}
 
@@ -1157,46 +1030,44 @@
 	}
 }
 
-static void
-_dispatch_source_merge_kevent(dispatch_source_t ds,
-		const _dispatch_kevent_qos_s *ke)
+void
+_dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data,
+		pthread_priority_t pp)
 {
-	_dispatch_object_debug(ds, "%s", __func__);
-	dispatch_wakeup_flags_t flags = 0;
+	dispatch_source_refs_t dr = du._dr;
+	dispatch_source_t ds = _dispatch_source_from_refs(dr);
+	dispatch_wakeup_flags_t wflags = 0;
 	dispatch_queue_flags_t dqf;
-	pthread_priority_t pp = 0;
 
-	if (ds->ds_needs_rearm || (ke->flags & (EV_DELETE | EV_ONESHOT))) {
+	if (dr->du_needs_rearm || (flags & (EV_DELETE | EV_ONESHOT))) {
 		// once we modify the queue atomic flags below, it will allow concurrent
 		// threads running _dispatch_source_invoke2 to dispose of the source,
-		// so we can't safely borrow the reference we get from the knote udata
+		// so we can't safely borrow the reference we get from the muxnote udata
 		// anymore, and need our own
-		flags = DISPATCH_WAKEUP_CONSUME;
+		wflags = DISPATCH_WAKEUP_CONSUME;
 		_dispatch_retain(ds); // rdar://20382435
 	}
 
-	if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) &&
-			!(ke->flags & EV_DELETE)) {
+	if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) &&
+			!(flags & EV_DELETE)) {
 		dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq,
 				DSF_DEFERRED_DELETE, DSF_ARMED);
-		if (ke->flags & EV_VANISHED) {
-			_dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter),
+		if (flags & EV_VANISHED) {
+			_dispatch_bug_kevent_client("kevent", dr->du_type->dst_kind,
 					"monitored resource vanished before the source "
 					"cancel handler was invoked", 0);
 		}
 		_dispatch_debug("kevent-source[%p]: %s kevent[%p]", ds,
-				(ke->flags & EV_VANISHED) ? "vanished" :
-				"deferred delete oneshot", (void*)ke->udata);
-	} else if ((ke->flags & EV_DELETE) || (ke->flags & EV_ONESHOT)) {
-		dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq,
-				DSF_DELETED, DSF_ARMED);
-		_dispatch_debug("kevent-source[%p]: delete kevent[%p]",
-				ds, (void*)ke->udata);
-		if (ke->flags & EV_DELETE) goto done;
-	} else if (ds->ds_needs_rearm) {
+				(flags & EV_VANISHED) ? "vanished" :
+				"deferred delete oneshot", dr);
+	} else if (flags & (EV_DELETE | EV_ONESHOT)) {
+		_dispatch_source_refs_unregister(ds, DU_UNREGISTER_ALREADY_DELETED);
+		_dispatch_debug("kevent-source[%p]: deleted kevent[%p]", ds, dr);
+		if (flags & EV_DELETE) goto done;
+		dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
+	} else if (dr->du_needs_rearm) {
 		dqf = _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED);
-		_dispatch_debug("kevent-source[%p]: disarmed kevent[%p] ",
-				ds, (void*)ke->udata);
+		_dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr);
 	} else {
 		dqf = _dispatch_queue_atomic_flags(ds->_as_dq);
 	}
@@ -1204,16 +1075,9 @@
 	if (dqf & (DSF_CANCELED | DQF_RELEASED)) {
 		goto done; // rdar://20204025
 	}
-#if HAVE_MACH
-	if (ke->filter == EVFILT_MACHPORT &&
-			dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) {
-		DISPATCH_INTERNAL_CRASH(ke->flags,"Unexpected kevent for mach channel");
-	}
-#endif
 
-	unsigned long data;
-	if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) &&
-			(ke->flags & EV_VANISHED)) {
+	if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) &&
+			(flags & EV_VANISHED)) {
 		// if the resource behind the ident vanished, the event handler can't
 		// do anything useful anymore, so do not try to call it at all
 		//
@@ -1223,497 +1087,54 @@
 		// if we get both bits it was a real EV_VANISHED delivery
 		os_atomic_store2o(ds, ds_pending_data, 0, relaxed);
 #if HAVE_MACH
-	} else if (ke->filter == EVFILT_MACHPORT) {
-		data = DISPATCH_MACH_RECV_MESSAGE;
+	} else if (dr->du_filter == EVFILT_MACHPORT) {
 		os_atomic_store2o(ds, ds_pending_data, data, relaxed);
 #endif
-	} else if (ds->ds_is_level) {
-		// ke->data is signed and "negative available data" makes no sense
-		// zero bytes happens when EV_EOF is set
-		dispatch_assert(ke->data >= 0l);
-		data = ~(unsigned long)ke->data;
+	} else if (dr->du_is_level) {
 		os_atomic_store2o(ds, ds_pending_data, data, relaxed);
-	} else if (ds->ds_is_adder) {
-		data = (unsigned long)ke->data;
+	} else if (dr->du_is_adder) {
 		os_atomic_add2o(ds, ds_pending_data, data, relaxed);
-	} else if (ke->fflags & ds->ds_pending_data_mask) {
-		data = ke->fflags & ds->ds_pending_data_mask;
+	} else if (data) {
 		os_atomic_or2o(ds, ds_pending_data, data, relaxed);
 	}
+	_dispatch_debug("kevent-source[%p]: merged kevent[%p]", ds, dr);
 
 done:
-#if DISPATCH_USE_KEVENT_QOS
-	pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-#endif
-	dx_wakeup(ds, pp, flags | DISPATCH_WAKEUP_FLUSH);
+	_dispatch_object_debug(ds, "%s", __func__);
+	dx_wakeup(ds, _dispatch_qos_from_pp(pp), wflags | DISPATCH_WAKEUP_FLUSH);
 }
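
The du_is_adder, du_is_level and OR branches above are what back the
user-visible coalescing semantics of the custom data source types. A minimal
sketch of the public-API view (queue label and values are illustrative;
whether the two merges coalesce into a single handler invocation depends on
timing):

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void)
    {
        dispatch_queue_t q = dispatch_queue_create("com.example.data", NULL);
        dispatch_source_t src = dispatch_source_create(
                DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0, q);
        dispatch_source_set_event_handler(src, ^{
            // DATA_ADD accumulates pending data (the os_atomic_add2o branch
            // above): prints 5 if both merges land before the handler runs
            printf("data: %lu\n", dispatch_source_get_data(src));
        });
        dispatch_activate(src);
        dispatch_source_merge_data(src, 2);
        dispatch_source_merge_data(src, 3);
        dispatch_main();
    }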
 
 #pragma mark -
-#pragma mark dispatch_kevent_t
-
-#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD
-static void _dispatch_kevent_guard(dispatch_kevent_t dk);
-static void _dispatch_kevent_unguard(dispatch_kevent_t dk);
-#else
-static inline void _dispatch_kevent_guard(dispatch_kevent_t dk) { (void)dk; }
-static inline void _dispatch_kevent_unguard(dispatch_kevent_t dk) { (void)dk; }
-#endif
-
-#if !DISPATCH_USE_EV_UDATA_SPECIFIC
-static struct dispatch_kevent_s _dispatch_kevent_data_or = {
-	.dk_kevent = {
-		.filter = DISPATCH_EVFILT_CUSTOM_OR,
-		.flags = EV_CLEAR,
-	},
-	.dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_or.dk_sources),
-};
-static struct dispatch_kevent_s _dispatch_kevent_data_add = {
-	.dk_kevent = {
-		.filter = DISPATCH_EVFILT_CUSTOM_ADD,
-	},
-	.dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources),
-};
-#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC
-
-#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1))
-
-DISPATCH_CACHELINE_ALIGN
-static TAILQ_HEAD(, dispatch_kevent_s) _dispatch_sources[DSL_HASH_SIZE];
-
-static void
-_dispatch_kevent_init()
-{
-	unsigned int i;
-	for (i = 0; i < DSL_HASH_SIZE; i++) {
-		TAILQ_INIT(&_dispatch_sources[i]);
-	}
-
-#if !DISPATCH_USE_EV_UDATA_SPECIFIC
-	TAILQ_INSERT_TAIL(&_dispatch_sources[0],
-			&_dispatch_kevent_data_or, dk_list);
-	TAILQ_INSERT_TAIL(&_dispatch_sources[0],
-			&_dispatch_kevent_data_add, dk_list);
-	_dispatch_kevent_data_or.dk_kevent.udata =
-			(_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_or;
-	_dispatch_kevent_data_add.dk_kevent.udata =
-			(_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_add;
-#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC
-}
-
-static inline uintptr_t
-_dispatch_kevent_hash(uint64_t ident, short filter)
-{
-	uint64_t value;
-#if HAVE_MACH
-	value = (filter == EVFILT_MACHPORT ||
-			filter == DISPATCH_EVFILT_MACH_NOTIFICATION ?
-			MACH_PORT_INDEX(ident) : ident);
-#else
-	value = ident;
-	(void)filter;
-#endif
-	return DSL_HASH((uintptr_t)value);
-}
-
-static dispatch_kevent_t
-_dispatch_kevent_find(uint64_t ident, short filter)
-{
-	uintptr_t hash = _dispatch_kevent_hash(ident, filter);
-	dispatch_kevent_t dki;
-
-	TAILQ_FOREACH(dki, &_dispatch_sources[hash], dk_list) {
-		if (dki->dk_kevent.ident == ident && dki->dk_kevent.filter == filter) {
-			break;
-		}
-	}
-	return dki;
-}
-
-static void
-_dispatch_kevent_insert(dispatch_kevent_t dk)
-{
-	if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) return;
-	_dispatch_kevent_guard(dk);
-	uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident,
-			dk->dk_kevent.filter);
-	TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list);
-}
-
-// Find existing kevents, and merge any new flags if necessary
-static bool
-_dispatch_kevent_register(dispatch_kevent_t *dkp, pthread_priority_t pp,
-		uint32_t *flgp)
-{
-	dispatch_kevent_t dk = NULL, ds_dkev = *dkp;
-	uint32_t new_flags;
-	bool do_resume = false;
-
-	if (!(ds_dkev->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-		dk = _dispatch_kevent_find(ds_dkev->dk_kevent.ident,
-				ds_dkev->dk_kevent.filter);
-	}
-	if (dk) {
-		// If an existing dispatch kevent is found, check to see if new flags
-		// need to be added to the existing kevent
-		new_flags = ~dk->dk_kevent.fflags & ds_dkev->dk_kevent.fflags;
-		dk->dk_kevent.fflags |= ds_dkev->dk_kevent.fflags;
-		free(ds_dkev);
-		*dkp = dk;
-		do_resume = new_flags;
-	} else {
-		dk = ds_dkev;
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-		if (!_dispatch_kevent_workqueue_enabled) {
-			// do nothing
-		} else if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-			dk->dk_kevent.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-		} else {
-			pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK |
-					_PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
-			if (!pp) pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-			_dispatch_assert_is_valid_qos_class(pp);
-			dk->dk_kevent.qos = (_dispatch_kevent_priority_t)pp;
-		}
-#else
-		(void)pp;
-#endif
-		_dispatch_kevent_insert(dk);
-		new_flags = dk->dk_kevent.fflags;
-		do_resume = true;
-	}
-	// Re-register the kevent with the kernel if new flags were added
-	// by the dispatch kevent
-	if (do_resume) {
-		dk->dk_kevent.flags |= EV_ADD;
-	}
-	*flgp = new_flags;
-	return do_resume;
-}
-
-static long
-_dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags,
-		uint32_t del_flags)
-{
-	long r;
-	bool oneshot;
-	if (dk->dk_kevent.flags & EV_DELETE) {
-		return 0;
-	}
-	switch (dk->dk_kevent.filter) {
-	case DISPATCH_EVFILT_TIMER:
-	case DISPATCH_EVFILT_CUSTOM_ADD:
-	case DISPATCH_EVFILT_CUSTOM_OR:
-		// these types not registered with kevent
-		return 0;
-#if HAVE_MACH
-	case DISPATCH_EVFILT_MACH_NOTIFICATION:
-		return _dispatch_kevent_mach_notify_resume(dk, new_flags, del_flags);
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-	case EVFILT_MACHPORT:
-		if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-			return _dispatch_kevent_machport_resume(dk, new_flags, del_flags);
-		}
-		// fall through
-#endif
-#endif // HAVE_MACH
-	default:
-		// oneshot dk may be freed by the time we return from
-		// _dispatch_kq_immediate_update if the event was delivered (and then
-		// unregistered) concurrently.
-		oneshot = (dk->dk_kevent.flags & EV_ONESHOT);
-		r = _dispatch_kq_immediate_update(&dk->dk_kevent);
-		if (r && (dk->dk_kevent.flags & EV_ADD) &&
-				(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-			dk->dk_kevent.flags |= EV_DELETE;
-			dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED);
-		} else if (!oneshot && (dk->dk_kevent.flags & EV_DISPATCH)) {
-			// we can safely skip doing this for ONESHOT events because
-			// the next kq update we will do is _dispatch_kevent_dispose()
-			// which also clears EV_ADD.
-			dk->dk_kevent.flags &= ~(EV_ADD|EV_VANISHED);
-		}
-		return r;
-	}
-	(void)new_flags; (void)del_flags;
-}
-
-static long
-_dispatch_kevent_dispose(dispatch_kevent_t dk, unsigned int options)
-{
-	long r = 0;
-	switch (dk->dk_kevent.filter) {
-	case DISPATCH_EVFILT_TIMER:
-	case DISPATCH_EVFILT_CUSTOM_ADD:
-	case DISPATCH_EVFILT_CUSTOM_OR:
-		if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) {
-			free(dk);
-		} else {
-			// these sources live on statically allocated lists
-		}
-		return r;
-	}
-	if (!(dk->dk_kevent.flags & EV_DELETE)) {
-		dk->dk_kevent.flags |= EV_DELETE;
-		dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED);
-		if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) {
-			dk->dk_kevent.flags |= EV_ENABLE;
-		}
-		switch (dk->dk_kevent.filter) {
-#if HAVE_MACH
-		case DISPATCH_EVFILT_MACH_NOTIFICATION:
-			r = _dispatch_kevent_mach_notify_resume(dk, 0,dk->dk_kevent.fflags);
-			break;
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-		case EVFILT_MACHPORT:
-			if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-				r = _dispatch_kevent_machport_resume(dk,0,dk->dk_kevent.fflags);
-				break;
-			}
-			// fall through
-#endif
-#endif
-		default:
-			if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) {
-				_dispatch_kq_deferred_update(&dk->dk_kevent);
-			} else {
-				r = _dispatch_kq_immediate_update(&dk->dk_kevent);
-			}
-			break;
-		}
-		if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) {
-			dk->dk_kevent.flags &= ~EV_ENABLE;
-		}
-	}
-	if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) {
-		bool deferred_delete = (r == EINPROGRESS);
-#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS
-		if (r == ENOENT) deferred_delete = true;
-#endif
-		if (deferred_delete) {
-			// deferred EV_DELETE or concurrent EV_DELETE delivery
-			dk->dk_kevent.flags &= ~EV_DELETE;
-			dk->dk_kevent.flags |= EV_ENABLE;
-			return r;
-		}
-	} else {
-		uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident,
-				dk->dk_kevent.filter);
-		TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list);
-	}
-	_dispatch_kevent_unguard(dk);
-	free(dk);
-	return r;
-}
-
-static long
-_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg,
-		unsigned int options)
-{
-	dispatch_source_refs_t dri;
-	uint32_t del_flags, fflags = 0;
-	long r = 0;
-
-	if (TAILQ_EMPTY(&dk->dk_sources) ||
-			(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
-		r = _dispatch_kevent_dispose(dk, options);
-	} else {
-		TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) {
-			dispatch_source_t dsi = _dispatch_source_from_refs(dri);
-			uint32_t mask = (uint32_t)dsi->ds_pending_data_mask;
-			fflags |= mask;
-		}
-		del_flags = flg & ~fflags;
-		if (del_flags) {
-			dk->dk_kevent.flags |= EV_ADD;
-			dk->dk_kevent.fflags &= ~del_flags;
-			r = _dispatch_kevent_resume(dk, 0, del_flags);
-		}
-	}
-	return r;
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_kevent_proc_exit(_dispatch_kevent_qos_s *ke)
-{
-	// EVFILT_PROC may fail with ESRCH when the process exists but is a zombie
-	// <rdar://problem/5067725>. As a workaround, we simulate an exit event for
-	// any EVFILT_PROC with an invalid pid <rdar://problem/6626350>.
-	_dispatch_kevent_qos_s fake;
-	fake = *ke;
-	fake.flags &= ~EV_ERROR;
-	fake.flags |= EV_ONESHOT;
-	fake.fflags = NOTE_EXIT;
-	fake.data = 0;
-	_dispatch_kevent_debug("synthetic NOTE_EXIT", ke);
-	_dispatch_kevent_merge(&fake);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_kevent_error(_dispatch_kevent_qos_s *ke)
-{
-	_dispatch_kevent_qos_s *kev = NULL;
-
-	if (ke->flags & EV_DELETE) {
-		if (ke->flags & EV_UDATA_SPECIFIC) {
-			if (ke->data == EINPROGRESS) {
-				// deferred EV_DELETE
-				return;
-			}
-#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS
-			if (ke->data == ENOENT) {
-				// deferred EV_DELETE
-				return;
-			}
-#endif
-		}
-		// for EV_DELETE if the update was deferred we may have reclaimed
-		// our dispatch_kevent_t, and it is unsafe to dereference it now.
-	} else if (ke->udata) {
-		kev = &((dispatch_kevent_t)ke->udata)->dk_kevent;
-		ke->flags |= kev->flags;
-	}
-
-#if HAVE_MACH
-	if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP &&
-			(ke->flags & EV_ADD) && _dispatch_evfilt_machport_direct_enabled &&
-			kev && (kev->fflags & MACH_RCV_MSG)) {
-		DISPATCH_INTERNAL_CRASH(ke->ident,
-				"Missing EVFILT_MACHPORT support for ports");
-	}
-#endif
-
-	if (ke->data) {
-		// log the unexpected error
-		_dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter),
-				!ke->udata ? NULL :
-				ke->flags & EV_DELETE ? "delete" :
-				ke->flags & EV_ADD ? "add" :
-				ke->flags & EV_ENABLE ? "enable" : "monitor",
-				(int)ke->data);
-	}
-}
-
-static void
-_dispatch_kevent_drain(_dispatch_kevent_qos_s *ke)
-{
-#if DISPATCH_DEBUG
-	static dispatch_once_t pred;
-	dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger);
-#endif
-	if (ke->filter == EVFILT_USER) {
-		_dispatch_kevent_mgr_debug(ke);
-		return;
-	}
-	if (slowpath(ke->flags & EV_ERROR)) {
-		if (ke->filter == EVFILT_PROC && ke->data == ESRCH) {
-			_dispatch_debug("kevent[0x%llx]: ESRCH from EVFILT_PROC: "
-					"generating fake NOTE_EXIT", (unsigned long long)ke->udata);
-			return _dispatch_kevent_proc_exit(ke);
-		}
-		_dispatch_debug("kevent[0x%llx]: handling error",
-				(unsigned long long)ke->udata);
-		return _dispatch_kevent_error(ke);
-	}
-	if (ke->filter == EVFILT_TIMER) {
-		_dispatch_debug("kevent[0x%llx]: handling timer",
-				(unsigned long long)ke->udata);
-		return _dispatch_timers_kevent(ke);
-	}
-#if HAVE_MACH
-	if (ke->filter == EVFILT_MACHPORT) {
-		_dispatch_debug("kevent[0x%llx]: handling mach port",
-				(unsigned long long)ke->udata);
-		return _dispatch_mach_kevent_merge(ke);
-	}
-#endif
-	return _dispatch_kevent_merge(ke);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_kevent_merge(_dispatch_kevent_qos_s *ke)
-{
-	dispatch_kevent_t dk = (void*)ke->udata;
-	dispatch_source_refs_t dri, dr_next;
-
-	TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) {
-		_dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke);
-	}
-}
-
-#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD
-static void
-_dispatch_kevent_guard(dispatch_kevent_t dk)
-{
-	guardid_t guard;
-	const unsigned int guard_flags = GUARD_CLOSE;
-	int r, fd_flags = 0;
-	switch (dk->dk_kevent.filter) {
-	case EVFILT_READ:
-	case EVFILT_WRITE:
-	case EVFILT_VNODE:
-		guard = &dk->dk_kevent;
-		r = change_fdguard_np((int)dk->dk_kevent.ident, NULL, 0,
-				&guard, guard_flags, &fd_flags);
-		if (slowpath(r == -1)) {
-			int err = errno;
-			if (err != EPERM) {
-				(void)dispatch_assume_zero(err);
-			}
-			return;
-		}
-		dk->dk_kevent.ext[0] = guard_flags;
-		dk->dk_kevent.ext[1] = fd_flags;
-		break;
-	}
-}
-
-static void
-_dispatch_kevent_unguard(dispatch_kevent_t dk)
-{
-	guardid_t guard;
-	unsigned int guard_flags;
-	int r, fd_flags;
-	switch (dk->dk_kevent.filter) {
-	case EVFILT_READ:
-	case EVFILT_WRITE:
-	case EVFILT_VNODE:
-		guard_flags = (unsigned int)dk->dk_kevent.ext[0];
-		if (!guard_flags) {
-			return;
-		}
-		guard = &dk->dk_kevent;
-		fd_flags = (int)dk->dk_kevent.ext[1];
-		r = change_fdguard_np((int)dk->dk_kevent.ident, &guard,
-				guard_flags, NULL, 0, &fd_flags);
-		if (slowpath(r == -1)) {
-			(void)dispatch_assume_zero(errno);
-			return;
-		}
-		dk->dk_kevent.ext[0] = 0;
-		break;
-	}
-}
-#endif // DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD
-
-#pragma mark -
 #pragma mark dispatch_source_timer
 
 #if DISPATCH_USE_DTRACE
-static dispatch_source_refs_t
+static dispatch_timer_source_refs_t
 		_dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT];
 #define _dispatch_trace_next_timer_set(x, q) \
 		_dispatch_trace_next_timer[(q)] = (x)
 #define _dispatch_trace_next_timer_program(d, q) \
 		_dispatch_trace_timer_program(_dispatch_trace_next_timer[(q)], (d))
-#define _dispatch_trace_next_timer_wake(q) \
-		_dispatch_trace_timer_wake(_dispatch_trace_next_timer[(q)])
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mgr_trace_timers_wakes(void)
+{
+	uint32_t qos;
+
+	if (_dispatch_timers_will_wake) {
+		if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) {
+			for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) {
+				if (_dispatch_timers_will_wake & (1 << qos)) {
+					_dispatch_trace_timer_wake(_dispatch_trace_next_timer[qos]);
+				}
+			}
+		}
+		_dispatch_timers_will_wake = 0;
+	}
+}
 #else
 #define _dispatch_trace_next_timer_set(x, q)
 #define _dispatch_trace_next_timer_program(d, q)
-#define _dispatch_trace_next_timer_wake(q)
+#define _dispatch_mgr_trace_timers_wakes()
 #endif
 
 #define _dispatch_source_timer_telemetry_enabled() false
@@ -1721,118 +1142,57 @@
 DISPATCH_NOINLINE
 static void
 _dispatch_source_timer_telemetry_slow(dispatch_source_t ds,
-		uintptr_t ident, struct dispatch_timer_source_s *values)
+		dispatch_clock_t clock, struct dispatch_timer_source_s *values)
 {
 	if (_dispatch_trace_timer_configure_enabled()) {
-		_dispatch_trace_timer_configure(ds, ident, values);
+		_dispatch_trace_timer_configure(ds, clock, values);
 	}
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_source_timer_telemetry(dispatch_source_t ds, uintptr_t ident,
+_dispatch_source_timer_telemetry(dispatch_source_t ds, dispatch_clock_t clock,
 		struct dispatch_timer_source_s *values)
 {
 	if (_dispatch_trace_timer_configure_enabled() ||
 			_dispatch_source_timer_telemetry_enabled()) {
-		_dispatch_source_timer_telemetry_slow(ds, ident, values);
+		_dispatch_source_timer_telemetry_slow(ds, clock, values);
 		asm(""); // prevent tailcall
 	}
 }
 
-// approx 1 year (60s * 60m * 24h * 365d)
-#define FOREVER_NSEC 31536000000000000ull
-
-DISPATCH_ALWAYS_INLINE
-static inline uint64_t
-_dispatch_source_timer_now(uint64_t nows[], unsigned int tidx)
-{
-	unsigned int tk = DISPATCH_TIMER_KIND(tidx);
-	if (nows && fastpath(nows[tk] != 0)) {
-		return nows[tk];
-	}
-	uint64_t now;
-	switch (tk) {
-	case DISPATCH_TIMER_KIND_MACH:
-		now = _dispatch_absolute_time();
-		break;
-	case DISPATCH_TIMER_KIND_WALL:
-		now = _dispatch_get_nanoseconds();
-		break;
-	}
-	if (nows) {
-		nows[tk] = now;
-	}
-	return now;
-}
-
-static inline unsigned long
-_dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev)
-{
-	// calculate the number of intervals since last fire
-	unsigned long data, missed;
-	uint64_t now;
-	now = _dispatch_source_timer_now(NULL, _dispatch_source_timer_idx(dr));
-	missed = (unsigned long)((now - ds_timer(dr).last_fire) /
-			ds_timer(dr).interval);
-	// correct for missed intervals already delivered last time
-	data = prev - ds_timer(dr).missed + missed;
-	ds_timer(dr).missed = missed;
-	return data;
-}
-
-struct dispatch_set_timer_params {
-	dispatch_source_t ds;
-	uintptr_t ident;
-	struct dispatch_timer_source_s values;
-};
-
-static void
-_dispatch_source_set_timer3(void *context)
-{
-	// Called on the _dispatch_mgr_q
-	struct dispatch_set_timer_params *params = context;
-	dispatch_source_t ds = params->ds;
-	ds->ds_ident_hack = params->ident;
-	ds_timer(ds->ds_refs) = params->values;
-	// Clear any pending data that might have accumulated on
-	// older timer params <rdar://problem/8574886>
-	ds->ds_pending_data = 0;
-	// Re-arm in case we got disarmed because of pending set_timer suspension
-	_dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED);
-	_dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev);
-	dispatch_resume(ds);
-	// Must happen after resume to avoid getting disarmed due to suspension
-	_dispatch_timers_update(ds);
-	dispatch_release(ds);
-	if (params->values.flags & DISPATCH_TIMER_WALL_CLOCK) {
-		_dispatch_mach_host_calendar_change_register();
-	}
-	free(params);
-}
-
-static void
-_dispatch_source_set_timer2(void *context)
-{
-	// Called on the source queue
-	struct dispatch_set_timer_params *params = context;
-	dispatch_suspend(params->ds);
-	_dispatch_barrier_async_detached_f(&_dispatch_mgr_q, params,
-			_dispatch_source_set_timer3);
-}
-
 DISPATCH_NOINLINE
-static struct dispatch_set_timer_params *
-_dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start,
+static void
+_dispatch_source_timer_configure(dispatch_source_t ds)
+{
+	dispatch_timer_source_refs_t dt = ds->ds_timer_refs;
+	dispatch_timer_config_t dtc;
+
+	dtc = os_atomic_xchg2o(dt, dt_pending_config, NULL, dependency);
+	if (dtc->dtc_clock == DISPATCH_CLOCK_MACH) {
+		dt->du_fflags |= DISPATCH_TIMER_CLOCK_MACH;
+	} else {
+		dt->du_fflags &= ~(uint32_t)DISPATCH_TIMER_CLOCK_MACH;
+	}
+	dt->dt_timer = dtc->dtc_timer;
+	free(dtc);
+	if (ds->ds_is_installed) {
+		// Clear any pending data that might have accumulated on
+		// older timer params <rdar://problem/8574886>
+		os_atomic_store2o(ds, ds_pending_data, 0, relaxed);
+		_dispatch_timers_update(dt, 0);
+	}
+}
+
+static dispatch_timer_config_t
+_dispatch_source_timer_config_create(dispatch_time_t start,
 		uint64_t interval, uint64_t leeway)
 {
-	struct dispatch_set_timer_params *params;
-	params = _dispatch_calloc(1ul, sizeof(struct dispatch_set_timer_params));
-	params->ds = ds;
-	params->values.flags = ds_timer(ds->ds_refs).flags;
-
-	if (interval == 0) {
-		// we use zero internally to mean disabled
+	dispatch_timer_config_t dtc;
+	dtc = _dispatch_calloc(1ul, sizeof(struct dispatch_timer_config_s));
+	if (unlikely(interval == 0)) {
+		_dispatch_bug_deprecated("Setting timer interval to 0, "
+				"do you mean FOREVER?");
 		interval = 1;
 	} else if ((int64_t)interval < 0) {
 		// 6866347 - make sure nanoseconds won't overflow
@@ -1850,7 +1210,7 @@
 	if ((int64_t)start < 0) {
 		// wall clock
 		start = (dispatch_time_t)-((int64_t)start);
-		params->values.flags |= DISPATCH_TIMER_WALL_CLOCK;
+		dtc->dtc_clock = DISPATCH_CLOCK_WALL;
 	} else {
 		// absolute clock
 		interval = _dispatch_time_nano2mach(interval);
@@ -1862,64 +1222,50 @@
 			interval = 1;
 		}
 		leeway = _dispatch_time_nano2mach(leeway);
-		params->values.flags &= ~(unsigned long)DISPATCH_TIMER_WALL_CLOCK;
+		dtc->dtc_clock = DISPATCH_CLOCK_MACH;
 	}
-	params->ident = DISPATCH_TIMER_IDENT(params->values.flags);
-	params->values.target = start;
-	params->values.deadline = (start < UINT64_MAX - leeway) ?
-			start + leeway : UINT64_MAX;
-	params->values.interval = interval;
-	params->values.leeway = (interval == INT64_MAX || leeway < interval / 2) ?
-			leeway : interval / 2;
-	return params;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start,
-		uint64_t interval, uint64_t leeway, bool source_sync)
-{
-	if (slowpath(!ds->ds_is_timer) ||
-			slowpath(ds_timer(ds->ds_refs).flags & DISPATCH_TIMER_INTERVAL)) {
-		DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source");
+	if (interval < INT64_MAX && leeway > interval / 2) {
+		leeway = interval / 2;
 	}
 
-	struct dispatch_set_timer_params *params;
-	params = _dispatch_source_timer_params(ds, start, interval, leeway);
-
-	_dispatch_source_timer_telemetry(ds, params->ident, &params->values);
-	// Suspend the source so that it doesn't fire with pending changes
-	// The use of suspend/resume requires the external retain/release
-	dispatch_retain(ds);
-	if (source_sync) {
-		return _dispatch_barrier_trysync_or_async_f(ds->_as_dq, params,
-				_dispatch_source_set_timer2);
+	dtc->dtc_timer.target = start;
+	dtc->dtc_timer.interval = interval;
+	if (start + leeway < INT64_MAX) {
+		dtc->dtc_timer.deadline = start + leeway;
 	} else {
-		return _dispatch_source_set_timer2(params);
+		dtc->dtc_timer.deadline = INT64_MAX;
 	}
+	return dtc;
 }
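
A worked example of the clamping above (illustrative numbers): for a
mach-clock timer with interval = 1s and a requested leeway of 800ms, the
leeway is capped at interval / 2 = 500ms, so the deadline becomes
start + 500ms. The subsequent start + leeway < INT64_MAX check then pins the
deadline to INT64_MAX for starts near the end of the representable range
instead of letting the addition wrap.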
 
+DISPATCH_NOINLINE
 void
 dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start,
 		uint64_t interval, uint64_t leeway)
 {
-	_dispatch_source_set_timer(ds, start, interval, leeway, true);
+	dispatch_timer_source_refs_t dt = ds->ds_timer_refs;
+	dispatch_timer_config_t dtc;
+
+	if (unlikely(!dt->du_is_timer || (dt->du_fflags&DISPATCH_TIMER_INTERVAL))) {
+		DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source");
+	}
+
+	dtc = _dispatch_source_timer_config_create(start, interval, leeway);
+	_dispatch_source_timer_telemetry(ds, dtc->dtc_clock, &dtc->dtc_timer);
+	dtc = os_atomic_xchg2o(dt, dt_pending_config, dtc, release);
+	if (dtc) free(dtc);
+	dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH);
 }
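
For reference, this is the public entry point; a minimal usage sketch (queue
label, period and leeway are illustrative) showing how the configuration
staged in dt_pending_config gets applied once the source runs:

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void)
    {
        dispatch_queue_t q = dispatch_queue_create("com.example.timer", NULL);
        dispatch_source_t timer = dispatch_source_create(
                DISPATCH_SOURCE_TYPE_TIMER, 0, 0, q);

        // Fire one second from now, then every second, with 100ms of leeway;
        // the call below stages this in dt_pending_config, and the config is
        // applied on the kevent queue when the source is next invoked.
        dispatch_source_set_timer(timer,
                dispatch_time(DISPATCH_TIME_NOW, (int64_t)NSEC_PER_SEC),
                NSEC_PER_SEC, 100 * NSEC_PER_MSEC);
        dispatch_source_set_event_handler(timer, ^{ printf("tick\n"); });
        dispatch_activate(timer);
        dispatch_main();
    }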
 
-void
-_dispatch_source_set_runloop_timer_4CF(dispatch_source_t ds,
-		dispatch_time_t start, uint64_t interval, uint64_t leeway)
-{
-	// Don't serialize through the source queue for CF timers <rdar://13833190>
-	_dispatch_source_set_timer(ds, start, interval, leeway, false);
-}
-
-void
+static void
 _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval)
 {
-	dispatch_source_refs_t dr = ds->ds_refs;
-	#define NSEC_PER_FRAME (NSEC_PER_SEC/60)
-	const bool animation = ds_timer(dr).flags & DISPATCH_INTERVAL_UI_ANIMATION;
+#define NSEC_PER_FRAME (NSEC_PER_SEC/60)
+// approx 1 year (60s * 60m * 24h * 365d)
+#define FOREVER_NSEC 31536000000000000ull
+
+	dispatch_timer_source_refs_t dr = ds->ds_timer_refs;
+	const bool animation = dr->du_fflags & DISPATCH_INTERVAL_UI_ANIMATION;
 	if (fastpath(interval <= (animation ? FOREVER_NSEC/NSEC_PER_FRAME :
 			FOREVER_NSEC/NSEC_PER_MSEC))) {
 		interval *= animation ? NSEC_PER_FRAME : NSEC_PER_MSEC;
@@ -1928,494 +1274,857 @@
 	}
 	interval = _dispatch_time_nano2mach(interval);
 	uint64_t target = _dispatch_absolute_time() + interval;
-	target = (target / interval) * interval;
+	target -= (target % interval);
 	const uint64_t leeway = animation ?
 			_dispatch_time_nano2mach(NSEC_PER_FRAME) : interval / 2;
-	ds_timer(dr).target = target;
-	ds_timer(dr).deadline = target + leeway;
-	ds_timer(dr).interval = interval;
-	ds_timer(dr).leeway = leeway;
-	_dispatch_source_timer_telemetry(ds, ds->ds_ident_hack, &ds_timer(dr));
+	dr->dt_timer.target = target;
+	dr->dt_timer.deadline = target + leeway;
+	dr->dt_timer.interval = interval;
+	_dispatch_source_timer_telemetry(ds, DISPATCH_CLOCK_MACH, &dr->dt_timer);
 }
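
A quick worked example of the phase alignment above (illustrative units):
with interval = 1000 mach ticks and _dispatch_absolute_time() + interval =
12345, target -= (target % interval) rounds the first fire down to 12000, so
interval timers fire on multiples of their interval rather than at an
arbitrary phase derived from the call time.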
 
 #pragma mark -
+#pragma mark dispatch_after
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
+		void *ctxt, void *handler, bool block)
+{
+	dispatch_timer_source_refs_t dt;
+	dispatch_source_t ds;
+	uint64_t leeway, delta;
+
+	if (when == DISPATCH_TIME_FOREVER) {
+#if DISPATCH_DEBUG
+		DISPATCH_CLIENT_CRASH(0, "dispatch_after called with 'when' == infinity");
+#endif
+		return;
+	}
+
+	delta = _dispatch_timeout(when);
+	if (delta == 0) {
+		if (block) {
+			return dispatch_async(queue, handler);
+		}
+		return dispatch_async_f(queue, ctxt, handler);
+	}
+	leeway = delta / 10; // <rdar://problem/13447496>
+
+	if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC;
+	if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC;
+
+	// this function can and should be optimized to not use a dispatch source
+	ds = dispatch_source_create(&_dispatch_source_type_after, 0, 0, queue);
+	dt = ds->ds_timer_refs;
+
+	dispatch_continuation_t dc = _dispatch_continuation_alloc();
+	if (block) {
+		_dispatch_continuation_init(dc, ds, handler, 0, 0, 0);
+	} else {
+		_dispatch_continuation_init_f(dc, ds, ctxt, handler, 0, 0, 0);
+	}
+	// reference `ds` so that it doesn't show up as a leak
+	dc->dc_data = ds;
+	_dispatch_trace_continuation_push(ds->_as_dq, dc);
+	os_atomic_store2o(dt, ds_handler[DS_EVENT_HANDLER], dc, relaxed);
+
+	if ((int64_t)when < 0) {
+		// wall clock
+		when = (dispatch_time_t)-((int64_t)when);
+	} else {
+		// absolute clock
+		dt->du_fflags |= DISPATCH_TIMER_CLOCK_MACH;
+		leeway = _dispatch_time_nano2mach(leeway);
+	}
+	dt->dt_timer.target = when;
+	dt->dt_timer.interval = UINT64_MAX;
+	dt->dt_timer.deadline = when + leeway;
+	dispatch_activate(ds);
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt,
+		dispatch_function_t func)
+{
+	_dispatch_after(when, queue, ctxt, func, false);
+}
+
+#ifdef __BLOCKS__
+void
+dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
+		dispatch_block_t work)
+{
+	_dispatch_after(when, queue, NULL, work, true);
+}
+#endif
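+
+A minimal usage sketch of the public API implemented above (the delay value
+is illustrative); note that the implementation picks a leeway of delta / 10,
+clamped to [1ms, 60s]:
+
+    #include <dispatch/dispatch.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        // Run a block two seconds from now on the main queue.
+        dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
+                (int64_t)(2 * NSEC_PER_SEC)),
+                dispatch_get_main_queue(), ^{
+            printf("fired\n");
+        });
+        dispatch_main();
+    }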
+
+#pragma mark -
 #pragma mark dispatch_timers
 
-#define DISPATCH_TIMER_STRUCT(refs) \
-	uint64_t target, deadline; \
-	TAILQ_HEAD(, refs) dt_sources
+/*
+ * The dispatch_timer_heap_t structure is a double min-heap of timers,
+ * interleaving the by-target min-heap in the even slots and the by-deadline
+ * min-heap in the odd ones.
+ *
+ * The min element of these is held inline in the dispatch_timer_heap_t
+ * structure, and further entries are held in segments.
+ *
+ * dth_segments is the number of allocated segments.
+ *
+ * Segment 0 has a size of `DISPATCH_HEAP_INIT_SEGMENT_CAPACITY` pointers
+ * Segment k has a size of (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (k - 1)) pointers
+ *
+ * Segment n (dth_segments - 1) is the last segment and points its final n
+ * entries to previous segments. Its address is held in the `dth_heap` field.
+ *
+ * segment n   [ regular timer pointers | n-1 | k | 0 ]
+ *                                         |    |   |
+ * segment n-1 <---------------------------'    |   |
+ * segment k   <--------------------------------'   |
+ * segment 0   <------------------------------------'
+ */
+#define DISPATCH_HEAP_INIT_SEGMENT_CAPACITY 8u
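
To make the segment math concrete, here is a minimal standalone sketch
(helper names are hypothetical, not libdispatch's) that re-derives the usable
slot count for a given number of segments; it agrees with the closed form in
_dispatch_timer_heap_capacity() below:

    #include <stdint.h>
    #include <stdio.h>

    #define INIT_SEGMENT_CAPACITY 8u // DISPATCH_HEAP_INIT_SEGMENT_CAPACITY

    // Capacity of segment k in pointers: C for k == 0, C << (k - 1) after.
    static uint32_t segment_capacity(uint32_t k)
    {
        return k == 0 ? INIT_SEGMENT_CAPACITY
                      : INIT_SEGMENT_CAPACITY << (k - 1);
    }

    // Usable timer slots with `segments` segments allocated: the 2 inline
    // dth_min slots, plus all segment slots, minus the (segments - 1)
    // back-pointers the last segment dedicates to the previous segments.
    static uint32_t total_capacity(uint32_t segments)
    {
        if (segments == 0) return 2;
        uint32_t slots = 2;
        for (uint32_t k = 0; k < segments; k++) {
            slots += segment_capacity(k);
        }
        return slots - (segments - 1);
    }

    int main(void)
    {
        for (uint32_t s = 0; s <= 4; s++) {
            // prints 2, 10, 17, 32, 63 -- i.e. 2 + (C << (s - 1)) - (s - 1)
            printf("segments=%u capacity=%u\n", s, total_capacity(s));
        }
        return 0;
    }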
 
-typedef struct dispatch_timer_s {
-	DISPATCH_TIMER_STRUCT(dispatch_timer_source_refs_s);
-} *dispatch_timer_t;
+/*
+ * There are two min-heaps stored interleaved in a single array,
+ * even indices are for the by-target min-heap, and odd indices for
+ * the by-deadline one.
+ */
+#define DTH_HEAP_ID_MASK (DTH_ID_COUNT - 1)
+#define DTH_HEAP_ID(idx) ((idx) & DTH_HEAP_ID_MASK)
+#define DTH_IDX_FOR_HEAP_ID(idx, heap_id) \
+		(((idx) & ~DTH_HEAP_ID_MASK) | (heap_id))
 
-#define DISPATCH_TIMER_INITIALIZER(tidx) \
-	[tidx] = { \
-		.target = UINT64_MAX, \
-		.deadline = UINT64_MAX, \
-		.dt_sources = TAILQ_HEAD_INITIALIZER( \
-				_dispatch_timer[tidx].dt_sources), \
-	}
-#define DISPATCH_TIMER_INIT(kind, qos) \
-		DISPATCH_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \
-		DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos))
-
-struct dispatch_timer_s _dispatch_timer[] =  {
-	DISPATCH_TIMER_INIT(WALL, NORMAL),
-	DISPATCH_TIMER_INIT(WALL, CRITICAL),
-	DISPATCH_TIMER_INIT(WALL, BACKGROUND),
-	DISPATCH_TIMER_INIT(MACH, NORMAL),
-	DISPATCH_TIMER_INIT(MACH, CRITICAL),
-	DISPATCH_TIMER_INIT(MACH, BACKGROUND),
-};
-#define DISPATCH_TIMER_COUNT \
-		((sizeof(_dispatch_timer) / sizeof(_dispatch_timer[0])))
-
-#if __linux__
-#define DISPATCH_KEVENT_TIMER_UDATA(tidx) \
-		(void*)&_dispatch_kevent_timer[tidx]
-#else
-#define DISPATCH_KEVENT_TIMER_UDATA(tidx) \
-		(uintptr_t)&_dispatch_kevent_timer[tidx]
-#endif
-#ifdef __LP64__
-#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \
-		.udata = DISPATCH_KEVENT_TIMER_UDATA(tidx)
-#else // __LP64__
-// dynamic initialization in _dispatch_timers_init()
-#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \
-		.udata = 0
-#endif // __LP64__
-#define DISPATCH_KEVENT_TIMER_INITIALIZER(tidx) \
-	[tidx] = { \
-		.dk_kevent = { \
-			.ident = tidx, \
-			.filter = DISPATCH_EVFILT_TIMER, \
-			DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx), \
-		}, \
-		.dk_sources = TAILQ_HEAD_INITIALIZER( \
-				_dispatch_kevent_timer[tidx].dk_sources), \
-	}
-#define DISPATCH_KEVENT_TIMER_INIT(kind, qos) \
-		DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \
-		DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos))
-
-struct dispatch_kevent_s _dispatch_kevent_timer[] = {
-	DISPATCH_KEVENT_TIMER_INIT(WALL, NORMAL),
-	DISPATCH_KEVENT_TIMER_INIT(WALL, CRITICAL),
-	DISPATCH_KEVENT_TIMER_INIT(WALL, BACKGROUND),
-	DISPATCH_KEVENT_TIMER_INIT(MACH, NORMAL),
-	DISPATCH_KEVENT_TIMER_INIT(MACH, CRITICAL),
-	DISPATCH_KEVENT_TIMER_INIT(MACH, BACKGROUND),
-	DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX_DISARM),
-};
-#define DISPATCH_KEVENT_TIMER_COUNT \
-		((sizeof(_dispatch_kevent_timer) / sizeof(_dispatch_kevent_timer[0])))
-
-#define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8)
-#define DISPATCH_KEVENT_TIMEOUT_INITIALIZER(tidx, note) \
-	[tidx] = { \
-		.ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK|(tidx), \
-		.filter = EVFILT_TIMER, \
-		.flags = EV_ONESHOT, \
-		.fflags = NOTE_ABSOLUTE|NOTE_NSECONDS|NOTE_LEEWAY|(note), \
-	}
-#define DISPATCH_KEVENT_TIMEOUT_INIT(kind, qos, note) \
-		DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_INDEX( \
-		DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos), note)
-
-_dispatch_kevent_qos_s _dispatch_kevent_timeout[] = {
-	DISPATCH_KEVENT_TIMEOUT_INIT(WALL, NORMAL, NOTE_MACH_CONTINUOUS_TIME),
-	DISPATCH_KEVENT_TIMEOUT_INIT(WALL, CRITICAL, NOTE_MACH_CONTINUOUS_TIME | NOTE_CRITICAL),
-	DISPATCH_KEVENT_TIMEOUT_INIT(WALL, BACKGROUND, NOTE_MACH_CONTINUOUS_TIME | NOTE_BACKGROUND),
-	DISPATCH_KEVENT_TIMEOUT_INIT(MACH, NORMAL, 0),
-	DISPATCH_KEVENT_TIMEOUT_INIT(MACH, CRITICAL, NOTE_CRITICAL),
-	DISPATCH_KEVENT_TIMEOUT_INIT(MACH, BACKGROUND, NOTE_BACKGROUND),
-};
-#define DISPATCH_KEVENT_TIMEOUT_COUNT \
-		((sizeof(_dispatch_kevent_timeout) / sizeof(_dispatch_kevent_timeout[0])))
-#if __has_feature(c_static_assert)
-_Static_assert(DISPATCH_KEVENT_TIMEOUT_COUNT == DISPATCH_TIMER_INDEX_COUNT - 1,
-		"should have a kevent for everything but disarm (ddt assumes this)");
-#endif
-
-#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \
-		[DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC
-
-static const uint64_t _dispatch_kevent_coalescing_window[] = {
-	DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75),
-	DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1),
-	DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100),
-};
-
-#define _dispatch_timers_insert(tidx, dra, dr, dr_list, dta, dt, dt_list) ({ \
-	typeof(dr) dri = NULL; typeof(dt) dti; \
-	if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \
-		TAILQ_FOREACH(dri, &dra[tidx].dk_sources, dr_list) { \
-			if (ds_timer(dr).target < ds_timer(dri).target) { \
-				break; \
-			} \
-		} \
-		TAILQ_FOREACH(dti, &dta[tidx].dt_sources, dt_list) { \
-			if (ds_timer(dt).deadline < ds_timer(dti).deadline) { \
-				break; \
-			} \
-		} \
-		if (dti) { \
-			TAILQ_INSERT_BEFORE(dti, dt, dt_list); \
-		} else { \
-			TAILQ_INSERT_TAIL(&dta[tidx].dt_sources, dt, dt_list); \
-		} \
-	} \
-	if (dri) { \
-		TAILQ_INSERT_BEFORE(dri, dr, dr_list); \
-	} else { \
-		TAILQ_INSERT_TAIL(&dra[tidx].dk_sources, dr, dr_list); \
-	} \
-	})
-
-#define _dispatch_timers_remove(tidx, dk, dra, dr, dr_list, dta, dt, dt_list) \
-	({ \
-	if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \
-		TAILQ_REMOVE(&dta[tidx].dt_sources, dt, dt_list); \
-	} \
-	TAILQ_REMOVE(dk ? &(*(dk)).dk_sources : &dra[tidx].dk_sources, dr, \
-			dr_list); })
-
-#define _dispatch_timers_check(dra, dta) ({ \
-	unsigned int timerm = _dispatch_timers_mask; \
-	bool update = false; \
-	unsigned int tidx; \
-	for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { \
-		if (!(timerm & (1 << tidx))){ \
-			continue; \
-		} \
-		dispatch_timer_source_refs_t dr = (dispatch_timer_source_refs_t) \
-				TAILQ_FIRST(&dra[tidx].dk_sources); \
-		dispatch_timer_source_refs_t dt = (dispatch_timer_source_refs_t) \
-				TAILQ_FIRST(&dta[tidx].dt_sources); \
-		uint64_t target = dr ? ds_timer(dr).target : UINT64_MAX; \
-		uint64_t deadline = dr ? ds_timer(dt).deadline : UINT64_MAX; \
-		if (target != dta[tidx].target) { \
-			dta[tidx].target = target; \
-			update = true; \
-		} \
-		if (deadline != dta[tidx].deadline) { \
-			dta[tidx].deadline = deadline; \
-			update = true; \
-		} \
-	} \
-	update; })
-
-static bool _dispatch_timers_reconfigure, _dispatch_timer_expired;
-static unsigned int _dispatch_timers_mask;
-static bool _dispatch_timers_force_max_leeway;
-
-static void
-_dispatch_timers_init(void)
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_dispatch_timer_heap_capacity(uint32_t segments)
 {
-#ifndef __LP64__
-	unsigned int tidx;
-	for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
-		_dispatch_kevent_timer[tidx].dk_kevent.udata =
-				DISPATCH_KEVENT_TIMER_UDATA(tidx);
+	if (segments == 0) return 2;
+	uint32_t seg_no = segments - 1;
+	// for C = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY,
+	// 2 + C + SUM(C << (i-1), i = 1..seg_no) - seg_no
+	return 2 + (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << seg_no) - seg_no;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_timer_heap_grow(dispatch_timer_heap_t dth)
+{
+	uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY;
+	uint32_t seg_no = dth->dth_segments++;
+	void **heap, **heap_prev = dth->dth_heap;
+
+	if (seg_no > 0) {
+		seg_capacity <<= (seg_no - 1);
 	}
-#endif // __LP64__
-	if (slowpath(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) {
-		_dispatch_timers_force_max_leeway = true;
+	heap = _dispatch_calloc(seg_capacity, sizeof(void *));
+	if (seg_no > 1) {
+		uint32_t prev_seg_no = seg_no - 1;
+		uint32_t prev_seg_capacity = seg_capacity >> 1;
+		memcpy(&heap[seg_capacity - prev_seg_no],
+				&heap_prev[prev_seg_capacity - prev_seg_no],
+				prev_seg_no * sizeof(void *));
+	}
+	if (seg_no > 0) {
+		heap[seg_capacity - seg_no] = heap_prev;
+	}
+	dth->dth_heap = heap;
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_timer_heap_shrink(dispatch_timer_heap_t dth)
+{
+	uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY;
+	uint32_t seg_no = --dth->dth_segments;
+	void **heap = dth->dth_heap, **heap_prev = NULL;
+
+	if (seg_no > 0) {
+		seg_capacity <<= (seg_no - 1);
+		heap_prev = heap[seg_capacity - seg_no];
+	}
+	if (seg_no > 1) {
+		uint32_t prev_seg_no = seg_no - 1;
+		uint32_t prev_seg_capacity = seg_capacity >> 1;
+		memcpy(&heap_prev[prev_seg_capacity - prev_seg_no],
+				&heap[seg_capacity - prev_seg_no],
+				prev_seg_no * sizeof(void *));
+	}
+	dth->dth_heap = heap_prev;
+	free(heap);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_timer_source_refs_t *
+_dispatch_timer_heap_get_slot(dispatch_timer_heap_t dth, uint32_t idx)
+{
+	uint32_t seg_no, segments = dth->dth_segments;
+	void **segment;
+
+	if (idx < DTH_ID_COUNT) {
+		return &dth->dth_min[idx];
+	}
+	idx -= DTH_ID_COUNT;
+
+	// Derive the segment number from the index. Naming
+	// DISPATCH_HEAP_INIT_SEGMENT_CAPACITY `C`, the segments index ranges are:
+	// 0: 0 .. (C - 1)
+	// 1: C .. 2 * C - 1
+	// k: 2^(k-1) * C .. 2^k * C - 1
+	// so `k` can be derived from the first bit set in `idx`
+	seg_no = (uint32_t)(__builtin_clz(DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1) -
+			__builtin_clz(idx | (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1)));
+	if (seg_no + 1 == segments) {
+		segment = dth->dth_heap;
+	} else {
+		uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY;
+		seg_capacity <<= (segments - 2);
+		segment = dth->dth_heap[seg_capacity - seg_no - 1];
+	}
+	if (seg_no) {
+		idx -= DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (seg_no - 1);
+	}
+	return (dispatch_timer_source_refs_t *)(segment + idx);
+}
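
A small sanity check of the clz derivation above (a standalone sketch;
`segment_for` is a hypothetical name), exercising the first three segment
index ranges for C = 8, on indices from which the DTH_ID_COUNT inline slots
have already been subtracted:

    #include <assert.h>
    #include <stdint.h>

    #define C 8u // DISPATCH_HEAP_INIT_SEGMENT_CAPACITY

    // First bit set in `idx` beyond the segment-0 range gives the segment.
    static uint32_t segment_for(uint32_t idx)
    {
        return (uint32_t)(__builtin_clz(C - 1) -
                __builtin_clz(idx | (C - 1)));
    }

    int main(void)
    {
        assert(segment_for(0) == 0 && segment_for(7) == 0);   // 0 .. C-1
        assert(segment_for(8) == 1 && segment_for(15) == 1);  // C .. 2C-1
        assert(segment_for(16) == 2 && segment_for(31) == 2); // 2C .. 4C-1
        return 0;
    }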
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_timer_heap_set(dispatch_timer_source_refs_t *slot,
+		dispatch_timer_source_refs_t dt, uint32_t idx)
+{
+	*slot = dt;
+	dt->dt_heap_entry[DTH_HEAP_ID(idx)] = idx;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_dispatch_timer_heap_parent(uint32_t idx)
+{
+	uint32_t heap_id = DTH_HEAP_ID(idx);
+	idx = (idx - DTH_ID_COUNT) / 2; // go to the parent
+	return DTH_IDX_FOR_HEAP_ID(idx, heap_id);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_dispatch_timer_heap_left_child(uint32_t idx)
+{
+	uint32_t heap_id = DTH_HEAP_ID(idx);
+	// 2 * (idx - heap_id) + DTH_ID_COUNT + heap_id
+	return 2 * idx + DTH_ID_COUNT - heap_id;
+}
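
Assuming DTH_ID_COUNT == 2 (the two heaps interleaved as described above),
the parent/child arithmetic keeps each index inside its own heap: the
children of idx are left_child(idx) and left_child(idx) + 2. A minimal sketch
with a few spot checks:

    #include <assert.h>
    #include <stdint.h>

    #define ID_COUNT 2u // assumed value of DTH_ID_COUNT
    #define HEAP_ID(idx)          ((idx) & (ID_COUNT - 1))
    #define IDX_FOR_HEAP_ID(i, h) (((i) & ~(ID_COUNT - 1)) | (h))

    static uint32_t parent(uint32_t idx)
    {
        uint32_t heap_id = HEAP_ID(idx);
        return IDX_FOR_HEAP_ID((idx - ID_COUNT) / 2, heap_id);
    }

    static uint32_t left_child(uint32_t idx)
    {
        return 2 * idx + ID_COUNT - HEAP_ID(idx);
    }

    int main(void)
    {
        // target heap (even indices): 0 -> children 2 and 4
        assert(left_child(0) == 2 && parent(2) == 0 && parent(4) == 0);
        // deadline heap (odd indices): 1 -> children 3 and 5
        assert(left_child(1) == 3 && parent(3) == 1 && parent(5) == 1);
        return 0;
    }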
+
+#if DISPATCH_HAVE_TIMER_COALESCING
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_dispatch_timer_heap_walk_skip(uint32_t idx, uint32_t count)
+{
+	uint32_t heap_id = DTH_HEAP_ID(idx);
+
+	idx -= heap_id;
+	if (unlikely(idx + DTH_ID_COUNT == count)) {
+		// reaching `count` doesn't mean we're done, but there is a weird
+		// corner case if the last item of the heap is a left child:
+		//
+		//     /\
+		//    /  \
+		//   /  __\
+		//  /__/
+		//     ^
+		//
+		// The formula below would return the sibling of `idx` which is
+		// out of bounds. Fortunately, the correct answer is the same
+		// as for idx's parent
+		idx = _dispatch_timer_heap_parent(idx);
+	}
+
+	//
+	// When considering the index in a non-interleaved, 1-based array
+	// representation of a heap, hence looking at (idx / DTH_ID_COUNT + 1)
+	// for a given idx in our dual-heaps, that index is in one of two forms:
+	//
+	//     (a) 1xxxx011111    or    (b) 111111111
+	//         d    i    0              d       0
+	//
+	// The first bit set is the row of the binary tree node (0-based).
+	// The following digits from most to least significant represent the path
+	// to that node, where `0` is a left turn and `1` a right turn.
+	//
+	// For example 0b0101 (5) is a node on row 2 accessed going left then right:
+	//
+	// row 0          1
+	//              /   .
+	// row 1      2       3
+	//           . \     . .
+	// row 2    4   5   6   7
+	//         : : : : : : : :
+	//
+	// Skipping a sub-tree in walk order means going to the sibling of the last
+	// node reached after we turned left. If the node was of the form (a),
+	// this node is 1xxxx1, which for the above example is 0b0011 (3).
+	// If the node was of the form (b) then we never took a left, meaning
+	// we reached the last element in traversal order.
+	//
+
+	//
+	// we want to find
+	// - the least significant bit set to 0 in (idx / DTH_ID_COUNT + 1)
+	// - which is offset by log_2(DTH_ID_COUNT) from the position of the least
+	//   significant 0 in (idx + DTH_ID_COUNT + DTH_ID_COUNT - 1)
+	//   since idx is a multiple of DTH_ID_COUNT and DTH_ID_COUNT a power of 2.
+	// - which in turn is the same as the position of the least significant 1 in
+	//   ~(idx + DTH_ID_COUNT + DTH_ID_COUNT - 1)
+	//
+	dispatch_static_assert(powerof2(DTH_ID_COUNT));
+	idx += DTH_ID_COUNT + DTH_ID_COUNT - 1;
+	idx >>= __builtin_ctz(~idx);
+
+	//
+	// `idx` is now either:
+	// - 1xxxx0 as the position in a 0-based array representation of a
+	//   non-interleaved heap, so we just have to compute the interleaved index.
+	//   interleaved heap, so we just have to compute the interleaved index.
+	//
+	return likely(idx) ? DTH_ID_COUNT * idx + heap_id : UINT32_MAX;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_dispatch_timer_heap_walk_next(uint32_t idx, uint32_t count)
+{
+	//
+	// Goes to the next element in heap walk order, which is the prefix ordered
+	// walk of the tree.
+	//
+	// From a given node, the next item to return is the left child if it
+	// exists, else the first right sibling we find by walking our parent chain,
+	// which is exactly what _dispatch_timer_heap_walk_skip() returns.
+	//
+	uint32_t lchild = _dispatch_timer_heap_left_child(idx);
+	if (lchild < count) {
+		return lchild;
+	}
+	return _dispatch_timer_heap_walk_skip(idx, count);
+}
+
+DISPATCH_NOINLINE
+static uint64_t
+_dispatch_timer_heap_max_target_before(dispatch_timer_heap_t dth, uint64_t limit)
+{
+	dispatch_timer_source_refs_t dri;
+	uint32_t idx = _dispatch_timer_heap_left_child(DTH_TARGET_ID);
+	uint32_t count = dth->dth_count;
+	uint64_t tmp, target = dth->dth_min[DTH_TARGET_ID]->dt_timer.target;
+
+	while (idx < count) {
+		dri = *_dispatch_timer_heap_get_slot(dth, idx);
+		tmp = dri->dt_timer.target;
+		if (tmp > limit) {
+			// skip subtree since none of the targets below can be before limit
+			idx = _dispatch_timer_heap_walk_skip(idx, count);
+		} else {
+			target = tmp;
+			idx = _dispatch_timer_heap_walk_next(idx, count);
+		}
+	}
+	return target;
+}
+#endif // DISPATCH_HAVE_TIMER_COALESCING
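
The pruning in _dispatch_timer_heap_max_target_before() is sound because of the min-heap invariant: if a node's target exceeds `limit`, every target in its subtree does too. A condensed model of the same scan on a plain 0-based, non-interleaved min-heap (names illustrative; the original keeps the last qualifying target in walk order rather than an explicit maximum, relying on the same pruning):

    #include <assert.h>
    #include <stdint.h>

    // Preorder skip on a 0-based array heap: right sibling of the nearest
    // left-child ancestor-or-self, or `count` when the walk is over.
    static uint32_t skip(uint32_t idx, uint32_t count)
    {
        while (idx && !(idx & 1)) idx = (idx - 1) / 2; // climb out of right children
        return idx ? idx + 1 : count;
    }

    // Largest value <= limit; subtrees rooted above `limit` are skipped.
    // Assumes heap[0] <= limit, as the libdispatch caller guarantees.
    static uint64_t max_value_before(const uint64_t *heap, uint32_t count,
            uint64_t limit)
    {
        uint64_t best = heap[0];
        uint32_t idx = 1; // start at the root's left child
        while (idx < count) {
            if (heap[idx] > limit) {
                idx = skip(idx, count);
            } else {
                if (heap[idx] > best) best = heap[idx];
                uint32_t lchild = 2 * idx + 1;
                idx = lchild < count ? lchild : skip(idx, count);
            }
        }
        return best;
    }

    int main(void)
    {
        const uint64_t heap[] = { 1, 3, 2, 7, 5, 4, 8 }; // a valid min-heap
        assert(max_value_before(heap, 7, 4) == 4);
        assert(max_value_before(heap, 7, 6) == 5);
        return 0;
    }
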
+
+DISPATCH_NOINLINE
+static void
+_dispatch_timer_heap_resift(dispatch_timer_heap_t dth,
+		dispatch_timer_source_refs_t dt, uint32_t idx)
+{
+	dispatch_static_assert(offsetof(struct dispatch_timer_source_s, target) ==
+			offsetof(struct dispatch_timer_source_s, heap_key[DTH_TARGET_ID]));
+	dispatch_static_assert(offsetof(struct dispatch_timer_source_s, deadline) ==
+			offsetof(struct dispatch_timer_source_s, heap_key[DTH_DEADLINE_ID]));
+#define dth_cmp(hid, dt1, op, dt2) \
+		(((dt1)->dt_timer.heap_key)[hid] op ((dt2)->dt_timer.heap_key)[hid])
+
+	dispatch_timer_source_refs_t *pslot, pdt;
+	dispatch_timer_source_refs_t *cslot, cdt;
+	dispatch_timer_source_refs_t *rslot, rdt;
+	uint32_t cidx, dth_count = dth->dth_count;
+	dispatch_timer_source_refs_t *slot;
+	int heap_id = DTH_HEAP_ID(idx);
+	bool sifted_up = false;
+
+	// try to sift up
+
+	slot = _dispatch_timer_heap_get_slot(dth, idx);
+	while (idx >= DTH_ID_COUNT) {
+		uint32_t pidx = _dispatch_timer_heap_parent(idx);
+		pslot = _dispatch_timer_heap_get_slot(dth, pidx);
+		pdt = *pslot;
+		if (dth_cmp(heap_id, pdt, <=, dt)) {
+			break;
+		}
+		_dispatch_timer_heap_set(slot, pdt, idx);
+		slot = pslot;
+		idx = pidx;
+		sifted_up = true;
+	}
+	if (sifted_up) {
+		goto done;
+	}
+
+	// try to sift down
+
+	while ((cidx = _dispatch_timer_heap_left_child(idx)) < dth_count) {
+		uint32_t ridx = cidx + DTH_ID_COUNT;
+		cslot = _dispatch_timer_heap_get_slot(dth, cidx);
+		cdt = *cslot;
+		if (ridx < dth_count) {
+			rslot = _dispatch_timer_heap_get_slot(dth, ridx);
+			rdt = *rslot;
+			if (dth_cmp(heap_id, cdt, >, rdt)) {
+				cidx = ridx;
+				cdt = rdt;
+				cslot = rslot;
+			}
+		}
+		if (dth_cmp(heap_id, dt, <=, cdt)) {
+			break;
+		}
+		_dispatch_timer_heap_set(slot, cdt, idx);
+		slot = cslot;
+		idx = cidx;
+	}
+
+done:
+	_dispatch_timer_heap_set(slot, dt, idx);
+#undef dth_cmp
+}
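
Two details of the resift worth calling out: it keeps a "hole" at `idx` and stores the moved entry exactly once at the end rather than swapping at every level, and a key that sifted up can never also need to sift down, so the second loop is skipped entirely in that case. A generic sketch of the same shape on a plain 0-based uint64_t min-heap:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static void resift(uint64_t *heap, uint32_t count, uint32_t idx)
    {
        uint64_t key = heap[idx]; // the entry whose position may be stale
        bool sifted_up = false;

        while (idx > 0) { // try to sift up
            uint32_t pidx = (idx - 1) / 2;
            if (heap[pidx] <= key) break;
            heap[idx] = heap[pidx]; // pull the parent down into the hole
            idx = pidx;
            sifted_up = true;
        }
        if (!sifted_up) { // try to sift down
            uint32_t cidx;
            while ((cidx = 2 * idx + 1) < count) {
                if (cidx + 1 < count && heap[cidx + 1] < heap[cidx]) {
                    cidx++; // compare against the smaller child
                }
                if (key <= heap[cidx]) break;
                heap[idx] = heap[cidx]; // pull the child up into the hole
                idx = cidx;
            }
        }
        heap[idx] = key; // single final store
    }

    int main(void)
    {
        uint64_t h[] = { 1, 5, 2, 7, 6, 4, 8 };
        h[1] = 9;        // grow a key in place...
        resift(h, 7, 1); // ...then restore the heap property
        assert(h[1] == 6 && h[4] == 9); // 6 sifted up, 9 settled at a leaf
        return 0;
    }
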
+
+DISPATCH_ALWAYS_INLINE
+static void
+_dispatch_timer_heap_insert(dispatch_timer_heap_t dth,
+		dispatch_timer_source_refs_t dt)
+{
+	uint32_t idx = (dth->dth_count += DTH_ID_COUNT) - DTH_ID_COUNT;
+
+	if (idx == 0) {
+		dt->dt_heap_entry[DTH_TARGET_ID] = DTH_TARGET_ID;
+		dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_DEADLINE_ID;
+		dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = dt;
+		return;
+	}
+
+	if (unlikely(idx + DTH_ID_COUNT >
+			_dispatch_timer_heap_capacity(dth->dth_segments))) {
+		_dispatch_timer_heap_grow(dth);
+	}
+	_dispatch_timer_heap_resift(dth, dt, idx + DTH_TARGET_ID);
+	_dispatch_timer_heap_resift(dth, dt, idx + DTH_DEADLINE_ID);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_timer_heap_remove(dispatch_timer_heap_t dth,
+		dispatch_timer_source_refs_t removed_dt)
+{
+	uint32_t idx = (dth->dth_count -= DTH_ID_COUNT);
+
+	if (idx == 0) {
+		dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = NULL;
+		return;
+	}
+
+	for (uint32_t heap_id = 0; heap_id < DTH_ID_COUNT; heap_id++) {
+		dispatch_timer_source_refs_t *slot, dt;
+		slot = _dispatch_timer_heap_get_slot(dth, idx + heap_id);
+		dt = *slot; *slot = NULL;
+		if (dt != removed_dt) {
+			uint32_t removed_idx = removed_dt->dt_heap_entry[heap_id];
+			_dispatch_timer_heap_resift(dth, dt, removed_idx);
+		}
+	}
+	if (unlikely(idx <= _dispatch_timer_heap_capacity(dth->dth_segments - 1))) {
+		_dispatch_timer_heap_shrink(dth);
 	}
 }
 
+DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_timers_unregister(dispatch_source_t ds, dispatch_kevent_t dk)
+_dispatch_timer_heap_update(dispatch_timer_heap_t dth,
+		dispatch_timer_source_refs_t dt)
 {
-	dispatch_source_refs_t dr = ds->ds_refs;
-	unsigned int tidx = (unsigned int)dk->dk_kevent.ident;
+	_dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_TARGET_ID]);
+	_dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_DEADLINE_ID]);
+}
 
-	if (slowpath(ds_timer_aggregate(ds))) {
-		_dispatch_timer_aggregates_unregister(ds, tidx);
+DISPATCH_ALWAYS_INLINE
+static bool
+_dispatch_timer_heap_has_new_min(dispatch_timer_heap_t dth,
+		uint32_t count, uint32_t mask)
+{
+	dispatch_timer_source_refs_t dt;
+	bool changed = false;
+	uint64_t tmp;
+	uint32_t tidx;
+
+	for (tidx = 0; tidx < count; tidx++) {
+		if (!(mask & (1u << tidx))) {
+			continue;
+		}
+
+		dt = dth[tidx].dth_min[DTH_TARGET_ID];
+		tmp = dt ? dt->dt_timer.target : UINT64_MAX;
+		if (dth[tidx].dth_target != tmp) {
+			dth[tidx].dth_target = tmp;
+			changed = true;
+		}
+		dt = dth[tidx].dth_min[DTH_DEADLINE_ID];
+		tmp = dt ? dt->dt_timer.deadline : UINT64_MAX;
+		if (dth[tidx].dth_deadline != tmp) {
+			dth[tidx].dth_deadline = tmp;
+			changed = true;
+		}
 	}
-	_dispatch_timers_remove(tidx, dk, _dispatch_kevent_timer, dr, dr_list,
-			_dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list);
-	if (tidx != DISPATCH_TIMER_INDEX_DISARM) {
-		_dispatch_timers_reconfigure = true;
-		_dispatch_timers_mask |= 1 << tidx;
+	return changed;
+}
+
+static inline void
+_dispatch_timers_unregister(dispatch_timer_source_refs_t dt)
+{
+	uint32_t tidx = dt->du_ident;
+	dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx];
+
+	_dispatch_timer_heap_remove(heap, dt);
+	_dispatch_timers_reconfigure = true;
+	_dispatch_timers_processing_mask |= 1 << tidx;
+	dt->du_registered = false;
+}
+
+static inline void
+_dispatch_timers_register(dispatch_timer_source_refs_t dt, uint32_t tidx)
+{
+	dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx];
+	if (dt->du_registered) {
+		dispatch_assert(dt->du_ident == tidx);
+		_dispatch_timer_heap_update(heap, dt);
+	} else {
+		dt->du_ident = tidx;
+		_dispatch_timer_heap_insert(heap, dt);
 	}
+	_dispatch_timers_reconfigure = true;
+	_dispatch_timers_processing_mask |= 1 << tidx;
+	dt->du_registered = true;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_source_timer_tryarm(dispatch_source_t ds)
+{
+	dispatch_queue_flags_t oqf, nqf;
+	return os_atomic_rmw_loop2o(ds, dq_atomic_flags, oqf, nqf, relaxed, {
+		if (oqf & (DSF_CANCELED | DQF_RELEASED)) {
+			// do not install a cancelled timer
+			os_atomic_rmw_loop_give_up(break);
+		}
+		nqf = oqf | DSF_ARMED;
+	});
 }
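
For readers unfamiliar with the os_atomic_rmw_loop2o() macro family, the tryarm above is a compare-and-swap loop; a rough C11 equivalent (the DSF_*/DQF_* values below are placeholders, the real constants live in libdispatch's private headers):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define DSF_CANCELED 0x1u // placeholder flag values
    #define DQF_RELEASED 0x2u
    #define DSF_ARMED    0x4u

    static bool timer_tryarm(_Atomic uint32_t *dq_atomic_flags)
    {
        uint32_t oqf = atomic_load_explicit(dq_atomic_flags,
                memory_order_relaxed);
        do {
            if (oqf & (DSF_CANCELED | DQF_RELEASED)) {
                return false; // do not install a cancelled timer
            }
        } while (!atomic_compare_exchange_weak_explicit(dq_atomic_flags,
                &oqf, oqf | DSF_ARMED,
                memory_order_relaxed, memory_order_relaxed));
        return true;
    }
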
 
 // Updates the ordered list of timers based on next fire date for changes to ds.
 // Should only be called from the context of _dispatch_mgr_q.
 static void
-_dispatch_timers_update(dispatch_source_t ds)
+_dispatch_timers_update(dispatch_unote_t du, uint32_t flags)
 {
-	dispatch_kevent_t dk = ds->ds_dkev;
-	dispatch_source_refs_t dr = ds->ds_refs;
-	unsigned int tidx;
+	dispatch_timer_source_refs_t dr = du._dt;
+	dispatch_source_t ds = _dispatch_source_from_refs(dr);
+	const char *verb = "updated";
+	bool will_register, disarm = false;
 
 	DISPATCH_ASSERT_ON_MANAGER_QUEUE();
 
-	// Do not reschedule timers unregistered with _dispatch_kevent_unregister()
-	if (slowpath(!dk)) {
+	if (unlikely(dr->du_ident == DISPATCH_TIMER_IDENT_CANCELED)) {
 		return;
 	}
-	// Move timers that are disabled, suspended or have missed intervals to the
-	// disarmed list, rearm after resume resp. source invoke will reenable them
-	if (!ds_timer(dr).target || DISPATCH_QUEUE_IS_SUSPENDED(ds) ||
-			ds->ds_pending_data) {
-		tidx = DISPATCH_TIMER_INDEX_DISARM;
-		_dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED);
-		_dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds,
-				ds->ds_dkev);
-	} else {
-		tidx = _dispatch_source_timer_idx(dr);
-	}
-	if (slowpath(ds_timer_aggregate(ds))) {
-		_dispatch_timer_aggregates_register(ds);
-	}
-	if (slowpath(!ds->ds_is_installed)) {
-		ds->ds_is_installed = true;
-		if (tidx != DISPATCH_TIMER_INDEX_DISARM) {
-			_dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED);
-			_dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds,
-					ds->ds_dkev);
+
+	// Unregister timers that are unconfigured, disabled, suspended or have
+	// missed intervals; they are rearmed after dispatch_set_timer(), resume,
+	// or source invoke re-enables them.
+	will_register = dr->dt_timer.target < INT64_MAX && !ds->ds_pending_data &&
+			!DISPATCH_QUEUE_IS_SUSPENDED(ds) &&
+			!os_atomic_load2o(dr, dt_pending_config, relaxed);
+	if (!dr->du_registered && will_register) {
+		if (unlikely(!_dispatch_source_timer_tryarm(ds))) {
+			return;
 		}
-		_dispatch_object_debug(ds, "%s", __func__);
-		ds->ds_dkev = NULL;
-		free(dk);
+		verb = "armed";
+	} else if (unlikely(dr->du_registered && !will_register)) {
+		disarm = true;
+		verb = "disarmed";
+	}
+
+	uint32_t tidx = _dispatch_source_timer_idx(dr);
+	if (unlikely(dr->du_registered && (!will_register || dr->du_ident != tidx))) {
+		_dispatch_timers_unregister(dr);
+	}
+	if (likely(will_register)) {
+		_dispatch_timers_register(dr, tidx);
+	}
+
+	if (disarm) {
+		if (flags & DISPATCH_TIMERS_RETAIN_ON_DISARM) {
+			_dispatch_retain(ds);
+		}
+		_dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED);
+	}
+	_dispatch_debug("kevent-source[%p]: %s timer[%p]", ds, verb, dr);
+	_dispatch_object_debug(ds, "%s", __func__);
+}
+
+#define DISPATCH_TIMER_MISSED_MARKER  1ul
+
+DISPATCH_ALWAYS_INLINE
+static inline unsigned long
+_dispatch_source_timer_compute_missed(dispatch_timer_source_refs_t dt,
+		uint64_t now, unsigned long prev)
+{
+	uint64_t missed = (now - dt->dt_timer.target) / dt->dt_timer.interval;
+	if (++missed + prev > LONG_MAX) {
+		missed = LONG_MAX - prev;
+	}
+	if (dt->dt_timer.interval < INT64_MAX) {
+		uint64_t push_by = missed * dt->dt_timer.interval;
+		dt->dt_timer.target += push_by;
+		dt->dt_timer.deadline += push_by;
 	} else {
-		_dispatch_timers_unregister(ds, dk);
+		dt->dt_timer.target = UINT64_MAX;
+		dt->dt_timer.deadline = UINT64_MAX;
 	}
-	if (tidx != DISPATCH_TIMER_INDEX_DISARM) {
-		_dispatch_timers_reconfigure = true;
-		_dispatch_timers_mask |= 1 << tidx;
+	prev += missed;
+	return prev;
+}
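
Worked example of the arithmetic above for a repeating timer (interval < INT64_MAX): with target = 100, deadline = 105, interval = 10 and now = 131, the timer missed the fires at 100, 110, 120 and 130, so missed = (131 - 100) / 10 + 1 = 4 and both target and deadline move forward by 40, leaving target = 140, the first fire date after `now`. A minimal sketch of just that path:

    #include <assert.h>
    #include <limits.h>
    #include <stdint.h>

    struct timer { uint64_t target, deadline, interval; };

    static unsigned long compute_missed(struct timer *t, uint64_t now,
            unsigned long prev)
    {
        uint64_t missed = (now - t->target) / t->interval + 1;
        if (missed + prev > LONG_MAX) {
            missed = LONG_MAX - prev; // saturate the pending-fire count
        }
        uint64_t push_by = missed * t->interval;
        t->target += push_by;   // first fire date after `now`
        t->deadline += push_by; // keeps deadline - target constant
        return prev + (unsigned long)missed;
    }

    int main(void)
    {
        struct timer t = { .target = 100, .deadline = 105, .interval = 10 };
        assert(compute_missed(&t, 131, 0) == 4);
        assert(t.target == 140 && t.deadline == 145);
        return 0;
    }
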
+
+DISPATCH_ALWAYS_INLINE
+static inline unsigned long
+_dispatch_source_timer_data(dispatch_source_t ds, dispatch_unote_t du)
+{
+	dispatch_timer_source_refs_t dr = du._dt;
+	unsigned long data, prev, clear_prev = 0;
+
+	os_atomic_rmw_loop2o(ds, ds_pending_data, prev, clear_prev, relaxed, {
+		data = prev >> 1;
+		if (unlikely(prev & DISPATCH_TIMER_MISSED_MARKER)) {
+			os_atomic_rmw_loop_give_up(goto handle_missed_intervals);
+		}
+	});
+	return data;
+
+handle_missed_intervals:
+	// The timer may be in _dispatch_source_invoke2() already for other
+	// reasons such as running the registration handler when ds_pending_data
+	// is changed by _dispatch_timers_run2() without holding the drain lock.
+	//
+	// We therefore need dependency ordering to pair with the release barrier
+	// issued by _dispatch_timers_run2() when it sets the MISSED_MARKER bit.
+	os_atomic_thread_fence(dependency);
+	dr = os_atomic_force_dependency_on(dr, data);
+
+	uint64_t now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(dr->du_ident));
+	if (now >= dr->dt_timer.target) {
+		OS_COMPILER_CAN_ASSUME(dr->dt_timer.interval < INT64_MAX);
+		data = _dispatch_source_timer_compute_missed(dr, now, data);
 	}
-	if (dk != &_dispatch_kevent_timer[tidx]){
-		ds->ds_dkev = &_dispatch_kevent_timer[tidx];
-	}
-	_dispatch_timers_insert(tidx, _dispatch_kevent_timer, dr, dr_list,
-			_dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list);
-	if (slowpath(ds_timer_aggregate(ds))) {
-		_dispatch_timer_aggregates_update(ds, tidx);
-	}
+
+	// When we see the MISSED_MARKER the manager has given up on this timer
+	// and expects the handler to call "resume".
+	//
+	// However, it may not have reflected this in the atomic flags yet,
+	// so make sure _dispatch_source_invoke2() sees the timer as disarmed.
+	//
+	// The subsequent _dispatch_source_refs_resume() will enqueue the source
+	// on the manager and make the changes to `ds_timer` above visible.
+	_dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED);
+	os_atomic_store2o(ds, ds_pending_data, 0, relaxed);
+	return data;
 }
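
The ds_pending_data encoding that the loads and stores above rely on is simply the pending fire count shifted left by one, with bit 0 reserved for DISPATCH_TIMER_MISSED_MARKER; a quick illustration:

    #include <assert.h>

    #define DISPATCH_TIMER_MISSED_MARKER 1ul

    int main(void)
    {
        unsigned long data = 4; // four pending fires
        unsigned long pending = (data << 1) | DISPATCH_TIMER_MISSED_MARKER;

        assert(pending >> 1 == data);                   // recover the count
        assert(pending & DISPATCH_TIMER_MISSED_MARKER); // marker is set
        return 0;
    }
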
 
 static inline void
-_dispatch_timers_run2(uint64_t nows[], unsigned int tidx)
+_dispatch_timers_run2(dispatch_clock_now_cache_t nows, uint32_t tidx)
 {
-	dispatch_source_refs_t dr;
+	dispatch_timer_source_refs_t dr;
 	dispatch_source_t ds;
-	uint64_t now, missed;
+	unsigned long data, pending_data;
+	uint64_t now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows);
 
-	now = _dispatch_source_timer_now(nows, tidx);
-	while ((dr = TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources))) {
+	while ((dr = _dispatch_timers_heap[tidx].dth_min[DTH_TARGET_ID])) {
+		dispatch_assert(tidx == dr->du_ident && dr->dt_timer.target);
 		ds = _dispatch_source_from_refs(dr);
-		// We may find timers on the wrong list due to a pending update from
-		// dispatch_source_set_timer. Force an update of the list in that case.
-		if (tidx != ds->ds_ident_hack) {
-			_dispatch_timers_update(ds);
-			continue;
-		}
-		if (!ds_timer(dr).target) {
-			// No configured timers on the list
-			break;
-		}
-		if (ds_timer(dr).target > now) {
+		if (dr->dt_timer.target > now) {
 			// Done running timers for now.
 			break;
 		}
-		// Remove timers that are suspended or have missed intervals from the
-		// list, rearm after resume resp. source invoke will reenable them
-		if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || ds->ds_pending_data) {
-			_dispatch_timers_update(ds);
+		if (dr->du_fflags & DISPATCH_TIMER_AFTER) {
+			_dispatch_trace_timer_fire(dr, 1, 1);
+			_dispatch_source_merge_evt(dr, EV_ONESHOT, 1, 0);
+			_dispatch_debug("kevent-source[%p]: fired after timer[%p]", ds, dr);
+			_dispatch_object_debug(ds, "%s", __func__);
 			continue;
 		}
-		// Calculate number of missed intervals.
-		missed = (now - ds_timer(dr).target) / ds_timer(dr).interval;
-		if (++missed > INT_MAX) {
-			missed = INT_MAX;
-		}
-		if (ds_timer(dr).interval < INT64_MAX) {
-			ds_timer(dr).target += missed * ds_timer(dr).interval;
-			ds_timer(dr).deadline = ds_timer(dr).target + ds_timer(dr).leeway;
-		} else {
-			ds_timer(dr).target = UINT64_MAX;
-			ds_timer(dr).deadline = UINT64_MAX;
-		}
-		_dispatch_timers_update(ds);
-		ds_timer(dr).last_fire = now;
 
-		unsigned long data;
-		data = os_atomic_add2o(ds, ds_pending_data,
-				(unsigned long)missed, relaxed);
-		_dispatch_trace_timer_fire(dr, data, (unsigned long)missed);
-		dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH);
-		if (ds_timer(dr).flags & DISPATCH_TIMER_AFTER) {
-			_dispatch_source_kevent_unregister(ds);
+		data = os_atomic_load2o(ds, ds_pending_data, relaxed);
+		if (unlikely(data)) {
+			// the release barrier is required to make the changes
+			// to `ds_timer` visible to _dispatch_source_timer_data()
+			if (os_atomic_cmpxchg2o(ds, ds_pending_data, data,
+					data | DISPATCH_TIMER_MISSED_MARKER, release)) {
+				_dispatch_timers_update(dr, 0);
+				continue;
+			}
 		}
+
+		data = _dispatch_source_timer_compute_missed(dr, now, 0);
+		_dispatch_timers_update(dr, DISPATCH_TIMERS_RETAIN_ON_DISARM);
+		pending_data = data << 1;
+		if (!dr->du_registered && dr->dt_timer.target < INT64_MAX) {
+			// if we unregistered because of suspension, we have to pretend
+			// that we missed events.
+			pending_data |= DISPATCH_TIMER_MISSED_MARKER;
+		}
+		os_atomic_store2o(ds, ds_pending_data, pending_data, relaxed);
+		_dispatch_trace_timer_fire(dr, data, data);
+		_dispatch_debug("kevent-source[%p]: fired timer[%p]", ds, dr);
+		_dispatch_object_debug(ds, "%s", __func__);
+
+		dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_FLUSH;
+		if (!dr->du_registered) wflags |= DISPATCH_WAKEUP_CONSUME;
+		dx_wakeup(ds, 0, wflags);
 	}
 }
 
 DISPATCH_NOINLINE
 static void
-_dispatch_timers_run(uint64_t nows[])
+_dispatch_timers_run(dispatch_clock_now_cache_t nows)
 {
-	unsigned int tidx;
+	uint32_t tidx;
 	for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
-		if (!TAILQ_EMPTY(&_dispatch_kevent_timer[tidx].dk_sources)) {
+		if (_dispatch_timers_heap[tidx].dth_count) {
 			_dispatch_timers_run2(nows, tidx);
 		}
 	}
 }
 
-static inline unsigned int
-_dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[],
-		uint64_t *delay, uint64_t *leeway, int qos, int kind)
-{
-	unsigned int tidx, ridx = DISPATCH_TIMER_COUNT;
-	uint64_t tmp, delta = UINT64_MAX, dldelta = UINT64_MAX;
+#if DISPATCH_HAVE_TIMER_COALESCING
+#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \
+		[DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC
 
-	for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
-		if (qos >= 0 && qos != DISPATCH_TIMER_QOS(tidx)){
-			continue;
-		}
-		if (kind >= 0 && kind != DISPATCH_TIMER_KIND(tidx)){
-			continue;
-		}
-		uint64_t target = timer[tidx].target;
-		if (target == UINT64_MAX) {
-			continue;
-		}
-		uint64_t deadline = timer[tidx].deadline;
-		if (qos >= 0) {
-			// Timer pre-coalescing <rdar://problem/13222034>
-			uint64_t window = _dispatch_kevent_coalescing_window[qos];
-			uint64_t latest = deadline > window ? deadline - window : 0;
-			dispatch_source_refs_t dri;
-			TAILQ_FOREACH(dri, &_dispatch_kevent_timer[tidx].dk_sources,
-					dr_list) {
-				tmp = ds_timer(dri).target;
-				if (tmp > latest) break;
-				target = tmp;
-			}
-		}
-		uint64_t now = _dispatch_source_timer_now(nows, tidx);
-		if (target <= now) {
-			delta = 0;
-			break;
-		}
-		tmp = target - now;
-		if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) {
-			tmp = _dispatch_time_mach2nano(tmp);
-		}
-		if (tmp < INT64_MAX && tmp < delta) {
-			ridx = tidx;
-			delta = tmp;
-		}
-		dispatch_assert(target <= deadline);
-		tmp = deadline - now;
-		if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) {
-			tmp = _dispatch_time_mach2nano(tmp);
-		}
-		if (tmp < INT64_MAX && tmp < dldelta) {
-			dldelta = tmp;
-		}
-	}
-	*delay = delta;
-	*leeway = delta && delta < UINT64_MAX ? dldelta - delta : UINT64_MAX;
-	return ridx;
-}
-
-
-#ifdef __linux__
-// in linux we map the _dispatch_kevent_qos_s  to struct kevent instead
-// of struct kevent64. We loose the kevent.ext[] members and the time
-// out is based on relavite msec based time vs. absolute nsec based time.
-// For now we make the adjustments right here until the solution
-// to either extend libkqueue with a proper kevent64 API or removing kevent
-// all together and move to a lower API (e.g. epoll or kernel_module.
-// Also leeway is ignored.
-
-static void
-_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay,
-		uint64_t leeway, uint64_t nows[])
-{
-	// call to update nows[]
-	_dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL);
-#ifdef KEVENT_NSEC_NOT_SUPPORTED
-	// adjust nsec based delay to msec based and ignore leeway
-	delay /= 1000000L;
-	if ((int64_t)(delay) <= 0) {
-		delay = 1; // if value <= 0 the dispatch will stop
-	}
-#else
-	ke->fflags |= NOTE_NSECONDS;
+static const uint64_t _dispatch_kevent_coalescing_window[] = {
+	DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75),
+#if DISPATCH_HAVE_TIMER_QOS
+	DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1),
+	DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100),
 #endif
-	ke->data = (int64_t)delay;
-}
+};
+#endif // DISPATCH_HAVE_TIMER_COALESCING
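
Note that the initializer stores twice the quoted width: DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) expands to 2 * ms milliseconds in nanoseconds. A quick check of the resulting values (NSEC_PER_MSEC assumed to be 10^6):

    #include <assert.h>

    #define NSEC_PER_MSEC 1000000ull
    #define WINDOW(ms) (2ull * (ms) * NSEC_PER_MSEC) // the initializer's formula

    int main(void)
    {
        assert(WINDOW(75)  == 150000000ull); // NORMAL:     150ms window
        assert(WINDOW(1)   ==   2000000ull); // CRITICAL:     2ms window
        assert(WINDOW(100) == 200000000ull); // BACKGROUND: 200ms window
        return 0;
    }
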
 
-#else
-static void
-_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay,
-		uint64_t leeway, uint64_t nows[])
+static inline dispatch_timer_delay_s
+_dispatch_timers_get_delay(dispatch_timer_heap_t dth, dispatch_clock_t clock,
+		uint32_t qos, dispatch_clock_now_cache_t nows)
 {
-	delay += _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL);
-	if (slowpath(_dispatch_timers_force_max_leeway)) {
-		ke->data = (int64_t)(delay + leeway);
-		ke->ext[1] = 0;
-	} else {
-		ke->data = (int64_t)delay;
-		ke->ext[1] = leeway;
+	uint64_t target = dth->dth_target, deadline = dth->dth_deadline;
+	uint64_t delta = INT64_MAX, dldelta = INT64_MAX;
+	dispatch_timer_delay_s rc;
+
+	dispatch_assert(target <= deadline);
+	if (delta == 0 || target >= INT64_MAX) {
+		goto done;
 	}
+
+	if (qos < DISPATCH_TIMER_QOS_COUNT && dth->dth_count > 2) {
+#if DISPATCH_HAVE_TIMER_COALESCING
+		// Timer pre-coalescing <rdar://problem/13222034>
+		// When we have several timers with this target/deadline bracket:
+		//
+		//      Target        window  Deadline
+		//        V           <-------V
+		// t1:    [...........|.................]
+		// t2:         [......|.......]
+		// t3:             [..|..........]
+		// t4:                | [.............]
+		//                 ^
+		//          Optimal Target
+		//
+		// Coalescing works better if the Target is delayed to "Optimal", by
+		// picking the latest target that isn't too close to the deadline.
+		uint64_t window = _dispatch_kevent_coalescing_window[qos];
+		if (target + window < deadline) {
+			uint64_t latest = deadline - window;
+			target = _dispatch_timer_heap_max_target_before(dth, latest);
+		}
+#endif
+	}
+
+	uint64_t now = _dispatch_time_now_cached(clock, nows);
+	if (target <= now) {
+		delta = 0;
+		dldelta = 0;
+		goto done;
+	}
+
+	uint64_t tmp = target - now;
+	if (clock != DISPATCH_CLOCK_WALL) {
+		tmp = _dispatch_time_mach2nano(tmp);
+	}
+	if (tmp < delta) {
+		delta = tmp;
+	}
+
+	tmp = deadline - now;
+	if (clock != DISPATCH_CLOCK_WALL) {
+		tmp = _dispatch_time_mach2nano(tmp);
+	}
+	if (tmp < dldelta) {
+		dldelta = tmp;
+	}
+
+done:
+	rc.delay = delta;
+	rc.leeway = delta < INT64_MAX ? dldelta - delta : INT64_MAX;
+	return rc;
 }
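
Stripped of the mach-to-nanosecond conversion and the coalescing pass, the delay/leeway computation reduces to: delay is the time until the earliest target, leeway the extra slack up to the earliest deadline. A condensed model for a wall-clock heap (names illustrative):

    #include <assert.h>
    #include <stdint.h>

    struct delay { uint64_t delay, leeway; };

    static struct delay get_delay(uint64_t target, uint64_t deadline,
            uint64_t now)
    {
        struct delay rc = { INT64_MAX, INT64_MAX };
        if (target >= (uint64_t)INT64_MAX) return rc; // heap empty or disarmed
        if (target <= now) return (struct delay){ 0, 0 }; // expired: poll now
        rc.delay = target - now;
        rc.leeway = (deadline - now) - rc.delay; // == deadline - target
        return rc;
    }

    int main(void)
    {
        struct delay d = get_delay(1500, 1800, 1000);
        assert(d.delay == 500 && d.leeway == 300);
        d = get_delay(900, 1200, 1000);
        assert(d.delay == 0 && d.leeway == 0);
        return 0;
    }
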
-#endif // __linux__
 
 static bool
-_dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke,
-		unsigned int tidx)
+_dispatch_timers_program2(dispatch_clock_now_cache_t nows, uint32_t tidx)
 {
-	bool poll;
-	uint64_t delay, leeway;
+	uint32_t qos = DISPATCH_TIMER_QOS(tidx);
+	dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx);
+	dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx];
+	dispatch_timer_delay_s range;
 
-	_dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway,
-			(int)DISPATCH_TIMER_QOS(tidx), (int)DISPATCH_TIMER_KIND(tidx));
-	poll = (delay == 0);
-	if (poll || delay == UINT64_MAX) {
-		_dispatch_trace_next_timer_set(NULL, DISPATCH_TIMER_QOS(tidx));
-		if (!ke->data) {
-			return poll;
+	range = _dispatch_timers_get_delay(heap, clock, qos, nows);
+	if (range.delay == 0 || range.delay >= INT64_MAX) {
+		_dispatch_trace_next_timer_set(NULL, qos);
+		if (heap->dth_flags & DTH_ARMED) {
+			_dispatch_event_loop_timer_delete(tidx);
 		}
-		ke->data = 0;
-		ke->flags |= EV_DELETE;
-		ke->flags &= ~(EV_ADD|EV_ENABLE);
-	} else {
-		_dispatch_trace_next_timer_set(
-				TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources), DISPATCH_TIMER_QOS(tidx));
-		_dispatch_trace_next_timer_program(delay, DISPATCH_TIMER_QOS(tidx));
-		_dispatch_kevent_timer_set_delay(ke, delay, leeway, nows);
-		ke->flags |= EV_ADD|EV_ENABLE;
-		ke->flags &= ~EV_DELETE;
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-		if (_dispatch_kevent_workqueue_enabled) {
-			ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-		}
-#endif
+		return range.delay == 0;
 	}
-	_dispatch_kq_deferred_update(ke);
-	return poll;
+
+	_dispatch_trace_next_timer_set(heap->dth_min[DTH_TARGET_ID], qos);
+	_dispatch_trace_next_timer_program(range.delay, qos);
+	_dispatch_event_loop_timer_arm(tidx, range, nows);
+	return false;
 }
 
 DISPATCH_NOINLINE
 static bool
-_dispatch_timers_program(uint64_t nows[])
+_dispatch_timers_program(dispatch_clock_now_cache_t nows)
 {
 	bool poll = false;
-	unsigned int tidx, timerm = _dispatch_timers_mask;
-	for (tidx = 0; tidx < DISPATCH_KEVENT_TIMEOUT_COUNT; tidx++) {
-		if (!(timerm & 1 << tidx)){
-			continue;
+	uint32_t tidx, timerm = _dispatch_timers_processing_mask;
+
+	for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
+		if (timerm & (1 << tidx)) {
+			poll |= _dispatch_timers_program2(nows, tidx);
 		}
-		poll |= _dispatch_timers_program2(nows, &_dispatch_kevent_timeout[tidx],
-				tidx);
 	}
 	return poll;
 }
@@ -2424,486 +2133,40 @@
 static bool
 _dispatch_timers_configure(void)
 {
-	_dispatch_timer_aggregates_check();
 	// Find out if there is a new target/deadline on the timer lists
-	return _dispatch_timers_check(_dispatch_kevent_timer, _dispatch_timer);
-}
-
-#if HAVE_MACH
-static void
-_dispatch_timers_calendar_change(void)
-{
-	unsigned int qos;
-
-	// calendar change may have gone past the wallclock deadline
-	_dispatch_timer_expired = true;
-	for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) {
-		_dispatch_timers_mask |=
-				1 << DISPATCH_TIMER_INDEX(DISPATCH_TIMER_KIND_WALL, qos);
-	}
-}
-#endif
-
-static void
-_dispatch_timers_kevent(_dispatch_kevent_qos_s *ke)
-{
-	dispatch_assert(ke->data > 0);
-	dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) ==
-			DISPATCH_KEVENT_TIMEOUT_IDENT_MASK);
-	unsigned int tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK;
-	dispatch_assert(tidx < DISPATCH_KEVENT_TIMEOUT_COUNT);
-	dispatch_assert(_dispatch_kevent_timeout[tidx].data != 0);
-	_dispatch_kevent_timeout[tidx].data = 0; // kevent deleted via EV_ONESHOT
-	_dispatch_timer_expired = true;
-	_dispatch_timers_mask |= 1 << tidx;
-	_dispatch_trace_next_timer_wake(DISPATCH_TIMER_QOS(tidx));
+	return _dispatch_timer_heap_has_new_min(_dispatch_timers_heap,
+			countof(_dispatch_timers_heap), _dispatch_timers_processing_mask);
 }
 
 static inline bool
 _dispatch_mgr_timers(void)
 {
-	uint64_t nows[DISPATCH_TIMER_KIND_COUNT] = {};
-	bool expired = slowpath(_dispatch_timer_expired);
-	if (expired) {
-		_dispatch_timers_run(nows);
+	dispatch_clock_now_cache_s nows = { };
+	bool expired = _dispatch_timers_expired;
+	if (unlikely(expired)) {
+		_dispatch_timers_run(&nows);
 	}
-	bool reconfigure = slowpath(_dispatch_timers_reconfigure);
-	if (reconfigure || expired) {
+	_dispatch_mgr_trace_timers_wakes();
+	bool reconfigure = _dispatch_timers_reconfigure;
+	if (unlikely(reconfigure || expired)) {
 		if (reconfigure) {
 			reconfigure = _dispatch_timers_configure();
 			_dispatch_timers_reconfigure = false;
 		}
 		if (reconfigure || expired) {
-			expired = _dispatch_timer_expired = _dispatch_timers_program(nows);
-			expired = expired || _dispatch_mgr_q.dq_items_tail;
+			expired = _dispatch_timers_expired = _dispatch_timers_program(&nows);
 		}
-		_dispatch_timers_mask = 0;
+		_dispatch_timers_processing_mask = 0;
 	}
 	return expired;
 }
 
 #pragma mark -
-#pragma mark dispatch_timer_aggregate
-
-typedef struct {
-	TAILQ_HEAD(, dispatch_timer_source_aggregate_refs_s) dk_sources;
-} dispatch_timer_aggregate_refs_s;
-
-typedef struct dispatch_timer_aggregate_s {
-	DISPATCH_QUEUE_HEADER(queue);
-	TAILQ_ENTRY(dispatch_timer_aggregate_s) dta_list;
-	dispatch_timer_aggregate_refs_s
-			dta_kevent_timer[DISPATCH_KEVENT_TIMER_COUNT];
-	struct {
-		DISPATCH_TIMER_STRUCT(dispatch_timer_source_aggregate_refs_s);
-	} dta_timer[DISPATCH_TIMER_COUNT];
-	struct dispatch_timer_s dta_timer_data[DISPATCH_TIMER_COUNT];
-	unsigned int dta_refcount;
-} DISPATCH_QUEUE_ALIGN dispatch_timer_aggregate_s;
-
-typedef TAILQ_HEAD(, dispatch_timer_aggregate_s) dispatch_timer_aggregates_s;
-static dispatch_timer_aggregates_s _dispatch_timer_aggregates =
-		TAILQ_HEAD_INITIALIZER(_dispatch_timer_aggregates);
-
-dispatch_timer_aggregate_t
-dispatch_timer_aggregate_create(void)
-{
-	unsigned int tidx;
-	dispatch_timer_aggregate_t dta = _dispatch_alloc(DISPATCH_VTABLE(queue),
-			sizeof(struct dispatch_timer_aggregate_s));
-	_dispatch_queue_init(dta->_as_dq, DQF_NONE,
-			DISPATCH_QUEUE_WIDTH_MAX, false);
-	dta->do_targetq = _dispatch_get_root_queue(
-			_DISPATCH_QOS_CLASS_USER_INITIATED, true);
-	//FIXME: aggregates need custom vtable
-	//dta->dq_label = "timer-aggregate";
-	for (tidx = 0; tidx < DISPATCH_KEVENT_TIMER_COUNT; tidx++) {
-		TAILQ_INIT(&dta->dta_kevent_timer[tidx].dk_sources);
-	}
-	for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
-		TAILQ_INIT(&dta->dta_timer[tidx].dt_sources);
-		dta->dta_timer[tidx].target = UINT64_MAX;
-		dta->dta_timer[tidx].deadline = UINT64_MAX;
-		dta->dta_timer_data[tidx].target = UINT64_MAX;
-		dta->dta_timer_data[tidx].deadline = UINT64_MAX;
-	}
-	return (dispatch_timer_aggregate_t)_dispatch_introspection_queue_create(
-			dta->_as_dq);
-}
-
-typedef struct dispatch_timer_delay_s {
-	dispatch_timer_t timer;
-	uint64_t delay, leeway;
-} *dispatch_timer_delay_t;
-
-static void
-_dispatch_timer_aggregate_get_delay(void *ctxt)
-{
-	dispatch_timer_delay_t dtd = ctxt;
-	struct { uint64_t nows[DISPATCH_TIMER_KIND_COUNT]; } dtn = {};
-	_dispatch_timers_get_delay(dtn.nows, dtd->timer, &dtd->delay, &dtd->leeway,
-			-1, -1);
-}
-
-uint64_t
-dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t dta,
-		uint64_t *leeway_ptr)
-{
-	struct dispatch_timer_delay_s dtd = {
-		.timer = dta->dta_timer_data,
-	};
-	dispatch_sync_f(dta->_as_dq, &dtd, _dispatch_timer_aggregate_get_delay);
-	if (leeway_ptr) {
-		*leeway_ptr = dtd.leeway;
-	}
-	return dtd.delay;
-}
-
-static void
-_dispatch_timer_aggregate_update(void *ctxt)
-{
-	dispatch_timer_aggregate_t dta = (void*)_dispatch_queue_get_current();
-	dispatch_timer_t dtau = ctxt;
-	unsigned int tidx;
-	for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
-		dta->dta_timer_data[tidx].target = dtau[tidx].target;
-		dta->dta_timer_data[tidx].deadline = dtau[tidx].deadline;
-	}
-	free(dtau);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_timer_aggregates_configure(void)
-{
-	dispatch_timer_aggregate_t dta;
-	dispatch_timer_t dtau;
-	TAILQ_FOREACH(dta, &_dispatch_timer_aggregates, dta_list) {
-		if (!_dispatch_timers_check(dta->dta_kevent_timer, dta->dta_timer)) {
-			continue;
-		}
-		dtau = _dispatch_calloc(DISPATCH_TIMER_COUNT, sizeof(*dtau));
-		memcpy(dtau, dta->dta_timer, sizeof(dta->dta_timer));
-		_dispatch_barrier_async_detached_f(dta->_as_dq, dtau,
-				_dispatch_timer_aggregate_update);
-	}
-}
-
-static inline void
-_dispatch_timer_aggregates_check(void)
-{
-	if (fastpath(TAILQ_EMPTY(&_dispatch_timer_aggregates))) {
-		return;
-	}
-	_dispatch_timer_aggregates_configure();
-}
-
-static void
-_dispatch_timer_aggregates_register(dispatch_source_t ds)
-{
-	dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds);
-	if (!dta->dta_refcount++) {
-		TAILQ_INSERT_TAIL(&_dispatch_timer_aggregates, dta, dta_list);
-	}
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_timer_aggregates_update(dispatch_source_t ds, unsigned int tidx)
-{
-	dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds);
-	dispatch_timer_source_aggregate_refs_t dr;
-	dr = (dispatch_timer_source_aggregate_refs_t)ds->ds_refs;
-	_dispatch_timers_insert(tidx, dta->dta_kevent_timer, dr, dra_list,
-			dta->dta_timer, dr, dta_list);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx)
-{
-	dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds);
-	dispatch_timer_source_aggregate_refs_t dr;
-	dr = (dispatch_timer_source_aggregate_refs_t)ds->ds_refs;
-	_dispatch_timers_remove(tidx, (dispatch_timer_aggregate_refs_s*)NULL,
-			dta->dta_kevent_timer, dr, dra_list, dta->dta_timer, dr, dta_list);
-	if (!--dta->dta_refcount) {
-		TAILQ_REMOVE(&_dispatch_timer_aggregates, dta, dta_list);
-	}
-}
-
-#pragma mark -
-#pragma mark dispatch_kqueue
-
-static int _dispatch_kq;
-
-#if DISPATCH_DEBUG_QOS && DISPATCH_USE_KEVENT_WORKQUEUE
-#define _dispatch_kevent_assert_valid_qos(ke)  ({ \
-		if (_dispatch_kevent_workqueue_enabled) { \
-			const _dispatch_kevent_qos_s *_ke = (ke); \
-			if (_ke->flags & (EV_ADD|EV_ENABLE)) { \
-				_dispatch_assert_is_valid_qos_class(\
-						(pthread_priority_t)_ke->qos); \
-				dispatch_assert(_ke->qos); \
-			} \
-		} \
-	})
-#else
-#define _dispatch_kevent_assert_valid_qos(ke)  ((void)ke)
-#endif
-
-
-static void
-_dispatch_kq_init(void *context DISPATCH_UNUSED)
-{
-	_dispatch_fork_becomes_unsafe();
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-	_dispatch_kevent_workqueue_init();
-	if (_dispatch_kevent_workqueue_enabled) {
-		int r;
-		const _dispatch_kevent_qos_s kev[] = {
-			[0] = {
-				.ident = 1,
-				.filter = EVFILT_USER,
-				.flags = EV_ADD|EV_CLEAR,
-				.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
-			},
-			[1] = {
-				.ident = 1,
-				.filter = EVFILT_USER,
-				.fflags = NOTE_TRIGGER,
-			},
-		};
-		_dispatch_kq = -1;
-retry:
-		r = kevent_qos(-1, kev, 2, NULL, 0, NULL, NULL,
-				KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE);
-		if (slowpath(r == -1)) {
-			int err = errno;
-			switch (err) {
-			case EINTR:
-				goto retry;
-			default:
-				DISPATCH_CLIENT_CRASH(err,
-						"Failed to initalize workqueue kevent");
-				break;
-			}
-		}
-		return;
-	}
-#endif // DISPATCH_USE_KEVENT_WORKQUEUE
-#if DISPATCH_USE_MGR_THREAD
-	static const _dispatch_kevent_qos_s kev = {
-		.ident = 1,
-		.filter = EVFILT_USER,
-		.flags = EV_ADD|EV_CLEAR,
-	};
-
-	_dispatch_fork_becomes_unsafe();
-#if DISPATCH_USE_GUARDED_FD
-	guardid_t guard = (uintptr_t)&kev;
-	_dispatch_kq = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP);
-#else
-	_dispatch_kq = kqueue();
-#endif
-	if (_dispatch_kq == -1) {
-		int err = errno;
-		switch (err) {
-		case EMFILE:
-			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
-					"process is out of file descriptors");
-			break;
-		case ENFILE:
-			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
-					"system is out of file descriptors");
-			break;
-		case ENOMEM:
-			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
-					"kernel is out of memory");
-			break;
-		default:
-			DISPATCH_INTERNAL_CRASH(err, "kqueue() failure");
-			break;
-		}
-	}
-	(void)dispatch_assume_zero(kevent_qos(_dispatch_kq, &kev, 1, NULL, 0, NULL,
-			NULL, 0));
-	_dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0);
-#endif // DISPATCH_USE_MGR_THREAD
-}
-
-DISPATCH_NOINLINE
-static long
-_dispatch_kq_update(const _dispatch_kevent_qos_s *ke, int n)
-{
-	int i, r;
-	_dispatch_kevent_qos_s kev_error[n];
-	static dispatch_once_t pred;
-	dispatch_once_f(&pred, NULL, _dispatch_kq_init);
-
-	for (i = 0; i < n; i++) {
-		if (ke[i].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
-			_dispatch_kevent_debug_n("updating", ke + i, i, n);
-		}
-	}
-
-	unsigned int flags = KEVENT_FLAG_ERROR_EVENTS;
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-	if (_dispatch_kevent_workqueue_enabled) {
-		flags |= KEVENT_FLAG_WORKQ;
-	}
-#endif
-
-retry:
-	r = kevent_qos(_dispatch_kq, ke, n, kev_error, n, NULL, NULL, flags);
-	if (slowpath(r == -1)) {
-		int err = errno;
-		switch (err) {
-		case EINTR:
-			goto retry;
-		case EBADF:
-			DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
-			break;
-		default:
-			(void)dispatch_assume_zero(err);
-			break;
-		}
-		return err;
-	}
-	for (i = 0, n = r; i < n; i++) {
-		if (kev_error[i].flags & EV_ERROR) {
-			_dispatch_kevent_debug("returned error", &kev_error[i]);
-			_dispatch_kevent_drain(&kev_error[i]);
-			r = (int)kev_error[i].data;
-		} else {
-			_dispatch_kevent_mgr_debug(&kev_error[i]);
-			r = 0;
-		}
-	}
-	return r;
-}
-
-DISPATCH_ALWAYS_INLINE
-static void
-_dispatch_kq_update_all(const _dispatch_kevent_qos_s *kev, int n)
-{
-	(void)_dispatch_kq_update(kev, n);
-}
-
-DISPATCH_ALWAYS_INLINE
-static long
-_dispatch_kq_update_one(const _dispatch_kevent_qos_s *kev)
-{
-	return _dispatch_kq_update(kev, 1);
-}
-
-static inline bool
-_dispatch_kevent_maps_to_same_knote(const _dispatch_kevent_qos_s *e1,
-		const _dispatch_kevent_qos_s *e2)
-{
-	return e1->filter == e2->filter &&
-			e1->ident == e2->ident &&
-			e1->udata == e2->udata;
-}
-
-static inline int
-_dispatch_deferred_event_find_slot(dispatch_deferred_items_t ddi,
-		const _dispatch_kevent_qos_s *ke)
-{
-	_dispatch_kevent_qos_s *events = ddi->ddi_eventlist;
-	int i;
-
-	for (i = 0; i < ddi->ddi_nevents; i++) {
-		if (_dispatch_kevent_maps_to_same_knote(&events[i], ke)) {
-			break;
-		}
-	}
-	return i;
-}
-
-static void
-_dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke)
-{
-	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
-	int slot;
-
-	_dispatch_kevent_assert_valid_qos(ke);
-	if (ddi) {
-		if (unlikely(ddi->ddi_nevents == ddi->ddi_maxevents)) {
-			_dispatch_deferred_items_set(NULL);
-			_dispatch_kq_update_all(ddi->ddi_eventlist, ddi->ddi_nevents);
-			ddi->ddi_nevents = 0;
-			_dispatch_deferred_items_set(ddi);
-		}
-		if (ke->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
-			_dispatch_kevent_debug("deferred", ke);
-		}
-		bool needs_enable = false;
-		slot = _dispatch_deferred_event_find_slot(ddi, ke);
-		if (slot == ddi->ddi_nevents) {
-			ddi->ddi_nevents++;
-		} else if (ke->flags & EV_DELETE) {
-			// <rdar://problem/26202376> when deleting and an enable is pending,
-			// we must merge EV_ENABLE to do an immediate deletion
-			needs_enable = (ddi->ddi_eventlist[slot].flags & EV_ENABLE);
-		}
-		ddi->ddi_eventlist[slot] = *ke;
-		if (needs_enable) {
-			ddi->ddi_eventlist[slot].flags |= EV_ENABLE;
-		}
-	} else {
-		_dispatch_kq_update_one(ke);
-	}
-}
-
-static long
-_dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke)
-{
-	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
-	int slot, last;
-
-	_dispatch_kevent_assert_valid_qos(ke);
-	if (ddi) {
-		_dispatch_kevent_qos_s *events = ddi->ddi_eventlist;
-		slot = _dispatch_deferred_event_find_slot(ddi, ke);
-		if (slot < ddi->ddi_nevents) {
-			// <rdar://problem/26202376> when deleting and an enable is pending,
-			// we must merge EV_ENABLE to do an immediate deletion
-			if ((ke->flags & EV_DELETE) && (events[slot].flags & EV_ENABLE)) {
-				ke->flags |= EV_ENABLE;
-			}
-			last = --ddi->ddi_nevents;
-			if (slot != last) {
-				events[slot] = events[last];
-			}
-		}
-	}
-	return _dispatch_kq_update_one(ke);
-}
-
-#pragma mark -
 #pragma mark dispatch_mgr
 
-DISPATCH_NOINLINE
-static void
-_dispatch_mgr_queue_poke(dispatch_queue_t dq DISPATCH_UNUSED,
-		pthread_priority_t pp DISPATCH_UNUSED)
-{
-	static const _dispatch_kevent_qos_s kev = {
-		.ident = 1,
-		.filter = EVFILT_USER,
-		.fflags = NOTE_TRIGGER,
-	};
-
-#if DISPATCH_DEBUG && DISPATCH_MGR_QUEUE_DEBUG
-	_dispatch_debug("waking up the dispatch manager queue: %p", dq);
-#endif
-	_dispatch_kq_deferred_update(&kev);
-}
-
 void
-_dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
-		dispatch_wakeup_flags_t flags)
+_dispatch_mgr_queue_wakeup(dispatch_queue_t dq,
+		dispatch_qos_t qos DISPATCH_UNUSED, dispatch_wakeup_flags_t flags)
 {
 	if (flags & DISPATCH_WAKEUP_FLUSH) {
 		os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release);
@@ -2917,20 +2180,7 @@
 		return;
 	}
 
-	_dispatch_mgr_queue_poke(dq, pp);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_event_init(void)
-{
-	_dispatch_kevent_init();
-	_dispatch_timers_init();
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-	_dispatch_mach_recv_msg_buf_init();
-#endif
-	_dispatch_memorypressure_init();
-	_voucher_activity_debug_channel_init();
+	_dispatch_event_loop_poke();
 }
 
 #if DISPATCH_USE_MGR_THREAD
@@ -2945,35 +2195,7 @@
 		DISPATCH_INTERNAL_CRASH(0, "Locking the manager should not fail");
 	}
 	_dispatch_mgr_priority_init();
-	_dispatch_event_init();
-}
-
-DISPATCH_NOINLINE
-static bool
-_dispatch_mgr_wait_for_event(dispatch_deferred_items_t ddi, bool poll)
-{
-	int r;
-	dispatch_assert((size_t)ddi->ddi_maxevents < countof(ddi->ddi_eventlist));
-
-retry:
-	r = kevent_qos(_dispatch_kq, ddi->ddi_eventlist, ddi->ddi_nevents,
-			ddi->ddi_eventlist + ddi->ddi_maxevents, 1, NULL, NULL,
-			poll ? KEVENT_FLAG_IMMEDIATE : KEVENT_FLAG_NONE);
-	if (slowpath(r == -1)) {
-		int err = errno;
-		switch (err) {
-		case EINTR:
-			goto retry;
-		case EBADF:
-			DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
-			break;
-		default:
-			(void)dispatch_assume_zero(err);
-			break;
-		}
-	}
-	ddi->ddi_nevents = 0;
-	return r > 0;
+	_dispatch_event_loop_init();
 }
 
 DISPATCH_NOINLINE DISPATCH_NORETURN
@@ -2984,21 +2206,18 @@
 	bool poll;
 
 	ddi.ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC;
-	ddi.ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+	ddi.ddi_stashed_pri = DISPATCH_PRIORITY_NOSTASH;
+#if DISPATCH_EVENT_BACKEND_KEVENT
 	ddi.ddi_nevents = 0;
 	ddi.ddi_maxevents = 1;
-
+#endif
 	_dispatch_deferred_items_set(&ddi);
 
 	for (;;) {
 		_dispatch_mgr_queue_drain();
 		poll = _dispatch_mgr_timers();
 		poll = poll || _dispatch_queue_class_probe(&_dispatch_mgr_q);
-		if (_dispatch_mgr_wait_for_event(&ddi, poll)) {
-			_dispatch_kevent_qos_s *ke = ddi.ddi_eventlist + ddi.ddi_maxevents;
-			_dispatch_kevent_debug("received", ke);
-			_dispatch_kevent_drain(ke);
-		}
+		_dispatch_event_loop_drain(&ddi, poll);
 	}
 }
 #endif // DISPATCH_USE_MGR_THREAD
@@ -3024,18 +2243,23 @@
 
 #if DISPATCH_USE_KEVENT_WORKQUEUE
 
-#define DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER ((pthread_priority_t)(~0ul))
+#define DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER ((dispatch_priority_t)~0u)
+
+_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >=
+		DISPATCH_DEFERRED_ITEMS_EVENT_COUNT,
+		"our list should not be longer than the kernel's");
 
 DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
+static inline dispatch_priority_t
 _dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi)
 {
 	uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
+	dispatch_priority_t old_dbp;
 
 	ddi->ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC;
 	ddi->ddi_nevents = 0;
 	ddi->ddi_maxevents = countof(ddi->ddi_eventlist);
-	ddi->ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+	ddi->ddi_stashed_pri = DISPATCH_PRIORITY_NOSTASH;
 
 	pthread_priority_t pp = _dispatch_get_priority();
 	if (!(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) {
@@ -3049,7 +2273,7 @@
 		pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
 		_dispatch_thread_setspecific(dispatch_priority_key,
 					(void *)(uintptr_t)pp);
-		ddi->ddi_stashed_pp = 0;
+		ddi->ddi_stashed_pri = 0;
 		return DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER;
 	}
 
@@ -3076,8 +2300,7 @@
 	_dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp);
 
 	// ensure kevents registered from this thread are registered at manager QoS
-	pthread_priority_t old_dp = _dispatch_set_defaultpriority(
-			(pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, NULL);
+	old_dbp = _dispatch_set_basepri(DISPATCH_PRIORITY_FLAG_MANAGER);
 	_dispatch_queue_set_current(&_dispatch_mgr_q);
 	if (_dispatch_queue_drain_try_lock(&_dispatch_mgr_q,
 			DISPATCH_INVOKE_STEALING, NULL) != owned) {
@@ -3086,28 +2309,28 @@
 	static int event_thread_init;
 	if (!event_thread_init) {
 		event_thread_init = 1;
-		_dispatch_event_init();
+		_dispatch_event_loop_init();
 	}
-	return old_dp;
+	return old_dbp;
 }
 
 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
 static inline bool
-_dispatch_kevent_worker_thread_reset(pthread_priority_t old_dp)
+_dispatch_kevent_worker_thread_reset(dispatch_priority_t old_dbp)
 {
 	dispatch_queue_t dq = &_dispatch_mgr_q;
 	uint64_t orig_dq_state;
 
 	_dispatch_queue_drain_unlock(dq, DISPATCH_QUEUE_SERIAL_DRAIN_OWNED,
 			&orig_dq_state);
-	_dispatch_reset_defaultpriority(old_dp);
+	_dispatch_reset_basepri(old_dbp);
 	_dispatch_queue_set_current(NULL);
 	return _dq_state_is_dirty(orig_dq_state);
 }
 
 DISPATCH_NOINLINE
 void
-_dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events, int *nevents)
+_dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents)
 {
 	_dispatch_introspection_thread_add();
 
@@ -3116,3382 +2339,67 @@
 		return;
 	}
 
-	_dispatch_kevent_qos_s *ke = *events;
+	dispatch_kevent_t ke = *events;
+	DISPATCH_PERF_MON_VAR
 	int n = *nevents;
 	if (!dispatch_assume(n) || !dispatch_assume(*events)) return;
 
 	dispatch_deferred_items_s ddi;
-	pthread_priority_t old_dp = _dispatch_kevent_worker_thread_init(&ddi);
+	dispatch_priority_t old_dbp = _dispatch_kevent_worker_thread_init(&ddi);
 
 	_dispatch_deferred_items_set(&ddi);
-	for (int i = 0; i < n; i++) {
-		_dispatch_kevent_debug("received", ke);
-		_dispatch_kevent_drain(ke++);
-	}
+	_dispatch_event_loop_merge(ke, n);
 
-	if (old_dp != DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) {
+	if (old_dbp == DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) {
+		_dispatch_perfmon_start_impl(true);
+	} else {
 		_dispatch_mgr_queue_drain();
 		bool poll = _dispatch_mgr_timers();
-		if (_dispatch_kevent_worker_thread_reset(old_dp)) {
+		if (_dispatch_kevent_worker_thread_reset(old_dbp)) {
 			poll = true;
 		}
-		if (poll) _dispatch_mgr_queue_poke(&_dispatch_mgr_q, 0);
+		if (poll) _dispatch_event_loop_poke();
 	}
 	_dispatch_deferred_items_set(NULL);
 
-	if (ddi.ddi_stashed_pp & _PTHREAD_PRIORITY_PRIORITY_MASK) {
+	if (ddi.ddi_stashed_pri &&
+			ddi.ddi_stashed_pri != DISPATCH_PRIORITY_NOSTASH) {
 		*nevents = 0;
 		if (ddi.ddi_nevents) {
-			_dispatch_kq_update_all(ddi.ddi_eventlist, ddi.ddi_nevents);
+			_dispatch_event_loop_update(ddi.ddi_eventlist, ddi.ddi_nevents);
 		}
-		ddi.ddi_stashed_pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+		dispatch_qos_t qos = _dispatch_priority_qos(ddi.ddi_stashed_pri);
 		return _dispatch_root_queue_drain_deferred_item(ddi.ddi_stashed_dq,
-				ddi.ddi_stashed_dou, ddi.ddi_stashed_pp);
-#ifndef WORKQ_KEVENT_EVENT_BUFFER_LEN
-	} else if (ddi.ddi_nevents > *nevents) {
-		*nevents = 0;
-		_dispatch_kq_update_all(ddi.ddi_eventlist, ddi.ddi_nevents);
-#endif
+				ddi.ddi_stashed_dou, qos DISPATCH_PERF_MON_ARGS);
 	} else {
+		if (ddi.ddi_nevents) {
+			_dispatch_debug("flushing %d deferred kevents", ddi.ddi_nevents);
+		}
 		*nevents = ddi.ddi_nevents;
 		dispatch_static_assert(__builtin_types_compatible_p(typeof(**events),
 				typeof(*ddi.ddi_eventlist)));
 		memcpy(*events, ddi.ddi_eventlist,
 			 (size_t)ddi.ddi_nevents * sizeof(*ddi.ddi_eventlist));
 	}
+	if (old_dbp == DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) {
+		_dispatch_perfmon_end(perfmon_thread_event_no_steal);
+	}
 }
+
 #endif // DISPATCH_USE_KEVENT_WORKQUEUE
-
-#pragma mark -
-#pragma mark dispatch_memorypressure
-
-#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
-#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_MEMORYPRESSURE
-#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \
-		DISPATCH_MEMORYPRESSURE_NORMAL | \
-		DISPATCH_MEMORYPRESSURE_WARN | \
-		DISPATCH_MEMORYPRESSURE_CRITICAL | \
-		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
-		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL)
-#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \
-		DISPATCH_MEMORYPRESSURE_WARN | \
-		DISPATCH_MEMORYPRESSURE_CRITICAL | \
-		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
-		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL)
-#elif DISPATCH_USE_VM_PRESSURE_SOURCE
-#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_VM
-#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK DISPATCH_VM_PRESSURE
-#endif
-
-#if DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE
-static dispatch_source_t _dispatch_memorypressure_source;
-
-static void
-_dispatch_memorypressure_handler(void *context DISPATCH_UNUSED)
-{
-#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
-	unsigned long memorypressure;
-	memorypressure = dispatch_source_get_data(_dispatch_memorypressure_source);
-
-	if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) {
-		_dispatch_memory_warn = false;
-		_dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT;
-#if VOUCHER_USE_MACH_VOUCHER
-		if (_firehose_task_buffer) {
-			firehose_buffer_clear_bank_flags(_firehose_task_buffer,
-					FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
-		}
-#endif
-	}
-	if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) {
-		_dispatch_memory_warn = true;
-		_dispatch_continuation_cache_limit =
-				DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN;
-#if VOUCHER_USE_MACH_VOUCHER
-		if (_firehose_task_buffer) {
-			firehose_buffer_set_bank_flags(_firehose_task_buffer,
-					FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
-		}
-#endif
-	}
-	if (memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK) {
-		malloc_memory_event_handler(memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK);
-	}
-#elif DISPATCH_USE_VM_PRESSURE_SOURCE
-	// we must have gotten DISPATCH_VM_PRESSURE
-	malloc_zone_pressure_relief(0,0);
-#endif
-}
-
-static void
-_dispatch_memorypressure_init(void)
-{
-	_dispatch_memorypressure_source = dispatch_source_create(
-			DISPATCH_MEMORYPRESSURE_SOURCE_TYPE, 0,
-			DISPATCH_MEMORYPRESSURE_SOURCE_MASK,
-			_dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true));
-	dispatch_source_set_event_handler_f(_dispatch_memorypressure_source,
-			_dispatch_memorypressure_handler);
-	dispatch_activate(_dispatch_memorypressure_source);
-}
-#else
-static inline void _dispatch_memorypressure_init(void) {}
-#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE
-
-#pragma mark -
-#pragma mark dispatch_mach
-
-#if HAVE_MACH
-
-#if DISPATCH_DEBUG && DISPATCH_MACHPORT_DEBUG
-#define _dispatch_debug_machport(name) \
-		dispatch_debug_machport((name), __func__)
-#else
-#define _dispatch_debug_machport(name) ((void)(name))
-#endif
-
-// Flags for all notifications that are registered/unregistered when a
-// send-possible notification is requested/delivered
-#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \
-		DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED)
-#define _DISPATCH_MACH_RECV_FLAGS (DISPATCH_MACH_RECV_MESSAGE| \
-		DISPATCH_MACH_RECV_MESSAGE_DIRECT| \
-		DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE)
-#define _DISPATCH_MACH_RECV_DIRECT_FLAGS ( \
-		DISPATCH_MACH_RECV_MESSAGE_DIRECT| \
-		DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE)
-
-#define _DISPATCH_IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v)
-#define _DISPATCH_HASH(x, y) (_DISPATCH_IS_POWER_OF_TWO(y) ? \
-		(MACH_PORT_INDEX(x) & ((y) - 1)) : (MACH_PORT_INDEX(x) % (y)))
-
-#define _DISPATCH_MACHPORT_HASH_SIZE 32
-#define _DISPATCH_MACHPORT_HASH(x) \
-		_DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE)
-
-#ifndef MACH_RCV_VOUCHER
-#define MACH_RCV_VOUCHER 0x00000800
-#endif
-#define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX
-#define DISPATCH_MACH_RCV_OPTIONS ( \
-		MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \
-		MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \
-		MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) | \
-		MACH_RCV_VOUCHER
-
-#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->dk_kevent.ext[0])
-
-static void _dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke,
-		mach_msg_header_t *hdr);
-static void _dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke,
-		mach_msg_header_t *hdr);
-static void _dispatch_source_merge_mach_msg(dispatch_source_t ds,
-		dispatch_source_refs_t dr, dispatch_kevent_t dk,
-		_dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr,
-		mach_msg_size_t siz);
-static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk,
-		uint32_t new_flags, uint32_t del_flags, uint32_t mask,
-		mach_msg_id_t notify_msgid, mach_port_mscount_t notify_sync);
-static void _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr);
-static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm,
-		dispatch_mach_reply_refs_t dmr, unsigned int options);
-static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm);
-static void _dispatch_mach_msg_recv(dispatch_mach_t dm,
-		dispatch_mach_reply_refs_t dmr, _dispatch_kevent_qos_s *ke,
-		mach_msg_header_t *hdr, mach_msg_size_t siz);
-static void _dispatch_mach_merge_notification_kevent(dispatch_mach_t dm,
-		const _dispatch_kevent_qos_s *ke);
-static inline mach_msg_option_t _dispatch_mach_checkin_options(void);
-
-static const size_t _dispatch_mach_recv_msg_size =
-		DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE;
-static const size_t dispatch_mach_trailer_size =
-		sizeof(dispatch_mach_trailer_t);
-static mach_port_t _dispatch_mach_notify_port;
-static dispatch_source_t _dispatch_mach_notify_source;
-
-static inline void*
-_dispatch_kevent_mach_msg_buf(_dispatch_kevent_qos_s *ke)
-{
-	return (void*)ke->ext[0];
-}
-
-static inline mach_msg_size_t
-_dispatch_kevent_mach_msg_size(_dispatch_kevent_qos_s *ke)
-{
-	// buffer size in the successful receive case, but message size (like
-	// msgh_size) in the MACH_RCV_TOO_LARGE case, i.e. add trailer size.
-	return (mach_msg_size_t)ke->ext[1];
-}
-
-static void
-_dispatch_source_type_mach_recv_direct_init(dispatch_source_t ds,
-	dispatch_source_type_t type DISPATCH_UNUSED,
-	uintptr_t handle DISPATCH_UNUSED,
-	unsigned long mask DISPATCH_UNUSED,
-	dispatch_queue_t q DISPATCH_UNUSED)
-{
-	ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT;
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-	if (_dispatch_evfilt_machport_direct_enabled) return;
-	ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT;
-	ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED);
-	ds->ds_is_direct_kevent = false;
-#endif
-}
-
-static const
-struct dispatch_source_type_s _dispatch_source_type_mach_recv_direct = {
-	.ke = {
-		.filter = EVFILT_MACHPORT,
-		.flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC,
-		.fflags = DISPATCH_MACH_RCV_OPTIONS,
-	},
-	.init = _dispatch_source_type_mach_recv_direct_init,
-};
-
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-static mach_port_t _dispatch_mach_portset,  _dispatch_mach_recv_portset;
-static _dispatch_kevent_qos_s _dispatch_mach_recv_kevent = {
-	.filter = EVFILT_MACHPORT,
-	.flags = EV_ADD|EV_ENABLE|EV_DISPATCH,
-	.fflags = DISPATCH_MACH_RCV_OPTIONS,
-};
-
-static void
-_dispatch_mach_recv_msg_buf_init(void)
-{
-	if (_dispatch_evfilt_machport_direct_enabled) return;
-	mach_vm_size_t vm_size = mach_vm_round_page(
-			_dispatch_mach_recv_msg_size + dispatch_mach_trailer_size);
-	mach_vm_address_t vm_addr = vm_page_size;
-	kern_return_t kr;
-
-	while (slowpath(kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size,
-			VM_FLAGS_ANYWHERE))) {
-		if (kr != KERN_NO_SPACE) {
-			DISPATCH_CLIENT_CRASH(kr,
-					"Could not allocate mach msg receive buffer");
-		}
-		_dispatch_temporary_resource_shortage();
-		vm_addr = vm_page_size;
-	}
-	_dispatch_mach_recv_kevent.ext[0] = (uintptr_t)vm_addr;
-	_dispatch_mach_recv_kevent.ext[1] = vm_size;
-}
-#endif
-
-DISPATCH_NOINLINE
-static void
-_dispatch_source_merge_mach_msg_direct(dispatch_source_t ds,
-		_dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr)
-{
-	dispatch_continuation_t dc = _dispatch_source_get_event_handler(ds->ds_refs);
-	dispatch_queue_t cq = _dispatch_queue_get_current();
-
-	// see firehose_client_push_notify_async
-	_dispatch_queue_set_current(ds->_as_dq);
-	dc->dc_func(hdr);
-	_dispatch_queue_set_current(cq);
-	if (hdr != _dispatch_kevent_mach_msg_buf(ke)) {
-		free(hdr);
-	}
-}
-
-dispatch_source_t
-_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp,
-		const struct dispatch_continuation_s *dc)
-{
-	dispatch_source_t ds;
-	ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct,
-			recvp, 0, &_dispatch_mgr_q);
-	os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER],
-			(dispatch_continuation_t)dc, relaxed);
-	return ds;
-}
-
-static void
-_dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED)
-{
-	kern_return_t kr;
-#if HAVE_MACH_PORT_CONSTRUCT
-	mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT };
-#ifdef __LP64__
-	const mach_port_context_t guard = 0xfeed09071f1ca7edull;
-#else
-	const mach_port_context_t guard = 0xff1ca7edull;
-#endif
-	kr = mach_port_construct(mach_task_self(), &opts, guard,
-			&_dispatch_mach_notify_port);
-#else
-	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
-			&_dispatch_mach_notify_port);
-#endif
-	DISPATCH_VERIFY_MIG(kr);
-	if (slowpath(kr)) {
-		DISPATCH_CLIENT_CRASH(kr,
-				"mach_port_construct() failed: cannot create receive right");
-	}
-
-	static const struct dispatch_continuation_s dc = {
-		.dc_func = (void*)_dispatch_mach_notify_source_invoke,
-	};
-	_dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv(
-			_dispatch_mach_notify_port, &dc);
-	dispatch_assert(_dispatch_mach_notify_source);
-	dispatch_activate(_dispatch_mach_notify_source);
-}
-
-static mach_port_t
-_dispatch_get_mach_notify_port(void)
-{
-	static dispatch_once_t pred;
-	dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init);
-	return _dispatch_mach_notify_port;
-}
-
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-static void
-_dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED)
-{
-	kern_return_t kr;
-
-	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET,
-			&_dispatch_mach_recv_portset);
-	DISPATCH_VERIFY_MIG(kr);
-	if (slowpath(kr)) {
-		DISPATCH_CLIENT_CRASH(kr,
-				"mach_port_allocate() failed: cannot create port set");
-	}
-	_dispatch_kevent_qos_s *ke = &_dispatch_mach_recv_kevent;
-	dispatch_assert(_dispatch_kevent_mach_msg_buf(ke));
-	dispatch_assert(dispatch_mach_trailer_size ==
-			REQUESTED_TRAILER_SIZE_NATIVE(MACH_RCV_TRAILER_ELEMENTS(
-			DISPATCH_MACH_RCV_TRAILER)));
-	ke->ident = _dispatch_mach_recv_portset;
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-	if (_dispatch_kevent_workqueue_enabled) {
-		ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-	}
-#endif
-	_dispatch_kq_immediate_update(&_dispatch_mach_recv_kevent);
-}
-
-static mach_port_t
-_dispatch_get_mach_recv_portset(void)
-{
-	static dispatch_once_t pred;
-	dispatch_once_f(&pred, NULL, _dispatch_mach_recv_portset_init);
-	return _dispatch_mach_recv_portset;
-}
-
-static void
-_dispatch_mach_portset_init(void *context DISPATCH_UNUSED)
-{
-	_dispatch_kevent_qos_s kev = {
-		.filter = EVFILT_MACHPORT,
-		.flags = EV_ADD,
-	};
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-	if (_dispatch_kevent_workqueue_enabled) {
-		kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-	}
-#endif
-
-	kern_return_t kr;
-
-	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET,
-			&_dispatch_mach_portset);
-	DISPATCH_VERIFY_MIG(kr);
-	if (slowpath(kr)) {
-		DISPATCH_CLIENT_CRASH(kr,
-				"mach_port_allocate() failed: cannot create port set");
-	}
-	kev.ident = _dispatch_mach_portset;
-	_dispatch_kq_immediate_update(&kev);
-}
-
-static mach_port_t
-_dispatch_get_mach_portset(void)
-{
-	static dispatch_once_t pred;
-	dispatch_once_f(&pred, NULL, _dispatch_mach_portset_init);
-	return _dispatch_mach_portset;
-}
-
-static kern_return_t
-_dispatch_mach_portset_update(dispatch_kevent_t dk, mach_port_t mps)
-{
-	mach_port_t mp = (mach_port_t)dk->dk_kevent.ident;
-	kern_return_t kr;
-
-	_dispatch_debug_machport(mp);
-	kr = mach_port_move_member(mach_task_self(), mp, mps);
-	if (slowpath(kr)) {
-		DISPATCH_VERIFY_MIG(kr);
-		switch (kr) {
-		case KERN_INVALID_RIGHT:
-			if (mps) {
-				_dispatch_bug_mach_client("_dispatch_kevent_machport_enable: "
-						"mach_port_move_member() failed ", kr);
-				break;
-			}
-			// fall through
-		case KERN_INVALID_NAME:
-#if DISPATCH_DEBUG
-			_dispatch_log("Corruption: Mach receive right 0x%x destroyed "
-					"prematurely", mp);
-#endif
-			break;
-		default:
-			(void)dispatch_assume_zero(kr);
-			break;
-		}
-	}
-	return mps ? kr : 0;
-}
-
-static kern_return_t
-_dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags,
-		uint32_t del_flags)
-{
-	kern_return_t kr = 0;
-	dispatch_assert_zero(new_flags & del_flags);
-	if ((new_flags & _DISPATCH_MACH_RECV_FLAGS) ||
-			(del_flags & _DISPATCH_MACH_RECV_FLAGS)) {
-		mach_port_t mps;
-		if (new_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) {
-			mps = _dispatch_get_mach_recv_portset();
-		} else if ((new_flags & DISPATCH_MACH_RECV_MESSAGE) ||
-				((del_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) &&
-				(dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE))) {
-			mps = _dispatch_get_mach_portset();
-		} else {
-			mps = MACH_PORT_NULL;
-		}
-		kr = _dispatch_mach_portset_update(dk, mps);
-	}
-	return kr;
-}
-#endif // DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-
-static kern_return_t
-_dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags,
-		uint32_t del_flags)
-{
-	kern_return_t kr = 0;
-	dispatch_assert_zero(new_flags & del_flags);
-	if ((new_flags & _DISPATCH_MACH_SP_FLAGS) ||
-			(del_flags & _DISPATCH_MACH_SP_FLAGS)) {
-		// Requesting a (delayed) non-sync send-possible notification
-		// registers for both immediate dead-name notification and delayed-arm
-		// send-possible notification for the port.
-		// The send-possible notification is armed when a mach_msg() with
-		// MACH_SEND_NOTIFY to the port times out.
-		// If send-possible is unavailable, fall back to immediate dead-name
-		// registration rdar://problem/2527840&9008724
-		kr = _dispatch_mach_notify_update(dk, new_flags, del_flags,
-				_DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE,
-				MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME ? 1 : 0);
-	}
-	return kr;
-}
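-
-// From the sender's side, the delayed arming described above looks like
-// this (a hedged sketch; cf. the send path in _dispatch_mach_msg_send
-// below): a send is attempted with MACH_SEND_NOTIFY and a timeout, and
-// MACH_SEND_TIMED_OUT then means the previously requested send-possible
-// notification is armed and will fire once the destination queue drains:
-//
-//	kr = mach_msg(msg, MACH_SEND_MSG|MACH_SEND_TIMEOUT|MACH_SEND_NOTIFY,
-//			msg->msgh_size, 0, MACH_PORT_NULL, 0, MACH_PORT_NULL);
-//	// kr == MACH_SEND_TIMED_OUT => notification armed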
-
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-DISPATCH_NOINLINE
-static void
-_dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke)
-{
-	mach_port_t name = (mach_port_name_t)ke->data;
-	dispatch_kevent_t dk;
-
-	_dispatch_debug_machport(name);
-	dk = _dispatch_kevent_find(name, EVFILT_MACHPORT);
-	if (!dispatch_assume(dk)) {
-		return;
-	}
-	_dispatch_mach_portset_update(dk, MACH_PORT_NULL); // emulate EV_DISPATCH
-
-	_dispatch_kevent_qos_s kev = {
-		.ident = name,
-		.filter = EVFILT_MACHPORT,
-		.flags = EV_ADD|EV_ENABLE|EV_DISPATCH,
-		.fflags = DISPATCH_MACH_RECV_MESSAGE,
-		.udata = (uintptr_t)dk,
-	};
-	_dispatch_kevent_debug("synthetic", &kev);
-	_dispatch_kevent_merge(&kev);
-}
-#endif
-
-DISPATCH_NOINLINE
-static void
-_dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke)
-{
-	mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke);
-	mach_msg_size_t siz;
-	mach_msg_return_t kr = (mach_msg_return_t)ke->fflags;
-
-	if (!fastpath(hdr)) {
-		DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message");
-	}
-	if (fastpath(!kr)) {
-		_dispatch_kevent_mach_msg_recv(ke, hdr);
-		goto out;
-	} else if (kr != MACH_RCV_TOO_LARGE) {
-		goto out;
-	} else if (!ke->data) {
-		DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity");
-	}
-	if (slowpath(ke->ext[1] > (UINT_MAX - dispatch_mach_trailer_size))) {
-		DISPATCH_INTERNAL_CRASH(ke->ext[1],
-				"EVFILT_MACHPORT with overlarge message");
-	}
-	siz = _dispatch_kevent_mach_msg_size(ke) + dispatch_mach_trailer_size;
-	hdr = malloc(siz);
-	if (!dispatch_assume(hdr)) {
-		// Kernel will discard message too large to fit
-		hdr = NULL;
-		siz = 0;
-	}
-	mach_port_t name = (mach_port_name_t)ke->data;
-	const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS |
-			MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE);
-	kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE,
-			MACH_PORT_NULL);
-	if (fastpath(!kr)) {
-		_dispatch_kevent_mach_msg_recv(ke, hdr);
-		goto out;
-	} else if (kr == MACH_RCV_TOO_LARGE) {
-		_dispatch_log("BUG in libdispatch client: "
-				"_dispatch_kevent_mach_msg_drain: dropped message too "
-				"large to fit in memory: id = 0x%x, size = %u",
-				hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke));
-		kr = MACH_MSG_SUCCESS;
-	}
-	if (hdr != _dispatch_kevent_mach_msg_buf(ke)) {
-		free(hdr);
-	}
-out:
-	if (slowpath(kr)) {
-		_dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: "
-				"message reception failed", kr);
-	}
-}
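-
-// Recovery path above, summarized: the portset receive uses the
-// MACH_RCV_LARGE identity options (DISPATCH_MACH_RCV_OPTIONS), so an
-// oversized message stays queued while the kevent reports its size in
-// ext[1] and the identity port in data. The drain then allocates an
-// exact-size buffer and re-receives from that port with MACH_RCV_LARGE
-// stripped and MACH_RCV_TIMEOUT plus a zero timeout, i.e. a poll that
-// cannot block if the message was consumed in the meantime.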
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke)
-{
-	if (unlikely(!(ke->flags & EV_UDATA_SPECIFIC))) {
-#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
-		if (ke->ident == _dispatch_mach_recv_portset) {
-			_dispatch_kevent_mach_msg_drain(ke);
-			return _dispatch_kq_deferred_update(&_dispatch_mach_recv_kevent);
-		} else if (ke->ident == _dispatch_mach_portset) {
-			return _dispatch_kevent_machport_drain(ke);
-		}
-#endif
-		return _dispatch_kevent_error(ke);
-	}
-
-	dispatch_kevent_t dk = (dispatch_kevent_t)ke->udata;
-	dispatch_source_refs_t dr = TAILQ_FIRST(&dk->dk_sources);
-	bool is_reply = (dk->dk_kevent.flags & EV_ONESHOT);
-	dispatch_source_t ds = _dispatch_source_from_refs(dr);
-
-	if (_dispatch_kevent_mach_msg_size(ke)) {
-		_dispatch_kevent_mach_msg_drain(ke);
-		if (is_reply) {
-			// _dispatch_kevent_mach_msg_drain() should have deleted this event
-			dispatch_assert(ke->flags & EV_DELETE);
-			return;
-		}
-
-		if (!(ds->dq_atomic_flags & DSF_CANCELED)) {
-			// re-arm the mach channel
-			ke->fflags = DISPATCH_MACH_RCV_OPTIONS;
-			ke->data = 0;
-			ke->ext[0] = 0;
-			ke->ext[1] = 0;
-			return _dispatch_kq_deferred_update(ke);
-		}
-	} else if (is_reply) {
-		DISPATCH_INTERNAL_CRASH(ke->flags, "Unexpected EVFILT_MACHPORT event");
-	}
-	if (unlikely((ke->flags & EV_VANISHED) &&
-			(dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE))) {
-		DISPATCH_CLIENT_CRASH(ke->flags,
-				"Unexpected EV_VANISHED (do not destroy random mach ports)");
-	}
-	return _dispatch_kevent_merge(ke);
-}
-
-static void
-_dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke,
-		mach_msg_header_t *hdr)
-{
-	dispatch_source_refs_t dri;
-	dispatch_kevent_t dk;
-	mach_port_t name = hdr->msgh_local_port;
-	mach_msg_size_t siz = hdr->msgh_size + dispatch_mach_trailer_size;
-
-	if (!dispatch_assume(hdr->msgh_size <= UINT_MAX -
-			dispatch_mach_trailer_size)) {
-		_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
-				"received overlarge message");
-		return _dispatch_kevent_mach_msg_destroy(ke, hdr);
-	}
-	if (!dispatch_assume(name)) {
-		_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
-				"received message with MACH_PORT_NULL port");
-		return _dispatch_kevent_mach_msg_destroy(ke, hdr);
-	}
-	_dispatch_debug_machport(name);
-	if (ke->flags & EV_UDATA_SPECIFIC) {
-		dk = (void*)ke->udata;
-	} else {
-		dk = _dispatch_kevent_find(name, EVFILT_MACHPORT);
-	}
-	if (!dispatch_assume(dk)) {
-		_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
-				"received message with unknown kevent");
-		return _dispatch_kevent_mach_msg_destroy(ke, hdr);
-	}
-	TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) {
-		dispatch_source_t dsi = _dispatch_source_from_refs(dri);
-		if (dsi->ds_pending_data_mask & _DISPATCH_MACH_RECV_DIRECT_FLAGS) {
-			return _dispatch_source_merge_mach_msg(dsi, dri, dk, ke, hdr, siz);
-		}
-	}
-	_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: "
-			"received message with no listeners");
-	return _dispatch_kevent_mach_msg_destroy(ke, hdr);
-}
-
-static void
-_dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke,
-		mach_msg_header_t *hdr)
-{
-	if (hdr) {
-		mach_msg_destroy(hdr);
-		if (hdr != _dispatch_kevent_mach_msg_buf(ke)) {
-			free(hdr);
-		}
-	}
-}
-
-static void
-_dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr,
-		dispatch_kevent_t dk, _dispatch_kevent_qos_s *ke,
-		mach_msg_header_t *hdr, mach_msg_size_t siz)
-{
-	if (dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE) {
-		return _dispatch_source_merge_mach_msg_direct(ds, ke, hdr);
-	}
-	dispatch_mach_reply_refs_t dmr = NULL;
-	if (dk->dk_kevent.flags & EV_ONESHOT) {
-		dmr = (dispatch_mach_reply_refs_t)dr;
-	}
-	return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, ke, hdr, siz);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final)
-{
-	dispatch_source_refs_t dri, dr_next;
-	dispatch_kevent_t dk;
-	bool unreg;
-
-	dk = _dispatch_kevent_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION);
-	if (!dk) {
-		return;
-	}
-
-	// Update notification registration state.
-	dk->dk_kevent.data &= ~_DISPATCH_MACH_SP_FLAGS;
-	_dispatch_kevent_qos_s kev = {
-		.ident = name,
-		.filter = DISPATCH_EVFILT_MACH_NOTIFICATION,
-		.flags = EV_ADD|EV_ENABLE,
-		.fflags = flag,
-		.udata = (uintptr_t)dk,
-	};
-	if (final) {
-		// final notification: it can never fire again
-		unreg = true;
-	} else {
-		// Re-register for notification before delivery
-		unreg = _dispatch_kevent_resume(dk, flag, 0);
-	}
-	DISPATCH_MACH_NOTIFICATION_ARMED(dk) = 0;
-	TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) {
-		dispatch_source_t dsi = _dispatch_source_from_refs(dri);
-		if (dx_type(dsi) == DISPATCH_MACH_CHANNEL_TYPE) {
-			dispatch_mach_t dm = (dispatch_mach_t)dsi;
-			_dispatch_mach_merge_notification_kevent(dm, &kev);
-			if (unreg && dm->dm_dkev) {
-				_dispatch_mach_notification_kevent_unregister(dm);
-			}
-		} else {
-			_dispatch_source_merge_kevent(dsi, &kev);
-			if (unreg) {
-				_dispatch_source_kevent_unregister(dsi);
-			}
-		}
-		if (!dr_next || DISPATCH_MACH_NOTIFICATION_ARMED(dk)) {
-			// current merge is last in list (dk might have been freed)
-			// or it re-armed the notification
-			return;
-		}
-	}
-}
-
-static kern_return_t
-_dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags,
-		uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid,
-		mach_port_mscount_t notify_sync)
-{
-	mach_port_t previous, port = (mach_port_t)dk->dk_kevent.ident;
-	typeof(dk->dk_kevent.data) prev = dk->dk_kevent.data;
-	kern_return_t kr, krr = 0;
-
-	// Update notification registration state.
-	dk->dk_kevent.data |= (new_flags | dk->dk_kevent.fflags) & mask;
-	dk->dk_kevent.data &= ~(del_flags & mask);
-
-	_dispatch_debug_machport(port);
-	if ((dk->dk_kevent.data & mask) && !(prev & mask)) {
-		_dispatch_debug("machport[0x%08x]: registering for send-possible "
-				"notification", port);
-		previous = MACH_PORT_NULL;
-		krr = mach_port_request_notification(mach_task_self(), port,
-				notify_msgid, notify_sync, _dispatch_get_mach_notify_port(),
-				MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
-		DISPATCH_VERIFY_MIG(krr);
-
-		switch(krr) {
-		case KERN_INVALID_NAME:
-		case KERN_INVALID_RIGHT:
-			// Suppress errors & clear registration state
-			dk->dk_kevent.data &= ~mask;
-			break;
-		default:
-			// Otherwise no errors are expected from Mach; log any that occur
-			if (dispatch_assume_zero(krr)) {
-				// log the error & clear registration state
-				dk->dk_kevent.data &= ~mask;
-			} else if (dispatch_assume_zero(previous)) {
-				// Another subsystem has beaten libdispatch to requesting the
-				// specified Mach notification on this port. We should
-				// technically cache the previous port and message it when the
-				// kernel messages our port. Or we can just say screw those
-				// subsystems and deallocate the previous port.
-				// They should adopt libdispatch :-P
-				kr = mach_port_deallocate(mach_task_self(), previous);
-				DISPATCH_VERIFY_MIG(kr);
-				(void)dispatch_assume_zero(kr);
-				previous = MACH_PORT_NULL;
-			}
-		}
-	} else if (!(dk->dk_kevent.data & mask) && (prev & mask)) {
-		_dispatch_debug("machport[0x%08x]: unregistering for send-possible "
-				"notification", port);
-		previous = MACH_PORT_NULL;
-		kr = mach_port_request_notification(mach_task_self(), port,
-				notify_msgid, notify_sync, MACH_PORT_NULL,
-				MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous);
-		DISPATCH_VERIFY_MIG(kr);
-
-		switch (kr) {
-		case KERN_INVALID_NAME:
-		case KERN_INVALID_RIGHT:
-		case KERN_INVALID_ARGUMENT:
-			break;
-		default:
-			if (dispatch_assume_zero(kr)) {
-				// log the error
-			}
-		}
-	} else {
-		return 0;
-	}
-	if (slowpath(previous)) {
-		// the kernel has not consumed the send-once right yet
-		(void)dispatch_assume_zero(
-				_dispatch_send_consume_send_once_right(previous));
-	}
-	return krr;
-}
-
-static void
-_dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED)
-{
-	static int notify_type = HOST_NOTIFY_CALENDAR_SET;
-	kern_return_t kr;
-	_dispatch_debug("registering for calendar-change notification");
-retry:
-	kr = host_request_notification(_dispatch_get_mach_host_port(),
-			notify_type, _dispatch_get_mach_notify_port());
-	// Fall back when the newer _SET variant is unsupported; the _CHANGE
-	// variant fires strictly more often.
-	if (kr == KERN_INVALID_ARGUMENT &&
-		notify_type != HOST_NOTIFY_CALENDAR_CHANGE){
-		notify_type = HOST_NOTIFY_CALENDAR_CHANGE;
-		goto retry;
-	}
-	DISPATCH_VERIFY_MIG(kr);
-	(void)dispatch_assume_zero(kr);
-}
-
-static void
-_dispatch_mach_host_calendar_change_register(void)
-{
-	static dispatch_once_t pred;
-	dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update);
-}
-
-static void
-_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr)
-{
-	mig_reply_error_t reply;
-	dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union
-		__ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem));
-	dispatch_assert(sizeof(mig_reply_error_t) < _dispatch_mach_recv_msg_size);
-	boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head);
-	if (!success && reply.RetCode == MIG_BAD_ID &&
-			(hdr->msgh_id == HOST_CALENDAR_SET_REPLYID ||
-			 hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) {
-		_dispatch_debug("calendar-change notification");
-		_dispatch_timers_calendar_change();
-		_dispatch_mach_host_notify_update(NULL);
-		success = TRUE;
-		reply.RetCode = KERN_SUCCESS;
-	}
-	if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) {
-		(void)dispatch_assume_zero(reply.RetCode);
-	}
-	if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) {
-		mach_msg_destroy(hdr);
-	}
-}
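-
-// For context: libdispatch_internal_protocol_server() is the MIG-generated
-// demux routine; it returns TRUE iff msgh_id matched one of the
-// subsystem's routines, filling reply.RetCode from the handler. Host
-// calendar notifications arrive as raw (non-MIG) messages, hence the
-// MIG_BAD_ID carve-out above. Per MIG convention, a request that was not
-// consumed successfully must be passed to mach_msg_destroy() so any rights
-// or out-of-line memory it carries are released.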
-
-kern_return_t
-_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED,
-		mach_port_name_t name)
-{
-#if DISPATCH_DEBUG
-	_dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x "
-			"deleted prematurely", name);
-#endif
-
-	_dispatch_debug_machport(name);
-	_dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true);
-
-	return KERN_SUCCESS;
-}
-
-kern_return_t
-_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED,
-		mach_port_name_t name)
-{
-	kern_return_t kr;
-
-	_dispatch_debug("machport[0x%08x]: dead-name notification", name);
-	_dispatch_debug_machport(name);
-	_dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, true);
-
-	// the act of receiving a dead-name notification allocates a dead-name
-	// right that must be deallocated
-	kr = mach_port_deallocate(mach_task_self(), name);
-	DISPATCH_VERIFY_MIG(kr);
-	//(void)dispatch_assume_zero(kr);
-
-	return KERN_SUCCESS;
-}
-
-kern_return_t
-_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED,
-		mach_port_name_t name)
-{
-	_dispatch_debug("machport[0x%08x]: send-possible notification", name);
-	_dispatch_debug_machport(name);
-	_dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false);
-
-	return KERN_SUCCESS;
-}
-
-#pragma mark -
-#pragma mark dispatch_mach_t
-
-#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1
-#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2
-#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4
-#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8
-#define DISPATCH_MACH_OPTIONS_MASK 0xffff
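-
-// These private send options live in the low 16 bits of dmsg_options and
-// overlap kernel bits such as MACH_SEND_MSG (0x1); the send path strips
-// them with (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK) before calling
-// mach_msg(), so only mach_msg_option_t bits above 0xffff pass through to
-// the kernel unmodified.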
-
-#define DM_SEND_STATUS_SUCCESS 0x1
-#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2
-
-DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t,
-	DM_SEND_INVOKE_NONE            = 0x0,
-	DM_SEND_INVOKE_FLUSH           = 0x1,
-	DM_SEND_INVOKE_NEEDS_BARRIER   = 0x2,
-	DM_SEND_INVOKE_CANCEL          = 0x4,
-	DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8,
-	DM_SEND_INVOKE_IMMEDIATE_SEND  = 0x10,
-);
-#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \
-		((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND)
-
-static inline pthread_priority_t _dispatch_mach_priority_propagate(
-		mach_msg_option_t options);
-static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou);
-static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou);
-static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm,
-		mach_port_t local_port, mach_port_t remote_port);
-static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm,
-		dispatch_mach_reply_refs_t dmr, mach_port_t local_port);
-static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected(
-		dispatch_object_t dou, dispatch_mach_reply_refs_t dmr);
-static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm,
-		dispatch_object_t dou);
-static inline mach_msg_header_t* _dispatch_mach_msg_get_msg(
-		dispatch_mach_msg_t dmsg);
-static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou,
-		pthread_priority_t pp);
-
-static dispatch_mach_t
-_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context,
-		dispatch_mach_handler_function_t handler, bool handler_is_block)
-{
-	dispatch_mach_t dm;
-	dispatch_mach_refs_t dr;
-
-	dm = _dispatch_alloc(DISPATCH_VTABLE(mach),
-			sizeof(struct dispatch_mach_s));
-	_dispatch_queue_init(dm->_as_dq, DQF_NONE, 1, true);
-
-	dm->dq_label = label;
-	dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds
-
-	dr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_refs_s));
-	dr->dr_source_wref = _dispatch_ptr2wref(dm);
-	dr->dm_handler_func = handler;
-	dr->dm_handler_ctxt = context;
-	dm->ds_refs = dr;
-	dm->dm_handler_is_block = handler_is_block;
-
-	dm->dm_refs = _dispatch_calloc(1ul,
-			sizeof(struct dispatch_mach_send_refs_s));
-	dm->dm_refs->dr_source_wref = _dispatch_ptr2wref(dm);
-	dm->dm_refs->dm_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED;
-	TAILQ_INIT(&dm->dm_refs->dm_replies);
-
-	if (slowpath(!q)) {
-		q = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true);
-	} else {
-		_dispatch_retain(q);
-	}
-	dm->do_targetq = q;
-	_dispatch_object_debug(dm, "%s", __func__);
-	return dm;
-}
-
-dispatch_mach_t
-dispatch_mach_create(const char *label, dispatch_queue_t q,
-		dispatch_mach_handler_t handler)
-{
-	dispatch_block_t bb = _dispatch_Block_copy((void*)handler);
-	return _dispatch_mach_create(label, q, bb,
-			(dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true);
-}
-
-dispatch_mach_t
-dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context,
-		dispatch_mach_handler_function_t handler)
-{
-	return _dispatch_mach_create(label, q, context, handler, false);
-}
-
-void
-_dispatch_mach_dispose(dispatch_mach_t dm)
-{
-	_dispatch_object_debug(dm, "%s", __func__);
-	dispatch_mach_refs_t dr = dm->ds_refs;
-	if (dm->dm_handler_is_block && dr->dm_handler_ctxt) {
-		Block_release(dr->dm_handler_ctxt);
-	}
-	free(dr);
-	free(dm->dm_refs);
-	_dispatch_queue_destroy(dm->_as_dq);
-}
-
-void
-dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive,
-		mach_port_t send, dispatch_mach_msg_t checkin)
-{
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	dispatch_kevent_t dk;
-	uint32_t disconnect_cnt;
-	dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct;
-
-	dm->ds_is_direct_kevent = (bool)_dispatch_evfilt_machport_direct_enabled;
-	if (MACH_PORT_VALID(receive)) {
-		dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s));
-		dk->dk_kevent = type->ke;
-		dk->dk_kevent.ident = receive;
-		dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_VANISHED;
-		dk->dk_kevent.udata = (uintptr_t)dk;
-		TAILQ_INIT(&dk->dk_sources);
-		dm->ds_dkev = dk;
-		dm->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT;
-		dm->ds_needs_rearm = dm->ds_is_direct_kevent;
-		if (!dm->ds_is_direct_kevent) {
-			dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT;
-			dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED);
-		}
-		_dispatch_retain(dm); // the reference the manager queue holds
-	}
-	dr->dm_send = send;
-	if (MACH_PORT_VALID(send)) {
-		if (checkin) {
-			dispatch_retain(checkin);
-			checkin->dmsg_options = _dispatch_mach_checkin_options();
-			dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin);
-		}
-		dr->dm_checkin = checkin;
-	}
-	// monitor message reply ports
-	dm->ds_pending_data_mask |= DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE;
-	dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 ==
-			DISPATCH_MACH_NEVER_INSTALLED);
-	disconnect_cnt = os_atomic_dec2o(dr, dm_disconnect_cnt, release);
-	if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) {
-		DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected");
-	}
-	_dispatch_object_debug(dm, "%s", __func__);
-	return dispatch_activate(dm);
-}
-
-// assumes low bit of mach port names is always set
-#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u
-
-static inline void
-_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr)
-{
-	dmr->dmr_reply &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED;
-}
-
-static inline bool
-_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr)
-{
-	mach_port_t reply_port = dmr->dmr_reply;
-	return reply_port ? !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED)
-			: false;
-}
-
-static inline mach_port_t
-_dispatch_mach_reply_get_reply_port(dispatch_mach_reply_refs_t dmr)
-{
-	mach_port_t reply_port = dmr->dmr_reply;
-	return reply_port ? (reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0;
-}
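-
-// Worked example of the tag bit, under the stated low-bit assumption: a
-// borrowed reply port named 0x2603 is stored verbatim, so bit 0 set means
-// "unowned"; marking the port owned clears bit 0 (0x2602 is stored). Since
-// real names always have bit 0 set, get_reply_port() can unconditionally
-// re-or the bit and return 0x2603 in either case, while ownership is
-// decided purely by bit 0 of the stored value.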
-
-static inline bool
-_dispatch_mach_reply_tryremove(dispatch_mach_t dm,
-		dispatch_mach_reply_refs_t dmr)
-{
-	bool removed;
-	_dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-	if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
-		TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list);
-		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
-	}
-	_dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-	return removed;
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm,
-		dispatch_mach_reply_refs_t dmr, unsigned int options)
-{
-	dispatch_mach_msg_t dmsgr = NULL;
-	bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED);
-	if (options & DKEV_UNREGISTER_REPLY_REMOVE) {
-		_dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-		if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
-			DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration");
-		}
-		TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list);
-		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
-		_dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-	}
-	if (disconnected) {
-		dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr);
-	} else if (dmr->dmr_voucher) {
-		_voucher_release(dmr->dmr_voucher);
-		dmr->dmr_voucher = NULL;
-	}
-	_dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p",
-			_dispatch_mach_reply_get_reply_port(dmr),
-			disconnected ? " (disconnected)" : "", dmr->dmr_ctxt);
-	if (dmsgr) {
-		return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority);
-	}
-	dispatch_assert(!(options & DKEV_UNREGISTER_WAKEUP));
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm,
-		dispatch_mach_reply_refs_t dmr, unsigned int options)
-{
-	dispatch_mach_msg_t dmsgr = NULL;
-	bool replies_empty = false;
-	bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED);
-	if (options & DKEV_UNREGISTER_REPLY_REMOVE) {
-		_dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-		if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
-			DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration");
-		}
-		TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list);
-		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
-		replies_empty = TAILQ_EMPTY(&dm->dm_refs->dm_replies);
-		_dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-	}
-	if (disconnected) {
-		dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr);
-	} else if (dmr->dmr_voucher) {
-		_voucher_release(dmr->dmr_voucher);
-		dmr->dmr_voucher = NULL;
-	}
-	uint32_t flags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE;
-	dispatch_kevent_t dk = dmr->dmr_dkev;
-	_dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p",
-			(mach_port_t)dk->dk_kevent.ident,
-			disconnected ? " (disconnected)" : "", dmr->dmr_ctxt);
-	if (!dm->ds_is_direct_kevent) {
-		dmr->dmr_dkev = NULL;
-		TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list);
-		_dispatch_kevent_unregister(dk, flags, 0);
-	} else {
-		long r = _dispatch_kevent_unregister(dk, flags, options);
-		if (r == EINPROGRESS) {
-			_dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]",
-					(mach_port_t)dk->dk_kevent.ident, dk);
-			dispatch_assert(options == DKEV_UNREGISTER_DISCONNECTED);
-			// dmr must be put back so that the event delivery finds it;
-			// the replies lock is held by the caller.
-			TAILQ_INSERT_HEAD(&dm->dm_refs->dm_replies, dmr, dmr_list);
-			if (dmsgr) {
-				dmr->dmr_voucher = dmsgr->dmsg_voucher;
-				dmsgr->dmsg_voucher = NULL;
-				dispatch_release(dmsgr);
-			}
-			return; // deferred unregistration
-		}
-		dispatch_assume_zero(r);
-		dmr->dmr_dkev = NULL;
-		_TAILQ_TRASH_ENTRY(dmr, dr_list);
-	}
-	free(dmr);
-	if (dmsgr) {
-		return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority);
-	}
-	if ((options & DKEV_UNREGISTER_WAKEUP) && replies_empty &&
-			(dm->dm_refs->dm_disconnect_cnt ||
-			(dm->dq_atomic_flags & DSF_CANCELED))) {
-		dx_wakeup(dm, 0, DISPATCH_WAKEUP_FLUSH);
-	}
-}
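-
-// Deferred deletion, for context: with direct (EV_UDATA_SPECIFIC) kevents
-// the kernel may still be delivering the event when unregistration runs,
-// in which case _dispatch_kevent_unregister() returns EINPROGRESS. The
-// code above then re-enqueues dmr so the in-flight delivery can find it,
-// parks the reply voucher back on dmr, and lets that delivery finish the
-// unregistration rather than freeing dmr out from under the kernel.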
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_reply_waiter_register(dispatch_mach_t dm,
-		dispatch_mach_reply_refs_t dmr, mach_port_t reply_port,
-		dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts)
-{
-	dmr->dr_source_wref = _dispatch_ptr2wref(dm);
-	dmr->dmr_dkev = NULL;
-	dmr->dmr_reply = reply_port;
-	if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
-		_dispatch_mach_reply_mark_reply_port_owned(dmr);
-	} else {
-		if (dmsg->dmsg_voucher) {
-			dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
-		}
-		dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority;
-		// make reply context visible to leaks rdar://11777199
-		dmr->dmr_ctxt = dmsg->do_ctxt;
-	}
-
-	_dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p",
-			reply_port, dmsg->do_ctxt);
-	_dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-	if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
-		DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
-				"Reply already registered");
-	}
-	TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list);
-	_dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port,
-		dispatch_mach_msg_t dmsg)
-{
-	dispatch_kevent_t dk;
-	dispatch_mach_reply_refs_t dmr;
-	dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct;
-	pthread_priority_t mp, pp;
-
-	dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s));
-	dk->dk_kevent = type->ke;
-	dk->dk_kevent.ident = reply_port;
-	dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_ONESHOT;
-	dk->dk_kevent.udata = (uintptr_t)dk;
-	TAILQ_INIT(&dk->dk_sources);
-	if (!dm->ds_is_direct_kevent) {
-		dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE;
-		dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED);
-	}
-
-	dmr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_reply_refs_s));
-	dmr->dr_source_wref = _dispatch_ptr2wref(dm);
-	dmr->dmr_dkev = dk;
-	dmr->dmr_reply = reply_port;
-	if (dmsg->dmsg_voucher) {
-		dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher);
-	}
-	dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority;
-	// make reply context visible to leaks rdar://11777199
-	dmr->dmr_ctxt = dmsg->do_ctxt;
-
-	pp = dm->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-	if (pp && dm->ds_is_direct_kevent) {
-		mp = dmsg->dmsg_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
-		if (pp < mp) pp = mp;
-		pp |= dm->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
-	} else {
-		pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-	}
-
-	_dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p",
-			reply_port, dmsg->do_ctxt);
-	uint32_t flags;
-	bool do_resume = _dispatch_kevent_register(&dmr->dmr_dkev, pp, &flags);
-	TAILQ_INSERT_TAIL(&dmr->dmr_dkev->dk_sources, (dispatch_source_refs_t)dmr,
-			dr_list);
-	_dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-	if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) {
-		DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev,
-				"Reply already registered");
-	}
-	TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list);
-	_dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-	if (do_resume && _dispatch_kevent_resume(dmr->dmr_dkev, flags, 0)) {
-		return _dispatch_mach_reply_kevent_unregister(dm, dmr,
-				DKEV_UNREGISTER_DISCONNECTED|DKEV_UNREGISTER_REPLY_REMOVE);
-	}
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm)
-{
-	DISPATCH_ASSERT_ON_MANAGER_QUEUE();
-	dispatch_kevent_t dk = dm->dm_dkev;
-	dm->dm_dkev = NULL;
-	TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dm->dm_refs,
-			dr_list);
-	dm->ds_pending_data_mask &= ~(unsigned long)
-			(DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD);
-	_dispatch_kevent_unregister(dk,
-			DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD, 0);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_notification_kevent_register(dispatch_mach_t dm,
-		mach_port_t send)
-{
-	DISPATCH_ASSERT_ON_MANAGER_QUEUE();
-	dispatch_kevent_t dk;
-
-	dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s));
-	dk->dk_kevent = _dispatch_source_type_mach_send.ke;
-	dk->dk_kevent.ident = send;
-	dk->dk_kevent.flags |= EV_ADD|EV_ENABLE;
-	dk->dk_kevent.fflags = DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD;
-	dk->dk_kevent.udata = (uintptr_t)dk;
-	TAILQ_INIT(&dk->dk_sources);
-
-	dm->ds_pending_data_mask |= dk->dk_kevent.fflags;
-
-	uint32_t flags;
-	bool do_resume = _dispatch_kevent_register(&dk,
-			_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, &flags);
-	TAILQ_INSERT_TAIL(&dk->dk_sources,
-			(dispatch_source_refs_t)dm->dm_refs, dr_list);
-	dm->dm_dkev = dk;
-	if (do_resume && _dispatch_kevent_resume(dm->dm_dkev, flags, 0)) {
-		_dispatch_mach_notification_kevent_unregister(dm);
-	}
-}
-
-static mach_port_t
-_dispatch_get_thread_reply_port(void)
-{
-	mach_port_t reply_port, mrp = _dispatch_get_thread_mig_reply_port();
-	if (mrp) {
-		reply_port = mrp;
-		_dispatch_debug("machport[0x%08x]: borrowed thread sync reply port",
-				reply_port);
-	} else {
-		reply_port = mach_reply_port();
-		_dispatch_set_thread_mig_reply_port(reply_port);
-		_dispatch_debug("machport[0x%08x]: allocated thread sync reply port",
-				reply_port);
-	}
-	_dispatch_debug_machport(reply_port);
-	return reply_port;
-}
-
-static void
-_dispatch_clear_thread_reply_port(mach_port_t reply_port)
-{
-	mach_port_t mrp = _dispatch_get_thread_mig_reply_port();
-	if (reply_port != mrp) {
-		if (mrp) {
-			_dispatch_debug("machport[0x%08x]: did not clear thread sync reply "
-					"port (found 0x%08x)", reply_port, mrp);
-		}
-		return;
-	}
-	_dispatch_set_thread_mig_reply_port(MACH_PORT_NULL);
-	_dispatch_debug_machport(reply_port);
-	_dispatch_debug("machport[0x%08x]: cleared thread sync reply port",
-			reply_port);
-}
-
-static void
-_dispatch_set_thread_reply_port(mach_port_t reply_port)
-{
-	_dispatch_debug_machport(reply_port);
-	mach_port_t mrp = _dispatch_get_thread_mig_reply_port();
-	if (mrp) {
-		kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
-				MACH_PORT_RIGHT_RECEIVE, -1);
-		DISPATCH_VERIFY_MIG(kr);
-		dispatch_assume_zero(kr);
-		_dispatch_debug("machport[0x%08x]: deallocated sync reply port "
-				"(found 0x%08x)", reply_port, mrp);
-	} else {
-		_dispatch_set_thread_mig_reply_port(reply_port);
-		_dispatch_debug("machport[0x%08x]: restored thread sync reply port",
-				reply_port);
-	}
-}
-
-static inline mach_port_t
-_dispatch_mach_msg_get_remote_port(dispatch_object_t dou)
-{
-	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
-	mach_port_t remote = hdr->msgh_remote_port;
-	return remote;
-}
-
-static inline mach_port_t
-_dispatch_mach_msg_get_reply_port(dispatch_object_t dou)
-{
-	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
-	mach_port_t local = hdr->msgh_local_port;
-	if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) !=
-			MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL;
-	return local;
-}
-
-static inline void
-_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err,
-		unsigned long reason)
-{
-	dispatch_assert_zero(reason & ~(unsigned long)code_emask);
-	dmsg->dmsg_error = ((err || !reason) ? err :
-			 err_local|err_sub(0x3e0)|(mach_error_t)reason);
-}
-
-static inline unsigned long
-_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr)
-{
-	mach_error_t err = dmsg->dmsg_error;
-
-	dmsg->dmsg_error = 0;
-	if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) {
-		*err_ptr = 0;
-		return err_get_code(err);
-	}
-	*err_ptr = err;
-	return err ? DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT;
-}
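-
-// Worked example of the packing: _dispatch_mach_msg_set_reason(dmsg, 0,
-// DISPATCH_MACH_MESSAGE_NOT_SENT) stores err_local|err_sub(0x3e0)|reason,
-// i.e. system = local, subsystem = 0x3e0 (private to dispatch), code =
-// the reason. _dispatch_mach_msg_get_reason() recognizes exactly that
-// shape and returns the reason with *err_ptr = 0; anything else is a
-// genuine mach_error_t, mapped to DISPATCH_MACH_MESSAGE_SEND_FAILED when
-// nonzero and DISPATCH_MACH_MESSAGE_SENT when zero.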
-
-static void
-_dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr,
-		_dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr, mach_msg_size_t siz)
-{
-	_dispatch_debug_machport(hdr->msgh_remote_port);
-	_dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x",
-			hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port);
-	bool canceled = (dm->dq_atomic_flags & DSF_CANCELED);
-	if (!dmr && canceled) {
-		// message received after cancellation, _dispatch_mach_kevent_merge is
-		// responsible for mach channel source state (e.g. deferred deletion)
-		return _dispatch_kevent_mach_msg_destroy(ke, hdr);
-	}
-	dispatch_mach_msg_t dmsg;
-	voucher_t voucher;
-	pthread_priority_t priority;
-	void *ctxt = NULL;
-	if (dmr) {
-		_voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher
-		voucher = dmr->dmr_voucher;
-		dmr->dmr_voucher = NULL; // transfer reference
-		priority = dmr->dmr_priority;
-		ctxt = dmr->dmr_ctxt;
-		unsigned int options = DKEV_DISPOSE_IMMEDIATE_DELETE;
-		options |= DKEV_UNREGISTER_REPLY_REMOVE;
-		options |= DKEV_UNREGISTER_WAKEUP;
-		if (canceled) options |= DKEV_UNREGISTER_DISCONNECTED;
-		_dispatch_mach_reply_kevent_unregister(dm, dmr, options);
-		ke->flags |= EV_DELETE; // remember that unregister deleted the event
-		if (canceled) return;
-	} else {
-		voucher = voucher_create_with_mach_msg(hdr);
-		priority = _voucher_get_priority(voucher);
-	}
-	dispatch_mach_msg_destructor_t destructor;
-	destructor = (hdr == _dispatch_kevent_mach_msg_buf(ke)) ?
-			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT :
-			DISPATCH_MACH_MSG_DESTRUCTOR_FREE;
-	dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
-	if (hdr == _dispatch_kevent_mach_msg_buf(ke)) {
-		_dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, (uint64_t)hdr,
-				(uint64_t)dmsg->dmsg_buf);
-	}
-	dmsg->dmsg_voucher = voucher;
-	dmsg->dmsg_priority = priority;
-	dmsg->do_ctxt = ctxt;
-	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED);
-	_dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg);
-	_dispatch_voucher_ktrace_dmsg_push(dmsg);
-	return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_mach_msg_t
-_dispatch_mach_msg_reply_recv(dispatch_mach_t dm,
-		dispatch_mach_reply_refs_t dmr, mach_port_t reply_port)
-{
-	if (slowpath(!MACH_PORT_VALID(reply_port))) {
-		DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port");
-	}
-	void *ctxt = dmr->dmr_ctxt;
-	mach_msg_header_t *hdr, *hdr2 = NULL;
-	void *hdr_copyout_addr;
-	mach_msg_size_t siz, msgsiz = 0;
-	mach_msg_return_t kr;
-	mach_msg_option_t options;
-	siz = mach_vm_round_page(_dispatch_mach_recv_msg_size +
-			dispatch_mach_trailer_size);
-	hdr = alloca(siz);
-	for (mach_vm_address_t p = mach_vm_trunc_page(
-			(mach_vm_address_t)hdr + vm_page_size);
-			p < (mach_vm_address_t)hdr + siz; p += vm_page_size) {
-		*(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
-	}
-	options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER);
-retry:
-	_dispatch_debug_machport(reply_port);
-	_dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port,
-			(options & MACH_RCV_TIMEOUT) ? "poll" : "wait");
-	kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE,
-			MACH_PORT_NULL);
-	hdr_copyout_addr = hdr;
-	_dispatch_debug_machport(reply_port);
-	_dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) "
-			"returned: %s - 0x%x", reply_port, siz, options,
-			mach_error_string(kr), kr);
-	switch (kr) {
-	case MACH_RCV_TOO_LARGE:
-		if (!fastpath(hdr->msgh_size <= UINT_MAX -
-				dispatch_mach_trailer_size)) {
-			DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message");
-		}
-		if (options & MACH_RCV_LARGE) {
-			msgsiz = hdr->msgh_size + dispatch_mach_trailer_size;
-			hdr2 = malloc(msgsiz);
-			if (dispatch_assume(hdr2)) {
-				hdr = hdr2;
-				siz = msgsiz;
-			}
-			options |= MACH_RCV_TIMEOUT;
-			options &= ~MACH_RCV_LARGE;
-			goto retry;
-		}
-		_dispatch_log("BUG in libdispatch client: "
-				"dispatch_mach_send_and_wait_for_reply: dropped message too "
-				"large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id,
-				hdr->msgh_size);
-		break;
-	case MACH_RCV_INVALID_NAME: // rdar://problem/21963848
-	case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327
-	case MACH_RCV_PORT_DIED:
-		// channel was disconnected/canceled and reply port destroyed
-		_dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: "
-				"%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr);
-		goto out;
-	case MACH_MSG_SUCCESS:
-		if (hdr->msgh_remote_port) {
-			_dispatch_debug_machport(hdr->msgh_remote_port);
-		}
-		_dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, "
-				"reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id,
-				hdr->msgh_size, hdr->msgh_remote_port);
-		siz = hdr->msgh_size + dispatch_mach_trailer_size;
-		if (hdr2 && siz < msgsiz) {
-			void *shrink = realloc(hdr2, siz); // shrink to received size
-			if (shrink) hdr = hdr2 = shrink;
-		}
-		break;
-	default:
-		dispatch_assume_zero(kr);
-		break;
-	}
-	_dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port);
-	hdr->msgh_local_port = MACH_PORT_NULL;
-	if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) {
-		if (!kr) mach_msg_destroy(hdr);
-		goto out;
-	}
-	dispatch_mach_msg_t dmsg;
-	dispatch_mach_msg_destructor_t destructor = (!hdr2) ?
-			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT :
-			DISPATCH_MACH_MSG_DESTRUCTOR_FREE;
-	dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
-	if (!hdr2 || hdr != hdr_copyout_addr) {
-		_dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move,
-				(uint64_t)hdr_copyout_addr,
-				(uint64_t)_dispatch_mach_msg_get_msg(dmsg));
-	}
-	dmsg->do_ctxt = ctxt;
-	return dmsg;
-out:
-	free(hdr2);
-	return NULL;
-}
-
-static inline void
-_dispatch_mach_msg_reply_received(dispatch_mach_t dm,
-		dispatch_mach_reply_refs_t dmr, mach_port_t local_port)
-{
-	bool removed = _dispatch_mach_reply_tryremove(dm, dmr);
-	if (!MACH_PORT_VALID(local_port) || !removed) {
-		// port moved/destroyed during receive, or reply waiter was never
-		// registered or already removed (disconnected)
-		return;
-	}
-	mach_port_t reply_port = _dispatch_mach_reply_get_reply_port(dmr);
-	_dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p",
-			reply_port, dmr->dmr_ctxt);
-	if (_dispatch_mach_reply_is_reply_port_owned(dmr)) {
-		_dispatch_set_thread_reply_port(reply_port);
-		if (local_port != reply_port) {
-			DISPATCH_CLIENT_CRASH(local_port,
-					"Reply received on unexpected port");
-		}
-		return;
-	}
-	mach_msg_header_t *hdr;
-	dispatch_mach_msg_t dmsg;
-	dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
-			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
-	hdr->msgh_local_port = local_port;
-	dmsg->dmsg_voucher = dmr->dmr_voucher;
-	dmr->dmr_voucher = NULL;  // transfer reference
-	dmsg->dmsg_priority = dmr->dmr_priority;
-	dmsg->do_ctxt = dmr->dmr_ctxt;
-	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED);
-	return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority);
-}
-
-static inline void
-_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port,
-		mach_port_t remote_port)
-{
-	mach_msg_header_t *hdr;
-	dispatch_mach_msg_t dmsg;
-	dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
-			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
-	if (local_port) hdr->msgh_local_port = local_port;
-	if (remote_port) hdr->msgh_remote_port = remote_port;
-	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED);
-	_dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ?
-			local_port : remote_port, local_port ? "receive" : "send");
-	return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority);
-}
-
-static inline dispatch_mach_msg_t
-_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou,
-		dispatch_mach_reply_refs_t dmr)
-{
-	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
-	mach_port_t reply_port = dmsg ? dmsg->dmsg_reply :
-			_dispatch_mach_reply_get_reply_port(dmr);
-	voucher_t v;
-
-	if (!reply_port) {
-		if (!dmsg) {
-			v = dmr->dmr_voucher;
-			dmr->dmr_voucher = NULL; // transfer reference
-			if (v) _voucher_release(v);
-		}
-		return NULL;
-	}
-
-	if (dmsg) {
-		v = dmsg->dmsg_voucher;
-		if (v) _voucher_retain(v);
-	} else {
-		v = dmr->dmr_voucher;
-		dmr->dmr_voucher = NULL; // transfer reference
-	}
-
-	if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) &&
-			(dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) ||
-			(dmr && !dmr->dmr_dkev &&
-			_dispatch_mach_reply_is_reply_port_owned(dmr))) {
-		if (v) _voucher_release(v);
-		// deallocate owned reply port to break _dispatch_mach_msg_reply_recv
-		// out of waiting in mach_msg(MACH_RCV_MSG)
-		kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port,
-				MACH_PORT_RIGHT_RECEIVE, -1);
-		DISPATCH_VERIFY_MIG(kr);
-		dispatch_assume_zero(kr);
-		return NULL;
-	}
-
-	mach_msg_header_t *hdr;
-	dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
-			DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
-	dmsgr->dmsg_voucher = v;
-	hdr->msgh_local_port = reply_port;
-	if (dmsg) {
-		dmsgr->dmsg_priority = dmsg->dmsg_priority;
-		dmsgr->do_ctxt = dmsg->do_ctxt;
-	} else {
-		dmsgr->dmsg_priority = dmr->dmr_priority;
-		dmsgr->do_ctxt = dmr->dmr_ctxt;
-	}
-	_dispatch_mach_msg_set_reason(dmsgr, 0, DISPATCH_MACH_DISCONNECTED);
-	_dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p",
-			hdr->msgh_local_port, dmsgr->do_ctxt);
-	return dmsgr;
-}
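-
-// Note on the owned-reply-port case above: a thread blocked in
-// _dispatch_mach_msg_reply_recv() is parked inside mach_msg(MACH_RCV_MSG)
-// on that port, so there is no queue to push a disconnect message to.
-// Dropping the receive right instead makes the blocked mach_msg() return
-// MACH_RCV_PORT_DIED (or _PORT_CHANGED/_INVALID_NAME), which that function
-// already treats as "channel disconnected" and unwinds from.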
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou)
-{
-	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
-	mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
-	mach_msg_option_t msg_opts = dmsg->dmsg_options;
-	_dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, "
-			"msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x",
-			msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
-			msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply);
-	unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ?
-			0 : DISPATCH_MACH_MESSAGE_NOT_SENT;
-	dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL);
-	_dispatch_mach_msg_set_reason(dmsg, 0, reason);
-	_dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority);
-	if (dmsgr) _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority);
-}
-
-DISPATCH_NOINLINE
-static uint32_t
-_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou,
-		dispatch_mach_reply_refs_t dmr, pthread_priority_t pp,
-		dispatch_mach_send_invoke_flags_t send_flags)
-{
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL;
-	voucher_t voucher = dmsg->dmsg_voucher;
-	mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL;
-	uint32_t send_status = 0;
-	bool clear_voucher = false, kvoucher_move_send = false;
-	mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
-	bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
-			MACH_MSG_TYPE_MOVE_SEND_ONCE);
-	mach_port_t reply_port = dmsg->dmsg_reply;
-	if (!is_reply) {
-		dr->dm_needs_mgr = 0;
-		if (unlikely(dr->dm_checkin && dmsg != dr->dm_checkin)) {
-			// send initial checkin message
-			if (dm->dm_dkev && slowpath(_dispatch_queue_get_current() !=
-					&_dispatch_mgr_q)) {
-				// send kevent must be uninstalled on the manager queue
-				dr->dm_needs_mgr = 1;
-				goto out;
-			}
-			if (unlikely(!_dispatch_mach_msg_send(dm,
-					dr->dm_checkin, NULL, pp, DM_SEND_INVOKE_NONE))) {
-				goto out;
-			}
-			dr->dm_checkin = NULL;
-		}
-	}
-	mach_msg_return_t kr = 0;
-	mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options;
-	if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) {
-		mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED;
-		opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK);
-		if (!is_reply) {
-			if (dmsg != dr->dm_checkin) {
-				msg->msgh_remote_port = dr->dm_send;
-			}
-			if (_dispatch_queue_get_current() == &_dispatch_mgr_q) {
-				if (slowpath(!dm->dm_dkev)) {
-					_dispatch_mach_notification_kevent_register(dm,
-							msg->msgh_remote_port);
-				}
-				if (fastpath(dm->dm_dkev)) {
-					if (DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) {
-						goto out;
-					}
-					opts |= MACH_SEND_NOTIFY;
-				}
-			}
-			opts |= MACH_SEND_TIMEOUT;
-			if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) {
-				ipc_kvoucher = _voucher_create_mach_voucher_with_priority(
-						voucher, dmsg->dmsg_priority);
-			}
-			_dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg);
-			if (ipc_kvoucher) {
-				kvoucher_move_send = true;
-				clear_voucher = _voucher_mach_msg_set_mach_voucher(msg,
-						ipc_kvoucher, kvoucher_move_send);
-			} else {
-				clear_voucher = _voucher_mach_msg_set(msg, voucher);
-			}
-			if (pp && _dispatch_evfilt_machport_direct_enabled) {
-				opts |= MACH_SEND_OVERRIDE;
-				msg_priority = (mach_msg_priority_t)pp;
-			}
-		}
-		_dispatch_debug_machport(msg->msgh_remote_port);
-		if (reply_port) _dispatch_debug_machport(reply_port);
-		if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) {
-			if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) {
-				_dispatch_clear_thread_reply_port(reply_port);
-			}
-			_dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg,
-					msg_opts);
-		}
-		kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0,
-				msg_priority);
-		_dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, "
-				"opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: "
-				"%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
-				opts, msg_opts, msg->msgh_voucher_port, reply_port,
-				mach_error_string(kr), kr);
-		if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) {
-			_dispatch_mach_reply_waiter_unregister(dm, dmr,
-					DKEV_UNREGISTER_REPLY_REMOVE);
-		}
-		if (clear_voucher) {
-			if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) {
-				DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption");
-			}
-			mach_voucher_t kv;
-			kv = _voucher_mach_msg_clear(msg, kvoucher_move_send);
-			if (kvoucher_move_send) ipc_kvoucher = kv;
-		}
-	}
-	if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) {
-		if (opts & MACH_SEND_NOTIFY) {
-			_dispatch_debug("machport[0x%08x]: send-possible notification "
-					"armed", (mach_port_t)dm->dm_dkev->dk_kevent.ident);
-			DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) = 1;
-		} else {
-			// send kevent must be installed on the manager queue
-			dr->dm_needs_mgr = 1;
-		}
-		if (ipc_kvoucher) {
-			_dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher);
-			voucher_t ipc_voucher;
-			ipc_voucher = _voucher_create_with_priority_and_mach_voucher(
-					voucher, dmsg->dmsg_priority, ipc_kvoucher);
-			_dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]",
-					ipc_voucher, dmsg, voucher);
-			if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
-			dmsg->dmsg_voucher = ipc_voucher;
-		}
-		goto out;
-	} else if (ipc_kvoucher && (kr || !kvoucher_move_send)) {
-		_voucher_dealloc_mach_voucher(ipc_kvoucher);
-	}
-	if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port &&
-			!(dm->ds_dkev && dm->ds_dkev->dk_kevent.ident == reply_port)) {
-		if (!dm->ds_is_direct_kevent &&
-				_dispatch_queue_get_current() != &_dispatch_mgr_q) {
-			// reply receive kevent must be installed on the manager queue
-			dr->dm_needs_mgr = 1;
-			dmsg->dmsg_options = msg_opts | DISPATCH_MACH_REGISTER_FOR_REPLY;
-			goto out;
-		}
-		_dispatch_mach_reply_kevent_register(dm, reply_port, dmsg);
-	}
-	if (unlikely(!is_reply && dmsg == dr->dm_checkin && dm->dm_dkev)) {
-		_dispatch_mach_notification_kevent_unregister(dm);
-	}
-	if (slowpath(kr)) {
-		// Send failed, so reply was never registered <rdar://problem/14309159>
-		dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL);
-	}
-	_dispatch_mach_msg_set_reason(dmsg, kr, 0);
-	if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) &&
-			(msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) {
-		// Return sent message synchronously <rdar://problem/25947334>
-		send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT;
-	} else {
-		_dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority);
-	}
-	if (dmsgr) _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority);
-	send_status |= DM_SEND_STATUS_SUCCESS;
-out:
-	return send_status;
-}
-
-#pragma mark -
-#pragma mark dispatch_mach_send_refs_t
-
-static void _dispatch_mach_cancel(dispatch_mach_t dm);
-static void _dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm,
-		pthread_priority_t pp);
-
-DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dm_state_get_override(uint64_t dm_state)
-{
-	dm_state &= DISPATCH_MACH_STATE_OVERRIDE_MASK;
-	return (pthread_priority_t)(dm_state >> 32);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline uint64_t
-_dm_state_override_from_priority(pthread_priority_t pp)
-{
-	uint64_t pp_state = pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-	return pp_state << 32;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dm_state_needs_override(uint64_t dm_state, uint64_t pp_state)
-{
-	return (pp_state > (dm_state & DISPATCH_MACH_STATE_OVERRIDE_MASK));
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline uint64_t
-_dm_state_merge_override(uint64_t dm_state, uint64_t pp_state)
-{
-	if (_dm_state_needs_override(dm_state, pp_state)) {
-		dm_state &= ~DISPATCH_MACH_STATE_OVERRIDE_MASK;
-		dm_state |= pp_state;
-		dm_state |= DISPATCH_MACH_STATE_DIRTY;
-		dm_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-	}
-	return dm_state;
-}
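-
-// dm_state layout, as a hedged sketch: the QoS-class bits of the highest
-// override observed so far occupy the high 32 bits (hence the <<32/>>32
-// above), while the low half holds the send-lock owner and the DIRTY /
-// PENDING_BARRIER / RECEIVED_OVERRIDE state bits. Merging an override is
-// then an unsigned compare on the high word: a lower or equal priority is
-// dropped, a higher one replaces the field and sets
-// DIRTY|RECEIVED_OVERRIDE so the current drainer notices it.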
-
-#define _dispatch_mach_send_push_update_tail(dr, tail) \
-		os_mpsc_push_update_tail(dr, dm, tail, do_next)
-#define _dispatch_mach_send_push_update_head(dr, head) \
-		os_mpsc_push_update_head(dr, dm, head)
-#define _dispatch_mach_send_get_head(dr) \
-		os_mpsc_get_head(dr, dm)
-#define _dispatch_mach_send_unpop_head(dr, dc, dc_next) \
-		os_mpsc_undo_pop_head(dr, dm, dc, dc_next, do_next)
-#define _dispatch_mach_send_pop_head(dr, head) \
-		os_mpsc_pop_head(dr, dm, head, do_next)
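-
-// These expand to the lock-free multi-producer/single-consumer list that
-// backs the send queue. A minimal sketch of the push half, assuming a
-// do_next link field (the real macros are more general):
-//
-//	prev = os_atomic_xchg(&dr->dm_tail, dou._do, release);
-//	if (likely(prev)) {
-//		os_atomic_store2o(prev, do_next, dou._do, relaxed);
-//	} else {
-//		dr->dm_head = dou._do;	// queue was empty: publish the head
-//	}
-//
-// Only the single drainer walks from dm_head; unpop_head exists so that a
-// partial drain can put the current item back without racing producers.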
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dr,
-		dispatch_object_t dou)
-{
-	if (_dispatch_mach_send_push_update_tail(dr, dou._do)) {
-		_dispatch_mach_send_push_update_head(dr, dou._do);
-		return true;
-	}
-	return false;
-}
-
-DISPATCH_NOINLINE
-static bool
-_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags,
-		dispatch_mach_send_invoke_flags_t send_flags)
-{
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	dispatch_mach_reply_refs_t dmr;
-	dispatch_mach_msg_t dmsg;
-	struct dispatch_object_s *dc = NULL, *next_dc = NULL;
-	pthread_priority_t pp = _dm_state_get_override(dr->dm_state);
-	uint64_t old_state, new_state;
-	uint32_t send_status;
-	bool needs_mgr, disconnecting, returning_send_result = false;
-
-again:
-	needs_mgr = false; disconnecting = false;
-	while (dr->dm_tail) {
-		dc = _dispatch_mach_send_get_head(dr);
-		do {
-			dispatch_mach_send_invoke_flags_t sf = send_flags;
-			// Only request immediate send result for the first message
-			send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
-			next_dc = _dispatch_mach_send_pop_head(dr, dc);
-			if (_dispatch_object_has_type(dc,
-					DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
-				if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
-					goto partial_drain;
-				}
-				_dispatch_continuation_pop(dc, dm->_as_dq, flags);
-				continue;
-			}
-			if (_dispatch_object_is_slow_item(dc)) {
-				dmsg = ((dispatch_continuation_t)dc)->dc_data;
-				dmr = ((dispatch_continuation_t)dc)->dc_other;
-			} else if (_dispatch_object_has_vtable(dc)) {
-				dmsg = (dispatch_mach_msg_t)dc;
-				dmr = NULL;
-			} else {
-				if ((dm->dm_dkev || !dm->ds_is_direct_kevent) &&
-						(_dispatch_queue_get_current() != &_dispatch_mgr_q)) {
-					// send kevent must be uninstalled on the manager queue
-					needs_mgr = true;
-					goto partial_drain;
-				}
-				if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) {
-					disconnecting = true;
-					goto partial_drain;
-				}
-				continue;
-			}
-			_dispatch_voucher_ktrace_dmsg_pop(dmsg);
-			if (unlikely(dr->dm_disconnect_cnt ||
-					(dm->dq_atomic_flags & DSF_CANCELED))) {
-				_dispatch_mach_msg_not_sent(dm, dmsg);
-				continue;
-			}
-			send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, pp, sf);
-			if (unlikely(!send_status)) {
-				goto partial_drain;
-			}
-			if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) {
-				returning_send_result = true;
-			}
-		} while ((dc = next_dc));
-	}
-
-	os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, {
-		if (old_state & DISPATCH_MACH_STATE_DIRTY) {
-			new_state = old_state;
-			new_state &= ~DISPATCH_MACH_STATE_DIRTY;
-			new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-			new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
-		} else {
-			// unlock
-			new_state = 0;
-		}
-	});
-	goto out;
-
-partial_drain:
-	// partial drain: put the undrained items back and fix up dm_state
-	_dispatch_mach_send_unpop_head(dr, dc, next_dc);
-
-	if (_dispatch_object_has_type(dc,
-			DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) {
-		os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, {
-			new_state = old_state;
-			new_state |= DISPATCH_MACH_STATE_DIRTY;
-			new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER;
-			new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
-		});
-	} else {
-		os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, {
-			new_state = old_state;
-			if (old_state & (DISPATCH_MACH_STATE_DIRTY |
-					DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) {
-				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
-				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
-			} else {
-				new_state |= DISPATCH_MACH_STATE_DIRTY;
-				new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK;
-			}
-		});
-	}
-
-out:
-	if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) {
-		// Ensure that the root queue sees that this thread was overridden.
-		_dispatch_set_defaultpriority_override();
-	}
-
-	if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) {
-		os_atomic_thread_fence(acquire);
-		pp = _dm_state_get_override(new_state);
-		goto again;
-	}
-
-	if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
-		pp = _dm_state_get_override(new_state);
-		_dispatch_mach_send_barrier_drain_push(dm, pp);
-	} else {
-		if (needs_mgr) {
-			pp = _dm_state_get_override(new_state);
-		} else {
-			pp = 0;
-		}
-		if (!disconnecting) dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH);
-	}
-	return returning_send_result;
-}
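
`os_atomic_rmw_loop2o` expands to a weak compare-exchange loop over the 64-bit `dm_state` word. The unlock step above is a good illustration of the idiom: if a producer set the DIRTY bit while we were draining, the drainer keeps the lock, clears DIRTY, and loops; otherwise it releases everything in one store. A hedged sketch of just that step, with an invented one-bit layout rather than libdispatch's real state masks:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define STATE_DIRTY 0x1ull // illustrative bit; not libdispatch's layout

// Returns true when the caller must drain again (it still owns the
// lock); returns false after a full unlock. Mirrors the rmw loop at
// the end of _dispatch_mach_send_drain() above.
static bool
drain_unlock_or_again(_Atomic uint64_t *state)
{
	uint64_t old = atomic_load_explicit(state, memory_order_relaxed);
	uint64_t new;
	do {
		if (old & STATE_DIRTY) {
			new = old & ~STATE_DIRTY; // keep lock ownership bits
		} else {
			new = 0;                  // unlock completely
		}
	} while (!atomic_compare_exchange_weak_explicit(state, &old, new,
			memory_order_release, memory_order_relaxed));
	return (old & STATE_DIRTY) != 0;
}
```

When this returns true, the real code issues an acquire fence before re-draining (the `os_atomic_thread_fence(acquire)` before `goto again`), pairing with the release ordering of the producers' pushes.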
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_send_invoke(dispatch_mach_t dm,
-		dispatch_invoke_flags_t flags,
-		dispatch_mach_send_invoke_flags_t send_flags)
-{
-	dispatch_lock_owner tid_self = _dispatch_tid_self();
-	uint64_t old_state, new_state;
-	pthread_priority_t pp_floor;
-
-	uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK;
-	uint64_t canlock_state = 0;
-
-	if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) {
-		canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
-		canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER;
-	} else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) {
-		canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER;
-	}
-
-	if (flags & DISPATCH_INVOKE_MANAGER_DRAIN) {
-		pp_floor = 0;
-	} else {
-		// _dispatch_queue_class_invoke will have applied the queue override
-		// (if any) before we get here. Otherwise, use the default base
-		// priority as an estimate of the priority we already asked for.
-		pp_floor = dm->_as_dq->dq_override;
-		if (!pp_floor) {
-			pp_floor = _dispatch_get_defaultpriority();
-			pp_floor &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-		}
-	}
-
-retry:
-	os_atomic_rmw_loop2o(dm->dm_refs, dm_state, old_state, new_state, acquire, {
-		new_state = old_state;
-		if (unlikely((old_state & canlock_mask) != canlock_state)) {
-			if (!(send_flags & DM_SEND_INVOKE_FLUSH)) {
-				os_atomic_rmw_loop_give_up(break);
-			}
-			new_state |= DISPATCH_MACH_STATE_DIRTY;
-		} else {
-			if (likely(pp_floor)) {
-				pthread_priority_t pp = _dm_state_get_override(old_state);
-				if (unlikely(pp > pp_floor)) {
-					os_atomic_rmw_loop_give_up({
-						_dispatch_wqthread_override_start(tid_self, pp);
-						// Ensure that the root queue sees
-						// that this thread was overridden.
-						_dispatch_set_defaultpriority_override();
-						pp_floor = pp;
-						goto retry;
-					});
-				}
-			}
-			new_state |= tid_self;
-			new_state &= ~DISPATCH_MACH_STATE_DIRTY;
-			new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-			new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
-		}
-	});
-
-	if (unlikely((old_state & canlock_mask) != canlock_state)) {
-		return;
-	}
-	if (send_flags & DM_SEND_INVOKE_CANCEL) {
-		_dispatch_mach_cancel(dm);
-	}
-	_dispatch_mach_send_drain(dm, flags, send_flags);
-}
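
The acquire loop above doubles as a trylock: the low bits of `dm_state` hold the owner's thread id, so locking is just installing `tid_self` when no owner is present, and a contender that must not be lost (`DM_SEND_INVOKE_FLUSH`) leaves the DIRTY bit behind for the owner instead. A simplified sketch of that shape follows; the real code additionally folds the pending-barrier state into the can-lock test and propagates priority overrides to the lock owner:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LOCK_OWNER_MASK 0x00000000ffffffffull // illustrative layout:
#define LOCK_DIRTY_BIT  0x0000000100000000ull // low 32 bits = owner tid

// Try to take the state lock by installing our tid. If it is already
// owned and `flush` is set, mark the state dirty so the owner drains
// again before unlocking; either way, report whether we got the lock.
static bool
state_trylock_or_mark_dirty(_Atomic uint64_t *state, uint32_t tid,
		bool flush)
{
	uint64_t old = atomic_load_explicit(state, memory_order_relaxed);
	uint64_t new;
	do {
		if (old & LOCK_OWNER_MASK) {
			if (!flush) return false;    // give up, nothing to record
			new = old | LOCK_DIRTY_BIT;  // owner will see this
		} else {
			new = (old | tid) & ~LOCK_DIRTY_BIT;
		}
	} while (!atomic_compare_exchange_weak_explicit(state, &old, new,
			memory_order_acquire, memory_order_relaxed));
	return !(old & LOCK_OWNER_MASK);
}
```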
-
-DISPATCH_NOINLINE
-void
-_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc,
-		dispatch_invoke_flags_t flags)
-{
-	dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current();
-	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-	dispatch_thread_frame_s dtf;
-
-	DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY);
-	DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER);
-	// hide the mach channel (see _dispatch_mach_barrier_invoke comment)
-	_dispatch_thread_frame_stash(&dtf);
-	_dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags,{
-		_dispatch_mach_send_invoke(dm, flags,
-				DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER);
-	});
-	_dispatch_thread_frame_unstash(&dtf);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm,
-		pthread_priority_t pp)
-{
-	dispatch_continuation_t dc = _dispatch_continuation_alloc();
-
-	dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN);
-	dc->dc_func = NULL;
-	dc->dc_ctxt = NULL;
-	dc->dc_voucher = DISPATCH_NO_VOUCHER;
-	dc->dc_priority = DISPATCH_NO_PRIORITY;
-	return _dispatch_queue_push(dm->_as_dq, dc, pp);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc,
-		pthread_priority_t pp)
-{
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	uint64_t pp_state, old_state, new_state, state_flags = 0;
-	dispatch_lock_owner owner;
-	bool wakeup;
-
-	// <rdar://problem/25896179> when pushing a send barrier that destroys
-	// the last reference to this channel, and the send queue is already
-	// draining on another thread, the send barrier may run as soon as
-	// _dispatch_mach_send_push_inline() returns.
-	_dispatch_retain(dm);
-	pp_state = _dm_state_override_from_priority(pp);
-
-	wakeup = _dispatch_mach_send_push_inline(dr, dc);
-	if (wakeup) {
-		state_flags = DISPATCH_MACH_STATE_DIRTY;
-		if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) {
-			state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER;
-		}
-	}
-
-	if (state_flags) {
-		os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, {
-			new_state = _dm_state_merge_override(old_state, pp_state);
-			new_state |= state_flags;
-		});
-	} else {
-		os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, relaxed, {
-			new_state = _dm_state_merge_override(old_state, pp_state);
-			if (old_state == new_state) {
-				os_atomic_rmw_loop_give_up(break);
-			}
-		});
-	}
-
-	pp = _dm_state_get_override(new_state);
-	owner = _dispatch_lock_owner((dispatch_lock)old_state);
-	if (owner) {
-		if (_dm_state_needs_override(old_state, pp_state)) {
-			_dispatch_wqthread_override_start_check_owner(owner, pp,
-					&dr->dm_state_lock.dul_lock);
-		}
-		return _dispatch_release_tailcall(dm);
-	}
-
-	dispatch_wakeup_flags_t wflags = 0;
-	if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) {
-		_dispatch_mach_send_barrier_drain_push(dm, pp);
-	} else if (wakeup || dr->dm_disconnect_cnt ||
-			(dm->dq_atomic_flags & DSF_CANCELED)) {
-		wflags = DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME;
-	} else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
-		wflags = DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_CONSUME;
-	}
-	if (wflags) {
-		return dx_wakeup(dm, pp, wflags);
-	}
-	return _dispatch_release_tailcall(dm);
-}
-
-DISPATCH_NOINLINE
-static bool
-_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm,
-		dispatch_object_t dou, pthread_priority_t pp,
-		dispatch_mach_send_invoke_flags_t send_flags)
-{
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	dispatch_lock_owner tid_self = _dispatch_tid_self();
-	uint64_t pp_state, old_state, new_state, canlock_mask, state_flags = 0;
-	dispatch_lock_owner owner;
-
-	pp_state = _dm_state_override_from_priority(pp);
-	bool wakeup = _dispatch_mach_send_push_inline(dr, dou);
-	if (wakeup) {
-		state_flags = DISPATCH_MACH_STATE_DIRTY;
-	}
-
-	if (unlikely(dr->dm_disconnect_cnt ||
-			(dm->dq_atomic_flags & DSF_CANCELED))) {
-		os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, {
-			new_state = _dm_state_merge_override(old_state, pp_state);
-			new_state |= state_flags;
-		});
-		dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH);
-		return false;
-	}
-
-	canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK |
-			DISPATCH_MACH_STATE_PENDING_BARRIER;
-	if (state_flags) {
-		os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, seq_cst, {
-			new_state = _dm_state_merge_override(old_state, pp_state);
-			new_state |= state_flags;
-			if (likely((old_state & canlock_mask) == 0)) {
-				new_state |= tid_self;
-				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
-				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
-			}
-		});
-	} else {
-		os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, acquire, {
-			new_state = _dm_state_merge_override(old_state, pp_state);
-			if (new_state == old_state) {
-				os_atomic_rmw_loop_give_up(return false);
-			}
-			if (likely((old_state & canlock_mask) == 0)) {
-				new_state |= tid_self;
-				new_state &= ~DISPATCH_MACH_STATE_DIRTY;
-				new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE;
-				new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER;
-			}
-		});
-	}
-
-	owner = _dispatch_lock_owner((dispatch_lock)old_state);
-	if (owner) {
-		if (_dm_state_needs_override(old_state, pp_state)) {
-			_dispatch_wqthread_override_start_check_owner(owner, pp,
-					&dr->dm_state_lock.dul_lock);
-		}
-		return false;
-	}
-
-	if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) {
-		dx_wakeup(dm, pp, DISPATCH_WAKEUP_OVERRIDING);
-		return false;
-	}
-
-	// Ensure our message is still at the head of the queue and has not already
-	// been dequeued by another thread that raced us to the send queue lock.
-	// A plain load of the head and comparison against our object pointer is
-	// sufficient.
-	if (unlikely(!(wakeup && dou._do == dr->dm_head))) {
-		// Don't request immediate send result for messages we don't own
-		send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK;
-	}
-	return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags);
-}
-
-static void
-_dispatch_mach_merge_notification_kevent(dispatch_mach_t dm,
-		const _dispatch_kevent_qos_s *ke)
-{
-	if (!(ke->fflags & dm->ds_pending_data_mask)) {
-		return;
-	}
-	_dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN,
-			DM_SEND_INVOKE_FLUSH);
-}
-
-#pragma mark -
-#pragma mark dispatch_mach_t
-
-static inline mach_msg_option_t
-_dispatch_mach_checkin_options(void)
-{
-	mach_msg_option_t options = 0;
-#if DISPATCH_USE_CHECKIN_NOIMPORTANCE
-	options = MACH_SEND_NOIMPORTANCE; // <rdar://problem/16996737>
-#endif
-	return options;
-}
-
-
-static inline mach_msg_option_t
-_dispatch_mach_send_options(void)
-{
-	mach_msg_option_t options = 0;
-	return options;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline pthread_priority_t
-_dispatch_mach_priority_propagate(mach_msg_option_t options)
-{
-#if DISPATCH_USE_NOIMPORTANCE_QOS
-	if (options & MACH_SEND_NOIMPORTANCE) return 0;
-#else
-	(void)options;
-#endif
-	return _dispatch_priority_propagate();
-}
-
-DISPATCH_NOINLINE
-static bool
-_dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
-		dispatch_continuation_t dc_wait, mach_msg_option_t options)
-{
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) {
-		DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued");
-	}
-	dispatch_retain(dmsg);
-	pthread_priority_t priority = _dispatch_mach_priority_propagate(options);
-	options |= _dispatch_mach_send_options();
-	dmsg->dmsg_options = options;
-	mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
-	dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg);
-	bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
-			MACH_MSG_TYPE_MOVE_SEND_ONCE);
-	dmsg->dmsg_priority = priority;
-	dmsg->dmsg_voucher = _voucher_copy();
-	_dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg);
-
-	uint32_t send_status;
-	bool returning_send_result = false;
-	dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
-	if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) {
-		send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND;
-	}
-	if (is_reply && !dmsg->dmsg_reply && !dr->dm_disconnect_cnt &&
-			!(dm->dq_atomic_flags & DSF_CANCELED)) {
-		// replies are sent to a send-once right and don't need the send queue
-		dispatch_assert(!dc_wait);
-		send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags);
-		dispatch_assert(send_status);
-		returning_send_result = !!(send_status &
-				DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT);
-	} else {
-		_dispatch_voucher_ktrace_dmsg_push(dmsg);
-		priority &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
-		dispatch_object_t dou = { ._dmsg = dmsg };
-		if (dc_wait) dou._dc = dc_wait;
-		returning_send_result = _dispatch_mach_send_push_and_trydrain(dm, dou,
-				priority, send_flags);
-	}
-	if (returning_send_result) {
-		_dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg);
-		if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
-		dmsg->dmsg_voucher = NULL;
-		dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
-		dispatch_release(dmsg);
-	}
-	return returning_send_result;
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
-		mach_msg_option_t options)
-{
-	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
-	options &= ~DISPATCH_MACH_OPTIONS_MASK;
-	bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options);
-	dispatch_assert(!returned_send_result);
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
-		mach_msg_option_t options, dispatch_mach_send_flags_t send_flags,
-		dispatch_mach_reason_t *send_result, mach_error_t *send_error)
-{
-	if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
-		DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
-	}
-	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
-	options &= ~DISPATCH_MACH_OPTIONS_MASK;
-	options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
-	bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options);
-	unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
-	mach_error_t err = 0;
-	if (returned_send_result) {
-		reason = _dispatch_mach_msg_get_reason(dmsg, &err);
-	}
-	*send_result = reason;
-	*send_error = err;
-}
-
-static inline
-dispatch_mach_msg_t
-_dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
-		dispatch_mach_msg_t dmsg, mach_msg_option_t options,
-		bool *returned_send_result)
-{
-	mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg);
-	if (!reply_port) {
-		// use per-thread mach reply port <rdar://24597802>
-		reply_port = _dispatch_get_thread_reply_port();
-		mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
-		dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) ==
-				MACH_MSG_TYPE_MAKE_SEND_ONCE);
-		hdr->msgh_local_port = reply_port;
-		options |= DISPATCH_MACH_OWNED_REPLY_PORT;
-	}
-
-	dispatch_mach_reply_refs_t dmr;
-#if DISPATCH_DEBUG
-	dmr = _dispatch_calloc(1, sizeof(*dmr));
-#else
-	struct dispatch_mach_reply_refs_s dmr_buf = { };
-	dmr = &dmr_buf;
-#endif
-	struct dispatch_continuation_s dc_wait = {
-		.dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT,
-		.dc_data = dmsg,
-		.dc_other = dmr,
-		.dc_priority = DISPATCH_NO_PRIORITY,
-		.dc_voucher = DISPATCH_NO_VOUCHER,
-	};
-	dmr->dmr_ctxt = dmsg->do_ctxt;
-	*returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,options);
-	if (options & DISPATCH_MACH_OWNED_REPLY_PORT) {
-		_dispatch_clear_thread_reply_port(reply_port);
-	}
-	dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port);
-#if DISPATCH_DEBUG
-	free(dmr);
-#endif
-	return dmsg;
-}
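
The per-thread reply port avoids allocating a receive right per synchronous send and keeps concurrent waiters on different threads from stealing each other's replies. Outside of dispatch's kevent machinery, the same pattern is what the MIG runtime does with its cached per-thread port; the following is a hedged sketch of the bare Mach idiom, with error handling elided (dispatch's real receive path goes through `_dispatch_mach_msg_reply_recv` instead of a plain receive):

```c
#include <mach/mach.h>
#include <mach/mig.h>

// Sketch: synchronous request/reply over the MIG per-thread reply
// port. The request's msgh_bits must carry MACH_MSG_TYPE_MAKE_SEND_ONCE
// in the local-port slot, as asserted in the code above.
static mach_msg_return_t
send_and_wait_sketch(mach_msg_header_t *request, mach_msg_header_t *reply,
		mach_msg_size_t reply_size)
{
	mach_port_t reply_port = mig_get_reply_port(); // cached per thread
	request->msgh_local_port = reply_port;
	mach_msg_return_t kr = mach_msg(request, MACH_SEND_MSG,
			request->msgh_size, 0, MACH_PORT_NULL,
			MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	if (kr != MACH_MSG_SUCCESS) return kr;
	return mach_msg(reply, MACH_RCV_MSG, 0, reply_size, reply_port,
			MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
```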
-
-DISPATCH_NOINLINE
-dispatch_mach_msg_t
-dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
-		dispatch_mach_msg_t dmsg, mach_msg_option_t options)
-{
-	bool returned_send_result;
-	dispatch_mach_msg_t reply;
-	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
-	options &= ~DISPATCH_MACH_OPTIONS_MASK;
-	options |= DISPATCH_MACH_WAIT_FOR_REPLY;
-	reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
-			&returned_send_result);
-	dispatch_assert(!returned_send_result);
-	return reply;
-}
-
-DISPATCH_NOINLINE
-dispatch_mach_msg_t
-dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm,
-		dispatch_mach_msg_t dmsg, mach_msg_option_t options,
-		dispatch_mach_send_flags_t send_flags,
-		dispatch_mach_reason_t *send_result, mach_error_t *send_error)
-{
-	if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) {
-		DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags");
-	}
-	bool returned_send_result;
-	dispatch_mach_msg_t reply;
-	dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
-	options &= ~DISPATCH_MACH_OPTIONS_MASK;
-	options |= DISPATCH_MACH_WAIT_FOR_REPLY;
-	options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT;
-	reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options,
-			&returned_send_result);
-	unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND;
-	mach_error_t err = 0;
-	if (returned_send_result) {
-		reason = _dispatch_mach_msg_get_reason(dmsg, &err);
-	}
-	*send_result = reason;
-	*send_error = err;
-	return reply;
-}
-
-DISPATCH_NOINLINE
-static bool
-_dispatch_mach_disconnect(dispatch_mach_t dm)
-{
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	bool disconnected;
-	if (dm->dm_dkev) {
-		_dispatch_mach_notification_kevent_unregister(dm);
-	}
-	if (MACH_PORT_VALID(dr->dm_send)) {
-		_dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dr->dm_send);
-	}
-	dr->dm_send = MACH_PORT_NULL;
-	if (dr->dm_checkin) {
-		_dispatch_mach_msg_not_sent(dm, dr->dm_checkin);
-		dr->dm_checkin = NULL;
-	}
-	_dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock);
-	dispatch_mach_reply_refs_t dmr, tmp;
-	TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dmr_list, tmp) {
-		TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list);
-		_TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list);
-		if (dmr->dmr_dkev) {
-			_dispatch_mach_reply_kevent_unregister(dm, dmr,
-					DKEV_UNREGISTER_DISCONNECTED);
-		} else {
-			_dispatch_mach_reply_waiter_unregister(dm, dmr,
-					DKEV_UNREGISTER_DISCONNECTED);
-		}
-	}
-	disconnected = TAILQ_EMPTY(&dm->dm_refs->dm_replies);
-	_dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock);
-	return disconnected;
-}
-
-static void
-_dispatch_mach_cancel(dispatch_mach_t dm)
-{
-	_dispatch_object_debug(dm, "%s", __func__);
-	if (!_dispatch_mach_disconnect(dm)) return;
-	if (dm->ds_dkev) {
-		mach_port_t local_port = (mach_port_t)dm->ds_dkev->dk_kevent.ident;
-		_dispatch_source_kevent_unregister(dm->_as_ds);
-		if ((dm->dq_atomic_flags & DSF_STATE_MASK) == DSF_DELETED) {
-			_dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL);
-		}
-	} else {
-		_dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED,
-				DSF_ARMED | DSF_DEFERRED_DELETE);
-	}
-}
-
-DISPATCH_NOINLINE
-static bool
-_dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou)
-{
-	if (!_dispatch_mach_disconnect(dm)) return false;
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	dr->dm_checkin = dou._dc->dc_data;
-	dr->dm_send = (mach_port_t)dou._dc->dc_other;
-	_dispatch_continuation_free(dou._dc);
-	(void)os_atomic_dec2o(dr, dm_disconnect_cnt, relaxed);
-	_dispatch_object_debug(dm, "%s", __func__);
-	_dispatch_release(dm); // <rdar://problem/26266265>
-	return true;
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send,
-		dispatch_mach_msg_t checkin)
-{
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	(void)os_atomic_inc2o(dr, dm_disconnect_cnt, relaxed);
-	if (MACH_PORT_VALID(send) && checkin) {
-		dispatch_retain(checkin);
-		checkin->dmsg_options = _dispatch_mach_checkin_options();
-		dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin);
-	} else {
-		checkin = NULL;
-		dr->dm_checkin_port = MACH_PORT_NULL;
-	}
-	dispatch_continuation_t dc = _dispatch_continuation_alloc();
-	dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-	// actually called manually in _dispatch_mach_send_drain
-	dc->dc_func = (void*)_dispatch_mach_reconnect_invoke;
-	dc->dc_ctxt = dc;
-	dc->dc_data = checkin;
-	dc->dc_other = (void*)(uintptr_t)send;
-	dc->dc_voucher = DISPATCH_NO_VOUCHER;
-	dc->dc_priority = DISPATCH_NO_PRIORITY;
-	_dispatch_retain(dm); // <rdar://problem/26266265>
-	return _dispatch_mach_send_push(dm, dc, 0);
-}
-
-DISPATCH_NOINLINE
-mach_port_t
-dispatch_mach_get_checkin_port(dispatch_mach_t dm)
-{
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) {
-		return MACH_PORT_DEAD;
-	}
-	return dr->dm_checkin_port;
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_connect_invoke(dispatch_mach_t dm)
-{
-	dispatch_mach_refs_t dr = dm->ds_refs;
-	_dispatch_client_callout4(dr->dm_handler_ctxt,
-			DISPATCH_MACH_CONNECTED, NULL, 0, dr->dm_handler_func);
-	dm->dm_connect_handler_called = 1;
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg,
-		dispatch_invoke_flags_t flags)
-{
-	dispatch_thread_frame_s dtf;
-	dispatch_mach_refs_t dr;
-	dispatch_mach_t dm;
-	mach_error_t err;
-	unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err);
-	_dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE|
-			DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE;
-
-	// hide mach channel
-	dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf);
-	dr = dm->ds_refs;
-	dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
-	_dispatch_voucher_ktrace_dmsg_pop(dmsg);
-	_dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg);
-	(void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority,
-			dmsg->dmsg_voucher, adopt_flags);
-	dmsg->dmsg_voucher = NULL;
-	dispatch_invoke_with_autoreleasepool(flags, {
-		if (slowpath(!dm->dm_connect_handler_called)) {
-			_dispatch_mach_connect_invoke(dm);
-		}
-		_dispatch_client_callout4(dr->dm_handler_ctxt, reason, dmsg, err,
-				dr->dm_handler_func);
-	});
-	_dispatch_thread_frame_unstash(&dtf);
-	_dispatch_introspection_queue_item_complete(dmsg);
-	dispatch_release(dmsg);
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_mach_barrier_invoke(dispatch_continuation_t dc,
-		dispatch_invoke_flags_t flags)
-{
-	dispatch_thread_frame_s dtf;
-	dispatch_mach_t dm = dc->dc_other;
-	dispatch_mach_refs_t dr;
-	uintptr_t dc_flags = (uintptr_t)dc->dc_data;
-	unsigned long type = dc_type(dc);
-
-	// hide mach channel from clients
-	if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
-		// on the send queue, the mach channel isn't the current queue;
-		// its target queue is already the current one
-		_dispatch_thread_frame_stash(&dtf);
-	}
-	dr = dm->ds_refs;
-	DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT);
-	_dispatch_continuation_pop_forwarded(dc, dm->dq_override_voucher, dc_flags,{
-		dispatch_invoke_with_autoreleasepool(flags, {
-			if (slowpath(!dm->dm_connect_handler_called)) {
-				_dispatch_mach_connect_invoke(dm);
-			}
-			_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
-			_dispatch_client_callout4(dr->dm_handler_ctxt,
-					DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0,
-					dr->dm_handler_func);
-		});
-	});
-	if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) {
-		_dispatch_thread_frame_unstash(&dtf);
-	}
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context,
-		dispatch_function_t func)
-{
-	dispatch_continuation_t dc = _dispatch_continuation_alloc();
-	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-	pthread_priority_t pp;
-
-	_dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
-	dc->dc_data = (void *)dc->dc_flags;
-	dc->dc_other = dm;
-	dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER);
-	_dispatch_trace_continuation_push(dm->_as_dq, dc);
-	pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc);
-	return _dispatch_mach_send_push(dm, dc, pp);
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
-{
-	dispatch_continuation_t dc = _dispatch_continuation_alloc();
-	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-	pthread_priority_t pp;
-
-	_dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
-	dc->dc_data = (void *)dc->dc_flags;
-	dc->dc_other = dm;
-	dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER);
-	_dispatch_trace_continuation_push(dm->_as_dq, dc);
-	pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc);
-	return _dispatch_mach_send_push(dm, dc, pp);
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context,
-		dispatch_function_t func)
-{
-	dispatch_continuation_t dc = _dispatch_continuation_alloc();
-	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-
-	_dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags);
-	dc->dc_data = (void *)dc->dc_flags;
-	dc->dc_other = dm;
-	dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER);
-	return _dispatch_continuation_async(dm->_as_dq, dc);
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier)
-{
-	dispatch_continuation_t dc = _dispatch_continuation_alloc();
-	uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT;
-
-	_dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags);
-	dc->dc_data = (void *)dc->dc_flags;
-	dc->dc_other = dm;
-	dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER);
-	return _dispatch_continuation_async(dm->_as_dq, dc);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags)
-{
-	dispatch_mach_refs_t dr = dm->ds_refs;
-
-	dispatch_invoke_with_autoreleasepool(flags, {
-		if (slowpath(!dm->dm_connect_handler_called)) {
-			_dispatch_mach_connect_invoke(dm);
-		}
-		_dispatch_client_callout4(dr->dm_handler_ctxt,
-				DISPATCH_MACH_CANCELED, NULL, 0, dr->dm_handler_func);
-	});
-	dm->dm_cancel_handler_called = 1;
-	_dispatch_release(dm); // the retain is done at creation time
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_mach_cancel(dispatch_mach_t dm)
-{
-	dispatch_source_cancel(dm->_as_ds);
-}
-
-static void
-_dispatch_mach_install(dispatch_mach_t dm, pthread_priority_t pp)
-{
-	uint32_t disconnect_cnt;
-
-	if (dm->ds_dkev) {
-		_dispatch_source_kevent_register(dm->_as_ds, pp);
-	}
-	if (dm->ds_is_direct_kevent) {
-		pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK |
-				_PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG |
-				_PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
-		// _dispatch_mach_reply_kevent_register assumes this has been done.
-		// Unlike regular sources or queues, the DEFAULTQUEUE flag is used
-		// so that the priority of the channel doesn't act as a QoS floor
-		// for incoming messages (26761457)
-		dm->dq_priority = (dispatch_priority_t)pp;
-	}
-	dm->ds_is_installed = true;
-	if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_refs, dm_disconnect_cnt,
-			DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) {
-		DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed");
-	}
-}
-
-void
-_dispatch_mach_finalize_activation(dispatch_mach_t dm)
-{
-	if (dm->ds_is_direct_kevent && !dm->ds_is_installed) {
-		dispatch_source_t ds = dm->_as_ds;
-		pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds);
-		if (pp) _dispatch_mach_install(dm, pp);
-	}
-
-	// call "super"
-	_dispatch_queue_finalize_activation(dm->_as_dq);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_queue_t
-_dispatch_mach_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags,
-		uint64_t *owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED)
-{
-	dispatch_mach_t dm = dou._dm;
-	dispatch_queue_t retq = NULL;
-	dispatch_queue_t dq = _dispatch_queue_get_current();
-
-	// This function performs all mach channel actions. Each action is
-	// responsible for verifying that it takes place on the appropriate queue.
-	// If the current queue is not the correct queue for this action, the
-	// correct queue will be returned and the invoke will be re-driven on that
-	// queue.
-
-	// The order of tests here in invoke and in wakeup should be consistent.
-
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	dispatch_queue_t dkq = &_dispatch_mgr_q;
-
-	if (dm->ds_is_direct_kevent) {
-		dkq = dm->do_targetq;
-	}
-
-	if (slowpath(!dm->ds_is_installed)) {
-		// The channel needs to be installed on the kevent queue.
-		if (dq != dkq) {
-			return dkq;
-		}
-		_dispatch_mach_install(dm, _dispatch_get_defaultpriority());
-	}
-
-	if (_dispatch_queue_class_probe(dm)) {
-		if (dq == dm->do_targetq) {
-			retq = _dispatch_queue_serial_drain(dm->_as_dq, flags, owned, NULL);
-		} else {
-			retq = dm->do_targetq;
-		}
-	}
-
-	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
-
-	if (dr->dm_tail) {
-		bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt &&
-				(dm->dm_dkev || !dm->ds_is_direct_kevent));
-		if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) ||
-				(dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) {
-			// The channel has pending messages to send.
-			if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) {
-				return retq ? retq : &_dispatch_mgr_q;
-			}
-			dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE;
-			if (dq != &_dispatch_mgr_q) {
-				send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER;
-			}
-			_dispatch_mach_send_invoke(dm, flags, send_flags);
-		}
-	} else if (dqf & DSF_CANCELED) {
-		// The channel has been cancelled and needs to be uninstalled from the
-		// manager queue. After uninstallation, the cancellation handler needs
-		// to be delivered to the target queue.
-		if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
-			// waiting for the delivery of a deferred delete event
-			return retq;
-		}
-		if ((dqf & DSF_STATE_MASK) != DSF_DELETED) {
-			if (dq != &_dispatch_mgr_q) {
-				return retq ? retq : &_dispatch_mgr_q;
-			}
-			_dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL);
-			dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
-			if (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) {
-				// waiting for the delivery of a deferred delete event
-				// or deletion didn't happen because send_invoke couldn't
-				// acquire the send lock
-				return retq;
-			}
-		}
-		if (!dm->dm_cancel_handler_called) {
-			if (dq != dm->do_targetq) {
-				return retq ? retq : dm->do_targetq;
-			}
-			_dispatch_mach_cancel_invoke(dm, flags);
-		}
-	}
-
-	return retq;
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags)
-{
-	_dispatch_queue_class_invoke(dm, flags, _dispatch_mach_invoke2);
-}
-
-void
-_dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp,
-		dispatch_wakeup_flags_t flags)
-{
-	// This function determines whether the mach channel needs to be invoked.
-	// The order of tests here in probe and in invoke should be consistent.
-
-	dispatch_mach_send_refs_t dr = dm->dm_refs;
-	dispatch_queue_wakeup_target_t dkq = DISPATCH_QUEUE_WAKEUP_MGR;
-	dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE;
-	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq);
-
-	if (dm->ds_is_direct_kevent) {
-		dkq = DISPATCH_QUEUE_WAKEUP_TARGET;
-	}
-
-	if (!dm->ds_is_installed) {
-		// The channel needs to be installed on the kevent queue.
-		tq = dkq;
-		goto done;
-	}
-
-	if (_dispatch_queue_class_probe(dm)) {
-		tq = DISPATCH_QUEUE_WAKEUP_TARGET;
-		goto done;
-	}
-
-	if (_dispatch_lock_is_locked(dr->dm_state_lock.dul_lock)) {
-		// Sending and uninstallation below require the send lock; the channel
-		// will be woken up when the lock is dropped <rdar://15132939&15203957>
-		_dispatch_queue_reinstate_override_priority(dm, (dispatch_priority_t)pp);
-		goto done;
-	}
-
-	if (dr->dm_tail) {
-		bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt &&
-				(dm->dm_dkev || !dm->ds_is_direct_kevent));
-		if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) ||
-				(dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) {
-			if (unlikely(requires_mgr)) {
-				tq = DISPATCH_QUEUE_WAKEUP_MGR;
-			} else {
-				tq = DISPATCH_QUEUE_WAKEUP_TARGET;
-			}
-		} else {
-			// this can happen when we can't send because the port is full,
-			// but we should not lose the override
-			_dispatch_queue_reinstate_override_priority(dm,
-					(dispatch_priority_t)pp);
-		}
-	} else if (dqf & DSF_CANCELED) {
-		if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) {
-			// waiting for the delivery of a deferred delete event
-		} else if ((dqf & DSF_STATE_MASK) != DSF_DELETED) {
-			// The channel needs to be uninstalled from the manager queue
-			tq = DISPATCH_QUEUE_WAKEUP_MGR;
-		} else if (!dm->dm_cancel_handler_called) {
-			// the cancellation handler needs to be delivered to the target
-			// queue.
-			tq = DISPATCH_QUEUE_WAKEUP_TARGET;
-		}
-	}
-
-done:
-	if (tq) {
-		return _dispatch_queue_class_wakeup(dm->_as_dq, pp, flags, tq);
-	} else if (pp) {
-		return _dispatch_queue_class_override_drainer(dm->_as_dq, pp, flags);
-	} else if (flags & DISPATCH_WAKEUP_CONSUME) {
-		return _dispatch_release_tailcall(dm);
-	}
-}
-
-#pragma mark -
-#pragma mark dispatch_mach_msg_t
-
-dispatch_mach_msg_t
-dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size,
-		dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr)
-{
-	if (slowpath(size < sizeof(mach_msg_header_t)) ||
-			slowpath(destructor && !msg)) {
-		DISPATCH_CLIENT_CRASH(size, "Empty message");
-	}
-	dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg),
-			sizeof(struct dispatch_mach_msg_s) +
-			(destructor ? 0 : size - sizeof(dmsg->dmsg_msg)));
-	if (destructor) {
-		dmsg->dmsg_msg = msg;
-	} else if (msg) {
-		memcpy(dmsg->dmsg_buf, msg, size);
-	}
-	dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
-	dmsg->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
-			false);
-	dmsg->dmsg_destructor = destructor;
-	dmsg->dmsg_size = size;
-	if (msg_ptr) {
-		*msg_ptr = _dispatch_mach_msg_get_msg(dmsg);
-	}
-	return dmsg;
-}
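
`dispatch_mach_msg_create` sizes a single allocation to hold either an inline copy of the message (no destructor) or just the header bookkeeping (caller-managed storage freed by the destructor). Below is a simplified sketch of that inline-or-external payload shape; note the real struct overlays the pointer and the inline buffer rather than carrying both fields, and error handling is elided:

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef void (*payload_destructor_t)(void *);

typedef struct my_msg_s {
	payload_destructor_t destructor;
	size_t size;
	void *ptr;  // external storage, reclaimed by the destructor
	char buf[]; // inline storage (C99 flexible array member)
} *my_msg_t;

// One malloc either way: trailing bytes are only reserved when the
// payload is copied inline.
static my_msg_t
my_msg_create(const void *payload, size_t size, payload_destructor_t dtor)
{
	my_msg_t m = malloc(sizeof(struct my_msg_s) + (dtor ? 0 : size));
	m->destructor = dtor;
	m->size = size;
	if (dtor) {
		m->ptr = (void *)(uintptr_t)payload;
	} else if (payload) {
		memcpy(m->buf, payload, size);
	}
	return m;
}

static void *
my_msg_get_payload(my_msg_t m)
{
	return m->destructor ? m->ptr : (void *)m->buf;
}
```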
-
-void
-_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg)
-{
-	if (dmsg->dmsg_voucher) {
-		_voucher_release(dmsg->dmsg_voucher);
-		dmsg->dmsg_voucher = NULL;
-	}
-	switch (dmsg->dmsg_destructor) {
-	case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT:
-		break;
-	case DISPATCH_MACH_MSG_DESTRUCTOR_FREE:
-		free(dmsg->dmsg_msg);
-		break;
-	case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: {
-		mach_vm_size_t vm_size = dmsg->dmsg_size;
-		mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg;
-		(void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(),
-				vm_addr, vm_size));
-		break;
-	}}
-}
-
-static inline mach_msg_header_t*
-_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg)
-{
-	return dmsg->dmsg_destructor ? dmsg->dmsg_msg :
-			(mach_msg_header_t*)dmsg->dmsg_buf;
-}
-
-mach_msg_header_t*
-dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr)
-{
-	if (size_ptr) {
-		*size_ptr = dmsg->dmsg_size;
-	}
-	return _dispatch_mach_msg_get_msg(dmsg);
-}
-
-size_t
-_dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz)
-{
-	size_t offset = 0;
-	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
-			dx_kind(dmsg), dmsg);
-	offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, "
-			"refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1);
-	offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, "
-			"msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf);
-	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
-	if (hdr->msgh_id) {
-		offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ",
-				hdr->msgh_id);
-	}
-	if (hdr->msgh_size) {
-		offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ",
-				hdr->msgh_size);
-	}
-	if (hdr->msgh_bits) {
-		offset += dsnprintf(&buf[offset], bufsiz - offset, "bits <l %u, r %u",
-				MACH_MSGH_BITS_LOCAL(hdr->msgh_bits),
-				MACH_MSGH_BITS_REMOTE(hdr->msgh_bits));
-		if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) {
-			offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x",
-					MACH_MSGH_BITS_OTHER(hdr->msgh_bits));
-		}
-		offset += dsnprintf(&buf[offset], bufsiz - offset, ">, ");
-	}
-	if (hdr->msgh_local_port && hdr->msgh_remote_port) {
-		offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, "
-				"remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port);
-	} else if (hdr->msgh_local_port) {
-		offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x",
-				hdr->msgh_local_port);
-	} else if (hdr->msgh_remote_port) {
-		offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x",
-				hdr->msgh_remote_port);
-	} else {
-		offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports");
-	}
-	offset += dsnprintf(&buf[offset], bufsiz - offset, " } }");
-	return offset;
-}
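
These debug formatters rely on `dsnprintf` returning the number of characters actually written, clamped to the buffer, so the `offset += dsnprintf(&buf[offset], bufsiz - offset, ...)` chain can never run `offset` past `bufsiz`. Plain snprintf returns the would-be length instead, which would make the `bufsiz - offset` argument underflow once the buffer fills. A sketch of such a clamping wrapper:

```c
#include <stdarg.h>
#include <stdio.h>

// snprintf variant that returns the count of characters actually
// stored, so accumulated offsets stay within the buffer.
static size_t
dsnprintf_sketch(char *buf, size_t bufsiz, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(buf, bufsiz, fmt, ap);
	va_end(ap);
	if (n < 0) return 0;
	return ((size_t)n < bufsiz) ? (size_t)n : (bufsiz ? bufsiz - 1 : 0);
}
```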
-
-#pragma mark -
-#pragma mark dispatch_mig_server
-
-mach_msg_return_t
-dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz,
-		dispatch_mig_callback_t callback)
-{
-	mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT
-		| MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX)
-		| MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER;
-	mach_msg_options_t tmp_options;
-	mig_reply_error_t *bufTemp, *bufRequest, *bufReply;
-	mach_msg_return_t kr = 0;
-	uint64_t assertion_token = 0;
-	unsigned int cnt = 1000; // do not stall out serial queues
-	boolean_t demux_success;
-	bool received = false;
-	size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE;
-
-	bufRequest = alloca(rcv_size);
-	bufRequest->RetCode = 0;
-	for (mach_vm_address_t p = mach_vm_trunc_page(bufRequest + vm_page_size);
-			p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) {
-		*(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
-	}
-
-	bufReply = alloca(rcv_size);
-	bufReply->Head.msgh_size = 0;
-	for (mach_vm_address_t p = mach_vm_trunc_page(bufReply + vm_page_size);
-			p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) {
-		*(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard
-	}
-
-#if DISPATCH_DEBUG
-	options |= MACH_RCV_LARGE; // rdar://problem/8422992
-#endif
-	tmp_options = options;
-	// XXX FIXME -- change this to not starve out the target queue
-	for (;;) {
-		if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) {
-			options &= ~MACH_RCV_MSG;
-			tmp_options &= ~MACH_RCV_MSG;
-
-			if (!(tmp_options & MACH_SEND_MSG)) {
-				goto out;
-			}
-		}
-		kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size,
-				(mach_msg_size_t)rcv_size, (mach_port_t)ds->ds_ident_hack, 0,0);
-
-		tmp_options = options;
-
-		if (slowpath(kr)) {
-			switch (kr) {
-			case MACH_SEND_INVALID_DEST:
-			case MACH_SEND_TIMED_OUT:
-				if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
-					mach_msg_destroy(&bufReply->Head);
-				}
-				break;
-			case MACH_RCV_TIMED_OUT:
-				// Don't return an error if a message was sent this time or
-				// a message was successfully received previously
-				// rdar://problems/7363620&7791738
-				if (bufReply->Head.msgh_remote_port || received) {
-					kr = MACH_MSG_SUCCESS;
-				}
-				break;
-			case MACH_RCV_INVALID_NAME:
-				break;
-#if DISPATCH_DEBUG
-			case MACH_RCV_TOO_LARGE:
-				// receive messages that are too large and log their id and size
-				// rdar://problem/8422992
-				tmp_options &= ~MACH_RCV_LARGE;
-				size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE;
-				void *large_buf = malloc(large_size);
-				if (large_buf) {
-					rcv_size = large_size;
-					bufReply = large_buf;
-				}
-				if (!mach_msg(&bufReply->Head, tmp_options, 0,
-						(mach_msg_size_t)rcv_size,
-						(mach_port_t)ds->ds_ident_hack, 0, 0)) {
-					_dispatch_log("BUG in libdispatch client: "
-							"dispatch_mig_server received message larger than "
-							"requested size %zd: id = 0x%x, size = %d",
-							maxmsgsz, bufReply->Head.msgh_id,
-							bufReply->Head.msgh_size);
-				}
-				if (large_buf) {
-					free(large_buf);
-				}
-				// fall through
-#endif
-			default:
-				_dispatch_bug_mach_client(
-						"dispatch_mig_server: mach_msg() failed", kr);
-				break;
-			}
-			goto out;
-		}
-
-		if (!(tmp_options & MACH_RCV_MSG)) {
-			goto out;
-		}
-
-		if (assertion_token) {
-#if DISPATCH_USE_IMPORTANCE_ASSERTION
-			int r = proc_importance_assertion_complete(assertion_token);
-			(void)dispatch_assume_zero(r);
-#endif
-			assertion_token = 0;
-		}
-		received = true;
-
-		bufTemp = bufRequest;
-		bufRequest = bufReply;
-		bufReply = bufTemp;
-
-#if DISPATCH_USE_IMPORTANCE_ASSERTION
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
-		int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head,
-				NULL, &assertion_token);
-		if (r && slowpath(r != EIO)) {
-			(void)dispatch_assume_zero(r);
-		}
-#pragma clang diagnostic pop
-#endif
-		_voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head));
-		demux_success = callback(&bufRequest->Head, &bufReply->Head);
-
-		if (!demux_success) {
-			// destroy the request - but not the reply port
-			bufRequest->Head.msgh_remote_port = 0;
-			mach_msg_destroy(&bufRequest->Head);
-		} else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
-			// if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode
-			// is present
-			if (slowpath(bufReply->RetCode)) {
-				if (bufReply->RetCode == MIG_NO_REPLY) {
-					continue;
-				}
-
-				// destroy the request - but not the reply port
-				bufRequest->Head.msgh_remote_port = 0;
-				mach_msg_destroy(&bufRequest->Head);
-			}
-		}
-
-		if (bufReply->Head.msgh_remote_port) {
-			tmp_options |= MACH_SEND_MSG;
-			if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) !=
-					MACH_MSG_TYPE_MOVE_SEND_ONCE) {
-				tmp_options |= MACH_SEND_TIMEOUT;
-			}
-		}
-	}
-
-out:
-	if (assertion_token) {
-#if DISPATCH_USE_IMPORTANCE_ASSERTION
-		int r = proc_importance_assertion_complete(assertion_token);
-		(void)dispatch_assume_zero(r);
-#endif
-	}
-
-	return kr;
-}
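
The heart of `dispatch_mig_server` is the classic MIG server shape: the buffer that just received a message becomes the request buffer for demuxing, the other buffer collects the reply, and the reply send is folded into the next receive in a single `mach_msg` call. A condensed, hedged sketch of just that skeleton (the timeouts, importance assertions, and too-large recovery above are omitted):

```c
#include <mach/mach.h>
#include <mach/mig_errors.h>
#include <stdlib.h>

// Minimal MIG-style server loop: swap buffers each iteration and
// combine the reply send with the next receive.
static mach_msg_return_t
mig_server_sketch(mach_port_t rcv_port, size_t maxmsgsz,
		boolean_t (*demux)(mach_msg_header_t *, mach_msg_header_t *))
{
	size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE;
	mig_reply_error_t *req = malloc(rcv_size);
	mig_reply_error_t *rep = malloc(rcv_size), *tmp;
	mach_msg_option_t opts = MACH_RCV_MSG; // nothing to send yet
	mach_msg_return_t kr;

	rep->Head.msgh_size = 0;
	for (;;) {
		kr = mach_msg(&rep->Head, opts, rep->Head.msgh_size,
				(mach_msg_size_t)rcv_size, rcv_port,
				MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
		if (kr != MACH_MSG_SUCCESS) break;
		tmp = req; req = rep; rep = tmp; // received msg is the request
		demux(&req->Head, &rep->Head);   // fills in the reply
		opts = MACH_RCV_MSG;
		if (rep->Head.msgh_remote_port) {
			opts |= MACH_SEND_MSG;       // combined reply + receive
		}
	}
	free(req);
	free(rep);
	return kr;
}
```

dispatch's version additionally caps the iteration count (`cnt`) and uses MACH_RCV_TIMEOUT so a busy port cannot starve the serial queue it runs on.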
-
-#endif /* HAVE_MACH */
-
 #pragma mark -
 #pragma mark dispatch_source_debug
 
-DISPATCH_NOINLINE
-static const char *
-_evfiltstr(short filt)
-{
-	switch (filt) {
-#define _evfilt2(f) case (f): return #f
-	_evfilt2(EVFILT_READ);
-	_evfilt2(EVFILT_WRITE);
-	_evfilt2(EVFILT_AIO);
-	_evfilt2(EVFILT_VNODE);
-	_evfilt2(EVFILT_PROC);
-	_evfilt2(EVFILT_SIGNAL);
-	_evfilt2(EVFILT_TIMER);
-#if HAVE_MACH
-	_evfilt2(EVFILT_MACHPORT);
-	_evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION);
-#endif
-	_evfilt2(EVFILT_FS);
-	_evfilt2(EVFILT_USER);
-#ifdef EVFILT_VM
-	_evfilt2(EVFILT_VM);
-#endif
-#ifdef EVFILT_SOCK
-	_evfilt2(EVFILT_SOCK);
-#endif
-#ifdef EVFILT_MEMORYSTATUS
-	_evfilt2(EVFILT_MEMORYSTATUS);
-#endif
-
-	_evfilt2(DISPATCH_EVFILT_TIMER);
-	_evfilt2(DISPATCH_EVFILT_CUSTOM_ADD);
-	_evfilt2(DISPATCH_EVFILT_CUSTOM_OR);
-	default:
-		return "EVFILT_missing";
-	}
-}
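
The `_evfilt2` macro uses the preprocessor's `#` stringizing operator so that one line per constant yields both the `case` label and its printable name. The same two-line pattern is handy for any family of integer constants that needs a debug-name table; a tiny standalone example with made-up error codes:

```c
#include <stdio.h>

enum { ERR_NONE = 0, ERR_IO = 5, ERR_TIMEOUT = 60 }; // illustrative

static const char *
errstr(int e)
{
	switch (e) {
#define _err2(f) case (f): return #f
	_err2(ERR_NONE);
	_err2(ERR_IO);
	_err2(ERR_TIMEOUT);
#undef _err2
	default:
		return "ERR_unknown";
	}
}

int
main(void)
{
	printf("%s\n", errstr(5)); // prints "ERR_IO"
	return 0;
}
```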
-
-#if DISPATCH_DEBUG
-static const char *
-_evflagstr2(uint16_t *flagsp)
-{
-#define _evflag2(f) \
-	if ((*flagsp & (f)) == (f) && (f)) { \
-		*flagsp &= ~(f); \
-		return #f "|"; \
-	}
-	_evflag2(EV_ADD);
-	_evflag2(EV_DELETE);
-	_evflag2(EV_ENABLE);
-	_evflag2(EV_DISABLE);
-	_evflag2(EV_ONESHOT);
-	_evflag2(EV_CLEAR);
-	_evflag2(EV_RECEIPT);
-	_evflag2(EV_DISPATCH);
-	_evflag2(EV_UDATA_SPECIFIC);
-#ifdef EV_POLL
-	_evflag2(EV_POLL);
-#endif
-#ifdef EV_OOBAND
-	_evflag2(EV_OOBAND);
-#endif
-	_evflag2(EV_ERROR);
-	_evflag2(EV_EOF);
-	_evflag2(EV_VANISHED);
-	*flagsp = 0;
-	return "EV_UNKNOWN ";
-}
-
-DISPATCH_NOINLINE
-static const char *
-_evflagstr(uint16_t flags, char *str, size_t strsize)
-{
-	str[0] = 0;
-	while (flags) {
-		strlcat(str, _evflagstr2(&flags), strsize);
-	}
-	size_t sz = strlen(str);
-	if (sz) str[sz-1] = 0;
-	return str;
-}
-#endif
-
 static size_t
 _dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz)
 {
 	dispatch_queue_t target = ds->do_targetq;
-	return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%lx, "
-			"mask = 0x%lx, pending_data = 0x%lx, registered = %d, "
+	dispatch_source_refs_t dr = ds->ds_refs;
+	return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%x, "
+			"mask = 0x%x, pending_data = 0x%lx, registered = %d, "
 			"armed = %d, deleted = %d%s, canceled = %d, ",
 			target && target->dq_label ? target->dq_label : "", target,
-			ds->ds_ident_hack, ds->ds_pending_data_mask, ds->ds_pending_data,
+			dr->du_ident, dr->du_fflags, ds->ds_pending_data,
 			ds->ds_is_installed, (bool)(ds->dq_atomic_flags & DSF_ARMED),
 			(bool)(ds->dq_atomic_flags & DSF_DELETED),
 			(ds->dq_atomic_flags & DSF_DEFERRED_DELETE) ? " (pending)" : "",
@@ -6501,341 +2409,28 @@
 static size_t
 _dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz)
 {
-	dispatch_source_refs_t dr = ds->ds_refs;
+	dispatch_timer_source_refs_t dr = ds->ds_timer_refs;
 	return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx"
-			", last_fire = 0x%llx, interval = 0x%llx, flags = 0x%lx }, ",
-			(unsigned long long)ds_timer(dr).target,
-			(unsigned long long)ds_timer(dr).deadline,
-			(unsigned long long)ds_timer(dr).last_fire,
-			(unsigned long long)ds_timer(dr).interval, ds_timer(dr).flags);
+			", interval = 0x%llx, flags = 0x%x }, ",
+			(unsigned long long)dr->dt_timer.target,
+			(unsigned long long)dr->dt_timer.deadline,
+			(unsigned long long)dr->dt_timer.interval, dr->du_fflags);
 }
 
 size_t
-_dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz)
+_dispatch_source_debug(dispatch_source_t ds, char *buf, size_t bufsiz)
 {
+	dispatch_source_refs_t dr = ds->ds_refs;
 	size_t offset = 0;
 	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
 			dx_kind(ds), ds);
 	offset += _dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset);
 	offset += _dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset);
-	if (ds->ds_is_timer) {
+	if (dr->du_is_timer) {
 		offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset);
 	}
-	const char *filter;
-	if (!ds->ds_dkev) {
-		filter = "????";
-	} else if (ds->ds_is_custom_source) {
-		filter = _evfiltstr((int16_t)(uintptr_t)ds->ds_dkev);
-	} else {
-		filter = _evfiltstr(ds->ds_dkev->dk_kevent.filter);
-	}
 	offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, "
-			"filter = %s }", ds->ds_dkev,  ds->ds_is_direct_kevent ? " (direct)"
-			: "", filter);
+			"filter = %s }", dr,  dr->du_is_direct ? " (direct)" : "",
+			dr->du_type->dst_kind);
 	return offset;
 }
-
-#if HAVE_MACH
-static size_t
-_dispatch_mach_debug_attr(dispatch_mach_t dm, char* buf, size_t bufsiz)
-{
-	dispatch_queue_t target = dm->do_targetq;
-	return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, "
-			"send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, "
-			"send state = %016llx, disconnected = %d, canceled = %d ",
-			target && target->dq_label ? target->dq_label : "", target,
-			dm->ds_dkev ?(mach_port_t)dm->ds_dkev->dk_kevent.ident:0,
-			dm->dm_refs->dm_send,
-			dm->dm_dkev ?(mach_port_t)dm->dm_dkev->dk_kevent.ident:0,
-			dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) ?
-			" (armed)" : "", dm->dm_refs->dm_checkin_port,
-			dm->dm_refs->dm_checkin ? " (pending)" : "",
-			dm->dm_refs->dm_state, dm->dm_refs->dm_disconnect_cnt,
-			(bool)(dm->dq_atomic_flags & DSF_CANCELED));
-}
-
-size_t
-_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz)
-{
-	size_t offset = 0;
-	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
-			dm->dq_label && !dm->dm_cancel_handler_called ? dm->dq_label :
-			dx_kind(dm), dm);
-	offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset);
-	offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset);
-	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
-	return offset;
-}
-#endif // HAVE_MACH
-
-#if DISPATCH_DEBUG
-DISPATCH_NOINLINE
-static void
-dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev,
-		int i, int n, const char *function, unsigned int line)
-{
-	char flagstr[256];
-	char i_n[31];
-
-	if (n > 1) {
-		snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n);
-	} else {
-		i_n[0] = '\0';
-	}
-#if DISPATCH_USE_KEVENT_QOS
-	_dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
-			"flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, "
-			"qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, "
-			"ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident,
-			_evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
-			sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
-			kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3],
-			function, line);
-#else
-	_dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
-			"flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, "
-			"ext[0] = 0x%llx, ext[1] = 0x%llx }: %s #%u", verb, kev, i_n,
-			kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
-			sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
-#ifndef IGNORE_KEVENT64_EXT
-			kev->ext[0], kev->ext[1],
-#else
-			0ull, 0ull,
-#endif
-			function, line);
-#endif
-}
-
-static void
-_dispatch_kevent_debugger2(void *context)
-{
-	struct sockaddr sa;
-	socklen_t sa_len = sizeof(sa);
-	int c, fd = (int)(long)context;
-	unsigned int i;
-	dispatch_kevent_t dk;
-	dispatch_source_t ds;
-	dispatch_source_refs_t dr;
-	FILE *debug_stream;
-
-	c = accept(fd, &sa, &sa_len);
-	if (c == -1) {
-		if (errno != EAGAIN) {
-			(void)dispatch_assume_zero(errno);
-		}
-		return;
-	}
-#if 0
-	int r = fcntl(c, F_SETFL, 0); // disable non-blocking IO
-	if (r == -1) {
-		(void)dispatch_assume_zero(errno);
-	}
-#endif
-	debug_stream = fdopen(c, "a");
-	if (!dispatch_assume(debug_stream)) {
-		close(c);
-		return;
-	}
-
-	fprintf(debug_stream, "HTTP/1.0 200 OK\r\n");
-	fprintf(debug_stream, "Content-type: text/html\r\n");
-	fprintf(debug_stream, "Pragma: nocache\r\n");
-	fprintf(debug_stream, "\r\n");
-	fprintf(debug_stream, "<html>\n");
-	fprintf(debug_stream, "<head><title>PID %u</title></head>\n", getpid());
-	fprintf(debug_stream, "<body>\n<ul>\n");
-
-	for (i = 0; i < DSL_HASH_SIZE; i++) {
-		if (TAILQ_EMPTY(&_dispatch_sources[i])) {
-			continue;
-		}
-		TAILQ_FOREACH(dk, &_dispatch_sources[i], dk_list) {
-			fprintf(debug_stream, "\t<br><li>DK %p ident %lu filter %s flags "
-					"0x%hx fflags 0x%x data 0x%lx udata %p\n",
-					dk, (unsigned long)dk->dk_kevent.ident,
-					_evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags,
-					dk->dk_kevent.fflags, (unsigned long)dk->dk_kevent.data,
-					(void*)dk->dk_kevent.udata);
-			fprintf(debug_stream, "\t\t<ul>\n");
-			TAILQ_FOREACH(dr, &dk->dk_sources, dr_list) {
-				ds = _dispatch_source_from_refs(dr);
-				fprintf(debug_stream, "\t\t\t<li>DS %p refcnt 0x%x state "
-						"0x%llx data 0x%lx mask 0x%lx flags 0x%x</li>\n",
-						ds, ds->do_ref_cnt + 1, ds->dq_state,
-						ds->ds_pending_data, ds->ds_pending_data_mask,
-						ds->dq_atomic_flags);
-				if (_dq_state_is_enqueued(ds->dq_state)) {
-					dispatch_queue_t dq = ds->do_targetq;
-					fprintf(debug_stream, "\t\t<br>DQ: %p refcnt 0x%x state "
-							"0x%llx label: %s\n", dq, dq->do_ref_cnt + 1,
-							dq->dq_state, dq->dq_label ?: "");
-				}
-			}
-			fprintf(debug_stream, "\t\t</ul>\n");
-			fprintf(debug_stream, "\t</li>\n");
-		}
-	}
-	fprintf(debug_stream, "</ul>\n</body>\n</html>\n");
-	fflush(debug_stream);
-	fclose(debug_stream);
-}
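
The responder above leans on a small trick worth noting: wrapping the accepted descriptor in a stdio stream with `fdopen` turns the HTTP response into a series of `fprintf` calls, and the final `fclose` both flushes the stream and closes the connection. A stripped-down sketch of the same accept-and-respond idiom:

```c
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

// Serve one plain-text response on an accepted connection.
static void
serve_one(int listen_fd)
{
	int c = accept(listen_fd, NULL, NULL);
	if (c == -1) return;
	FILE *f = fdopen(c, "a");
	if (!f) {
		close(c);
		return;
	}
	fprintf(f, "HTTP/1.0 200 OK\r\n");
	fprintf(f, "Content-type: text/plain\r\n\r\n");
	fprintf(f, "hello from pid %d\n", (int)getpid());
	fclose(f); // flushes, then closes c
}
```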
-
-static void
-_dispatch_kevent_debugger2_cancel(void *context)
-{
-	int ret, fd = (int)(long)context;
-
-	ret = close(fd);
-	if (ret == -1) {
-		(void)dispatch_assume_zero(errno);
-	}
-}
-
-static void
-_dispatch_kevent_debugger(void *context DISPATCH_UNUSED)
-{
-	union {
-		struct sockaddr_in sa_in;
-		struct sockaddr sa;
-	} sa_u = {
-		.sa_in = {
-			.sin_family = AF_INET,
-			.sin_addr = { htonl(INADDR_LOOPBACK), },
-		},
-	};
-	dispatch_source_t ds;
-	const char *valstr;
-	int val, r, fd, sock_opt = 1;
-	socklen_t slen = sizeof(sa_u);
-
-#ifndef __linux__
-	if (issetugid()) {
-		return;
-	}
-#endif
-	valstr = getenv("LIBDISPATCH_DEBUGGER");
-	if (!valstr) {
-		return;
-	}
-	val = atoi(valstr);
-	if (val == 2) {
-		sa_u.sa_in.sin_addr.s_addr = 0;
-	}
-	fd = socket(PF_INET, SOCK_STREAM, 0);
-	if (fd == -1) {
-		(void)dispatch_assume_zero(errno);
-		return;
-	}
-	r = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&sock_opt,
-			(socklen_t) sizeof sock_opt);
-	if (r == -1) {
-		(void)dispatch_assume_zero(errno);
-		goto out_bad;
-	}
-#if 0
-	r = fcntl(fd, F_SETFL, O_NONBLOCK);
-	if (r == -1) {
-		(void)dispatch_assume_zero(errno);
-		goto out_bad;
-	}
-#endif
-	r = bind(fd, &sa_u.sa, sizeof(sa_u));
-	if (r == -1) {
-		(void)dispatch_assume_zero(errno);
-		goto out_bad;
-	}
-	r = listen(fd, SOMAXCONN);
-	if (r == -1) {
-		(void)dispatch_assume_zero(errno);
-		goto out_bad;
-	}
-	r = getsockname(fd, &sa_u.sa, &slen);
-	if (r == -1) {
-		(void)dispatch_assume_zero(errno);
-		goto out_bad;
-	}
-
-	ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0,
-			&_dispatch_mgr_q);
-	if (dispatch_assume(ds)) {
-		_dispatch_log("LIBDISPATCH: debug port: %hu",
-				(in_port_t)ntohs(sa_u.sa_in.sin_port));
-
-		/* ownership of fd transfers to ds */
-		dispatch_set_context(ds, (void *)(long)fd);
-		dispatch_source_set_event_handler_f(ds, _dispatch_kevent_debugger2);
-		dispatch_source_set_cancel_handler_f(ds,
-				_dispatch_kevent_debugger2_cancel);
-		dispatch_resume(ds);
-
-		return;
-	}
-out_bad:
-	close(fd);
-}
-
-#if HAVE_MACH
-
-#ifndef MACH_PORT_TYPE_SPREQUEST
-#define MACH_PORT_TYPE_SPREQUEST 0x40000000
-#endif
-
-DISPATCH_NOINLINE
-void
-dispatch_debug_machport(mach_port_t name, const char* str)
-{
-	mach_port_type_t type;
-	mach_msg_bits_t ns = 0, nr = 0, nso = 0, nd = 0;
-	unsigned int dnreqs = 0, dnrsiz;
-	kern_return_t kr = mach_port_type(mach_task_self(), name, &type);
-	if (kr) {
-		_dispatch_log("machport[0x%08x] = { error(0x%x) \"%s\" }: %s", name,
-				kr, mach_error_string(kr), str);
-		return;
-	}
-	if (type & MACH_PORT_TYPE_SEND) {
-		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
-				MACH_PORT_RIGHT_SEND, &ns));
-	}
-	if (type & MACH_PORT_TYPE_SEND_ONCE) {
-		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
-				MACH_PORT_RIGHT_SEND_ONCE, &nso));
-	}
-	if (type & MACH_PORT_TYPE_DEAD_NAME) {
-		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
-				MACH_PORT_RIGHT_DEAD_NAME, &nd));
-	}
-	if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) {
-		kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs);
-		if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr);
-	}
-	if (type & MACH_PORT_TYPE_RECEIVE) {
-		mach_port_status_t status = { .mps_pset = 0, };
-		mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT;
-		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
-				MACH_PORT_RIGHT_RECEIVE, &nr));
-		(void)dispatch_assume_zero(mach_port_get_attributes(mach_task_self(),
-				name, MACH_PORT_RECEIVE_STATUS, (void*)&status, &cnt));
-		_dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
-				"dnreqs(%03u) spreq(%s) nsreq(%s) pdreq(%s) srights(%s) "
-				"sorights(%03u) qlim(%03u) msgcount(%03u) mkscount(%03u) "
-				"seqno(%03u) }: %s", name, nr, ns, nso, nd, dnreqs,
-				type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N",
-				status.mps_nsrequest ? "Y":"N", status.mps_pdrequest ? "Y":"N",
-				status.mps_srights ? "Y":"N", status.mps_sorights,
-				status.mps_qlimit, status.mps_msgcount, status.mps_mscount,
-				status.mps_seqno, str);
-	} else if (type & (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE|
-			MACH_PORT_TYPE_DEAD_NAME)) {
-		_dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
-				"dnreqs(%03u) spreq(%s) }: %s", name, nr, ns, nso, nd, dnreqs,
-				type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", str);
-	} else {
-		_dispatch_log("machport[0x%08x] = { type(0x%08x) }: %s", name, type,
-				str);
-	}
-}
-
-#endif // HAVE_MACH
-
-#endif // DISPATCH_DEBUG
diff --git a/src/source_internal.h b/src/source_internal.h
index 41b6d11..286372d 100644
--- a/src/source_internal.h
+++ b/src/source_internal.h
@@ -32,168 +32,44 @@
 #include <dispatch/base.h> // for HeaderDoc
 #endif
 
-#define DISPATCH_EVFILT_TIMER		(-EVFILT_SYSCOUNT - 1)
-#define DISPATCH_EVFILT_CUSTOM_ADD	(-EVFILT_SYSCOUNT - 2)
-#define DISPATCH_EVFILT_CUSTOM_OR	(-EVFILT_SYSCOUNT - 3)
-#define DISPATCH_EVFILT_MACH_NOTIFICATION	(-EVFILT_SYSCOUNT - 4)
-#define DISPATCH_EVFILT_SYSCOUNT	( EVFILT_SYSCOUNT + 4)
-
-#if HAVE_MACH
-// NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t
-//       bit values must not overlap as they share the same kevent fflags!
-
-/*!
- * @enum dispatch_source_mach_send_flags_t
- *
- * @constant DISPATCH_MACH_SEND_DELETED
- * Port-deleted notification. Disabled for source registration.
- */
-enum {
-	DISPATCH_MACH_SEND_DELETED = 0x4,
-};
-/*!
- * @enum dispatch_source_mach_recv_flags_t
- *
- * @constant DISPATCH_MACH_RECV_MESSAGE
- * Receive right has pending messages
- *
- * @constant DISPATCH_MACH_RECV_MESSAGE_DIRECT
- * Receive messages from receive right directly via kevent64()
- *
- * @constant DISPATCH_MACH_RECV_NO_SENDERS
- * Receive right has no more senders. TODO <rdar://problem/8132399>
- */
-enum {
-	DISPATCH_MACH_RECV_MESSAGE = 0x2,
-	DISPATCH_MACH_RECV_MESSAGE_DIRECT = 0x10,
-	DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE = 0x20,
-	DISPATCH_MACH_RECV_NO_SENDERS = 0x40,
-};
-#endif // HAVE_MACH
-
 enum {
 	/* DISPATCH_TIMER_STRICT 0x1 */
 	/* DISPATCH_TIMER_BACKGROUND = 0x2, */
-	DISPATCH_TIMER_WALL_CLOCK = 0x4,
+	DISPATCH_TIMER_CLOCK_MACH = 0x4,
 	DISPATCH_TIMER_INTERVAL = 0x8,
-	DISPATCH_TIMER_WITH_AGGREGATE = 0x10,
+	DISPATCH_TIMER_AFTER = 0x10,
 	/* DISPATCH_INTERVAL_UI_ANIMATION = 0x20 */
-	DISPATCH_TIMER_AFTER = 0x40,
 };
 
-#define DISPATCH_TIMER_QOS_NORMAL 0u
-#define DISPATCH_TIMER_QOS_CRITICAL 1u
-#define DISPATCH_TIMER_QOS_BACKGROUND 2u
-#define DISPATCH_TIMER_QOS_COUNT (DISPATCH_TIMER_QOS_BACKGROUND + 1)
-#define DISPATCH_TIMER_QOS(tidx) (((uintptr_t)(tidx) >> 1) & 0x3ul)
-
-#define DISPATCH_TIMER_KIND_WALL 0u
-#define DISPATCH_TIMER_KIND_MACH 1u
-#define DISPATCH_TIMER_KIND_COUNT (DISPATCH_TIMER_KIND_MACH + 1)
-#define DISPATCH_TIMER_KIND(tidx) ((uintptr_t)(tidx) & 0x1ul)
-
-#define DISPATCH_TIMER_INDEX(kind, qos) ((qos) << 1 | (kind))
-#define DISPATCH_TIMER_INDEX_DISARM \
-		DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT)
-#define DISPATCH_TIMER_INDEX_COUNT (DISPATCH_TIMER_INDEX_DISARM + 1)
-#define DISPATCH_TIMER_IDENT(flags) ({ unsigned long f = (flags); \
-		DISPATCH_TIMER_INDEX(f & DISPATCH_TIMER_WALL_CLOCK ? \
-		DISPATCH_TIMER_KIND_WALL : DISPATCH_TIMER_KIND_MACH, \
-		f & DISPATCH_TIMER_STRICT ? DISPATCH_TIMER_QOS_CRITICAL : \
-		f & DISPATCH_TIMER_BACKGROUND ? DISPATCH_TIMER_QOS_BACKGROUND : \
-		DISPATCH_TIMER_QOS_NORMAL); })
-
-struct dispatch_kevent_s {
-	TAILQ_ENTRY(dispatch_kevent_s) dk_list;
-	TAILQ_HEAD(, dispatch_source_refs_s) dk_sources;
-	_dispatch_kevent_qos_s dk_kevent;
-};
-
-typedef struct dispatch_kevent_s *dispatch_kevent_t;
-
-typedef typeof(((dispatch_kevent_t)NULL)->dk_kevent.udata) _dispatch_kevent_qos_udata_t;
-
-#define DISPATCH_KEV_CUSTOM_ADD ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_ADD)
-#define DISPATCH_KEV_CUSTOM_OR  ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_OR)
-
-struct dispatch_source_type_s {
-	_dispatch_kevent_qos_s ke;
-	uint64_t mask;
-	void (*init)(dispatch_source_t ds, dispatch_source_type_t type,
-			uintptr_t handle, unsigned long mask, dispatch_queue_t q);
-};
-
-struct dispatch_timer_source_s {
-	uint64_t target;
-	uint64_t deadline;
-	uint64_t last_fire;
-	uint64_t interval;
-	uint64_t leeway;
-	unsigned long flags; // dispatch_timer_flags_t
-	unsigned long missed;
-};
-
-enum {
-	DS_EVENT_HANDLER = 0,
-	DS_CANCEL_HANDLER,
-	DS_REGISTN_HANDLER,
-};
-
-// Source state which may contain references to the source object
-// Separately allocated so that 'leaks' can see sources <rdar://problem/9050566>
-typedef struct dispatch_source_refs_s {
-	TAILQ_ENTRY(dispatch_source_refs_s) dr_list;
-	uintptr_t dr_source_wref; // "weak" backref to dispatch_source_t
-	dispatch_continuation_t volatile ds_handler[3];
-} *dispatch_source_refs_t;
-
-typedef struct dispatch_timer_source_refs_s {
-	struct dispatch_source_refs_s _ds_refs;
-	struct dispatch_timer_source_s _ds_timer;
-	TAILQ_ENTRY(dispatch_timer_source_refs_s) dt_list;
-} *dispatch_timer_source_refs_t;
-
-typedef struct dispatch_timer_source_aggregate_refs_s {
-	struct dispatch_timer_source_refs_s _dsa_refs;
-	TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dra_list;
-	TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dta_list;
-} *dispatch_timer_source_aggregate_refs_t;
-
-#define _dispatch_ptr2wref(ptr) (~(uintptr_t)(ptr))
-#define _dispatch_wref2ptr(ref) ((void*)~(ref))
-#define _dispatch_source_from_refs(dr) \
-		((dispatch_source_t)_dispatch_wref2ptr((dr)->dr_source_wref))
-#define ds_timer(dr) \
-		(((dispatch_timer_source_refs_t)(dr))->_ds_timer)
-#define ds_timer_aggregate(ds) \
-		((dispatch_timer_aggregate_t)((ds)->dq_specific_q))
-
 DISPATCH_ALWAYS_INLINE
 static inline unsigned int
-_dispatch_source_timer_idx(dispatch_source_refs_t dr)
+_dispatch_source_timer_idx(dispatch_unote_t du)
 {
-	return DISPATCH_TIMER_IDENT(ds_timer(dr).flags);
+	uint32_t clock, qos = 0, fflags = du._dt->du_fflags;
+
+	dispatch_assert(DISPATCH_CLOCK_MACH == 1);
+	dispatch_assert(DISPATCH_CLOCK_WALL == 0);
+	clock = (fflags & DISPATCH_TIMER_CLOCK_MACH) / DISPATCH_TIMER_CLOCK_MACH;
+
+#if DISPATCH_HAVE_TIMER_QOS
+	dispatch_assert(DISPATCH_TIMER_STRICT == DISPATCH_TIMER_QOS_CRITICAL);
+	dispatch_assert(DISPATCH_TIMER_BACKGROUND == DISPATCH_TIMER_QOS_BACKGROUND);
+	qos = fflags & (DISPATCH_TIMER_STRICT | DISPATCH_TIMER_BACKGROUND);
+	// flags are normalized so this should never happen
+	dispatch_assert(qos < DISPATCH_TIMER_QOS_COUNT);
+#endif
+
+	return DISPATCH_TIMER_INDEX(clock, qos);
 }
 
 #define _DISPATCH_SOURCE_HEADER(refs) \
 	DISPATCH_QUEUE_HEADER(refs); \
-	/* LP64: fills 32bit hole in QUEUE_HEADER */ \
 	unsigned int \
-		ds_is_level:1, \
-		ds_is_adder:1, \
 		ds_is_installed:1, \
-		ds_is_direct_kevent:1, \
-		ds_is_custom_source:1, \
-		ds_needs_rearm:1, \
-		ds_is_timer:1, \
-		ds_vmpressure_override:1, \
-		ds_memorypressure_override:1, \
-		dm_handler_is_block:1, \
+		dm_needs_mgr:1, \
 		dm_connect_handler_called:1, \
-		dm_cancel_handler_called:1; \
-	dispatch_kevent_t ds_dkev; \
-	dispatch_##refs##_refs_t ds_refs; \
-	unsigned long ds_pending_data_mask;
+		dm_cancel_handler_called:1, \
+		dm_is_xpc:1
 
 #define DISPATCH_SOURCE_HEADER(refs) \
 	struct dispatch_source_s _as_ds[0]; \
@@ -205,146 +81,34 @@
 #if DISPATCH_PURE_C
 struct dispatch_source_s {
 	_DISPATCH_SOURCE_HEADER(source);
-	unsigned long ds_ident_hack;
 	unsigned long ds_data;
 	unsigned long ds_pending_data;
-} DISPATCH_QUEUE_ALIGN;
-#endif
+} DISPATCH_ATOMIC64_ALIGN;
+#endif // DISPATCH_PURE_C
 
-#if HAVE_MACH
-// Mach channel state which may contain references to the channel object
-// layout must match dispatch_source_refs_s
-struct dispatch_mach_refs_s {
-	TAILQ_ENTRY(dispatch_mach_refs_s) dr_list;
-	uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t
-	dispatch_mach_handler_function_t dm_handler_func;
-	void *dm_handler_ctxt;
-};
-typedef struct dispatch_mach_refs_s *dispatch_mach_refs_t;
-
-struct dispatch_mach_reply_refs_s {
-	TAILQ_ENTRY(dispatch_mach_reply_refs_s) dr_list;
-	uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t
-	dispatch_kevent_t dmr_dkev;
-	void *dmr_ctxt;
-	mach_port_t dmr_reply;
-	dispatch_priority_t dmr_priority;
-	voucher_t dmr_voucher;
-	TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list;
-};
-typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t;
-
-#define _DISPATCH_MACH_STATE_UNUSED_MASK_2       0xff00000000000000ull
-#define DISPATCH_MACH_STATE_OVERRIDE_MASK        0x00ffff0000000000ull
-#define _DISPATCH_MACH_STATE_UNUSED_MASK_1       0x000000f000000000ull
-#define DISPATCH_MACH_STATE_DIRTY                0x0000000800000000ull
-#define DISPATCH_MACH_STATE_RECEIVED_OVERRIDE    0x0000000400000000ull
-#define _DISPATCH_MACH_STATE_UNUSED_MASK_0       0x0000000200000000ull
-#define DISPATCH_MACH_STATE_PENDING_BARRIER      0x0000000100000000ull
-#define DISPATCH_MACH_STATE_UNLOCK_MASK          0x00000000ffffffffull
-
-struct dispatch_mach_send_refs_s {
-	TAILQ_ENTRY(dispatch_mach_send_refs_s) dr_list;
-	uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t
-	dispatch_mach_msg_t dm_checkin;
-	TAILQ_HEAD(, dispatch_mach_reply_refs_s) dm_replies;
-	dispatch_unfair_lock_s dm_replies_lock;
-#define DISPATCH_MACH_DISCONNECT_MAGIC_BASE (0x80000000)
-#define DISPATCH_MACH_NEVER_INSTALLED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 0)
-#define DISPATCH_MACH_NEVER_CONNECTED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 1)
-	uint32_t volatile dm_disconnect_cnt;
-	union {
-		uint64_t volatile dm_state;
-		DISPATCH_STRUCT_LITTLE_ENDIAN_2(
-			dispatch_unfair_lock_s dm_state_lock,
-			uint32_t dm_state_bits
-		);
-	};
-	unsigned int dm_needs_mgr:1;
-	struct dispatch_object_s *volatile dm_tail;
-	struct dispatch_object_s *volatile dm_head;
-	mach_port_t dm_send, dm_checkin_port;
-};
-typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t;
-
-DISPATCH_CLASS_DECL(mach);
-#if DISPATCH_PURE_C
-struct dispatch_mach_s {
-	DISPATCH_SOURCE_HEADER(mach);
-	dispatch_kevent_t dm_dkev;
-	dispatch_mach_send_refs_t dm_refs;
-} DISPATCH_QUEUE_ALIGN;
-#endif
-
-DISPATCH_CLASS_DECL(mach_msg);
-struct dispatch_mach_msg_s {
-	DISPATCH_OBJECT_HEADER(mach_msg);
-	union {
-		mach_msg_option_t dmsg_options;
-		mach_error_t dmsg_error;
-	};
-	mach_port_t dmsg_reply;
-	pthread_priority_t dmsg_priority;
-	voucher_t dmsg_voucher;
-	dispatch_mach_msg_destructor_t dmsg_destructor;
-	size_t dmsg_size;
-	union {
-		mach_msg_header_t *dmsg_msg;
-		char dmsg_buf[0];
-	};
-};
-#endif // HAVE_MACH
-
-extern const struct dispatch_source_type_s _dispatch_source_type_after;
-
-#if TARGET_OS_EMBEDDED
-#define DSL_HASH_SIZE  64u // must be a power of two
-#else
-#define DSL_HASH_SIZE 256u // must be a power of two
-#endif
-
-dispatch_source_t
-_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp,
-		const struct dispatch_continuation_s *dc);
+dispatch_priority_t
+_dispatch_source_compute_kevent_priority(dispatch_source_t ds);
+void _dispatch_source_refs_register(dispatch_source_t ds, dispatch_priority_t bp);
+void _dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options);
 void _dispatch_source_xref_dispose(dispatch_source_t ds);
 void _dispatch_source_dispose(dispatch_source_t ds);
 void _dispatch_source_finalize_activation(dispatch_source_t ds);
 void _dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_flags_t flags);
-void _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp,
+void _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags);
+void _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags,
+		uintptr_t data, pthread_priority_t pp);
 size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz);
-void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval);
-void _dispatch_source_set_event_handler_continuation(dispatch_source_t ds,
-		dispatch_continuation_t dc);
+
 DISPATCH_EXPORT // for firehose server
 void _dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp,
 		unsigned long val);
 
-#if HAVE_MACH
-void _dispatch_mach_dispose(dispatch_mach_t dm);
-void _dispatch_mach_finalize_activation(dispatch_mach_t dm);
-void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags);
-void _dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp,
-		dispatch_wakeup_flags_t flags);
-size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz);
-
-void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg);
-void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg,
-		dispatch_invoke_flags_t flags);
-size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf,
-		size_t bufsiz);
-
-void _dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc,
-		dispatch_invoke_flags_t flags);
-void _dispatch_mach_barrier_invoke(dispatch_continuation_t dc,
-		dispatch_invoke_flags_t flags);
-#endif // HAVE_MACH
-
-void _dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
+void _dispatch_mgr_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos,
 		dispatch_wakeup_flags_t flags);
 void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_invoke_flags_t flags);
 #if DISPATCH_USE_KEVENT_WORKQUEUE
-void _dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events,
+void _dispatch_kevent_worker_thread(dispatch_kevent_t *events,
 		int *nevents);
 #endif
 
diff --git a/src/time.c b/src/time.c
index 6d00831..6db4880 100644
--- a/src/time.c
+++ b/src/time.c
@@ -20,28 +20,6 @@
 
 #include "internal.h"
 
-uint64_t
-_dispatch_get_nanoseconds(void)
-{
-#if !TARGET_OS_WIN32
-	struct timeval now;
-	int r = gettimeofday(&now, NULL);
-	dispatch_assert_zero(r);
-	dispatch_assert(sizeof(NSEC_PER_SEC) == 8);
-	dispatch_assert(sizeof(NSEC_PER_USEC) == 8);
-	return (uint64_t)now.tv_sec * NSEC_PER_SEC +
-			(uint64_t)now.tv_usec * NSEC_PER_USEC;
-#else /* TARGET_OS_WIN32 */
-	// FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC).
-	FILETIME ft;
-	ULARGE_INTEGER li;
-	GetSystemTimeAsFileTime(&ft);
-	li.LowPart = ft.dwLowDateTime;
-	li.HighPart = ft.dwHighDateTime;
-	return li.QuadPart * 100ull;
-#endif /* TARGET_OS_WIN32 */
-}
-
 #if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) \
 		|| TARGET_OS_WIN32
 DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data = {
@@ -115,7 +93,7 @@
 {
 	int64_t nsec;
 	if (inval) {
-		nsec = inval->tv_sec * 1000000000ll + inval->tv_nsec;
+		nsec = (int64_t)_dispatch_timespec_to_nano(*inval);
 	} else {
 		nsec = (int64_t)_dispatch_get_nanoseconds();
 	}
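
The hunk above swaps a hand-rolled tv_sec/tv_nsec expression for the shared
_dispatch_timespec_to_nano() helper. A sketch of what such a helper
computes, assuming it is the straightforward widening conversion (the real
inline is in libdispatch's internal headers):

#include <stdint.h>
#include <time.h>

static const uint64_t kNsecPerSec = 1000000000ull;

// Widen both fields before combining so a 32-bit tv_sec cannot
// overflow during the multiply.
static inline uint64_t
timespec_to_nano_sketch(struct timespec ts)
{
	return (uint64_t)ts.tv_sec * kNsecPerSec + (uint64_t)ts.tv_nsec;
}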
diff --git a/src/trace.h b/src/trace.h
index d73ff3f..4d0c13a 100644
--- a/src/trace.h
+++ b/src/trace.h
@@ -132,7 +132,7 @@
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
-		dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
+		dispatch_object_t _tail, dispatch_qos_t qos, unsigned int n)
 {
 	if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
 		struct dispatch_object_s *dou = _head._do;
@@ -141,20 +141,20 @@
 		} while (dou != _tail._do && (dou = dou->do_next));
 	}
 	_dispatch_introspection_queue_push_list(dq, _head, _tail);
-	_dispatch_queue_push_list(dq, _head, _tail, pp, n);
+	_dispatch_queue_push_list(dq, _head, _tail, qos, n);
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
 _dispatch_trace_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail,
-		pthread_priority_t pp, dispatch_wakeup_flags_t flags)
+		dispatch_qos_t qos, dispatch_wakeup_flags_t flags)
 {
 	if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
 		struct dispatch_object_s *dou = _tail._do;
 		_dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH);
 	}
 	_dispatch_introspection_queue_push(dq, _tail);
-	_dispatch_queue_push_inline(dq, _tail, pp, flags);
+	_dispatch_queue_push_inline(dq, _tail, qos, flags);
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -189,7 +189,7 @@
 
 #if DISPATCH_USE_DTRACE
 static inline dispatch_function_t
-_dispatch_trace_timer_function(dispatch_source_refs_t dr)
+_dispatch_trace_timer_function(dispatch_timer_source_refs_t dr)
 {
 	dispatch_continuation_t dc;
 	dc = os_atomic_load(&dr->ds_handler[DS_EVENT_HANDLER], relaxed);
@@ -198,12 +198,12 @@
 
 DISPATCH_ALWAYS_INLINE
 static inline dispatch_trace_timer_params_t
-_dispatch_trace_timer_params(uintptr_t ident,
+_dispatch_trace_timer_params(dispatch_clock_t clock,
 		struct dispatch_timer_source_s *values, uint64_t deadline,
 		dispatch_trace_timer_params_t params)
 {
-	#define _dispatch_trace_time2nano3(t) (DISPATCH_TIMER_KIND(ident) \
-			== DISPATCH_TIMER_KIND_MACH ? _dispatch_time_mach2nano(t) : (t))
+	#define _dispatch_trace_time2nano3(t) \
+			(clock == DISPATCH_CLOCK_MACH ? _dispatch_time_mach2nano(t) : (t))
 	#define _dispatch_trace_time2nano2(v, t) ({ uint64_t _t = (t); \
 			(v) >= INT64_MAX ? -1ll : (int64_t)_dispatch_trace_time2nano3(_t);})
 	#define _dispatch_trace_time2nano(v) ({ uint64_t _t; \
@@ -212,14 +212,13 @@
 	if (deadline) {
 		params->deadline = (int64_t)deadline;
 	} else {
-		uint64_t now = (DISPATCH_TIMER_KIND(ident) ==
-				DISPATCH_TIMER_KIND_MACH ? _dispatch_absolute_time() :
-				 _dispatch_get_nanoseconds());
+		uint64_t now = _dispatch_time_now(clock);
 		params->deadline = _dispatch_trace_time2nano2(values->target,
 				values->target < now ? 0 : values->target - now);
 	}
+	uint64_t leeway = values->deadline - values->target;
 	params->interval = _dispatch_trace_time2nano(values->interval);
-	params->leeway = _dispatch_trace_time2nano(values->leeway);
+	params->leeway = _dispatch_trace_time2nano(leeway);
 	return params;
 }
 
@@ -232,33 +231,34 @@
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_trace_timer_configure(dispatch_source_t ds, uintptr_t ident,
+_dispatch_trace_timer_configure(dispatch_source_t ds, dispatch_clock_t clock,
 		struct dispatch_timer_source_s *values)
 {
+	dispatch_timer_source_refs_t dr = ds->ds_timer_refs;
 	struct dispatch_trace_timer_params_s params;
-	DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds->ds_refs),
-			_dispatch_trace_timer_params(ident, values, 0,
-			&params));
+	DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(dr),
+			_dispatch_trace_timer_params(clock, values, 0, &params));
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_trace_timer_program(dispatch_source_refs_t dr, uint64_t deadline)
+_dispatch_trace_timer_program(dispatch_timer_source_refs_t dr, uint64_t deadline)
 {
 	if (slowpath(DISPATCH_TIMER_PROGRAM_ENABLED())) {
 		if (deadline && dr) {
 			dispatch_source_t ds = _dispatch_source_from_refs(dr);
+			dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(dr->du_ident);
 			struct dispatch_trace_timer_params_s params;
 			DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(dr),
-					_dispatch_trace_timer_params(ds->ds_ident_hack,
-					&ds_timer(dr), deadline, &params));
+					_dispatch_trace_timer_params(clock, &dr->dt_timer,
+					deadline, &params));
 		}
 	}
 }
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_trace_timer_wake(dispatch_source_refs_t dr)
+_dispatch_trace_timer_wake(dispatch_timer_source_refs_t dr)
 {
 	if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) {
 		if (dr) {
@@ -270,7 +270,7 @@
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data,
+_dispatch_trace_timer_fire(dispatch_timer_source_refs_t dr, unsigned long data,
 		unsigned long missed)
 {
 	if (slowpath(DISPATCH_TIMER_FIRE_ENABLED())) {
@@ -284,8 +284,8 @@
 #else
 
 #define _dispatch_trace_timer_configure_enabled() false
-#define _dispatch_trace_timer_configure(ds, ident, values) \
-		do { (void)(ds); (void)(ident); (void)(values); } while(0)
+#define _dispatch_trace_timer_configure(ds, clock, values) \
+		do { (void)(ds); (void)(clock); (void)(values); } while(0)
 #define _dispatch_trace_timer_program(dr, deadline) \
 		do { (void)(dr); (void)(deadline); } while(0)
 #define _dispatch_trace_timer_wake(dr) \
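
One behavioral change in the trace.h hunks above: the DTrace leeway
parameter is now derived as values->deadline - values->target rather than
read from a stored leeway field, and the "time until deadline" fallback
clamps at zero. A small sketch of both computations (the struct and names
here are illustrative stand-ins for struct dispatch_timer_source_s):

#include <stdint.h>

// Illustrative stand-in for dispatch_timer_source_s.
struct timer_values_sketch {
	uint64_t target;   // earliest allowed firing time
	uint64_t deadline; // latest allowed firing time
};

// Leeway is the width of the firing window; it is no longer stored.
static uint64_t
leeway_sketch(const struct timer_values_sketch *v)
{
	return v->deadline - v->target;
}

// Time remaining until the target, clamped at zero once it has passed,
// mirroring the params->deadline fallback in the hunk above.
static uint64_t
remaining_sketch(const struct timer_values_sketch *v, uint64_t now)
{
	return v->target < now ? 0 : v->target - now;
}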
diff --git a/src/voucher.c b/src/voucher.c
index ee04e3b..f1ec92e 100644
--- a/src/voucher.c
+++ b/src/voucher.c
@@ -419,7 +419,7 @@
 
 	size = _voucher_mach_recipe_init(mvar, voucher, kvb, voucher->v_priority);
 	kr = _voucher_create_mach_voucher(mvar, size, &kv);
-	if (dispatch_assume_zero(kr) || !kv){
+	if (dispatch_assume_zero(kr) || !kv) {
 		return MACH_VOUCHER_NULL;
 	}
 	if (!os_atomic_cmpxchgv2o(voucher, v_ipc_kvoucher, MACH_VOUCHER_NULL,
@@ -453,7 +453,7 @@
 
 	size = _voucher_mach_recipe_init(mvar, voucher, kvb, priority);
 	kr = _voucher_create_mach_voucher(mvar, size, &kv);
-	if (dispatch_assume_zero(kr) || !kv){
+	if (dispatch_assume_zero(kr) || !kv) {
 		return MACH_VOUCHER_NULL;
 	}
 	_dispatch_kvoucher_debug("create with priority from voucher[%p]", kv,
@@ -635,7 +635,7 @@
 	};
 	kr = _voucher_create_mach_voucher(importance_remove_recipe,
 			sizeof(importance_remove_recipe), &kv);
-	if (dispatch_assume_zero(kr) || !kv){
+	if (dispatch_assume_zero(kr) || !kv) {
 		if (ov->v_ipc_kvoucher) return NULL;
 		kv = MACH_VOUCHER_NULL;
 	}
@@ -684,7 +684,7 @@
 	};
 	kr = _voucher_create_mach_voucher(&accounting_copy_recipe,
 			sizeof(accounting_copy_recipe), &kv);
-	if (dispatch_assume_zero(kr) || !kv){
+	if (dispatch_assume_zero(kr) || !kv) {
 		return NULL;
 	}
 	voucher_t v = _voucher_find_and_retain(kv);
@@ -806,10 +806,9 @@
 {
 	dispatch_mach_handler_function_t handler = NULL;
 
-	if (_voucher_libtrace_hooks && _voucher_libtrace_hooks->vah_version >= 2) {
+	if (_voucher_libtrace_hooks) {
 		handler = _voucher_libtrace_hooks->vah_debug_channel_handler;
 	}
-
 	if (!handler) return;
 
 	dispatch_mach_t dm;
@@ -989,6 +988,9 @@
 void
 voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks)
 {
+	if (hooks->vah_version < 3) {
+		DISPATCH_CLIENT_CRASH(hooks->vah_version, "unsupported vah_version");
+	}
 	if (!os_atomic_cmpxchg(&_voucher_libtrace_hooks, NULL,
 			hooks, relaxed)) {
 		DISPATCH_CLIENT_CRASH(_voucher_libtrace_hooks,
@@ -1126,30 +1128,23 @@
 }
 
 voucher_t
-voucher_activity_create(firehose_tracepoint_id_t trace_id,
-		voucher_t base, firehose_activity_flags_t flags, uint64_t location)
-{
-	return voucher_activity_create_with_location(&trace_id, base, flags, location);
-}
-
-voucher_t
-voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
-		voucher_t base, firehose_activity_flags_t flags, uint64_t location)
+voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id,
+		voucher_t base, firehose_activity_flags_t flags,
+		const void *pubdata, size_t publen)
 {
 	firehose_activity_id_t va_id = 0, current_id = 0, parent_id = 0;
 	firehose_tracepoint_id_u ftid = { .ftid_value = *trace_id };
-	uint16_t pubsize = sizeof(va_id) + sizeof(location);
 	uint64_t creator_id = 0;
+	uint16_t pubsize;
 	voucher_t ov = _voucher_get();
 	voucher_t v;
 
+	if (os_add_overflow(sizeof(va_id), publen, &pubsize) || pubsize > 128) {
+		DISPATCH_CLIENT_CRASH(pubsize, "Absurd publen");
+	}
 	if (base == VOUCHER_CURRENT) {
 		base = ov;
 	}
-	if (_voucher_activity_disabled()) {
-		*trace_id = 0;
-		return base ? _voucher_retain(base) : VOUCHER_NULL;
-	}
 
 	FIREHOSE_TRACE_ID_CLEAR_FLAG(ftid, base, has_unique_pid);
 	if (ov && (current_id = ov->v_activity)) {
@@ -1179,6 +1174,10 @@
 	v->v_activity_creator = _voucher_unique_pid;
 	v->v_parent_activity = parent_id;
 
+	if (_voucher_activity_disabled()) {
+		goto done;
+	}
+
 	static const firehose_stream_t streams[2] = {
 		firehose_stream_metadata,
 		firehose_stream_persist,
@@ -1202,13 +1201,22 @@
 			pubptr = _dispatch_memappend(pubptr, &parent_id);
 		}
 		pubptr = _dispatch_memappend(pubptr, &va_id);
-		pubptr = _dispatch_memappend(pubptr, &location);
+		pubptr = _dispatch_mempcpy(pubptr, pubdata, publen);
 		_voucher_activity_tracepoint_flush(ft, ftid);
 	}
+done:
 	*trace_id = ftid.ftid_value;
 	return v;
 }
 
+voucher_t
+voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
+		voucher_t base, firehose_activity_flags_t flags, uint64_t loc)
+{
+	return voucher_activity_create_with_data(trace_id, base, flags,
+			&loc, sizeof(loc));
+}
+
 void
 _voucher_activity_swap(firehose_activity_id_t old_id,
 		firehose_activity_id_t new_id)
@@ -1276,22 +1284,22 @@
 	firehose_buffer_stream_flush(_firehose_task_buffer, stream);
 }
 
-DISPATCH_ALWAYS_INLINE
-static inline firehose_tracepoint_id_t
-_voucher_activity_trace(firehose_stream_t stream,
-		firehose_tracepoint_id_u ftid, uint64_t stamp,
-		const void *pubdata, size_t publen,
-		const void *privdata, size_t privlen)
+DISPATCH_NOINLINE
+firehose_tracepoint_id_t
+voucher_activity_trace_v(firehose_stream_t stream,
+		firehose_tracepoint_id_t trace_id, uint64_t stamp,
+		const struct iovec *iov, size_t publen, size_t privlen)
 {
+	firehose_tracepoint_id_u ftid = { .ftid_value = trace_id };
 	const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
 	const size_t _firehose_chunk_payload_size =
-			sizeof(((struct firehose_buffer_chunk_s *)0)->fbc_data);
+			sizeof(((struct firehose_chunk_s *)0)->fc_data);
 
 	if (_voucher_activity_disabled()) return 0;
 
 	firehose_tracepoint_t ft;
 	firehose_activity_id_t va_id = 0;
-	firehose_buffer_chunk_t fbc;
+	firehose_chunk_t fc;
 	uint8_t *privptr, *pubptr;
 	size_t pubsize = publen;
 	voucher_t ov = _voucher_get();
@@ -1331,38 +1339,52 @@
 		pubptr = _dispatch_memappend(pubptr, &creator_pid);
 	}
 	if (privlen) {
-		fbc = firehose_buffer_chunk_for_address(ft);
+		fc = firehose_buffer_chunk_for_address(ft);
 		struct firehose_buffer_range_s range = {
-			.fbr_offset = (uint16_t)(privptr - fbc->fbc_start),
+			.fbr_offset = (uint16_t)(privptr - fc->fc_start),
 			.fbr_length = (uint16_t)privlen,
 		};
 		pubptr = _dispatch_memappend(pubptr, &range);
-		_dispatch_mempcpy(privptr, privdata, privlen);
 	}
-	_dispatch_mempcpy(pubptr, pubdata, publen);
+	while (publen > 0) {
+		pubptr = _dispatch_mempcpy(pubptr, iov->iov_base, iov->iov_len);
+		if (unlikely(os_sub_overflow(publen, iov->iov_len, &publen))) {
+			DISPATCH_CLIENT_CRASH(0, "Invalid arguments");
+		}
+		iov++;
+	}
+	while (privlen > 0) {
+		privptr = _dispatch_mempcpy(privptr, iov->iov_base, iov->iov_len);
+		if (unlikely(os_sub_overflow(privlen, iov->iov_len, &privlen))) {
+			DISPATCH_CLIENT_CRASH(0, "Invalid arguments");
+		}
+		iov++;
+	}
 	_voucher_activity_tracepoint_flush(ft, ftid);
 	return ftid.ftid_value;
 }
 
 firehose_tracepoint_id_t
 voucher_activity_trace(firehose_stream_t stream,
-		firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+		firehose_tracepoint_id_t trace_id, uint64_t stamp,
 		const void *pubdata, size_t publen)
 {
-	firehose_tracepoint_id_u ftid = { .ftid_value = trace_id };
-	return _voucher_activity_trace(stream, ftid, timestamp, pubdata, publen,
-			NULL, 0);
+	struct iovec iov = { (void *)pubdata, publen };
+	return voucher_activity_trace_v(stream, trace_id, stamp, &iov, publen, 0);
 }
 
 firehose_tracepoint_id_t
 voucher_activity_trace_with_private_strings(firehose_stream_t stream,
-		firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+		firehose_tracepoint_id_t trace_id, uint64_t stamp,
 		const void *pubdata, size_t publen,
 		const void *privdata, size_t privlen)
 {
-	firehose_tracepoint_id_u ftid = { .ftid_value = trace_id };
-	return _voucher_activity_trace(stream, ftid, timestamp,
-			pubdata, publen, privdata, privlen);
+	struct iovec iov[2] = {
+		{ (void *)pubdata, publen },
+		{ (void *)privdata, privlen },
+	};
+	return voucher_activity_trace_v(stream, trace_id, stamp,
+			iov, publen, privlen);
 }
 
 #pragma mark -
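
The voucher.c rewrite above funnels both public-only and public+private
tracepoints through the new iovec-based voucher_activity_trace_v(), whose
gather loops validate that publen/privlen exactly cover the leading iovec
entries. A standalone sketch of that gather-copy-with-validation pattern
(__builtin_sub_overflow stands in for os_sub_overflow; names are
illustrative):

#include <stdint.h>
#include <string.h>
#include <sys/uio.h>

// Copy iovec entries into dst until publen is consumed. The checked
// subtraction catches a publen that does not match the iovec lengths
// (size_t underflow); libdispatch crashes the client in that case.
static int
gather_copy_sketch(uint8_t *dst, const struct iovec *iov, size_t publen)
{
	while (publen > 0) {
		memcpy(dst, iov->iov_base, iov->iov_len);
		dst += iov->iov_len;
		if (__builtin_sub_overflow(publen, iov->iov_len, &publen)) {
			return -1; // inconsistent lengths
		}
		iov++;
	}
	return 0;
}

With this in place, voucher_activity_trace() reduces to a single-iovec
wrapper, exactly as the final hunk above shows.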
diff --git a/src/voucher_internal.h b/src/voucher_internal.h
index b34ad46..ae4e92a 100644
--- a/src/voucher_internal.h
+++ b/src/voucher_internal.h
@@ -54,7 +54,7 @@
  * @result
  * The newly created voucher object.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
 voucher_t
 voucher_create(voucher_recipe_t recipe);
@@ -78,7 +78,7 @@
  * @result
  * A mach voucher port.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+API_AVAILABLE(macos(10.10), ios(8.0))
 OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
 mach_voucher_t
 voucher_get_mach_voucher(voucher_t voucher);
@@ -206,12 +206,7 @@
 		_dispatch_debug("voucher[%p]: " msg, v, ##__VA_ARGS__)
 #define _dispatch_kvoucher_debug(msg, kv, ...) \
 		_dispatch_debug("kvoucher[0x%08x]: " msg, kv, ##__VA_ARGS__)
-#if DISPATCH_MACHPORT_DEBUG
-#define _dispatch_voucher_debug_machport(name) \
-		dispatch_debug_machport((name), __func__)
-#else
-#define _dispatch_voucher_debug_machport(name) ((void)(name))
-#endif
+#define _dispatch_voucher_debug_machport(name) _dispatch_debug_machport(name)
 #else
 #define _dispatch_voucher_debug(msg, v, ...)
 #define _dispatch_kvoucher_debug(msg, kv, ...)
diff --git a/xcodeconfig/libdispatch-resolver_iphoneos.order b/xcodeconfig/libdispatch-resolver_iphoneos.order
deleted file mode 100644
index eea9845..0000000
--- a/xcodeconfig/libdispatch-resolver_iphoneos.order
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Copyright (c) 2013 Apple Inc. All rights reserved.
-#
-# @APPLE_APACHE_LICENSE_HEADER_START@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# @APPLE_APACHE_LICENSE_HEADER_END@
-#
-
diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases
index 65dfd04..d8a5113 100644
--- a/xcodeconfig/libdispatch.aliases
+++ b/xcodeconfig/libdispatch.aliases
@@ -19,8 +19,9 @@
 #
 
 __dispatch_data_destructor_vm_deallocate __dispatch_data_destructor_munmap
-__dispatch_source_type_memorypressure __dispatch_source_type_memorystatus
 __dispatch_queue_attrs __dispatch_queue_attr_concurrent
+__dispatch_source_type_memorypressure __dispatch_source_type_memorystatus
 _dispatch_assert_queue$V2 _dispatch_assert_queue
 _dispatch_assert_queue_not$V2 _dispatch_assert_queue_not
 _dispatch_queue_create_with_target$V2 _dispatch_queue_create_with_target
+_dispatch_source_set_timer __dispatch_source_set_runloop_timer_4CF
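
Each line of this .aliases file pairs two exported symbol names so that one
resolves to the other's implementation at link time; Mach-O does not
support __attribute__((alias)), which is why a linker alias list is used
here. On ELF targets the equivalent effect can be sketched directly in C
(names below are illustrative, not libdispatch entry points):

// ELF-only sketch of symbol aliasing; Mach-O builds like this one
// must use the linker alias list instead.
void new_entry_point(void) { /* current implementation */ }
void old_entry_point(void) __attribute__((alias("new_entry_point")));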
diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig
index d5b08d6..39d3e5f 100644
--- a/xcodeconfig/libdispatch.xcconfig
+++ b/xcodeconfig/libdispatch.xcconfig
@@ -40,6 +40,7 @@
 CLANG_LINK_OBJC_RUNTIME = NO
 GCC_C_LANGUAGE_STANDARD = gnu11
 CLANG_CXX_LANGUAGE_STANDARD = gnu++11
+ENABLE_STRICT_OBJC_MSGSEND = YES
 GCC_ENABLE_CPP_EXCEPTIONS = NO
 GCC_STRICT_ALIASING = YES
 GCC_SYMBOLS_PRIVATE_EXTERN = YES
@@ -49,20 +50,33 @@
 GCC_WARN_ABOUT_RETURN_TYPE = YES
 GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES
 GCC_WARN_ABOUT_MISSING_NEWLINE = YES
-GCC_WARN_UNUSED_VARIABLE = YES
-GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES
 GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS = YES
+GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES
 GCC_WARN_SIGN_COMPARE = YES
+GCC_WARN_STRICT_SELECTOR_MATCH = YES
+GCC_WARN_UNDECLARED_SELECTOR = YES
 GCC_WARN_UNINITIALIZED_AUTOS = YES
+GCC_WARN_UNKNOWN_PRAGMAS = YES
+GCC_WARN_UNUSED_FUNCTION = YES
+GCC_WARN_UNUSED_LABEL = YES
+GCC_WARN_UNUSED_PARAMETER = YES
+GCC_WARN_UNUSED_VARIABLE = YES
+CLANG_WARN_ASSIGN_ENUM = YES
+CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES
+CLANG_WARN_DOCUMENTATION_COMMENTS = YES
+CLANG_WARN__DUPLICATE_METHOD_MATCH = YES
 CLANG_WARN_EMPTY_BODY = YES
 CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES
+CLANG_WARN_INFINITE_RECURSION = YES
+CLANG_WARN_OBJC_IMPLICIT_ATOMIC_PROPERTIES = YES
+CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS = YES
 CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES
-CLANG_WARN_DOCUMENTATION_COMMENTS = YES
 GCC_TREAT_WARNINGS_AS_ERRORS = YES
 GCC_OPTIMIZATION_LEVEL = s
 GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 $(DISPATCH_PREPROCESSOR_DEFINITIONS)
 GCC_NO_COMMON_BLOCKS = YES
-WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-packed -Wno-unknown-warning-option
+WARNING_CFLAGS = -Wall -Wextra -Warray-bounds-pointer-arithmetic -Watomic-properties -Wcomma -Wconditional-uninitialized -Wcovered-switch-default -Wdate-time -Wdeprecated -Wdouble-promotion -Wduplicate-enum -Wexpansion-to-defined -Wfloat-equal -Widiomatic-parentheses -Wignored-qualifiers -Wimplicit-fallthrough -Wnullable-to-nonnull-conversion -Wobjc-interface-ivars -Wover-aligned -Wpacked -Wpointer-arith -Wselector -Wstatic-in-inline -Wsuper-class-method-mismatch -Wswitch-enum -Wtautological-compare -Wunguarded-availability -Wunused -Wno-unknown-warning-option $(NO_WARNING_CFLAGS)
+NO_WARNING_CFLAGS = -Wno-pedantic -Wno-bad-function-cast -Wno-c++-compat -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-cast-align -Wno-cast-qual -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-format-nonliteral -Wno-missing-variable-declarations -Wno-old-style-cast -Wno-padded -Wno-reserved-id-macro -Wno-shift-sign-overflow -Wno-undef -Wno-unreachable-code-aggressive -Wno-unused-macros -Wno-used-but-marked-unused -Wno-vla $(NO_WARNING_CFLAGS_OS_ATOMIC)
 OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders
 OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions
 OTHER_CFLAGS_normal = -momit-leaf-frame-pointer
diff --git a/xcodeconfig/libdispatch_iphoneos.order b/xcodeconfig/libdispatch_iphoneos.order
deleted file mode 100644
index eea9845..0000000
--- a/xcodeconfig/libdispatch_iphoneos.order
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Copyright (c) 2013 Apple Inc. All rights reserved.
-#
-# @APPLE_APACHE_LICENSE_HEADER_START@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# @APPLE_APACHE_LICENSE_HEADER_END@
-#
-
diff --git a/xcodescripts/install-manpages.sh b/xcodescripts/install-manpages.sh
index d9e28af..db13163 100755
--- a/xcodescripts/install-manpages.sh
+++ b/xcodescripts/install-manpages.sh
@@ -64,7 +64,7 @@
 	ln -f dispatch_group_create.3 ${m}.3
 done
 
-for m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume \
+for m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume dispatch_activate \
 		dispatch_get_context dispatch_set_context dispatch_set_finalizer_f; do
 	ln -f dispatch_object.3 ${m}.3
 done