Merge pull request #334 from compnerd/reserved-asm

use reserved spellings for inline assembly and typeof
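
The plain `asm` and `typeof` spellings are GNU extensions and are only
recognized when the compiler is in a GNU language mode; the reserved
spellings `__asm__`, `__volatile__`, and `__typeof__` are accepted in every
mode, so the headers and sources keep building under strict ISO settings as
well. A minimal sketch of the difference (standalone illustration only, not
part of this diff; the file and symbol names are made up):

    /* strict.c -- e.g. `clang -std=c11 -c strict.c` */
    extern int counter __asm__("_counter");  /* reserved spelling: OK in all modes */
    __typeof__(counter) shadow;              /* likewise always available */

    /*
     * The plain spellings are rejected under -std=c11 and only work in a
     * GNU mode such as -std=gnu11:
     *
     *     extern int counter asm("_counter");
     *     typeof(counter) shadow;
     */
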
diff --git a/os/object_private.h b/os/object_private.h
index 215c3d1..3b46322 100644
--- a/os/object_private.h
+++ b/os/object_private.h
@@ -86,7 +86,7 @@
 #endif
 #define OS_OBJECT_OBJC_CLASS_DECL(name) \
 		extern void *OS_OBJECT_CLASS_SYMBOL(name) \
-				asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name)))
+				__asm__(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name)))
 #else
 #define OS_OBJECT_HAVE_OBJC1 0
 #define OS_OBJECT_HAVE_OBJC2 0
diff --git a/src/apply.c b/src/apply.c
index 6f44cf9..c682824 100644
--- a/src/apply.c
+++ b/src/apply.c
@@ -285,7 +285,7 @@
 		.dc_ctxt = ctxt,
 		.dc_data = dq,
 	};
-	dispatch_apply_t da = (typeof(da))_dispatch_continuation_alloc();
+	dispatch_apply_t da = (__typeof__(da))_dispatch_continuation_alloc();
 	da->da_index = 0;
 	da->da_todo = iterations;
 	da->da_iterations = iterations;
diff --git a/src/block.cpp b/src/block.cpp
index 6936ada..8f8113a 100644
--- a/src/block.cpp
+++ b/src/block.cpp
@@ -109,11 +109,7 @@
 // The compiler hides the name of the function it generates, and changes it if
 // we try to reference it directly, but the linker still sees it.
 extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *)
-#if defined(__linux__) || defined(__FreeBSD__)
-		asm("___dispatch_block_create_block_invoke");
-#else
-		asm("____dispatch_block_create_block_invoke");
-#endif
+		__asm__(OS_STRINGIFY(__USER_LABEL_PREFIX__) "___dispatch_block_create_block_invoke");
 void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE;
 }
 
diff --git a/src/event/event.c b/src/event/event.c
index 34abbf0..c94241e 100644
--- a/src/event/event.c
+++ b/src/event/event.c
@@ -49,7 +49,7 @@
 	du->du_can_be_wlh = dst->dst_per_trigger_qos;
 	du->du_ident = (uint32_t)handle;
 	du->du_filter = dst->dst_filter;
-	du->du_fflags = (typeof(du->du_fflags))mask;
+	du->du_fflags = (__typeof__(du->du_fflags))mask;
 	if (dst->dst_flags & EV_UDATA_SPECIFIC) {
 		du->du_is_direct = true;
 	}
diff --git a/src/event/event_config.h b/src/event/event_config.h
index 02508c0..871a3e0 100644
--- a/src/event/event_config.h
+++ b/src/event/event_config.h
@@ -60,7 +60,7 @@
 
 #if DISPATCH_TIMER_ASSERTIONS
 #define DISPATCH_TIMER_ASSERT(a, op, b, text) ({ \
-		typeof(a) _a = (a); \
+		__typeof__(a) _a = (a); \
 		if (unlikely(!(_a op (b)))) { \
 			DISPATCH_CLIENT_CRASH(_a, "Timer: " text); \
 		} \
diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c
index e7dafc7..29c20e1 100644
--- a/src/event/event_kevent.c
+++ b/src/event/event_kevent.c
@@ -32,7 +32,7 @@
 #define DISPATCH_KEVENT_MUXED_MARKER  1ul
 #define DISPATCH_MACH_AUDIT_TOKEN_PID (5)
 
-#define dispatch_kevent_udata_t  typeof(((dispatch_kevent_t)NULL)->udata)
+#define dispatch_kevent_udata_t  __typeof__(((dispatch_kevent_t)NULL)->udata)
 
 typedef struct dispatch_muxnote_s {
 	TAILQ_ENTRY(dispatch_muxnote_s) dmn_list;
@@ -738,9 +738,9 @@
 		.flags  = flags,
 		.udata  = (dispatch_kevent_udata_t)du,
 		.fflags = du->du_fflags | dst->dst_fflags,
-		.data   = (typeof(dk->data))dst->dst_data,
+		.data   = (__typeof__(dk->data))dst->dst_data,
 #if DISPATCH_USE_KEVENT_QOS
-		.qos    = (typeof(dk->qos))pp,
+		.qos    = (__typeof__(dk->qos))pp,
 #endif
 	};
 	(void)pp; // if DISPATCH_USE_KEVENT_QOS == 0
@@ -1778,7 +1778,7 @@
 		mach_port_mscount_t notify_sync)
 {
 	mach_port_t previous, port = (mach_port_t)dmn->dmn_kev.ident;
-	typeof(dmn->dmn_kev.data) prev = dmn->dmn_kev.data;
+	__typeof__(dmn->dmn_kev.data) prev = dmn->dmn_kev.data;
 	kern_return_t kr, krr = 0;
 
 	// Update notification registration state.
diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c
index 3bb790c..36a5b24 100644
--- a/src/firehose/firehose_buffer.c
+++ b/src/firehose/firehose_buffer.c
@@ -27,11 +27,11 @@
 
 #define DISPATCH_PURE_C 1
 #define _safe_cast_to_long(x) \
-		({ _Static_assert(sizeof(typeof(x)) <= sizeof(long), \
+		({ _Static_assert(sizeof(__typeof__(x)) <= sizeof(long), \
 				"__builtin_expect doesn't support types wider than long"); \
 				(long)(x); })
-#define fastpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), ~0l))
-#define slowpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), 0l))
+#define fastpath(x) ((__typeof__(x))__builtin_expect(_safe_cast_to_long(x), ~0l))
+#define slowpath(x) ((__typeof__(x))__builtin_expect(_safe_cast_to_long(x), 0l))
 #define os_likely(x) __builtin_expect(!!(x), 1)
 #define os_unlikely(x) __builtin_expect(!!(x), 0)
 #define likely(x)   __builtin_expect(!!(x), 1)
@@ -54,7 +54,7 @@
 #endif
 
 #define _dispatch_wait_until(c) ({ \
-		typeof(c) _c; \
+		__typeof__(c) _c; \
 		for (;;) { \
 			if (likely(_c = (c))) break; \
 			dispatch_hardware_pause(); \
diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h
index 3939ee2..51f8c68 100644
--- a/src/firehose/firehose_inline_internal.h
+++ b/src/firehose/firehose_inline_internal.h
@@ -27,7 +27,7 @@
 		})
 
 #define firehose_atomic_max2o(p, f, v, m)   ({ \
-		typeof((p)->f) _old; \
+		__typeof__((p)->f) _old; \
 		firehose_atomic_maxv2o(p, f, v, &_old, m); \
 	})
 
diff --git a/src/init.c b/src/init.c
index 26612c0..4ef733d 100644
--- a/src/init.c
+++ b/src/init.c
@@ -897,7 +897,7 @@
 _dispatch_temporary_resource_shortage(void)
 {
 	sleep(1);
-	asm("");  // prevent tailcall
+	__asm__ __volatile__("");  // prevent tailcall
 }
 
 void *
diff --git a/src/inline_internal.h b/src/inline_internal.h
index 4103c68..e857abe 100644
--- a/src/inline_internal.h
+++ b/src/inline_internal.h
@@ -1484,7 +1484,7 @@
 // type_t * {volatile,const,_Atomic,...} -> type_t *
 // type_t[] -> type_t *
 #define os_unqualified_pointer_type(expr) \
-		typeof(typeof(*(expr)) *)
+		__typeof__(__typeof__(*(expr)) *)
 
 #define os_mpsc_node_type(q, _ns)  \
 		os_unqualified_pointer_type((q)->_ns##_head)
@@ -1525,7 +1525,7 @@
 		_dispatch_wait_until(os_atomic_load2o(_n, _o_next, dependency))
 
 #define os_mpsc_pop_head(q, _ns, head, _o_next)  ({ \
-		typeof(q) _q = (q); \
+		__typeof__(q) _q = (q); \
 		os_mpsc_node_type(_q, _ns) _head = (head), _n; \
 		_n = os_atomic_load2o(_head, _o_next, dependency); \
 		os_atomic_store2o(_q, _ns##_head, _n, relaxed); \
@@ -1540,7 +1540,7 @@
 	})
 
 #define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next)  ({ \
-		typeof(q) _q = (q); \
+		__typeof__(q) _q = (q); \
 		os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \
 		if (unlikely(!_n && \
 				!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed))) { \
@@ -1551,7 +1551,7 @@
 	})
 
 #define os_mpsc_capture_snapshot(q, _ns, tail)  ({ \
-		typeof(q) _q = (q); \
+		__typeof__(q) _q = (q); \
 		os_mpsc_node_type(_q, _ns) _head = os_mpsc_get_head(q, _ns); \
 		os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \
 		/* 22708742: set tail to NULL with release, so that NULL write */ \
@@ -1568,7 +1568,7 @@
 		_n; })
 
 #define os_mpsc_prepend(q, _ns, head, tail, _o_next)  ({ \
-		typeof(q) _q = (q); \
+		__typeof__(q) _q = (q); \
 		os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \
 		os_atomic_store2o(_tail, _o_next, NULL, relaxed); \
 		if (unlikely(!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release))) { \
diff --git a/src/internal.h b/src/internal.h
index ef116a8..98e13a3 100644
--- a/src/internal.h
+++ b/src/internal.h
@@ -354,11 +354,11 @@
 /* I wish we had __builtin_expect_range() */
 #if __GNUC__
 #define _safe_cast_to_long(x) \
-		({ _Static_assert(sizeof(typeof(x)) <= sizeof(long), \
+		({ _Static_assert(sizeof(__typeof__(x)) <= sizeof(long), \
 				"__builtin_expect doesn't support types wider than long"); \
 				(long)(x); })
-#define fastpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), ~0l))
-#define slowpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), 0l))
+#define fastpath(x) ((__typeof__(x))__builtin_expect(_safe_cast_to_long(x), ~0l))
+#define slowpath(x) ((__typeof__(x))__builtin_expect(_safe_cast_to_long(x), 0l))
 #define likely(x) __builtin_expect(!!(x), 1)
 #define unlikely(x) __builtin_expect(!!(x), 0)
 #else
@@ -455,7 +455,7 @@
 		if (__builtin_constant_p(e)) { \
 			dispatch_static_assert(e); \
 		} else { \
-			typeof(e) _e = (e); /* always eval 'e' */ \
+			__typeof__(e) _e = (e); /* always eval 'e' */ \
 			if (unlikely(DISPATCH_DEBUG && !_e)) { \
 				_dispatch_abort(__LINE__, (long)_e); \
 			} \
@@ -479,7 +479,7 @@
 		if (__builtin_constant_p(e)) { \
 			dispatch_static_assert(e); \
 		} else { \
-			typeof(e) _e = (e); /* always eval 'e' */ \
+			__typeof__(e) _e = (e); /* always eval 'e' */ \
 			if (unlikely(DISPATCH_DEBUG && _e)) { \
 				_dispatch_abort(__LINE__, (long)_e); \
 			} \
@@ -502,7 +502,7 @@
  */
 #if __GNUC__
 #define dispatch_assume(e) ({ \
-		typeof(e) _e = (e); /* always eval 'e' */ \
+		__typeof__(e) _e = (e); /* always eval 'e' */ \
 		if (unlikely(!_e)) { \
 			if (__builtin_constant_p(e)) { \
 				dispatch_static_assert(e); \
@@ -527,7 +527,7 @@
  */
 #if __GNUC__
 #define dispatch_assume_zero(e) ({ \
-		typeof(e) _e = (e); /* always eval 'e' */ \
+		__typeof__(e) _e = (e); /* always eval 'e' */ \
 		if (unlikely(_e)) { \
 			if (__builtin_constant_p(e)) { \
 				dispatch_static_assert(e); \
@@ -554,7 +554,7 @@
 		if (__builtin_constant_p(e)) { \
 			dispatch_static_assert(e); \
 		} else { \
-			typeof(e) _e = (e); /* always eval 'e' */ \
+			__typeof__(e) _e = (e); /* always eval 'e' */ \
 			if (unlikely(DISPATCH_DEBUG && !_e)) { \
 				_dispatch_log("%s() 0x%lx: " msg, __func__, (long)_e, ##args); \
 				abort(); \
@@ -563,7 +563,7 @@
 	} while (0)
 #else
 #define dispatch_debug_assert(e, msg, args...) do { \
-	typeof(e) _e = (e); /* always eval 'e' */ \
+	__typeof__(e) _e = (e); /* always eval 'e' */ \
 	if (unlikely(DISPATCH_DEBUG && !_e)) { \
 		_dispatch_log("%s() 0x%lx: " msg, __FUNCTION__, _e, ##args); \
 		abort(); \
@@ -594,7 +594,7 @@
 		((dispatch_function_t)((struct Block_layout *)bb)->invoke)
 void *_dispatch_Block_copy(void *block);
 #if __GNUC__
-#define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x))
+#define _dispatch_Block_copy(x) ((__typeof__(x))_dispatch_Block_copy(x))
 #endif
 void _dispatch_call_block_and_release(void *block);
 #endif /* __BLOCKS__ */
diff --git a/src/introspection.c b/src/introspection.c
index 8692a8b..1332adf 100644
--- a/src/introspection.c
+++ b/src/introspection.c
@@ -439,7 +439,7 @@
 		(slowpath(_dispatch_introspection_hooks.h))
 
 #define DISPATCH_INTROSPECTION_HOOK_CALLOUT(h, ...) ({ \
-		typeof(_dispatch_introspection_hooks.h) _h; \
+		__typeof__(_dispatch_introspection_hooks.h) _h; \
 		_h = _dispatch_introspection_hooks.h; \
 		if (slowpath((void*)(_h) != DISPATCH_INTROSPECTION_NO_HOOK)) { \
 			_h(__VA_ARGS__); \
@@ -447,7 +447,7 @@
 
 #define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(h) \
 		DISPATCH_EXPORT void _dispatch_introspection_hook_##h(void) \
-		asm("_dispatch_introspection_hook_" #h); \
+		__asm__("_dispatch_introspection_hook_" #h); \
 		void _dispatch_introspection_hook_##h(void) {}
 
 #define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(h, ...)\
diff --git a/src/io.c b/src/io.c
index 80aa780..6ec45d8 100644
--- a/src/io.c
+++ b/src/io.c
@@ -119,7 +119,7 @@
 };
 
 #define _dispatch_io_Block_copy(x) \
-		((typeof(x))_dispatch_Block_copy((dispatch_block_t)(x)))
+		((__typeof__(x))_dispatch_Block_copy((dispatch_block_t)(x)))
 
 #pragma mark -
 #pragma mark dispatch_io_debug
@@ -211,7 +211,7 @@
 };
 
 #define _dispatch_iocntl_set_default(p, v) do { \
-		dispatch_io_defaults.p = (typeof(dispatch_io_defaults.p))(v); \
+		dispatch_io_defaults.p = (__typeof__(dispatch_io_defaults.p))(v); \
 	} while (0)
 
 void
@@ -1829,7 +1829,7 @@
 {
 	// On stream queue
 	dispatch_operation_t op, tmp;
-	typeof(*stream->operations) *operations;
+	__typeof__(*stream->operations) *operations;
 	operations = &stream->operations[DISPATCH_IO_RANDOM];
 	TAILQ_FOREACH_SAFE(op, operations, operation_list, tmp) {
 		if (!channel || op->channel == channel) {
diff --git a/src/object_internal.h b/src/object_internal.h
index 4504f65..94cb463 100644
--- a/src/object_internal.h
+++ b/src/object_internal.h
@@ -50,7 +50,7 @@
 #if USE_OBJC
 #define DISPATCH_OBJC_CLASS_DECL(name) \
 		extern void *DISPATCH_CLASS_SYMBOL(name) \
-				asm(DISPATCH_CLASS_RAW_SYMBOL_NAME(name))
+				__asm__(DISPATCH_CLASS_RAW_SYMBOL_NAME(name))
 #endif
 
 // define a new proper class
@@ -65,7 +65,7 @@
 		}; \
 		OS_OBJECT_EXTRA_VTABLE_DECL(name, name) \
 		extern const struct name##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) \
-				asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name)))
+				__asm__(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name)))
 
 #if OS_OBJECT_SWIFT3
 #define OS_OBJECT_INTERNAL_CLASS_DECL(name, super, ...) \
@@ -101,7 +101,7 @@
 		struct name##_s; \
 		OS_OBJECT_EXTRA_VTABLE_DECL(name, super) \
 		extern const struct super##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) \
-				asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name)))
+				__asm__(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name)))
 
 #define DISPATCH_SUBCLASS_DECL(name, super) \
 		OS_OBJECT_SUBCLASS_DECL(dispatch_##name, super)
@@ -590,7 +590,7 @@
  *   reached -1.
  */
 #define _os_atomic_refcnt_perform2o(o, f, op, n, m)   ({ \
-		typeof(o) _o = (o); \
+		__typeof__(o) _o = (o); \
 		int _ref_cnt = _o->f; \
 		if (fastpath(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \
 			_ref_cnt = os_atomic_##op##2o(_o, f, n, m); \
diff --git a/src/queue.c b/src/queue.c
index 7725d0d..3d0f10c 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -2248,9 +2248,9 @@
 			sizeof(struct dispatch_root_queue_context_s) +
 			sizeof(struct dispatch_pthread_root_queue_context_s));
 	qc = (void*)dq + dqs;
-	dispatch_assert((uintptr_t)qc % _Alignof(typeof(*qc)) == 0);
+	dispatch_assert((uintptr_t)qc % _Alignof(__typeof__(*qc)) == 0);
 	pqc = (void*)qc + sizeof(struct dispatch_root_queue_context_s);
-	dispatch_assert((uintptr_t)pqc % _Alignof(typeof(*pqc)) == 0);
+	dispatch_assert((uintptr_t)pqc % _Alignof(__typeof__(*pqc)) == 0);
 	if (label) {
 		const char *tmp = _dispatch_strdup_if_mutable(label);
 		if (tmp != label) {
diff --git a/src/shims.h b/src/shims.h
index af3bef0..4a78bfc 100644
--- a/src/shims.h
+++ b/src/shims.h
@@ -247,7 +247,7 @@
 
 #if __has_feature(c_static_assert)
 #define __dispatch_is_array(x) \
-	_Static_assert(!__builtin_types_compatible_p(typeof((x)[0]) *, typeof(x)), \
+	_Static_assert(!__builtin_types_compatible_p(__typeof__((x)[0]) *, __typeof__(x)), \
 				#x " isn't an array")
 #define countof(x) \
 	({ __dispatch_is_array(x); sizeof(x) / sizeof((x)[0]); })
diff --git a/src/shims/atomic.h b/src/shims/atomic.h
index 5c9ce5a..60f2891 100644
--- a/src/shims/atomic.h
+++ b/src/shims/atomic.h
@@ -43,11 +43,11 @@
 #define os_atomic(type) type _Atomic
 
 #define _os_atomic_c11_atomic(p) \
-		((typeof(*(p)) _Atomic *)(p))
+		((__typeof__(*(p)) _Atomic *)(p))
 
 // This removes the _Atomic and volatile qualifiers on the type of *p
 #define _os_atomic_basetypeof(p) \
-		typeof(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed))
+		__typeof__(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed))
 
 #define os_atomic_load(p, m) \
 		atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m)
@@ -71,7 +71,7 @@
 #define _os_atomic_c11_op(p, v, m, o, op) \
 		({ _os_atomic_basetypeof(p) _v = (v), _r = \
 		atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \
-		memory_order_##m); (typeof(*(p)))(_r op _v); })
+		memory_order_##m); (__typeof__(*(p)))(_r op _v); })
 #define _os_atomic_c11_op_orig(p, v, m, o, op) \
 		atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), v, \
 		memory_order_##m)
@@ -156,7 +156,7 @@
 
 #define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
 		bool _result = false; \
-		typeof(p) _p = (p); \
+		__typeof__(p) _p = (p); \
 		ov = os_atomic_load(_p, relaxed); \
 		do { \
 			__VA_ARGS__; \
diff --git a/src/shims/atomic_sfb.h b/src/shims/atomic_sfb.h
index de074a4..b8e3260 100644
--- a/src/shims/atomic_sfb.h
+++ b/src/shims/atomic_sfb.h
@@ -50,7 +50,7 @@
 				 "mov	%[_all_ones], %[_bit]" "\n\t"
 				 "3: \n\t"
 				 : [_p] "=m" (*p), [_val] "=&r" (val), [_bit] "=&r" (bit)
-				 : [_all_ones] "i" ((typeof(bit))UINT_MAX) : "memory", "cc");
+				 : [_all_ones] "i" ((__typeof__(bit))UINT_MAX) : "memory", "cc");
 	} else {
 		__asm__ (
 				 "1: \n\t"
@@ -68,8 +68,8 @@
 				 "mov	%[_all_ones], %[_bit]" "\n\t"
 				 "3: \n\t"
 				 : [_p] "=m" (*p), [_val] "=&r" (val), [_bit] "=&r" (bit)
-				 : [_all_ones] "i" ((typeof(bit))UINT_MAX),
-				   [_max] "g" ((typeof(bit))max) : "memory", "cc");
+				 : [_all_ones] "i" ((__typeof__(bit))UINT_MAX),
+				   [_max] "g" ((__typeof__(bit))max) : "memory", "cc");
 	}
 	return (unsigned int)bit;
 }
diff --git a/src/shims/lock.c b/src/shims/lock.c
index 24af953..bc55450 100644
--- a/src/shims/lock.c
+++ b/src/shims/lock.c
@@ -144,8 +144,8 @@
 
 	do {
 		uint64_t nsec = _dispatch_timeout(timeout);
-		_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
-		_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
+		_timeout.tv_sec = (__typeof__(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
+		_timeout.tv_nsec = (__typeof__(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
 		kr = slowpath(semaphore_timedwait(*sema, _timeout));
 	} while (kr == KERN_ABORTED);
 
@@ -200,8 +200,8 @@
 
 	do {
 		uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout);
-		_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
-		_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
+		_timeout.tv_sec = (__typeof__(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
+		_timeout.tv_nsec = (__typeof__(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
 		ret = slowpath(sem_timedwait(sema, &_timeout));
 	} while (ret == -1 && errno == EINTR);
 
diff --git a/src/shims/yield.h b/src/shims/yield.h
index 67f8679..99864af 100644
--- a/src/shims/yield.h
+++ b/src/shims/yield.h
@@ -32,7 +32,7 @@
 
 #if DISPATCH_HW_CONFIG_UP
 #define _dispatch_wait_until(c) ({ \
-		typeof(c) _c; \
+		__typeof__(c) _c; \
 		int _spins = 0; \
 		for (;;) { \
 			if (likely(_c = (c))) break; \
@@ -46,7 +46,7 @@
 #define DISPATCH_WAIT_SPINS 1024
 #endif
 #define _dispatch_wait_until(c) ({ \
-		typeof(c) _c; \
+		__typeof__(c) _c; \
 		int _spins = -(DISPATCH_WAIT_SPINS); \
 		for (;;) { \
 			if (likely(_c = (c))) break; \
@@ -59,7 +59,7 @@
 		_c; })
 #else
 #define _dispatch_wait_until(c) ({ \
-		typeof(c) _c; \
+		__typeof__(c) _c; \
 		for (;;) { \
 			if (likely(_c = (c))) break; \
 			dispatch_hardware_pause(); \
diff --git a/src/source.c b/src/source.c
index 3f9caee..730e441 100644
--- a/src/source.c
+++ b/src/source.c
@@ -1220,7 +1220,7 @@
 	if (_dispatch_trace_timer_configure_enabled() ||
 			_dispatch_source_timer_telemetry_enabled()) {
 		_dispatch_source_timer_telemetry_slow(ds, clock, values);
-		asm(""); // prevent tailcall
+		__asm__ __volatile__ (""); // prevent tailcall
 	}
 }
 
diff --git a/tests/Foundation/bench.mm b/tests/Foundation/bench.mm
index c516366..635679f 100644
--- a/tests/Foundation/bench.mm
+++ b/tests/Foundation/bench.mm
@@ -107,7 +107,7 @@
 		d /= tbi.denom;
 	}
 
-	dd = (typeof(dd))d / (typeof(dd))cnt;
+	dd = (__typeof__(dd))d / (__typeof__(dd))cnt;
 
 	dd -= loop_cost;
 
@@ -135,7 +135,7 @@
 		d /= tbi.denom;
 	}
 
-	dd = (typeof(dd))d / (typeof(dd))cnt2;
+	dd = (__typeof__(dd))d / (__typeof__(dd))cnt2;
 
 	dd -= loop_cost;
 	dd *= cycles_per_nanosecond;
@@ -150,7 +150,7 @@
 {
 	uint32_t lo, hi;
 
-	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
+	__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
 
 	return (uint64_t)hi << 32 | lo;
 }
@@ -245,7 +245,7 @@
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		asm volatile("");
+		__asm__ __volatile__ ("");
 	}
 	print_result(s, "Empty loop:");
 
@@ -374,46 +374,46 @@
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		asm("nop");
+		__asm__ __volatile__ ("nop");
 	}
 	print_result(s, "raw 'nop':");
 
 #if defined(__i386__) || defined(__x86_64__)
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		asm("pause");
+		__asm__ __volatile__ ("pause");
 	}
 	print_result(s, "raw 'pause':");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		asm("mfence");
+		__asm__ __volatile__ ("mfence");
 	}
 	print_result(s, "Atomic mfence:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		asm("lfence");
+		__asm__ __volatile__ ("lfence");
 	}
 	print_result(s, "Atomic lfence:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		asm("sfence");
+		__asm__ __volatile__ ("sfence");
 	}
 	print_result(s, "Atomic sfence:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
 		uint64_t sidt_rval;
-		asm("sidt %0" : "=m" (sidt_rval));
+		__asm__ __volatile__ ("sidt %0" : "=m" (sidt_rval));
 	}
 	print_result(s, "'sidt' instruction:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
 		long prev;
-		asm volatile("cmpxchg %1,%2"
+		__asm__ __volatile__ ("cmpxchg %1,%2"
 				: "=a" (prev) : "r" (0l), "m" (global), "0" (1l));
 	}
 	print_result(s, "'cmpxchg' without the 'lock' prefix:");
@@ -421,7 +421,7 @@
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
 		global = 0;
-		asm volatile("mfence" ::: "memory");
+		__asm__ __volatile__ ("mfence" ::: "memory");
 	}
 	print_result(s, "Store + mfence:");
 
@@ -429,14 +429,14 @@
 	for (i = cnt; i; i--) {
 		unsigned long _clbr;
 #ifdef __LP64__
-		asm volatile("cpuid" : "=a" (_clbr)
+		__asm__ __volatile__ ("cpuid" : "=a" (_clbr)
 				: "0" (0) : "rbx", "rcx", "rdx", "cc", "memory");
 #else
 #ifdef __llvm__
-		asm volatile("cpuid" : "=a" (_clbr) : "0" (0)
+		__asm__ __volatile__ ("cpuid" : "=a" (_clbr) : "0" (0)
 				: "ebx", "ecx", "edx", "cc", "memory" );
 #else // gcc does not allow inline i386 asm to clobber ebx
-		asm volatile("pushl %%ebx\n\tcpuid\n\tpopl %%ebx"
+		__asm__ __volatile__ ("pushl %%ebx\n\tcpuid\n\tpopl %%ebx"
 				: "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory" );
 #endif
 #endif
@@ -454,7 +454,7 @@
 #ifdef _ARM_ARCH_7
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		asm("yield");
+		__asm__ __volatile__ ("yield");
 	}
 	print_result(s, "raw 'yield':");
 #endif
@@ -462,9 +462,9 @@
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
 #ifdef _ARM_ARCH_7
-		asm volatile("dmb ish" : : : "memory");
+		__asm__ __volatile__ ("dmb ish" : : : "memory");
 #else
-		asm volatile("mcr	p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
+		__asm__ __volatile__ ("mcr	p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
 #endif
 	}
 	print_result(s, "'dmb ish' instruction:");
@@ -472,7 +472,7 @@
 #ifdef _ARM_ARCH_7
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		asm volatile("dmb ishst" : : : "memory");
+		__asm__ __volatile__ ("dmb ishst" : : : "memory");
 	}
 	print_result(s, "'dmb ishst' instruction:");
 #endif
@@ -480,9 +480,9 @@
 #ifdef _ARM_ARCH_7
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		asm volatile("str	%[_r], [%[_p], %[_o]]" :
+		__asm__ __volatile__ ("str	%[_r], [%[_p], %[_o]]" :
 				: [_p] "p" (&global), [_o] "M" (0), [_r] "r" (0) : "memory");
-		asm volatile("dmb ishst" : : : "memory");
+		__asm__ __volatile__ ("dmb ishst" : : : "memory");
 	}
 	print_result(s, "'str + dmb ishst' instructions:");
 #endif
@@ -493,10 +493,10 @@
 		uintptr_t prev;
 		uint32_t t;
 		do {
-		asm volatile("ldrex	%[_r], [%[_p], %[_o]]"
+		__asm__ __volatile__ ("ldrex	%[_r], [%[_p], %[_o]]"
 				: [_r] "=&r" (prev) \
 				: [_p] "p" (&global), [_o] "M" (0) : "memory");
-		asm volatile("strex	%[_t], %[_r], [%[_p], %[_o]]"
+		__asm__ __volatile__ ("strex	%[_t], %[_r], [%[_p], %[_o]]"
 				: [_t] "=&r" (t) \
 				: [_p] "p" (&global), [_o] "M" (0), [_r] "r" (0) : "memory");
 		} while (t);
@@ -507,9 +507,9 @@
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
 #ifdef _ARM_ARCH_7
-		asm volatile("dsb ish" : : : "memory");
+		__asm__ __volatile__ ("dsb ish" : : : "memory");
 #else
-		asm volatile("mcr	p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory");
+		__asm__ __volatile__ ("mcr	p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory");
 #endif
 	}
 	print_result(s, "'dsb ish' instruction:");
@@ -517,16 +517,16 @@
 #if BENCH_SLOW
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		register long _swtch_pri asm("ip") = -59;
-		asm volatile("svc	0x80" : : "r" (_swtch_pri) : "r0", "memory");
+		register long _swtch_pri __asm__("ip") = -59;
+		__asm__ __volatile__ ("svc	0x80" : : "r" (_swtch_pri) : "r0", "memory");
 	}
 	print_result(s, "swtch_pri syscall:");
 
 	s = mach_absolute_time();
 	for (i = cnt; i; i--) {
-		register long _r0 asm("r0") = 0, _r1 asm("r1") = 1, _r2 asm("r2") = 1;
-		register long _thread_switch asm("ip") = -61;
-		asm volatile("svc	0x80" : "+r" (_r0)
+		register long _r0 __asm__("r0") = 0, _r1 __asm__("r1") = 1, _r2 __asm__("r2") = 1;
+		register long _thread_switch __asm__("ip") = -61;
+		__asm__ __volatile__ ("svc	0x80" : "+r" (_r0)
 				: "r" (_r1), "r" (_r2), "r" (_thread_switch): "memory");
 	}
 	print_result(s, "thread_switch syscall:");
@@ -636,9 +636,9 @@
 		while (!__sync_bool_compare_and_swap(&global, 0, 1)) {
 			do {
 #if defined(__i386__) || defined(__x86_64__)
-				asm("pause");
+				__asm__ __volatile__ ("pause");
 #elif defined(__arm__) && defined _ARM_ARCH_7
-				asm("yield");
+				__asm__ __volatile__ ("yield");
 #endif
 			} while (global);
 		}
diff --git a/tests/dispatch_transform.c b/tests/dispatch_transform.c
index 85a4114..6205107 100644
--- a/tests/dispatch_transform.c
+++ b/tests/dispatch_transform.c
@@ -29,7 +29,7 @@
 #include <fcntl.h>
 
 #define printf_data(p, s) ({ \
-	typeof(s) _i; \
+	__typeof__(s) _i; \
 	for (_i=0; _i<s; _i++) { \
 		printf("%c", ((uint8_t *)p)[_i]); \
 	} \