[kernel][arm64][mp][timer] Make arch_spinloop_pause a no-op on arm64

This change removes arch_spinloop_signal and changes arm64's
arch_spinloop_pause to issue YIELD rather than WFE.

The purpose of this change is to eliminate potential bugs arising
from WFE with no corresponding SEV.

In two places (timer.cpp and mp.cpp), arch_spinloop_pause is used in
conjunction with arch_spinloop_signal to create a kind of condition
variable: one CPU spins in arch_spinloop_pause until another CPU calls
arch_spinloop_signal.
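
As a sketch of that pattern: the signaling side below is taken from
mp.cpp (visible in the diff), while the waiting side is a simplified
assumption about the other end of the handshake:

    // Signaling CPU (mp.cpp): mark this CPU done, then wake waiters.
    atomic_and((int*)&context->outstanding_cpus,
               ~cpu_num_to_mask(arch_curr_cpu_num()));
    arch_spinloop_signal(); // SEV on arm64; removed by this change

    // Waiting CPU (simplified): spin until all targeted CPUs check in.
    while (atomic_load((int*)&context->outstanding_cpus) != 0) {
        arch_spinloop_pause(); // WFE on arm64 before this change
    }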

Elsewhere (e.g. in some UART drivers), it is used purely as a hinting
no-op in busy loops.
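
For example, a polling loop of this shape (hypothetical;
uart_tx_ready is a stand-in name, not a real function in the tree):

    // Busy-wait until the UART can accept another byte. The pause is
    // purely a CPU hint; no other CPU ever signals this loop.
    while (!uart_tx_ready()) {
        arch_spinloop_pause();
    }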

On x64, arch_spinloop_pause is PAUSE and arch_spinloop_signal is
empty.

On arm64, arch_spinloop_pause is WFE (Wait For Event) and
arch_spinloop_signal is SEV (Send Event). WFE suspends execution until
an event is signaled (via SEV, a global monitor transition, etc.).
This means that any use of WFE without a corresponding SEV (or another
wakeup mechanism, such as Load-Exclusive) can suspend the CPU for an
indefinite period of time.
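
A sketch of the hazard (hypothetical flag and writer, simplified):

    // If the writer sets 'flag' but never executes SEV, a CPU parked
    // in WFE here may sleep until some unrelated event (e.g. an
    // interrupt) happens to wake it.
    while (!atomic_load(&flag)) {
        __asm__ volatile("wfe" ::: "memory");
    }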

Test: On VIM2 and Eve, ran the following:
- k ut sync_ipi_tests
- k ut timer
- k timer_stress 60

ZX-2562 #comment followup

Change-Id: If2b8facef4845865d5bfe7a4d0089cd5aef791a6
diff --git a/kernel/arch/arm64/include/arch/arch_ops.h b/kernel/arch/arm64/include/arch/arch_ops.h
index 163e393..2f7e8e6 100644
--- a/kernel/arch/arm64/include/arch/arch_ops.h
+++ b/kernel/arch/arm64/include/arch/arch_ops.h
@@ -22,12 +22,7 @@
 #define ENABLE_CYCLE_COUNTER 1
 
 static inline void arch_spinloop_pause(void) {
-    __asm__ volatile("wfe" ::
-                         : "memory");
-}
-
-static inline void arch_spinloop_signal(void) {
-    __asm__ volatile("sev" ::
+    __asm__ volatile("yield" ::
                          : "memory");
 }
 
diff --git a/kernel/arch/x86/include/arch/arch_ops.h b/kernel/arch/x86/include/arch/arch_ops.h
index b9e9cb1..1f8fa22 100644
--- a/kernel/arch/x86/include/arch/arch_ops.h
+++ b/kernel/arch/x86/include/arch/arch_ops.h
@@ -54,10 +54,6 @@
     __asm__ volatile("pause" ::: "memory");
 }
 
-static inline void arch_spinloop_signal(void)
-{
-}
-
 #define mb()        __asm__ volatile ("mfence" ::: "memory")
 #define smp_mb()    mb()
 
diff --git a/kernel/include/arch/ops.h b/kernel/include/arch/ops.h
index 2e99abf..7520389 100644
--- a/kernel/include/arch/ops.h
+++ b/kernel/include/arch/ops.h
@@ -55,9 +55,6 @@
 
 /* function to call in spinloops to idle */
 static void arch_spinloop_pause(void);
-/* function to call when an event happens that may trigger the exit from
- * a spinloop */
-static void arch_spinloop_signal(void);
 
 /* arch optimized version of a page zero routine against a page aligned buffer */
 void arch_zero_page(void *);
diff --git a/kernel/kernel/mp.cpp b/kernel/kernel/mp.cpp
index eb7352b..d2cfb04 100644
--- a/kernel/kernel/mp.cpp
+++ b/kernel/kernel/mp.cpp
@@ -96,7 +96,6 @@
     // use seq-cst atomic to ensure this update is not seen before the
     // side-effects of context->task
     atomic_and((int*)&context->outstanding_cpus, ~cpu_num_to_mask(arch_curr_cpu_num()));
-    arch_spinloop_signal();
 }
 
 /* @brief Execute a task on the specified CPUs, and block on the calling
diff --git a/kernel/kernel/timer.cpp b/kernel/kernel/timer.cpp
index af8791f..4b55183 100644
--- a/kernel/kernel/timer.cpp
+++ b/kernel/kernel/timer.cpp
@@ -285,9 +285,6 @@
     timer->cancel = true;
     mb();
 
-    // wake up any spinners on the cancel signal
-    arch_spinloop_signal();
-
     // see if we're trying to cancel the timer we're currently in the middle of handling
     if (unlikely(timer->active_cpu == (int)cpu)) {
         // zero it out
@@ -406,9 +403,6 @@
         // mark it not busy
         timer->active_cpu = -1;
         mb();
-
-        // make sure any spinners wake up
-        arch_spinloop_signal();
     }
 
     // get the deadline of the event at the head of the queue (if any)