WIP: start optimizing mp

Move the mp_ipi_task / mp_ipi_task_func_t definitions out of the public
kernel/mp.h header and into mp.cpp, since they are implementation details
of the IPI path. Also convert the sync_tasks cleanup in mp.cpp from manual
spin_lock_irqsave()/spin_unlock_irqrestore() calls to the scoped
AutoSpinLock guard from kernel/auto_lock.h.

Change-Id: I142f2a38614c093220a66f2baa0e0d966eeb875d
diff --git a/kernel/include/kernel/mp.h b/kernel/include/kernel/mp.h
index b970b3e..c9afa12 100644
--- a/kernel/include/kernel/mp.h
+++ b/kernel/include/kernel/mp.h
@@ -18,7 +18,6 @@
 
 __BEGIN_CDECLS
 
-typedef void (*mp_ipi_task_func_t)(void* context);
 typedef void (*mp_sync_task_t)(void* context);
 
 // by default, mp_reschedule does not signal to cpus that are running realtime
@@ -73,14 +72,6 @@
 // called from arch code during interrupt irq
 void mp_mbx_interrupt_irq(void);
 
-// represents a pending task for some number of CPUs to execute
-struct mp_ipi_task {
-    struct list_node node;
-
-    mp_ipi_task_func_t func;
-    void* context;
-};
-
 // global mp state to track what the cpus are up to
 struct mp_state {
     // cpus that are currently online
diff --git a/kernel/kernel/mp.cpp b/kernel/kernel/mp.cpp
index 97a94f6..38323b7 100644
--- a/kernel/kernel/mp.cpp
+++ b/kernel/kernel/mp.cpp
@@ -14,6 +14,7 @@
 #include <err.h>
 #include <inttypes.h>
 #include <kernel/align.h>
+#include <kernel/auto_lock.h>
 #include <kernel/dpc.h>
 #include <kernel/event.h>
 #include <kernel/mp.h>
@@ -37,6 +38,15 @@
 struct mp_sync_context;
 static void mp_sync_task(void* context);
 
+// represents a pending task for some number of CPUs to execute
+typedef void (*mp_ipi_task_func_t)(void* context);
+struct mp_ipi_task {
+    struct list_node node;
+
+    mp_ipi_task_func_t func;
+    void* context;
+};
+
 void mp_init(void) {
     mutex_init(&mp.hotplug_lock);
     mp.ipi_task_lock = SPIN_LOCK_INITIAL_VALUE;
@@ -207,14 +217,15 @@
 
     // make sure the sync_tasks aren't in lists anymore, since they're
     // stack allocated
-    spin_lock_irqsave(&mp.ipi_task_lock, irqstate);
-    for (uint i = 0; i < num_cpus; ++i) {
-        // If a task is still around, it's because the CPU went offline.
-        if (list_in_list(&sync_tasks[i].node)) {
-            list_delete(&sync_tasks[i].node);
+    {
+        AutoSpinLock al(&mp.ipi_task_lock);
+        for (uint i = 0; i < num_cpus; ++i) {
+            // If a task is still around, it's because the CPU went offline.
+            if (list_in_list(&sync_tasks[i].node)) {
+                list_delete(&sync_tasks[i].node);
+            }
         }
     }
-    spin_unlock_irqrestore(&mp.ipi_task_lock, irqstate);
 }
 
 static void mp_unplug_trampoline(void) TA_REQ(thread_lock) __NO_RETURN;
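
For reference, the second mp.cpp hunk swaps explicit spin_lock_irqsave()/
spin_unlock_irqrestore() calls for a scoped guard that releases the lock
(and restores the saved interrupt state) when the enclosing block exits.
Below is a minimal, self-contained userland sketch of that RAII pattern;
SpinLock, arch_irq_save()/arch_irq_restore(), and AutoSpinLockSketch are
illustrative stand-ins, not the actual kernel/auto_lock.h implementation.

    #include <atomic>
    #include <cstdio>

    // Illustrative stand-in for the kernel's spin lock.
    struct SpinLock {
        std::atomic_flag flag = ATOMIC_FLAG_INIT;
        void lock() { while (flag.test_and_set(std::memory_order_acquire)) {} }
        void unlock() { flag.clear(std::memory_order_release); }
    };

    // Placeholders for the arch-level IRQ save/restore the real guard performs.
    static unsigned long arch_irq_save() { return 0; }
    static void arch_irq_restore(unsigned long) {}

    // RAII guard: "disables interrupts" and acquires the lock in the
    // constructor, releases both in the destructor, mirroring the shape of
    // the AutoSpinLock usage in the diff above.
    class AutoSpinLockSketch {
    public:
        explicit AutoSpinLockSketch(SpinLock* lock)
            : lock_(lock), state_(arch_irq_save()) {
            lock_->lock();
        }
        ~AutoSpinLockSketch() {
            lock_->unlock();
            arch_irq_restore(state_);
        }
        AutoSpinLockSketch(const AutoSpinLockSketch&) = delete;
        AutoSpinLockSketch& operator=(const AutoSpinLockSketch&) = delete;

    private:
        SpinLock* lock_;
        unsigned long state_;
    };

    SpinLock g_task_lock;

    int main() {
        {
            // Equivalent in shape to the new block in mp.cpp: the lock is
            // released automatically when the scope ends, on every exit path.
            AutoSpinLockSketch guard(&g_task_lock);
            std::printf("critical section while holding the lock\n");
        }  // lock released, IRQ state "restored" here
        return 0;
    }

The cleanup block currently has a single exit, so the behavior is unchanged;
the guard mainly ensures the lock and saved IRQ state stay balanced if early
returns or additional exits are added to that block later.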