[wlan][iwlwifi] Re-format with 'fx format-code --all'

BUG=None
TEST=Compiled and run on Eve.

Change-Id: Iefd970ec3c35dfe5f8fe092963dffe4abc729cbf
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/22000.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/22000.c
index 2e909f5..6a8c28e 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/22000.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/22000.c
@@ -34,6 +34,7 @@
 
 #include <linux/module.h>
 #include <linux/stringify.h>
+
 #include "iwl-config.h"
 
 /* Highest firmware API version supported */
@@ -69,11 +70,11 @@
 #define IWL_22000_HR_MODULE_FIRMWARE(api) IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_JF_MODULE_FIRMWARE(api) IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
-    IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
+  IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(api) \
-    IWL_22000_HR_B_F0_FW_PRE __stringify(api) ".ucode"
+  IWL_22000_HR_B_F0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \
-    IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
+  IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
@@ -111,27 +112,27 @@
     .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
-#define IWL_DEVICE_22000_COMMON                                                          \
-    .ucode_api_max = IWL_22000_UCODE_API_MAX, .ucode_api_min = IWL_22000_UCODE_API_MIN,  \
-    .led_mode = IWL_LED_RF_STATE, .nvm_hw_section_num = 10, .non_shared_ant = ANT_B,     \
-    .dccm_offset = IWL_22000_DCCM_OFFSET, .dccm_len = IWL_22000_DCCM_LEN,                \
-    .dccm2_offset = IWL_22000_DCCM2_OFFSET, .dccm2_len = IWL_22000_DCCM2_LEN,            \
-    .smem_offset = IWL_22000_SMEM_OFFSET, .smem_len = IWL_22000_SMEM_LEN,                \
-    .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, .apmg_not_supported = true,    \
-    .mq_rx_supported = true, .vht_mu_mimo_supported = true, .mac_addr_from_csr = true,   \
-    .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION,                 \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .use_tfh = true, .rf_id = true, \
-    .gen2 = true, .nvm_type = IWL_NVM_EXT, .dbgc_supported = true,                       \
-    .min_umac_error_event_table = 0x400000, .d3_debug_data_base_addr = 0x401000,         \
-    .d3_debug_data_length = 60 * 1024
+#define IWL_DEVICE_22000_COMMON                                                        \
+  .ucode_api_max = IWL_22000_UCODE_API_MAX, .ucode_api_min = IWL_22000_UCODE_API_MIN,  \
+  .led_mode = IWL_LED_RF_STATE, .nvm_hw_section_num = 10, .non_shared_ant = ANT_B,     \
+  .dccm_offset = IWL_22000_DCCM_OFFSET, .dccm_len = IWL_22000_DCCM_LEN,                \
+  .dccm2_offset = IWL_22000_DCCM2_OFFSET, .dccm2_len = IWL_22000_DCCM2_LEN,            \
+  .smem_offset = IWL_22000_SMEM_OFFSET, .smem_len = IWL_22000_SMEM_LEN,                \
+  .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, .apmg_not_supported = true,    \
+  .mq_rx_supported = true, .vht_mu_mimo_supported = true, .mac_addr_from_csr = true,   \
+  .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION,                 \
+  .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .use_tfh = true, .rf_id = true, \
+  .gen2 = true, .nvm_type = IWL_NVM_EXT, .dbgc_supported = true,                       \
+  .min_umac_error_event_table = 0x400000, .d3_debug_data_base_addr = 0x401000,         \
+  .d3_debug_data_length = 60 * 1024
 
-#define IWL_DEVICE_22500                                               \
-    IWL_DEVICE_22000_COMMON, .device_family = IWL_DEVICE_FAMILY_22000, \
-                             .base_params = &iwl_22000_base_params, .csr = &iwl_csr_v1
+#define IWL_DEVICE_22500                                             \
+  IWL_DEVICE_22000_COMMON, .device_family = IWL_DEVICE_FAMILY_22000, \
+                           .base_params = &iwl_22000_base_params, .csr = &iwl_csr_v1
 
-#define IWL_DEVICE_22560                                               \
-    IWL_DEVICE_22000_COMMON, .device_family = IWL_DEVICE_FAMILY_22560, \
-                             .base_params = &iwl_22560_base_params, .csr = &iwl_csr_v2
+#define IWL_DEVICE_22560                                             \
+  IWL_DEVICE_22000_COMMON, .device_family = IWL_DEVICE_FAMILY_22560, \
+                           .base_params = &iwl_22560_base_params, .csr = &iwl_csr_v2
 
 const struct iwl_cfg iwl22000_2ac_cfg_hr = {
     .name = "Intel(R) Dual Band Wireless AC 22000",
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/7000.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/7000.c
index c0d2dbd..6434d40 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/7000.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/7000.c
@@ -118,27 +118,27 @@
     .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
-#define IWL_DEVICE_7000_COMMON                                                               \
-    .device_family = IWL_DEVICE_FAMILY_7000, .base_params = &iwl7000_base_params,            \
-    .led_mode = IWL_LED_RF_STATE, .nvm_hw_section_num = 0, .non_shared_ant = ANT_A,          \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .dccm_offset = IWL7000_DCCM_OFFSET, \
-    .csr = &iwl_csr_v1
+#define IWL_DEVICE_7000_COMMON                                                             \
+  .device_family = IWL_DEVICE_FAMILY_7000, .base_params = &iwl7000_base_params,            \
+  .led_mode = IWL_LED_RF_STATE, .nvm_hw_section_num = 0, .non_shared_ant = ANT_A,          \
+  .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .dccm_offset = IWL7000_DCCM_OFFSET, \
+  .csr = &iwl_csr_v1
 
-#define IWL_DEVICE_7000                                             \
-    IWL_DEVICE_7000_COMMON, .ucode_api_max = IWL7260_UCODE_API_MAX, \
-                            .ucode_api_min = IWL7260_UCODE_API_MIN
+#define IWL_DEVICE_7000                                           \
+  IWL_DEVICE_7000_COMMON, .ucode_api_max = IWL7260_UCODE_API_MAX, \
+                          .ucode_api_min = IWL7260_UCODE_API_MIN
 
-#define IWL_DEVICE_7005                                             \
-    IWL_DEVICE_7000_COMMON, .ucode_api_max = IWL7265_UCODE_API_MAX, \
-                            .ucode_api_min = IWL7265_UCODE_API_MIN
+#define IWL_DEVICE_7005                                           \
+  IWL_DEVICE_7000_COMMON, .ucode_api_max = IWL7265_UCODE_API_MAX, \
+                          .ucode_api_min = IWL7265_UCODE_API_MIN
 
-#define IWL_DEVICE_3008                                             \
-    IWL_DEVICE_7000_COMMON, .ucode_api_max = IWL3168_UCODE_API_MAX, \
-                            .ucode_api_min = IWL3168_UCODE_API_MIN
+#define IWL_DEVICE_3008                                           \
+  IWL_DEVICE_7000_COMMON, .ucode_api_max = IWL3168_UCODE_API_MAX, \
+                          .ucode_api_min = IWL3168_UCODE_API_MIN
 
-#define IWL_DEVICE_7005D                                             \
-    IWL_DEVICE_7000_COMMON, .ucode_api_max = IWL7265D_UCODE_API_MAX, \
-                            .ucode_api_min = IWL7265D_UCODE_API_MIN
+#define IWL_DEVICE_7005D                                           \
+  IWL_DEVICE_7000_COMMON, .ucode_api_max = IWL7265D_UCODE_API_MAX, \
+                          .ucode_api_min = IWL7265D_UCODE_API_MIN
 
 const struct iwl_cfg iwl7260_2ac_cfg = {
     .name = "Intel(R) Dual Band Wireless AC 7260",
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/8000.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/8000.c
index baecf2b..06a93a9 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/8000.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/8000.c
@@ -35,6 +35,7 @@
 
 #include <linux/module.h>
 #include <linux/stringify.h>
+
 #include "iwl-config.h"
 
 /* Highest firmware API version supported */
@@ -104,27 +105,27 @@
     .support_tx_backoff = true,
 };
 
-#define IWL_DEVICE_8000_COMMON                                                                 \
-    .device_family = IWL_DEVICE_FAMILY_8000, .base_params = &iwl8000_base_params,              \
-    .led_mode = IWL_LED_RF_STATE, .nvm_hw_section_num = 10, .features = NETIF_F_RXCSUM,        \
-    .non_shared_ant = ANT_A, .dccm_offset = IWL8260_DCCM_OFFSET, .dccm_len = IWL8260_DCCM_LEN, \
-    .dccm2_offset = IWL8260_DCCM2_OFFSET, .dccm2_len = IWL8260_DCCM2_LEN,                      \
-    .smem_offset = IWL8260_SMEM_OFFSET, .smem_len = IWL8260_SMEM_LEN,                          \
-    .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,                                  \
-    .thermal_params = &iwl8000_tt_params, .apmg_not_supported = true, .nvm_type = IWL_NVM_EXT, \
-    .dbgc_supported = true, .min_umac_error_event_table = 0x800000, .csr = &iwl_csr_v1
+#define IWL_DEVICE_8000_COMMON                                                                    \
+  .device_family = IWL_DEVICE_FAMILY_8000, .base_params = &iwl8000_base_params,                   \
+  .led_mode = IWL_LED_RF_STATE, .nvm_hw_section_num = 10, .features = NETIF_F_RXCSUM,             \
+  .non_shared_ant = ANT_A, .dccm_offset = IWL8260_DCCM_OFFSET, .dccm_len = IWL8260_DCCM_LEN,      \
+  .dccm2_offset = IWL8260_DCCM2_OFFSET, .dccm2_len = IWL8260_DCCM2_LEN,                           \
+  .smem_offset = IWL8260_SMEM_OFFSET, .smem_len = IWL8260_SMEM_LEN,                               \
+  .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, .thermal_params = &iwl8000_tt_params, \
+  .apmg_not_supported = true, .nvm_type = IWL_NVM_EXT, .dbgc_supported = true,                    \
+  .min_umac_error_event_table = 0x800000, .csr = &iwl_csr_v1
 
-#define IWL_DEVICE_8000                                             \
-    IWL_DEVICE_8000_COMMON, .ucode_api_max = IWL8000_UCODE_API_MAX, \
-                            .ucode_api_min = IWL8000_UCODE_API_MIN
+#define IWL_DEVICE_8000                                           \
+  IWL_DEVICE_8000_COMMON, .ucode_api_max = IWL8000_UCODE_API_MAX, \
+                          .ucode_api_min = IWL8000_UCODE_API_MIN
 
-#define IWL_DEVICE_8260                                             \
-    IWL_DEVICE_8000_COMMON, .ucode_api_max = IWL8000_UCODE_API_MAX, \
-                            .ucode_api_min = IWL8000_UCODE_API_MIN
+#define IWL_DEVICE_8260                                           \
+  IWL_DEVICE_8000_COMMON, .ucode_api_max = IWL8000_UCODE_API_MAX, \
+                          .ucode_api_min = IWL8000_UCODE_API_MIN
 
-#define IWL_DEVICE_8265                                             \
-    IWL_DEVICE_8000_COMMON, .ucode_api_max = IWL8265_UCODE_API_MAX, \
-                            .ucode_api_min = IWL8265_UCODE_API_MIN
+#define IWL_DEVICE_8265                                           \
+  IWL_DEVICE_8000_COMMON, .ucode_api_max = IWL8265_UCODE_API_MAX, \
+                          .ucode_api_min = IWL8265_UCODE_API_MIN
 
 const struct iwl_cfg iwl8260_2n_cfg = {
     .name = "Intel(R) Dual Band Wireless N 8260",
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/9000.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/9000.c
index 9665e07..25ae057 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/9000.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/cfg/9000.c
@@ -34,6 +34,7 @@
 
 #include <linux/module.h>
 #include <linux/stringify.h>
+
 #include "fw/file.h"
 #include "iwl-config.h"
 
@@ -99,20 +100,19 @@
     .support_tx_backoff = true,
 };
 
-#define IWL_DEVICE_9000                                                                         \
-    .ucode_api_max = IWL9000_UCODE_API_MAX, .ucode_api_min = IWL9000_UCODE_API_MIN,             \
-    .device_family = IWL_DEVICE_FAMILY_9000, .base_params = &iwl9000_base_params,               \
-    .led_mode = IWL_LED_RF_STATE, .nvm_hw_section_num = 10, .non_shared_ant = ANT_B,            \
-    .dccm_offset = IWL9000_DCCM_OFFSET, .dccm_len = IWL9000_DCCM_LEN,                           \
-    .dccm2_offset = IWL9000_DCCM2_OFFSET, .dccm2_len = IWL9000_DCCM2_LEN,                       \
-    .smem_offset = IWL9000_SMEM_OFFSET, .smem_len = IWL9000_SMEM_LEN,                           \
-    .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, .thermal_params = &iwl9000_tt_params, \
-    .apmg_not_supported = true, .mq_rx_supported = true, .vht_mu_mimo_supported = true,         \
-    .mac_addr_from_csr = true, .rf_id = true, .nvm_type = IWL_NVM_EXT, .dbgc_supported = true,  \
-    .min_umac_error_event_table = 0x800000, .csr = &iwl_csr_v1,                                 \
-    .d3_debug_data_base_addr = 0x401000, .d3_debug_data_length = 92 * 1024,                     \
-    .ht_params = &iwl9000_ht_params, .nvm_ver = IWL9000_NVM_VERSION,                            \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+#define IWL_DEVICE_9000                                                                            \
+  .ucode_api_max = IWL9000_UCODE_API_MAX, .ucode_api_min = IWL9000_UCODE_API_MIN,                  \
+  .device_family = IWL_DEVICE_FAMILY_9000, .base_params = &iwl9000_base_params,                    \
+  .led_mode = IWL_LED_RF_STATE, .nvm_hw_section_num = 10, .non_shared_ant = ANT_B,                 \
+  .dccm_offset = IWL9000_DCCM_OFFSET, .dccm_len = IWL9000_DCCM_LEN,                                \
+  .dccm2_offset = IWL9000_DCCM2_OFFSET, .dccm2_len = IWL9000_DCCM2_LEN,                            \
+  .smem_offset = IWL9000_SMEM_OFFSET, .smem_len = IWL9000_SMEM_LEN,                                \
+  .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, .thermal_params = &iwl9000_tt_params,      \
+  .apmg_not_supported = true, .mq_rx_supported = true, .vht_mu_mimo_supported = true,              \
+  .mac_addr_from_csr = true, .rf_id = true, .nvm_type = IWL_NVM_EXT, .dbgc_supported = true,       \
+  .min_umac_error_event_table = 0x800000, .csr = &iwl_csr_v1, .d3_debug_data_base_addr = 0x401000, \
+  .d3_debug_data_length = 92 * 1024, .ht_params = &iwl9000_ht_params,                              \
+  .nvm_ver = IWL9000_NVM_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl9160_2ac_cfg = {
     .name = "Intel(R) Dual Band Wireless AC 9160",
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h
index aa82abd..6ec9b81 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h
@@ -85,8 +85,7 @@
 #define WARN_ON_ONCE(x) (false)
 #define BUILD_BUG_ON(x) (false)
 
-#define offsetofend(type, member) \
-  (offsetof(type, member) + sizeof(((type*)NULL)->member))
+#define offsetofend(type, member) (offsetof(type, member) + sizeof(((type*)NULL)->member))
 
 // NEEDS_PORTING: need to be generic
 // clang-format off
@@ -220,8 +219,6 @@
   return !ptr || (((unsigned long)ptr) >= (unsigned long)-4095);
 }
 
-static inline void* page_address(const struct page* page) {
-  return page->virtual_addr;
-}
+static inline void* page_address(const struct page* page) { return page->virtual_addr; }
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FUCHSIA_PORTING_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/acpi.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/acpi.c
index 1e62e15..a52d3c9 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/acpi.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/acpi.c
@@ -32,139 +32,144 @@
  *****************************************************************************/
 
 #include "acpi.h"
+
 #include "iwl-debug.h"
 #include "iwl-drv.h"
 
 void* iwl_acpi_get_object(struct device* dev, acpi_string method) {
-    acpi_handle root_handle;
-    acpi_handle handle;
-    struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
-    acpi_status status;
+  acpi_handle root_handle;
+  acpi_handle handle;
+  struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+  acpi_status status;
 
-    root_handle = ACPI_HANDLE(dev);
-    if (!root_handle) {
-        IWL_DEBUG_DEV_RADIO(dev, "Could not retrieve root port ACPI handle\n");
-        return ERR_PTR(-ENOENT);
-    }
+  root_handle = ACPI_HANDLE(dev);
+  if (!root_handle) {
+    IWL_DEBUG_DEV_RADIO(dev, "Could not retrieve root port ACPI handle\n");
+    return ERR_PTR(-ENOENT);
+  }
 
-    /* Get the method's handle */
-    status = acpi_get_handle(root_handle, method, &handle);
-    if (ACPI_FAILURE(status)) {
-        IWL_DEBUG_DEV_RADIO(dev, "%s method not found\n", method);
-        return ERR_PTR(-ENOENT);
-    }
+  /* Get the method's handle */
+  status = acpi_get_handle(root_handle, method, &handle);
+  if (ACPI_FAILURE(status)) {
+    IWL_DEBUG_DEV_RADIO(dev, "%s method not found\n", method);
+    return ERR_PTR(-ENOENT);
+  }
 
-    /* Call the method with no arguments */
-    status = acpi_evaluate_object(handle, NULL, NULL, &buf);
-    if (ACPI_FAILURE(status)) {
-        IWL_DEBUG_DEV_RADIO(dev, "%s invocation failed (0x%x)\n", method, status);
-        return ERR_PTR(-ENOENT);
-    }
+  /* Call the method with no arguments */
+  status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+  if (ACPI_FAILURE(status)) {
+    IWL_DEBUG_DEV_RADIO(dev, "%s invocation failed (0x%x)\n", method, status);
+    return ERR_PTR(-ENOENT);
+  }
 
-    return buf.pointer;
+  return buf.pointer;
 }
 IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
 
 union acpi_object* iwl_acpi_get_wifi_pkg(struct device* dev, union acpi_object* data,
                                          int data_size) {
-    int i;
-    union acpi_object* wifi_pkg;
+  int i;
+  union acpi_object* wifi_pkg;
 
-    /*
-     * We need at least one entry in the wifi package that
-     * describes the domain, and one more entry, otherwise there's
-     * no point in reading it.
-     */
-    if (WARN_ON_ONCE(data_size < 2)) { return ERR_PTR(-EINVAL); }
+  /*
+   * We need at least one entry in the wifi package that
+   * describes the domain, and one more entry, otherwise there's
+   * no point in reading it.
+   */
+  if (WARN_ON_ONCE(data_size < 2)) {
+    return ERR_PTR(-EINVAL);
+  }
 
-    /*
-     * We need at least two packages, one for the revision and one
-     * for the data itself.  Also check that the revision is valid
-     * (i.e. it is an integer set to 0).
-     */
-    if (data->type != ACPI_TYPE_PACKAGE || data->package.count < 2 ||
-        data->package.elements[0].type != ACPI_TYPE_INTEGER ||
-        data->package.elements[0].integer.value != 0) {
-        IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n");
-        return ERR_PTR(-EINVAL);
+  /*
+   * We need at least two packages, one for the revision and one
+   * for the data itself.  Also check that the revision is valid
+   * (i.e. it is an integer set to 0).
+   */
+  if (data->type != ACPI_TYPE_PACKAGE || data->package.count < 2 ||
+      data->package.elements[0].type != ACPI_TYPE_INTEGER ||
+      data->package.elements[0].integer.value != 0) {
+    IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n");
+    return ERR_PTR(-EINVAL);
+  }
+
+  /* loop through all the packages to find the one for WiFi */
+  for (i = 1; i < data->package.count; i++) {
+    union acpi_object* domain;
+
+    wifi_pkg = &data->package.elements[i];
+
+    /* skip entries that are not a package with the right size */
+    if (wifi_pkg->type != ACPI_TYPE_PACKAGE || wifi_pkg->package.count != data_size) {
+      continue;
     }
 
-    /* loop through all the packages to find the one for WiFi */
-    for (i = 1; i < data->package.count; i++) {
-        union acpi_object* domain;
-
-        wifi_pkg = &data->package.elements[i];
-
-        /* skip entries that are not a package with the right size */
-        if (wifi_pkg->type != ACPI_TYPE_PACKAGE || wifi_pkg->package.count != data_size) {
-            continue;
-        }
-
-        domain = &wifi_pkg->package.elements[0];
-        if (domain->type == ACPI_TYPE_INTEGER && domain->integer.value == ACPI_WIFI_DOMAIN) {
-            goto found;
-        }
+    domain = &wifi_pkg->package.elements[0];
+    if (domain->type == ACPI_TYPE_INTEGER && domain->integer.value == ACPI_WIFI_DOMAIN) {
+      goto found;
     }
+  }
 
-    return ERR_PTR(-ENOENT);
+  return ERR_PTR(-ENOENT);
 
 found:
-    return wifi_pkg;
+  return wifi_pkg;
 }
 IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg);
 
 int iwl_acpi_get_mcc(struct device* dev, char* mcc) {
-    union acpi_object *wifi_pkg, *data;
-    uint32_t mcc_val;
-    int ret;
+  union acpi_object *wifi_pkg, *data;
+  uint32_t mcc_val;
+  int ret;
 
-    data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
-    if (IS_ERR(data)) { return PTR_ERR(data); }
+  data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
+  if (IS_ERR(data)) {
+    return PTR_ERR(data);
+  }
 
-    wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE);
-    if (IS_ERR(wifi_pkg)) {
-        ret = PTR_ERR(wifi_pkg);
-        goto out_free;
-    }
+  wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE);
+  if (IS_ERR(wifi_pkg)) {
+    ret = PTR_ERR(wifi_pkg);
+    goto out_free;
+  }
 
-    if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
-        ret = -EINVAL;
-        goto out_free;
-    }
+  if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+    ret = -EINVAL;
+    goto out_free;
+  }
 
-    mcc_val = wifi_pkg->package.elements[1].integer.value;
+  mcc_val = wifi_pkg->package.elements[1].integer.value;
 
-    mcc[0] = (mcc_val >> 8) & 0xff;
-    mcc[1] = mcc_val & 0xff;
-    mcc[2] = '\0';
+  mcc[0] = (mcc_val >> 8) & 0xff;
+  mcc[1] = mcc_val & 0xff;
+  mcc[2] = '\0';
 
-    ret = 0;
+  ret = 0;
 out_free:
-    kfree(data);
-    return ret;
+  kfree(data);
+  return ret;
 }
 IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc);
 
 uint64_t iwl_acpi_get_pwr_limit(struct device* dev) {
-    union acpi_object *data, *wifi_pkg;
-    uint64_t dflt_pwr_limit;
+  union acpi_object *data, *wifi_pkg;
+  uint64_t dflt_pwr_limit;
 
-    data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
-    if (IS_ERR(data)) {
-        dflt_pwr_limit = 0;
-        goto out;
-    }
+  data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
+  if (IS_ERR(data)) {
+    dflt_pwr_limit = 0;
+    goto out;
+  }
 
-    wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_SPLC_WIFI_DATA_SIZE);
-    if (IS_ERR(wifi_pkg) || wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
-        dflt_pwr_limit = 0;
-        goto out_free;
-    }
+  wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_SPLC_WIFI_DATA_SIZE);
+  if (IS_ERR(wifi_pkg) || wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
+    dflt_pwr_limit = 0;
+    goto out_free;
+  }
 
-    dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+  dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
 out_free:
-    kfree(data);
+  kfree(data);
 out:
-    return dflt_pwr_limit;
+  return dflt_pwr_limit;
 }
 IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/acpi.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/acpi.h
index 78a7ae0..a0e40e8 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/acpi.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/acpi.h
@@ -84,23 +84,21 @@
 #else /* CONFIG_ACPI */
 
 static inline void* iwl_acpi_get_object(struct device* dev, acpi_string method) {
-    // NEEDS_PORTING return ERR_PTR(-ENOENT);
-    return NULL;
+  // NEEDS_PORTING return ERR_PTR(-ENOENT);
+  return NULL;
 }
 
 static inline union acpi_object* iwl_acpi_get_wifi_pkg(struct device* dev, union acpi_object* data,
                                                        int data_size) {
-    // NEEDS_PORTING return ERR_PTR(-ENOENT);
-    return NULL;
+  // NEEDS_PORTING return ERR_PTR(-ENOENT);
+  return NULL;
 }
 
 static inline zx_status_t iwl_acpi_get_mcc(struct device* dev, char* mcc) {
-    return ZX_ERR_NOT_FOUND;
+  return ZX_ERR_NOT_FOUND;
 }
 
-static inline uint64_t iwl_acpi_get_pwr_limit(struct device* dev) {
-    return 0;
-}
+static inline uint64_t iwl_acpi_get_pwr_limit(struct device* dev) { return 0; }
 
 #endif  /* CONFIG_ACPI */
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_ACPI_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/alive.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/alive.h
index f0ba6a2..d54aef9 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/alive.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/alive.h
@@ -43,24 +43,24 @@
 
 /* alive response ver_type values */
 enum {
-    FW_TYPE_HW = 0,
-    FW_TYPE_PROT = 1,
-    FW_TYPE_AP = 2,
-    FW_TYPE_WOWLAN = 3,
-    FW_TYPE_TIMING = 4,
-    FW_TYPE_WIPAN = 5
+  FW_TYPE_HW = 0,
+  FW_TYPE_PROT = 1,
+  FW_TYPE_AP = 2,
+  FW_TYPE_WOWLAN = 3,
+  FW_TYPE_TIMING = 4,
+  FW_TYPE_WIPAN = 5
 };
 
 /* alive response ver_subtype values */
 enum {
-    FW_SUBTYPE_FULL_FEATURE = 0,
-    FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */
-    FW_SUBTYPE_REDUCED = 2,
-    FW_SUBTYPE_ALIVE_ONLY = 3,
-    FW_SUBTYPE_WOWLAN = 4,
-    FW_SUBTYPE_AP_SUBTYPE = 5,
-    FW_SUBTYPE_WIPAN = 6,
-    FW_SUBTYPE_INITIALIZE = 9
+  FW_SUBTYPE_FULL_FEATURE = 0,
+  FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */
+  FW_SUBTYPE_REDUCED = 2,
+  FW_SUBTYPE_ALIVE_ONLY = 3,
+  FW_SUBTYPE_WOWLAN = 4,
+  FW_SUBTYPE_AP_SUBTYPE = 5,
+  FW_SUBTYPE_WIPAN = 6,
+  FW_SUBTYPE_INITIALIZE = 9
 };
 
 #define IWL_ALIVE_STATUS_ERR 0xDEAD
@@ -69,42 +69,42 @@
 #define IWL_ALIVE_FLG_RFKILL BIT(0)
 
 struct iwl_lmac_alive {
-    __le32 ucode_major;
-    __le32 ucode_minor;
-    uint8_t ver_subtype;
-    uint8_t ver_type;
-    uint8_t mac;
-    uint8_t opt;
-    __le32 timestamp;
-    __le32 error_event_table_ptr; /* SRAM address for error log */
-    __le32 log_event_table_ptr;   /* SRAM address for LMAC event log */
-    __le32 cpu_register_ptr;
-    __le32 dbgm_config_ptr;
-    __le32 alive_counter_ptr;
-    __le32 scd_base_ptr; /* SRAM address for SCD */
-    __le32 st_fwrd_addr; /* pointer to Store and forward */
-    __le32 st_fwrd_size;
+  __le32 ucode_major;
+  __le32 ucode_minor;
+  uint8_t ver_subtype;
+  uint8_t ver_type;
+  uint8_t mac;
+  uint8_t opt;
+  __le32 timestamp;
+  __le32 error_event_table_ptr; /* SRAM address for error log */
+  __le32 log_event_table_ptr;   /* SRAM address for LMAC event log */
+  __le32 cpu_register_ptr;
+  __le32 dbgm_config_ptr;
+  __le32 alive_counter_ptr;
+  __le32 scd_base_ptr; /* SRAM address for SCD */
+  __le32 st_fwrd_addr; /* pointer to Store and forward */
+  __le32 st_fwrd_size;
 } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
 
 struct iwl_umac_alive {
-    __le32 umac_major;      /* UMAC version: major */
-    __le32 umac_minor;      /* UMAC version: minor */
-    __le32 error_info_addr; /* SRAM address for UMAC error log */
-    __le32 dbg_print_buff_addr;
+  __le32 umac_major;      /* UMAC version: major */
+  __le32 umac_minor;      /* UMAC version: minor */
+  __le32 error_info_addr; /* SRAM address for UMAC error log */
+  __le32 dbg_print_buff_addr;
 } __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
 
 struct mvm_alive_resp_v3 {
-    __le16 status;
-    __le16 flags;
-    struct iwl_lmac_alive lmac_data;
-    struct iwl_umac_alive umac_data;
+  __le16 status;
+  __le16 flags;
+  struct iwl_lmac_alive lmac_data;
+  struct iwl_umac_alive umac_data;
 } __packed; /* ALIVE_RES_API_S_VER_3 */
 
 struct mvm_alive_resp {
-    __le16 status;
-    __le16 flags;
-    struct iwl_lmac_alive lmac_data[2];
-    struct iwl_umac_alive umac_data;
+  __le16 status;
+  __le16 flags;
+  struct iwl_lmac_alive lmac_data[2];
+  struct iwl_umac_alive umac_data;
 } __packed; /* ALIVE_RES_API_S_VER_4 */
 
 /**
@@ -115,9 +115,9 @@
  * @IWL_INIT_PHY: driver is going to send PHY_DB commands
  */
 enum iwl_extended_cfg_flags {
-    IWL_INIT_DEBUG_CFG,
-    IWL_INIT_NVM,
-    IWL_INIT_PHY,
+  IWL_INIT_DEBUG_CFG,
+  IWL_INIT_NVM,
+  IWL_INIT_PHY,
 };
 
 /**
@@ -126,7 +126,7 @@
  * @init_flags: values from iwl_extended_cfg_flags
  */
 struct iwl_init_extended_cfg_cmd {
-    __le32 init_flags;
+  __le32 init_flags;
 } __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
 
 /**
@@ -137,19 +137,19 @@
  * @radio_dash: radio version dash
  */
 struct iwl_radio_version_notif {
-    __le32 radio_flavor;
-    __le32 radio_step;
-    __le32 radio_dash;
+  __le32 radio_flavor;
+  __le32 radio_step;
+  __le32 radio_dash;
 } __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */
 
 enum iwl_card_state_flags {
-    CARD_ENABLED = 0x00,
-    HW_CARD_DISABLED = 0x01,
-    SW_CARD_DISABLED = 0x02,
-    CT_KILL_CARD_DISABLED = 0x04,
-    HALT_CARD_DISABLED = 0x08,
-    CARD_DISABLED_MSK = 0x0f,
-    CARD_IS_RX_ON = 0x10,
+  CARD_ENABLED = 0x00,
+  HW_CARD_DISABLED = 0x01,
+  SW_CARD_DISABLED = 0x02,
+  CT_KILL_CARD_DISABLED = 0x04,
+  HALT_CARD_DISABLED = 0x08,
+  CARD_DISABLED_MSK = 0x0f,
+  CARD_IS_RX_ON = 0x10,
 };
 
 /**
@@ -158,7 +158,7 @@
  * @flags: &enum iwl_card_state_flags
  */
 struct iwl_card_state_notif {
-    __le32 flags;
+  __le32 flags;
 } __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_ALIVE_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/ax-softap-testmode.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/ax-softap-testmode.h
index 749f965..09997f0 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/ax-softap-testmode.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/ax-softap-testmode.h
@@ -55,18 +55,18 @@
  * @reserved: reserved for DW alignment
  */
 struct trig_frame_common_softap_testmode {
-    __le16 cmn_lsig_len;
-    uint8_t cmn_cascade_indication;
-    uint8_t cmn_carrier_sense_req;
-    uint8_t cmn_gi_ltf;
-    uint8_t cmn_mu_mimo_ltf;
-    uint8_t cmn_he_ltf_num;
-    uint8_t cmn_ldpc_ext_sym;
-    uint8_t cmn_packet_extension;
-    __le16 cmn_spatial_reuse;
-    uint8_t cmn_doppler;
-    __le16 cmn_res_he_sig_a;
-    __le16 reserved;
+  __le16 cmn_lsig_len;
+  uint8_t cmn_cascade_indication;
+  uint8_t cmn_carrier_sense_req;
+  uint8_t cmn_gi_ltf;
+  uint8_t cmn_mu_mimo_ltf;
+  uint8_t cmn_he_ltf_num;
+  uint8_t cmn_ldpc_ext_sym;
+  uint8_t cmn_packet_extension;
+  __le16 cmn_spatial_reuse;
+  uint8_t cmn_doppler;
+  __le16 cmn_res_he_sig_a;
+  __le16 reserved;
 } __packed; /* TRIG_FRAME_COMMON_SOFTAP_TESTMODE_API_S_VER_1 */
 
 /**
@@ -84,13 +84,13 @@
  * @usr_target_rssi: Target RSSI
  */
 struct trig_frame_user_softap_testmode {
-    __le16 usr_assoc_id;
-    uint8_t usr_rsrc_unit_alloc;
-    uint8_t usr_coding_type;
-    uint8_t usr_mcs;
-    uint8_t usr_dcm;
-    uint8_t usr_ss_allocation;
-    uint8_t usr_target_rssi;
+  __le16 usr_assoc_id;
+  uint8_t usr_rsrc_unit_alloc;
+  uint8_t usr_coding_type;
+  uint8_t usr_mcs;
+  uint8_t usr_dcm;
+  uint8_t usr_ss_allocation;
+  uint8_t usr_target_rssi;
 } __packed; /* TRIG_FRAME_USER_SOFTAP_TESTMODE_API_S_VER_1 */
 
 /**
@@ -105,10 +105,10 @@
  * @preferred_ac: Preferred AC
  */
 struct trig_frame_user_basic_softap_testmode {
-    uint8_t usr_space_factor;
-    uint8_t tid_agg_limit;
-    uint8_t preferred_ac_enabled;
-    uint8_t preferred_ac;
+  uint8_t usr_space_factor;
+  uint8_t tid_agg_limit;
+  uint8_t preferred_ac_enabled;
+  uint8_t preferred_ac;
 } __packed; /* TRIG_FRAME_USER_BASIC_SOFTAP_TESTMODE_API_S_VER_1 */
 
 /**
@@ -126,13 +126,13 @@
  * @reserved_for_addr2: addr data type in FW is aligned to 8 bytes
  */
 struct trig_frame_softap_testmode {
-    __le16 pad_byte_count;
-    uint8_t per_user_count;
-    uint8_t reserved;
-    uint8_t addr1[6];
-    __le16 reserved_for_addr1;
-    uint8_t addr2[6];
-    __le16 reserved_for_addr2;
+  __le16 pad_byte_count;
+  uint8_t per_user_count;
+  uint8_t reserved;
+  uint8_t addr1[6];
+  __le16 reserved_for_addr1;
+  uint8_t addr2[6];
+  __le16 reserved_for_addr2;
 } __packed; /* TRIG_FRAME_SOFTAP_TESTMODE_API_S_VER_1 */
 
 /**
@@ -154,10 +154,10 @@
  *          for each trigger frame
  */
 struct trig_frame_ax_softap_dl_basic {
-    struct trig_frame_softap_testmode frame_params;
-    struct trig_frame_common_softap_testmode common;
-    struct trig_frame_user_softap_testmode per_user[3];
-    struct trig_frame_user_basic_softap_testmode per_user_basic[3];
+  struct trig_frame_softap_testmode frame_params;
+  struct trig_frame_common_softap_testmode common;
+  struct trig_frame_user_softap_testmode per_user[3];
+  struct trig_frame_user_basic_softap_testmode per_user_basic[3];
 } __packed; /* TRIG_FRAME_SOFTAP_TESTMODE_DL_BASIC_API_S_VER_1 */
 
 /**
@@ -174,11 +174,11 @@
  *      be configured
  */
 struct ax_softap_testmode_dl_basic_cmd {
-    uint8_t enable;
-    uint8_t txop_duration_disable;
-    uint8_t configured_frames_count;
-    uint8_t reserved;
-    struct trig_frame_ax_softap_dl_basic frames[3];
+  uint8_t enable;
+  uint8_t txop_duration_disable;
+  uint8_t configured_frames_count;
+  uint8_t reserved;
+  struct trig_frame_ax_softap_dl_basic frames[3];
 } __packed; /* AX_SOFTAP_TESTMODE_DL_BASIC_API_S_VER_1 */
 
 /**
@@ -192,9 +192,9 @@
  * @reserved: reserved for DW alignment
  */
 struct trig_frame_bar_tid_ax_softap_testmode_dl_mu_bar {
-    __le16 association_id;
-    uint8_t ba_ssn_bitmap_size;
-    uint8_t reserved;
+  __le16 association_id;
+  uint8_t ba_ssn_bitmap_size;
+  uint8_t reserved;
 } __packed; /* TRIG_FRAME_BAR_TID_SOFTAP_TESTMODE_DL_MU_BAR_API_S_VER_1 */
 
 /**
@@ -211,11 +211,11 @@
  * @per_tid: MU-BAR trigger frame configuration per TID
  */
 struct trig_frame_bar_ax_softap_testmode_dl_mu_bar {
-    uint8_t block_ack_policy;
-    uint8_t block_ack_type;
-    uint8_t tid_count;
-    uint8_t reserved;
-    struct trig_frame_bar_tid_ax_softap_testmode_dl_mu_bar per_tid[3];
+  uint8_t block_ack_policy;
+  uint8_t block_ack_type;
+  uint8_t tid_count;
+  uint8_t reserved;
+  struct trig_frame_bar_tid_ax_softap_testmode_dl_mu_bar per_tid[3];
 } __packed; /* TRIG_FRAME_BAR_SOFTAP_TESTMODE_DL_MU_BAR_API_S_VER_1 */
 
 /**
@@ -236,10 +236,10 @@
  *   per user sections can be configured in this mode for each trigger frame
  */
 struct trig_frame_ax_softap_dl_mu_bar {
-    struct trig_frame_softap_testmode frame_params;
-    struct trig_frame_common_softap_testmode common;
-    struct trig_frame_user_softap_testmode per_user[3];
-    struct trig_frame_bar_ax_softap_testmode_dl_mu_bar bar[3];
+  struct trig_frame_softap_testmode frame_params;
+  struct trig_frame_common_softap_testmode common;
+  struct trig_frame_user_softap_testmode per_user[3];
+  struct trig_frame_bar_ax_softap_testmode_dl_mu_bar bar[3];
 } __packed; /* TRIG_FRAME_SOFTAP_TESTMODE_DL_MU_BAR_API_S_VER_1 */
 
 /**
@@ -252,11 +252,11 @@
  * @frame: the trigger frame content
  */
 struct ax_softap_testmode_dl_mu_bar_cmd {
-    uint8_t enable;
-    __le16 reserved1;
-    uint8_t reserved2;
-    __le32 rate_n_flags;
-    struct trig_frame_ax_softap_dl_mu_bar frame;
+  uint8_t enable;
+  __le16 reserved1;
+  uint8_t reserved2;
+  __le32 rate_n_flags;
+  struct trig_frame_ax_softap_dl_mu_bar frame;
 } __packed; /* AX_SOFTAP_TESTMODE_DL_MU_BAR_API_S_VER_1 */
 
 /**
@@ -271,11 +271,11 @@
  * @rate_n_flags: rate for TX operation of the configured trigger frame
  */
 struct per_trig_params_ax_softap_ul {
-    __le16 assoc_id;
-    __le16 duration;
-    uint8_t addr1[6];
-    __le16 reserved_for_addr1;
-    __le32 rate_n_flags;
+  __le16 assoc_id;
+  __le16 duration;
+  uint8_t addr1[6];
+  __le16 reserved_for_addr1;
+  __le32 rate_n_flags;
 } __packed; /* PER_TRIG_PARAMS_SOFTAP_TESTMODE_UL_API_S_VER_1 */
 
 /**
@@ -297,10 +297,10 @@
  *          mode for each trigger frame
  */
 struct trig_frame_ax_softap_ul {
-    struct trig_frame_softap_testmode frame_params;
-    struct trig_frame_common_softap_testmode common;
-    struct trig_frame_user_softap_testmode per_user[3];
-    struct trig_frame_user_basic_softap_testmode per_user_basic[3];
+  struct trig_frame_softap_testmode frame_params;
+  struct trig_frame_common_softap_testmode common;
+  struct trig_frame_user_softap_testmode per_user[3];
+  struct trig_frame_user_basic_softap_testmode per_user_basic[3];
 } __packed; /* TRIG_FRAME_SOFTAP_TESTMODE_UL_API_S_VER_1 */
 
 /**
@@ -315,12 +315,12 @@
  * @per_trigger: params to override config for each trigger in a sequence
  */
 struct ax_softap_testmode_ul_cmd {
-    uint8_t enable;
-    uint8_t trig_frame_periodic_msec;
-    __le16 reserved;
-    struct trig_frame_ax_softap_ul frame;
-    __le32 number_of_triggers_in_sequence;
-    struct per_trig_params_ax_softap_ul per_trigger[4];
+  uint8_t enable;
+  uint8_t trig_frame_periodic_msec;
+  __le16 reserved;
+  struct trig_frame_ax_softap_ul frame;
+  __le32 number_of_triggers_in_sequence;
+  struct per_trig_params_ax_softap_ul per_trigger[4];
 } __packed; /* AX_SOFTAP_TESTMODE_UL_API_S_VER_2 */
 
 /**
@@ -331,9 +331,9 @@
  * @reserved2: reserved for DW alignment
  */
 struct ax_softap_client_testmode_cmd {
-    uint8_t enable;
-    uint8_t reserved1;
-    __le16 reserved2;
+  uint8_t enable;
+  uint8_t reserved1;
+  __le16 reserved2;
 } __packed; /* AX_SOFTAP_CLIENT_TESTMODE_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_AX_SOFTAP_TESTMODE_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/binding.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/binding.h
index ca62cda..b47d818 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/binding.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/binding.h
@@ -51,12 +51,12 @@
  *  &enum iwl_ctxt_id_and_color
  */
 struct iwl_binding_cmd_v1 {
-    /* COMMON_INDEX_HDR_API_S_VER_1 */
-    __le32 id_and_color;
-    __le32 action;
-    /* BINDING_DATA_API_S_VER_1 */
-    __le32 macs[MAX_MACS_IN_BINDING];
-    __le32 phy;
+  /* COMMON_INDEX_HDR_API_S_VER_1 */
+  __le32 id_and_color;
+  __le32 action;
+  /* BINDING_DATA_API_S_VER_1 */
+  __le32 macs[MAX_MACS_IN_BINDING];
+  __le32 phy;
 } __packed; /* BINDING_CMD_API_S_VER_1 */
 
 /**
@@ -72,13 +72,13 @@
  * @lmac_id: the lmac id the binding belongs to
  */
 struct iwl_binding_cmd {
-    /* COMMON_INDEX_HDR_API_S_VER_1 */
-    __le32 id_and_color;
-    __le32 action;
-    /* BINDING_DATA_API_S_VER_1 */
-    __le32 macs[MAX_MACS_IN_BINDING];
-    __le32 phy;
-    __le32 lmac_id;
+  /* COMMON_INDEX_HDR_API_S_VER_1 */
+  __le32 id_and_color;
+  __le32 action;
+  /* BINDING_DATA_API_S_VER_1 */
+  __le32 macs[MAX_MACS_IN_BINDING];
+  __le32 phy;
+  __le32 lmac_id;
 } __packed; /* BINDING_CMD_API_S_VER_2 */
 
 #define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1)
@@ -97,9 +97,9 @@
  * @max_duration: max uninterrupted context duration in TU
  */
 struct iwl_time_quota_data_v1 {
-    __le32 id_and_color;
-    __le32 quota;
-    __le32 max_duration;
+  __le32 id_and_color;
+  __le32 quota;
+  __le32 max_duration;
 } __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
 
 /**
@@ -111,14 +111,14 @@
  *  On CDB the fourth one is a regular binding.
  */
 struct iwl_time_quota_cmd_v1 {
-    struct iwl_time_quota_data_v1 quotas[MAX_BINDINGS];
+  struct iwl_time_quota_data_v1 quotas[MAX_BINDINGS];
 } __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
 
 enum iwl_quota_low_latency {
-    IWL_QUOTA_LOW_LATENCY_NONE = 0,
-    IWL_QUOTA_LOW_LATENCY_TX = BIT(0),
-    IWL_QUOTA_LOW_LATENCY_RX = BIT(1),
-    IWL_QUOTA_LOW_LATENCY_TX_RX = IWL_QUOTA_LOW_LATENCY_TX | IWL_QUOTA_LOW_LATENCY_RX,
+  IWL_QUOTA_LOW_LATENCY_NONE = 0,
+  IWL_QUOTA_LOW_LATENCY_TX = BIT(0),
+  IWL_QUOTA_LOW_LATENCY_RX = BIT(1),
+  IWL_QUOTA_LOW_LATENCY_TX_RX = IWL_QUOTA_LOW_LATENCY_TX | IWL_QUOTA_LOW_LATENCY_RX,
 };
 
 /**
@@ -130,10 +130,10 @@
  * @low_latency: low latency status, &enum iwl_quota_low_latency
  */
 struct iwl_time_quota_data {
-    __le32 id_and_color;
-    __le32 quota;
-    __le32 max_duration;
-    __le32 low_latency;
+  __le32 id_and_color;
+  __le32 quota;
+  __le32 max_duration;
+  __le32 low_latency;
 } __packed; /* TIME_QUOTA_DATA_API_S_VER_2 */
 
 /**
@@ -145,7 +145,7 @@
  * @quotas: allocations per binding
  */
 struct iwl_time_quota_cmd {
-    struct iwl_time_quota_data quotas[MAX_BINDINGS];
+  struct iwl_time_quota_data quotas[MAX_BINDINGS];
 } __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_2 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_BINDING_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/cmdhdr.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/cmdhdr.h
index 903527f..4d794f6 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/cmdhdr.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/cmdhdr.h
@@ -55,20 +55,14 @@
  * the command id, the group id and the version of the command
  * and vice versa
  */
-static inline uint8_t iwl_cmd_opcode(uint32_t cmdid) {
-    return cmdid & 0xFF;
-}
+static inline uint8_t iwl_cmd_opcode(uint32_t cmdid) { return cmdid & 0xFF; }
 
-static inline uint8_t iwl_cmd_groupid(uint32_t cmdid) {
-    return ((cmdid & 0xFF00) >> 8);
-}
+static inline uint8_t iwl_cmd_groupid(uint32_t cmdid) { return ((cmdid & 0xFF00) >> 8); }
 
-static inline uint8_t iwl_cmd_version(uint32_t cmdid) {
-    return ((cmdid & 0xFF0000) >> 16);
-}
+static inline uint8_t iwl_cmd_version(uint32_t cmdid) { return ((cmdid & 0xFF0000) >> 16); }
 
 static inline uint32_t iwl_cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version) {
-    return opcode + (groupid << 8) + (version << 16);
+  return opcode + (groupid << 8) + (version << 16);
 }
 
 /* make uint16_t wide id out of uint8_t group and opcode */
@@ -87,38 +81,38 @@
  * driver, and each response/notification received from uCode.
  */
 struct iwl_cmd_header {
-    /**
-     * @cmd: Command ID: REPLY_RXON, etc.
-     */
-    uint8_t cmd;
-    /**
-     * @group_id: group ID, for commands with groups
-     */
-    uint8_t group_id;
-    /**
-     * @sequence:
-     * Sequence number for the command.
-     *
-     * The driver sets up the sequence number to values of its choosing.
-     * uCode does not use this value, but passes it back to the driver
-     * when sending the response to each driver-originated command, so
-     * the driver can match the response to the command.  Since the values
-     * don't get used by uCode, the driver may set up an arbitrary format.
-     *
-     * There is one exception:  uCode sets bit 15 when it originates
-     * the response/notification, i.e. when the response/notification
-     * is not a direct response to a command sent by the driver.  For
-     * example, uCode issues REPLY_RX when it sends a received frame
-     * to the driver; it is not a direct response to any driver command.
-     *
-     * The Linux driver uses the following format:
-     *
-     *  0:7     tfd index - position within TX queue
-     *  8:12    TX queue id
-     *  13:14   reserved
-     *  15      unsolicited RX or uCode-originated notification
-     */
-    __le16 sequence;
+  /**
+   * @cmd: Command ID: REPLY_RXON, etc.
+   */
+  uint8_t cmd;
+  /**
+   * @group_id: group ID, for commands with groups
+   */
+  uint8_t group_id;
+  /**
+   * @sequence:
+   * Sequence number for the command.
+   *
+   * The driver sets up the sequence number to values of its choosing.
+   * uCode does not use this value, but passes it back to the driver
+   * when sending the response to each driver-originated command, so
+   * the driver can match the response to the command.  Since the values
+   * don't get used by uCode, the driver may set up an arbitrary format.
+   *
+   * There is one exception:  uCode sets bit 15 when it originates
+   * the response/notification, i.e. when the response/notification
+   * is not a direct response to a command sent by the driver.  For
+   * example, uCode issues REPLY_RX when it sends a received frame
+   * to the driver; it is not a direct response to any driver command.
+   *
+   * The Linux driver uses the following format:
+   *
+   *  0:7     tfd index - position within TX queue
+   *  8:12    TX queue id
+   *  13:14   reserved
+   *  15      unsolicited RX or uCode-originated notification
+   */
+  __le16 sequence;
 } __packed;
 
 /**
@@ -137,12 +131,12 @@
  * @version: command version
  */
 struct iwl_cmd_header_wide {
-    uint8_t cmd;
-    uint8_t group_id;
-    __le16 sequence;
-    __le16 length;
-    uint8_t reserved;
-    uint8_t version;
+  uint8_t cmd;
+  uint8_t group_id;
+  __le16 sequence;
+  __le16 length;
+  uint8_t reserved;
+  uint8_t version;
 } __packed;
 
 /**
@@ -152,9 +146,9 @@
  * @data: data, length in @length
  */
 struct iwl_calib_res_notif_phy_db {
-    __le16 type;
-    __le16 length;
-    uint8_t data[];
+  __le16 type;
+  __le16 length;
+  uint8_t data[];
 } __packed;
 
 /**
@@ -164,9 +158,9 @@
  * @data: data, length in @length
  */
 struct iwl_phy_db_cmd {
-    __le16 type;
-    __le16 length;
-    uint8_t data[];
+  __le16 type;
+  __le16 length;
+  uint8_t data[];
 } __packed;
 
 /**
@@ -174,7 +168,7 @@
  * @status: status of the command asked, changes for each one
  */
 struct iwl_cmd_response {
-    __le32 status;
+  __le32 status;
 };
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_CMDHDR_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/coex.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/coex.h
index 49ae459..d984f8a 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/coex.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/coex.h
@@ -39,29 +39,29 @@
 #define BITS(nb) (BIT(nb) - 1)
 
 enum iwl_bt_coex_lut_type {
-    BT_COEX_TIGHT_LUT = 0,
-    BT_COEX_LOOSE_LUT,
-    BT_COEX_TX_DIS_LUT,
+  BT_COEX_TIGHT_LUT = 0,
+  BT_COEX_LOOSE_LUT,
+  BT_COEX_TX_DIS_LUT,
 
-    BT_COEX_MAX_LUT,
-    BT_COEX_INVALID_LUT = 0xff,
+  BT_COEX_MAX_LUT,
+  BT_COEX_INVALID_LUT = 0xff,
 }; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
 
 #define BT_REDUCED_TX_POWER_BIT BIT(7)
 
 enum iwl_bt_coex_mode {
-    BT_COEX_DISABLE = 0x0,
-    BT_COEX_NW = 0x1,
-    BT_COEX_BT = 0x2,
-    BT_COEX_WIFI = 0x3,
+  BT_COEX_DISABLE = 0x0,
+  BT_COEX_NW = 0x1,
+  BT_COEX_BT = 0x2,
+  BT_COEX_WIFI = 0x3,
 }; /* BT_COEX_MODES_E */
 
 enum iwl_bt_coex_enabled_modules {
-    BT_COEX_MPLUT_ENABLED = BIT(0),
-    BT_COEX_MPLUT_BOOST_ENABLED = BIT(1),
-    BT_COEX_SYNC2SCO_ENABLED = BIT(2),
-    BT_COEX_CORUN_ENABLED = BIT(3),
-    BT_COEX_HIGH_BAND_RET = BIT(4),
+  BT_COEX_MPLUT_ENABLED = BIT(0),
+  BT_COEX_MPLUT_BOOST_ENABLED = BIT(1),
+  BT_COEX_SYNC2SCO_ENABLED = BIT(2),
+  BT_COEX_CORUN_ENABLED = BIT(3),
+  BT_COEX_HIGH_BAND_RET = BIT(4),
 }; /* BT_COEX_MODULES_ENABLE_E_VER_1 */
 
 /**
@@ -72,8 +72,8 @@
  * The structure is used for the BT_COEX command.
  */
 struct iwl_bt_coex_cmd {
-    __le32 mode;
-    __le32 enabled_modules;
+  __le32 mode;
+  __le32 enabled_modules;
 } __packed; /* BT_COEX_CMD_API_S_VER_6 */
 
 /**
@@ -82,7 +82,7 @@
  *  bits are the sta_id (value)
  */
 struct iwl_bt_coex_reduced_txp_update_cmd {
-    __le32 reduced_txp;
+  __le32 reduced_txp;
 } __packed; /* BT_COEX_UPDATE_REDUCED_TX_POWER_API_S_VER_1 */
 
 /**
@@ -95,92 +95,92 @@
  * Used for BT_COEX_CI command
  */
 struct iwl_bt_coex_ci_cmd {
-    __le64 bt_primary_ci;
-    __le32 primary_ch_phy_id;
+  __le64 bt_primary_ci;
+  __le32 primary_ch_phy_id;
 
-    __le64 bt_secondary_ci;
-    __le32 secondary_ch_phy_id;
+  __le64 bt_secondary_ci;
+  __le32 secondary_ch_phy_id;
 } __packed; /* BT_CI_MSG_API_S_VER_2 */
 
-#define BT_MBOX(n_dw, _msg, _pos, _nbits)  \
-    BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
-    BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
+#define BT_MBOX(n_dw, _msg, _pos, _nbits)                                      \
+  BT_MBOX##n_dw##_##_msg##_POS = (_pos), BT_MBOX##n_dw##_##_msg = BITS(_nbits) \
+                                                                  << BT_MBOX##n_dw##_##_msg##_POS
 
 enum iwl_bt_mxbox_dw0 {
-    BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
-    BT_MBOX(0, LE_PROF1, 3, 1),
-    BT_MBOX(0, LE_PROF2, 4, 1),
-    BT_MBOX(0, LE_PROF_OTHER, 5, 1),
-    BT_MBOX(0, CHL_SEQ_N, 8, 4),
-    BT_MBOX(0, INBAND_S, 13, 1),
-    BT_MBOX(0, LE_MIN_RSSI, 16, 4),
-    BT_MBOX(0, LE_SCAN, 20, 1),
-    BT_MBOX(0, LE_ADV, 21, 1),
-    BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
-    BT_MBOX(0, OPEN_CON_1, 28, 2),
+  BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
+  BT_MBOX(0, LE_PROF1, 3, 1),
+  BT_MBOX(0, LE_PROF2, 4, 1),
+  BT_MBOX(0, LE_PROF_OTHER, 5, 1),
+  BT_MBOX(0, CHL_SEQ_N, 8, 4),
+  BT_MBOX(0, INBAND_S, 13, 1),
+  BT_MBOX(0, LE_MIN_RSSI, 16, 4),
+  BT_MBOX(0, LE_SCAN, 20, 1),
+  BT_MBOX(0, LE_ADV, 21, 1),
+  BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
+  BT_MBOX(0, OPEN_CON_1, 28, 2),
 };
 
 enum iwl_bt_mxbox_dw1 {
-    BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
-    BT_MBOX(1, IP_SR, 4, 1),
-    BT_MBOX(1, LE_MSTR, 5, 1),
-    BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
-    BT_MBOX(1, MSG_TYPE, 16, 3),
-    BT_MBOX(1, SSN, 19, 2),
+  BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
+  BT_MBOX(1, IP_SR, 4, 1),
+  BT_MBOX(1, LE_MSTR, 5, 1),
+  BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
+  BT_MBOX(1, MSG_TYPE, 16, 3),
+  BT_MBOX(1, SSN, 19, 2),
 };
 
 enum iwl_bt_mxbox_dw2 {
-    BT_MBOX(2, SNIFF_ACT, 0, 3),
-    BT_MBOX(2, PAG, 3, 1),
-    BT_MBOX(2, INQUIRY, 4, 1),
-    BT_MBOX(2, CONN, 5, 1),
-    BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
-    BT_MBOX(2, DISC, 13, 1),
-    BT_MBOX(2, SCO_TX_ACT, 16, 2),
-    BT_MBOX(2, SCO_RX_ACT, 18, 2),
-    BT_MBOX(2, ESCO_RE_TX, 20, 2),
-    BT_MBOX(2, SCO_DURATION, 24, 6),
+  BT_MBOX(2, SNIFF_ACT, 0, 3),
+  BT_MBOX(2, PAG, 3, 1),
+  BT_MBOX(2, INQUIRY, 4, 1),
+  BT_MBOX(2, CONN, 5, 1),
+  BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
+  BT_MBOX(2, DISC, 13, 1),
+  BT_MBOX(2, SCO_TX_ACT, 16, 2),
+  BT_MBOX(2, SCO_RX_ACT, 18, 2),
+  BT_MBOX(2, ESCO_RE_TX, 20, 2),
+  BT_MBOX(2, SCO_DURATION, 24, 6),
 };
 
 enum iwl_bt_mxbox_dw3 {
-    BT_MBOX(3, SCO_STATE, 0, 1),
-    BT_MBOX(3, SNIFF_STATE, 1, 1),
-    BT_MBOX(3, A2DP_STATE, 2, 1),
-    BT_MBOX(3, ACL_STATE, 3, 1),
-    BT_MBOX(3, MSTR_STATE, 4, 1),
-    BT_MBOX(3, OBX_STATE, 5, 1),
-    BT_MBOX(3, A2DP_SRC, 6, 1),
-    BT_MBOX(3, OPEN_CON_2, 8, 2),
-    BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
-    BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
-    BT_MBOX(3, INBAND_P, 13, 1),
-    BT_MBOX(3, MSG_TYPE_2, 16, 3),
-    BT_MBOX(3, SSN_2, 19, 2),
-    BT_MBOX(3, UPDATE_REQUEST, 21, 1),
+  BT_MBOX(3, SCO_STATE, 0, 1),
+  BT_MBOX(3, SNIFF_STATE, 1, 1),
+  BT_MBOX(3, A2DP_STATE, 2, 1),
+  BT_MBOX(3, ACL_STATE, 3, 1),
+  BT_MBOX(3, MSTR_STATE, 4, 1),
+  BT_MBOX(3, OBX_STATE, 5, 1),
+  BT_MBOX(3, A2DP_SRC, 6, 1),
+  BT_MBOX(3, OPEN_CON_2, 8, 2),
+  BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
+  BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
+  BT_MBOX(3, INBAND_P, 13, 1),
+  BT_MBOX(3, MSG_TYPE_2, 16, 3),
+  BT_MBOX(3, SSN_2, 19, 2),
+  BT_MBOX(3, UPDATE_REQUEST, 21, 1),
 };
 
-#define BT_MBOX_MSG(_notif, _num, _field)                                    \
-    ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field) >> \
-     BT_MBOX##_num##_##_field##_POS)
+#define BT_MBOX_MSG(_notif, _num, _field)                                  \
+  ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field) >> \
+   BT_MBOX##_num##_##_field##_POS)
 
-#define BT_MBOX_PRINT(_num, _field, _end)                           \
-    pos += scnprintf(buf + pos, bufsz - pos, "\t%s: %d%s", #_field, \
-                     BT_MBOX_MSG(notif, _num, _field), true ? "\n" : ", ");
+#define BT_MBOX_PRINT(_num, _field, _end)                         \
+  pos += scnprintf(buf + pos, bufsz - pos, "\t%s: %d%s", #_field, \
+                   BT_MBOX_MSG(notif, _num, _field), true ? "\n" : ", ");
 enum iwl_bt_activity_grading {
-    BT_OFF = 0,
-    BT_ON_NO_CONNECTION = 1,
-    BT_LOW_TRAFFIC = 2,
-    BT_HIGH_TRAFFIC = 3,
-    BT_VERY_HIGH_TRAFFIC = 4,
+  BT_OFF = 0,
+  BT_ON_NO_CONNECTION = 1,
+  BT_LOW_TRAFFIC = 2,
+  BT_HIGH_TRAFFIC = 3,
+  BT_VERY_HIGH_TRAFFIC = 4,
 
-    BT_MAX_AG,
+  BT_MAX_AG,
 }; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
 
 enum iwl_bt_ci_compliance {
-    BT_CI_COMPLIANCE_NONE = 0,
-    BT_CI_COMPLIANCE_PRIMARY = 1,
-    BT_CI_COMPLIANCE_SECONDARY = 2,
-    BT_CI_COMPLIANCE_BOTH = 3,
+  BT_CI_COMPLIANCE_NONE = 0,
+  BT_CI_COMPLIANCE_PRIMARY = 1,
+  BT_CI_COMPLIANCE_SECONDARY = 2,
+  BT_CI_COMPLIANCE_BOTH = 3,
 }; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */
 
 /**
@@ -196,16 +196,16 @@
  * @reserved: reserved
  */
 struct iwl_bt_coex_profile_notif {
-    __le32 mbox_msg[4];
-    __le32 msg_idx;
-    __le32 bt_ci_compliance;
+  __le32 mbox_msg[4];
+  __le32 msg_idx;
+  __le32 bt_ci_compliance;
 
-    __le32 primary_ch_lut;
-    __le32 secondary_ch_lut;
-    __le32 bt_activity_grading;
-    uint8_t ttc_status;
-    uint8_t rrc_status;
-    __le16 reserved;
+  __le32 primary_ch_lut;
+  __le32 secondary_ch_lut;
+  __le32 bt_activity_grading;
+  uint8_t ttc_status;
+  uint8_t rrc_status;
+  __le16 reserved;
 } __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
 
 #ifdef CPTCFG_IWLWIFI_FRQ_MGR
@@ -215,7 +215,7 @@
  * @enabled: 2g coex is enabled/disabled
  */
 struct iwl_config_2g_coex_cmd {
-    __le32 enabled;
+  __le32 enabled;
 } __packed; /* CONFIG_2G_COEX_CMD_API_S_VER_1 */
 #endif
 
@@ -240,15 +240,15 @@
  * Used for LTE_COEX_CONFIG_CMD command
  */
 struct iwl_lte_coex_config_cmd {
-    __le32 lte_state;
-    __le32 lte_band;
-    __le32 lte_chan;
-    __le32 lte_frame_structure[LTE_COEX_FRAME_STRUCTURE_LENGTH];
-    __le32 tx_safe_freq_min;
-    __le32 tx_safe_freq_max;
-    __le32 rx_safe_freq_min;
-    __le32 rx_safe_freq_max;
-    uint8_t max_tx_power[WIFI_BAND_24_NUM_CHANNELS];
+  __le32 lte_state;
+  __le32 lte_band;
+  __le32 lte_chan;
+  __le32 lte_frame_structure[LTE_COEX_FRAME_STRUCTURE_LENGTH];
+  __le32 tx_safe_freq_min;
+  __le32 tx_safe_freq_max;
+  __le32 rx_safe_freq_min;
+  __le32 rx_safe_freq_max;
+  uint8_t max_tx_power[WIFI_BAND_24_NUM_CHANNELS];
 } __packed; /* LTE_COEX_CONFIG_CMD_API_S_VER_1 */
 
 /**
@@ -260,8 +260,8 @@
  * Used for LTE_COEX_STATIC_PARAMS_CMD command
  */
 struct iwl_lte_coex_static_params_cmd {
-    __le32 mfu_config[LTE_COEX_MFUART_CONFIG_LENGTH];
-    int8_t tx_power_in_dbm[32];
+  __le32 mfu_config[LTE_COEX_MFUART_CONFIG_LENGTH];
+  int8_t tx_power_in_dbm[32];
 } __packed; /* LTE_COEX_STATIC_PARAMS_CMD_API_S_VER_1 */
 
 /**
@@ -273,8 +273,8 @@
  * Used for LTE_COEX_WIFI_REPORTED_CHANNEL_CMD command
  */
 struct iwl_lte_coex_wifi_reported_channel_cmd {
-    __le32 channel;
-    __le32 bandwidth;
+  __le32 channel;
+  __le32 bandwidth;
 } __packed; /* LTE_COEX_WIFI_REPORTED_CHANNEL_CMD_API_S_VER_1 */
 
 /**
@@ -285,7 +285,7 @@
  * Used for LTE_COEX_SPS_CMD command
  */
 struct iwl_lte_coex_sps_cmd {
-    __le32 lte_semi_persistent_info;
+  __le32 lte_semi_persistent_info;
 } __packed; /* LTE_COEX_WIFI_SPS_CMD_API_S_VER_1 */
 
 /**
@@ -307,16 +307,16 @@
  * Used for LTE_COEX_FINE_TUNING_PARAMS_CMD command
  */
 struct iwl_lte_coex_fine_tuning_params_cmd {
-    __le32 rx_protection_assert_timing;
-    __le32 tx_protection_assert_timing;
-    __le32 rx_protection_timeout;
-    __le32 min_tx_power;
-    __le32 lte_ul_load_uapsd_threshold;
-    __le32 rx_failure_during_ul_uapsd_threshold;
-    __le32 rx_failure_during_ul_sc_threshold;
-    __le32 rx_duration_for_ack_protection_us;
-    __le32 beacon_failure_during_ul_counter;
-    __le32 dtim_failure_during_ul_counter;
+  __le32 rx_protection_assert_timing;
+  __le32 tx_protection_assert_timing;
+  __le32 rx_protection_timeout;
+  __le32 min_tx_power;
+  __le32 lte_ul_load_uapsd_threshold;
+  __le32 rx_failure_during_ul_uapsd_threshold;
+  __le32 rx_failure_during_ul_sc_threshold;
+  __le32 rx_duration_for_ack_protection_us;
+  __le32 beacon_failure_during_ul_counter;
+  __le32 dtim_failure_during_ul_counter;
 } __packed; /* LTE_COEX_FINE_TUNING_PARAMS_CMD_API_S_VER_1 */
 
 /**
@@ -324,7 +324,7 @@
  * @statistic_placeholder: placeholder
  */
 struct iwl_lte_coex_statistic_ntfy {
-    __le32 statistic_placeholder;
+  __le32 statistic_placeholder;
 } __packed; /* LTE_COEX_STATISTIC_NTFY_API_S_VER_1 */
 #endif      /* CPTCFG_IWLWIFI_LTE_COEX */
 
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/commands.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/commands.h
index e39e3ca..7a2e78c 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/commands.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/commands.h
@@ -60,636 +60,636 @@
  * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds
  */
 enum iwl_mvm_command_groups {
-    LEGACY_GROUP = 0x0,
-    LONG_GROUP = 0x1,
-    SYSTEM_GROUP = 0x2,
-    MAC_CONF_GROUP = 0x3,
-    PHY_OPS_GROUP = 0x4,
-    DATA_PATH_GROUP = 0x5,
-    NAN_GROUP = 0x7,
-    TOF_GROUP = 0x8,
-    PROT_OFFLOAD_GROUP = 0xb,
-    REGULATORY_AND_NVM_GROUP = 0xc,
-    XVT_GROUP = 0xe,
-    DEBUG_GROUP = 0xf,
+  LEGACY_GROUP = 0x0,
+  LONG_GROUP = 0x1,
+  SYSTEM_GROUP = 0x2,
+  MAC_CONF_GROUP = 0x3,
+  PHY_OPS_GROUP = 0x4,
+  DATA_PATH_GROUP = 0x5,
+  NAN_GROUP = 0x7,
+  TOF_GROUP = 0x8,
+  PROT_OFFLOAD_GROUP = 0xb,
+  REGULATORY_AND_NVM_GROUP = 0xc,
+  XVT_GROUP = 0xe,
+  DEBUG_GROUP = 0xf,
 };
 
 /**
  * enum iwl_legacy_cmds - legacy group command IDs
  */
 enum iwl_legacy_cmds {
-    /**
-     * @MVM_ALIVE:
-     * Alive data from the firmware, as described in
-     * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp.
-     */
-    MVM_ALIVE = 0x1,
+  /**
+   * @MVM_ALIVE:
+   * Alive data from the firmware, as described in
+   * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp.
+   */
+  MVM_ALIVE = 0x1,
 
-    /**
-     * @REPLY_ERROR: Cause an error in the firmware, for testing purposes.
-     */
-    REPLY_ERROR = 0x2,
+  /**
+   * @REPLY_ERROR: Cause an error in the firmware, for testing purposes.
+   */
+  REPLY_ERROR = 0x2,
 
-    /**
-     * @ECHO_CMD: Send data to the device to have it returned immediately.
-     */
-    ECHO_CMD = 0x3,
+  /**
+   * @ECHO_CMD: Send data to the device to have it returned immediately.
+   */
+  ECHO_CMD = 0x3,
 
-    /**
-     * @INIT_COMPLETE_NOTIF: Notification that initialization is complete.
-     */
-    INIT_COMPLETE_NOTIF = 0x4,
+  /**
+   * @INIT_COMPLETE_NOTIF: Notification that initialization is complete.
+   */
+  INIT_COMPLETE_NOTIF = 0x4,
 
-    /**
-     * @PHY_CONTEXT_CMD:
-     * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd.
-     */
-    PHY_CONTEXT_CMD = 0x8,
+  /**
+   * @PHY_CONTEXT_CMD:
+   * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd.
+   */
+  PHY_CONTEXT_CMD = 0x8,
 
-    /**
-     * @DBG_CFG: Debug configuration command.
-     */
-    DBG_CFG = 0x9,
+  /**
+   * @DBG_CFG: Debug configuration command.
+   */
+  DBG_CFG = 0x9,
 
-    /**
-     * @SCAN_ITERATION_COMPLETE_UMAC:
-     * Firmware indicates a scan iteration completed, using
-     * &struct iwl_umac_scan_iter_complete_notif.
-     */
-    SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
+  /**
+   * @SCAN_ITERATION_COMPLETE_UMAC:
+   * Firmware indicates a scan iteration completed, using
+   * &struct iwl_umac_scan_iter_complete_notif.
+   */
+  SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
 
-    /**
-     * @SCAN_CFG_CMD:
-     * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config
-     */
-    SCAN_CFG_CMD = 0xc,
+  /**
+   * @SCAN_CFG_CMD:
+   * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config
+   */
+  SCAN_CFG_CMD = 0xc,
 
-    /**
-     * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac
-     */
-    SCAN_REQ_UMAC = 0xd,
+  /**
+   * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac
+   */
+  SCAN_REQ_UMAC = 0xd,
 
-    /**
-     * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort
-     */
-    SCAN_ABORT_UMAC = 0xe,
+  /**
+   * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort
+   */
+  SCAN_ABORT_UMAC = 0xe,
 
-    /**
-     * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete
-     */
-    SCAN_COMPLETE_UMAC = 0xf,
+  /**
+   * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete
+   */
+  SCAN_COMPLETE_UMAC = 0xf,
 
-    /**
-     * @BA_WINDOW_STATUS_NOTIFICATION_ID:
-     * uses &struct iwl_ba_window_status_notif
-     */
-    BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13,
+  /**
+   * @BA_WINDOW_STATUS_NOTIFICATION_ID:
+   * uses &struct iwl_ba_window_status_notif
+   */
+  BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13,
 
-    /**
-     * @ADD_STA_KEY:
-     * &struct iwl_mvm_add_sta_key_cmd_v1 or
-     * &struct iwl_mvm_add_sta_key_cmd.
-     */
-    ADD_STA_KEY = 0x17,
+  /**
+   * @ADD_STA_KEY:
+   * &struct iwl_mvm_add_sta_key_cmd_v1 or
+   * &struct iwl_mvm_add_sta_key_cmd.
+   */
+  ADD_STA_KEY = 0x17,
 
-    /**
-     * @ADD_STA:
-     * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7.
-     */
-    ADD_STA = 0x18,
+  /**
+   * @ADD_STA:
+   * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7.
+   */
+  ADD_STA = 0x18,
 
-    /**
-     * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd
-     */
-    REMOVE_STA = 0x19,
+  /**
+   * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd
+   */
+  REMOVE_STA = 0x19,
 
-    /**
-     * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd
-     */
-    FW_GET_ITEM_CMD = 0x1a,
+  /**
+   * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd
+   */
+  FW_GET_ITEM_CMD = 0x1a,
 
-    /**
-     * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
-     *  &struct iwl_tx_cmd_gen3,
-     *  response in &struct iwl_mvm_tx_resp or
-     *  &struct iwl_mvm_tx_resp_v3
-     */
-    TX_CMD = 0x1c,
+  /**
+   * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
+   *  &struct iwl_tx_cmd_gen3,
+   *  response in &struct iwl_mvm_tx_resp or
+   *  &struct iwl_mvm_tx_resp_v3
+   */
+  TX_CMD = 0x1c,
 
-    /**
-     * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd
-     */
-    TXPATH_FLUSH = 0x1e,
+  /**
+   * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd
+   */
+  TXPATH_FLUSH = 0x1e,
 
-    /**
-     * @MGMT_MCAST_KEY:
-     * &struct iwl_mvm_mgmt_mcast_key_cmd or
-     * &struct iwl_mvm_mgmt_mcast_key_cmd_v1
-     */
-    MGMT_MCAST_KEY = 0x1f,
+  /**
+   * @MGMT_MCAST_KEY:
+   * &struct iwl_mvm_mgmt_mcast_key_cmd or
+   * &struct iwl_mvm_mgmt_mcast_key_cmd_v1
+   */
+  MGMT_MCAST_KEY = 0x1f,
 
-    /* scheduler config */
-    /**
-     * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
-     *  &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
-     *  for newer (22000) hardware.
-     */
-    SCD_QUEUE_CFG = 0x1d,
+  /* scheduler config */
+  /**
+   * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
+   *  &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
+   *  for newer (22000) hardware.
+   */
+  SCD_QUEUE_CFG = 0x1d,
 
-    /**
-     * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd
-     */
-    WEP_KEY = 0x20,
+  /**
+   * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd
+   */
+  WEP_KEY = 0x20,
 
-    /**
-     * @SHARED_MEM_CFG:
-     * retrieve shared memory configuration - response in
-     * &struct iwl_shared_mem_cfg
-     */
-    SHARED_MEM_CFG = 0x25,
+  /**
+   * @SHARED_MEM_CFG:
+   * retrieve shared memory configuration - response in
+   * &struct iwl_shared_mem_cfg
+   */
+  SHARED_MEM_CFG = 0x25,
 
-    /**
-     * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd
-     */
-    TDLS_CHANNEL_SWITCH_CMD = 0x27,
+  /**
+   * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd
+   */
+  TDLS_CHANNEL_SWITCH_CMD = 0x27,
 
-    /**
-     * @TDLS_CHANNEL_SWITCH_NOTIFICATION:
-     * uses &struct iwl_tdls_channel_switch_notif
-     */
-    TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
+  /**
+   * @TDLS_CHANNEL_SWITCH_NOTIFICATION:
+   * uses &struct iwl_tdls_channel_switch_notif
+   */
+  TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
 
-    /**
-     * @TDLS_CONFIG_CMD:
-     * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res
-     */
-    TDLS_CONFIG_CMD = 0xa7,
+  /**
+   * @TDLS_CONFIG_CMD:
+   * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res
+   */
+  TDLS_CONFIG_CMD = 0xa7,
 
-    /**
-     * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd
-     */
-    MAC_CONTEXT_CMD = 0x28,
+  /**
+   * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd
+   */
+  MAC_CONTEXT_CMD = 0x28,
 
-    /**
-     * @TIME_EVENT_CMD:
-     * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp
-     */
-    TIME_EVENT_CMD = 0x29, /* both CMD and response */
+  /**
+   * @TIME_EVENT_CMD:
+   * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp
+   */
+  TIME_EVENT_CMD = 0x29, /* both CMD and response */
 
-    /**
-     * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif
-     */
-    TIME_EVENT_NOTIFICATION = 0x2a,
+  /**
+   * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif
+   */
+  TIME_EVENT_NOTIFICATION = 0x2a,
 
-    /**
-     * @BINDING_CONTEXT_CMD:
-     * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1
-     */
-    BINDING_CONTEXT_CMD = 0x2b,
+  /**
+   * @BINDING_CONTEXT_CMD:
+   * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1
+   */
+  BINDING_CONTEXT_CMD = 0x2b,
 
-    /**
-     * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd
-     */
-    TIME_QUOTA_CMD = 0x2c,
+  /**
+   * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd
+   */
+  TIME_QUOTA_CMD = 0x2c,
 
-    /**
-     * @NON_QOS_TX_COUNTER_CMD:
-     * command is &struct iwl_nonqos_seq_query_cmd
-     */
-    NON_QOS_TX_COUNTER_CMD = 0x2d,
+  /**
+   * @NON_QOS_TX_COUNTER_CMD:
+   * command is &struct iwl_nonqos_seq_query_cmd
+   */
+  NON_QOS_TX_COUNTER_CMD = 0x2d,
 
-    /**
-     * @FIPS_TEST_VECTOR_CMD: command is &struct iwl_fips_test_cmd
-     */
-    FIPS_TEST_VECTOR_CMD = 0x3b,
+  /**
+   * @FIPS_TEST_VECTOR_CMD: command is &struct iwl_fips_test_cmd
+   */
+  FIPS_TEST_VECTOR_CMD = 0x3b,
 
-    /**
-     * @LEDS_CMD: command is &struct iwl_led_cmd
-     */
-    LEDS_CMD = 0x48,
+  /**
+   * @LEDS_CMD: command is &struct iwl_led_cmd
+   */
+  LEDS_CMD = 0x48,
 
-    /**
-     * @LQ_CMD: using &struct iwl_lq_cmd
-     */
-    LQ_CMD = 0x4e,
+  /**
+   * @LQ_CMD: using &struct iwl_lq_cmd
+   */
+  LQ_CMD = 0x4e,
 
-    /**
-     * @FW_PAGING_BLOCK_CMD:
-     * &struct iwl_fw_paging_cmd
-     */
-    FW_PAGING_BLOCK_CMD = 0x4f,
+  /**
+   * @FW_PAGING_BLOCK_CMD:
+   * &struct iwl_fw_paging_cmd
+   */
+  FW_PAGING_BLOCK_CMD = 0x4f,
 
-    /**
-     * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac
-     */
-    SCAN_OFFLOAD_REQUEST_CMD = 0x51,
+  /**
+   * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac
+   */
+  SCAN_OFFLOAD_REQUEST_CMD = 0x51,
 
-    /**
-     * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents
-     */
-    SCAN_OFFLOAD_ABORT_CMD = 0x52,
+  /**
+   * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents
+   */
+  SCAN_OFFLOAD_ABORT_CMD = 0x52,
 
-    /**
-     * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req
-     */
-    HOT_SPOT_CMD = 0x53,
+  /**
+   * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req
+   */
+  HOT_SPOT_CMD = 0x53,
 
-    /**
-     * @SCAN_OFFLOAD_COMPLETE:
-     * notification, &struct iwl_periodic_scan_complete
-     */
-    SCAN_OFFLOAD_COMPLETE = 0x6D,
+  /**
+   * @SCAN_OFFLOAD_COMPLETE:
+   * notification, &struct iwl_periodic_scan_complete
+   */
+  SCAN_OFFLOAD_COMPLETE = 0x6D,
 
-    /**
-     * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD:
-     * update scan offload (scheduled scan) profiles/blacklist/etc.
-     */
-    SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
+  /**
+   * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD:
+   * update scan offload (scheduled scan) profiles/blacklist/etc.
+   */
+  SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
 
-    /**
-     * @MATCH_FOUND_NOTIFICATION: scan match found
-     */
-    MATCH_FOUND_NOTIFICATION = 0xd9,
+  /**
+   * @MATCH_FOUND_NOTIFICATION: scan match found
+   */
+  MATCH_FOUND_NOTIFICATION = 0xd9,
 
-    /**
-     * @SCAN_ITERATION_COMPLETE:
-     * uses &struct iwl_lmac_scan_complete_notif
-     */
-    SCAN_ITERATION_COMPLETE = 0xe7,
+  /**
+   * @SCAN_ITERATION_COMPLETE:
+   * uses &struct iwl_lmac_scan_complete_notif
+   */
+  SCAN_ITERATION_COMPLETE = 0xe7,
 
-    /* Phy */
-    /**
-     * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd
-     */
-    PHY_CONFIGURATION_CMD = 0x6a,
+  /* Phy */
+  /**
+   * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd
+   */
+  PHY_CONFIGURATION_CMD = 0x6a,
 
-    /**
-     * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db
-     */
-    CALIB_RES_NOTIF_PHY_DB = 0x6b,
+  /**
+   * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db
+   */
+  CALIB_RES_NOTIF_PHY_DB = 0x6b,
 
-    /**
-     * @PHY_DB_CMD: &struct iwl_phy_db_cmd
-     */
-    PHY_DB_CMD = 0x6c,
+  /**
+   * @PHY_DB_CMD: &struct iwl_phy_db_cmd
+   */
+  PHY_DB_CMD = 0x6c,
 
-    /**
-     * @TOF_CMD: &struct iwl_tof_config_cmd
-     */
-    TOF_CMD = 0x10,
+  /**
+   * @TOF_CMD: &struct iwl_tof_config_cmd
+   */
+  TOF_CMD = 0x10,
 
-    /**
-     * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd
-     */
-    TOF_NOTIFICATION = 0x11,
+  /**
+   * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd
+   */
+  TOF_NOTIFICATION = 0x11,
 
-    /**
-     * @CONFIG_2G_COEX_CMD: &struct iwl_config_2g_coex_cmd
-     */
-    CONFIG_2G_COEX_CMD = 0x71,
+  /**
+   * @CONFIG_2G_COEX_CMD: &struct iwl_config_2g_coex_cmd
+   */
+  CONFIG_2G_COEX_CMD = 0x71,
 
-    /**
-     * @POWER_TABLE_CMD: &struct iwl_device_power_cmd
-     */
-    POWER_TABLE_CMD = 0x77,
+  /**
+   * @POWER_TABLE_CMD: &struct iwl_device_power_cmd
+   */
+  POWER_TABLE_CMD = 0x77,
 
-    /**
-     * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION:
-     * &struct iwl_uapsd_misbehaving_ap_notif
-     */
-    PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
+  /**
+   * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION:
+   * &struct iwl_uapsd_misbehaving_ap_notif
+   */
+  PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
 
-    /**
-     * @LTR_CONFIG: &struct iwl_ltr_config_cmd
-     */
-    LTR_CONFIG = 0xee,
+  /**
+   * @LTR_CONFIG: &struct iwl_ltr_config_cmd
+   */
+  LTR_CONFIG = 0xee,
 
-    /**
-     * @REPLY_THERMAL_MNG_BACKOFF:
-     * Thermal throttling command
-     */
-    REPLY_THERMAL_MNG_BACKOFF = 0x7e,
+  /**
+   * @REPLY_THERMAL_MNG_BACKOFF:
+   * Thermal throttling command
+   */
+  REPLY_THERMAL_MNG_BACKOFF = 0x7e,
 
-    /**
-     * @DC2DC_CONFIG_CMD:
-     * Set/Get DC2DC frequency tune
-     * Command is &struct iwl_dc2dc_config_cmd,
-     * response is &struct iwl_dc2dc_config_resp
-     */
-    DC2DC_CONFIG_CMD = 0x83,
+  /**
+   * @DC2DC_CONFIG_CMD:
+   * Set/Get DC2DC frequency tune
+   * Command is &struct iwl_dc2dc_config_cmd,
+   * response is &struct iwl_dc2dc_config_resp
+   */
+  DC2DC_CONFIG_CMD = 0x83,
 
-    /**
-     * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
-     */
-    NVM_ACCESS_CMD = 0x88,
+  /**
+   * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
+   */
+  NVM_ACCESS_CMD = 0x88,
 
-    /**
-     * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif
-     */
-    BEACON_NOTIFICATION = 0x90,
+  /**
+   * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif
+   */
+  BEACON_NOTIFICATION = 0x90,
 
-    /**
-     * @BEACON_TEMPLATE_CMD:
-     *  Uses one of &struct iwl_mac_beacon_cmd_v6,
-     *  &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd
-     *  depending on the device version.
-     */
-    BEACON_TEMPLATE_CMD = 0x91,
-    /**
-     * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd
-     */
-    TX_ANT_CONFIGURATION_CMD = 0x98,
+  /**
+   * @BEACON_TEMPLATE_CMD:
+   *  Uses one of &struct iwl_mac_beacon_cmd_v6,
+   *  &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd
+   *  depending on the device version.
+   */
+  BEACON_TEMPLATE_CMD = 0x91,
+  /**
+   * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd
+   */
+  TX_ANT_CONFIGURATION_CMD = 0x98,
 
-    /**
-     * @STATISTICS_CMD:
-     * one of &struct iwl_statistics_cmd,
-     * &struct iwl_notif_statistics_v11,
-     * &struct iwl_notif_statistics_v10,
-     * &struct iwl_notif_statistics
-     */
-    STATISTICS_CMD = 0x9c,
+  /**
+   * @STATISTICS_CMD:
+   * one of &struct iwl_statistics_cmd,
+   * &struct iwl_notif_statistics_v11,
+   * &struct iwl_notif_statistics_v10,
+   * &struct iwl_notif_statistics
+   */
+  STATISTICS_CMD = 0x9c,
 
-    /**
-     * @STATISTICS_NOTIFICATION:
-     * one of &struct iwl_notif_statistics_v10,
-     * &struct iwl_notif_statistics_v11,
-     * &struct iwl_notif_statistics
-     */
-    STATISTICS_NOTIFICATION = 0x9d,
+  /**
+   * @STATISTICS_NOTIFICATION:
+   * one of &struct iwl_notif_statistics_v10,
+   * &struct iwl_notif_statistics_v11,
+   * &struct iwl_notif_statistics
+   */
+  STATISTICS_NOTIFICATION = 0x9d,
 
-    /**
-     * @EOSP_NOTIFICATION:
-     * Notify that a service period ended,
-     * &struct iwl_mvm_eosp_notification
-     */
-    EOSP_NOTIFICATION = 0x9e,
+  /**
+   * @EOSP_NOTIFICATION:
+   * Notify that a service period ended,
+   * &struct iwl_mvm_eosp_notification
+   */
+  EOSP_NOTIFICATION = 0x9e,
 
-    /**
-     * @REDUCE_TX_POWER_CMD:
-     * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd_v4
-     * or &struct iwl_dev_tx_power_cmd
-     */
-    REDUCE_TX_POWER_CMD = 0x9f,
+  /**
+   * @REDUCE_TX_POWER_CMD:
+   * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd_v4
+   * or &struct iwl_dev_tx_power_cmd
+   */
+  REDUCE_TX_POWER_CMD = 0x9f,
 
-    /**
-     * @CARD_STATE_NOTIFICATION:
-     * Card state (RF/CT kill) notification,
-     * uses &struct iwl_card_state_notif
-     */
-    CARD_STATE_NOTIFICATION = 0xa1,
+  /**
+   * @CARD_STATE_NOTIFICATION:
+   * Card state (RF/CT kill) notification,
+   * uses &struct iwl_card_state_notif
+   */
+  CARD_STATE_NOTIFICATION = 0xa1,
 
-    /**
-     * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif
-     */
-    MISSED_BEACONS_NOTIFICATION = 0xa2,
+  /**
+   * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif
+   */
+  MISSED_BEACONS_NOTIFICATION = 0xa2,
 
-    /**
-     * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd
-     */
-    MAC_PM_POWER_TABLE = 0xa9,
+  /**
+   * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd
+   */
+  MAC_PM_POWER_TABLE = 0xa9,
 
-    /**
-     * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif
-     */
-    MFUART_LOAD_NOTIFICATION = 0xb1,
+  /**
+   * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif
+   */
+  MFUART_LOAD_NOTIFICATION = 0xb1,
 
-    /**
-     * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd
-     */
-    RSS_CONFIG_CMD = 0xb3,
+  /**
+   * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd
+   */
+  RSS_CONFIG_CMD = 0xb3,
 
-    /**
-     * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info
-     */
-    REPLY_RX_PHY_CMD = 0xc0,
+  /**
+   * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info
+   */
+  REPLY_RX_PHY_CMD = 0xc0,
 
-    /**
-     * @REPLY_RX_MPDU_CMD:
-     * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc
-     */
-    REPLY_RX_MPDU_CMD = 0xc1,
+  /**
+   * @REPLY_RX_MPDU_CMD:
+   * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc
+   */
+  REPLY_RX_MPDU_CMD = 0xc1,
 
-    /**
-     * @FRAME_RELEASE:
-     * Frame release (reorder helper) notification, uses
-     * &struct iwl_frame_release
-     */
-    FRAME_RELEASE = 0xc3,
+  /**
+   * @FRAME_RELEASE:
+   * Frame release (reorder helper) notification, uses
+   * &struct iwl_frame_release
+   */
+  FRAME_RELEASE = 0xc3,
 
-    /**
-     * @BA_NOTIF:
-     * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif
-     * or &struct iwl_mvm_ba_notif depending on the HW
-     */
-    BA_NOTIF = 0xc5,
+  /**
+   * @BA_NOTIF:
+   * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif
+   * or &struct iwl_mvm_ba_notif depending on the HW
+   */
+  BA_NOTIF = 0xc5,
 
-    /* Location Aware Regulatory */
-    /**
-     * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd
-     */
-    MCC_UPDATE_CMD = 0xc8,
+  /* Location Aware Regulatory */
+  /**
+   * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd
+   */
+  MCC_UPDATE_CMD = 0xc8,
 
-    /**
-     * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif
-     */
-    MCC_CHUB_UPDATE_CMD = 0xc9,
+  /**
+   * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif
+   */
+  MCC_CHUB_UPDATE_CMD = 0xc9,
 
-    /**
-     * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker
-     * with &struct iwl_mvm_marker_rsp
-     */
-    MARKER_CMD = 0xcb,
+  /**
+   * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker
+   * with &struct iwl_mvm_marker_rsp
+   */
+  MARKER_CMD = 0xcb,
 
-    /**
-     * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif
-     */
-    BT_PROFILE_NOTIFICATION = 0xce,
+  /**
+   * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif
+   */
+  BT_PROFILE_NOTIFICATION = 0xce,
 
-    /**
-     * @BT_CONFIG: &struct iwl_bt_coex_cmd
-     */
-    BT_CONFIG = 0x9b,
+  /**
+   * @BT_CONFIG: &struct iwl_bt_coex_cmd
+   */
+  BT_CONFIG = 0x9b,
 
-    /**
-     * @BT_COEX_UPDATE_REDUCED_TXP:
-     * &struct iwl_bt_coex_reduced_txp_update_cmd
-     */
-    BT_COEX_UPDATE_REDUCED_TXP = 0x5c,
+  /**
+   * @BT_COEX_UPDATE_REDUCED_TXP:
+   * &struct iwl_bt_coex_reduced_txp_update_cmd
+   */
+  BT_COEX_UPDATE_REDUCED_TXP = 0x5c,
 
-    /**
-     * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd
-     */
-    BT_COEX_CI = 0x5d,
+  /**
+   * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd
+   */
+  BT_COEX_CI = 0x5d,
 
 #ifdef CPTCFG_IWLWIFI_LTE_COEX
-    /**
-     * @LTE_COEX_CONFIG_CMD: &struct iwl_lte_coex_config_cmd
-     */
-    LTE_COEX_CONFIG_CMD = 0x4a,
+  /**
+   * @LTE_COEX_CONFIG_CMD: &struct iwl_lte_coex_config_cmd
+   */
+  LTE_COEX_CONFIG_CMD = 0x4a,
 
-    /**
-     * @LTE_COEX_WIFI_REPORTED_CHANNEL_CMD:
-     * &struct iwl_lte_coex_wifi_reported_channel_cmd
-     */
-    LTE_COEX_WIFI_REPORTED_CHANNEL_CMD = 0X4b,
+  /**
+   * @LTE_COEX_WIFI_REPORTED_CHANNEL_CMD:
+   * &struct iwl_lte_coex_wifi_reported_channel_cmd
+   */
+  LTE_COEX_WIFI_REPORTED_CHANNEL_CMD = 0X4b,
 
-    /**
-     * @LTE_COEX_STATIC_PARAMS_CMD: &struct iwl_lte_coex_static_params_cmd
-     */
-    LTE_COEX_STATIC_PARAMS_CMD = 0x4c,
+  /**
+   * @LTE_COEX_STATIC_PARAMS_CMD: &struct iwl_lte_coex_static_params_cmd
+   */
+  LTE_COEX_STATIC_PARAMS_CMD = 0x4c,
 
-    /**
-     * @LTE_COEX_SPS_CMD: struct iwl_lte_coex_sps_cmd
-     */
-    LTE_COEX_SPS_CMD = 0x4d,
+  /**
+   * @LTE_COEX_SPS_CMD: struct iwl_lte_coex_sps_cmd
+   */
+  LTE_COEX_SPS_CMD = 0x4d,
 
-    /**
-     * @LTE_COEX_FINE_TUNING_PARAMS_CMD:
-     * &struct iwl_lte_coex_fine_tuning_params_cmd
-     */
-    LTE_COEX_FINE_TUNING_PARAMS_CMD = 0x57,
+  /**
+   * @LTE_COEX_FINE_TUNING_PARAMS_CMD:
+   * &struct iwl_lte_coex_fine_tuning_params_cmd
+   */
+  LTE_COEX_FINE_TUNING_PARAMS_CMD = 0x57,
 #endif
 
-    /**
-     * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd
-     */
-    REPLY_SF_CFG_CMD = 0xd1,
-    /**
-     * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd
-     */
-    REPLY_BEACON_FILTERING_CMD = 0xd2,
+  /**
+   * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd
+   */
+  REPLY_SF_CFG_CMD = 0xd1,
+  /**
+   * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd
+   */
+  REPLY_BEACON_FILTERING_CMD = 0xd2,
 
-    /**
-     * @DTS_MEASUREMENT_NOTIFICATION:
-     * &struct iwl_dts_measurement_notif_v1 or
-     * &struct iwl_dts_measurement_notif_v2
-     */
-    DTS_MEASUREMENT_NOTIFICATION = 0xdd,
+  /**
+   * @DTS_MEASUREMENT_NOTIFICATION:
+   * &struct iwl_dts_measurement_notif_v1 or
+   * &struct iwl_dts_measurement_notif_v2
+   */
+  DTS_MEASUREMENT_NOTIFICATION = 0xdd,
 
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
-    /**
-     * @DEBUG_HOST_COMMAND: &struct iwl_dhc_cmd
-     */
-    DEBUG_HOST_COMMAND = 0xf1,
+  /**
+   * @DEBUG_HOST_COMMAND: &struct iwl_dhc_cmd
+   */
+  DEBUG_HOST_COMMAND = 0xf1,
 #endif
 
-    /**
-     * @LDBG_CONFIG_CMD: configure continuous trace recording
-     */
-    LDBG_CONFIG_CMD = 0xf6,
+  /**
+   * @LDBG_CONFIG_CMD: configure continuous trace recording
+   */
+  LDBG_CONFIG_CMD = 0xf6,
 
-    /**
-     * @DEBUG_LOG_MSG: Debugging log data from firmware
-     */
-    DEBUG_LOG_MSG = 0xf7,
+  /**
+   * @DEBUG_LOG_MSG: Debugging log data from firmware
+   */
+  DEBUG_LOG_MSG = 0xf7,
 
-    /**
-     * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
-     */
-    BCAST_FILTER_CMD = 0xcf,
+  /**
+   * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
+   */
+  BCAST_FILTER_CMD = 0xcf,
 
-    /**
-     * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
-     */
-    MCAST_FILTER_CMD = 0xd0,
+  /**
+   * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
+   */
+  MCAST_FILTER_CMD = 0xd0,
 
-    /**
-     * @D3_CONFIG_CMD: &struct iwl_d3_manager_config
-     */
-    D3_CONFIG_CMD = 0xd3,
+  /**
+   * @D3_CONFIG_CMD: &struct iwl_d3_manager_config
+   */
+  D3_CONFIG_CMD = 0xd3,
 
-    /**
-     * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of
-     * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2,
-     * &struct iwl_proto_offload_cmd_v3_small,
-     * &struct iwl_proto_offload_cmd_v3_large
-     */
-    PROT_OFFLOAD_CONFIG_CMD = 0xd4,
+  /**
+   * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of
+   * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2,
+   * &struct iwl_proto_offload_cmd_v3_small,
+   * &struct iwl_proto_offload_cmd_v3_large
+   */
+  PROT_OFFLOAD_CONFIG_CMD = 0xd4,
 
-    /**
-     * @OFFLOADS_QUERY_CMD:
-     * No data in command, response in &struct iwl_wowlan_status
-     */
-    OFFLOADS_QUERY_CMD = 0xd5,
+  /**
+   * @OFFLOADS_QUERY_CMD:
+   * No data in command, response in &struct iwl_wowlan_status
+   */
+  OFFLOADS_QUERY_CMD = 0xd5,
 
-    /**
-     * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config
-     */
-    REMOTE_WAKE_CONFIG_CMD = 0xd6,
+  /**
+   * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config
+   */
+  REMOTE_WAKE_CONFIG_CMD = 0xd6,
 
-    /**
-     * @D0I3_END_CMD: End D0i3/D3 state, no command data
-     */
-    D0I3_END_CMD = 0xed,
+  /**
+   * @D0I3_END_CMD: End D0i3/D3 state, no command data
+   */
+  D0I3_END_CMD = 0xed,
 
-    /**
-     * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd
-     */
-    WOWLAN_PATTERNS = 0xe0,
+  /**
+   * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd
+   */
+  WOWLAN_PATTERNS = 0xe0,
 
-    /**
-     * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd
-     */
-    WOWLAN_CONFIGURATION = 0xe1,
+  /**
+   * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd
+   */
+  WOWLAN_CONFIGURATION = 0xe1,
 
-    /**
-     * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd
-     */
-    WOWLAN_TSC_RSC_PARAM = 0xe2,
+  /**
+   * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd
+   */
+  WOWLAN_TSC_RSC_PARAM = 0xe2,
 
-    /**
-     * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd
-     */
-    WOWLAN_TKIP_PARAM = 0xe3,
+  /**
+   * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd
+   */
+  WOWLAN_TKIP_PARAM = 0xe3,
 
-    /**
-     * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd
-     */
-    WOWLAN_KEK_KCK_MATERIAL = 0xe4,
+  /**
+   * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd
+   */
+  WOWLAN_KEK_KCK_MATERIAL = 0xe4,
 
-    /**
-     * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status
-     */
-    WOWLAN_GET_STATUSES = 0xe5,
+  /**
+   * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status
+   */
+  WOWLAN_GET_STATUSES = 0xe5,
 
-    /**
-     * @SCAN_OFFLOAD_PROFILES_QUERY_CMD:
-     * No command data, response is &struct iwl_scan_offload_profiles_query
-     */
-    SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
+  /**
+   * @SCAN_OFFLOAD_PROFILES_QUERY_CMD:
+   * No command data, response is &struct iwl_scan_offload_profiles_query
+   */
+  SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
 };
 
 /**
  * enum iwl_system_subcmd_ids - system group command IDs
  */
 enum iwl_system_subcmd_ids {
-    /**
-     * @SHARED_MEM_CFG_CMD:
-     * response in &struct iwl_shared_mem_cfg or
-     * &struct iwl_shared_mem_cfg_v2
-     */
-    SHARED_MEM_CFG_CMD = 0x0,
+  /**
+   * @SHARED_MEM_CFG_CMD:
+   * response in &struct iwl_shared_mem_cfg or
+   * &struct iwl_shared_mem_cfg_v2
+   */
+  SHARED_MEM_CFG_CMD = 0x0,
 
-    /**
-     * @SOC_CONFIGURATION_CMD: &struct iwl_soc_configuration_cmd
-     */
-    SOC_CONFIGURATION_CMD = 0x01,
+  /**
+   * @SOC_CONFIGURATION_CMD: &struct iwl_soc_configuration_cmd
+   */
+  SOC_CONFIGURATION_CMD = 0x01,
 
-    /**
-     * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
-     */
-    INIT_EXTENDED_CFG_CMD = 0x03,
+  /**
+   * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
+   */
+  INIT_EXTENDED_CFG_CMD = 0x03,
 };
 
 /**
  * enum iwl_xvt_subcmd_ids - XVT group command IDs
  */
 enum iwl_xvt_subcmd_ids {
-    /**
-     * @IQ_CALIB_CONFIG_NOTIF : Notification about IQ calibration finished
-     * Handled by user space component
-     */
-    IQ_CALIB_CONFIG_NOTIF = 0xFF,
+  /**
+   * @IQ_CALIB_CONFIG_NOTIF : Notification about IQ calibration finished
+   * Handled by user space component
+   */
+  IQ_CALIB_CONFIG_NOTIF = 0xFF,
 };
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_COMMANDS_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/config.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/config.h
index 20747c7..be72bb6 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/config.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/config.h
@@ -42,7 +42,7 @@
  * @cmd_queue: the TXQ number of the command queue
  */
 struct iwl_dqa_enable_cmd {
-    __le32 cmd_queue;
+  __le32 cmd_queue;
 } __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */
 
 /*
@@ -50,7 +50,7 @@
  * @valid: valid antenna configuration
  */
 struct iwl_tx_ant_cfg_cmd {
-    __le32 valid;
+  __le32 valid;
 } __packed;
 
 /**
@@ -62,33 +62,33 @@
  *      event triggers, using &enum iwl_calib_cfg
  */
 struct iwl_calib_ctrl {
-    __le32 flow_trigger;
-    __le32 event_trigger;
+  __le32 flow_trigger;
+  __le32 event_trigger;
 } __packed;
 
 /* This enum defines the bitmap of various calibrations to enable in both
  * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
  */
 enum iwl_calib_cfg {
-    IWL_CALIB_CFG_XTAL_IDX = BIT(0),
-    IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1),
-    IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2),
-    IWL_CALIB_CFG_PAPD_IDX = BIT(3),
-    IWL_CALIB_CFG_TX_PWR_IDX = BIT(4),
-    IWL_CALIB_CFG_DC_IDX = BIT(5),
-    IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6),
-    IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7),
-    IWL_CALIB_CFG_TX_IQ_IDX = BIT(8),
-    IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9),
-    IWL_CALIB_CFG_RX_IQ_IDX = BIT(10),
-    IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11),
-    IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12),
-    IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13),
-    IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14),
-    IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15),
-    IWL_CALIB_CFG_DAC_IDX = BIT(16),
-    IWL_CALIB_CFG_ABS_IDX = BIT(17),
-    IWL_CALIB_CFG_AGC_IDX = BIT(18),
+  IWL_CALIB_CFG_XTAL_IDX = BIT(0),
+  IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1),
+  IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2),
+  IWL_CALIB_CFG_PAPD_IDX = BIT(3),
+  IWL_CALIB_CFG_TX_PWR_IDX = BIT(4),
+  IWL_CALIB_CFG_DC_IDX = BIT(5),
+  IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6),
+  IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7),
+  IWL_CALIB_CFG_TX_IQ_IDX = BIT(8),
+  IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9),
+  IWL_CALIB_CFG_RX_IQ_IDX = BIT(10),
+  IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11),
+  IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12),
+  IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13),
+  IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14),
+  IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15),
+  IWL_CALIB_CFG_DAC_IDX = BIT(16),
+  IWL_CALIB_CFG_ABS_IDX = BIT(17),
+  IWL_CALIB_CFG_AGC_IDX = BIT(18),
 };
 
 /**
@@ -97,8 +97,8 @@
  * @calib_control: calibration control data
  */
 struct iwl_phy_cfg_cmd {
-    __le32 phy_cfg;
-    struct iwl_calib_ctrl calib_control;
+  __le32 phy_cfg;
+  struct iwl_calib_ctrl calib_control;
 } __packed;
 
 /*
@@ -107,8 +107,8 @@
  * Ids of dc2dc configuration flags
  */
 enum iwl_dc2dc_config_id {
-    DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */
-    DCDC_FREQ_TUNE_SET = 0x2,
+  DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */
+  DCDC_FREQ_TUNE_SET = 0x2,
 }; /* MARKER_ID_API_E_VER_1 */
 
 /**
@@ -125,10 +125,10 @@
  * @dc2dc_freq_tune1: frequency divider - analog domain
  */
 struct iwl_dc2dc_config_cmd {
-    __le32 flags;
-    __le32 enable_low_power_mode; /* not used */
-    __le32 dc2dc_freq_tune0;
-    __le32 dc2dc_freq_tune1;
+  __le32 flags;
+  __le32 enable_low_power_mode; /* not used */
+  __le32 dc2dc_freq_tune0;
+  __le32 dc2dc_freq_tune1;
 } __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
 
 /**
@@ -140,8 +140,8 @@
  * @dc2dc_freq_tune1: frequency divider - analog domain
  */
 struct iwl_dc2dc_config_resp {
-    __le32 dc2dc_freq_tune0;
-    __le32 dc2dc_freq_tune1;
+  __le32 dc2dc_freq_tune0;
+  __le32 dc2dc_freq_tune1;
 } __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_CONFIG_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/context.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/context.h
index 35681bc..a737a8b 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/context.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/context.h
@@ -45,23 +45,23 @@
  * @FW_CTXT_INVALID: value used to indicate unused/invalid
  */
 enum iwl_ctxt_id_and_color {
-    FW_CTXT_ID_POS = 0,
-    FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS,
-    FW_CTXT_COLOR_POS = 8,
-    FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS,
-    FW_CTXT_INVALID = 0xffffffff,
+  FW_CTXT_ID_POS = 0,
+  FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS,
+  FW_CTXT_COLOR_POS = 8,
+  FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS,
+  FW_CTXT_INVALID = 0xffffffff,
 };
 
 #define FW_CMD_ID_AND_COLOR(_id, _color) \
-    (((_id) << FW_CTXT_ID_POS) | ((_color) << FW_CTXT_COLOR_POS))
+  (((_id) << FW_CTXT_ID_POS) | ((_color) << FW_CTXT_COLOR_POS))
 
 /* Possible actions on PHYs, MACs and Bindings */
 enum iwl_ctxt_action {
-    FW_CTXT_ACTION_STUB = 0,
-    FW_CTXT_ACTION_ADD,
-    FW_CTXT_ACTION_MODIFY,
-    FW_CTXT_ACTION_REMOVE,
-    FW_CTXT_ACTION_NUM
+  FW_CTXT_ACTION_STUB = 0,
+  FW_CTXT_ACTION_ADD,
+  FW_CTXT_ACTION_MODIFY,
+  FW_CTXT_ACTION_REMOVE,
+  FW_CTXT_ACTION_NUM
 }; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_CONTEXT_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/d3.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/d3.h
index c24534c..922d8cf 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/d3.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/d3.h
@@ -43,7 +43,7 @@
  * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
  */
 enum iwl_d3_wakeup_flags {
-    IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0),
+  IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0),
 }; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */
 
 /**
@@ -55,9 +55,9 @@
  * The structure is used for the D3_CONFIG_CMD command.
  */
 struct iwl_d3_manager_config {
-    __le32 min_sleep_time;
-    __le32 wakeup_flags;
-    __le32 wakeup_host_timer;
+  __le32 min_sleep_time;
+  __le32 wakeup_flags;
+  __le32 wakeup_host_timer;
 } __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */
 
 /* TODO: OFFLOADS_QUERY_API_S_VER_1 */
@@ -70,10 +70,10 @@
  * @IWL_D3_PROTO_IPV6_VALID: IPv6 data is valid
  */
 enum iwl_proto_offloads {
-    IWL_D3_PROTO_OFFLOAD_ARP = BIT(0),
-    IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
-    IWL_D3_PROTO_IPV4_VALID = BIT(2),
-    IWL_D3_PROTO_IPV6_VALID = BIT(3),
+  IWL_D3_PROTO_OFFLOAD_ARP = BIT(0),
+  IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
+  IWL_D3_PROTO_IPV4_VALID = BIT(2),
+  IWL_D3_PROTO_IPV6_VALID = BIT(3),
 };
 
 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
@@ -94,11 +94,11 @@
  * @reserved: unused
  */
 struct iwl_proto_offload_cmd_common {
-    __le32 enabled;
-    __be32 remote_ipv4_addr;
-    __be32 host_ipv4_addr;
-    uint8_t arp_mac_addr[ETH_ALEN];
-    __le16 reserved;
+  __le32 enabled;
+  __be32 remote_ipv4_addr;
+  __be32 host_ipv4_addr;
+  uint8_t arp_mac_addr[ETH_ALEN];
+  __le16 reserved;
 } __packed;
 
 /**
@@ -112,12 +112,12 @@
  * @reserved2: reserved
  */
 struct iwl_proto_offload_cmd_v1 {
-    struct iwl_proto_offload_cmd_common common;
-    uint8_t remote_ipv6_addr[16];
-    uint8_t solicited_node_ipv6_addr[16];
-    uint8_t target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16];
-    uint8_t ndp_mac_addr[ETH_ALEN];
-    __le16 reserved2;
+  struct iwl_proto_offload_cmd_common common;
+  uint8_t remote_ipv6_addr[16];
+  uint8_t solicited_node_ipv6_addr[16];
+  uint8_t target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16];
+  uint8_t ndp_mac_addr[ETH_ALEN];
+  __le16 reserved2;
 } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
 
 /**
@@ -132,13 +132,13 @@
  * @reserved2: reserved
  */
 struct iwl_proto_offload_cmd_v2 {
-    struct iwl_proto_offload_cmd_common common;
-    uint8_t remote_ipv6_addr[16];
-    uint8_t solicited_node_ipv6_addr[16];
-    uint8_t target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16];
-    uint8_t ndp_mac_addr[ETH_ALEN];
-    uint8_t num_valid_ipv6_addrs;
-    uint8_t reserved2[3];
+  struct iwl_proto_offload_cmd_common common;
+  uint8_t remote_ipv6_addr[16];
+  uint8_t solicited_node_ipv6_addr[16];
+  uint8_t target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16];
+  uint8_t ndp_mac_addr[ETH_ALEN];
+  uint8_t num_valid_ipv6_addrs;
+  uint8_t reserved2[3];
 } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
 
 #if 0   // NEEDS_PORTING
@@ -190,11 +190,11 @@
 #define IWL_WOWLAN_MAX_PATTERN_LEN 128
 
 struct iwl_wowlan_pattern {
-    uint8_t mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
-    uint8_t pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
-    uint8_t mask_size;
-    uint8_t pattern_size;
-    __le16 reserved;
+  uint8_t mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+  uint8_t pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
+  uint8_t mask_size;
+  uint8_t pattern_size;
+  __le16 reserved;
 } __packed; /* WOWLAN_PATTERN_API_S_VER_1 */
 
 #define IWL_WOWLAN_MAX_PATTERNS 20
@@ -203,43 +203,43 @@
  * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
  */
 struct iwl_wowlan_patterns_cmd {
-    /**
-     * @n_patterns: number of patterns
-     */
-    __le32 n_patterns;
+  /**
+   * @n_patterns: number of patterns
+   */
+  __le32 n_patterns;
 
-    /**
-     * @patterns: the patterns, array length in @n_patterns
-     */
-    struct iwl_wowlan_pattern patterns[];
+  /**
+   * @patterns: the patterns, array length in @n_patterns
+   */
+  struct iwl_wowlan_pattern patterns[];
 } __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
 
 enum iwl_wowlan_wakeup_filters {
-    IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
-    IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
-    IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
-    IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
-    IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
-    IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5),
-    IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6),
-    IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7),
-    IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
-    IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
-    IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
-    IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11),
-    IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
-    IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13),
-    IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14),
-    IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15),
-    IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
+  IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
+  IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
+  IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
+  IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
+  IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
+  IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5),
+  IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6),
+  IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7),
+  IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
+  IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
+  IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
+  IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11),
+  IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
+  IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13),
+  IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14),
+  IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15),
+  IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
 }; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
 
 enum iwl_wowlan_flags {
-    IS_11W_ASSOC = BIT(0),
-    ENABLE_L3_FILTERING = BIT(1),
-    ENABLE_NBNS_FILTERING = BIT(2),
-    ENABLE_DHCP_FILTERING = BIT(3),
-    ENABLE_STORE_BEACON = BIT(4),
+  IS_11W_ASSOC = BIT(0),
+  ENABLE_L3_FILTERING = BIT(1),
+  ENABLE_NBNS_FILTERING = BIT(2),
+  ENABLE_DHCP_FILTERING = BIT(3),
+  ENABLE_STORE_BEACON = BIT(4),
 };
 
 /**
@@ -254,14 +254,14 @@
  * @reserved: reserved
  */
 struct iwl_wowlan_config_cmd {
-    __le32 wakeup_filter;
-    __le16 non_qos_seq;
-    __le16 qos_seq[8];
-    uint8_t wowlan_ba_teardown_tids;
-    uint8_t is_11n_connection;
-    uint8_t offloading_tid;
-    uint8_t flags;
-    uint8_t reserved[2];
+  __le32 wakeup_filter;
+  __le16 non_qos_seq;
+  __le16 qos_seq[8];
+  uint8_t wowlan_ba_teardown_tids;
+  uint8_t is_11n_connection;
+  uint8_t offloading_tid;
+  uint8_t flags;
+  uint8_t reserved[2];
 } __packed; /* WOWLAN_CONFIG_API_S_VER_4 */
 
 /*
@@ -270,103 +270,103 @@
 #define IWL_NUM_RSC 16
 
 struct tkip_sc {
-    __le16 iv16;
-    __le16 pad;
-    __le32 iv32;
+  __le16 iv16;
+  __le16 pad;
+  __le32 iv32;
 } __packed; /* TKIP_SC_API_U_VER_1 */
 
 struct iwl_tkip_rsc_tsc {
-    struct tkip_sc unicast_rsc[IWL_NUM_RSC];
-    struct tkip_sc multicast_rsc[IWL_NUM_RSC];
-    struct tkip_sc tsc;
+  struct tkip_sc unicast_rsc[IWL_NUM_RSC];
+  struct tkip_sc multicast_rsc[IWL_NUM_RSC];
+  struct tkip_sc tsc;
 } __packed; /* TKIP_TSC_RSC_API_S_VER_1 */
 
 struct aes_sc {
-    __le64 pn;
+  __le64 pn;
 } __packed; /* TKIP_AES_SC_API_U_VER_1 */
 
 struct iwl_aes_rsc_tsc {
-    struct aes_sc unicast_rsc[IWL_NUM_RSC];
-    struct aes_sc multicast_rsc[IWL_NUM_RSC];
-    struct aes_sc tsc;
+  struct aes_sc unicast_rsc[IWL_NUM_RSC];
+  struct aes_sc multicast_rsc[IWL_NUM_RSC];
+  struct aes_sc tsc;
 } __packed; /* AES_TSC_RSC_API_S_VER_1 */
 
 union iwl_all_tsc_rsc {
-    struct iwl_tkip_rsc_tsc tkip;
-    struct iwl_aes_rsc_tsc aes;
+  struct iwl_tkip_rsc_tsc tkip;
+  struct iwl_aes_rsc_tsc aes;
 }; /* ALL_TSC_RSC_API_S_VER_2 */
 
 struct iwl_wowlan_rsc_tsc_params_cmd {
-    union iwl_all_tsc_rsc all_tsc_rsc;
+  union iwl_all_tsc_rsc all_tsc_rsc;
 } __packed; /* ALL_TSC_RSC_API_S_VER_2 */
 
 #define IWL_MIC_KEY_SIZE 8
 struct iwl_mic_keys {
-    uint8_t tx[IWL_MIC_KEY_SIZE];
-    uint8_t rx_unicast[IWL_MIC_KEY_SIZE];
-    uint8_t rx_mcast[IWL_MIC_KEY_SIZE];
+  uint8_t tx[IWL_MIC_KEY_SIZE];
+  uint8_t rx_unicast[IWL_MIC_KEY_SIZE];
+  uint8_t rx_mcast[IWL_MIC_KEY_SIZE];
 } __packed; /* MIC_KEYS_API_S_VER_1 */
 
 #define IWL_P1K_SIZE 5
 struct iwl_p1k_cache {
-    __le16 p1k[IWL_P1K_SIZE];
+  __le16 p1k[IWL_P1K_SIZE];
 } __packed;
 
 #define IWL_NUM_RX_P1K_CACHE 2
 
 struct iwl_wowlan_tkip_params_cmd {
-    struct iwl_mic_keys mic_keys;
-    struct iwl_p1k_cache tx;
-    struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
-    struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
+  struct iwl_mic_keys mic_keys;
+  struct iwl_p1k_cache tx;
+  struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
+  struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
 } __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */
 
 #define IWL_KCK_MAX_SIZE 32
 #define IWL_KEK_MAX_SIZE 32
 
 struct iwl_wowlan_kek_kck_material_cmd {
-    uint8_t kck[IWL_KCK_MAX_SIZE];
-    uint8_t kek[IWL_KEK_MAX_SIZE];
-    __le16 kck_len;
-    __le16 kek_len;
-    __le64 replay_ctr;
+  uint8_t kck[IWL_KCK_MAX_SIZE];
+  uint8_t kek[IWL_KEK_MAX_SIZE];
+  __le16 kck_len;
+  __le16 kek_len;
+  __le64 replay_ctr;
 } __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */
 
 #define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
 
 enum iwl_wowlan_rekey_status {
-    IWL_WOWLAN_REKEY_POST_REKEY = 0,
-    IWL_WOWLAN_REKEY_WHILE_REKEY = 1,
+  IWL_WOWLAN_REKEY_POST_REKEY = 0,
+  IWL_WOWLAN_REKEY_WHILE_REKEY = 1,
 }; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */
 
 enum iwl_wowlan_wakeup_reason {
-    IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0,
-    IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0),
-    IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1),
-    IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2),
-    IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3),
-    IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4),
-    IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5),
-    IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6),
-    IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7),
-    IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8),
-    IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9),
-    IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10),
-    IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11),
-    IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
-    IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13),
-    IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14),
-    IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15),
-    IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16),
+  IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0,
+  IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0),
+  IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1),
+  IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2),
+  IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3),
+  IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4),
+  IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5),
+  IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6),
+  IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7),
+  IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8),
+  IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9),
+  IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10),
+  IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11),
+  IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
+  IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13),
+  IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14),
+  IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15),
+  IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16),
 
 }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
 
 struct iwl_wowlan_gtk_status_v1 {
-    uint8_t key_index;
-    uint8_t reserved[3];
-    uint8_t decrypt_key[16];
-    uint8_t tkip_mic_key[8];
-    struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+  uint8_t key_index;
+  uint8_t reserved[3];
+  uint8_t decrypt_key[16];
+  uint8_t tkip_mic_key[8];
+  struct iwl_wowlan_rsc_tsc_params_cmd rsc;
 } __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
 
 #define WOWLAN_KEY_MAX_SIZE 32
@@ -386,12 +386,12 @@
  * @rsc: TSC RSC counters
  */
 struct iwl_wowlan_gtk_status {
-    uint8_t key[WOWLAN_KEY_MAX_SIZE];
-    uint8_t key_len;
-    uint8_t key_flags;
-    uint8_t reserved[2];
-    uint8_t tkip_mic_key[8];
-    struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+  uint8_t key[WOWLAN_KEY_MAX_SIZE];
+  uint8_t key_len;
+  uint8_t key_flags;
+  uint8_t reserved[2];
+  uint8_t tkip_mic_key[8];
+  struct iwl_wowlan_rsc_tsc_params_cmd rsc;
 } __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */
 
 #define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1))
@@ -407,10 +407,10 @@
  *  bit[6]:     Set iff this is the currently used IGTK
  */
 struct iwl_wowlan_igtk_status {
-    uint8_t key[WOWLAN_KEY_MAX_SIZE];
-    uint8_t ipn[6];
-    uint8_t key_len;
-    uint8_t key_flags;
+  uint8_t key[WOWLAN_KEY_MAX_SIZE];
+  uint8_t ipn[6];
+  uint8_t key_len;
+  uint8_t key_flags;
 } __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */
 
 /**
@@ -429,19 +429,19 @@
  * @wake_packet: wakeup packet
  */
 struct iwl_wowlan_status_v6 {
-    struct iwl_wowlan_gtk_status_v1 gtk;
-    __le64 replay_ctr;
-    __le16 pattern_number;
-    __le16 non_qos_seq_ctr;
-    __le16 qos_seq_ctr[8];
-    __le32 wakeup_reasons;
-    __le32 num_of_gtk_rekeys;
-    __le32 transmitted_ndps;
-    __le32 received_beacons;
-    __le32 wake_packet_length;
-    __le32 wake_packet_bufsize;
-    uint8_t wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed;                /* WOWLAN_STATUSES_API_S_VER_6 */
+  struct iwl_wowlan_gtk_status_v1 gtk;
+  __le64 replay_ctr;
+  __le16 pattern_number;
+  __le16 non_qos_seq_ctr;
+  __le16 qos_seq_ctr[8];
+  __le32 wakeup_reasons;
+  __le32 num_of_gtk_rekeys;
+  __le32 transmitted_ndps;
+  __le32 received_beacons;
+  __le32 wake_packet_length;
+  __le32 wake_packet_bufsize;
+  uint8_t wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed;              /* WOWLAN_STATUSES_API_S_VER_6 */
 
 /**
  * struct iwl_wowlan_status - WoWLAN status
@@ -460,23 +460,23 @@
  * @wake_packet: wakeup packet
  */
 struct iwl_wowlan_status {
-    struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
-    struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
-    __le64 replay_ctr;
-    __le16 pattern_number;
-    __le16 non_qos_seq_ctr;
-    __le16 qos_seq_ctr[8];
-    __le32 wakeup_reasons;
-    __le32 num_of_gtk_rekeys;
-    __le32 transmitted_ndps;
-    __le32 received_beacons;
-    __le32 wake_packet_length;
-    __le32 wake_packet_bufsize;
-    uint8_t wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed;                /* WOWLAN_STATUSES_API_S_VER_7 */
+  struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
+  struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+  __le64 replay_ctr;
+  __le16 pattern_number;
+  __le16 non_qos_seq_ctr;
+  __le16 qos_seq_ctr[8];
+  __le32 wakeup_reasons;
+  __le32 num_of_gtk_rekeys;
+  __le32 transmitted_ndps;
+  __le32 received_beacons;
+  __le32 wake_packet_length;
+  __le32 wake_packet_bufsize;
+  uint8_t wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed;              /* WOWLAN_STATUSES_API_S_VER_7 */
 
 static inline uint8_t iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status* gtk) {
-    return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
+  return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
 }
 
 #define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
@@ -484,48 +484,48 @@
 #define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048
 
 struct iwl_tcp_packet_info {
-    __le16 tcp_pseudo_header_checksum;
-    __le16 tcp_payload_length;
+  __le16 tcp_pseudo_header_checksum;
+  __le16 tcp_payload_length;
 } __packed; /* TCP_PACKET_INFO_API_S_VER_2 */
 
 struct iwl_tcp_packet {
-    struct iwl_tcp_packet_info info;
-    uint8_t rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
-    uint8_t data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
+  struct iwl_tcp_packet_info info;
+  uint8_t rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+  uint8_t data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
 } __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
 
 struct iwl_remote_wake_packet {
-    struct iwl_tcp_packet_info info;
-    uint8_t rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
-    uint8_t data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
+  struct iwl_tcp_packet_info info;
+  uint8_t rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+  uint8_t data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
 } __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
 
 struct iwl_wowlan_remote_wake_config {
-    __le32 connection_max_time; /* unused */
-    /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
-    uint8_t max_syn_retries;
-    uint8_t max_data_retries;
-    uint8_t tcp_syn_ack_timeout;
-    uint8_t tcp_ack_timeout;
+  __le32 connection_max_time; /* unused */
+  /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
+  uint8_t max_syn_retries;
+  uint8_t max_data_retries;
+  uint8_t tcp_syn_ack_timeout;
+  uint8_t tcp_ack_timeout;
 
-    struct iwl_tcp_packet syn_tx;
-    struct iwl_tcp_packet synack_rx;
-    struct iwl_tcp_packet keepalive_ack_rx;
-    struct iwl_tcp_packet fin_tx;
+  struct iwl_tcp_packet syn_tx;
+  struct iwl_tcp_packet synack_rx;
+  struct iwl_tcp_packet keepalive_ack_rx;
+  struct iwl_tcp_packet fin_tx;
 
-    struct iwl_remote_wake_packet keepalive_tx;
-    struct iwl_remote_wake_packet wake_rx;
+  struct iwl_remote_wake_packet keepalive_tx;
+  struct iwl_remote_wake_packet wake_rx;
 
-    /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
-    uint8_t sequence_number_offset;
-    uint8_t sequence_number_length;
-    uint8_t token_offset;
-    uint8_t token_length;
-    /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
-    __le32 initial_sequence_number;
-    __le16 keepalive_interval;
-    __le16 num_tokens;
-    uint8_t tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
+  /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
+  uint8_t sequence_number_offset;
+  uint8_t sequence_number_length;
+  uint8_t token_offset;
+  uint8_t token_length;
+  /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
+  __le32 initial_sequence_number;
+  __le16 keepalive_interval;
+  __le16 num_tokens;
+  uint8_t tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
 } __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */
 
 /* TODO: NetDetect API */
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/datapath.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/datapath.h
index 4500d6d..b43e6af 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/datapath.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/datapath.h
@@ -41,88 +41,88 @@
  * enum iwl_data_path_subcmd_ids - data path group commands
  */
 enum iwl_data_path_subcmd_ids {
-    /**
-     * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd
-     */
-    DQA_ENABLE_CMD = 0x0,
+  /**
+   * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd
+   */
+  DQA_ENABLE_CMD = 0x0,
 
-    /**
-     * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd
-     */
-    UPDATE_MU_GROUPS_CMD = 0x1,
+  /**
+   * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd
+   */
+  UPDATE_MU_GROUPS_CMD = 0x1,
 
-    /**
-     * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd
-     */
-    TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
+  /**
+   * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd
+   */
+  TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
 
-    /**
-     * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd
-     */
-    STA_HE_CTXT_CMD = 0x7,
+  /**
+   * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd
+   */
+  STA_HE_CTXT_CMD = 0x7,
 
 #ifdef CPTCFG_IWLMVM_AX_SOFTAP_TESTMODE
-    /**
-     * @AX_SOFTAP_TESTMODE_DL_BASIC: &struct ax_softap_testmode_dl_basic_cmd
-     */
-    AX_SOFTAP_TESTMODE_DL_BASIC = 0x9,
+  /**
+   * @AX_SOFTAP_TESTMODE_DL_BASIC: &struct ax_softap_testmode_dl_basic_cmd
+   */
+  AX_SOFTAP_TESTMODE_DL_BASIC = 0x9,
 
-    /**
-     * @AX_SOFTAP_TESTMODE_DL_MU_BAR:
-     *  &struct ax_softap_testmode_dl_mu_bar_cmd
-     */
-    AX_SOFTAP_TESTMODE_DL_MU_BAR = 0xA,
+  /**
+   * @AX_SOFTAP_TESTMODE_DL_MU_BAR:
+   *  &struct ax_softap_testmode_dl_mu_bar_cmd
+   */
+  AX_SOFTAP_TESTMODE_DL_MU_BAR = 0xA,
 
-    /**
-     * @AX_SOFTAP_TESTMODE_UL: &struct ax_softap_testmode_ul_cmd
-     */
-    AX_SOFTAP_TESTMODE_UL = 0xB,
+  /**
+   * @AX_SOFTAP_TESTMODE_UL: &struct ax_softap_testmode_ul_cmd
+   */
+  AX_SOFTAP_TESTMODE_UL = 0xB,
 #endif
 
-    /**
-     * @AX_SOFTAP_CLIENT_TESTMODE: &struct ax_softap_client_testmode_cmd
-     */
-    AX_SOFTAP_CLIENT_TESTMODE = 0xC,
+  /**
+   * @AX_SOFTAP_CLIENT_TESTMODE: &struct ax_softap_client_testmode_cmd
+   */
+  AX_SOFTAP_CLIENT_TESTMODE = 0xC,
 
-    /**
-     * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
-     */
-    RFH_QUEUE_CONFIG_CMD = 0xD,
+  /**
+   * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
+   */
+  RFH_QUEUE_CONFIG_CMD = 0xD,
 
-    /**
-     * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
-     */
-    TLC_MNG_CONFIG_CMD = 0xF,
+  /**
+   * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+   */
+  TLC_MNG_CONFIG_CMD = 0xF,
 
-    /**
-     * @HE_AIR_SNIFFER_CONFIG_CMD: &struct iwl_he_monitor_cmd
-     */
-    HE_AIR_SNIFFER_CONFIG_CMD = 0x13,
+  /**
+   * @HE_AIR_SNIFFER_CONFIG_CMD: &struct iwl_he_monitor_cmd
+   */
+  HE_AIR_SNIFFER_CONFIG_CMD = 0x13,
 
-    /**
-     * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
-     */
-    RX_NO_DATA_NOTIF = 0xF5,
+  /**
+   * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
+   */
+  RX_NO_DATA_NOTIF = 0xF5,
 
-    /**
-     * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
-     */
-    TLC_MNG_UPDATE_NOTIF = 0xF7,
+  /**
+   * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
+   */
+  TLC_MNG_UPDATE_NOTIF = 0xF7,
 
-    /**
-     * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification
-     */
-    STA_PM_NOTIF = 0xFD,
+  /**
+   * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification
+   */
+  STA_PM_NOTIF = 0xFD,
 
-    /**
-     * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif
-     */
-    MU_GROUP_MGMT_NOTIF = 0xFE,
+  /**
+   * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif
+   */
+  MU_GROUP_MGMT_NOTIF = 0xFE,
 
-    /**
-     * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification
-     */
-    RX_QUEUES_NOTIFICATION = 0xFF,
+  /**
+   * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification
+   */
+  RX_QUEUES_NOTIFICATION = 0xFF,
 };
 
 /**
@@ -134,9 +134,9 @@
  *  group then bits (group * 2) is the position -1
  */
 struct iwl_mu_group_mgmt_cmd {
-    __le32 reserved;
-    __le32 membership_status[2];
-    __le32 user_position[4];
+  __le32 reserved;
+  __le32 membership_status[2];
+  __le32 user_position[4];
 } __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */
 
 /**
@@ -147,8 +147,8 @@
  *  group then bits (group * 2) is the position -1
  */
 struct iwl_mu_group_mgmt_notif {
-    __le32 membership_status[2];
-    __le32 user_position[4];
+  __le32 membership_status[2];
+  __le32 user_position[4];
 } __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_DATAPATH_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/dbg-tlv.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/dbg-tlv.h
index fb826db..ebcfb6e 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -42,9 +42,9 @@
  * @data: TLV data followed
  **/
 struct iwl_fw_ini_header {
-    __le32 tlv_version;
-    __le32 apply_point;
-    uint8_t data[0];
+  __le32 tlv_version;
+  __le32 apply_point;
+  uint8_t data[0];
 } __packed; /* FW_INI_HEADER_TLV_S */
 
 /**
@@ -61,12 +61,12 @@
  * @min_frag_size: the minimum allowed fragmentation size in bytes
  */
 struct iwl_fw_ini_allocation_tlv {
-    struct iwl_fw_ini_header header;
-    __le32 allocation_id;
-    __le32 buffer_location;
-    __le32 size;
-    __le32 max_fragments;
-    __le32 min_frag_size;
+  struct iwl_fw_ini_header header;
+  __le32 allocation_id;
+  __le32 buffer_location;
+  __le32 size;
+  __le32 max_fragments;
+  __le32 min_frag_size;
 } __packed; /* FW_INI_BUFFER_ALLOCATION_TLV_S_VER_1 */
 
 /**
@@ -79,10 +79,10 @@
  * @data: all of the relevant command (0xf6/0xf5) to be sent
  */
 struct iwl_fw_ini_hcmd {
-    uint8_t id;
-    uint8_t group;
-    __le16 padding;
-    uint8_t data[0];
+  uint8_t id;
+  uint8_t group;
+  __le16 padding;
+  uint8_t data[0];
 } __packed; /* FW_INI_HCMD_S */
 
 /**
@@ -91,8 +91,8 @@
  * @hcmd: a variable length host-command to be sent to apply the configuration.
  */
 struct iwl_fw_ini_hcmd_tlv {
-    struct iwl_fw_ini_header header;
-    struct iwl_fw_ini_hcmd hcmd;
+  struct iwl_fw_ini_header header;
+  struct iwl_fw_ini_hcmd hcmd;
 } __packed; /* FW_INI_HCMD_TLV_S_VER_1 */
 
 /*
@@ -102,8 +102,8 @@
  * @debug_flow_cfg: &enum iwl_fw_ini_debug_flow
  */
 struct iwl_fw_ini_debug_flow_tlv {
-    struct iwl_fw_ini_header header;
-    __le32 debug_flow_cfg;
+  struct iwl_fw_ini_header header;
+  __le32 debug_flow_cfg;
 } __packed; /* FW_INI_DEBUG_FLOW_TLV_S_VER_1 */
 
 #define IWL_FW_INI_MAX_REGION_ID 20
@@ -120,16 +120,16 @@
  * @start_addr: array of addresses. (unused for IWL_FW_INI_REGION_DRAM_BUFFER)
  */
 struct iwl_fw_ini_region_cfg {
-    __le32 region_id;
-    __le32 region_type;
-    __le32 name_len;
-    uint8_t name[IWL_FW_INI_MAX_NAME];
-    union {
-        __le32 num_regions;
-        __le32 allocation_id;
-    };
-    __le32 size;
-    __le32 start_addr[];
+  __le32 region_id;
+  __le32 region_type;
+  __le32 name_len;
+  uint8_t name[IWL_FW_INI_MAX_NAME];
+  union {
+    __le32 num_regions;
+    __le32 allocation_id;
+  };
+  __le32 size;
+  __le32 start_addr[];
 } __packed; /* FW_INI_REGION_CONFIG_S */
 
 /**
@@ -140,9 +140,9 @@
  * @iwl_fw_ini_dump dump_config: list of dump configurations
  */
 struct iwl_fw_ini_region_tlv {
-    struct iwl_fw_ini_header header;
-    __le32 num_regions;
-    struct iwl_fw_ini_region_cfg region_config[];
+  struct iwl_fw_ini_header header;
+  __le32 num_regions;
+  struct iwl_fw_ini_region_cfg region_config[];
 } __packed; /* FW_INI_REGION_CFG_S */
 
 /**
@@ -161,16 +161,16 @@
  * @data: region IDs
  */
 struct iwl_fw_ini_trigger {
-    __le32 trigger_id;
-    __le32 ignore_default;
-    __le32 dump_delay;
-    __le32 occurrences;
-    __le32 ignore_consec;
-    __le32 force_restart;
-    __le32 multi_dut;
-    __le32 trigger_data;
-    __le32 num_regions;
-    __le32 data[];
+  __le32 trigger_id;
+  __le32 ignore_default;
+  __le32 dump_delay;
+  __le32 occurrences;
+  __le32 ignore_consec;
+  __le32 force_restart;
+  __le32 multi_dut;
+  __le32 trigger_data;
+  __le32 num_regions;
+  __le32 data[];
 } __packed; /* FW_INI_TRIGGER_CONFIG_S */
 
 /**
@@ -182,9 +182,9 @@
  * @trigger_config: list of trigger configurations
  */
 struct iwl_fw_ini_trigger_tlv {
-    struct iwl_fw_ini_header header;
-    __le32 num_triggers;
-    struct iwl_fw_ini_trigger trigger_config[];
+  struct iwl_fw_ini_header header;
+  __le32 num_triggers;
+  struct iwl_fw_ini_trigger trigger_config[];
 } __packed; /* FW_INI_TRIGGER_CFG_S */
 
 /**
@@ -232,49 +232,49 @@
  * @IWL_FW_TRIGGER_ID_NUM: number of trigger IDs
  */
 enum iwl_fw_ini_trigger_id {
-    /* Errors triggers */
-    IWL_FW_TRIGGER_ID_FW_ASSERT = 1,
-    IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 2,
-    IWL_FW_TRIGGER_ID_FW_HW_ERROR = 3,
-    /* Generic triggers */
-    IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR = 4,
-    IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING = 5,
-    IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO = 6,
-    IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG = 7,
-    /* User Trigger */
-    IWL_FW_TRIGGER_ID_USER_TRIGGER = 8,
-    /* Host triggers */
-    IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 9,
-    IWL_FW_TRIGGER_ID_HOST_DID_INITIATED_EVENT = 10,
-    IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 11,
-    IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 12,
-    IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 13,
-    IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 14,
-    IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 15,
-    IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 16,
-    IWL_FW_TRIGGER_ID_HOST_SCAN_START = 17,
-    IWL_FW_TRIGGER_ID_HOST_SCAN_SUBITTED = 18,
-    IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 19,
-    IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 20,
-    IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 21,
-    IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 22,
-    IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 23,
-    IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 24,
-    IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 25,
-    IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 26,
-    IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 27,
-    IWL_FW_TRIGGER_ID_HOST_D3_START = 28,
-    IWL_FW_TRIGGER_ID_HOST_D3_END = 29,
-    IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 30,
-    IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 31,
-    IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 32,
-    IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 33,
-    IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 34,
-    IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 35,
-    IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 36,
-    IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 37,
-    IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 38,
-    IWL_FW_TRIGGER_ID_NUM,
+  /* Errors triggers */
+  IWL_FW_TRIGGER_ID_FW_ASSERT = 1,
+  IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 2,
+  IWL_FW_TRIGGER_ID_FW_HW_ERROR = 3,
+  /* Generic triggers */
+  IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR = 4,
+  IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING = 5,
+  IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO = 6,
+  IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG = 7,
+  /* User Trigger */
+  IWL_FW_TRIGGER_ID_USER_TRIGGER = 8,
+  /* Host triggers */
+  IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 9,
+  IWL_FW_TRIGGER_ID_HOST_DID_INITIATED_EVENT = 10,
+  IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 11,
+  IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 12,
+  IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 13,
+  IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 14,
+  IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 15,
+  IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 16,
+  IWL_FW_TRIGGER_ID_HOST_SCAN_START = 17,
+  IWL_FW_TRIGGER_ID_HOST_SCAN_SUBITTED = 18,
+  IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 19,
+  IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 20,
+  IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 21,
+  IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 22,
+  IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 23,
+  IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 24,
+  IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 25,
+  IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 26,
+  IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 27,
+  IWL_FW_TRIGGER_ID_HOST_D3_START = 28,
+  IWL_FW_TRIGGER_ID_HOST_D3_END = 29,
+  IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 30,
+  IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 31,
+  IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 32,
+  IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 33,
+  IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 34,
+  IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 35,
+  IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 36,
+  IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 37,
+  IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 38,
+  IWL_FW_TRIGGER_ID_NUM,
 }; /* FW_INI_TRIGGER_ID_E_VER_1 */
 
 /**
@@ -288,13 +288,13 @@
  * @IWL_FW_INI_APPLY_NUM: number of apply points
  */
 enum iwl_fw_ini_apply_point {
-    IWL_FW_INI_APPLY_INVALID,
-    IWL_FW_INI_APPLY_EARLY,
-    IWL_FW_INI_APPLY_AFTER_ALIVE,
-    IWL_FW_INI_APPLY_POST_INIT,
-    IWL_FW_INI_APPLY_MISSED_BEACONS,
-    IWL_FW_INI_APPLY_SCAN_COMPLETE,
-    IWL_FW_INI_APPLY_NUM,
+  IWL_FW_INI_APPLY_INVALID,
+  IWL_FW_INI_APPLY_EARLY,
+  IWL_FW_INI_APPLY_AFTER_ALIVE,
+  IWL_FW_INI_APPLY_POST_INIT,
+  IWL_FW_INI_APPLY_MISSED_BEACONS,
+  IWL_FW_INI_APPLY_SCAN_COMPLETE,
+  IWL_FW_INI_APPLY_NUM,
 }; /* FW_INI_APPLY_POINT_E_VER_1 */
 
 /**
@@ -308,13 +308,13 @@
  * @IWL_FW_INI_ALLOCATION_ID_USER_DEFINED: for future user scenarios
  */
 enum iwl_fw_ini_allocation_id {
-    IWL_FW_INI_ALLOCATION_INVALID,
-    IWL_FW_INI_ALLOCATION_ID_DBGC1,
-    IWL_FW_INI_ALLOCATION_ID_DBGC2,
-    IWL_FW_INI_ALLOCATION_ID_DBGC3,
-    IWL_FW_INI_ALLOCATION_ID_SDFX,
-    IWL_FW_INI_ALLOCATION_ID_FW_DUMP,
-    IWL_FW_INI_ALLOCATION_ID_USER_DEFINED,
+  IWL_FW_INI_ALLOCATION_INVALID,
+  IWL_FW_INI_ALLOCATION_ID_DBGC1,
+  IWL_FW_INI_ALLOCATION_ID_DBGC2,
+  IWL_FW_INI_ALLOCATION_ID_DBGC3,
+  IWL_FW_INI_ALLOCATION_ID_SDFX,
+  IWL_FW_INI_ALLOCATION_ID_FW_DUMP,
+  IWL_FW_INI_ALLOCATION_ID_USER_DEFINED,
 }; /* FW_INI_ALLOCATION_ID_E_VER_1 */
 
 /**
@@ -324,9 +324,9 @@
  * @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
  */
 enum iwl_fw_ini_buffer_location {
-    IWL_FW_INI_LOCATION_SRAM_INVALID,
-    IWL_FW_INI_LOCATION_SRAM_PATH,
-    IWL_FW_INI_LOCATION_DRAM_PATH,
+  IWL_FW_INI_LOCATION_SRAM_INVALID,
+  IWL_FW_INI_LOCATION_SRAM_PATH,
+  IWL_FW_INI_LOCATION_DRAM_PATH,
 }; /* FW_INI_BUFFER_LOCATION_E_VER_1 */
 
 /**
@@ -336,9 +336,9 @@
  * @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined
  */
 enum iwl_fw_ini_debug_flow {
-    IWL_FW_INI_DEBUG_INVALID,
-    IWL_FW_INI_DEBUG_DBTR_FLOW,
-    IWL_FW_INI_DEBUG_TB2DTF_FLOW,
+  IWL_FW_INI_DEBUG_INVALID,
+  IWL_FW_INI_DEBUG_DBTR_FLOW,
+  IWL_FW_INI_DEBUG_TB2DTF_FLOW,
 }; /* FW_INI_DEBUG_FLOW_E_VER_1 */
 
 /**
@@ -358,19 +358,19 @@
  * @IWL_FW_INI_REGION_NUM: number of region types
  */
 enum iwl_fw_ini_region_type {
-    IWL_FW_INI_REGION_INVALID,
-    IWL_FW_INI_REGION_DEVICE_MEMORY,
-    IWL_FW_INI_REGION_PERIPHERY_MAC,
-    IWL_FW_INI_REGION_PERIPHERY_PHY,
-    IWL_FW_INI_REGION_PERIPHERY_AUX,
-    IWL_FW_INI_REGION_DRAM_BUFFER,
-    IWL_FW_INI_REGION_DRAM_IMR,
-    IWL_FW_INI_REGION_INTERNAL_BUFFER,
-    IWL_FW_INI_REGION_TXF,
-    IWL_FW_INI_REGION_RXF,
-    IWL_FW_INI_REGION_PAGING,
-    IWL_FW_INI_REGION_CSR,
-    IWL_FW_INI_REGION_NUM
+  IWL_FW_INI_REGION_INVALID,
+  IWL_FW_INI_REGION_DEVICE_MEMORY,
+  IWL_FW_INI_REGION_PERIPHERY_MAC,
+  IWL_FW_INI_REGION_PERIPHERY_PHY,
+  IWL_FW_INI_REGION_PERIPHERY_AUX,
+  IWL_FW_INI_REGION_DRAM_BUFFER,
+  IWL_FW_INI_REGION_DRAM_IMR,
+  IWL_FW_INI_REGION_INTERNAL_BUFFER,
+  IWL_FW_INI_REGION_TXF,
+  IWL_FW_INI_REGION_RXF,
+  IWL_FW_INI_REGION_PAGING,
+  IWL_FW_INI_REGION_CSR,
+  IWL_FW_INI_REGION_NUM
 }; /* FW_INI_REGION_TYPE_E_VER_1*/
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_DBG_TLV_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/debug.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/debug.h
index 65eb935..5007c17 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/debug.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/debug.h
@@ -40,47 +40,47 @@
  * enum iwl_debug_cmds - debug commands
  */
 enum iwl_debug_cmds {
-    /**
-     * @LMAC_RD_WR:
-     * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
-     * &struct iwl_dbg_mem_access_rsp
-     */
-    LMAC_RD_WR = 0x0,
-    /**
-     * @UMAC_RD_WR:
-     * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
-     * &struct iwl_dbg_mem_access_rsp
-     */
-    UMAC_RD_WR = 0x1,
+  /**
+   * @LMAC_RD_WR:
+   * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
+   * &struct iwl_dbg_mem_access_rsp
+   */
+  LMAC_RD_WR = 0x0,
+  /**
+   * @UMAC_RD_WR:
+   * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
+   * &struct iwl_dbg_mem_access_rsp
+   */
+  UMAC_RD_WR = 0x1,
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
-    /**
-     * @DEBUG_HOST_NTF:
-     * &struct iwl_adwell_fine_tune_metrics_report or
-     * &struct iwl_channel_dwell_report or
-     * &struct iwl_profiling_notification
-     */
-    DEBUG_HOST_NTF = 0xFC,
+  /**
+   * @DEBUG_HOST_NTF:
+   * &struct iwl_adwell_fine_tune_metrics_report or
+   * &struct iwl_channel_dwell_report or
+   * &struct iwl_profiling_notification
+   */
+  DEBUG_HOST_NTF = 0xFC,
 #endif
-    /**
-     * @MFU_ASSERT_DUMP_NTF:
-     * &struct iwl_mfu_assert_dump_notif
-     */
-    MFU_ASSERT_DUMP_NTF = 0xFE,
+  /**
+   * @MFU_ASSERT_DUMP_NTF:
+   * &struct iwl_mfu_assert_dump_notif
+   */
+  MFU_ASSERT_DUMP_NTF = 0xFE,
 };
 
 /* Error response/notification */
 enum {
-    FW_ERR_UNKNOWN_CMD = 0x0,
-    FW_ERR_INVALID_CMD_PARAM = 0x1,
-    FW_ERR_SERVICE = 0x2,
-    FW_ERR_ARC_MEMORY = 0x3,
-    FW_ERR_ARC_CODE = 0x4,
-    FW_ERR_WATCH_DOG = 0x5,
-    FW_ERR_WEP_GRP_KEY_INDX = 0x10,
-    FW_ERR_WEP_KEY_SIZE = 0x11,
-    FW_ERR_OBSOLETE_FUNC = 0x12,
-    FW_ERR_UNEXPECTED = 0xFE,
-    FW_ERR_FATAL = 0xFF
+  FW_ERR_UNKNOWN_CMD = 0x0,
+  FW_ERR_INVALID_CMD_PARAM = 0x1,
+  FW_ERR_SERVICE = 0x2,
+  FW_ERR_ARC_MEMORY = 0x3,
+  FW_ERR_ARC_CODE = 0x4,
+  FW_ERR_WATCH_DOG = 0x5,
+  FW_ERR_WEP_GRP_KEY_INDX = 0x10,
+  FW_ERR_WEP_KEY_SIZE = 0x11,
+  FW_ERR_OBSOLETE_FUNC = 0x12,
+  FW_ERR_UNEXPECTED = 0xFE,
+  FW_ERR_FATAL = 0xFF
 };
 
 /**
@@ -95,12 +95,12 @@
  * @timestamp: TSF in usecs.
  */
 struct iwl_error_resp {
-    __le32 error_type;
-    uint8_t cmd_id;
-    uint8_t reserved1;
-    __le16 bad_cmd_seq_num;
-    __le32 error_service;
-    __le64 timestamp;
+  __le32 error_type;
+  uint8_t cmd_id;
+  uint8_t reserved1;
+  __le16 bad_cmd_seq_num;
+  __le32 error_service;
+  __le64 timestamp;
 } __packed;
 
 #define TX_FIFO_MAX_NUM_9000 8
@@ -132,18 +132,18 @@
  *   set, the last 3 members don't exist.
  */
 struct iwl_shared_mem_cfg_v2 {
-    __le32 shared_mem_addr;
-    __le32 shared_mem_size;
-    __le32 sample_buff_addr;
-    __le32 sample_buff_size;
-    __le32 txfifo_addr;
-    __le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
-    __le32 rxfifo_size[RX_FIFO_MAX_NUM];
-    __le32 page_buff_addr;
-    __le32 page_buff_size;
-    __le32 rxfifo_addr;
-    __le32 internal_txfifo_addr;
-    __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+  __le32 shared_mem_addr;
+  __le32 shared_mem_size;
+  __le32 sample_buff_addr;
+  __le32 sample_buff_size;
+  __le32 txfifo_addr;
+  __le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
+  __le32 rxfifo_size[RX_FIFO_MAX_NUM];
+  __le32 page_buff_addr;
+  __le32 page_buff_size;
+  __le32 rxfifo_addr;
+  __le32 internal_txfifo_addr;
+  __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
 
 /**
@@ -155,10 +155,10 @@
  * @rxfifo1_size: RXF1 size
  */
 struct iwl_shared_mem_lmac_cfg {
-    __le32 txfifo_addr;
-    __le32 txfifo_size[TX_FIFO_MAX_NUM];
-    __le32 rxfifo1_addr;
-    __le32 rxfifo1_size;
+  __le32 txfifo_addr;
+  __le32 txfifo_size[TX_FIFO_MAX_NUM];
+  __le32 rxfifo1_addr;
+  __le32 rxfifo1_size;
 
 } __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
 
@@ -178,16 +178,16 @@
  * @lmac_smem: per - LMAC smem data
  */
 struct iwl_shared_mem_cfg {
-    __le32 shared_mem_addr;
-    __le32 shared_mem_size;
-    __le32 sample_buff_addr;
-    __le32 sample_buff_size;
-    __le32 rxfifo2_addr;
-    __le32 rxfifo2_size;
-    __le32 page_buff_addr;
-    __le32 page_buff_size;
-    __le32 lmac_num;
-    struct iwl_shared_mem_lmac_cfg lmac_smem[2];
+  __le32 shared_mem_addr;
+  __le32 shared_mem_size;
+  __le32 sample_buff_addr;
+  __le32 sample_buff_size;
+  __le32 rxfifo2_addr;
+  __le32 rxfifo2_size;
+  __le32 page_buff_addr;
+  __le32 page_buff_size;
+  __le32 lmac_num;
+  struct iwl_shared_mem_lmac_cfg lmac_smem[2];
 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
 
 /**
@@ -200,12 +200,12 @@
  * @image_size: MFUART image size in bytes
  */
 struct iwl_mfuart_load_notif {
-    __le32 installed_ver;
-    __le32 external_ver;
-    __le32 status;
-    __le32 duration;
-    /* image size valid only in v2 of the command */
-    __le32 image_size;
+  __le32 installed_ver;
+  __le32 external_ver;
+  __le32 status;
+  __le32 duration;
+  /* image size valid only in v2 of the command */
+  __le32 image_size;
 } __packed; /* MFU_LOADER_NTFY_API_S_VER_2 */
 
 /**
@@ -219,12 +219,12 @@
  * @data: data buffer
  */
 struct iwl_mfu_assert_dump_notif {
-    __le32 assert_id;
-    __le32 curr_reset_num;
-    __le16 index_num;
-    __le16 parts_num;
-    __le32 data_size;
-    __le32 data[0];
+  __le32 assert_id;
+  __le32 curr_reset_num;
+  __le16 index_num;
+  __le16 parts_num;
+  __le32 data_size;
+  __le32 data[0];
 } __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */
 
 /**
@@ -236,8 +236,8 @@
  * @MARKER_ID_SYNC_CLOCK: sync FW time and systime
  */
 enum iwl_mvm_marker_id {
-    MARKER_ID_TX_FRAME_LATENCY = 1,
-    MARKER_ID_SYNC_CLOCK = 2,
+  MARKER_ID_TX_FRAME_LATENCY = 1,
+  MARKER_ID_SYNC_CLOCK = 2,
 }; /* MARKER_ID_API_E_VER_2 */
 
 /**
@@ -256,11 +256,11 @@
  * @metadata: additional meta data that will be written to the unsiffer log
  */
 struct iwl_mvm_marker {
-    uint8_t dw_len;
-    uint8_t marker_id;
-    __le16 reserved;
-    __le64 timestamp;
-    __le32 metadata[0];
+  uint8_t dw_len;
+  uint8_t marker_id;
+  __le16 reserved;
+  __le64 timestamp;
+  __le32 metadata[0];
 } __packed; /* MARKER_API_S_VER_1 */
 
 /**
@@ -269,14 +269,14 @@
  * @gp2: The gp2 clock value in the FW
  */
 struct iwl_mvm_marker_rsp {
-    __le32 gp2;
+  __le32 gp2;
 } __packed;
 
 /* Operation types for the debug mem access */
 enum {
-    DEBUG_MEM_OP_READ = 0,
-    DEBUG_MEM_OP_WRITE = 1,
-    DEBUG_MEM_OP_WRITE_BYTES = 2,
+  DEBUG_MEM_OP_READ = 0,
+  DEBUG_MEM_OP_WRITE = 1,
+  DEBUG_MEM_OP_WRITE_BYTES = 2,
 };
 
 #define DEBUG_MEM_MAX_SIZE_DWORDS 32
@@ -289,19 +289,19 @@
  * @data: for write opeations, contains the source buffer
  */
 struct iwl_dbg_mem_access_cmd {
-    __le32 op;
-    __le32 addr;
-    __le32 len;
-    __le32 data[];
+  __le32 op;
+  __le32 addr;
+  __le32 len;
+  __le32 data[];
 } __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */
 
 /* Status responses for the debug mem access */
 enum {
-    DEBUG_MEM_STATUS_SUCCESS = 0x0,
-    DEBUG_MEM_STATUS_FAILED = 0x1,
-    DEBUG_MEM_STATUS_LOCKED = 0x2,
-    DEBUG_MEM_STATUS_HIDDEN = 0x3,
-    DEBUG_MEM_STATUS_LENGTH = 0x4,
+  DEBUG_MEM_STATUS_SUCCESS = 0x0,
+  DEBUG_MEM_STATUS_FAILED = 0x1,
+  DEBUG_MEM_STATUS_LOCKED = 0x2,
+  DEBUG_MEM_STATUS_HIDDEN = 0x3,
+  DEBUG_MEM_STATUS_LENGTH = 0x4,
 };
 
 /**
@@ -311,9 +311,9 @@
  * @data: contains the read DWs
  */
 struct iwl_dbg_mem_access_rsp {
-    __le32 status;
-    __le32 len;
-    __le32 data[];
+  __le32 status;
+  __le32 len;
+  __le32 data[];
 } __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
 
 #define CONT_REC_COMMAND_SIZE 80
@@ -327,15 +327,15 @@
  * struct iwl_continuous_record_mode - recording mode
  */
 struct iwl_continuous_record_mode {
-    __le16 enable_recording;
+  __le16 enable_recording;
 } __packed;
 
 /*
  * struct iwl_continuous_record_cmd - enable/disable continuous recording
  */
 struct iwl_continuous_record_cmd {
-    struct iwl_continuous_record_mode record_mode;
-    uint8_t pad[CONT_REC_COMMAND_SIZE - sizeof(struct iwl_continuous_record_mode)];
+  struct iwl_continuous_record_mode record_mode;
+  uint8_t pad[CONT_REC_COMMAND_SIZE - sizeof(struct iwl_continuous_record_mode)];
 } __packed;
 
 /* maximum fragments to be allocated per target of allocationId */
@@ -347,8 +347,8 @@
  * @size: size in bytes
  */
 struct iwl_fragment_data {
-    __le64 address;
-    __le32 size;
+  __le64 address;
+  __le32 size;
 } __packed; /* FRAGMENT_STRUCTURE_API_S_VER_1 */
 
 /**
@@ -359,10 +359,10 @@
  * @fragments: memory fragments
  */
 struct iwl_buffer_allocation_cmd {
-    __le32 allocation_id;
-    __le32 buffer_location;
-    __le32 num_frags;
-    struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS];
+  __le32 allocation_id;
+  __le32 buffer_location;
+  __le32 num_frags;
+  struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS];
 } __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_DEBUG_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/dhc.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/dhc.h
index 4e44fdb..a2e95ce 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/dhc.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/dhc.h
@@ -49,12 +49,12 @@
  * @DHC_TABLE_MAX: maximal id value
  */
 enum iwl_dhc_table_id {
-    DHC_TABLE_TOOLS = 0,
-    DHC_TABLE_AUTOMATION = 1 << DHC_TABLE_MASK_POS,
-    DHC_TABLE_INTEGRATION = 2 << DHC_TABLE_MASK_POS,
-    DHC_TABLE_DEVELOPMENT = 3 << DHC_TABLE_MASK_POS,
-    DHC_TABLE_UT = 4 << DHC_TABLE_MASK_POS,
-    DHC_TABLE_MAX = DHC_TABLE_UT,
+  DHC_TABLE_TOOLS = 0,
+  DHC_TABLE_AUTOMATION = 1 << DHC_TABLE_MASK_POS,
+  DHC_TABLE_INTEGRATION = 2 << DHC_TABLE_MASK_POS,
+  DHC_TABLE_DEVELOPMENT = 3 << DHC_TABLE_MASK_POS,
+  DHC_TABLE_UT = 4 << DHC_TABLE_MASK_POS,
+  DHC_TABLE_MAX = DHC_TABLE_UT,
 };
 
 /**
@@ -63,7 +63,7 @@
  *  1 DW param - bitmask of tx fifos to disable interrupts for
  */
 enum iwl_dhc_lmac_tools_table {
-    DHC_TOOLS_LMAC_TXF_FIFO_DISABLE = 6,
+  DHC_TOOLS_LMAC_TXF_FIFO_DISABLE = 6,
 };
 
 /**
@@ -78,13 +78,13 @@
  * @DHC_MAX_AUTO_LMAC_REQUEST: the size of the Automation table in lmac
  */
 enum iwl_dhc_lmac_automation_table {
-    DHC_AUTO_LMAC_PHY_GET_STAT = 0,
-    DHC_AUTO_LMAC_CONFIG_DEBUG_EBS = 1,
-    DHC_AUTO_LMAC_PHY_ENABLE_CRC_CHECK = 2,
-    DHC_AUTO_LMAC_SAD_RETURN_PREF_ANTS = 3,
-    DHC_AUTO_LMAC_PYFI_TIMING = 4,
-    DHC_AUTO_LMAC_REPORT_POWER_STATISTICS = 5,
-    DHC_MAX_AUTO_LMAC_REQUEST = 6,
+  DHC_AUTO_LMAC_PHY_GET_STAT = 0,
+  DHC_AUTO_LMAC_CONFIG_DEBUG_EBS = 1,
+  DHC_AUTO_LMAC_PHY_ENABLE_CRC_CHECK = 2,
+  DHC_AUTO_LMAC_SAD_RETURN_PREF_ANTS = 3,
+  DHC_AUTO_LMAC_PYFI_TIMING = 4,
+  DHC_AUTO_LMAC_REPORT_POWER_STATISTICS = 5,
+  DHC_MAX_AUTO_LMAC_REQUEST = 6,
 };
 
 /**
@@ -101,13 +101,13 @@
  * @DHC_MAX_AUTO_UMAC_REQUEST: the size of the Automation table in umac
  */
 enum iwl_dhc_umac_automation_table {
-    DHC_AUTO_UMAC_SET_PROFILING_REPORT_CONF = 0,
-    DHC_AUTO_UMAC_REPORT_PROFILING = 1,
-    DHC_AUTO_UMAC_SCAN_CHANNEL_DWELL_ENABLE_REPORT = 2,
-    DHC_AUTO_UMAC_ADAPTIVE_DWELL_SCAN_FINE_TUNE_ENABLE_REPORT = 3,
-    DHC_AUTO_UMAC_CONFIGURE_POWER_FLAGS = 4,
-    DHC_AUTO_UMAC_REPORT_POWER_STATISTICS = 5,
-    DHC_MAX_AUTO_UMAC_REQUEST = 6,
+  DHC_AUTO_UMAC_SET_PROFILING_REPORT_CONF = 0,
+  DHC_AUTO_UMAC_REPORT_PROFILING = 1,
+  DHC_AUTO_UMAC_SCAN_CHANNEL_DWELL_ENABLE_REPORT = 2,
+  DHC_AUTO_UMAC_ADAPTIVE_DWELL_SCAN_FINE_TUNE_ENABLE_REPORT = 3,
+  DHC_AUTO_UMAC_CONFIGURE_POWER_FLAGS = 4,
+  DHC_AUTO_UMAC_REPORT_POWER_STATISTICS = 5,
+  DHC_MAX_AUTO_UMAC_REQUEST = 6,
 };
 
 /**
@@ -120,12 +120,12 @@
  * @DHC_INTEGRATION_MAX: Maximum UMAC integration table entries
  */
 enum iwl_dhc_umac_integration_table {
-    DHC_INTEGRATION_POWER_FLAGS,
-    DHC_INTEGRATION_TLC_DEBUG_CONFIG,
-    DHC_INTEGRATION_QUOTA_ENFORCE,
-    DHC_INT_UMAC_BT_COEX_USER_OVERRIDES,
-    DHC_INT_UMAC_TWT_OPERATION,
-    DHC_INTEGRATION_MAX
+  DHC_INTEGRATION_POWER_FLAGS,
+  DHC_INTEGRATION_TLC_DEBUG_CONFIG,
+  DHC_INTEGRATION_QUOTA_ENFORCE,
+  DHC_INT_UMAC_BT_COEX_USER_OVERRIDES,
+  DHC_INT_UMAC_TWT_OPERATION,
+  DHC_INTEGRATION_MAX
 };
 
 #define DHC_TARGET_UMAC BIT(27)
@@ -148,9 +148,9 @@
  * @data: the concatenated data.
  */
 struct iwl_dhc_cmd {
-    __le32 length;
-    __le32 index_and_mask;
-    __le32 data[0];
+  __le32 length;
+  __le32 index_and_mask;
+  __le32 data[0];
 } __packed; /* DHC_CMD_API_S */
 
 /**
@@ -159,8 +159,8 @@
  * @data: the response data
  */
 struct iwl_dhc_cmd_resp {
-    __le32 status;
-    __le32 data[0];
+  __le32 status;
+  __le32 data[0];
 } __packed;
 
 /**
@@ -179,85 +179,85 @@
  *  bit 5 - fifos_metric
  */
 struct iwl_dhc_profile_cmd {
-    __le32 period;
-    __le32 reset;
-    __le32 enabled_metrics;
+  __le32 period;
+  __le32 reset;
+  __le32 enabled_metrics;
 } __packed;
 
 enum iwl_profiling_context_id {
-    PROFILING_CONTEXT_PS_THREAD,
-    PROFILING_CONTEXT_FMAC_THREAD,
-    PROFILING_CONTEXT_MAIN_THREAD,
-    PROFILING_CONTEXT_AIRTIME_THREAD,
-    PROFILING_CONTEXT_MPAPD_THREAD,
-    PROFILING_CONTEXT_TIMER_IRQ,
-    PROFILING_CONTEXT_RXF2_IRQ,
-    PROFILING_CONTEXT_CMD_IRQ,
-    PROFILING_CONTEXT_MAX_NUM
+  PROFILING_CONTEXT_PS_THREAD,
+  PROFILING_CONTEXT_FMAC_THREAD,
+  PROFILING_CONTEXT_MAIN_THREAD,
+  PROFILING_CONTEXT_AIRTIME_THREAD,
+  PROFILING_CONTEXT_MPAPD_THREAD,
+  PROFILING_CONTEXT_TIMER_IRQ,
+  PROFILING_CONTEXT_RXF2_IRQ,
+  PROFILING_CONTEXT_CMD_IRQ,
+  PROFILING_CONTEXT_MAX_NUM
 }; /* PROFILING_CONTEXT_ID_API_E */
 
 enum iwl_profiling_tasks_id {
-    PROFILING_MAIN_INIT_TASK,
-    PROFILING_FMAC_INIT_TASK,
-    PROFILING_ELOOP_TASK,
-    PROFILING_UMAC_TO_FMAC_EVENT_TASK,
-    PROFILING_LMAC_RXF_TASK,
-    PROFILING_MPAPD_TASK,
-    PROFILING_TASKS_MAX_NUM
+  PROFILING_MAIN_INIT_TASK,
+  PROFILING_FMAC_INIT_TASK,
+  PROFILING_ELOOP_TASK,
+  PROFILING_UMAC_TO_FMAC_EVENT_TASK,
+  PROFILING_LMAC_RXF_TASK,
+  PROFILING_MPAPD_TASK,
+  PROFILING_TASKS_MAX_NUM
 }; /* PROFILING_TASKS_ID_API_E */
 
 enum iwl_profiling_flow_id {
-    PROFILING_HANDLING_PRB_RQST_UMAC_FLOW,
-    PROFILING_UMAC_BCN_HANDLING_FLOW,
-    PROFILING_UMAC_NON_TKIP_HANDLING_FLOW,
-    PROFILING_UMAC_TKIP_HANDLING_FLOW,
-    PROFILING_UMAC_LMAC_NOTIFICATION_THREAD_HANDLING,
-    PROFILING_UMAC_RXF2_DROPPABLE_FRAME_ISR_HANDLING,
-    PROFILING_UMAC_OTHER_FRAMES_HANDLING_FLOW,
-    PROFILING_AIRTIME_CONTEXT_GET_FLOW,
-    PROFILING_AIRTIME_CONTEXT_LOSE_FLOW,
-    PROFILING_MAC_CONTEXT_LOSE_FLOW,
-    PROFILING_AUX_CONTEXT_GET_FLOW,
-    PROFILING_AUX_CONTEXT_CLEAR_FLOW,
-    PROFILING_AUX_CONTEXT_SET_FLOW,
-    PROFILING_AIRTIME_SCHEDULER_SESSION_CALC_FLOW,
-    PROFILING_TLC_STATISTICS_HANDLING_FLOW,
-    PROFILING_CHANNEL_SWITCH_FLOW,
-    PROFILING_THREAD_CONTEXT_SWITCH_FLOW,
-    PROFILING_SYSTEM_POWER_DOWN_FLOW,
-    PROFILING_FLOW_MAX_NUM
+  PROFILING_HANDLING_PRB_RQST_UMAC_FLOW,
+  PROFILING_UMAC_BCN_HANDLING_FLOW,
+  PROFILING_UMAC_NON_TKIP_HANDLING_FLOW,
+  PROFILING_UMAC_TKIP_HANDLING_FLOW,
+  PROFILING_UMAC_LMAC_NOTIFICATION_THREAD_HANDLING,
+  PROFILING_UMAC_RXF2_DROPPABLE_FRAME_ISR_HANDLING,
+  PROFILING_UMAC_OTHER_FRAMES_HANDLING_FLOW,
+  PROFILING_AIRTIME_CONTEXT_GET_FLOW,
+  PROFILING_AIRTIME_CONTEXT_LOSE_FLOW,
+  PROFILING_MAC_CONTEXT_LOSE_FLOW,
+  PROFILING_AUX_CONTEXT_GET_FLOW,
+  PROFILING_AUX_CONTEXT_CLEAR_FLOW,
+  PROFILING_AUX_CONTEXT_SET_FLOW,
+  PROFILING_AIRTIME_SCHEDULER_SESSION_CALC_FLOW,
+  PROFILING_TLC_STATISTICS_HANDLING_FLOW,
+  PROFILING_CHANNEL_SWITCH_FLOW,
+  PROFILING_THREAD_CONTEXT_SWITCH_FLOW,
+  PROFILING_SYSTEM_POWER_DOWN_FLOW,
+  PROFILING_FLOW_MAX_NUM
 }; /* PROFILING_FLOW_ID_API_E */
 
 enum iwl_profiling_fifo_id {
-    PROFILING_FIFO_UMAC_TO_LMAC1,
-    PROFILING_FIFO_UMAC_TO_LMAC2,
-    PROFILING_FIFO_LMAC1_TO_UMAC,
-    PROFILING_FIFO_LMAC2_TO_UMAC,
-    PROFILING_FIFO_RXF2,
-    PROFILING_FIFO_MAX_NUM
+  PROFILING_FIFO_UMAC_TO_LMAC1,
+  PROFILING_FIFO_UMAC_TO_LMAC2,
+  PROFILING_FIFO_LMAC1_TO_UMAC,
+  PROFILING_FIFO_LMAC2_TO_UMAC,
+  PROFILING_FIFO_RXF2,
+  PROFILING_FIFO_MAX_NUM
 }; /* PROFILING_FIFO_ID_API_E */
 
 enum iwl_profiling_pool_id {
-    PROFILING_POOL_MGMT_FRAME,
-    PROFILING_POOL_MPDU_FRWK_1,
-    PROFILING_POOL_MPDU_FRWK_2,
-    PROFILING_POOL_MSG_QUEUE_AIRTIME,
-    PROFILING_POOL_MSG_QUEUE_MAIN,
-    PROFILING_POOL_MSG_QUEUE_BACKGROUND,
-    PROFILING_POOL_MSG_QUEUE_MPAPD,
-    PROFILING_POOL_MSG_QUEUE_FMAC,
-    PROFILING_POOL_BLOCK_MSG_QUEUE_AIRTIME_BIG,
-    PROFILING_POOL_BLOCK_MSG_QUEUE_AIRTIME_SMALL,
-    PROFILING_POOL_BLOCK_MSG_QUEUE_MAIN_BIG,
-    PROFILING_POOL_BLOCK_MSG_QUEUE_MAIN_SMALL,
-    PROFILING_POOL_BLOCK_MSG_QUEUE_FMAC_BIG,
-    PROFILING_POOL_BLOCK_MSG_QUEUE_FMAC_SMALL,
-    PROFILING_POOL_INTERNAL_TX,
-    PROFILING_POOL_CYCLIC_LMAC_RX,
-    PROFILING_POOL_CYCLIC_UMAC_2_FMAC,
-    PROFILING_POOL_BYTE_UMAC_TX,
-    PROFILING_POOL_BYTE_UMAC_OS,
-    PROFILING_POOL_MAX_NUM
+  PROFILING_POOL_MGMT_FRAME,
+  PROFILING_POOL_MPDU_FRWK_1,
+  PROFILING_POOL_MPDU_FRWK_2,
+  PROFILING_POOL_MSG_QUEUE_AIRTIME,
+  PROFILING_POOL_MSG_QUEUE_MAIN,
+  PROFILING_POOL_MSG_QUEUE_BACKGROUND,
+  PROFILING_POOL_MSG_QUEUE_MPAPD,
+  PROFILING_POOL_MSG_QUEUE_FMAC,
+  PROFILING_POOL_BLOCK_MSG_QUEUE_AIRTIME_BIG,
+  PROFILING_POOL_BLOCK_MSG_QUEUE_AIRTIME_SMALL,
+  PROFILING_POOL_BLOCK_MSG_QUEUE_MAIN_BIG,
+  PROFILING_POOL_BLOCK_MSG_QUEUE_MAIN_SMALL,
+  PROFILING_POOL_BLOCK_MSG_QUEUE_FMAC_BIG,
+  PROFILING_POOL_BLOCK_MSG_QUEUE_FMAC_SMALL,
+  PROFILING_POOL_INTERNAL_TX,
+  PROFILING_POOL_CYCLIC_LMAC_RX,
+  PROFILING_POOL_CYCLIC_UMAC_2_FMAC,
+  PROFILING_POOL_BYTE_UMAC_TX,
+  PROFILING_POOL_BYTE_UMAC_OS,
+  PROFILING_POOL_MAX_NUM
 }; /* PROFILING_POOL_ID_API_E */
 
 /**
@@ -268,9 +268,9 @@
  * @enabled_metrics: Enabled metrics bitmap
  */
 struct iwl_profiling_configuration {
-    __le32 time_since_last_metrics_reset;
-    __le32 current_system_time;
-    __le32 enabled_metrics;
+  __le32 time_since_last_metrics_reset;
+  __le32 current_system_time;
+  __le32 enabled_metrics;
 } __packed; /* PROFILING_CONFIGURATION_API_S */
 
 /**
@@ -323,28 +323,28 @@
  * @stack_max_usage_task: Task ID that used the max stack space
  */
 struct iwl_profiling_umac_cpu_usage {
-    __le32 context_id;
-    __le32 run_time;
-    __le32 enabled_metrics;
-    __le32 max_processing_time;
-    __le32 num_of_page_faults_dl;
-    __le32 num_of_page_faults_dl_up;
-    __le32 max_processing_time_task;
-    __le32 max_block_time;
-    __le16 max_pf_handle_time_dl;
-    __le16 max_pf_handle_time_dl_up;
-    __le16 min_pf_handle_time_dl;
-    __le16 min_pf_handle_time_dl_up;
-    __le32 sum_pf_handle_time_dl;
-    __le32 sum_pf_handle_time_dl_up;
-    __le16 p_fHandle_time_bucket1;
-    __le16 p_fHandle_time_bucket2;
-    __le16 p_fHandle_time_bucket3;
-    __le16 p_fHandle_time_bucket4;
-    __le16 p_fHandle_time_bucket5;
-    __le16 stack_size;
-    __le16 stack_max_usage;
-    __le32 stack_max_usage_task;
+  __le32 context_id;
+  __le32 run_time;
+  __le32 enabled_metrics;
+  __le32 max_processing_time;
+  __le32 num_of_page_faults_dl;
+  __le32 num_of_page_faults_dl_up;
+  __le32 max_processing_time_task;
+  __le32 max_block_time;
+  __le16 max_pf_handle_time_dl;
+  __le16 max_pf_handle_time_dl_up;
+  __le16 min_pf_handle_time_dl;
+  __le16 min_pf_handle_time_dl_up;
+  __le32 sum_pf_handle_time_dl;
+  __le32 sum_pf_handle_time_dl_up;
+  __le16 p_fHandle_time_bucket1;
+  __le16 p_fHandle_time_bucket2;
+  __le16 p_fHandle_time_bucket3;
+  __le16 p_fHandle_time_bucket4;
+  __le16 p_fHandle_time_bucket5;
+  __le16 stack_size;
+  __le16 stack_max_usage;
+  __le32 stack_max_usage_task;
 
 } __packed; /* PROFILING_UMAC_CPU_USAGE_API_S */
 
@@ -373,14 +373,14 @@
  *  encountered (measured each time a PF is queued to be handled).
  */
 struct iwl_profiling_umac_general_paging {
-    __le32 num_of_page_faults;
-    __le32 inter_page_fault_time_bucket1;
-    __le32 inter_page_fault_time_bucket2;
-    __le32 inter_page_fault_time_bucket3;
-    __le32 inter_page_fault_time_bucket4;
-    __le32 inter_page_fault_time_bucket5;
-    __le16 max_page_fault_wait_time;
-    __le16 max_num_of_pending_pfs;
+  __le32 num_of_page_faults;
+  __le32 inter_page_fault_time_bucket1;
+  __le32 inter_page_fault_time_bucket2;
+  __le32 inter_page_fault_time_bucket3;
+  __le32 inter_page_fault_time_bucket4;
+  __le32 inter_page_fault_time_bucket5;
+  __le16 max_page_fault_wait_time;
+  __le16 max_num_of_pending_pfs;
 } __packed; /* PROFILING_UMAC_GENERAL_PAGING_API_S */
 
 /**
@@ -393,11 +393,11 @@
  * @min_run_time: Minimal time this flow was timed running
  */
 struct iwl_profiling_umac_flow_timing {
-    __le32 flow_id;
-    __le32 num_of_runs;
-    __le32 total_run_time;
-    __le32 max_run_time;
-    __le32 min_run_time;
+  __le32 flow_id;
+  __le32 num_of_runs;
+  __le32 total_run_time;
+  __le32 max_run_time;
+  __le32 min_run_time;
 } __packed; /* PROFILING_UMAC_FLOW_TIMING_API_S */
 
 /**
@@ -405,7 +405,7 @@
  * @max_critical_section_time: Maximal length of time of all critical sections
  */
 struct iwl_profiling_umac_critical_section {
-    __le32 max_critical_section_time;
+  __le32 max_critical_section_time;
 } __packed; /* PROFILING_UMAC_CRITICAL_SECTION_API_S */
 
 /**
@@ -418,9 +418,9 @@
  *  fragmentation
  */
 struct iwl_profiling_umac_memory_pools {
-    __le32 pool_id;
-    __le32 min_free_space;
-    __le32 largest_allocated_size;
+  __le32 pool_id;
+  __le32 min_free_space;
+  __le32 largest_allocated_size;
 } __packed; /* PROFILING_UMAC_MEMORY_POOLS_API_S */
 
 /**
@@ -431,8 +431,8 @@
  *  reset
  */
 struct iwl_profiling_umac_fifos {
-    __le32 fifo_id;
-    __le32 min_free_bytes;
+  __le32 fifo_id;
+  __le32 min_free_bytes;
 } __packed; /* PROFILING_UMAC_FIFOS_API_S */
 
 /**
@@ -448,13 +448,13 @@
  * @umac_fifos_arr: UMAC FIFOs (provided for each enumerated FIFO)
  */
 struct iwl_profiling_umac_metrics_report {
-    struct iwl_profiling_configuration configuration;
-    struct iwl_profiling_umac_cpu_usage umac_cpu_usage_ctx[PROFILING_CONTEXT_MAX_NUM];
-    struct iwl_profiling_umac_general_paging umac_general_paging;
-    struct iwl_profiling_umac_flow_timing umac_flows_timing[PROFILING_FLOW_MAX_NUM];
-    struct iwl_profiling_umac_critical_section umac_critical_section;
-    struct iwl_profiling_umac_memory_pools umac_memory_pools[PROFILING_POOL_MAX_NUM];
-    struct iwl_profiling_umac_fifos umac_fifos_arr[PROFILING_FIFO_MAX_NUM];
+  struct iwl_profiling_configuration configuration;
+  struct iwl_profiling_umac_cpu_usage umac_cpu_usage_ctx[PROFILING_CONTEXT_MAX_NUM];
+  struct iwl_profiling_umac_general_paging umac_general_paging;
+  struct iwl_profiling_umac_flow_timing umac_flows_timing[PROFILING_FLOW_MAX_NUM];
+  struct iwl_profiling_umac_critical_section umac_critical_section;
+  struct iwl_profiling_umac_memory_pools umac_memory_pools[PROFILING_POOL_MAX_NUM];
+  struct iwl_profiling_umac_fifos umac_fifos_arr[PROFILING_FIFO_MAX_NUM];
 } __packed; /* PROFILING_UMAC_METRICS_REPORT_API_S */
 
 /**
@@ -471,13 +471,13 @@
  *      apply.
  */
 struct iwl_ps_report {
-    __le32 sleep_allowed_count;
-    __le32 sleep_time;
-    __le32 max_sleep_time;
-    __le32 missed_beacon_count;
-    __le32 missed_3_consecutive_beacon_count;
-    __le32 ps_flags;
-    __le32 max_active_duration;
+  __le32 sleep_allowed_count;
+  __le32 sleep_time;
+  __le32 max_sleep_time;
+  __le32 missed_beacon_count;
+  __le32 missed_3_consecutive_beacon_count;
+  __le32 ps_flags;
+  __le32 max_active_duration;
 } __packed; /* PS_REPORT_API_S */
 
 /**
@@ -494,8 +494,8 @@
  *  bits 0-25 is a specific entry index in the table specified in bits 28-30
  */
 struct iwl_dhn_hdr {
-    __le32 length;
-    __le32 index_and_mask;
+  __le32 length;
+  __le32 index_and_mask;
 } __packed; /* DHC_NOTIFICATION_API_S */
 
 /**
@@ -504,8 +504,8 @@
  * @profiling_metrics: the profiling metrics
  */
 struct iwl_profiling_notification {
-    struct iwl_dhn_hdr header;
-    struct iwl_profiling_umac_metrics_report profiling_metrics;
+  struct iwl_dhn_hdr header;
+  struct iwl_profiling_umac_metrics_report profiling_metrics;
 } __packed; /* DHC_NOTIFICATION_API_S */
 
 /**
@@ -521,10 +521,10 @@
  * @dwell_time: requested dwell time
  */
 struct iwl_channel_dwell_report {
-    struct iwl_dhn_hdr header;
-    __le32 channel_num;
-    __le32 dwell_tsf;
-    __le32 dwell_time;
+  struct iwl_dhn_hdr header;
+  __le32 channel_num;
+  __le32 dwell_tsf;
+  __le32 dwell_time;
 } __packed; /* SCAN_CHANNEL_DWELL_REPORT_API_S */
 
 /**
@@ -539,10 +539,10 @@
  * @success_counter: fine tune success counter
  */
 struct iwl_adwell_fine_tune_metrics_report {
-    struct iwl_dhn_hdr header;
-    int8_t index[IWL_SCAN_MAX_NUM_OF_CHANNELS];
-    uint8_t scan_counter[IWL_SCAN_MAX_NUM_OF_CHANNELS];
-    uint8_t success_counter[IWL_SCAN_MAX_NUM_OF_CHANNELS];
+  struct iwl_dhn_hdr header;
+  int8_t index[IWL_SCAN_MAX_NUM_OF_CHANNELS];
+  uint8_t scan_counter[IWL_SCAN_MAX_NUM_OF_CHANNELS];
+  uint8_t success_counter[IWL_SCAN_MAX_NUM_OF_CHANNELS];
 } __packed; /* ADAPTIVE_DWELL_SCAN_FINE_TUNE_METRICS_REPORT_API_S */
 
 /**
@@ -552,8 +552,8 @@
  * @QUOTA_ENFORCE_TYPE_LIMITATION: Enforce maximum quota.
  */
 enum iwl_dhc_quota_enforce_type {
-    QUOTA_ENFORCE_TYPE_RESERVATION,
-    QUOTA_ENFORCE_TYPE_LIMITATION,
+  QUOTA_ENFORCE_TYPE_RESERVATION,
+  QUOTA_ENFORCE_TYPE_LIMITATION,
 }; /* DHC_QUOTA_ENFORCE_TYPE_API_E */
 
 /**
@@ -565,10 +565,10 @@
  * @quota_percentage: quota to enforce as percentage [0 - 100]
  */
 struct iwl_dhc_quota_enforce {
-    uint8_t macs;
-    uint8_t quota_enforce_type;
-    __le16 reserved;
-    __le32 quota_percentage[MAC_INDEX_AUX];
+  uint8_t macs;
+  uint8_t quota_enforce_type;
+  __le16 reserved;
+  __le32 quota_percentage[MAC_INDEX_AUX];
 } __packed; /* DHC_QUOTA_ENFORCE_API_S */
 
 /**
@@ -585,15 +585,15 @@
  * @DHC_TWT_TEARDOWN: Send a TearDown TWT command
  */
 enum iwl_dhc_twt_operation_type {
-    DHC_TWT_REQUEST,
-    DHC_TWT_SUGGEST,
-    DHC_TWT_DEMAND,
-    DHC_TWT_GROUPING,
-    DHC_TWT_ACCEPT,
-    DHC_TWT_ALTERNATE,
-    DHC_TWT_DICTATE,
-    DHC_TWT_REJECT,
-    DHC_TWT_TEARDOWN,
+  DHC_TWT_REQUEST,
+  DHC_TWT_SUGGEST,
+  DHC_TWT_DEMAND,
+  DHC_TWT_GROUPING,
+  DHC_TWT_ACCEPT,
+  DHC_TWT_ALTERNATE,
+  DHC_TWT_DICTATE,
+  DHC_TWT_REJECT,
+  DHC_TWT_TEARDOWN,
 }; /* DHC_TWT_OPERATION_TYPE_E */
 
 /**
@@ -619,24 +619,24 @@
  * @reserved: reserved
  */
 struct iwl_dhc_twt_operation {
-    __le32 mac_id;
-    __le32 twt_operation;
-    __le64 target_wake_time;
-    __le32 interval_exp;
-    __le32 interval_mantissa;
-    __le32 min_wake_duration;
-    uint8_t trigger;
-    uint8_t flow_type;
-    uint8_t flow_id;
-    uint8_t protection;
-    uint8_t ndo_paging_indicator;
-    uint8_t responder_pm_mode;
-    uint8_t negotiation_type;
-    uint8_t twt_request;
-    uint8_t implicit;
-    uint8_t twt_group_assignment;
-    uint8_t twt_channel;
-    uint8_t reserved;
+  __le32 mac_id;
+  __le32 twt_operation;
+  __le64 target_wake_time;
+  __le32 interval_exp;
+  __le32 interval_mantissa;
+  __le32 min_wake_duration;
+  uint8_t trigger;
+  uint8_t flow_type;
+  uint8_t flow_id;
+  uint8_t protection;
+  uint8_t ndo_paging_indicator;
+  uint8_t responder_pm_mode;
+  uint8_t negotiation_type;
+  uint8_t twt_request;
+  uint8_t implicit;
+  uint8_t twt_group_assignment;
+  uint8_t twt_channel;
+  uint8_t reserved;
 }; /* DHC_TWT_OPERATION_API_S */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_DHC_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/filter.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/filter.h
index c307382..5307150 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/filter.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/filter.h
@@ -56,13 +56,13 @@
  *      IMPORTANT: add padding if necessary to ensure DWORD alignment.
  */
 struct iwl_mcast_filter_cmd {
-    uint8_t filter_own;
-    uint8_t port_id;
-    uint8_t count;
-    uint8_t pass_all;
-    uint8_t bssid[6];
-    uint8_t reserved[2];
-    uint8_t addr_list[0];
+  uint8_t filter_own;
+  uint8_t port_id;
+  uint8_t count;
+  uint8_t pass_all;
+  uint8_t bssid[6];
+  uint8_t reserved[2];
+  uint8_t addr_list[0];
 } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
 
 #define MAX_BCAST_FILTERS 8
@@ -75,8 +75,8 @@
  *  start of ip payload).
  */
 enum iwl_mvm_bcast_filter_attr_offset {
-    BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
-    BCAST_FILTER_OFFSET_IP_END = 1,
+  BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
+  BCAST_FILTER_OFFSET_IP_END = 1,
 };
 
 /**
@@ -89,11 +89,11 @@
  * @mask:   mask to match (big endian).
  */
 struct iwl_fw_bcast_filter_attr {
-    uint8_t offset_type;
-    uint8_t offset;
-    __le16 reserved1;
-    __be32 val;
-    __be32 mask;
+  uint8_t offset_type;
+  uint8_t offset;
+  __le16 reserved1;
+  __be32 val;
+  __be32 mask;
 } __packed; /* BCAST_FILTER_ATT_S_VER_1 */
 
 /**
@@ -102,8 +102,8 @@
  * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
  */
 enum iwl_mvm_bcast_filter_frame_type {
-    BCAST_FILTER_FRAME_TYPE_ALL = 0,
-    BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
+  BCAST_FILTER_FRAME_TYPE_ALL = 0,
+  BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
 };
 
 /**
@@ -116,11 +116,11 @@
  *  only when all its attributes are matched (i.e. AND relationship)
  */
 struct iwl_fw_bcast_filter {
-    uint8_t discard;
-    uint8_t frame_type;
-    uint8_t num_attrs;
-    uint8_t reserved1;
-    struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
+  uint8_t discard;
+  uint8_t frame_type;
+  uint8_t num_attrs;
+  uint8_t reserved1;
+  struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
 } __packed; /* BCAST_FILTER_S_VER_1 */
 
 /**
@@ -130,9 +130,9 @@
  * @attached_filters: bitmap of relevant filters for this mac.
  */
 struct iwl_fw_bcast_mac {
-    uint8_t default_discard;
-    uint8_t reserved1;
-    __le16 attached_filters;
+  uint8_t default_discard;
+  uint8_t reserved1;
+  __le16 attached_filters;
 } __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
 
 /**
@@ -145,12 +145,12 @@
  * @macs: broadcast filtering configuration per-mac
  */
 struct iwl_bcast_filter_cmd {
-    uint8_t disable;
-    uint8_t max_bcast_filters;
-    uint8_t max_macs;
-    uint8_t reserved1;
-    struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
-    struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
+  uint8_t disable;
+  uint8_t max_bcast_filters;
+  uint8_t max_macs;
+  uint8_t reserved1;
+  struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
+  struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
 } __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_FILTER_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/fmac.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/fmac.h
index ad14d73..23b223c 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/fmac.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/fmac.h
@@ -41,318 +41,318 @@
  * enum iwl_fmac_cmds - Supported FMAC commands and notifications
  */
 enum iwl_fmac_cmds {
-    /* Commands */
-    /**
-     * @FMAC_SCAN:
-     * Perform a scan using configuration defined in
-     * &struct iwl_fmac_scan_cmd.
-     * The scan flow is asynchronous and upon completion a
-     * %FMAC_SCAN_COMPLETE notification is sent by fmac using
-     * &struct iwl_fmac_scan_complete_notif.
-     */
-    FMAC_SCAN = 0x0,
+  /* Commands */
+  /**
+   * @FMAC_SCAN:
+   * Perform a scan using configuration defined in
+   * &struct iwl_fmac_scan_cmd.
+   * The scan flow is asynchronous and upon completion a
+   * %FMAC_SCAN_COMPLETE notification is sent by fmac using
+   * &struct iwl_fmac_scan_complete_notif.
+   */
+  FMAC_SCAN = 0x0,
 
-    /**
-     * @FMAC_SCAN_ABORT:
-     * Stop an ongoing scan. The command is defined in
-     * &struct iwl_fmac_scan_abort_cmd.
-     */
-    FMAC_SCAN_ABORT = 0x1,
+  /**
+   * @FMAC_SCAN_ABORT:
+   * Stop an ongoing scan. The command is defined in
+   * &struct iwl_fmac_scan_abort_cmd.
+   */
+  FMAC_SCAN_ABORT = 0x1,
 
-    /**
-     * @FMAC_ADD_VIF:
-     * Add a virtual interface. The interface configuration is
-     * defined in &struct iwl_fmac_add_vif_cmd.
-     */
-    FMAC_ADD_VIF = 0x2,
+  /**
+   * @FMAC_ADD_VIF:
+   * Add a virtual interface. The interface configuration is
+   * defined in &struct iwl_fmac_add_vif_cmd.
+   */
+  FMAC_ADD_VIF = 0x2,
 
-    /**
-     * @FMAC_DEL_VIF:
-     * Delete a virtual interface. The command is defined in
-     * &struct iwl_fmac_del_vif_cmd.
-     */
-    FMAC_DEL_VIF = 0x3,
+  /**
+   * @FMAC_DEL_VIF:
+   * Delete a virtual interface. The command is defined in
+   * &struct iwl_fmac_del_vif_cmd.
+   */
+  FMAC_DEL_VIF = 0x3,
 
-    /**
-     * @FMAC_CONNECT:
-     * As a station interface, connect to a network, using the configuration
-     * defined in &struct iwl_fmac_connect_cmd. The connect flow is
-     * asynchronous and upon completion a %FMAC_CONNECT_RESULT notification
-     * is sent by FMAC using &struct iwl_fmac_connect_result.
-     */
-    FMAC_CONNECT = 0x4,
+  /**
+   * @FMAC_CONNECT:
+   * As a station interface, connect to a network, using the configuration
+   * defined in &struct iwl_fmac_connect_cmd. The connect flow is
+   * asynchronous and upon completion a %FMAC_CONNECT_RESULT notification
+   * is sent by FMAC using &struct iwl_fmac_connect_result.
+   */
+  FMAC_CONNECT = 0x4,
 
-    /**
-     * @FMAC_DISCONNECT:
-     * As station interface, disconnect. The command is defined in
-     * &struct iwl_fmac_disconnect_cmd.
-     */
-    FMAC_DISCONNECT = 0x5,
+  /**
+   * @FMAC_DISCONNECT:
+   * As station interface, disconnect. The command is defined in
+   * &struct iwl_fmac_disconnect_cmd.
+   */
+  FMAC_DISCONNECT = 0x5,
 
-    /**
-     * @FMAC_SAR: TODO
-     */
-    FMAC_SAR = 0x6,
+  /**
+   * @FMAC_SAR: TODO
+   */
+  FMAC_SAR = 0x6,
 
-    /**
-     * @FMAC_NVM:
-     * Apply the global NVM configuration using configuration defined in
-     * &struct iwl_fmac_nvm_cmd.
-     */
-    FMAC_NVM = 0x7,
+  /**
+   * @FMAC_NVM:
+   * Apply the global NVM configuration using configuration defined in
+   * &struct iwl_fmac_nvm_cmd.
+   */
+  FMAC_NVM = 0x7,
 
 #ifdef CPTCFG_IWLFMAC_9000_SUPPORT
-    /**
-     * @FMAC_REQ_QUEUE:
-     * Request a new transmit queue, using the configuration in
-     * &struct iwl_fmac_req_queue. Only used with 9000-series devices.
-     */
-    FMAC_REQ_QUEUE = 0x8,
+  /**
+   * @FMAC_REQ_QUEUE:
+   * Request a new transmit queue, using the configuration in
+   * &struct iwl_fmac_req_queue. Only used with 9000-series devices.
+   */
+  FMAC_REQ_QUEUE = 0x8,
 
-    /**
-     * @FMAC_REL_QUEUE:
-     * Release a queue allocated for <RA, TID>, using the configuration in
-     * &struct iwl_fmac_rel_queue. Only used with 9000-series devices.
-     */
-    FMAC_REL_QUEUE = 0x9,
+  /**
+   * @FMAC_REL_QUEUE:
+   * Release a queue allocated for <RA, TID>, using the configuration in
+   * &struct iwl_fmac_rel_queue. Only used with 9000-series devices.
+   */
+  FMAC_REL_QUEUE = 0x9,
 #endif
 
 #ifdef CPTCFG_IWLFMAC_9000_SUPPORT
-    /**
-     * @FMAC_SCD_QUEUE_CFG:
-     * Configure a transmit queue, as defined in
-     * &struct iwl_fmac_scd_txq_cfg_cmd.
-     * Only used with 9000-series devices.
-     */
-    FMAC_SCD_QUEUE_CFG = 0xb,
+  /**
+   * @FMAC_SCD_QUEUE_CFG:
+   * Configure a transmit queue, as defined in
+   * &struct iwl_fmac_scd_txq_cfg_cmd.
+   * Only used with 9000-series devices.
+   */
+  FMAC_SCD_QUEUE_CFG = 0xb,
 #endif
 
-    /**
-     * @FMAC_CONFIG:
-     * Configure global or interface specific settings as defined
-     * in &struct iwl_fmac_config_cmd.
-     */
-    FMAC_CONFIG = 0xc,
+  /**
+   * @FMAC_CONFIG:
+   * Configure global or interface specific settings as defined
+   * in &struct iwl_fmac_config_cmd.
+   */
+  FMAC_CONFIG = 0xc,
 
-    /* 0xd is reserved */
-    /* 0xe is reserved */
+  /* 0xd is reserved */
+  /* 0xe is reserved */
 
-    /**
-     * @FMAC_REG_CFG: TODO
-     */
-    FMAC_REG_CFG = 0xf,
+  /**
+   * @FMAC_REG_CFG: TODO
+   */
+  FMAC_REG_CFG = 0xf,
 
-    /* 0x10 is reverved */
-    /* 0x11 is reverved */
-    /* 0x12 is reverved */
-    /* 0x13 is reverved */
+  /* 0x10 is reserved */
+  /* 0x11 is reserved */
+  /* 0x12 is reserved */
+  /* 0x13 is reserved */
 
-    /**
-     * @FMAC_SET_PMK:
-     * Set the key after a successful IEEE802.1X authentication.
-     * The available key types are defined in &iwl_fmac_key_type.
-     * &struct iwl_fmac_mlme_set_pmk_cmd as the command struct.
-     */
-    FMAC_SET_PMK = 0x14,
+  /**
+   * @FMAC_SET_PMK:
+   * Set the key after a successful IEEE802.1X authentication.
+   * The available key types are defined in &iwl_fmac_key_type.
+   * &struct iwl_fmac_mlme_set_pmk_cmd as the command struct.
+   */
+  FMAC_SET_PMK = 0x14,
 
-    /**
-     * @FMAC_ACK_STA_REMOVED:
-     * Acknowledge that station removal was processed and the driver has
-     * stopped using the station ID; uses the notification
-     * &struct iwl_fmac_sta_removed as the command struct.
-     */
-    FMAC_ACK_STA_REMOVED = 0x15,
+  /**
+   * @FMAC_ACK_STA_REMOVED:
+   * Acknowledge that station removal was processed and the driver has
+   * stopped using the station ID; uses the notification
+   * &struct iwl_fmac_sta_removed as the command struct.
+   */
+  FMAC_ACK_STA_REMOVED = 0x15,
 
-    /**
-     * @FMAC_TEST_FIPS:
-     * Test security algorithms implemented in FMAC
-     */
-    FMAC_TEST_FIPS = 0x16,
+  /**
+   * @FMAC_TEST_FIPS:
+   * Test security algorithms implemented in FMAC
+   */
+  FMAC_TEST_FIPS = 0x16,
 
-    /* 0x17 is reserved */
-    /* 0x19 is reserved */
-    /* 0x1a is reserved */
-    /* 0x1b is reserved */
-    /* 0x1c is reserved */
-    /* 0x1d is reserved */
-    /* 0x1e is reserved */
+  /* 0x17 is reserved */
+  /* 0x19 is reserved */
+  /* 0x1a is reserved */
+  /* 0x1b is reserved */
+  /* 0x1c is reserved */
+  /* 0x1d is reserved */
+  /* 0x1e is reserved */
 
-    /**
-     * @FMAC_MIC_FAILURE:
-     * Inform FMAC about TKIP MMIC failures, FMAC will run countermeasures.
-     * &struct iwl_fmac_mic_failure as the command struct.
-     */
-    FMAC_MIC_FAILURE = 0x1f,
+  /**
+   * @FMAC_MIC_FAILURE:
+   * Inform FMAC about TKIP MMIC failures, FMAC will run countermeasures.
+   * &struct iwl_fmac_mic_failure as the command struct.
+   */
+  FMAC_MIC_FAILURE = 0x1f,
 
-    /**
-     * @FMAC_SET_MONITOR_CHAN:
-     * Set channel of monitor interface.
-     * &struct iwl_fmac_set_monitor_chan_cmd as the command struct.
-     */
-    FMAC_SET_MONITOR_CHAN = 0x20,
+  /**
+   * @FMAC_SET_MONITOR_CHAN:
+   * Set channel of monitor interface.
+   * &struct iwl_fmac_set_monitor_chan_cmd as the command struct.
+   */
+  FMAC_SET_MONITOR_CHAN = 0x20,
 
-    /* 0x21 is reserved */
+  /* 0x21 is reserved */
 
-    /**
-     * @FMAC_HOST_BASED_AP:
-     * Manage (start / modify / stop) a host based AP.
-     * &struct iwl_fmac_host_ap_cmd as the command struct or
-     * &struct iwl_fmac_host_ap_resp for the response
-     */
-    FMAC_HOST_BASED_AP = 0x22,
+  /**
+   * @FMAC_HOST_BASED_AP:
+   * Manage (start / modify / stop) a host based AP.
+   * &struct iwl_fmac_host_ap_cmd as the command struct or
+   * &struct iwl_fmac_host_ap_resp for the response
+   */
+  FMAC_HOST_BASED_AP = 0x22,
 
-    /**
-     * @FMAC_HOST_BASED_AP_STA:
-     * Add / modify / remove stations for the host based AP.
-     * &struct iwl_fmac_host_ap_sta_cmd as the command struct.
-     */
-    FMAC_HOST_BASED_AP_STA = 0x23,
+  /**
+   * @FMAC_HOST_BASED_AP_STA:
+   * Add / modify / remove stations for the host based AP.
+   * &struct iwl_fmac_host_ap_sta_cmd as the command struct.
+   */
+  FMAC_HOST_BASED_AP_STA = 0x23,
 
-    /**
-     * @FMAC_TEMPORAL_KEY:
-     * Add / remove keys for the host based AP.
-     * &struct iwl_fmac_temporal_key_cmd as the command struct.
-     * &struct iwl_fmac_temporal_key_resp is the response.
-     */
-    FMAC_TEMPORAL_KEY = 0x24,
+  /**
+   * @FMAC_TEMPORAL_KEY:
+   * Add / remove keys for the host based AP.
+   * &struct iwl_fmac_temporal_key_cmd as the command struct.
+   * &struct iwl_fmac_temporal_key_resp is the response.
+   */
+  FMAC_TEMPORAL_KEY = 0x24,
 
-    /**
-     * @FMAC_TKIP_SET_MCAST_RSC:
-     * Update TKIP MCAST Receive Sequence Counter. The driver should send
-     * this command every time the 4 high bytes of the RSC change.
-     * &struct iwl_fmac_tkip_mcast_rsc is the command struct.
-     */
-    FMAC_TKIP_SET_MCAST_RSC = 0x25,
+  /**
+   * @FMAC_TKIP_SET_MCAST_RSC:
+   * Update TKIP MCAST Receive Sequence Counter. The driver should send
+   * this command every time the 4 high bytes of the RSC change.
+   * &struct iwl_fmac_tkip_mcast_rsc is the command struct.
+   */
+  FMAC_TKIP_SET_MCAST_RSC = 0x25,
 
-    /**
-     * @FMAC_PORT_AUTHORIZED:
-     * Inform FMAC that VIF is authorized.
-     * &struct iwl_fmac_port_authorized_cmd as the command struct.
-     */
-    FMAC_PORT_AUTHORIZED = 0x26,
+  /**
+   * @FMAC_PORT_AUTHORIZED:
+   * Inform FMAC that VIF is authorized.
+   * &struct iwl_fmac_port_authorized_cmd as the command struct.
+   */
+  FMAC_PORT_AUTHORIZED = 0x26,
 
-    /**
-     * @FMAC_ROAM:
-     * Roam to the current network, using the configuration defined in
-     * &struct iwl_fmac_connect_cmd.
-     * The roam flow is asynchronous and upon completion
-     * a %FMAC_ROAM_RESULT notification is sent by FMAC using &struct
-     * iwl_fmac_roam_result.
-     */
-    FMAC_ROAM = 0x27,
+  /**
+   * @FMAC_ROAM:
+   * Roam to the current network, using the configuration defined in
+   * &struct iwl_fmac_connect_cmd.
+   * The roam flow is asynchronous and upon completion
+   * a %FMAC_ROAM_RESULT notification is sent by FMAC using &struct
+   * iwl_fmac_roam_result.
+   */
+  FMAC_ROAM = 0x27,
 
-    /**
-     * @FMAC_RECOVER:
-     * Ask FMAC to recover after a firmware reset using the configuration
-     * blob in &struct iwl_fmac_recover_cmd.
-     */
-    FMAC_RECOVER = 0x28,
+  /**
+   * @FMAC_RECOVER:
+   * Ask FMAC to recover after a firmware reset using the configuration
+   * blob in &struct iwl_fmac_recover_cmd.
+   */
+  FMAC_RECOVER = 0x28,
 
-    /* Notifications */
+  /* Notifications */
 
-    /**
-     * @FMAC_RECOVERY_COMPLETE:
-     * Notifies that the recovery is complete. Uses the
-     * &struct iwl_fmac_recovery_complete as the notification structure.
-     */
-    FMAC_RECOVERY_COMPLETE = 0xe8,
+  /**
+   * @FMAC_RECOVERY_COMPLETE:
+   * Notifies that the recovery is complete. Uses the
+   * &struct iwl_fmac_recovery_complete as the notification structure.
+   */
+  FMAC_RECOVERY_COMPLETE = 0xe8,
 
-    /**
-     * @FMAC_INACTIVE_STATION:
-     * Notifies about a station that we haven't heard from and that
-     * does't reply to our probe (Null Data Packet). This station
-     * should be disconnected.
-     * &struct iwl_fmac_inactive_sta is the notification struct.
-     */
-    FMAC_INACTIVE_STATION = 0xe9,
+  /**
+   * @FMAC_INACTIVE_STATION:
+   * Notifies about a station that we haven't heard from and that
+   * doesn't reply to our probe (Null Data Packet). This station
+   * should be disconnected.
+   * &struct iwl_fmac_inactive_sta is the notification struct.
+   */
+  FMAC_INACTIVE_STATION = 0xe9,
 
-    /**
-     * @FMAC_ROAM_IS_NEEDED:
-     * Roam is needed notification, with roam information
-     * given in &struct iwl_fmac_roam_is_needed.
-     */
-    FMAC_ROAM_IS_NEEDED = 0xea,
+  /**
+   * @FMAC_ROAM_IS_NEEDED:
+   * Roam is needed notification, with roam information
+   * given in &struct iwl_fmac_roam_is_needed.
+   */
+  FMAC_ROAM_IS_NEEDED = 0xea,
 
-    /**
-     * @FMAC_ROAM_RESULT:
-     * Roam result notification, with information given in &struct
-     * iwl_fmac_roam_result.
-     */
-    FMAC_ROAM_RESULT = 0xeb,
+  /**
+   * @FMAC_ROAM_RESULT:
+   * Roam result notification, with information given in &struct
+   * iwl_fmac_roam_result.
+   */
+  FMAC_ROAM_RESULT = 0xeb,
 
 #ifdef CPTCFG_IWLFMAC_9000_SUPPORT
-    /**
-     * @FMAC_SEND_FRAME:
-     * Notification about a frame that should be sent by the host
-     * on FMAC's behalf as defined in &struct iwl_fmac_send_frame_notif
-     * Only used with 9000-series devices.
-     */
-    FMAC_SEND_FRAME = 0xf0,
+  /**
+   * @FMAC_SEND_FRAME:
+   * Notification about a frame that should be sent by the host
+   * on FMAC's behalf as defined in &struct iwl_fmac_send_frame_notif
+   * Only used with 9000-series devices.
+   */
+  FMAC_SEND_FRAME = 0xf0,
 #endif
 
-    /* 0xf1 is reserved */
-    /* 0xf2 is reserved */
+  /* 0xf1 is reserved */
+  /* 0xf2 is reserved */
 
-    /**
-     * @FMAC_EAPOL:
-     * Notification about a received EAPOL frame. This notification is
-     * used to notify the host about EAPOL frames required for IEEE802.1X
-     * authentication. Other EAPOL frames are not passed to the host.
-     */
-    FMAC_EAPOL = 0xf3,
+  /**
+   * @FMAC_EAPOL:
+   * Notification about a received EAPOL frame. This notification is
+   * used to notify the host about EAPOL frames required for IEEE802.1X
+   * authentication. Other EAPOL frames are not passed to the host.
+   */
+  FMAC_EAPOL = 0xf3,
 
-    /* 0xf4 is reserved */
-    /* 0xf5 is reserved */
+  /* 0xf4 is reserved */
+  /* 0xf5 is reserved */
 
-    /**
-     * @FMAC_REG_UPDATE: TODO
-     */
-    FMAC_REG_UPDATE = 0xf6,
+  /**
+   * @FMAC_REG_UPDATE: TODO
+   */
+  FMAC_REG_UPDATE = 0xf6,
 
-    /**
-     * @FMAC_TRIGGER_NOTIF: TODO
-     */
-    FMAC_TRIGGER_NOTIF = 0xf7,
+  /**
+   * @FMAC_TRIGGER_NOTIF: TODO
+   */
+  FMAC_TRIGGER_NOTIF = 0xf7,
 
-    /* 0xf8 is reserved */
+  /* 0xf8 is reserved */
 
-    /* 0xf9 is reserved */
-    /* 0xfa is reserved */
+  /* 0xf9 is reserved */
+  /* 0xfa is reserved */
 
-    /**
-     * @FMAC_KEYS_UPDATE:
-     * Notification about new keys, where the new key configuration is
-     * given in &struct iwl_fmac_keys_update_notif.
-     */
-    FMAC_KEYS_UPDATE = 0xfb,
+  /**
+   * @FMAC_KEYS_UPDATE:
+   * Notification about new keys, where the new key configuration is
+   * given in &struct iwl_fmac_keys_update_notif.
+   */
+  FMAC_KEYS_UPDATE = 0xfb,
 
-    /**
-     * @FMAC_DISCONNECTED:
-     * For station interface, disconnection from a network notification,
-     * with additional information given in &struct iwl_fmac_disconnect_cmd.
-     */
-    FMAC_DISCONNECTED = 0xfc,
+  /**
+   * @FMAC_DISCONNECTED:
+   * For station interface, disconnection from a network notification,
+   * with additional information given in &struct iwl_fmac_disconnect_cmd.
+   */
+  FMAC_DISCONNECTED = 0xfc,
 
-    /**
-     * @FMAC_DEBUG:
-     * Debug information notification with additional information given
-     * in &struct iwl_fmac_debug_notif.
-     */
-    FMAC_DEBUG = 0xfd,
+  /**
+   * @FMAC_DEBUG:
+   * Debug information notification with additional information given
+   * in &struct iwl_fmac_debug_notif.
+   */
+  FMAC_DEBUG = 0xfd,
 
-    /**
-     * @FMAC_CONNECT_RESULT:
-     * Connect request result notification, with the
-     * connection information given in &struct iwl_fmac_connect_result.
-     */
-    FMAC_CONNECT_RESULT = 0xfe,
+  /**
+   * @FMAC_CONNECT_RESULT:
+   * Connect request result notification, with the
+   * connection information given in &struct iwl_fmac_connect_result.
+   */
+  FMAC_CONNECT_RESULT = 0xfe,
 
-    /**
-     * @FMAC_SCAN_COMPLETE:
-     * Scan completed notification, with additional information
-     * in &struct iwl_fmac_scan_complete_notif.
-     */
-    FMAC_SCAN_COMPLETE = 0xff,
+  /**
+   * @FMAC_SCAN_COMPLETE:
+   * Scan completed notification, with additional information
+   * in &struct iwl_fmac_scan_complete_notif.
+   */
+  FMAC_SCAN_COMPLETE = 0xff,
 };
 
 #define IWL_FMAC_MAX_SSIDS 20
@@ -389,22 +389,22 @@
  * notification.
  */
 struct iwl_fmac_scan_cmd {
-    uint8_t vif_id;
-    uint8_t random_mac;
-    uint8_t n_ssids;
-    uint8_t n_freqs;
-    __le32 flags;
-    __le16 rates_24;
-    __le16 rates_52;
-    uint8_t ssids[IWL_FMAC_MAX_SSIDS][IEEE80211_MAX_SSID_LEN];
-    uint8_t ssids_lengths[IWL_FMAC_MAX_SSIDS];
-    __le16 freqs[IWL_FMAC_MAX_CHANS];
-    uint8_t bssid[ETH_ALEN];
-    __le16 ie_len;
+  uint8_t vif_id;
+  uint8_t random_mac;
+  uint8_t n_ssids;
+  uint8_t n_freqs;
+  __le32 flags;
+  __le16 rates_24;
+  __le16 rates_52;
+  uint8_t ssids[IWL_FMAC_MAX_SSIDS][IEEE80211_MAX_SSID_LEN];
+  uint8_t ssids_lengths[IWL_FMAC_MAX_SSIDS];
+  __le16 freqs[IWL_FMAC_MAX_CHANS];
+  uint8_t bssid[ETH_ALEN];
+  __le16 ie_len;
 #ifndef _MSC_VER
-    uint8_t ie[0];
+  uint8_t ie[0];
 #endif
-    /* pad to a multiple of 4 bytes */
+  /* pad to a multiple of 4 bytes */
 } __packed;
 
 /**
@@ -415,8 +415,8 @@
  * Request to abort an ongoing scan operation initiated by %FMAC_SCAN command.
  */
 struct iwl_fmac_scan_abort_cmd {
-    uint8_t vif_id;
-    uint8_t reserved[3];
+  uint8_t vif_id;
+  uint8_t reserved[3];
 } __packed;
 
 /**
@@ -432,27 +432,27 @@
  * @IWL_FMAC_IFTYPE_ANY: catch-all interface type for config command.
  */
 enum iwl_fmac_vif_type {
-    IWL_FMAC_IFTYPE_MGD = 1,
-    /* 2 is reserved */
-    IWL_FMAC_IFTYPE_P2P_CLIENT = 3,
-    IWL_FMAC_IFTYPE_P2P_GO,
-    IWL_FMAC_IFTYPE_P2P_DEVICE,
-    /* 6 is reserved */
-    IWL_FMAC_IFTYPE_MONITOR = 7,
-    IWL_FMAC_IFTYPE_HOST_BASED_AP,
-    /* 7 is reserved */
-    IWL_FMAC_IFTYPE_ANY = 0xff,
+  IWL_FMAC_IFTYPE_MGD = 1,
+  /* 2 is reserved */
+  IWL_FMAC_IFTYPE_P2P_CLIENT = 3,
+  IWL_FMAC_IFTYPE_P2P_GO,
+  IWL_FMAC_IFTYPE_P2P_DEVICE,
+  /* 6 is reserved */
+  IWL_FMAC_IFTYPE_MONITOR = 7,
+  IWL_FMAC_IFTYPE_HOST_BASED_AP,
+  /* 7 is reserved */
+  IWL_FMAC_IFTYPE_ANY = 0xff,
 };
 
 #define IWL_FMAC_STATION_COUNT 16
 
 enum iwl_fmac_tx_fifo {
-    IWL_FMAC_TX_FIFO_BK = 0,
-    IWL_FMAC_TX_FIFO_BE,
-    IWL_FMAC_TX_FIFO_VI,
-    IWL_FMAC_TX_FIFO_VO,
-    IWL_FMAC_TX_FIFO_MCAST = 5,
-    IWL_FMAC_TX_FIFO_CMD = 7,
+  IWL_FMAC_TX_FIFO_BK = 0,
+  IWL_FMAC_TX_FIFO_BE,
+  IWL_FMAC_TX_FIFO_VI,
+  IWL_FMAC_TX_FIFO_VO,
+  IWL_FMAC_TX_FIFO_MCAST = 5,
+  IWL_FMAC_TX_FIFO_CMD = 7,
 };
 
 static const uint8_t iwl_fmac_tid_to_tx_fifo[] = {
@@ -471,9 +471,9 @@
  * completion, the operation result is conveyed using &iwl_fmac_add_vif_resp.
  */
 struct iwl_fmac_add_vif_cmd {
-    uint8_t addr[ETH_ALEN];
-    uint8_t type;
-    uint8_t reserved;
+  uint8_t addr[ETH_ALEN];
+  uint8_t type;
+  uint8_t reserved;
 } __packed;
 
 /**
@@ -482,8 +482,8 @@
  * @IWL_ADD_VIF_FAILURE: Failure to add a new interface.
  */
 enum iwl_fw_add_vif_resp_status {
-    IWL_ADD_VIF_SUCCESS = 0,
-    IWL_ADD_VIF_FAILURE,
+  IWL_ADD_VIF_SUCCESS = 0,
+  IWL_ADD_VIF_FAILURE,
 };
 
 /**
@@ -493,8 +493,8 @@
  * successful %FMAC_ADD_VIF command.
  */
 struct iwl_fmac_del_vif_cmd {
-    uint8_t id;
-    uint8_t reserved[3];
+  uint8_t id;
+  uint8_t reserved[3];
 } __packed;
 
 /**
@@ -504,9 +504,9 @@
  * @reserved: for alignment.
  */
 struct iwl_fmac_add_vif_resp {
-    uint8_t status;
-    uint8_t id;
-    __le16 reserved;
+  uint8_t status;
+  uint8_t id;
+  __le16 reserved;
 } __packed;
 
 /**
@@ -518,9 +518,9 @@
  *  Otherwise, the BSSIDs list is a blacklist specifying disallowed BSSIDs.
  */
 enum iwl_fmac_connection_flags {
-    IWL_FMAC_FREQ_IN_USE = BIT(0),
-    IWL_FMAC_FREQ_HINT = BIT(1),
-    IWL_FMAC_CONNECT_FLAGS_BSSID_WHITELIST = BIT(2),
+  IWL_FMAC_FREQ_IN_USE = BIT(0),
+  IWL_FMAC_FREQ_HINT = BIT(1),
+  IWL_FMAC_CONNECT_FLAGS_BSSID_WHITELIST = BIT(2),
 };
 
 /*
@@ -543,10 +543,10 @@
 #define IWL_FMAC_CIPHER_GCMP BIT(6)
 #define IWL_FMAC_CIPHER_GCMP_256 BIT(8)
 #define IWL_FMAC_CIPHER_CCMP_256 BIT(9)
-#define IWL_FMAC_SUPPORTED_CIPHERS                                                \
-    (IWL_FMAC_CIPHER_NONE | IWL_FMAC_CIPHER_WEP40 | IWL_FMAC_CIPHER_WEP104 |      \
-     IWL_FMAC_CIPHER_TKIP | IWL_FMAC_CIPHER_CCMP | IWL_FMAC_CIPHER_AES_128_CMAC | \
-     IWL_FMAC_CIPHER_GCMP | IWL_FMAC_CIPHER_GCMP_256 | IWL_FMAC_CIPHER_CCMP_256)
+#define IWL_FMAC_SUPPORTED_CIPHERS                                                                \
+  (IWL_FMAC_CIPHER_NONE | IWL_FMAC_CIPHER_WEP40 | IWL_FMAC_CIPHER_WEP104 | IWL_FMAC_CIPHER_TKIP | \
+   IWL_FMAC_CIPHER_CCMP | IWL_FMAC_CIPHER_AES_128_CMAC | IWL_FMAC_CIPHER_GCMP |                   \
+   IWL_FMAC_CIPHER_GCMP_256 | IWL_FMAC_CIPHER_CCMP_256)
 
 /**
  * Supported key management suites:
@@ -565,10 +565,10 @@
 #define IWL_FMAC_KEY_MGMT_PSK_SHA256 BIT(8)
 #define IWL_FMAC_KEY_MGMT_IEEE8021X_SUITE_B BIT(16)
 #define IWL_FMAC_KEY_MGMT_IEEE8021X_SUITE_B_192 BIT(17)
-#define IWL_FMAC_SUPPORTED_KEY_MGMT                                                                \
-    (IWL_FMAC_KEY_MGMT_PSK | IWL_FMAC_KEY_MGMT_PSK_SHA256 | IWL_FMAC_KEY_MGMT_FT_IEEE8021X |       \
-     IWL_FMAC_KEY_MGMT_FT_PSK | IWL_FMAC_KEY_MGMT_IEEE8021X | IWL_FMAC_KEY_MGMT_IEEE8021X_SHA256 | \
-     IWL_FMAC_KEY_MGMT_IEEE8021X_SUITE_B | IWL_FMAC_KEY_MGMT_IEEE8021X_SUITE_B_192)
+#define IWL_FMAC_SUPPORTED_KEY_MGMT                                                              \
+  (IWL_FMAC_KEY_MGMT_PSK | IWL_FMAC_KEY_MGMT_PSK_SHA256 | IWL_FMAC_KEY_MGMT_FT_IEEE8021X |       \
+   IWL_FMAC_KEY_MGMT_FT_PSK | IWL_FMAC_KEY_MGMT_IEEE8021X | IWL_FMAC_KEY_MGMT_IEEE8021X_SHA256 | \
+   IWL_FMAC_KEY_MGMT_IEEE8021X_SUITE_B | IWL_FMAC_KEY_MGMT_IEEE8021X_SUITE_B_192)
 
 /**
  * Supported security protocols:
@@ -586,9 +586,9 @@
  * @IWL_FMAC_MFP_REQUIRED: management frame protection is required
  */
 enum iwl_fmac_mfp_mode {
-    IWL_FMAC_MFP_NO,
-    IWL_FMAC_MFP_OPTIONAL,
-    IWL_FMAC_MFP_REQUIRED,
+  IWL_FMAC_MFP_NO,
+  IWL_FMAC_MFP_OPTIONAL,
+  IWL_FMAC_MFP_REQUIRED,
 };
 
 #define IWL_NUM_WEP_KEYS 4
@@ -619,23 +619,23 @@
  * @u: union of the various types of key material
  */
 struct iwl_fmac_crypto {
-    __le32 cipher_group;
-    __le32 ciphers_pairwise;
-    __le32 key_mgmt;
-    uint8_t mfp;
-    uint8_t reserved[3];
-    union {
-        struct {
-            uint8_t psk[32];
-            __le32 proto;
-        } __packed wpa;
-        struct {
-            uint8_t key[IWL_NUM_WEP_KEYS][IWL_MAX_WEP_KEY_LEN];
-            uint8_t key_len[IWL_NUM_WEP_KEYS];
-            uint8_t def_key;
-            uint8_t reserved1[3];
-        } __packed wep;
-    } u;
+  __le32 cipher_group;
+  __le32 ciphers_pairwise;
+  __le32 key_mgmt;
+  uint8_t mfp;
+  uint8_t reserved[3];
+  union {
+    struct {
+      uint8_t psk[32];
+      __le32 proto;
+    } __packed wpa;
+    struct {
+      uint8_t key[IWL_NUM_WEP_KEYS][IWL_MAX_WEP_KEY_LEN];
+      uint8_t key_len[IWL_NUM_WEP_KEYS];
+      uint8_t def_key;
+      uint8_t reserved1[3];
+    } __packed wep;
+  } u;
 } __packed;
 
 #define IWL_FMAC_MAX_BSSIDS 10
@@ -669,19 +669,19 @@
  * the operation result is conveyed by %FMAC_CONNECT_RESULT.
  */
 struct iwl_fmac_connect_cmd {
-    uint8_t vif_id;
-    uint8_t max_retries;
-    __le16 center_freq;
-    __le32 flags;
-    uint8_t bssid[ETH_ALEN];
-    uint8_t reserved1;
-    uint8_t ssid_len;
-    uint8_t ssid[IEEE80211_MAX_SSID_LEN];
+  uint8_t vif_id;
+  uint8_t max_retries;
+  __le16 center_freq;
+  __le32 flags;
+  uint8_t bssid[ETH_ALEN];
+  uint8_t reserved1;
+  uint8_t ssid_len;
+  uint8_t ssid[IEEE80211_MAX_SSID_LEN];
 
-    struct iwl_fmac_crypto crypto;
-    uint8_t reserved2[3];
-    uint8_t n_bssids;
-    uint8_t bssids[IWL_FMAC_MAX_BSSIDS * ETH_ALEN];
+  struct iwl_fmac_crypto crypto;
+  uint8_t reserved2[3];
+  uint8_t n_bssids;
+  uint8_t bssids[IWL_FMAC_MAX_BSSIDS * ETH_ALEN];
 } __packed;
 
 /**
@@ -690,8 +690,8 @@
  * @reserved: reserved for 4 byte alignment.
  */
 struct iwl_fmac_port_authorized_cmd {
-    uint8_t vif_id;
-    uint8_t reserved[3];
+  uint8_t vif_id;
+  uint8_t reserved[3];
 } __packed;
 
 #define UMAC_DEFAULT_KEYS 4
@@ -712,16 +712,16 @@
  * @reserved: reserved for none 9000 family support
  */
 struct iwl_fmac_key {
-    uint8_t valid;
-    uint8_t keyidx;
-    uint8_t hw_keyidx;
-    uint8_t rx_pn_len;
-    uint8_t rx_pn[IWL_FMAC_MAX_PN_LEN];
-    __le32 cipher;
+  uint8_t valid;
+  uint8_t keyidx;
+  uint8_t hw_keyidx;
+  uint8_t rx_pn_len;
+  uint8_t rx_pn[IWL_FMAC_MAX_PN_LEN];
+  __le32 cipher;
 #ifdef CPTCFG_IWLFMAC_9000_SUPPORT
-    uint8_t tkip_mcast_rx_mic_key[IWL_FMAC_TKIP_MCAST_RX_MIC_KEY];
+  uint8_t tkip_mcast_rx_mic_key[IWL_FMAC_TKIP_MCAST_RX_MIC_KEY];
 #else
-    uint8_t reserved[IWL_FMAC_TKIP_MCAST_RX_MIC_KEY];
+  uint8_t reserved[IWL_FMAC_TKIP_MCAST_RX_MIC_KEY];
 #endif /* CPTCFG_IWLFMAC_9000_SUPPORT */
 } __packed;
 
@@ -733,160 +733,160 @@
  * @reserved: for alignment.
  */
 struct iwl_fmac_keys {
-    struct iwl_fmac_key ptk[UMAC_DEFAULT_KEYS];
-    struct iwl_fmac_key gtk[UMAC_DEFAULT_KEYS];
-    uint8_t wep_tx_keyidx;
-    uint8_t reserved[3];
+  struct iwl_fmac_key ptk[UMAC_DEFAULT_KEYS];
+  struct iwl_fmac_key gtk[UMAC_DEFAULT_KEYS];
+  uint8_t wep_tx_keyidx;
+  uint8_t reserved[3];
 } __packed;
 
 /**
  * struct iwl_fmac_connect_result - connect result notification.
  */
 struct iwl_fmac_connect_result {
-    /**
-     * @vif_id:
-     * the interface identifier returned in &iwl_fmac_add_vif_resp
-     */
-    uint8_t vif_id;
+  /**
+   * @vif_id:
+   * the interface identifier returned in &iwl_fmac_add_vif_resp
+   */
+  uint8_t vif_id;
 
-    /**
-     * @sta_id:
-     * on successful connection, holds a station entry index associated
-     * with AP the station interface associated with.
-     */
-    uint8_t sta_id;
+  /**
+   * @sta_id:
+   * on successful connection, holds a station entry index associated
+   * with AP the station interface associated with.
+   */
+  uint8_t sta_id;
 
-    /**
-     * @center_freq:
-     * on successful connection, the center frequency of the BSS.
-     */
-    __le16 center_freq;
+  /**
+   * @center_freq:
+   * on successful connection, the center frequency of the BSS.
+   */
+  __le16 center_freq;
 
-    /**
-     * @status:
-     * status code as defined in IEEE 802.11-2016 Table 9-46
-     * ("Status codes").
-     */
-    __le16 status;
+  /**
+   * @status:
+   * status code as defined in IEEE 802.11-2016 Table 9-46
+   * ("Status codes").
+   */
+  __le16 status;
 
-    /**
-     * @bssid:
-     * on successful connection, the bssid of the BSS.
-     */
-    uint8_t bssid[ETH_ALEN];
+  /**
+   * @bssid:
+   * on successful connection, the bssid of the BSS.
+   */
+  uint8_t bssid[ETH_ALEN];
 
-    /**
-     * @signal:
-     * on successful connection, the signal in dBm of the BSS.
-     */
-    __le32 signal;
+  /**
+   * @signal:
+   * on successful connection, the signal in dBm of the BSS.
+   */
+  __le32 signal;
 
-    /**
-     * @capability:
-     * on successful connection, the BSS capabilities as reported in
-     * the beacon/probe response.
-     */
-    __le16 capability;
+  /**
+   * @capability:
+   * on successful connection, the BSS capabilities as reported in
+   * the beacon/probe response.
+   */
+  __le16 capability;
 
-    /**
-     * @beacon_int:
-     * on successful connection, the beacon interval of the BSS.
-     */
-    __le16 beacon_int;
+  /**
+   * @beacon_int:
+   * on successful connection, the beacon interval of the BSS.
+   */
+  __le16 beacon_int;
 
-    /**
-     * @tsf: TODO
-     */
-    __le64 tsf;
+  /**
+   * @tsf: TODO
+   */
+  __le64 tsf;
 
-    /**
-     * @presp_ielen:
-     * the length of the probe response ies.
-     */
-    __le32 presp_ielen;
+  /**
+   * @presp_ielen:
+   * the length of the probe response ies.
+   */
+  __le32 presp_ielen;
 
-    /**
-     * @beacon_ielen:
-     * the length of the beacon ies.
-     */
-    __le32 beacon_ielen;
+  /**
+   * @beacon_ielen:
+   * the length of the beacon ies.
+   */
+  __le32 beacon_ielen;
 
-    /**
-     * @assoc_req_ie_len:
-     * the length of the association request body (fixed part + IEs).
-     */
-    __le32 assoc_req_ie_len;
+  /**
+   * @assoc_req_ie_len:
+   * the length of the association request body (fixed part + IEs).
+   */
+  __le32 assoc_req_ie_len;
 
-    /**
-     * @assoc_resp_ie_len:
-     * the length of the association response body (fixed part + IEs).
-     */
-    __le32 assoc_resp_ie_len;
+  /**
+   * @assoc_resp_ie_len:
+   * the length of the association response body (fixed part + IEs).
+   */
+  __le32 assoc_resp_ie_len;
 
-    /**
-     * @qos:
-     * 1 iff the BSS supports WMM.
-     */
-    uint8_t qos;
+  /**
+   * @qos:
+   * 1 iff the BSS supports WMM.
+   */
+  uint8_t qos;
 
-    /**
-     * @bk_acm:
-     * 1 iff %qos and the BK AC requires admission control.
-     */
-    uint8_t bk_acm;
+  /**
+   * @bk_acm:
+   * 1 iff %qos and the BK AC requires admission control.
+   */
+  uint8_t bk_acm;
 
-    /**
-     * @be_acm:
-     * 1 iff %qos and the BE AC requires admission control.
-     */
-    uint8_t be_acm;
+  /**
+   * @be_acm:
+   * 1 iff %qos and the BE AC requires admission control.
+   */
+  uint8_t be_acm;
 
-    /**
-     * @vi_acm:
-     * 1 iff %qos and the VI AC requires admission control.
-     */
-    uint8_t vi_acm;
+  /**
+   * @vi_acm:
+   * 1 iff %qos and the VI AC requires admission control.
+   */
+  uint8_t vi_acm;
 
-    /**
-     * @vo_acm:
-     * 1 iff %qos and the VO AC requires admission control.
-     */
-    uint8_t vo_acm;
+  /**
+   * @vo_acm:
+   * 1 iff %qos and the VO AC requires admission control.
+   */
+  uint8_t vo_acm;
 
-    /**
-     * @not_found:
-     * 1 iff no BSS was found suitable for connection.
-     */
-    uint8_t not_found;
+  /**
+   * @not_found:
+   * 1 iff no BSS was found suitable for connection.
+   */
+  uint8_t not_found;
 
-    /**
-     * @authorized: TODO
-     */
-    uint8_t authorized;
+  /**
+   * @authorized: TODO
+   */
+  uint8_t authorized;
 
-    /**
-     * @reassoc:
-     * flag indicates if the assoc request was reassoc.
-     */
-    uint8_t reassoc;
+  /**
+   * @reassoc:
+   * flag indicates if the assoc request was reassoc.
+   */
+  uint8_t reassoc;
 
-    /**
-     * @keys:
-     * On successful connection to a secure network that does not require
-     * 802.1x authentication and key derivation, holds the security keys as
-     * defined in &iwl_fmac_keys.
-     */
-    struct iwl_fmac_keys keys;
+  /**
+   * @keys:
+   * On successful connection to a secure network that does not require
+   * 802.1x authentication and key derivation, holds the security keys as
+   * defined in &iwl_fmac_keys.
+   */
+  struct iwl_fmac_keys keys;
 
-    /**
-     * @ie_data:
-     * the probe response ies (&presp_ielen), followed by the beacon ies
-     * (&beacon_ielen), followed by the association request ies
-     * (&assoc_req_ie_len) followed by the association response ies
-     * (&assoc_resp_ie_len).
-     */
+  /**
+   * @ie_data:
+   * the probe response ies (&presp_ielen), followed by the beacon ies
+   * (&beacon_ielen), followed by the association request ies
+   * (&assoc_req_ie_len) followed by the association response ies
+   * (&assoc_resp_ie_len).
+   */
 #ifndef _MSC_VER
-    uint8_t ie_data[0];
+  uint8_t ie_data[0];
 #endif
 } __packed;
 
@@ -903,9 +903,9 @@
  * connection is no longer valid.
  */
 struct iwl_fmac_disconnect_cmd {
-    uint8_t vif_id;
-    uint8_t locally_generated;
-    __le16 reason;
+  uint8_t vif_id;
+  uint8_t locally_generated;
+  __le16 reason;
 } __packed;
 
 /**
@@ -920,10 +920,10 @@
  *  transmitter by fmac.
  */
 enum iwl_fmac_dbg_type {
-    IWL_FMAC_DBG_INT_CMD,
-    IWL_FMAC_DBG_INT_RESP,
-    IWL_FMAC_DBG_INT_NOTIF,
-    IWL_FMAC_DBG_INT_TX,
+  IWL_FMAC_DBG_INT_CMD,
+  IWL_FMAC_DBG_INT_RESP,
+  IWL_FMAC_DBG_INT_NOTIF,
+  IWL_FMAC_DBG_INT_TX,
 };
 
 /**
@@ -936,10 +936,10 @@
  * components.
  */
 struct iwl_fmac_debug_notif {
-    uint8_t type;
-    uint8_t reserved[3];
+  uint8_t type;
+  uint8_t reserved[3];
 #ifndef _MSC_VER
-    uint8_t data[0];
+  uint8_t data[0];
 #endif
 } __packed;
 
@@ -956,11 +956,11 @@
  * the given station.
  */
 struct iwl_fmac_keys_update_notif {
-    uint8_t vif_id;
-    uint8_t sta_id;
-    uint8_t reserved[2];
+  uint8_t vif_id;
+  uint8_t sta_id;
+  uint8_t reserved[2];
 
-    struct iwl_fmac_keys keys;
+  struct iwl_fmac_keys keys;
 } __packed;
 
 /**
@@ -972,8 +972,8 @@
  * %FMAC_SCAN.
  */
 struct iwl_fmac_scan_complete_notif {
-    uint8_t aborted;
-    uint8_t reserved[3];
+  uint8_t aborted;
+  uint8_t reserved[3];
 } __packed;
 
 /**
@@ -988,14 +988,14 @@
  * @NVM_SKU_CAP_11AX_ENABLED: 80211.11ax support enabled.
  */
 enum iwl_fmac_nvm_sku_cap {
-    NVM_SKU_CAP_BAND_24GHZ_ENABLED = 0x1,
-    NVM_SKU_CAP_BAND_52GHZ_ENABLED = 0x2,
-    NVM_SKU_CAP_11N_ENABLED = 0x4,
-    NVM_SKU_CAP_11AC_ENABLED = 0x8,
-    NVM_SKU_CAP_AMT_ENABLED = 0x10,
-    NVM_SKU_CAP_IPAN_ENABLED = 0x20,
-    NVM_SKU_CAP_MIMO_DISABLED = 0x40,
-    NVM_SKU_CAP_11AX_ENABLED = 0x80,
+  NVM_SKU_CAP_BAND_24GHZ_ENABLED = 0x1,
+  NVM_SKU_CAP_BAND_52GHZ_ENABLED = 0x2,
+  NVM_SKU_CAP_11N_ENABLED = 0x4,
+  NVM_SKU_CAP_11AC_ENABLED = 0x8,
+  NVM_SKU_CAP_AMT_ENABLED = 0x10,
+  NVM_SKU_CAP_IPAN_ENABLED = 0x20,
+  NVM_SKU_CAP_MIMO_DISABLED = 0x40,
+  NVM_SKU_CAP_11AX_ENABLED = 0x80,
 };
 
 /**
@@ -1018,20 +1018,20 @@
  * See 9.4.2.56.2 ("HT Capability Information field") in P802.11Revmc_D5.0.
  */
 enum iwl_fmac_nvm_ht_cap {
-    NVM_HT_CAP_LDPC_CODING = 0x0001,
-    NVM_HT_CAP_SUP_WIDTH_20_40 = 0x0002,
-    NVM_HT_CAP_SM_PS = 0x000C,
-    NVM_HT_CAP_GRN_FLD = 0x0010,
-    NVM_HT_CAP_SGI_20 = 0x0020,
-    NVM_HT_CAP_SGI_40 = 0x0040,
-    NVM_HT_CAP_TX_STBC = 0x0080,
-    NVM_HT_CAP_RX_STBC = 0x0300,
-    NVM_HT_CAP_DELAY_BA = 0x0400,
-    NVM_HT_CAP_MAX_AMSDU = 0x0800,
-    NVM_HT_CAP_DSSSCCK40 = 0x1000,
-    NVM_HT_CAP_RESERVED = 0x2000,
-    NVM_HT_CAP_40MHZ_INTOLERANT = 0x4000,
-    NVM_HT_CAP_LSIG_TXOP_PROT = 0x8000,
+  NVM_HT_CAP_LDPC_CODING = 0x0001,
+  NVM_HT_CAP_SUP_WIDTH_20_40 = 0x0002,
+  NVM_HT_CAP_SM_PS = 0x000C,
+  NVM_HT_CAP_GRN_FLD = 0x0010,
+  NVM_HT_CAP_SGI_20 = 0x0020,
+  NVM_HT_CAP_SGI_40 = 0x0040,
+  NVM_HT_CAP_TX_STBC = 0x0080,
+  NVM_HT_CAP_RX_STBC = 0x0300,
+  NVM_HT_CAP_DELAY_BA = 0x0400,
+  NVM_HT_CAP_MAX_AMSDU = 0x0800,
+  NVM_HT_CAP_DSSSCCK40 = 0x1000,
+  NVM_HT_CAP_RESERVED = 0x2000,
+  NVM_HT_CAP_40MHZ_INTOLERANT = 0x4000,
+  NVM_HT_CAP_LSIG_TXOP_PROT = 0x8000,
 };
 
 /**
@@ -1072,35 +1072,35 @@
  * See 9.4.2.158.2 ("VHT Capabilities Information field") in P802.11Revmc_D5.0.
  */
 enum iwl_fmac_nvm_vht_cap {
-    NVM_VHT_CAP_MAX_MPDU_LENGTH_3895 = 0x00000000,
-    NVM_VHT_CAP_MAX_MPDU_LENGTH_7991 = 0x00000001,
-    NVM_VHT_CAP_MAX_MPDU_LENGTH_11454 = 0x00000002,
-    NVM_VHT_CAP_MAX_MPDU_MASK = 0x00000003,
-    NVM_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ = 0x00000004,
-    NVM_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ = 0x00000008,
-    NVM_VHT_CAP_SUPP_CHAN_WIDTH_MASK = 0x0000000C,
-    NVM_VHT_CAP_RXLDPC = 0x00000010,
-    NVM_VHT_CAP_SHORT_GI_80 = 0x00000020,
-    NVM_VHT_CAP_SHORT_GI_160 = 0x00000040,
-    NVM_VHT_CAP_TXSTBC = 0x00000080,
-    NVM_VHT_CAP_RXSTBC_1 = 0x00000100,
-    NVM_VHT_CAP_RXSTBC_2 = 0x00000200,
-    NVM_VHT_CAP_RXSTBC_3 = 0x00000300,
-    NVM_VHT_CAP_RXSTBC_4 = 0x00000400,
-    NVM_VHT_CAP_RXSTBC_MASK = 0x00000700,
-    NVM_VHT_CAP_SU_BEAMFORMER_CAPABLE = 0x00000800,
-    NVM_VHT_CAP_SU_BEAMFORMEE_CAPABLE = 0x00001000,
-    NVM_VHT_CAP_BEAMFORMEE_STS_MASK = 0x0000e000,
-    NVM_VHT_CAP_SOUNDING_DIMENSIONS_MASK = 0x00070000,
-    NVM_VHT_CAP_MU_BEAMFORMER_CAPABLE = 0x00080000,
-    NVM_VHT_CAP_MU_BEAMFORMEE_CAPABLE = 0x00100000,
-    NVM_VHT_CAP_VHT_TXOP_PS = 0x00200000,
-    NVM_VHT_CAP_HTC_VHT = 0x00400000,
-    NVM_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK = 0x03800000,
-    NVM_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB = 0x08000000,
-    NVM_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB = 0x0c000000,
-    NVM_VHT_CAP_RX_ANTENNA_PATTERN = 0x10000000,
-    NVM_VHT_CAP_TX_ANTENNA_PATTERN = 0x20000000,
+  NVM_VHT_CAP_MAX_MPDU_LENGTH_3895 = 0x00000000,
+  NVM_VHT_CAP_MAX_MPDU_LENGTH_7991 = 0x00000001,
+  NVM_VHT_CAP_MAX_MPDU_LENGTH_11454 = 0x00000002,
+  NVM_VHT_CAP_MAX_MPDU_MASK = 0x00000003,
+  NVM_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ = 0x00000004,
+  NVM_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ = 0x00000008,
+  NVM_VHT_CAP_SUPP_CHAN_WIDTH_MASK = 0x0000000C,
+  NVM_VHT_CAP_RXLDPC = 0x00000010,
+  NVM_VHT_CAP_SHORT_GI_80 = 0x00000020,
+  NVM_VHT_CAP_SHORT_GI_160 = 0x00000040,
+  NVM_VHT_CAP_TXSTBC = 0x00000080,
+  NVM_VHT_CAP_RXSTBC_1 = 0x00000100,
+  NVM_VHT_CAP_RXSTBC_2 = 0x00000200,
+  NVM_VHT_CAP_RXSTBC_3 = 0x00000300,
+  NVM_VHT_CAP_RXSTBC_4 = 0x00000400,
+  NVM_VHT_CAP_RXSTBC_MASK = 0x00000700,
+  NVM_VHT_CAP_SU_BEAMFORMER_CAPABLE = 0x00000800,
+  NVM_VHT_CAP_SU_BEAMFORMEE_CAPABLE = 0x00001000,
+  NVM_VHT_CAP_BEAMFORMEE_STS_MASK = 0x0000e000,
+  NVM_VHT_CAP_SOUNDING_DIMENSIONS_MASK = 0x00070000,
+  NVM_VHT_CAP_MU_BEAMFORMER_CAPABLE = 0x00080000,
+  NVM_VHT_CAP_MU_BEAMFORMEE_CAPABLE = 0x00100000,
+  NVM_VHT_CAP_VHT_TXOP_PS = 0x00200000,
+  NVM_VHT_CAP_HTC_VHT = 0x00400000,
+  NVM_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK = 0x03800000,
+  NVM_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB = 0x08000000,
+  NVM_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB = 0x0c000000,
+  NVM_VHT_CAP_RX_ANTENNA_PATTERN = 0x10000000,
+  NVM_VHT_CAP_TX_ANTENNA_PATTERN = 0x20000000,
 };
 
 #define NVM_HT_MCS_MASK_LEN 10
@@ -1115,10 +1115,10 @@
  * See 9.4.2.56.4 ("Supported MCS Set field") in P802.11Revmc_D5.0.
  */
 struct iwl_fmac_nvm_mcs_info {
-    uint8_t rx_mask[NVM_HT_MCS_MASK_LEN];
-    __le16 rx_highest;
-    uint8_t tx_params;
-    uint8_t reserved[3];
+  uint8_t rx_mask[NVM_HT_MCS_MASK_LEN];
+  __le16 rx_highest;
+  uint8_t tx_params;
+  uint8_t reserved[3];
 } __packed;
 
 /**
@@ -1131,10 +1131,10 @@
  * See 9.4.2.158.3 ("Supported VHT-MCS and NSS Set field") in P802.11Revmc_D5.0.
  */
 struct iwl_fmac_nvm_vht_mcs_info {
-    __le16 rx_mcs_map;
-    __le16 rx_highest;
-    __le16 tx_mcs_map;
-    __le16 tx_highest;
+  __le16 rx_mcs_map;
+  __le16 rx_highest;
+  __le16 tx_mcs_map;
+  __le16 tx_highest;
 } __packed;
 
 /**
@@ -1155,12 +1155,12 @@
  * @mcs: See &iwl_fmac_nvm_mcs_info.
  */
 struct iwl_fmac_nvm_ht {
-    uint8_t ht_supported;
-    uint8_t reserved[3];
-    __le16 cap;
-    uint8_t ampdu_factor;
-    uint8_t ampdu_density;
-    struct iwl_fmac_nvm_mcs_info mcs;
+  uint8_t ht_supported;
+  uint8_t reserved[3];
+  __le16 cap;
+  uint8_t ampdu_factor;
+  uint8_t ampdu_density;
+  struct iwl_fmac_nvm_mcs_info mcs;
 } __packed;
 
 /**
@@ -1171,10 +1171,10 @@
  * @vht_mcs: See %iwl_fmac_nvm_vht_mcs_info
  */
 struct iwl_fmac_nvm_vht {
-    uint8_t vht_supported;
-    uint8_t reserved[3];
-    __le32 cap;
-    struct iwl_fmac_nvm_vht_mcs_info vht_mcs;
+  uint8_t vht_supported;
+  uint8_t reserved[3];
+  __le32 cap;
+  struct iwl_fmac_nvm_vht_mcs_info vht_mcs;
 } __packed;
 
 /**
@@ -1192,15 +1192,15 @@
  * features of fmac.
  */
 struct iwl_fmac_nvm_cmd {
-    uint8_t sku_cap;
-    uint8_t n_addr;
-    uint8_t hw_addr[ETH_ALEN];
+  uint8_t sku_cap;
+  uint8_t n_addr;
+  uint8_t hw_addr[ETH_ALEN];
 #define NVM_CMD_TX_ANT(_x) ((_x)&0xf)
 #define NVM_CMD_RX_ANT(_x) (((_x)&0xf0) >> 4)
-    uint8_t valid_ant;
-    uint8_t reserved[3];
-    struct iwl_fmac_nvm_ht ht[NVM_NUM_BANDS];
-    struct iwl_fmac_nvm_vht vht[NVM_NUM_BANDS];
+  uint8_t valid_ant;
+  uint8_t reserved[3];
+  struct iwl_fmac_nvm_ht ht[NVM_NUM_BANDS];
+  struct iwl_fmac_nvm_vht vht[NVM_NUM_BANDS];
 } __packed;
 
 #ifdef CPTCFG_IWLFMAC_9000_SUPPORT
@@ -1217,10 +1217,10 @@
  * Note that this is only used with 9000-series devices.
  */
 struct iwl_fmac_req_queue {
-    uint8_t vif_id;
-    uint8_t sta_id;
-    uint8_t tid;
-    uint8_t reserved;
+  uint8_t vif_id;
+  uint8_t sta_id;
+  uint8_t tid;
+  uint8_t reserved;
 } __packed;
 
 /**
@@ -1232,8 +1232,8 @@
  * Note that this is only used with 9000-series devices.
  */
 struct iwl_fmac_req_queue_response {
-    uint8_t queue;
-    uint8_t reserved[3];
+  uint8_t queue;
+  uint8_t reserved[3];
 } __packed;
 
 /**
@@ -1249,10 +1249,10 @@
  * Note that this is only used with 9000-series devices.
  */
 struct iwl_fmac_rel_queue {
-    uint8_t vif_id;
-    uint8_t sta_id;
-    uint8_t tid;
-    uint8_t reserved;
+  uint8_t vif_id;
+  uint8_t sta_id;
+  uint8_t tid;
+  uint8_t reserved;
 } __packed;
 
 /**
@@ -1264,8 +1264,8 @@
  * Note that this is only used with 9000-series devices.
  */
 struct iwl_fmac_rel_queue_response {
-    uint8_t free_queue;
-    uint8_t reserved[3];
+  uint8_t free_queue;
+  uint8_t reserved[3];
 } __packed;
 #endif
 
@@ -1281,11 +1281,11 @@
  * algorithm, and instead use the given fixed rate.
  */
 struct iwl_fmac_rs_fixed_cmd {
-    uint8_t sta_id;
-    uint8_t vif_id;
-    uint8_t reduced_txp;
-    uint8_t reserved;
-    __le32 hw_rate;
+  uint8_t sta_id;
+  uint8_t vif_id;
+  uint8_t reduced_txp;
+  uint8_t reserved;
+  __le32 hw_rate;
 } __packed;
 
 #ifdef CPTCFG_IWLFMAC_9000_SUPPORT
@@ -1307,18 +1307,18 @@
  * Note that this is only used with 9000-series devices.
  */
 struct iwl_fmac_scd_txq_cfg_cmd {
-    uint8_t vif_id;
-    uint8_t reserved1[3];
-    uint8_t token;
-    uint8_t sta_id;
-    uint8_t tid;
-    uint8_t scd_queue;
-    uint8_t enable;
-    uint8_t aggregate;
-    uint8_t tx_fifo;
-    uint8_t window;
-    __le16 ssn;
-    __le16 reserved2;
+  uint8_t vif_id;
+  uint8_t reserved1[3];
+  uint8_t token;
+  uint8_t sta_id;
+  uint8_t tid;
+  uint8_t scd_queue;
+  uint8_t enable;
+  uint8_t aggregate;
+  uint8_t tx_fifo;
+  uint8_t window;
+  __le16 ssn;
+  __le16 reserved2;
 } __packed;
 #endif
 
@@ -1329,8 +1329,8 @@
  * @IWL_FMAC_SYNC_SRC_FMAC: the request originated in fmac.
  */
 enum iwl_fmac_sync_source {
-    IWL_FMAC_SYNC_SRC_DRIVER,
-    IWL_FMAC_SYNC_SRC_FMAC,
+  IWL_FMAC_SYNC_SRC_DRIVER,
+  IWL_FMAC_SYNC_SRC_FMAC,
 };
 
 /**
@@ -1338,7 +1338,7 @@
  * @IWL_FMAC_SYNC_TYPE_DELBA: request due to Rx delba.
  */
 enum iwl_fmac_sync_type {
-    IWL_FMAC_SYNC_TYPE_DELBA,
+  IWL_FMAC_SYNC_TYPE_DELBA,
 };
 
 /**
@@ -1352,11 +1352,11 @@
  * FMAC firmware and possibly for use by the driver.
  */
 struct iwl_rxq_sync_payload {
-    uint8_t src;
-    uint8_t type;
-    uint8_t reserved[2];
+  uint8_t src;
+  uint8_t type;
+  uint8_t reserved[2];
 #ifndef _MSC_VER
-    uint8_t payload[0];
+  uint8_t payload[0];
 #endif
 } __packed;
 
@@ -1368,10 +1368,10 @@
  * @reserved: for alignment.
  */
 struct iwl_rx_sync_delba {
-    struct iwl_rxq_sync_payload hdr;
-    uint8_t sta_id;
-    uint8_t ba_id;
-    uint8_t reserved[2];
+  struct iwl_rxq_sync_payload hdr;
+  uint8_t sta_id;
+  uint8_t ba_id;
+  uint8_t reserved[2];
 } __packed;
 
 /**
@@ -1381,9 +1381,9 @@
  * @FMAC_PS_MODE_LP: Low power save mode.
  */
 enum fmac_ps_mode {
-    FMAC_PS_MODE_CAM = 1,
-    FMAC_PS_MODE_BALANCED,
-    FMAC_PS_MODE_LP,
+  FMAC_PS_MODE_CAM = 1,
+  FMAC_PS_MODE_BALANCED,
+  FMAC_PS_MODE_LP,
 };
 
 /**
@@ -1394,10 +1394,10 @@
  * @FMAC_BT_CFG_WIFI: WIFI always gets the antenna.
  */
 enum fmac_bt_cfg_mode {
-    FMAC_BT_CFG_NW = 0,
-    FMAC_BT_CFG_DISABLE,
-    FMAC_BT_CFG_BT,
-    FMAC_BT_CFG_WIFI,
+  FMAC_BT_CFG_NW = 0,
+  FMAC_BT_CFG_DISABLE,
+  FMAC_BT_CFG_BT,
+  FMAC_BT_CFG_WIFI,
 };
 
 /**
@@ -1406,8 +1406,8 @@
  * @FMAC_UAPSD_ENABLE_P2P_CLIENT: U-APSD is enabled for P2P Client role.
  */
 enum fmac_uapsd_enable_mode {
-    FMAC_UAPSD_ENABLE_BSS = BIT(0),
-    FMAC_UAPSD_ENABLE_P2P_CLIENT = BIT(1),
+  FMAC_UAPSD_ENABLE_BSS = BIT(0),
+  FMAC_UAPSD_ENABLE_P2P_CLIENT = BIT(1),
 };
 
 /**
@@ -1424,12 +1424,12 @@
  * @IWL_SCAN_TYPE_MAX: highest index of scan.
  */
 enum umac_scan_type {
-    IWL_SCAN_TYPE_NOT_SET,
-    IWL_SCAN_TYPE_UNASSOC,
-    IWL_SCAN_TYPE_WILD,
-    IWL_SCAN_TYPE_MILD,
-    IWL_SCAN_TYPE_FRAGMENTED,
-    IWL_SCAN_TYPE_MAX,
+  IWL_SCAN_TYPE_NOT_SET,
+  IWL_SCAN_TYPE_UNASSOC,
+  IWL_SCAN_TYPE_WILD,
+  IWL_SCAN_TYPE_MILD,
+  IWL_SCAN_TYPE_FRAGMENTED,
+  IWL_SCAN_TYPE_MAX,
 };
 
 #define IWL_FMAC_POWER_LEVEL_UNSET 0xff
@@ -1442,10 +1442,10 @@
  * @FMAC_SAD_ANT_B: choose antenna B by default
  */
 enum fmac_sad_mode {
-    FMAC_SAD_ENABLED = BIT(0),
-    FMAC_SAD_NIC_DEFAULT = 0 << 1,
-    FMAC_SAD_ANT_A = 1 << 1,
-    FMAC_SAD_ANT_B = 2 << 1,
+  FMAC_SAD_ENABLED = BIT(0),
+  FMAC_SAD_NIC_DEFAULT = 0 << 1,
+  FMAC_SAD_ANT_A = 1 << 1,
+  FMAC_SAD_ANT_B = 2 << 1,
 };
 
 /**
@@ -1512,46 +1512,46 @@
  *  life. No data should be attached to this configuration.
  */
 enum iwl_fmac_config_id {
-    IWL_FMAC_STATIC_CONFIG_U32_START = 0x0,
-    IWL_FMAC_STATIC_CONFIG_POWER_SCHEME = IWL_FMAC_STATIC_CONFIG_U32_START,
-    IWL_FMAC_STATIC_CONFIG_COEX_MODE,
-    IWL_FMAC_STATIC_CONFIG_COEX_SYNC2SCO,
-    IWL_FMAC_STATIC_CONFIG_COEX_PLCR,
-    IWL_FMAC_STATIC_CONFIG_COEX_MPLUT,
-    IWL_FMAC_STATIC_CONFIG_DEPRECATED_1,
-    IWL_FMAC_STATIC_CONFIG_DEPRECATED_2,
-    IWL_FMAC_STATIC_CONFIG_UAPSD_ENABLED,
-    IWL_FMAC_STATIC_CONFIG_LTR_MODE,
-    IWL_FMAC_STATIC_CONFIG_SINGLE_ANT_DIVERSITY_CONF,
-    IWL_FMAC_STATIC_CONFIG_EXTERNAL_WPA,
-    IWL_FMAC_STATIC_CONFIG_U32_MAX,
-    IWL_FMAC_STATIC_CONFIG_U32_NUM =
-        IWL_FMAC_STATIC_CONFIG_U32_MAX - IWL_FMAC_STATIC_CONFIG_U32_START,
+  IWL_FMAC_STATIC_CONFIG_U32_START = 0x0,
+  IWL_FMAC_STATIC_CONFIG_POWER_SCHEME = IWL_FMAC_STATIC_CONFIG_U32_START,
+  IWL_FMAC_STATIC_CONFIG_COEX_MODE,
+  IWL_FMAC_STATIC_CONFIG_COEX_SYNC2SCO,
+  IWL_FMAC_STATIC_CONFIG_COEX_PLCR,
+  IWL_FMAC_STATIC_CONFIG_COEX_MPLUT,
+  IWL_FMAC_STATIC_CONFIG_DEPRECATED_1,
+  IWL_FMAC_STATIC_CONFIG_DEPRECATED_2,
+  IWL_FMAC_STATIC_CONFIG_UAPSD_ENABLED,
+  IWL_FMAC_STATIC_CONFIG_LTR_MODE,
+  IWL_FMAC_STATIC_CONFIG_SINGLE_ANT_DIVERSITY_CONF,
+  IWL_FMAC_STATIC_CONFIG_EXTERNAL_WPA,
+  IWL_FMAC_STATIC_CONFIG_U32_MAX,
+  IWL_FMAC_STATIC_CONFIG_U32_NUM =
+      IWL_FMAC_STATIC_CONFIG_U32_MAX - IWL_FMAC_STATIC_CONFIG_U32_START,
 
-    IWL_FMAC_CONFIG_U32_START = 0x100,
-    IWL_FMAC_CONFIG_INTERNAL_CMD_TO_HOST = IWL_FMAC_CONFIG_U32_START,
-    IWL_FMAC_CONFIG_RS_STAT_THOLD,
-    IWL_FMAC_CONFIG_SCAN_TYPE,
-    IWL_FMAC_CONFIG_U32_MAX,
-    IWL_FMAC_CONFIG_U32_NUM = IWL_FMAC_CONFIG_U32_MAX - IWL_FMAC_CONFIG_U32_START,
+  IWL_FMAC_CONFIG_U32_START = 0x100,
+  IWL_FMAC_CONFIG_INTERNAL_CMD_TO_HOST = IWL_FMAC_CONFIG_U32_START,
+  IWL_FMAC_CONFIG_RS_STAT_THOLD,
+  IWL_FMAC_CONFIG_SCAN_TYPE,
+  IWL_FMAC_CONFIG_U32_MAX,
+  IWL_FMAC_CONFIG_U32_NUM = IWL_FMAC_CONFIG_U32_MAX - IWL_FMAC_CONFIG_U32_START,
 
-    IWL_FMAC_CONFIG_START = 0x200,
-    IWL_FMAC_CONFIG_DEBUG_LEVEL = IWL_FMAC_CONFIG_START,
-    IWL_FMAC_CONFIG_TRIGGER,
-    IWL_FMAC_CONFIG_MAX,
-    IWL_FMAC_CONFIG_NUM = IWL_FMAC_CONFIG_MAX - IWL_FMAC_CONFIG_START,
+  IWL_FMAC_CONFIG_START = 0x200,
+  IWL_FMAC_CONFIG_DEBUG_LEVEL = IWL_FMAC_CONFIG_START,
+  IWL_FMAC_CONFIG_TRIGGER,
+  IWL_FMAC_CONFIG_MAX,
+  IWL_FMAC_CONFIG_NUM = IWL_FMAC_CONFIG_MAX - IWL_FMAC_CONFIG_START,
 
-    IWL_FMAC_CONFIG_VIF_START = 0x300,
-    IWL_FMAC_CONFIG_VIF_POWER_DISABLED = IWL_FMAC_CONFIG_VIF_START,
-    IWL_FMAC_CONFIG_VIF_TXPOWER_USER,
-    IWL_FMAC_CONFIG_VIF_LOW_LATENCY,
-    IWL_FMAC_CONFIG_VIF_INDICATE_ROAM_IS_NEEDED,
-    IWL_FMAC_CONFIG_VIF_MAX,
-    IWL_FMAC_CONFIG_VIF_NUM = IWL_FMAC_CONFIG_VIF_MAX - IWL_FMAC_CONFIG_VIF_START,
+  IWL_FMAC_CONFIG_VIF_START = 0x300,
+  IWL_FMAC_CONFIG_VIF_POWER_DISABLED = IWL_FMAC_CONFIG_VIF_START,
+  IWL_FMAC_CONFIG_VIF_TXPOWER_USER,
+  IWL_FMAC_CONFIG_VIF_LOW_LATENCY,
+  IWL_FMAC_CONFIG_VIF_INDICATE_ROAM_IS_NEEDED,
+  IWL_FMAC_CONFIG_VIF_MAX,
+  IWL_FMAC_CONFIG_VIF_NUM = IWL_FMAC_CONFIG_VIF_MAX - IWL_FMAC_CONFIG_VIF_START,
 
-    IWL_FMAC_CONFIG_WPAS_GLOBAL = 0x400,
+  IWL_FMAC_CONFIG_WPAS_GLOBAL = 0x400,
 
-    IWL_FMAC_STATIC_CONFIG_COMPLETE = 0xffff,
+  IWL_FMAC_STATIC_CONFIG_COMPLETE = 0xffff,
 };
 
 #define IWL_FMAC_VIF_ID_GLOBAL 0xff
@@ -1565,12 +1565,12 @@
  * @data: the data of the configuration.
  */
 struct iwl_fmac_config_cmd {
-    uint8_t vif_id;
-    uint8_t reserved[3];
-    __le16 config_id;
-    __le16 len;
+  uint8_t vif_id;
+  uint8_t reserved[3];
+  __le16 config_id;
+  __le16 len;
 #ifndef _MSC_VER
-    uint8_t data[0];
+  uint8_t data[0];
 #endif
 } __packed;
 
@@ -1584,19 +1584,19 @@
  * @IWL_NUM_CHAN_WIDTH: number of supported channel width values
  */
 enum iwl_fmac_chan_width {
-    IWL_CHAN_WIDTH_20_NOHT,
-    IWL_CHAN_WIDTH_20,
-    IWL_CHAN_WIDTH_40,
-    IWL_CHAN_WIDTH_80,
-    IWL_CHAN_WIDTH_160,
-    IWL_NUM_CHAN_WIDTH
+  IWL_CHAN_WIDTH_20_NOHT,
+  IWL_CHAN_WIDTH_20,
+  IWL_CHAN_WIDTH_40,
+  IWL_CHAN_WIDTH_80,
+  IWL_CHAN_WIDTH_160,
+  IWL_NUM_CHAN_WIDTH
 };
 
 #define IWL_FMAC_NUM_CHAIN_LIMITS 2
 #define IWL_FMAC_NUM_SUB_BANDS 5
 
 struct iwl_fmac_sar_restrictions {
-    __le16 per_chain_restriction[IWL_FMAC_NUM_CHAIN_LIMITS][IWL_FMAC_NUM_SUB_BANDS];
+  __le16 per_chain_restriction[IWL_FMAC_NUM_CHAIN_LIMITS][IWL_FMAC_NUM_SUB_BANDS];
 } __packed;
 
 /**
@@ -1606,9 +1606,9 @@
  * @IWL_FMAC_HIDDEN_SSID_ZERO_BYTES: use real length, but zero the SSID bytes
  */
 enum iwl_fmac_hidden_ssid {
-    IWL_FMAC_HIDDEN_SSID_NONE = 0,
-    IWL_FMAC_HIDDEN_SSID_ZERO_LEN = 1,
-    IWL_FMAC_HIDDEN_SSID_ZERO_BYTES = 2,
+  IWL_FMAC_HIDDEN_SSID_NONE = 0,
+  IWL_FMAC_HIDDEN_SSID_ZERO_LEN = 1,
+  IWL_FMAC_HIDDEN_SSID_ZERO_BYTES = 2,
 };
 
 /**
@@ -1620,11 +1620,11 @@
  * @reserved2: for alignment.
  */
 struct iwl_fmac_chandef {
-    __le16 control_freq;
-    __le16 center_freq1;
-    __le16 reserved;
-    uint8_t bandwidth;
-    uint8_t reserved2;
+  __le16 control_freq;
+  __le16 center_freq1;
+  __le16 reserved;
+  uint8_t bandwidth;
+  uint8_t reserved2;
 } __packed;
 
 /**
@@ -1633,8 +1633,8 @@
  * @IWL_FMAC_START_AP_FAILURE: Fail to start AP.
  */
 enum iwl_fmac_start_ap_resp_status {
-    IWL_FMAC_START_AP_SUCCESS = 0,
-    IWL_FMAC_START_AP_FAILURE,
+  IWL_FMAC_START_AP_SUCCESS = 0,
+  IWL_FMAC_START_AP_FAILURE,
 };
 
 /**
@@ -1644,9 +1644,9 @@
  * @IWL_FMAC_MODIFY_HOST_BASED_AP: modify the host based AP
  */
 enum iwl_fmac_action_host_based_ap {
-    IWL_FMAC_START_HOST_BASED_AP = 0,
-    IWL_FMAC_STOP_HOST_BASED_AP = 1,
-    IWL_FMAC_MODIFY_HOST_BASED_AP = 2,
+  IWL_FMAC_START_HOST_BASED_AP = 0,
+  IWL_FMAC_STOP_HOST_BASED_AP = 1,
+  IWL_FMAC_MODIFY_HOST_BASED_AP = 2,
 };
 
 /**
@@ -1663,16 +1663,16 @@
  * @IWL_FMAC_BEACON_CHANGED: beacon frame has been updated
  */
 enum iwl_fmac_host_ap_changed {
-    IWL_FMAC_CTS_PROT_CHANGED = BIT(0),
-    IWL_FMAC_SHORT_PREAMBLE_CHANGED = BIT(1),
-    IWL_FMAC_SHORT_SLOT_CHANGED = BIT(2),
-    IWL_FMAC_BASIC_RATES_CHANGED = BIT(3),
-    IWL_FMAC_HT_OPMODE_CHANGED = BIT(4),
-    IWL_FMAC_AC_PARAMS_CHANGED_BK = BIT(5),
-    IWL_FMAC_AC_PARAMS_CHANGED_BE = BIT(6),
-    IWL_FMAC_AC_PARAMS_CHANGED_VI = BIT(7),
-    IWL_FMAC_AC_PARAMS_CHANGED_VO = BIT(8),
-    IWL_FMAC_BEACON_CHANGED = BIT(9),
+  IWL_FMAC_CTS_PROT_CHANGED = BIT(0),
+  IWL_FMAC_SHORT_PREAMBLE_CHANGED = BIT(1),
+  IWL_FMAC_SHORT_SLOT_CHANGED = BIT(2),
+  IWL_FMAC_BASIC_RATES_CHANGED = BIT(3),
+  IWL_FMAC_HT_OPMODE_CHANGED = BIT(4),
+  IWL_FMAC_AC_PARAMS_CHANGED_BK = BIT(5),
+  IWL_FMAC_AC_PARAMS_CHANGED_BE = BIT(6),
+  IWL_FMAC_AC_PARAMS_CHANGED_VI = BIT(7),
+  IWL_FMAC_AC_PARAMS_CHANGED_VO = BIT(8),
+  IWL_FMAC_BEACON_CHANGED = BIT(9),
 };
 
 /**
@@ -1684,11 +1684,11 @@
  * @reserved: for alignment
  */
 struct iwl_fmac_ac_params {
-    __le16 txop;
-    __le16 cw_min;
-    __le16 cw_max;
-    uint8_t aifs;
-    uint8_t reserved;
+  __le16 txop;
+  __le16 cw_min;
+  __le16 cw_max;
+  uint8_t aifs;
+  uint8_t reserved;
 } __packed;
 
 /**
@@ -1732,23 +1732,23 @@
  * in the host.
  */
 struct iwl_fmac_host_ap_cmd {
-    uint8_t vif_id;
-    uint8_t action;
-    uint8_t dtim_period;
-    uint8_t use_cts_prot;
-    uint8_t use_short_preamble;
-    uint8_t use_short_slot;
-    __le16 basic_rates_bitmap;
-    __le16 ht_opmode;
-    __le16 beacon_int;
-    __le32 inactivity_timeout;
-    struct iwl_fmac_chandef chandef;
-    struct iwl_fmac_ac_params ac_params[4];
-    __le16 byte_cnt;
-    __le16 tim_idx;
-    __le32 changed;
+  uint8_t vif_id;
+  uint8_t action;
+  uint8_t dtim_period;
+  uint8_t use_cts_prot;
+  uint8_t use_short_preamble;
+  uint8_t use_short_slot;
+  __le16 basic_rates_bitmap;
+  __le16 ht_opmode;
+  __le16 beacon_int;
+  __le32 inactivity_timeout;
+  struct iwl_fmac_chandef chandef;
+  struct iwl_fmac_ac_params ac_params[4];
+  __le16 byte_cnt;
+  __le16 tim_idx;
+  __le32 changed;
 #ifndef _MSC_VER
-    uint8_t frame[0];
+  uint8_t frame[0];
 #endif
 } __packed;
 
@@ -1756,64 +1756,64 @@
  * struct iwl_fmac_host_ap_resp - Response of the %FMAC_HOST_BASED_AP
  */
 struct iwl_fmac_host_ap_resp {
-    /**
-     * @vif_id:
-     * the interface identifier returned in &iwl_fmac_add_vif_resp.
-     */
-    uint8_t vif_id;
+  /**
+   * @vif_id:
+   * the interface identifier returned in &iwl_fmac_add_vif_resp.
+   */
+  uint8_t vif_id;
 
-    /**
-     * @mcast_sta_id:
-     * the identifier allocation for the used for broadcast and  multicast
-     * transmissions. Relevant only if the %action was
-     * %IWL_FMAC_START_HOST_BASED_AP.
-     */
-    uint8_t mcast_sta_id;
+  /**
+   * @mcast_sta_id:
+   * the identifier allocated for broadcast and multicast
+   * transmissions. Relevant only if the %action was
+   * %IWL_FMAC_START_HOST_BASED_AP.
+   */
+  uint8_t mcast_sta_id;
 
-    /**
-     * @bcast_sta_id:
-     * the identifier allocation for the used for broadcast management
-     * frames. Relevant only if the %action was
-     * %IWL_FMAC_START_HOST_BASED_AP.
-     */
-    uint8_t bcast_sta_id;
+  /**
+   * @bcast_sta_id:
+   * the identifier allocated for broadcast management
+   * frames. Relevant only if the %action was
+   * %IWL_FMAC_START_HOST_BASED_AP.
+   */
+  uint8_t bcast_sta_id;
 
 #ifdef CPTCFG_IWLFMAC_9000_SUPPORT
-    /**
-     * @mcast_queue:
-     * queue allocation for broadcast and multicast transmissions.
-     * Only valid for 9000-series devices, otherwise reserved.
-     * Relevant only if the %action was
-     * %IWL_FMAC_START_HOST_BASED_AP.
-     */
-    uint8_t mcast_queue;
+  /**
+   * @mcast_queue:
+   * queue allocation for broadcast and multicast transmissions.
+   * Only valid for 9000-series devices, otherwise reserved.
+   * Relevant only if the %action was
+   * %IWL_FMAC_START_HOST_BASED_AP.
+   */
+  uint8_t mcast_queue;
 
-    /**
-     * @bcast_queue:
-     * queue allocation for broadcast management frames.
-     * Only valid for 9000-series devices, otherwise reserved.
-     * Relevant only if the %action was
-     * %IWL_FMAC_START_HOST_BASED_AP.
-     */
-    uint8_t bcast_queue;
+  /**
+   * @bcast_queue:
+   * queue allocation for broadcast management frames.
+   * Only valid for 9000-series devices, otherwise reserved.
+   * Relevant only if the %action was
+   * %IWL_FMAC_START_HOST_BASED_AP.
+   */
+  uint8_t bcast_queue;
 
-    /**
-     * @reserved:
-     * for alignment.
-     */
-    uint8_t reserved[3];
+  /**
+   * @reserved:
+   * for alignment.
+   */
+  uint8_t reserved[3];
 #else
-    /**
-     * @reserved: reserved
-     */
-    uint8_t reserved[5];
+  /**
+   * @reserved: reserved
+   */
+  uint8_t reserved[5];
 #endif
 
-    /**
-     * @status:
-     * status defined in &enum iwl_fmac_start_ap_resp_status.
-     */
-    __le32 status;
+  /**
+   * @status:
+   * status defined in &enum iwl_fmac_start_ap_resp_status.
+   */
+  __le32 status;
 } __packed;
 
 /**
@@ -1823,9 +1823,9 @@
  * @IWL_FMAC_MOD_HOST_BASED_STA: to modify a station of the host based AP
  */
 enum iwl_fmac_action_host_based_ap_sta {
-    IWL_FMAC_ADD_HOST_BASED_STA = 0,
-    IWL_FMAC_REM_HOST_BASED_STA = 1,
-    IWL_FMAC_MOD_HOST_BASED_STA = 2,
+  IWL_FMAC_ADD_HOST_BASED_STA = 0,
+  IWL_FMAC_REM_HOST_BASED_STA = 1,
+  IWL_FMAC_MOD_HOST_BASED_STA = 2,
 };
 
 /**
@@ -1837,11 +1837,11 @@
  * @IWL_FMAC_STA_UAPSD_PARAMS_CHANGED: uapsd_ac/sp_length was updated
  */
 enum iwl_fmac_host_ap_sta_changed {
-    IWL_FMAC_STA_AID_CHANGED = BIT(0),
-    IWL_FMAC_STA_SUPP_RATE_CHANGED = BIT(1),
-    IWL_FMAC_STA_HT_CAP_CHANGED = BIT(2),
-    IWL_FMAC_STA_VHT_CAP_CHANGED = BIT(3),
-    IWL_FMAC_STA_UAPSD_PARAMS_CHANGED = BIT(4),
+  IWL_FMAC_STA_AID_CHANGED = BIT(0),
+  IWL_FMAC_STA_SUPP_RATE_CHANGED = BIT(1),
+  IWL_FMAC_STA_HT_CAP_CHANGED = BIT(2),
+  IWL_FMAC_STA_VHT_CAP_CHANGED = BIT(3),
+  IWL_FMAC_STA_UAPSD_PARAMS_CHANGED = BIT(4),
 };
 
 /**
@@ -1850,8 +1850,8 @@
  * @IWL_FMAC_STA_VHT_CAPABLE: the station is VHT capable
  */
 enum iwl_fmac_host_ap_sta_flags {
-    IWL_FMAC_STA_HT_CAPABLE = BIT(0),
-    IWL_FMAC_STA_VHT_CAPABLE = BIT(1),
+  IWL_FMAC_STA_HT_CAPABLE = BIT(0),
+  IWL_FMAC_STA_VHT_CAPABLE = BIT(1),
 };
 
 /**
@@ -1877,18 +1877,18 @@
  * @vht_cap: the VHT capability Information Element
  */
 struct iwl_fmac_host_ap_sta_cmd {
-    uint8_t action;
-    uint8_t sta_id;
-    uint8_t vif_id;
-    uint8_t flags;
-    uint8_t addr[ETH_ALEN];
-    __le16 aid;
-    __le16 changed;
-    __le16 supp_rates_bitmap;
-    uint8_t ht_cap[26];
-    uint8_t uapsd_ac;
-    uint8_t sp_length;
-    uint8_t vht_cap[12];
+  uint8_t action;
+  uint8_t sta_id;
+  uint8_t vif_id;
+  uint8_t flags;
+  uint8_t addr[ETH_ALEN];
+  __le16 aid;
+  __le16 changed;
+  __le16 supp_rates_bitmap;
+  uint8_t ht_cap[26];
+  uint8_t uapsd_ac;
+  uint8_t sp_length;
+  uint8_t vht_cap[12];
 } __packed;
 
 #define IWL_FMAC_HOST_AP_INVALID_STA 0xffffffff
@@ -1900,7 +1900,7 @@
  *  For any action other than %IWL_FMAC_ADD, the value will be 0.
  */
 struct iwl_fmac_host_ap_sta_resp {
-    __le32 sta_id;
+  __le32 sta_id;
 };
 
 /**
@@ -1909,8 +1909,8 @@
  * @IWL_FMAC_REM_TEMPORAL_KEY: to remove a temporal key
  */
 enum iwl_fmac_action_temporal_key {
-    IWL_FMAC_ADD_TEMPORAL_KEY = 0,
-    IWL_FMAC_REM_TEMPORAL_KEY = 1,
+  IWL_FMAC_ADD_TEMPORAL_KEY = 0,
+  IWL_FMAC_REM_TEMPORAL_KEY = 1,
 };
 
 /**
@@ -1920,9 +1920,9 @@
  * @IWL_FMAC_TEMPORAL_KEY_TYPE_IGTK: IGTK
  */
 enum iwl_fmac_temporal_key_type {
-    IWL_FMAC_TEMPORAL_KEY_TYPE_PTK = 0,
-    IWL_FMAC_TEMPORAL_KEY_TYPE_GTK = 1,
-    IWL_FMAC_TEMPORAL_KEY_TYPE_IGTK = 2,
+  IWL_FMAC_TEMPORAL_KEY_TYPE_PTK = 0,
+  IWL_FMAC_TEMPORAL_KEY_TYPE_GTK = 1,
+  IWL_FMAC_TEMPORAL_KEY_TYPE_IGTK = 2,
 };
 
 /**
@@ -1940,15 +1940,15 @@
  * @reserved: reserved
  */
 struct iwl_fmac_temporal_key_cmd {
-    uint8_t action;
-    uint8_t sta_id;
-    uint8_t keyidx;
-    uint8_t keylen;
-    __le32 cipher;
-    uint8_t key[32];
-    uint8_t key_type;
-    uint8_t vif_id;
-    uint8_t reserved[2];
+  uint8_t action;
+  uint8_t sta_id;
+  uint8_t keyidx;
+  uint8_t keylen;
+  __le32 cipher;
+  uint8_t key[32];
+  uint8_t key_type;
+  uint8_t vif_id;
+  uint8_t reserved[2];
 };
 
 /**
@@ -1956,7 +1956,7 @@
  * @hw_keyoffset: the index to be used in the Tx command to use this key
  */
 struct iwl_fmac_temporal_key_resp {
-    __le32 hw_keyoffset;
+  __le32 hw_keyoffset;
 };
 
 /**
@@ -1966,38 +1966,38 @@
  * @reserved: reserved
  */
 struct iwl_fmac_sta_removed {
-    uint8_t vif_id;
-    uint8_t sta_id;
-    uint8_t reserved[2];
+  uint8_t vif_id;
+  uint8_t sta_id;
+  uint8_t reserved[2];
 } __packed;
 
 /**
  * enum iwl_fmac_dbg_trigger - triggers available
  */
 enum iwl_fmac_dbg_trigger {
-    /**
-     * @IWL_FMAC_DBG_TRIGGER_INVALID:
-     * (reserved)
-     */
-    IWL_FMAC_DBG_TRIGGER_INVALID = 0,
+  /**
+   * @IWL_FMAC_DBG_TRIGGER_INVALID:
+   * (reserved)
+   */
+  IWL_FMAC_DBG_TRIGGER_INVALID = 0,
 
-    /**
-     * @IWL_FMAC_DBG_TRIGGER_MISSED_BEACONS:
-     * trigger on missed beacons
-     */
-    IWL_FMAC_DBG_TRIGGER_MISSED_BEACONS = 3,
+  /**
+   * @IWL_FMAC_DBG_TRIGGER_MISSED_BEACONS:
+   * trigger on missed beacons
+   */
+  IWL_FMAC_DBG_TRIGGER_MISSED_BEACONS = 3,
 
-    /**
-     * @IWL_FMAC_DBG_TRIGGER_CHANNEL_SWITCH:
-     * trigger on channel switch
-     */
-    IWL_FMAC_DBG_TRIGGER_CHANNEL_SWITCH = 4,
+  /**
+   * @IWL_FMAC_DBG_TRIGGER_CHANNEL_SWITCH:
+   * trigger on channel switch
+   */
+  IWL_FMAC_DBG_TRIGGER_CHANNEL_SWITCH = 4,
 
-    /**
-     * @IWL_FMAC_DBG_TRIGGER_MAX:
-     * maximum number of triggers supported
-     */
-    IWL_FMAC_DBG_TRIGGER_MAX /* must be last */
+  /**
+   * @IWL_FMAC_DBG_TRIGGER_MAX:
+   * maximum number of triggers supported
+   */
+  IWL_FMAC_DBG_TRIGGER_MAX /* must be last */
 };
 
 /**
@@ -2008,11 +2008,11 @@
  * @data: trigger-dependent data
  */
 struct iwl_fmac_trigger_cmd {
-    __le32 len;
-    __le32 id;
-    __le32 vif_type;
+  __le32 len;
+  __le32 id;
+  __le32 vif_type;
 #ifndef _MSC_VER
-    uint8_t data[0];
+  uint8_t data[0];
 #endif
 } __packed;
 
@@ -2023,23 +2023,23 @@
  * @data: string that describes what happened
  */
 struct iwl_fmac_trigger_notif {
-    __le32 id;
-    uint8_t data[MAX_TRIGGER_STR];
+  __le32 id;
+  uint8_t data[MAX_TRIGGER_STR];
 } __packed;
 
 enum iwl_fmac_mcc_source {
-    IWL_FMAC_MCC_SOURCE_OLD_FW = 0,
-    IWL_FMAC_MCC_SOURCE_ME = 1,
-    IWL_FMAC_MCC_SOURCE_BIOS = 2,
-    IWL_FMAC_MCC_SOURCE_3G_LTE_HOST = 3,
-    IWL_FMAC_MCC_SOURCE_3G_LTE_DEVICE = 4,
-    IWL_FMAC_MCC_SOURCE_WIFI = 5,
-    IWL_FMAC_MCC_SOURCE_RESERVED = 6,
-    IWL_FMAC_MCC_SOURCE_DEFAULT = 7,
-    IWL_FMAC_MCC_SOURCE_UNINITIALIZED = 8,
-    IWL_FMAC_MCC_SOURCE_MCC_API = 9,
-    IWL_FMAC_MCC_SOURCE_GET_CURRENT = 0x10,
-    IWL_FMAC_MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
+  IWL_FMAC_MCC_SOURCE_OLD_FW = 0,
+  IWL_FMAC_MCC_SOURCE_ME = 1,
+  IWL_FMAC_MCC_SOURCE_BIOS = 2,
+  IWL_FMAC_MCC_SOURCE_3G_LTE_HOST = 3,
+  IWL_FMAC_MCC_SOURCE_3G_LTE_DEVICE = 4,
+  IWL_FMAC_MCC_SOURCE_WIFI = 5,
+  IWL_FMAC_MCC_SOURCE_RESERVED = 6,
+  IWL_FMAC_MCC_SOURCE_DEFAULT = 7,
+  IWL_FMAC_MCC_SOURCE_UNINITIALIZED = 8,
+  IWL_FMAC_MCC_SOURCE_MCC_API = 9,
+  IWL_FMAC_MCC_SOURCE_GET_CURRENT = 0x10,
+  IWL_FMAC_MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
 };
 
 /**
@@ -2049,9 +2049,9 @@
  * @reserved: reserved
  */
 struct iwl_fmac_reg_cmd {
-    __le16 mcc;
-    uint8_t source_id;
-    uint8_t reserved;
+  __le16 mcc;
+  uint8_t source_id;
+  uint8_t reserved;
 } __packed;
 
 /**
@@ -2068,12 +2068,12 @@
  * FMAC_REG_CFG, if this is a cmd response.
  */
 struct iwl_fmac_reg_resp {
-    __le16 mcc;
-    uint8_t source_id;
-    uint8_t reserved[1];
-    __le32 n_channels;
+  __le16 mcc;
+  uint8_t source_id;
+  uint8_t reserved[1];
+  __le32 n_channels;
 #ifndef _MSC_VER
-    __le32 channels[0];
+  __le32 channels[0];
 #endif
 } __packed;
 
@@ -2086,9 +2086,9 @@
  * @reserved: reserved
  */
 struct iwl_fmac_dbg_trigger_missed_bcon {
-    __le32 stop_consec_missed_bcon;
-    __le32 stop_consec_missed_bcon_since_rx;
-    uint8_t reserved[24];
+  __le32 stop_consec_missed_bcon;
+  __le32 stop_consec_missed_bcon_since_rx;
+  uint8_t reserved[24];
 } __packed;
 
 /**
@@ -2103,10 +2103,10 @@
  * disconnect reason 23 (IEEE 802.1x authentication failed).
  */
 struct iwl_fmac_rx_eapol_notif {
-    uint8_t addr[ETH_ALEN];
-    __le16 len;
+  uint8_t addr[ETH_ALEN];
+  __le16 len;
 #ifndef _MSC_VER
-    uint8_t data[0];
+  uint8_t data[0];
 #endif
 } __packed;
 
@@ -2126,14 +2126,14 @@
  * FMAC.
  */
 struct iwl_fmac_send_frame_notif {
-    uint8_t vif_id;
-    uint8_t reserved;
-    __le16 len;
-    uint8_t dst_addr[ETH_ALEN];
-    uint8_t src_addr[ETH_ALEN];
-    __be16 proto;
+  uint8_t vif_id;
+  uint8_t reserved;
+  __le16 len;
+  uint8_t dst_addr[ETH_ALEN];
+  uint8_t src_addr[ETH_ALEN];
+  __be16 proto;
 #ifndef _MSC_VER
-    uint8_t data[0];
+  uint8_t data[0];
 #endif
 } __packed;
 #endif
@@ -2150,9 +2150,9 @@
  *  suite_b_192 is used. The PMK length is 48 bytes.
  */
 enum iwl_fmac_key_type {
-    IWL_FMAC_KEY_TYPE_PMK,
-    IWL_FMAC_KEY_TYPE_PMK_EAP_LEAP,
-    IWL_FMAC_KEY_TYPE_PMK_SUITE_B_192,
+  IWL_FMAC_KEY_TYPE_PMK,
+  IWL_FMAC_KEY_TYPE_PMK_EAP_LEAP,
+  IWL_FMAC_KEY_TYPE_PMK_SUITE_B_192,
 };
 
 /**
@@ -2165,10 +2165,10 @@
  *  of the key as specified in &key_type. See also &enum iwl_fmac_key_type.
  */
 struct iwl_fmac_mlme_set_pmk_cmd {
-    uint8_t vif_id;
-    uint8_t key_type;
-    uint8_t aa[ETH_ALEN];
-    uint8_t key[KEY_MAX_LEN];
+  uint8_t vif_id;
+  uint8_t key_type;
+  uint8_t aa[ETH_ALEN];
+  uint8_t key[KEY_MAX_LEN];
 };
 
 /**
@@ -2178,9 +2178,9 @@
  * @reserved: reserved for 4 byte alignment.
  */
 struct iwl_fmac_mic_failure {
-    uint8_t vif_id;
-    uint8_t pairwise;
-    uint8_t reserved[2];
+  uint8_t vif_id;
+  uint8_t pairwise;
+  uint8_t reserved[2];
 } __packed;
 
 /**
@@ -2190,9 +2190,9 @@
  * @IWL_FMAC_SHA_TYPE_SHA384: SHA384
  */
 enum iwl_fmac_sha_type {
-    IWL_FMAC_SHA_TYPE_SHA1,
-    IWL_FMAC_SHA_TYPE_SHA256,
-    IWL_FMAC_SHA_TYPE_SHA384,
+  IWL_FMAC_SHA_TYPE_SHA1,
+  IWL_FMAC_SHA_TYPE_SHA256,
+  IWL_FMAC_SHA_TYPE_SHA384,
 };
 
 #define SHA_MAX_MSG_LEN 128
@@ -2205,10 +2205,10 @@
  * @msg: the message to generate the hash for.
  */
 struct iwl_fmac_vector_sha {
-    uint8_t type;
-    uint8_t msg_len;
-    __le16 reserved;
-    uint8_t msg[SHA_MAX_MSG_LEN];
+  uint8_t type;
+  uint8_t msg_len;
+  __le16 reserved;
+  uint8_t msg[SHA_MAX_MSG_LEN];
 } __packed;
 
 #define HMAC_KDF_MAX_KEY_LEN 192
@@ -2224,12 +2224,12 @@
  * @msg: the message to generate the MAC for.
  */
 struct iwl_fmac_vector_hmac_kdf {
-    uint8_t type;
-    uint8_t res_len;
-    uint8_t key_len;
-    uint8_t msg_len;
-    uint8_t key[HMAC_KDF_MAX_KEY_LEN];
-    uint8_t msg[HMAC_KDF_MAX_MSG_LEN];
+  uint8_t type;
+  uint8_t res_len;
+  uint8_t key_len;
+  uint8_t msg_len;
+  uint8_t key[HMAC_KDF_MAX_KEY_LEN];
+  uint8_t msg[HMAC_KDF_MAX_MSG_LEN];
 } __packed;
 
 /**
@@ -2239,14 +2239,14 @@
  * @IWL_FMAC_FIPS_TEST_KDF: test KDF functions.
  */
 enum iwl_fmac_fips_test_type {
-    IWL_FMAC_FIPS_TEST_SHA,
-    IWL_FMAC_FIPS_TEST_HMAC,
-    IWL_FMAC_FIPS_TEST_KDF,
+  IWL_FMAC_FIPS_TEST_SHA,
+  IWL_FMAC_FIPS_TEST_HMAC,
+  IWL_FMAC_FIPS_TEST_KDF,
 };
 
 union iwl_fmac_fips_test_vector {
-    struct iwl_fmac_vector_sha sha_vector;
-    struct iwl_fmac_vector_hmac_kdf hmac_kdf_vector;
+  struct iwl_fmac_vector_sha sha_vector;
+  struct iwl_fmac_vector_hmac_kdf hmac_kdf_vector;
 };
 
 #define MAX_FIPS_VECTOR_LEN sizeof(union iwl_fmac_fips_test_vector)
@@ -2258,9 +2258,9 @@
  * @vector: buffer with vector data. Union &iwl_fmac_fips_test_vector.
  */
 struct iwl_fmac_test_fips_cmd {
-    uint8_t type;
-    uint8_t reserved[3];
-    uint8_t vector[MAX_FIPS_VECTOR_LEN];
+  uint8_t type;
+  uint8_t reserved[3];
+  uint8_t vector[MAX_FIPS_VECTOR_LEN];
 } __packed;
 
 /**
@@ -2270,8 +2270,8 @@
  * @IWL_FMAC_TEST_FIPS_STATUS_FAIL: The requested operation failed.
  */
 enum iwl_fmac_test_fips_status {
-    IWL_FMAC_TEST_FIPS_STATUS_SUCCESS,
-    IWL_FMAC_TEST_FIPS_STATUS_FAIL,
+  IWL_FMAC_TEST_FIPS_STATUS_SUCCESS,
+  IWL_FMAC_TEST_FIPS_STATUS_FAIL,
 };
 
 #define FIPS_MAX_RES_LEN 88
@@ -2290,10 +2290,10 @@
  * &IWL_FMAC_TEST_FIPS_STATUS_SUCCESS. Otherwise it should be ignored.
  */
 struct iwl_fmac_test_fips_resp {
-    uint8_t status;
-    uint8_t len;
-    __le16 reserved;
-    uint8_t buf[FIPS_MAX_RES_LEN];
+  uint8_t status;
+  uint8_t len;
+  __le16 reserved;
+  uint8_t buf[FIPS_MAX_RES_LEN];
 } __packed;
 
 /**
@@ -2303,9 +2303,9 @@
  * @chandef: channel to set
  */
 struct iwl_fmac_set_monitor_chan_cmd {
-    uint8_t vif_id;
-    uint8_t reserved[3];
-    struct iwl_fmac_chandef chandef;
+  uint8_t vif_id;
+  uint8_t reserved[3];
+  struct iwl_fmac_chandef chandef;
 } __packed;
 
 /**
@@ -2317,9 +2317,9 @@
  *  is the candidate list for roam.
  */
 struct iwl_fmac_roam_is_needed {
-    uint8_t vif_id;
-    uint8_t n_bssids;
-    uint8_t bssids[IWL_FMAC_MAX_BSSIDS * ETH_ALEN];
+  uint8_t vif_id;
+  uint8_t n_bssids;
+  uint8_t bssids[IWL_FMAC_MAX_BSSIDS * ETH_ALEN];
 } __packed;
 
 /**
@@ -2333,10 +2333,10 @@
  *  connected.
  */
 enum iwl_fmac_roam_result_status {
-    IWL_FMAC_ROAM_RESULT_STATUS_ROAMED_NEW_AP,
-    IWL_FMAC_ROAM_RESULT_STATUS_ROAM_FAILED,
-    IWL_FMAC_ROAM_RESULT_STATUS_LEFT_WITH_CURRENT_AP,
-    IWL_FMAC_ROAM_RESULT_STATUS_NOT_CONNECTED,
+  IWL_FMAC_ROAM_RESULT_STATUS_ROAMED_NEW_AP,
+  IWL_FMAC_ROAM_RESULT_STATUS_ROAM_FAILED,
+  IWL_FMAC_ROAM_RESULT_STATUS_LEFT_WITH_CURRENT_AP,
+  IWL_FMAC_ROAM_RESULT_STATUS_NOT_CONNECTED,
 };
 
 /**
@@ -2349,10 +2349,10 @@
  * @connect_result: as defined in &struct iwl_fmac_connect_result.
  */
 struct iwl_fmac_roam_result {
-    uint8_t status;
-    uint8_t vif_id;
-    uint8_t reserved[2];
-    struct iwl_fmac_connect_result connect_result;
+  uint8_t status;
+  uint8_t vif_id;
+  uint8_t reserved[2];
+  struct iwl_fmac_connect_result connect_result;
 } __packed;
 
 /**
@@ -2366,11 +2366,11 @@
  * @reserved: for alignment.
  */
 struct iwl_fmac_tkip_mcast_rsc {
-    uint8_t vif_id;
-    uint8_t key_idx;
-    uint8_t addr[ETH_ALEN];
-    uint8_t rsc[6];
-    uint8_t reserved[2];
+  uint8_t vif_id;
+  uint8_t key_idx;
+  uint8_t addr[ETH_ALEN];
+  uint8_t rsc[6];
+  uint8_t reserved[2];
 };
 
 /**
@@ -2380,9 +2380,9 @@
  * @reserved: for alignment
  */
 struct iwl_fmac_inactive_sta {
-    uint8_t vif_id;
-    uint8_t sta_id;
-    __le16 reserved;
+  uint8_t vif_id;
+  uint8_t sta_id;
+  __le16 reserved;
 };
 
 #define IWL_FMAC_RECOVERY_NUM_VIFS 4
@@ -2399,13 +2399,13 @@
  * @blob: raw data read by the host upon firmware crash
  */
 struct iwl_fmac_recover_cmd {
-    uint8_t add_vif_bitmap;
-    uint8_t restore_vif_bitmap;
-    uint8_t reserved[2];
-    uint8_t vif_types[IWL_FMAC_RECOVERY_NUM_VIFS];
-    uint8_t vif_addrs[IWL_FMAC_RECOVERY_NUM_VIFS * ETH_ALEN];
+  uint8_t add_vif_bitmap;
+  uint8_t restore_vif_bitmap;
+  uint8_t reserved[2];
+  uint8_t vif_types[IWL_FMAC_RECOVERY_NUM_VIFS];
+  uint8_t vif_addrs[IWL_FMAC_RECOVERY_NUM_VIFS * ETH_ALEN];
 #ifndef _MSC_VER
-    uint8_t blob[0];
+  uint8_t blob[0];
 #endif
 } __packed;
 
@@ -2415,8 +2415,8 @@
  * @IWL_FMAC_RECOV_CORRUPTED: the buffer was corrupted, no vifs were added
  */
 enum iwl_fmac_recovery_complete_status {
-    IWL_FMAC_RECOV_SUCCESS = 0,
-    IWL_FMAC_RECOV_CORRUPTED = 1,
+  IWL_FMAC_RECOV_SUCCESS = 0,
+  IWL_FMAC_RECOV_CORRUPTED = 1,
 };
 
 /**
@@ -2431,9 +2431,9 @@
  * @reserved: for alignment
  */
 struct iwl_fmac_recovery_complete {
-    uint8_t status;
-    uint8_t vif_id_bitmap;
-    uint8_t reserved[2];
+  uint8_t status;
+  uint8_t vif_id_bitmap;
+  uint8_t reserved[2];
 } __packed;
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_FMAC_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/led.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/led.h
index 5e30604..7d7fa06 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/led.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/led.h
@@ -40,7 +40,7 @@
  * @status: LED status (on/off)
  */
 struct iwl_led_cmd {
-    __le32 status;
+  __le32 status;
 } __packed; /* LEDS_CMD_API_S_VER_2 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_LED_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/mac-cfg.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/mac-cfg.h
index 6e2a996..6115467 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/mac-cfg.h
@@ -41,19 +41,19 @@
  * enum iwl_mac_conf_subcmd_ids - mac configuration command IDs
  */
 enum iwl_mac_conf_subcmd_ids {
-    /**
-     * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd
-     */
-    LOW_LATENCY_CMD = 0x3,
-    /**
-     * @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif
-     */
-    PROBE_RESPONSE_DATA_NOTIF = 0xFC,
+  /**
+   * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd
+   */
+  LOW_LATENCY_CMD = 0x3,
+  /**
+   * @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif
+   */
+  PROBE_RESPONSE_DATA_NOTIF = 0xFC,
 
-    /**
-     * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
-     */
-    CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
+  /**
+   * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
+   */
+  CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
 };
 
 #define IWL_P2P_NOA_DESC_COUNT (2)
@@ -70,13 +70,13 @@
  * @reserved: reserved for alignment purposes
  */
 struct iwl_p2p_noa_attr {
-    uint8_t id;
-    uint8_t len_low;
-    uint8_t len_high;
-    uint8_t idx;
-    uint8_t ctwin;
-    struct ieee80211_p2p_noa_desc desc[IWL_P2P_NOA_DESC_COUNT];
-    uint8_t reserved;
+  uint8_t id;
+  uint8_t len_low;
+  uint8_t len_high;
+  uint8_t idx;
+  uint8_t ctwin;
+  struct ieee80211_p2p_noa_desc desc[IWL_P2P_NOA_DESC_COUNT];
+  uint8_t reserved;
 } __packed;
 
 #define IWL_PROBE_RESP_DATA_NO_CSA (0xff)
@@ -91,11 +91,11 @@
  * @reserved: reserved for alignment purposes
  */
 struct iwl_probe_resp_data_notif {
-    __le32 mac_id;
-    __le32 noa_active;
-    struct iwl_p2p_noa_attr noa_attr;
-    uint8_t csa_counter;
-    uint8_t reserved[3];
+  __le32 mac_id;
+  __le32 noa_active;
+  struct iwl_p2p_noa_attr noa_attr;
+  uint8_t csa_counter;
+  uint8_t reserved[3];
 } __packed; /* PROBE_RESPONSE_DATA_NTFY_API_S_VER_1 */
 
 /**
@@ -104,7 +104,7 @@
  * @id_and_color: ID and color of the MAC
  */
 struct iwl_channel_switch_noa_notif {
-    __le32 id_and_color;
+  __le32 id_and_color;
 } __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
 
 /**
@@ -116,10 +116,10 @@
  * @reserved: reserved for alignment purposes
  */
 struct iwl_mac_low_latency_cmd {
-    __le32 mac_id;
-    uint8_t low_latency_rx;
-    uint8_t low_latency_tx;
-    __le16 reserved;
+  __le32 mac_id;
+  uint8_t low_latency_rx;
+  uint8_t low_latency_tx;
+  __le16 reserved;
 } __packed; /* MAC_LOW_LATENCY_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_MAC_CFG_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/nan.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/nan.h
index 91e362d..120ec2c 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/nan.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/nan.h
@@ -38,7 +38,6 @@
 #include <stdint.h>
 
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h"
-
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/phy-ctxt.h"
 
 /* TODO: read it from tlv */
@@ -48,42 +47,42 @@
  * enum iwl_nan_subcmd_ids - Neighbor Awareness Networking (NaN) commands IDS
  */
 enum iwl_nan_subcmd_ids {
-    /**
-     * @NAN_CONFIG_CMD:
-     * &struct iwl_nan_cfg_cmd_v2 or &struct iwl_nan_cfg_cmd
-     */
-    NAN_CONFIG_CMD = 0,
+  /**
+   * @NAN_CONFIG_CMD:
+   * &struct iwl_nan_cfg_cmd_v2 or &struct iwl_nan_cfg_cmd
+   */
+  NAN_CONFIG_CMD = 0,
 
-    /**
-     * @NAN_DISCOVERY_FUNC_CMD:
-     * &struct iwl_nan_add_func_cmd or &struct iwl_nan_add_func_cmd_v2
-     */
-    NAN_DISCOVERY_FUNC_CMD = 0x1,
+  /**
+   * @NAN_DISCOVERY_FUNC_CMD:
+   * &struct iwl_nan_add_func_cmd or &struct iwl_nan_add_func_cmd_v2
+   */
+  NAN_DISCOVERY_FUNC_CMD = 0x1,
 
-    /**
-     * @NAN_FAW_CONFIG_CMD:
-     * &struct iwl_nan_faw_config
-     */
-    NAN_FAW_CONFIG_CMD = 0x2,
+  /**
+   * @NAN_FAW_CONFIG_CMD:
+   * &struct iwl_nan_faw_config
+   */
+  NAN_FAW_CONFIG_CMD = 0x2,
 
-    /**
-     * @NAN_DISCOVERY_EVENT_NOTIF:
-     * &struct iwl_nan_disc_evt_notify_v1 or
-     * &struct iwl_nan_disc_evt_notify_v2
-     */
-    NAN_DISCOVERY_EVENT_NOTIF = 0xFD,
+  /**
+   * @NAN_DISCOVERY_EVENT_NOTIF:
+   * &struct iwl_nan_disc_evt_notify_v1 or
+   * &struct iwl_nan_disc_evt_notify_v2
+   */
+  NAN_DISCOVERY_EVENT_NOTIF = 0xFD,
 
-    /**
-     * @NAN_DISCOVERY_TERMINATE_NOTIF:
-     * &struct iwl_nan_de_term
-     */
-    NAN_DISCOVERY_TERMINATE_NOTIF = 0xFE,
+  /**
+   * @NAN_DISCOVERY_TERMINATE_NOTIF:
+   * &struct iwl_nan_de_term
+   */
+  NAN_DISCOVERY_TERMINATE_NOTIF = 0xFE,
 
-    /**
-     * @NAN_FAW_START_NOTIF:
-     * Further availability window started.
-     */
-    NAN_FAW_START_NOTIF = 0xFF,
+  /**
+   * @NAN_FAW_START_NOTIF:
+   * Further availability window started.
+   */
+  NAN_FAW_START_NOTIF = 0xFF,
 };
 
 /**
@@ -93,8 +92,8 @@
  * @chan_bitmap: channel bitmap
  */
 struct iwl_fw_chan_avail {
-    uint8_t op_class;
-    __le16 chan_bitmap;
+  uint8_t op_class;
+  __le16 chan_bitmap;
 } __packed;
 
 /**
@@ -112,16 +111,16 @@
  * @beacon_template_id: beacon template id for NAN
  */
 struct iwl_nan_umac_cfg {
-    __le32 action;
-    __le32 tsf_id;
-    __le32 sta_id;
-    uint8_t node_addr[6];
-    __le16 reserved1;
-    uint8_t master_pref;
-    uint8_t master_rand;
-    __le16 cluster_id;
-    __le32 dual_band;
-    __le32 beacon_template_id;
+  __le32 action;
+  __le32 tsf_id;
+  __le32 sta_id;
+  uint8_t node_addr[6];
+  __le16 reserved1;
+  uint8_t master_pref;
+  uint8_t master_rand;
+  __le16 cluster_id;
+  __le32 dual_band;
+  __le32 beacon_template_id;
 } __packed; /* _NAN_UMAC_CONFIG_CMD_API_S_VER_1 */
 
 /**
@@ -136,13 +135,13 @@
  * @action_delay: usecs to delay SDFs (DEBUG)
  */
 struct iwl_nan_testbed_cfg {
-    uint8_t chan24;
-    uint8_t chan52;
-    uint8_t hop_count;
-    uint8_t op_bands;
-    __le32 warmup_timer;
-    __le64 custom_tsf;
-    __le32 action_delay;
+  uint8_t chan24;
+  uint8_t chan52;
+  uint8_t hop_count;
+  uint8_t op_bands;
+  __le32 warmup_timer;
+  __le64 custom_tsf;
+  __le32 action_delay;
 } __packed; /* NAN_TEST_BED_SPECIFIC_CONFIG_S_VER_1 */
 
 /*
@@ -154,10 +153,10 @@
  * @pot_avail: potential availability per op. class (NAN2)
  */
 struct iwl_nan_nan2_cfg {
-    __le16 cdw;
-    uint8_t op_mode;
-    uint8_t pot_avail_len;
-    struct iwl_fw_chan_avail pot_avail[20];
+  __le16 cdw;
+  uint8_t op_mode;
+  uint8_t pot_avail_len;
+  struct iwl_fw_chan_avail pot_avail[20];
 } __packed; /* NAN_CONFIG_CMD_API_S_VER_1 */
 
 /**
@@ -170,10 +169,10 @@
  * @nan2_cfg: nan2 specific configuration
  */
 struct iwl_nan_cfg_cmd {
-    struct iwl_nan_umac_cfg umac_cfg;
-    struct iwl_nan_testbed_cfg tb_cfg;
-    /* NAN 2 specific configuration */
-    struct iwl_nan_nan2_cfg nan2_cfg;
+  struct iwl_nan_umac_cfg umac_cfg;
+  struct iwl_nan_testbed_cfg tb_cfg;
+  /* NAN 2 specific configuration */
+  struct iwl_nan_nan2_cfg nan2_cfg;
 } __packed; /* NAN_CONFIG_CMD_API_S_VER_1 */
 
 /**
@@ -188,33 +187,33 @@
  * @nan2_cfg: nan2 specific configuration
  */
 struct iwl_nan_cfg_cmd_v2 {
-    struct iwl_nan_umac_cfg umac_cfg;
-    struct iwl_nan_testbed_cfg tb_cfg;
-    __le32 unavailable_slots;
-    /* NAN 2 specific configuration */
-    struct iwl_nan_nan2_cfg nan2_cfg;
+  struct iwl_nan_umac_cfg umac_cfg;
+  struct iwl_nan_testbed_cfg tb_cfg;
+  __le32 unavailable_slots;
+  /* NAN 2 specific configuration */
+  struct iwl_nan_nan2_cfg nan2_cfg;
 } __packed; /* NAN_CONFIG_CMD_API_S_VER_2 */
 
 /* NAN DE function type */
 enum iwl_fw_nan_func_type {
-    IWL_NAN_DE_FUNC_PUBLISH = 0,
-    IWL_NAN_DE_FUNC_SUBSCRIBE = 1,
-    IWL_NAN_DE_FUNC_FOLLOW_UP = 2,
+  IWL_NAN_DE_FUNC_PUBLISH = 0,
+  IWL_NAN_DE_FUNC_SUBSCRIBE = 1,
+  IWL_NAN_DE_FUNC_FOLLOW_UP = 2,
 
-    /* keep last */
-    IWL_NAN_DE_FUNC_NOT_VALID,
+  /* keep last */
+  IWL_NAN_DE_FUNC_NOT_VALID,
 };
 
 /* NAN DE function flags */
 enum iwl_fw_nan_func_flags {
-    IWL_NAN_DE_FUNC_FLAG_UNSOLICITED_OR_ACTIVE = BIT(0),
-    IWL_NAN_DE_FUNC_FLAG_SOLICITED = BIT(1),
-    IWL_NAN_DE_FUNC_FLAG_UNICAST = BIT(2),
-    IWL_NAN_DE_FUNC_FLAG_CLOSE_RANGE = BIT(3),
-    IWL_NAN_DE_FUNC_FLAG_FAW_PRESENT = BIT(4),
-    IWL_NAN_DE_FUNC_FLAG_FAW_TYPE = BIT(5),
-    IWL_NAN_DE_FUNC_FLAG_FAW_NOTIFY = BIT(6),
-    IWL_NAN_DE_FUNC_FLAG_RAISE_EVENTS = BIT(7),
+  IWL_NAN_DE_FUNC_FLAG_UNSOLICITED_OR_ACTIVE = BIT(0),
+  IWL_NAN_DE_FUNC_FLAG_SOLICITED = BIT(1),
+  IWL_NAN_DE_FUNC_FLAG_UNICAST = BIT(2),
+  IWL_NAN_DE_FUNC_FLAG_CLOSE_RANGE = BIT(3),
+  IWL_NAN_DE_FUNC_FLAG_FAW_PRESENT = BIT(4),
+  IWL_NAN_DE_FUNC_FLAG_FAW_TYPE = BIT(5),
+  IWL_NAN_DE_FUNC_FLAG_FAW_NOTIFY = BIT(6),
+  IWL_NAN_DE_FUNC_FLAG_RAISE_EVENTS = BIT(7),
 };
 
 /**
@@ -239,23 +238,23 @@
  * @dw_interval: awake dw interval
  */
 struct iwl_nan_add_func_common {
-    __le32 action;
-    uint8_t instance_id;
-    uint8_t type;
-    uint8_t service_id[6];
-    __le16 flags;
-    uint8_t flw_up_id;
-    uint8_t flw_up_req_id;
-    uint8_t flw_up_addr[6];
-    __le16 reserved1;
-    __le32 ttl;
-    struct iwl_fw_channel_info faw_ci;
-    uint8_t faw_attrtype;
-    uint8_t serv_info_len;
-    uint8_t srf_len;
-    uint8_t rx_filter_len;
-    uint8_t tx_filter_len;
-    uint8_t dw_interval;
+  __le32 action;
+  uint8_t instance_id;
+  uint8_t type;
+  uint8_t service_id[6];
+  __le16 flags;
+  uint8_t flw_up_id;
+  uint8_t flw_up_req_id;
+  uint8_t flw_up_addr[6];
+  __le16 reserved1;
+  __le32 ttl;
+  struct iwl_fw_channel_info faw_ci;
+  uint8_t faw_attrtype;
+  uint8_t serv_info_len;
+  uint8_t srf_len;
+  uint8_t rx_filter_len;
+  uint8_t tx_filter_len;
+  uint8_t dw_interval;
 } __packed; /* NAN_DISCO_FUNC_FIXED_CMD_API_S_VER_1 */
 
 /**
@@ -270,12 +269,12 @@
  *  security_ctx
  */
 struct iwl_nan_add_func_cmd_v2 {
-    struct iwl_nan_add_func_common cmn;
-    uint8_t cipher_capa;
-    uint8_t cipher_suite_id;
-    __le16 security_ctx_len;
-    __le16 sdea_ctrl;
-    uint8_t data[0];
+  struct iwl_nan_add_func_common cmn;
+  uint8_t cipher_capa;
+  uint8_t cipher_suite_id;
+  __le16 security_ctx_len;
+  __le16 sdea_ctrl;
+  uint8_t data[0];
 } __packed; /* NAN_DISCO_FUNC_FIXED_CMD_API_S_VER_2 */
 
 /**
@@ -286,17 +285,17 @@
  * @data: dw aligned fields -service_info, srf, rxFilter, txFilter
  */
 struct iwl_nan_add_func_cmd {
-    struct iwl_nan_add_func_common cmn;
-    uint8_t reserved[2];
-    uint8_t data[0];
+  struct iwl_nan_add_func_common cmn;
+  uint8_t reserved[2];
+  uint8_t data[0];
 } __packed; /* NAN_DISCO_FUNC_FIXED_CMD_API_S_VER_1 */
 
 enum iwl_nan_add_func_resp_status {
-    IWL_NAN_DE_FUNC_STATUS_SUCCESSFUL,
-    IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_ENTRIES,
-    IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_MEMORY,
-    IWL_NAN_DE_FUNC_STATUS_INVALID_INSTANCE,
-    IWL_NAN_DE_FUNC_STATUS_UNSPECIFIED,
+  IWL_NAN_DE_FUNC_STATUS_SUCCESSFUL,
+  IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_ENTRIES,
+  IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_MEMORY,
+  IWL_NAN_DE_FUNC_STATUS_INVALID_INSTANCE,
+  IWL_NAN_DE_FUNC_STATUS_UNSPECIFIED,
 };
 
 /**
@@ -307,9 +306,9 @@
  * @reserved: reserved
  */
 struct iwl_nan_add_func_res {
-    uint8_t instance_id;
-    uint8_t status;
-    __le16 reserved;
+  uint8_t instance_id;
+  uint8_t status;
+  __le16 reserved;
 } __packed; /* NAN_DISCO_FUNC_CMD_API_S_VER_1 */
 
 /* Shared key cipher suite with CCMP with a 128 bit TK */
@@ -334,14 +333,14 @@
  *     to the service
  */
 enum iwl_nan_de_func_sdea_flags {
-    IWL_NAN_DE_FUNC_SDEA_FSD_REQ = BIT(0),
-    IWL_NAN_DE_FUNC_SDEA_FSD_GAS = BIT(1),
-    IWL_NAN_DE_FUNC_SDEA_DP_REQ = BIT(2),
-    IWL_NAN_DE_FUNC_SDEA_DP_MCAST = BIT(3),
-    IWL_NAN_DE_FUNC_SDEA_DP_MCAST_M_TO_M = BIT(4),
-    IWL_NAN_DE_FUNC_SDEA_QOS_REQ = BIT(5),
-    IWL_NAN_DE_FUNC_SDEA_SEC_REQ = BIT(6),
-    IWL_NAN_DE_FUNC_SDEA_RANGIGN_REQ = BIT(7),
+  IWL_NAN_DE_FUNC_SDEA_FSD_REQ = BIT(0),
+  IWL_NAN_DE_FUNC_SDEA_FSD_GAS = BIT(1),
+  IWL_NAN_DE_FUNC_SDEA_DP_REQ = BIT(2),
+  IWL_NAN_DE_FUNC_SDEA_DP_MCAST = BIT(3),
+  IWL_NAN_DE_FUNC_SDEA_DP_MCAST_M_TO_M = BIT(4),
+  IWL_NAN_DE_FUNC_SDEA_QOS_REQ = BIT(5),
+  IWL_NAN_DE_FUNC_SDEA_SEC_REQ = BIT(6),
+  IWL_NAN_DE_FUNC_SDEA_RANGIGN_REQ = BIT(7),
 };
 
 /**
@@ -357,14 +356,14 @@
  * @buf: service specific information followed by attributes
  */
 struct iwl_nan_disc_evt_notify_v1 {
-    uint8_t peer_mac_addr[6];
-    __le16 reserved1;
-    uint8_t type;
-    uint8_t instance_id;
-    uint8_t peer_instance;
-    uint8_t service_info_len;
-    __le32 attrs_len;
-    uint8_t buf[0];
+  uint8_t peer_mac_addr[6];
+  __le16 reserved1;
+  uint8_t type;
+  uint8_t instance_id;
+  uint8_t peer_instance;
+  uint8_t service_info_len;
+  __le32 attrs_len;
+  uint8_t buf[0];
 } __packed; /* NAN_DISCO_EVENT_NTFY_API_S_VER_1 */
 
 /**
@@ -376,10 +375,10 @@
  * @buf: security context data
  */
 struct iwl_nan_sec_ctxt_info {
-    uint8_t type;
-    uint8_t reserved;
-    __le16 len;
-    uint8_t buf[0];
+  uint8_t type;
+  uint8_t reserved;
+  __le16 len;
+  uint8_t buf[0];
 } __packed; /* NAN_DISCO_SEC_CTXT_ID_API_S_VER_1 */
 
 /**
@@ -399,16 +398,16 @@
  *     more iwl_nan_sec_ctxt_info entries.
  */
 struct iwl_nan_disc_info {
-    uint8_t type;
-    uint8_t instance_id;
-    uint8_t peer_instance;
-    uint8_t service_info_len;
-    __le16 sdea_control;
-    __le16 sdea_service_info_len;
-    __le16 sec_ctxt_len;
-    uint8_t cipher_suite_ids;
-    uint8_t sdea_update_indicator;
-    uint8_t buf[0];
+  uint8_t type;
+  uint8_t instance_id;
+  uint8_t peer_instance;
+  uint8_t service_info_len;
+  __le16 sdea_control;
+  __le16 sdea_service_info_len;
+  __le16 sec_ctxt_len;
+  uint8_t cipher_suite_ids;
+  uint8_t sdea_update_indicator;
+  uint8_t buf[0];
 } __packed; /* NAN_DISCO_INFO_API_S_VER_1 */
 
 /**
@@ -424,18 +423,18 @@
  *     dword aligned address.
  */
 struct iwl_nan_disc_evt_notify_v2 {
-    uint8_t peer_mac_addr[6];
-    __le16 reserved1;
-    __le32 match_len;
-    __le32 avail_attrs_len;
-    uint8_t buf[0];
+  uint8_t peer_mac_addr[6];
+  __le16 reserved1;
+  __le32 match_len;
+  __le32 avail_attrs_len;
+  uint8_t buf[0];
 } __packed; /* NAN_DISCO_EVENT_NTFY_API_S_VER_2 */
 
 /* NAN function termination reasons */
 enum iwl_fw_nan_de_term_reason {
-    IWL_NAN_DE_TERM_FAILURE = 0,
-    IWL_NAN_DE_TERM_TTL_REACHED,
-    IWL_NAN_DE_TERM_USER_REQUEST,
+  IWL_NAN_DE_TERM_FAILURE = 0,
+  IWL_NAN_DE_TERM_TTL_REACHED,
+  IWL_NAN_DE_TERM_USER_REQUEST,
 };
 
 /**
@@ -447,22 +446,22 @@
  * @reserved1: reserved
  */
 struct iwl_nan_de_term {
-    uint8_t type;
-    uint8_t instance_id;
-    uint8_t reason;
-    uint8_t reserved1;
+  uint8_t type;
+  uint8_t instance_id;
+  uint8_t reason;
+  uint8_t reserved1;
 } __packed; /* NAN_DISCO_TERM_NTFY_API_S_VER_1 */
 
 enum iwl_fw_post_nan_type {
-    IWL_NAN_POST_NAN_ATTR_WLAN = 0,
-    IWL_NAN_POST_NAN_ATTR_P2P,
-    IWL_NAN_POST_NAN_ATTR_IBSS,
-    IWL_NAN_POST_NAN_ATTR_MESH,
-    IWL_NAN_POST_NAN_ATTR_FURTHER_NAN,
+  IWL_NAN_POST_NAN_ATTR_WLAN = 0,
+  IWL_NAN_POST_NAN_ATTR_P2P,
+  IWL_NAN_POST_NAN_ATTR_IBSS,
+  IWL_NAN_POST_NAN_ATTR_MESH,
+  IWL_NAN_POST_NAN_ATTR_FURTHER_NAN,
 };
 
 enum iwl_fw_config_flags {
-    NAN_FAW_FLAG_NOTIFY_HOST = BIT(0),
+  NAN_FAW_FLAG_NOTIFY_HOST = BIT(0),
 };
 
 /**
@@ -476,12 +475,12 @@
  * @op_class: operating class which corresponds to faw_ci
  */
 struct iwl_nan_faw_config {
-    __le32 id_n_color;
-    struct iwl_fw_channel_info faw_ci;
-    uint8_t type;
-    uint8_t slots;
-    uint8_t flags;
-    uint8_t op_class;
+  __le32 id_n_color;
+  struct iwl_fw_channel_info faw_ci;
+  uint8_t type;
+  uint8_t slots;
+  uint8_t flags;
+  uint8_t op_class;
 } __packed; /* _NAN_DISCO_FAW_CMD_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_NAN_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/nvm-reg.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/nvm-reg.h
index c3685f4..f4b46eee 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/nvm-reg.h
@@ -41,17 +41,17 @@
  * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands
  */
 enum iwl_regulatory_and_nvm_subcmd_ids {
-    /**
-     * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd
-     */
-    NVM_ACCESS_COMPLETE = 0x0,
+  /**
+   * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd
+   */
+  NVM_ACCESS_COMPLETE = 0x0,
 
-    /**
-     * @NVM_GET_INFO:
-     * Command is &struct iwl_nvm_get_info,
-     * response is &struct iwl_nvm_get_info_rsp
-     */
-    NVM_GET_INFO = 0x2,
+  /**
+   * @NVM_GET_INFO:
+   * Command is &struct iwl_nvm_get_info,
+   * response is &struct iwl_nvm_get_info_rsp
+   */
+  NVM_GET_INFO = 0x2,
 };
 
 /**
@@ -60,8 +60,8 @@
  * @IWL_NVM_WRITE: write NVM
  */
 enum iwl_nvm_access_op {
-    IWL_NVM_READ = 0,
-    IWL_NVM_WRITE = 1,
+  IWL_NVM_READ = 0,
+  IWL_NVM_WRITE = 1,
 };
 
 /**
@@ -71,9 +71,9 @@
  * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM
  */
 enum iwl_nvm_access_target {
-    NVM_ACCESS_TARGET_CACHE = 0,
-    NVM_ACCESS_TARGET_OTP = 1,
-    NVM_ACCESS_TARGET_EEPROM = 2,
+  NVM_ACCESS_TARGET_CACHE = 0,
+  NVM_ACCESS_TARGET_OTP = 1,
+  NVM_ACCESS_TARGET_EEPROM = 2,
 };
 
 /**
@@ -88,14 +88,14 @@
  * @NVM_MAX_NUM_SECTIONS: number of sections
  */
 enum iwl_nvm_section_type {
-    NVM_SECTION_TYPE_SW = 1,
-    NVM_SECTION_TYPE_REGULATORY = 3,
-    NVM_SECTION_TYPE_CALIBRATION = 4,
-    NVM_SECTION_TYPE_PRODUCTION = 5,
-    NVM_SECTION_TYPE_REGULATORY_SDP = 8,
-    NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
-    NVM_SECTION_TYPE_PHY_SKU = 12,
-    NVM_MAX_NUM_SECTIONS = 13,
+  NVM_SECTION_TYPE_SW = 1,
+  NVM_SECTION_TYPE_REGULATORY = 3,
+  NVM_SECTION_TYPE_CALIBRATION = 4,
+  NVM_SECTION_TYPE_PRODUCTION = 5,
+  NVM_SECTION_TYPE_REGULATORY_SDP = 8,
+  NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
+  NVM_SECTION_TYPE_PHY_SKU = 12,
+  NVM_MAX_NUM_SECTIONS = 13,
 };
 
 /**
@@ -108,12 +108,12 @@
  * @data: if write operation, the data to write. On read its empty
  */
 struct iwl_nvm_access_cmd {
-    uint8_t op_code;
-    uint8_t target;
-    __le16 type;
-    __le16 offset;
-    __le16 length;
-    uint8_t data[];
+  uint8_t op_code;
+  uint8_t target;
+  __le16 type;
+  __le16 offset;
+  __le16 length;
+  uint8_t data[];
 } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
 
 /**
@@ -125,18 +125,18 @@
  * @data: if read operation, the data returned. Empty on write.
  */
 struct iwl_nvm_access_resp {
-    __le16 offset;
-    __le16 length;
-    __le16 type;
-    __le16 status;
-    uint8_t data[];
+  __le16 offset;
+  __le16 length;
+  __le16 type;
+  __le16 status;
+  uint8_t data[];
 } __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */
 
 /*
  * struct iwl_nvm_get_info - request to get NVM data
  */
 struct iwl_nvm_get_info {
-    __le32 reserved;
+  __le32 reserved;
 } __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */
 
 /**
@@ -144,7 +144,7 @@
  * @NVM_GENERAL_FLAGS_EMPTY_OTP: 1 if OTP is empty
  */
 enum iwl_nvm_info_general_flags {
-    NVM_GENERAL_FLAGS_EMPTY_OTP = BIT(0),
+  NVM_GENERAL_FLAGS_EMPTY_OTP = BIT(0),
 };
 
 /**
@@ -155,10 +155,10 @@
  * @n_hw_addrs: number of reserved MAC addresses
  */
 struct iwl_nvm_get_info_general {
-    __le32 flags;
-    __le16 nvm_version;
-    uint8_t board_type;
-    uint8_t n_hw_addrs;
+  __le32 flags;
+  __le16 nvm_version;
+  uint8_t board_type;
+  uint8_t n_hw_addrs;
 } __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */
 
 /**
@@ -173,18 +173,18 @@
  * @NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED: true if API lock enabled
  */
 enum iwl_nvm_mac_sku_flags {
-    NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED = BIT(0),
-    NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1),
-    NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2),
-    NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3),
-    /**
-     * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
-     */
-    NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4),
-    NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5),
-    NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8),
-    NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED = BIT(14),
-    NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED = BIT(15),
+  NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED = BIT(0),
+  NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1),
+  NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2),
+  NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3),
+  /**
+   * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
+   */
+  NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4),
+  NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5),
+  NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8),
+  NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED = BIT(14),
+  NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED = BIT(15),
 };
 
 /**
@@ -192,7 +192,7 @@
  * @mac_sku_flags: flags for SKU, see &enum iwl_nvm_mac_sku_flags
  */
 struct iwl_nvm_get_info_sku {
-    __le32 mac_sku_flags;
+  __le32 mac_sku_flags;
 } __packed; /* REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_2 */
 
 /**
@@ -201,8 +201,8 @@
  * @rx_chains: BIT 0 chain A, BIT 1 chain B
  */
 struct iwl_nvm_get_info_phy {
-    __le32 tx_chains;
-    __le32 rx_chains;
+  __le32 tx_chains;
+  __le32 rx_chains;
 } __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
 
 #define IWL_NUM_CHANNELS (51)
@@ -214,9 +214,9 @@
  * @reserved: reserved
  */
 struct iwl_nvm_get_info_regulatory {
-    __le32 lar_enabled;
-    __le16 channel_profile[IWL_NUM_CHANNELS];
-    __le16 reserved;
+  __le32 lar_enabled;
+  __le16 channel_profile[IWL_NUM_CHANNELS];
+  __le16 reserved;
 } __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
 
 /**
@@ -227,10 +227,10 @@
  * @regulatory: regulatory data
  */
 struct iwl_nvm_get_info_rsp {
-    struct iwl_nvm_get_info_general general;
-    struct iwl_nvm_get_info_sku mac_sku;
-    struct iwl_nvm_get_info_phy phy_sku;
-    struct iwl_nvm_get_info_regulatory regulatory;
+  struct iwl_nvm_get_info_general general;
+  struct iwl_nvm_get_info_sku mac_sku;
+  struct iwl_nvm_get_info_phy phy_sku;
+  struct iwl_nvm_get_info_regulatory regulatory;
 } __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
 
 /**
@@ -238,7 +238,7 @@
  * @reserved: reserved
  */
 struct iwl_nvm_access_complete_cmd {
-    __le32 reserved;
+  __le32 reserved;
 } __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
 
 /**
@@ -254,11 +254,11 @@
  * @reserved2: reserved
  */
 struct iwl_mcc_update_cmd {
-    __le16 mcc;
-    uint8_t source_id;
-    uint8_t reserved;
-    __le32 key;
-    uint8_t reserved2[20];
+  __le16 mcc;
+  uint8_t source_id;
+  uint8_t reserved;
+  __le32 key;
+  uint8_t reserved2[20];
 } __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
 
 /**
@@ -268,8 +268,8 @@
  *  for the 5 GHz band.
  */
 enum iwl_geo_information {
-    GEO_NO_INFO = 0,
-    GEO_WMM_ETSI_5GHZ_INFO = BIT(0),
+  GEO_NO_INFO = 0,
+  GEO_WMM_ETSI_5GHZ_INFO = BIT(0),
 };
 
 /**
@@ -289,14 +289,14 @@
  *  16bits are used.
  */
 struct iwl_mcc_update_resp_v3 {
-    __le32 status;
-    __le16 mcc;
-    uint8_t cap;
-    uint8_t source_id;
-    __le16 time;
-    __le16 geo_info;
-    __le32 n_channels;
-    __le32 channels[0];
+  __le32 status;
+  __le16 mcc;
+  uint8_t cap;
+  uint8_t source_id;
+  __le16 time;
+  __le16 geo_info;
+  __le32 n_channels;
+  __le32 channels[0];
 } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
 
 /**
@@ -317,15 +317,15 @@
  *  16bits are used.
  */
 struct iwl_mcc_update_resp {
-    __le32 status;
-    __le16 mcc;
-    __le16 cap;
-    __le16 time;
-    __le16 geo_info;
-    uint8_t source_id;
-    uint8_t reserved[3];
-    __le32 n_channels;
-    __le32 channels[0];
+  __le32 status;
+  __le16 mcc;
+  __le16 cap;
+  __le16 time;
+  __le16 geo_info;
+  uint8_t source_id;
+  uint8_t reserved[3];
+  __le32 n_channels;
+  __le32 channels[0];
 } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */
 
 /**
@@ -344,36 +344,36 @@
  * @reserved1: reserved for alignment
  */
 struct iwl_mcc_chub_notif {
-    __le16 mcc;
-    uint8_t source_id;
-    uint8_t reserved1;
+  __le16 mcc;
+  uint8_t source_id;
+  uint8_t reserved1;
 } __packed; /* LAR_MCC_NOTIFY_S */
 
 enum iwl_mcc_update_status {
-    MCC_RESP_NEW_CHAN_PROFILE,
-    MCC_RESP_SAME_CHAN_PROFILE,
-    MCC_RESP_INVALID,
-    MCC_RESP_NVM_DISABLED,
-    MCC_RESP_ILLEGAL,
-    MCC_RESP_LOW_PRIORITY,
-    MCC_RESP_TEST_MODE_ACTIVE,
-    MCC_RESP_TEST_MODE_NOT_ACTIVE,
-    MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE,
+  MCC_RESP_NEW_CHAN_PROFILE,
+  MCC_RESP_SAME_CHAN_PROFILE,
+  MCC_RESP_INVALID,
+  MCC_RESP_NVM_DISABLED,
+  MCC_RESP_ILLEGAL,
+  MCC_RESP_LOW_PRIORITY,
+  MCC_RESP_TEST_MODE_ACTIVE,
+  MCC_RESP_TEST_MODE_NOT_ACTIVE,
+  MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE,
 };
 
 enum iwl_mcc_source {
-    MCC_SOURCE_OLD_FW = 0,
-    MCC_SOURCE_ME = 1,
-    MCC_SOURCE_BIOS = 2,
-    MCC_SOURCE_3G_LTE_HOST = 3,
-    MCC_SOURCE_3G_LTE_DEVICE = 4,
-    MCC_SOURCE_WIFI = 5,
-    MCC_SOURCE_RESERVED = 6,
-    MCC_SOURCE_DEFAULT = 7,
-    MCC_SOURCE_UNINITIALIZED = 8,
-    MCC_SOURCE_MCC_API = 9,
-    MCC_SOURCE_GET_CURRENT = 0x10,
-    MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
+  MCC_SOURCE_OLD_FW = 0,
+  MCC_SOURCE_ME = 1,
+  MCC_SOURCE_BIOS = 2,
+  MCC_SOURCE_3G_LTE_HOST = 3,
+  MCC_SOURCE_3G_LTE_DEVICE = 4,
+  MCC_SOURCE_WIFI = 5,
+  MCC_SOURCE_RESERVED = 6,
+  MCC_SOURCE_DEFAULT = 7,
+  MCC_SOURCE_UNINITIALIZED = 8,
+  MCC_SOURCE_MCC_API = 9,
+  MCC_SOURCE_GET_CURRENT = 0x10,
+  MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
 };
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_NVM_REG_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/offload.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/offload.h
index d356c99..6cd4d05 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/offload.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/offload.h
@@ -40,10 +40,10 @@
  * enum iwl_prot_offload_subcmd_ids - protocol offload commands
  */
 enum iwl_prot_offload_subcmd_ids {
-    /**
-     * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif
-     */
-    STORED_BEACON_NTF = 0xFF,
+  /**
+   * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif
+   */
+  STORED_BEACON_NTF = 0xFF,
 };
 
 #define MAX_STORED_BEACON_SIZE 600
@@ -61,14 +61,14 @@
  * @data: beacon data, length in @byte_count
  */
 struct iwl_stored_beacon_notif {
-    __le32 system_time;
-    __le64 tsf;
-    __le32 beacon_timestamp;
-    __le16 band;
-    __le16 channel;
-    __le32 rates;
-    __le32 byte_count;
-    uint8_t data[MAX_STORED_BEACON_SIZE];
+  __le32 system_time;
+  __le64 tsf;
+  __le32 beacon_timestamp;
+  __le16 band;
+  __le16 channel;
+  __le32 rates;
+  __le32 byte_count;
+  uint8_t data[MAX_STORED_BEACON_SIZE];
 } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_OFFLOAD_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/paging.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/paging.h
index fa7acedb..4e0e2b1e 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/paging.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/paging.h
@@ -48,10 +48,10 @@
  * @device_phy_addr: virtual addresses from device side
  */
 struct iwl_fw_paging_cmd {
-    __le32 flags;
-    __le32 block_size;
-    __le32 block_num;
-    __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+  __le32 flags;
+  __le32 block_size;
+  __le32 block_num;
+  __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
 } __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_PAGING_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/phy-ctxt.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/phy-ctxt.h
index 8e9c19ee..b32de53 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -74,10 +74,10 @@
  * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_*
  */
 struct iwl_fw_channel_info {
-    uint8_t band;
-    uint8_t channel;
-    uint8_t width;
-    uint8_t ctrl_pos;
+  uint8_t band;
+  uint8_t channel;
+  uint8_t width;
+  uint8_t ctrl_pos;
 } __packed;
 
 #define PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
@@ -114,17 +114,17 @@
  * @dsp_cfg_flags: set to 0
  */
 struct iwl_phy_context_cmd {
-    /* COMMON_INDEX_HDR_API_S_VER_1 */
-    __le32 id_and_color;
-    __le32 action;
-    /* PHY_CONTEXT_DATA_API_S_VER_1 */
-    __le32 apply_time;
-    __le32 tx_param_color;
-    struct iwl_fw_channel_info ci;
-    __le32 txchain_info;
-    __le32 rxchain_info;
-    __le32 acquisition_data;
-    __le32 dsp_cfg_flags;
+  /* COMMON_INDEX_HDR_API_S_VER_1 */
+  __le32 id_and_color;
+  __le32 action;
+  /* PHY_CONTEXT_DATA_API_S_VER_1 */
+  __le32 apply_time;
+  __le32 tx_param_color;
+  struct iwl_fw_channel_info ci;
+  __le32 txchain_info;
+  __le32 rxchain_info;
+  __le32 acquisition_data;
+  __le32 dsp_cfg_flags;
 } __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_PHY_CTXT_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/phy.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/phy.h
index 127dd46f..441553c 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/phy.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/phy.h
@@ -40,46 +40,46 @@
  * enum iwl_phy_ops_subcmd_ids - PHY group commands
  */
 enum iwl_phy_ops_subcmd_ids {
-    /**
-     * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE:
-     * Uses either &struct iwl_dts_measurement_cmd or
-     * &struct iwl_ext_dts_measurement_cmd
-     */
-    CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
+  /**
+   * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE:
+   * Uses either &struct iwl_dts_measurement_cmd or
+   * &struct iwl_ext_dts_measurement_cmd
+   */
+  CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
 
-    /**
-     * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd
-     */
-    CTDP_CONFIG_CMD = 0x03,
+  /**
+   * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd
+   */
+  CTDP_CONFIG_CMD = 0x03,
 
-    /**
-     * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd
-     */
-    TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
+  /**
+   * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd
+   */
+  TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
 
-    /**
-     * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd
-     */
-    GEO_TX_POWER_LIMIT = 0x05,
+  /**
+   * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd
+   */
+  GEO_TX_POWER_LIMIT = 0x05,
 
-    /**
-     * @CT_KILL_NOTIFICATION: &struct ct_kill_notif
-     */
-    CT_KILL_NOTIFICATION = 0xFE,
+  /**
+   * @CT_KILL_NOTIFICATION: &struct ct_kill_notif
+   */
+  CT_KILL_NOTIFICATION = 0xFE,
 
-    /**
-     * @DTS_MEASUREMENT_NOTIF_WIDE:
-     * &struct iwl_dts_measurement_notif_v1 or
-     * &struct iwl_dts_measurement_notif_v2
-     */
-    DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
+  /**
+   * @DTS_MEASUREMENT_NOTIF_WIDE:
+   * &struct iwl_dts_measurement_notif_v1 or
+   * &struct iwl_dts_measurement_notif_v2
+   */
+  DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
 };
 
 /* DTS measurements */
 
 enum iwl_dts_measurement_flags {
-    DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0),
-    DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1),
+  DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0),
+  DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1),
 };
 
 /**
@@ -89,7 +89,7 @@
  *  &enum iwl_dts_measurement_flags
  */
 struct iwl_dts_measurement_cmd {
-    __le32 flags;
+  __le32 flags;
 } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */
 
 /**
@@ -105,10 +105,10 @@
  *                              without measurement trigger.
  */
 enum iwl_dts_control_measurement_mode {
-    DTS_AUTOMATIC = 0,
-    DTS_REQUEST_READ = 1,
-    DTS_OVER_WRITE = 2,
-    DTS_DIRECT_WITHOUT_MEASURE = 3,
+  DTS_AUTOMATIC = 0,
+  DTS_REQUEST_READ = 1,
+  DTS_OVER_WRITE = 2,
+  DTS_DIRECT_WITHOUT_MEASURE = 3,
 };
 
 /**
@@ -120,11 +120,11 @@
  * @XTAL_TEMPERATURE: read temperature from xtal
  */
 enum iwl_dts_used {
-    DTS_USE_TOP = 0,
-    DTS_USE_CHAIN_A = 1,
-    DTS_USE_CHAIN_B = 2,
-    DTS_USE_CHAIN_C = 3,
-    XTAL_TEMPERATURE = 4,
+  DTS_USE_TOP = 0,
+  DTS_USE_CHAIN_A = 1,
+  DTS_USE_CHAIN_B = 2,
+  DTS_USE_CHAIN_C = 3,
+  XTAL_TEMPERATURE = 4,
 };
 
 /**
@@ -133,8 +133,8 @@
  * @DTS_BIT8_MODE: bit 8 mode
  */
 enum iwl_dts_bit_mode {
-    DTS_BIT6_MODE = 0,
-    DTS_BIT8_MODE = 1,
+  DTS_BIT6_MODE = 0,
+  DTS_BIT8_MODE = 1,
 };
 
 /**
@@ -147,12 +147,12 @@
  * @step_duration: step duration for the DTS
  */
 struct iwl_ext_dts_measurement_cmd {
-    __le32 control_mode;
-    __le32 temperature;
-    __le32 sensor;
-    __le32 avg_factor;
-    __le32 bit_mode;
-    __le32 step_duration;
+  __le32 control_mode;
+  __le32 temperature;
+  __le32 sensor;
+  __le32 avg_factor;
+  __le32 bit_mode;
+  __le32 step_duration;
 } __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */
 
 /**
@@ -162,8 +162,8 @@
  * @voltage: the measured voltage
  */
 struct iwl_dts_measurement_notif_v1 {
-    __le32 temp;
-    __le32 voltage;
+  __le32 temp;
+  __le32 voltage;
 } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/
 
 /**
@@ -174,9 +174,9 @@
  * @threshold_idx: the trip index that was crossed
  */
 struct iwl_dts_measurement_notif_v2 {
-    __le32 temp;
-    __le32 voltage;
-    __le32 threshold_idx;
+  __le32 temp;
+  __le32 voltage;
+  __le32 threshold_idx;
 } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */
 
 /**
@@ -186,8 +186,8 @@
  * @reserved: reserved
  */
 struct ct_kill_notif {
-    __le16 temperature;
-    __le16 reserved;
+  __le16 temperature;
+  __le16 reserved;
 } __packed; /* GRP_PHY_CT_KILL_NTF */
 
 /**
@@ -197,9 +197,9 @@
  * @CTDP_CMD_OPERATION_REPORT: get the average budget
  */
 enum iwl_mvm_ctdp_cmd_operation {
-    CTDP_CMD_OPERATION_START = 0x1,
-    CTDP_CMD_OPERATION_STOP = 0x2,
-    CTDP_CMD_OPERATION_REPORT = 0x4,
+  CTDP_CMD_OPERATION_START = 0x1,
+  CTDP_CMD_OPERATION_STOP = 0x2,
+  CTDP_CMD_OPERATION_REPORT = 0x4,
 }; /* CTDP_CMD_OPERATION_TYPE_E */
 
 /**
@@ -210,9 +210,9 @@
  * @window_size: defined in API but not used
  */
 struct iwl_mvm_ctdp_cmd {
-    __le32 operation;
-    __le32 budget;
-    __le32 window_size;
+  __le32 operation;
+  __le32 budget;
+  __le32 window_size;
 } __packed;
 
 #define IWL_MAX_DTS_TRIPS 8
@@ -224,8 +224,8 @@
  * @thresholds: array with the thresholds to be configured
  */
 struct temp_report_ths_cmd {
-    __le32 num_temps;
-    __le16 thresholds[IWL_MAX_DTS_TRIPS];
+  __le32 num_temps;
+  __le16 thresholds[IWL_MAX_DTS_TRIPS];
 } __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_PHY_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/power.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/power.h
index a4ec5e4..cdb063d 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/power.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/power.h
@@ -55,14 +55,14 @@
  *  idle timeout
  */
 enum iwl_ltr_config_flags {
-    LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
-    LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
-    LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
-    LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
-    LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
-    LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
-    LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
-    LTR_CFG_FLAG_UPDATE_VALUES = BIT(7),
+  LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
+  LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
+  LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
+  LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
+  LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
+  LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
+  LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
+  LTR_CFG_FLAG_UPDATE_VALUES = BIT(7),
 };
 
 /**
@@ -72,9 +72,9 @@
  * @static_short: static LTR Short register value.
  */
 struct iwl_ltr_config_cmd_v1 {
-    __le32 flags;
-    __le32 static_long;
-    __le32 static_short;
+  __le32 flags;
+  __le32 static_long;
+  __le32 static_short;
 } __packed; /* LTR_CAPABLE_API_S_VER_1 */
 
 #define LTR_VALID_STATES_NUM 4
@@ -91,11 +91,11 @@
  *  %LTR_CFG_FLAG_UPDATE_VALUES is set.
  */
 struct iwl_ltr_config_cmd {
-    __le32 flags;
-    __le32 static_long;
-    __le32 static_short;
-    __le32 ltr_cfg_values[LTR_VALID_STATES_NUM];
-    __le32 ltr_short_idle_timeout;
+  __le32 flags;
+  __le32 static_long;
+  __le32 static_short;
+  __le32 ltr_cfg_values[LTR_VALID_STATES_NUM];
+  __le32 ltr_short_idle_timeout;
 } __packed; /* LTR_CAPABLE_API_S_VER_2 */
 
 /* Radio LP RX Energy Threshold measured in dBm */
@@ -121,14 +121,14 @@
  *      detection enablement
  */
 enum iwl_power_flags {
-    POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
-    POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1),
-    POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2),
-    POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5),
-    POWER_FLAGS_BT_SCO_ENA = BIT(8),
-    POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
-    POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
-    POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12),
+  POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
+  POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1),
+  POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2),
+  POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5),
+  POWER_FLAGS_BT_SCO_ENA = BIT(8),
+  POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
+  POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
+  POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12),
 };
 
 #define IWL_POWER_VEC_SIZE 5
@@ -156,15 +156,15 @@
  *          Default: 80dbm
  */
 struct iwl_powertable_cmd {
-    /* PM_POWER_TABLE_CMD_API_S_VER_6 */
-    __le16 flags;
-    uint8_t keep_alive_seconds;
-    uint8_t debug_flags;
-    __le32 rx_data_timeout;
-    __le32 tx_data_timeout;
-    __le32 sleep_interval[IWL_POWER_VEC_SIZE];
-    __le32 skip_dtim_periods;
-    __le32 lprx_rssi_threshold;
+  /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+  __le16 flags;
+  uint8_t keep_alive_seconds;
+  uint8_t debug_flags;
+  __le32 rx_data_timeout;
+  __le32 tx_data_timeout;
+  __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+  __le32 skip_dtim_periods;
+  __le32 lprx_rssi_threshold;
 } __packed;
 
 /**
@@ -174,7 +174,7 @@
  *  receiver and transmitter. '0' - does not allow.
  */
 enum iwl_device_power_flags {
-    DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
+  DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
 };
 
 /**
@@ -185,9 +185,9 @@
  * @reserved: reserved (padding)
  */
 struct iwl_device_power_cmd {
-    /* PM_POWER_TABLE_CMD_API_S_VER_6 */
-    __le16 flags;
-    __le16 reserved;
+  /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+  __le16 flags;
+  __le16 reserved;
 } __packed;
 
 /**
@@ -233,30 +233,30 @@
  * @reserved: reserved (padding)
  */
 struct iwl_mac_power_cmd {
-    /* CONTEXT_DESC_API_T_VER_1 */
-    __le32 id_and_color;
+  /* CONTEXT_DESC_API_T_VER_1 */
+  __le32 id_and_color;
 
-    /* CLIENT_PM_POWER_TABLE_S_VER_1 */
-    __le16 flags;
-    __le16 keep_alive_seconds;
-    __le32 rx_data_timeout;
-    __le32 tx_data_timeout;
-    __le32 rx_data_timeout_uapsd;
-    __le32 tx_data_timeout_uapsd;
-    uint8_t lprx_rssi_threshold;
-    uint8_t skip_dtim_periods;
-    __le16 snooze_interval;
-    __le16 snooze_window;
-    uint8_t snooze_step;
-    uint8_t qndp_tid;
-    uint8_t uapsd_ac_flags;
-    uint8_t uapsd_max_sp;
-    uint8_t heavy_tx_thld_packets;
-    uint8_t heavy_rx_thld_packets;
-    uint8_t heavy_tx_thld_percentage;
-    uint8_t heavy_rx_thld_percentage;
-    uint8_t limited_ps_threshold;
-    uint8_t reserved;
+  /* CLIENT_PM_POWER_TABLE_S_VER_1 */
+  __le16 flags;
+  __le16 keep_alive_seconds;
+  __le32 rx_data_timeout;
+  __le32 tx_data_timeout;
+  __le32 rx_data_timeout_uapsd;
+  __le32 tx_data_timeout_uapsd;
+  uint8_t lprx_rssi_threshold;
+  uint8_t skip_dtim_periods;
+  __le16 snooze_interval;
+  __le16 snooze_window;
+  uint8_t snooze_step;
+  uint8_t qndp_tid;
+  uint8_t uapsd_ac_flags;
+  uint8_t uapsd_max_sp;
+  uint8_t heavy_tx_thld_packets;
+  uint8_t heavy_rx_thld_packets;
+  uint8_t heavy_tx_thld_percentage;
+  uint8_t heavy_rx_thld_percentage;
+  uint8_t limited_ps_threshold;
+  uint8_t reserved;
 } __packed;
 
 /*
@@ -267,9 +267,9 @@
  *      this context.
  */
 struct iwl_uapsd_misbehaving_ap_notif {
-    __le32 sta_id;
-    uint8_t mac_id;
-    uint8_t reserved[3];
+  __le32 sta_id;
+  uint8_t mac_id;
+  uint8_t reserved[3];
 } __packed;
 
 /**
@@ -280,18 +280,18 @@
  * @pwr_restriction: TX power restriction in dBms.
  */
 struct iwl_reduce_tx_power_cmd {
-    uint8_t flags;
-    uint8_t mac_context_id;
-    __le16 pwr_restriction;
+  uint8_t flags;
+  uint8_t mac_context_id;
+  __le16 pwr_restriction;
 } __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
 
 enum iwl_dev_tx_power_cmd_mode {
-    IWL_TX_POWER_MODE_SET_MAC = 0,
-    IWL_TX_POWER_MODE_SET_DEVICE = 1,
-    IWL_TX_POWER_MODE_SET_CHAINS = 2,
-    IWL_TX_POWER_MODE_SET_ACK = 3,
-    IWL_TX_POWER_MODE_SET_SAR_TIMER = 4,
-    IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5,
+  IWL_TX_POWER_MODE_SET_MAC = 0,
+  IWL_TX_POWER_MODE_SET_DEVICE = 1,
+  IWL_TX_POWER_MODE_SET_CHAINS = 2,
+  IWL_TX_POWER_MODE_SET_ACK = 3,
+  IWL_TX_POWER_MODE_SET_SAR_TIMER = 4,
+  IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5,
 }; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_5 */
 ;
 
@@ -309,13 +309,13 @@
  * @per_chain_restriction: per chain restrictions
  */
 struct iwl_dev_tx_power_cmd_v3 {
-    __le32 set_mode;
-    __le32 mac_context_id;
-    __le16 pwr_restriction;
-    __le16 dev_24;
-    __le16 dev_52_low;
-    __le16 dev_52_high;
-    __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+  __le32 set_mode;
+  __le32 mac_context_id;
+  __le16 pwr_restriction;
+  __le16 dev_24;
+  __le16 dev_52_low;
+  __le16 dev_52_high;
+  __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
 } __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
 
 #define IWL_DEV_MAX_TX_POWER 0x7FFF
@@ -328,10 +328,10 @@
  * @reserved: reserved (padding)
  */
 struct iwl_dev_tx_power_cmd_v4 {
-    /* v4 is just an extension of v3 - keep this here */
-    struct iwl_dev_tx_power_cmd_v3 v3;
-    uint8_t enable_ack_reduction;
-    uint8_t reserved[3];
+  /* v4 is just an extension of v3 - keep this here */
+  struct iwl_dev_tx_power_cmd_v3 v3;
+  uint8_t enable_ack_reduction;
+  uint8_t reserved[3];
 } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
 
 /**
@@ -348,12 +348,12 @@
  *  BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER
  */
 struct iwl_dev_tx_power_cmd {
-    /* v5 is just an extension of v3 - keep this here */
-    struct iwl_dev_tx_power_cmd_v3 v3;
-    uint8_t enable_ack_reduction;
-    uint8_t per_chain_restriction_changed;
-    uint8_t reserved[2];
-    __le32 timer_period;
+  /* v5 is just an extension of v3 - keep this here */
+  struct iwl_dev_tx_power_cmd_v3 v3;
+  uint8_t enable_ack_reduction;
+  uint8_t per_chain_restriction_changed;
+  uint8_t reserved[2];
+  __le32 timer_period;
 } __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
 
 #define IWL_NUM_GEO_PROFILES 3
@@ -364,8 +364,8 @@
  * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table.
  */
 enum iwl_geo_per_chain_offset_operation {
-    IWL_PER_CHAIN_OFFSET_SET_TABLES,
-    IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE,
+  IWL_PER_CHAIN_OFFSET_SET_TABLES,
+  IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE,
 }; /* GEO_TX_POWER_LIMIT FLAGS TYPE */
 
 /**
@@ -375,14 +375,14 @@
  * @chain_b: tx power offset for chain b.
  */
 struct iwl_per_chain_offset {
-    __le16 max_tx_power;
-    uint8_t chain_a;
-    uint8_t chain_b;
+  __le16 max_tx_power;
+  uint8_t chain_a;
+  uint8_t chain_b;
 } __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
 
 struct iwl_per_chain_offset_group {
-    struct iwl_per_chain_offset lb;
-    struct iwl_per_chain_offset hb;
+  struct iwl_per_chain_offset lb;
+  struct iwl_per_chain_offset hb;
 } __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
 
 /**
@@ -391,8 +391,8 @@
  * @table: offset profile per band.
  */
 struct iwl_geo_tx_power_profiles_cmd {
-    __le32 ops;
-    struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+  __le32 ops;
+  struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
 } __packed; /* GEO_TX_POWER_LIMIT */
 
 /**
@@ -400,7 +400,7 @@
  * @profile_idx: current geo profile in use
  */
 struct iwl_geo_tx_power_profiles_resp {
-    __le32 profile_idx;
+  __le32 profile_idx;
 } __packed; /* GEO_TX_POWER_LIMIT_RESP */
 
 /**
@@ -445,17 +445,17 @@
  * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
  */
 struct iwl_beacon_filter_cmd {
-    __le32 bf_energy_delta;
-    __le32 bf_roaming_energy_delta;
-    __le32 bf_roaming_state;
-    __le32 bf_temp_threshold;
-    __le32 bf_temp_fast_filter;
-    __le32 bf_temp_slow_filter;
-    __le32 bf_enable_beacon_filter;
-    __le32 bf_debug_flag;
-    __le32 bf_escape_timer;
-    __le32 ba_escape_timer;
-    __le32 ba_enable_beacon_abort;
+  __le32 bf_energy_delta;
+  __le32 bf_roaming_energy_delta;
+  __le32 bf_roaming_state;
+  __le32 bf_temp_threshold;
+  __le32 bf_temp_fast_filter;
+  __le32 bf_temp_slow_filter;
+  __le32 bf_enable_beacon_filter;
+  __le32 bf_debug_flag;
+  __le32 bf_escape_timer;
+  __le32 ba_escape_timer;
+  __le32 ba_enable_beacon_abort;
 } __packed;
 
 /* Beacon filtering and beacon abort */
@@ -507,16 +507,16 @@
 
 #define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
 
-#define IWL_BF_CMD_CONFIG(mode)                                                \
-    .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA##mode),                 \
-    .bf_roaming_energy_delta = cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA##mode), \
-    .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE##mode),               \
-    .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD##mode),             \
-    .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER##mode),         \
-    .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER##mode),         \
-    .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG##mode),                     \
-    .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER##mode),                 \
-    .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER##mode)
+#define IWL_BF_CMD_CONFIG(mode)                                              \
+  .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA##mode),                 \
+  .bf_roaming_energy_delta = cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA##mode), \
+  .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE##mode),               \
+  .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD##mode),             \
+  .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER##mode),         \
+  .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER##mode),         \
+  .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG##mode),                     \
+  .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER##mode),                 \
+  .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER##mode)
 
 #define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
 #define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/rs.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/rs.h
index 4b66bfd..c362205 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/rs.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/rs.h
@@ -52,11 +52,11 @@
  *                      streams
  */
 enum iwl_tlc_mng_cfg_flags {
-    IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0),
-    IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1),
-    IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK = BIT(2),
-    IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3),
-    IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4),
+  IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0),
+  IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1),
+  IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK = BIT(2),
+  IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3),
+  IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4),
 };
 
 /**
@@ -68,11 +68,11 @@
  * @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value
  */
 enum iwl_tlc_mng_cfg_cw {
-    IWL_TLC_MNG_CH_WIDTH_20MHZ,
-    IWL_TLC_MNG_CH_WIDTH_40MHZ,
-    IWL_TLC_MNG_CH_WIDTH_80MHZ,
-    IWL_TLC_MNG_CH_WIDTH_160MHZ,
-    IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ,
+  IWL_TLC_MNG_CH_WIDTH_20MHZ,
+  IWL_TLC_MNG_CH_WIDTH_40MHZ,
+  IWL_TLC_MNG_CH_WIDTH_80MHZ,
+  IWL_TLC_MNG_CH_WIDTH_160MHZ,
+  IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ,
 };
 
 /**
@@ -81,8 +81,8 @@
  * @IWL_TLC_MNG_CHAIN_B_MSK: chain B
  */
 enum iwl_tlc_mng_cfg_chains {
-    IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
-    IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
+  IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
+  IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
 };
 
 /**
@@ -97,14 +97,14 @@
  * @IWL_TLC_MNG_MODE_NUM: a count of possible modes
  */
 enum iwl_tlc_mng_cfg_mode {
-    IWL_TLC_MNG_MODE_CCK = 0,
-    IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK,
-    IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK,
-    IWL_TLC_MNG_MODE_HT,
-    IWL_TLC_MNG_MODE_VHT,
-    IWL_TLC_MNG_MODE_HE,
-    IWL_TLC_MNG_MODE_INVALID,
-    IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
+  IWL_TLC_MNG_MODE_CCK = 0,
+  IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK,
+  IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK,
+  IWL_TLC_MNG_MODE_HT,
+  IWL_TLC_MNG_MODE_VHT,
+  IWL_TLC_MNG_MODE_HE,
+  IWL_TLC_MNG_MODE_INVALID,
+  IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
 };
 
 /**
@@ -124,19 +124,19 @@
  * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
  */
 enum iwl_tlc_mng_ht_rates {
-    IWL_TLC_MNG_HT_RATE_MCS0 = 0,
-    IWL_TLC_MNG_HT_RATE_MCS1,
-    IWL_TLC_MNG_HT_RATE_MCS2,
-    IWL_TLC_MNG_HT_RATE_MCS3,
-    IWL_TLC_MNG_HT_RATE_MCS4,
-    IWL_TLC_MNG_HT_RATE_MCS5,
-    IWL_TLC_MNG_HT_RATE_MCS6,
-    IWL_TLC_MNG_HT_RATE_MCS7,
-    IWL_TLC_MNG_HT_RATE_MCS8,
-    IWL_TLC_MNG_HT_RATE_MCS9,
-    IWL_TLC_MNG_HT_RATE_MCS10,
-    IWL_TLC_MNG_HT_RATE_MCS11,
-    IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11,
+  IWL_TLC_MNG_HT_RATE_MCS0 = 0,
+  IWL_TLC_MNG_HT_RATE_MCS1,
+  IWL_TLC_MNG_HT_RATE_MCS2,
+  IWL_TLC_MNG_HT_RATE_MCS3,
+  IWL_TLC_MNG_HT_RATE_MCS4,
+  IWL_TLC_MNG_HT_RATE_MCS5,
+  IWL_TLC_MNG_HT_RATE_MCS6,
+  IWL_TLC_MNG_HT_RATE_MCS7,
+  IWL_TLC_MNG_HT_RATE_MCS8,
+  IWL_TLC_MNG_HT_RATE_MCS9,
+  IWL_TLC_MNG_HT_RATE_MCS10,
+  IWL_TLC_MNG_HT_RATE_MCS11,
+  IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11,
 };
 
 /* Maximum supported tx antennas number */
@@ -160,18 +160,18 @@
  * @reserved2: reserved
  */
 struct iwl_tlc_config_cmd {
-    uint8_t sta_id;
-    uint8_t reserved1[3];
-    uint8_t max_ch_width;
-    uint8_t mode;
-    uint8_t chains;
-    uint8_t amsdu;
-    __le16 flags;
-    __le16 non_ht_rates;
-    __le16 ht_rates[MAX_NSS][2];
-    __le16 max_mpdu_len;
-    uint8_t sgi_ch_width_supp;
-    uint8_t reserved2[1];
+  uint8_t sta_id;
+  uint8_t reserved1[3];
+  uint8_t max_ch_width;
+  uint8_t mode;
+  uint8_t chains;
+  uint8_t amsdu;
+  __le16 flags;
+  __le16 non_ht_rates;
+  __le16 ht_rates[MAX_NSS][2];
+  __le16 max_mpdu_len;
+  uint8_t sgi_ch_width_supp;
+  uint8_t reserved2[1];
 } __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_2 */
 
 /**
@@ -180,8 +180,8 @@
  * @IWL_TLC_NOTIF_FLAG_AMSDU: umsdu parameters update
  */
 enum iwl_tlc_update_flags {
-    IWL_TLC_NOTIF_FLAG_RATE = BIT(0),
-    IWL_TLC_NOTIF_FLAG_AMSDU = BIT(1),
+  IWL_TLC_NOTIF_FLAG_RATE = BIT(0),
+  IWL_TLC_NOTIF_FLAG_AMSDU = BIT(1),
 };
 
 /**
@@ -194,12 +194,12 @@
  * @amsdu_enabled: bitmap for per-TID AMSDU enablement
  */
 struct iwl_tlc_update_notif {
-    uint8_t sta_id;
-    uint8_t reserved[3];
-    __le32 flags;
-    __le32 rate;
-    __le32 amsdu_size;
-    __le32 amsdu_enabled;
+  uint8_t sta_id;
+  uint8_t reserved[3];
+  __le32 flags;
+  __le32 rate;
+  __le32 amsdu_size;
+  __le32 amsdu_enabled;
 } __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
 
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
@@ -214,13 +214,13 @@
  * @IWL_TLC_DEBUG_TPC_STATS: get number of frames Tx'ed in each tpc step
  */
 enum iwl_tlc_debug_flags {
-    IWL_TLC_DEBUG_FIXED_RATE,
-    IWL_TLC_DEBUG_AGG_DURATION_LIM,
-    IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
-    IWL_TLC_DEBUG_TPC_ENABLED,
-    IWL_TLC_DEBUG_TPC_STATS,
+  IWL_TLC_DEBUG_FIXED_RATE,
+  IWL_TLC_DEBUG_AGG_DURATION_LIM,
+  IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
+  IWL_TLC_DEBUG_TPC_ENABLED,
+  IWL_TLC_DEBUG_TPC_STATS,
 
-    IWL_TLC_DEBUG_FLAGS_NUM,
+  IWL_TLC_DEBUG_FLAGS_NUM,
 }; /* TLC_MNG_DEBUG_FLAGS_API_E */
 
 /**
@@ -231,10 +231,10 @@
  * @data: for each bit i set in te %flags, data[i] holds the corresponding data
  */
 struct iwl_dhc_tlc_cmd {
-    uint8_t sta_id;
-    uint8_t reserved1[3];
-    __le32 flags;
-    __le32 data[IWL_TLC_DEBUG_FLAGS_NUM];
+  uint8_t sta_id;
+  uint8_t reserved1[3];
+  __le32 flags;
+  __le32 data[IWL_TLC_DEBUG_FLAGS_NUM];
 } __packed; /* TLC_MNG_DEBUG_CMD_S */
 
 /**
@@ -245,8 +245,8 @@
  *        reduction)
  */
 struct iwl_tpc_stats {
-    __le32 no_tpc;
-    __le32 step[5];
+  __le32 no_tpc;
+  __le32 step[5];
 } __packed;
 #endif /* CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED */
 
@@ -256,61 +256,61 @@
  * TODO: avoid overlap between legacy and HT rates
  */
 enum {
-    IWL_RATE_1M_INDEX = 0,
-    IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
-    IWL_RATE_2M_INDEX,
-    IWL_RATE_5M_INDEX,
-    IWL_RATE_11M_INDEX,
-    IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
-    IWL_RATE_6M_INDEX,
-    IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
-    IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
-    IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
-    IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
-    IWL_RATE_9M_INDEX,
-    IWL_RATE_12M_INDEX,
-    IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
-    IWL_RATE_18M_INDEX,
-    IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
-    IWL_RATE_24M_INDEX,
-    IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
-    IWL_RATE_36M_INDEX,
-    IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
-    IWL_RATE_48M_INDEX,
-    IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
-    IWL_RATE_54M_INDEX,
-    IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
-    IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
-    IWL_RATE_60M_INDEX,
-    IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
-    IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
-    IWL_RATE_MCS_8_INDEX,
-    IWL_RATE_MCS_9_INDEX,
-    IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
-    IWL_RATE_MCS_10_INDEX,
-    IWL_RATE_MCS_11_INDEX,
-    IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
-    IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
-    IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
+  IWL_RATE_1M_INDEX = 0,
+  IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
+  IWL_RATE_2M_INDEX,
+  IWL_RATE_5M_INDEX,
+  IWL_RATE_11M_INDEX,
+  IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
+  IWL_RATE_6M_INDEX,
+  IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+  IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
+  IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
+  IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
+  IWL_RATE_9M_INDEX,
+  IWL_RATE_12M_INDEX,
+  IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
+  IWL_RATE_18M_INDEX,
+  IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
+  IWL_RATE_24M_INDEX,
+  IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
+  IWL_RATE_36M_INDEX,
+  IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
+  IWL_RATE_48M_INDEX,
+  IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
+  IWL_RATE_54M_INDEX,
+  IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
+  IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
+  IWL_RATE_60M_INDEX,
+  IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
+  IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
+  IWL_RATE_MCS_8_INDEX,
+  IWL_RATE_MCS_9_INDEX,
+  IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
+  IWL_RATE_MCS_10_INDEX,
+  IWL_RATE_MCS_11_INDEX,
+  IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
+  IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
+  IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
 };
 
 #define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
 
 /* fw API values for legacy bit rates, both OFDM and CCK */
 enum {
-    IWL_RATE_6M_PLCP = 13,
-    IWL_RATE_9M_PLCP = 15,
-    IWL_RATE_12M_PLCP = 5,
-    IWL_RATE_18M_PLCP = 7,
-    IWL_RATE_24M_PLCP = 9,
-    IWL_RATE_36M_PLCP = 11,
-    IWL_RATE_48M_PLCP = 1,
-    IWL_RATE_54M_PLCP = 3,
-    IWL_RATE_1M_PLCP = 10,
-    IWL_RATE_2M_PLCP = 20,
-    IWL_RATE_5M_PLCP = 55,
-    IWL_RATE_11M_PLCP = 110,
-    IWL_RATE_INVM_PLCP = -1,
+  IWL_RATE_6M_PLCP = 13,
+  IWL_RATE_9M_PLCP = 15,
+  IWL_RATE_12M_PLCP = 5,
+  IWL_RATE_18M_PLCP = 7,
+  IWL_RATE_24M_PLCP = 9,
+  IWL_RATE_36M_PLCP = 11,
+  IWL_RATE_48M_PLCP = 1,
+  IWL_RATE_54M_PLCP = 3,
+  IWL_RATE_1M_PLCP = 10,
+  IWL_RATE_2M_PLCP = 20,
+  IWL_RATE_5M_PLCP = 55,
+  IWL_RATE_11M_PLCP = 110,
+  IWL_RATE_INVM_PLCP = -1,
 };
 
 /*
@@ -593,22 +593,22 @@
  * @ss_params: single stream features. declare whether STBC or BFER are allowed.
  */
 struct iwl_lq_cmd {
-    uint8_t sta_id;
-    uint8_t reduced_tpc;
-    __le16 control;
-    /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
-    uint8_t flags;
-    uint8_t mimo_delim;
-    uint8_t single_stream_ant_msk;
-    uint8_t dual_stream_ant_msk;
-    uint8_t initial_rate_index[AC_NUM];
-    /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */
-    __le16 agg_time_limit;
-    uint8_t agg_disable_start_th;
-    uint8_t agg_frame_cnt_limit;
-    __le32 reserved2;
-    __le32 rs_table[LQ_MAX_RETRY_NUM];
-    __le32 ss_params;
+  uint8_t sta_id;
+  uint8_t reduced_tpc;
+  __le16 control;
+  /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
+  uint8_t flags;
+  uint8_t mimo_delim;
+  uint8_t single_stream_ant_msk;
+  uint8_t dual_stream_ant_msk;
+  uint8_t initial_rate_index[AC_NUM];
+  /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */
+  __le16 agg_time_limit;
+  uint8_t agg_disable_start_th;
+  uint8_t agg_frame_cnt_limit;
+  __le32 reserved2;
+  __le32 rs_table[LQ_MAX_RETRY_NUM];
+  __le32 ss_params;
 }; /* LINK_QUALITY_CMD_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_RS_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/rx.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/rx.h
index b08448c..32dbea3 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/rx.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/rx.h
@@ -49,8 +49,8 @@
 #define IWL_RX_INFO_ENERGY_ANT_C_POS 16
 
 enum iwl_mac_context_info {
-    MAC_CONTEXT_INFO_NONE,
-    MAC_CONTEXT_INFO_GSCAN,
+  MAC_CONTEXT_INFO_NONE,
+  MAC_CONTEXT_INFO_GSCAN,
 };
 
 /**
@@ -78,21 +78,21 @@
  * about the reception of the packet.
  */
 struct iwl_rx_phy_info {
-    uint8_t non_cfg_phy_cnt;
-    uint8_t cfg_phy_cnt;
-    uint8_t stat_id;
-    uint8_t reserved1;
-    __le32 system_timestamp;
-    __le64 timestamp;
-    __le32 beacon_time_stamp;
-    __le16 phy_flags;
-    __le16 channel;
-    __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
-    __le32 rate_n_flags;
-    __le32 byte_count;
-    uint8_t mac_active_msk;
-    uint8_t mac_context_info;
-    __le16 frame_time;
+  uint8_t non_cfg_phy_cnt;
+  uint8_t cfg_phy_cnt;
+  uint8_t stat_id;
+  uint8_t reserved1;
+  __le32 system_timestamp;
+  __le64 timestamp;
+  __le32 beacon_time_stamp;
+  __le16 phy_flags;
+  __le16 channel;
+  __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
+  __le32 rate_n_flags;
+  __le32 byte_count;
+  uint8_t mac_active_msk;
+  uint8_t mac_context_info;
+  __le16 frame_time;
 } __packed;
 
 /*
@@ -106,12 +106,12 @@
  * bit 15 - Offload enabled
  */
 enum iwl_csum_rx_assist_info {
-    CSUM_RXA_RESERVED_MASK = 0x000f,
-    CSUM_RXA_MICSIZE_MASK = 0x00f0,
-    CSUM_RXA_HEADERLEN_MASK = 0x1f00,
-    CSUM_RXA_PADD = BIT(13),
-    CSUM_RXA_AMSDU = BIT(14),
-    CSUM_RXA_ENA = BIT(15)
+  CSUM_RXA_RESERVED_MASK = 0x000f,
+  CSUM_RXA_MICSIZE_MASK = 0x00f0,
+  CSUM_RXA_HEADERLEN_MASK = 0x1f00,
+  CSUM_RXA_PADD = BIT(13),
+  CSUM_RXA_AMSDU = BIT(14),
+  CSUM_RXA_ENA = BIT(15)
 };
 
 /**
@@ -120,8 +120,8 @@
  * @assist: see &enum iwl_csum_rx_assist_info
  */
 struct iwl_rx_mpdu_res_start {
-    __le16 byte_count;
-    __le16 assist;
+  __le16 byte_count;
+  __le16 assist;
 } __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
 
 /**
@@ -138,16 +138,16 @@
  * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
  */
 enum iwl_rx_phy_flags {
-    RX_RES_PHY_FLAGS_BAND_24 = BIT(0),
-    RX_RES_PHY_FLAGS_MOD_CCK = BIT(1),
-    RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2),
-    RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3),
-    RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4),
-    RX_RES_PHY_FLAGS_ANTENNA_POS = 4,
-    RX_RES_PHY_FLAGS_AGG = BIT(7),
-    RX_RES_PHY_FLAGS_OFDM_HT = BIT(8),
-    RX_RES_PHY_FLAGS_OFDM_GF = BIT(9),
-    RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10),
+  RX_RES_PHY_FLAGS_BAND_24 = BIT(0),
+  RX_RES_PHY_FLAGS_MOD_CCK = BIT(1),
+  RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2),
+  RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3),
+  RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4),
+  RX_RES_PHY_FLAGS_ANTENNA_POS = 4,
+  RX_RES_PHY_FLAGS_AGG = BIT(7),
+  RX_RES_PHY_FLAGS_OFDM_HT = BIT(8),
+  RX_RES_PHY_FLAGS_OFDM_GF = BIT(9),
+  RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10),
 };
 
 /**
@@ -183,478 +183,478 @@
  * @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift
  */
 enum iwl_mvm_rx_status {
-    RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
-    RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1),
-    RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2),
-    RX_MPDU_RES_STATUS_KEY_VALID = BIT(3),
-    RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4),
-    RX_MPDU_RES_STATUS_ICV_OK = BIT(5),
-    RX_MPDU_RES_STATUS_MIC_OK = BIT(6),
-    RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
-    RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7),
-    RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8),
-    RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
-    RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
-    RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
-    RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
-    RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
-    RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
-    RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
-    RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
-    RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
-    RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
-    RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
-    RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16),
-    RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
-    RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24,
-    RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT,
+  RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
+  RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1),
+  RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2),
+  RX_MPDU_RES_STATUS_KEY_VALID = BIT(3),
+  RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4),
+  RX_MPDU_RES_STATUS_ICV_OK = BIT(5),
+  RX_MPDU_RES_STATUS_MIC_OK = BIT(6),
+  RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
+  RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7),
+  RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8),
+  RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
+  RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
+  RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
+  RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
+  RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
+  RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
+  RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
+  RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
+  RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
+  RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
+  RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
+  RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16),
+  RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
+  RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24,
+  RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT,
 };
 
 /* 9000 series API */
 enum iwl_rx_mpdu_mac_flags1 {
-    IWL_RX_MDPU_MFLG1_ADDRTYPE_MASK = 0x03,
-    IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK = 0xf0,
-    /* shift should be 4, but the length is measured in 2-byte
-     * words, so shifting only by 3 gives a byte result
-     */
-    IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT = 3,
+  IWL_RX_MDPU_MFLG1_ADDRTYPE_MASK = 0x03,
+  IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK = 0xf0,
+  /* shift should be 4, but the length is measured in 2-byte
+   * words, so shifting only by 3 gives a byte result
+   */
+  IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT = 3,
 };
 
 enum iwl_rx_mpdu_mac_flags2 {
-    /* in 2-byte words */
-    IWL_RX_MPDU_MFLG2_HDR_LEN_MASK = 0x1f,
-    IWL_RX_MPDU_MFLG2_PAD = 0x20,
-    IWL_RX_MPDU_MFLG2_AMSDU = 0x40,
+  /* in 2-byte words */
+  IWL_RX_MPDU_MFLG2_HDR_LEN_MASK = 0x1f,
+  IWL_RX_MPDU_MFLG2_PAD = 0x20,
+  IWL_RX_MPDU_MFLG2_AMSDU = 0x40,
 };
 
 enum iwl_rx_mpdu_amsdu_info {
-    IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x7f,
-    IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
+  IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x7f,
+  IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
 };
 
 enum iwl_rx_l3_proto_values {
-    IWL_RX_L3_TYPE_NONE,
-    IWL_RX_L3_TYPE_IPV4,
-    IWL_RX_L3_TYPE_IPV4_FRAG,
-    IWL_RX_L3_TYPE_IPV6_FRAG,
-    IWL_RX_L3_TYPE_IPV6,
-    IWL_RX_L3_TYPE_IPV6_IN_IPV4,
-    IWL_RX_L3_TYPE_ARP,
-    IWL_RX_L3_TYPE_EAPOL,
+  IWL_RX_L3_TYPE_NONE,
+  IWL_RX_L3_TYPE_IPV4,
+  IWL_RX_L3_TYPE_IPV4_FRAG,
+  IWL_RX_L3_TYPE_IPV6_FRAG,
+  IWL_RX_L3_TYPE_IPV6,
+  IWL_RX_L3_TYPE_IPV6_IN_IPV4,
+  IWL_RX_L3_TYPE_ARP,
+  IWL_RX_L3_TYPE_EAPOL,
 };
 
 #define IWL_RX_L3_PROTO_POS 4
 
 enum iwl_rx_l3l4_flags {
-    IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0),
-    IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1),
-    IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2),
-    IWL_RX_L3L4_TCP_ACK = BIT(3),
-    IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS,
-    IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8,
-    IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12,
+  IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0),
+  IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1),
+  IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2),
+  IWL_RX_L3L4_TCP_ACK = BIT(3),
+  IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS,
+  IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8,
+  IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12,
 };
 
 enum iwl_rx_mpdu_status {
-    IWL_RX_MPDU_STATUS_CRC_OK = BIT(0),
-    IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1),
-    IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2),
-    IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3),
-    IWL_RX_MPDU_STATUS_KEY_PARAM_OK = BIT(4),
-    IWL_RX_MPDU_STATUS_ICV_OK = BIT(5),
-    IWL_RX_MPDU_STATUS_MIC_OK = BIT(6),
-    IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
-    IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8,
-    IWL_RX_MPDU_STATUS_SEC_UNKNOWN = IWL_RX_MPDU_STATUS_SEC_MASK,
-    IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8,
-    IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8,
-    IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8,
-    IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8,
-    IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8,
-    IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8,
-    IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11),
-    IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12),
-    IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13),
-    IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14),
-    IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15),
+  IWL_RX_MPDU_STATUS_CRC_OK = BIT(0),
+  IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1),
+  IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2),
+  IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3),
+  IWL_RX_MPDU_STATUS_KEY_PARAM_OK = BIT(4),
+  IWL_RX_MPDU_STATUS_ICV_OK = BIT(5),
+  IWL_RX_MPDU_STATUS_MIC_OK = BIT(6),
+  IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
+  IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8,
+  IWL_RX_MPDU_STATUS_SEC_UNKNOWN = IWL_RX_MPDU_STATUS_SEC_MASK,
+  IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8,
+  IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8,
+  IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8,
+  IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8,
+  IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8,
+  IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8,
+  IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11),
+  IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12),
+  IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13),
+  IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14),
+  IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15),
 };
 
 enum iwl_rx_mpdu_hash_filter {
-    IWL_RX_MPDU_HF_A1_HASH_MASK = 0x3f,
-    IWL_RX_MPDU_HF_FILTER_STATUS_MASK = 0xc0,
+  IWL_RX_MPDU_HF_A1_HASH_MASK = 0x3f,
+  IWL_RX_MPDU_HF_FILTER_STATUS_MASK = 0xc0,
 };
 
 enum iwl_rx_mpdu_sta_id_flags {
-    IWL_RX_MPDU_SIF_STA_ID_MASK = 0x1f,
-    IWL_RX_MPDU_SIF_RRF_ABORT = 0x20,
-    IWL_RX_MPDU_SIF_FILTER_STATUS_MASK = 0xc0,
+  IWL_RX_MPDU_SIF_STA_ID_MASK = 0x1f,
+  IWL_RX_MPDU_SIF_RRF_ABORT = 0x20,
+  IWL_RX_MPDU_SIF_FILTER_STATUS_MASK = 0xc0,
 };
 
 #define IWL_RX_REORDER_DATA_INVALID_BAID 0x7f
 
 enum iwl_rx_mpdu_reorder_data {
-    IWL_RX_MPDU_REORDER_NSSN_MASK = 0x00000fff,
-    IWL_RX_MPDU_REORDER_SN_MASK = 0x00fff000,
-    IWL_RX_MPDU_REORDER_SN_SHIFT = 12,
-    IWL_RX_MPDU_REORDER_BAID_MASK = 0x7f000000,
-    IWL_RX_MPDU_REORDER_BAID_SHIFT = 24,
-    IWL_RX_MPDU_REORDER_BA_OLD_SN = 0x80000000,
+  IWL_RX_MPDU_REORDER_NSSN_MASK = 0x00000fff,
+  IWL_RX_MPDU_REORDER_SN_MASK = 0x00fff000,
+  IWL_RX_MPDU_REORDER_SN_SHIFT = 12,
+  IWL_RX_MPDU_REORDER_BAID_MASK = 0x7f000000,
+  IWL_RX_MPDU_REORDER_BAID_SHIFT = 24,
+  IWL_RX_MPDU_REORDER_BA_OLD_SN = 0x80000000,
 };
 
 enum iwl_rx_mpdu_phy_info {
-    IWL_RX_MPDU_PHY_8023 = BIT(0),
-    IWL_RX_MPDU_PHY_AMPDU = BIT(5),
-    IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6),
-    IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7),
-    IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8),
+  IWL_RX_MPDU_PHY_8023 = BIT(0),
+  IWL_RX_MPDU_PHY_AMPDU = BIT(5),
+  IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6),
+  IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7),
+  IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8),
 };
 
 enum iwl_rx_mpdu_mac_info {
-    IWL_RX_MPDU_PHY_MAC_INDEX_MASK = 0x0f,
-    IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
+  IWL_RX_MPDU_PHY_MAC_INDEX_MASK = 0x0f,
+  IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
 };
 
 /* TSF overload low dword */
 enum iwl_rx_phy_data0 {
-    /* info type: HE any */
-    IWL_RX_PHY_DATA0_HE_BEAM_CHNG = 0x00000001,
-    IWL_RX_PHY_DATA0_HE_UPLINK = 0x00000002,
-    IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK = 0x000000fc,
-    IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK = 0x00000f00,
-    /* 1 bit reserved */
-    IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK = 0x000fe000,
-    IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM = 0x00100000,
-    IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK = 0x00600000,
-    IWL_RX_PHY_DATA0_HE_PE_DISAMBIG = 0x00800000,
-    IWL_RX_PHY_DATA0_HE_DOPPLER = 0x01000000,
-    /* 6 bits reserved */
-    IWL_RX_PHY_DATA0_HE_DELIM_EOF = 0x80000000,
+  /* info type: HE any */
+  IWL_RX_PHY_DATA0_HE_BEAM_CHNG = 0x00000001,
+  IWL_RX_PHY_DATA0_HE_UPLINK = 0x00000002,
+  IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK = 0x000000fc,
+  IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK = 0x00000f00,
+  /* 1 bit reserved */
+  IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK = 0x000fe000,
+  IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM = 0x00100000,
+  IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK = 0x00600000,
+  IWL_RX_PHY_DATA0_HE_PE_DISAMBIG = 0x00800000,
+  IWL_RX_PHY_DATA0_HE_DOPPLER = 0x01000000,
+  /* 6 bits reserved */
+  IWL_RX_PHY_DATA0_HE_DELIM_EOF = 0x80000000,
 };
 
 enum iwl_rx_phy_info_type {
-    IWL_RX_PHY_INFO_TYPE_NONE = 0,
-    IWL_RX_PHY_INFO_TYPE_CCK = 1,
-    IWL_RX_PHY_INFO_TYPE_OFDM_LGCY = 2,
-    IWL_RX_PHY_INFO_TYPE_HT = 3,
-    IWL_RX_PHY_INFO_TYPE_VHT_SU = 4,
-    IWL_RX_PHY_INFO_TYPE_VHT_MU = 5,
-    IWL_RX_PHY_INFO_TYPE_HE_SU = 6,
-    IWL_RX_PHY_INFO_TYPE_HE_MU = 7,
-    IWL_RX_PHY_INFO_TYPE_HE_TB = 8,
-    IWL_RX_PHY_INFO_TYPE_HE_MU_EXT = 9,
-    IWL_RX_PHY_INFO_TYPE_HE_TB_EXT = 10,
+  IWL_RX_PHY_INFO_TYPE_NONE = 0,
+  IWL_RX_PHY_INFO_TYPE_CCK = 1,
+  IWL_RX_PHY_INFO_TYPE_OFDM_LGCY = 2,
+  IWL_RX_PHY_INFO_TYPE_HT = 3,
+  IWL_RX_PHY_INFO_TYPE_VHT_SU = 4,
+  IWL_RX_PHY_INFO_TYPE_VHT_MU = 5,
+  IWL_RX_PHY_INFO_TYPE_HE_SU = 6,
+  IWL_RX_PHY_INFO_TYPE_HE_MU = 7,
+  IWL_RX_PHY_INFO_TYPE_HE_TB = 8,
+  IWL_RX_PHY_INFO_TYPE_HE_MU_EXT = 9,
+  IWL_RX_PHY_INFO_TYPE_HE_TB_EXT = 10,
 };
 
 /* TSF overload high dword */
 enum iwl_rx_phy_data1 {
-    /*
-     * check this first - if TSF overload is set,
-     * see &enum iwl_rx_phy_info_type
-     */
-    IWL_RX_PHY_DATA1_INFO_TYPE_MASK = 0xf0000000,
+  /*
+   * check this first - if TSF overload is set,
+   * see &enum iwl_rx_phy_info_type
+   */
+  IWL_RX_PHY_DATA1_INFO_TYPE_MASK = 0xf0000000,
 
-    /* info type: HT/VHT/HE any */
-    IWL_RX_PHY_DATA1_LSIG_LEN_MASK = 0x0fff0000,
+  /* info type: HT/VHT/HE any */
+  IWL_RX_PHY_DATA1_LSIG_LEN_MASK = 0x0fff0000,
 
-    /* info type: HE MU/MU-EXT */
-    IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION = 0x00000001,
-    IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x0000001e,
+  /* info type: HE MU/MU-EXT */
+  IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION = 0x00000001,
+  IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x0000001e,
 
-    /* info type: HE any */
-    IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK = 0x000000e0,
-    IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80 = 0x00000100,
-    /* trigger encoded */
-    IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK = 0x0000fe00,
+  /* info type: HE any */
+  IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK = 0x000000e0,
+  IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80 = 0x00000100,
+  /* trigger encoded */
+  IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK = 0x0000fe00,
 
-    /* info type: HE TB/TX-EXT */
-    IWL_RX_PHY_DATA1_HE_TB_PILOT_TYPE = 0x00000001,
-    IWL_RX_PHY_DATA1_HE_TB_LOW_SS_MASK = 0x0000000e,
+  /* info type: HE TB/TX-EXT */
+  IWL_RX_PHY_DATA1_HE_TB_PILOT_TYPE = 0x00000001,
+  IWL_RX_PHY_DATA1_HE_TB_LOW_SS_MASK = 0x0000000e,
 };
 
 /* goes into Metadata DW 7 */
 enum iwl_rx_phy_data2 {
-    /* info type: HE MU-EXT */
-    /* the a1/a2/... is what the PHY/firmware calls the values */
-    IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0 = 0x000000ff, /* a1 */
-    IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2 = 0x0000ff00, /* a2 */
-    IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0 = 0x00ff0000, /* b1 */
-    IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2 = 0xff000000, /* b2 */
+  /* info type: HE MU-EXT */
+  /* the a1/a2/... is what the PHY/firmware calls the values */
+  IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0 = 0x000000ff, /* a1 */
+  IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2 = 0x0000ff00, /* a2 */
+  IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0 = 0x00ff0000, /* b1 */
+  IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2 = 0xff000000, /* b2 */
 
-    /* info type: HE TB-EXT */
-    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1 = 0x0000000f,
-    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2 = 0x000000f0,
-    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3 = 0x00000f00,
-    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4 = 0x0000f000,
+  /* info type: HE TB-EXT */
+  IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1 = 0x0000000f,
+  IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2 = 0x000000f0,
+  IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3 = 0x00000f00,
+  IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4 = 0x0000f000,
 };
 
 /* goes into Metadata DW 8 */
 enum iwl_rx_phy_data3 {
-    /* info type: HE MU-EXT */
-    IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */
-    IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3 = 0x0000ff00, /* c2 */
-    IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1 = 0x00ff0000, /* d1 */
-    IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3 = 0xff000000, /* d2 */
+  /* info type: HE MU-EXT */
+  IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */
+  IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3 = 0x0000ff00, /* c2 */
+  IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1 = 0x00ff0000, /* d1 */
+  IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3 = 0xff000000, /* d2 */
 };
 
 /* goes into Metadata DW 4 high 16 bits */
 enum iwl_rx_phy_data4 {
-    /* info type: HE MU-EXT */
-    IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU = 0x0001,
-    IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU = 0x0002,
-    IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK = 0x0004,
-    IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK = 0x0008,
-    IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK = 0x00f0,
-    IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM = 0x0100,
-    IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600,
+  /* info type: HE MU-EXT */
+  IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU = 0x0001,
+  IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU = 0x0002,
+  IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK = 0x0004,
+  IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK = 0x0008,
+  IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK = 0x00f0,
+  IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM = 0x0100,
+  IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600,
 };
 
 /**
  * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor
  */
 struct iwl_rx_mpdu_desc_v1 {
-    /* DW7 - carries rss_hash only when rpa_en == 1 */
-    union {
-        /**
-         * @rss_hash: RSS hash value
-         */
-        __le32 rss_hash;
+  /* DW7 - carries rss_hash only when rpa_en == 1 */
+  union {
+    /**
+     * @rss_hash: RSS hash value
+     */
+    __le32 rss_hash;
 
-        /**
-         * @phy_data2: depends on info type (see @phy_data1)
-         */
-        __le32 phy_data2;
+    /**
+     * @phy_data2: depends on info type (see @phy_data1)
+     */
+    __le32 phy_data2;
+  };
+
+  /* DW8 - carries filter_match only when rpa_en == 1 */
+  union {
+    /**
+     * @filter_match: filter match value
+     */
+    __le32 filter_match;
+
+    /**
+     * @phy_data3: depends on info type (see @phy_data1)
+     */
+    __le32 phy_data3;
+  };
+
+  /* DW9 */
+  /**
+   * @rate_n_flags: RX rate/flags encoding
+   */
+  __le32 rate_n_flags;
+  /* DW10 */
+  /**
+   * @energy_a: energy chain A
+   */
+  uint8_t energy_a;
+  /**
+   * @energy_b: energy chain B
+   */
+  uint8_t energy_b;
+  /**
+   * @channel: channel number
+   */
+  uint8_t channel;
+  /**
+   * @mac_context: MAC context mask
+   */
+  uint8_t mac_context;
+  /* DW11 */
+  /**
+   * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+   */
+  __le32 gp2_on_air_rise;
+  /* DW12 & DW13 */
+  union {
+    /**
+     * @tsf_on_air_rise:
+     * TSF value on air rise (INA), only valid if
+     * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+     */
+    __le64 tsf_on_air_rise;
+
+    struct {
+      /**
+       * @phy_data0: depends on info_type, see @phy_data1
+       */
+      __le32 phy_data0;
+      /**
+       * @phy_data1: valid only if
+       * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
+       * see &enum iwl_rx_phy_data1.
+       */
+      __le32 phy_data1;
     };
-
-    /* DW8 - carries filter_match only when rpa_en == 1 */
-    union {
-        /**
-         * @filter_match: filter match value
-         */
-        __le32 filter_match;
-
-        /**
-         * @phy_data3: depends on info type (see @phy_data1)
-         */
-        __le32 phy_data3;
-    };
-
-    /* DW9 */
-    /**
-     * @rate_n_flags: RX rate/flags encoding
-     */
-    __le32 rate_n_flags;
-    /* DW10 */
-    /**
-     * @energy_a: energy chain A
-     */
-    uint8_t energy_a;
-    /**
-     * @energy_b: energy chain B
-     */
-    uint8_t energy_b;
-    /**
-     * @channel: channel number
-     */
-    uint8_t channel;
-    /**
-     * @mac_context: MAC context mask
-     */
-    uint8_t mac_context;
-    /* DW11 */
-    /**
-     * @gp2_on_air_rise: GP2 timer value on air rise (INA)
-     */
-    __le32 gp2_on_air_rise;
-    /* DW12 & DW13 */
-    union {
-        /**
-         * @tsf_on_air_rise:
-         * TSF value on air rise (INA), only valid if
-         * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
-         */
-        __le64 tsf_on_air_rise;
-
-        struct {
-            /**
-             * @phy_data0: depends on info_type, see @phy_data1
-             */
-            __le32 phy_data0;
-            /**
-             * @phy_data1: valid only if
-             * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
-             * see &enum iwl_rx_phy_data1.
-             */
-            __le32 phy_data1;
-        };
-    };
+  };
 } __packed;
 
 /**
  * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor
  */
 struct iwl_rx_mpdu_desc_v3 {
-    /* DW7 - carries filter_match only when rpa_en == 1 */
-    union {
-        /**
-         * @filter_match: filter match value
-         */
-        __le32 filter_match;
+  /* DW7 - carries filter_match only when rpa_en == 1 */
+  union {
+    /**
+     * @filter_match: filter match value
+     */
+    __le32 filter_match;
 
-        /**
-         * @phy_data2: depends on info type (see @phy_data1)
-         */
-        __le32 phy_data2;
+    /**
+     * @phy_data2: depends on info type (see @phy_data1)
+     */
+    __le32 phy_data2;
+  };
+
+  /* DW8 - carries rss_hash only when rpa_en == 1 */
+  union {
+    /**
+     * @rss_hash: RSS hash value
+     */
+    __le32 rss_hash;
+
+    /**
+     * @phy_data3: depends on info type (see @phy_data1)
+     */
+    __le32 phy_data3;
+  };
+  /* DW9 */
+  /**
+   * @partial_hash: 31:0 ip/tcp header hash
+   *  w/o some fields (such as IP SRC addr)
+   */
+  __le32 partial_hash;
+  /* DW10 */
+  /**
+   * @raw_xsum: raw xsum value
+   */
+  __le32 raw_xsum;
+  /* DW11 */
+  /**
+   * @rate_n_flags: RX rate/flags encoding
+   */
+  __le32 rate_n_flags;
+  /* DW12 */
+  /**
+   * @energy_a: energy chain A
+   */
+  uint8_t energy_a;
+  /**
+   * @energy_b: energy chain B
+   */
+  uint8_t energy_b;
+  /**
+   * @channel: channel number
+   */
+  uint8_t channel;
+  /**
+   * @mac_context: MAC context mask
+   */
+  uint8_t mac_context;
+  /* DW13 */
+  /**
+   * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+   */
+  __le32 gp2_on_air_rise;
+  /* DW14 & DW15 */
+  union {
+    /**
+     * @tsf_on_air_rise:
+     * TSF value on air rise (INA), only valid if
+     * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+     */
+    __le64 tsf_on_air_rise;
+
+    struct {
+      /**
+       * @phy_data0: depends on info_type, see @phy_data1
+       */
+      __le32 phy_data0;
+      /**
+       * @phy_data1: valid only if
+       * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
+       * see &enum iwl_rx_phy_data1.
+       */
+      __le32 phy_data1;
     };
-
-    /* DW8 - carries rss_hash only when rpa_en == 1 */
-    union {
-        /**
-         * @rss_hash: RSS hash value
-         */
-        __le32 rss_hash;
-
-        /**
-         * @phy_data3: depends on info type (see @phy_data1)
-         */
-        __le32 phy_data3;
-    };
-    /* DW9 */
-    /**
-     * @partial_hash: 31:0 ip/tcp header hash
-     *  w/o some fields (such as IP SRC addr)
-     */
-    __le32 partial_hash;
-    /* DW10 */
-    /**
-     * @raw_xsum: raw xsum value
-     */
-    __le32 raw_xsum;
-    /* DW11 */
-    /**
-     * @rate_n_flags: RX rate/flags encoding
-     */
-    __le32 rate_n_flags;
-    /* DW12 */
-    /**
-     * @energy_a: energy chain A
-     */
-    uint8_t energy_a;
-    /**
-     * @energy_b: energy chain B
-     */
-    uint8_t energy_b;
-    /**
-     * @channel: channel number
-     */
-    uint8_t channel;
-    /**
-     * @mac_context: MAC context mask
-     */
-    uint8_t mac_context;
-    /* DW13 */
-    /**
-     * @gp2_on_air_rise: GP2 timer value on air rise (INA)
-     */
-    __le32 gp2_on_air_rise;
-    /* DW14 & DW15 */
-    union {
-        /**
-         * @tsf_on_air_rise:
-         * TSF value on air rise (INA), only valid if
-         * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
-         */
-        __le64 tsf_on_air_rise;
-
-        struct {
-            /**
-             * @phy_data0: depends on info_type, see @phy_data1
-             */
-            __le32 phy_data0;
-            /**
-             * @phy_data1: valid only if
-             * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
-             * see &enum iwl_rx_phy_data1.
-             */
-            __le32 phy_data1;
-        };
-    };
-    /* DW16 & DW17 */
-    /**
-     * @reserved: reserved
-     */
-    __le32 reserved[2];
+  };
+  /* DW16 & DW17 */
+  /**
+   * @reserved: reserved
+   */
+  __le32 reserved[2];
 } __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
 
 /**
  * struct iwl_rx_mpdu_desc - RX MPDU descriptor
  */
 struct iwl_rx_mpdu_desc {
-    /* DW2 */
-    /**
-     * @mpdu_len: MPDU length
-     */
-    __le16 mpdu_len;
-    /**
-     * @mac_flags1: &enum iwl_rx_mpdu_mac_flags1
-     */
-    uint8_t mac_flags1;
-    /**
-     * @mac_flags2: &enum iwl_rx_mpdu_mac_flags2
-     */
-    uint8_t mac_flags2;
-    /* DW3 */
-    /**
-     * @amsdu_info: &enum iwl_rx_mpdu_amsdu_info
-     */
-    uint8_t amsdu_info;
-    /**
-     * @phy_info: &enum iwl_rx_mpdu_phy_info
-     */
-    __le16 phy_info;
-    /**
-     * @mac_phy_idx: MAC/PHY index
-     */
-    uint8_t mac_phy_idx;
-    /* DW4 - carries csum data only when rpa_en == 1 */
-    /**
-     * @raw_csum: raw checksum (alledgedly unreliable)
-     */
-    __le16 raw_csum;
+  /* DW2 */
+  /**
+   * @mpdu_len: MPDU length
+   */
+  __le16 mpdu_len;
+  /**
+   * @mac_flags1: &enum iwl_rx_mpdu_mac_flags1
+   */
+  uint8_t mac_flags1;
+  /**
+   * @mac_flags2: &enum iwl_rx_mpdu_mac_flags2
+   */
+  uint8_t mac_flags2;
+  /* DW3 */
+  /**
+   * @amsdu_info: &enum iwl_rx_mpdu_amsdu_info
+   */
+  uint8_t amsdu_info;
+  /**
+   * @phy_info: &enum iwl_rx_mpdu_phy_info
+   */
+  __le16 phy_info;
+  /**
+   * @mac_phy_idx: MAC/PHY index
+   */
+  uint8_t mac_phy_idx;
+  /* DW4 - carries csum data only when rpa_en == 1 */
+  /**
+   * @raw_csum: raw checksum (allegedly unreliable)
+   */
+  __le16 raw_csum;
 
-    union {
-        /**
-         * @l3l4_flags: &enum iwl_rx_l3l4_flags
-         */
-        __le16 l3l4_flags;
+  union {
+    /**
+     * @l3l4_flags: &enum iwl_rx_l3l4_flags
+     */
+    __le16 l3l4_flags;
 
-        /**
-         * @phy_data4: depends on info type, see phy_data1
-         */
-        __le16 phy_data4;
-    };
-    /* DW5 */
     /**
-     * @status: &enum iwl_rx_mpdu_status
+     * @phy_data4: depends on info type, see phy_data1
      */
-    __le16 status;
-    /**
-     * @hash_filter: hash filter value
-     */
-    uint8_t hash_filter;
-    /**
-     * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags
-     */
-    uint8_t sta_id_flags;
-    /* DW6 */
-    /**
-     * @reorder_data: &enum iwl_rx_mpdu_reorder_data
-     */
-    __le32 reorder_data;
+    __le16 phy_data4;
+  };
+  /* DW5 */
+  /**
+   * @status: &enum iwl_rx_mpdu_status
+   */
+  __le16 status;
+  /**
+   * @hash_filter: hash filter value
+   */
+  uint8_t hash_filter;
+  /**
+   * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags
+   */
+  uint8_t sta_id_flags;
+  /* DW6 */
+  /**
+   * @reorder_data: &enum iwl_rx_mpdu_reorder_data
+   */
+  __le32 reorder_data;
 
-    union {
-        struct iwl_rx_mpdu_desc_v1 v1;
-        struct iwl_rx_mpdu_desc_v3 v3;
-    };
+  union {
+    struct iwl_rx_mpdu_desc_v1 v1;
+    struct iwl_rx_mpdu_desc_v3 v3;
+  };
 } __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
 
 #define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
@@ -709,13 +709,13 @@
  *  for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT
  */
 struct iwl_rx_no_data {
-    __le32 info;
-    __le32 rssi;
-    __le32 on_air_rise_time;
-    __le32 fr_time;
-    __le32 rate;
-    __le32 phy_info[2];
-    __le32 rx_vec[2];
+  __le32 info;
+  __le32 rssi;
+  __le32 on_air_rise_time;
+  __le32 fr_time;
+  __le32 rate;
+  __le32 phy_info[2];
+  __le32 rx_vec[2];
 } __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */
 
 /**
@@ -730,12 +730,12 @@
  * @IWL_CD_STTS_ERROR: general error (RX)
  */
 enum iwl_completion_desc_transfer_status {
-    IWL_CD_STTS_UNUSED,
-    IWL_CD_STTS_UNUSED_2,
-    IWL_CD_STTS_END_TRANSFER,
-    IWL_CD_STTS_OVERFLOW,
-    IWL_CD_STTS_ABORTED,
-    IWL_CD_STTS_ERROR,
+  IWL_CD_STTS_UNUSED,
+  IWL_CD_STTS_UNUSED_2,
+  IWL_CD_STTS_END_TRANSFER,
+  IWL_CD_STTS_OVERFLOW,
+  IWL_CD_STTS_ABORTED,
+  IWL_CD_STTS_ERROR,
 };
 
 /**
@@ -757,36 +757,36 @@
  * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
  */
 enum iwl_completion_desc_wifi_status {
-    IWL_CD_STTS_VALID,
-    IWL_CD_STTS_FCS_ERR,
-    IWL_CD_STTS_SEC_KEY_ERR,
-    IWL_CD_STTS_DECRYPTION_ERR,
-    IWL_CD_STTS_DUP,
-    IWL_CD_STTS_ICV_MIC_ERR,
-    IWL_CD_STTS_INTERNAL_SNAP_ERR,
-    IWL_CD_STTS_SEC_PORT_FAIL,
-    IWL_CD_STTS_BA_OLD_SN,
-    IWL_CD_STTS_QOS_NULL,
-    IWL_CD_STTS_MAC_HDR_ERR,
-    IWL_CD_STTS_MAX_RETRANS,
-    IWL_CD_STTS_EX_LIFETIME,
-    IWL_CD_STTS_NOT_USED,
-    IWL_CD_STTS_REPLAY_ERR,
+  IWL_CD_STTS_VALID,
+  IWL_CD_STTS_FCS_ERR,
+  IWL_CD_STTS_SEC_KEY_ERR,
+  IWL_CD_STTS_DECRYPTION_ERR,
+  IWL_CD_STTS_DUP,
+  IWL_CD_STTS_ICV_MIC_ERR,
+  IWL_CD_STTS_INTERNAL_SNAP_ERR,
+  IWL_CD_STTS_SEC_PORT_FAIL,
+  IWL_CD_STTS_BA_OLD_SN,
+  IWL_CD_STTS_QOS_NULL,
+  IWL_CD_STTS_MAC_HDR_ERR,
+  IWL_CD_STTS_MAX_RETRANS,
+  IWL_CD_STTS_EX_LIFETIME,
+  IWL_CD_STTS_NOT_USED,
+  IWL_CD_STTS_REPLAY_ERR,
 };
 
 struct iwl_frame_release {
-    uint8_t baid;
-    uint8_t reserved;
-    __le16 nssn;
+  uint8_t baid;
+  uint8_t reserved;
+  __le16 nssn;
 };
 
 enum iwl_rss_hash_func_en {
-    IWL_RSS_HASH_TYPE_IPV4_TCP,
-    IWL_RSS_HASH_TYPE_IPV4_UDP,
-    IWL_RSS_HASH_TYPE_IPV4_PAYLOAD,
-    IWL_RSS_HASH_TYPE_IPV6_TCP,
-    IWL_RSS_HASH_TYPE_IPV6_UDP,
-    IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+  IWL_RSS_HASH_TYPE_IPV4_TCP,
+  IWL_RSS_HASH_TYPE_IPV4_UDP,
+  IWL_RSS_HASH_TYPE_IPV4_PAYLOAD,
+  IWL_RSS_HASH_TYPE_IPV6_TCP,
+  IWL_RSS_HASH_TYPE_IPV6_UDP,
+  IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
 };
 
 #define IWL_RSS_HASH_KEY_CNT 10
@@ -803,11 +803,11 @@
  * @indirection_table: indirection table
  */
 struct iwl_rss_config_cmd {
-    __le32 flags;
-    uint8_t hash_mask;
-    uint8_t reserved[3];
-    __le32 secret_key[IWL_RSS_HASH_KEY_CNT];
-    uint8_t indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
+  __le32 flags;
+  uint8_t hash_mask;
+  uint8_t reserved[3];
+  __le32 secret_key[IWL_RSS_HASH_KEY_CNT];
+  uint8_t indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
 } __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */
 
 #define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128
@@ -823,10 +823,10 @@
  * @payload: data to send to rx queues
  */
 struct iwl_rxq_sync_cmd {
-    __le32 flags;
-    __le32 rxq_mask;
-    __le32 count;
-    uint8_t payload[];
+  __le32 flags;
+  __le32 rxq_mask;
+  __le32 count;
+  uint8_t payload[];
 } __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
 
 /**
@@ -837,8 +837,8 @@
  * @payload: data to send to rx queues
  */
 struct iwl_rxq_sync_notification {
-    __le32 count;
-    uint8_t payload[];
+  __le32 count;
+  uint8_t payload[];
 } __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
 
 /**
@@ -848,8 +848,8 @@
  * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
  */
 enum iwl_mvm_rxq_notif_type {
-    IWL_MVM_RXQ_EMPTY,
-    IWL_MVM_RXQ_NOTIF_DEL_BA,
+  IWL_MVM_RXQ_EMPTY,
+  IWL_MVM_RXQ_NOTIF_DEL_BA,
 };
 
 /**
@@ -863,10 +863,10 @@
  * @data: payload
  */
 struct iwl_mvm_internal_rxq_notif {
-    uint16_t type;
-    uint16_t sync;
-    uint32_t cookie;
-    uint8_t data[0];
+  uint16_t type;
+  uint16_t sync;
+  uint32_t cookie;
+  uint8_t data[0];
 } __packed;
 
 /**
@@ -877,10 +877,10 @@
  * @IWL_MVM_PM_EVENT_PS_POLL: station sent PS-Poll
  */
 enum iwl_mvm_pm_event {
-    IWL_MVM_PM_EVENT_AWAKE,
-    IWL_MVM_PM_EVENT_ASLEEP,
-    IWL_MVM_PM_EVENT_UAPSD,
-    IWL_MVM_PM_EVENT_PS_POLL,
+  IWL_MVM_PM_EVENT_AWAKE,
+  IWL_MVM_PM_EVENT_ASLEEP,
+  IWL_MVM_PM_EVENT_UAPSD,
+  IWL_MVM_PM_EVENT_PS_POLL,
 }; /* PEER_PM_NTFY_API_E_VER_1 */
 
 /**
@@ -889,10 +889,10 @@
  * @type: the new powersave state, see &enum iwl_mvm_pm_event
  */
 struct iwl_mvm_pm_state_notification {
-    uint8_t sta_id;
-    uint8_t type;
-    /* private: */
-    __le16 reserved;
+  uint8_t sta_id;
+  uint8_t type;
+  /* private: */
+  __le16 reserved;
 } __packed; /* PEER_PM_NTFY_API_S_VER_1 */
 
 #define BA_WINDOW_STREAMS_MAX 16
@@ -909,10 +909,10 @@
  * @mpdu_rx_count: the number of received MPDUs since entering D0i3
  */
 struct iwl_ba_window_status_notif {
-    __le64 bitmap[BA_WINDOW_STREAMS_MAX];
-    __le16 ra_tid[BA_WINDOW_STREAMS_MAX];
-    __le32 start_seq_num[BA_WINDOW_STREAMS_MAX];
-    __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
+  __le64 bitmap[BA_WINDOW_STREAMS_MAX];
+  __le16 ra_tid[BA_WINDOW_STREAMS_MAX];
+  __le32 start_seq_num[BA_WINDOW_STREAMS_MAX];
+  __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
 } __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
 
 /**
@@ -926,13 +926,13 @@
  * @fr_bd_wid: Initial index of the free table
  */
 struct iwl_rfh_queue_data {
-    uint8_t q_num;
-    uint8_t enable;
-    __le16 reserved;
-    __le64 urbd_stts_wrptr;
-    __le64 fr_bd_cb;
-    __le64 ur_bd_cb;
-    __le32 fr_bd_wid;
+  uint8_t q_num;
+  uint8_t enable;
+  __le16 reserved;
+  __le64 urbd_stts_wrptr;
+  __le64 fr_bd_cb;
+  __le64 ur_bd_cb;
+  __le32 fr_bd_wid;
 } __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */
 
 /**
@@ -942,9 +942,9 @@
  * @data: DMA addresses per-queue
  */
 struct iwl_rfh_queue_config {
-    uint8_t num_queues;
-    uint8_t reserved[3];
-    struct iwl_rfh_queue_data data[];
+  uint8_t num_queues;
+  uint8_t reserved[3];
+  struct iwl_rfh_queue_data data[];
 } __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_RX_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/scan.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/scan.h
index 0bc6a12..ab94864 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/scan.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/scan.h
@@ -59,9 +59,9 @@
  * @ssid: element (SSID) data
  */
 struct iwl_ssid_ie {
-    uint8_t id;
-    uint8_t len;
-    uint8_t ssid[IEEE80211_MAX_SSID_LEN];
+  uint8_t id;
+  uint8_t len;
+  uint8_t ssid[IEEE80211_MAX_SSID_LEN];
 } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
 
 /* scan offload */
@@ -82,9 +82,9 @@
 #define IWL_SCAN_MAX_NUM_OF_CHANNELS 52
 
 enum scan_framework_client {
-    SCAN_CLIENT_SCHED_SCAN = BIT(0),
-    SCAN_CLIENT_NETDETECT = BIT(1),
-    SCAN_CLIENT_ASSET_TRACKING = BIT(2),
+  SCAN_CLIENT_SCHED_SCAN = BIT(0),
+  SCAN_CLIENT_NETDETECT = BIT(1),
+  SCAN_CLIENT_ASSET_TRACKING = BIT(2),
 };
 
 /**
@@ -94,21 +94,21 @@
  * @client_bitmap: clients ignore this entry  - enum scan_framework_client
  */
 struct iwl_scan_offload_blacklist {
-    uint8_t ssid[ETH_ALEN];
-    uint8_t reported_rssi;
-    uint8_t client_bitmap;
+  uint8_t ssid[ETH_ALEN];
+  uint8_t reported_rssi;
+  uint8_t client_bitmap;
 } __packed;
 
 enum iwl_scan_offload_network_type {
-    IWL_NETWORK_TYPE_BSS = 1,
-    IWL_NETWORK_TYPE_IBSS = 2,
-    IWL_NETWORK_TYPE_ANY = 3,
+  IWL_NETWORK_TYPE_BSS = 1,
+  IWL_NETWORK_TYPE_IBSS = 2,
+  IWL_NETWORK_TYPE_ANY = 3,
 };
 
 enum iwl_scan_offload_band_selection {
-    IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4,
-    IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8,
-    IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc,
+  IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4,
+  IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8,
+  IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc,
 };
 
 /**
@@ -122,13 +122,13 @@
  * @reserved:       reserved
  */
 struct iwl_scan_offload_profile {
-    uint8_t ssid_index;
-    uint8_t unicast_cipher;
-    uint8_t auth_alg;
-    uint8_t network_type;
-    uint8_t band_selection;
-    uint8_t client_bitmap;
-    uint8_t reserved[2];
+  uint8_t ssid_index;
+  uint8_t unicast_cipher;
+  uint8_t auth_alg;
+  uint8_t network_type;
+  uint8_t band_selection;
+  uint8_t client_bitmap;
+  uint8_t reserved[2];
 } __packed;
 
 /**
@@ -143,14 +143,14 @@
  * @reserved:       reserved
  */
 struct iwl_scan_offload_profile_cfg {
-    struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
-    uint8_t blacklist_len;
-    uint8_t num_profiles;
-    uint8_t match_notify;
-    uint8_t pass_match;
-    uint8_t active_clients;
-    uint8_t any_beacon_notify;
-    uint8_t reserved[2];
+  struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
+  uint8_t blacklist_len;
+  uint8_t num_profiles;
+  uint8_t match_notify;
+  uint8_t pass_match;
+  uint8_t active_clients;
+  uint8_t any_beacon_notify;
+  uint8_t reserved[2];
 } __packed;
 
 /**
@@ -160,21 +160,21 @@
  * @full_scan_mul:  number of partial scans before each full scan
  */
 struct iwl_scan_schedule_lmac {
-    __le16 delay;
-    uint8_t iterations;
-    uint8_t full_scan_mul;
+  __le16 delay;
+  uint8_t iterations;
+  uint8_t full_scan_mul;
 } __packed; /* SCAN_SCHEDULE_API_S */
 
 enum iwl_scan_offload_complete_status {
-    IWL_SCAN_OFFLOAD_COMPLETED = 1,
-    IWL_SCAN_OFFLOAD_ABORTED = 2,
+  IWL_SCAN_OFFLOAD_COMPLETED = 1,
+  IWL_SCAN_OFFLOAD_ABORTED = 2,
 };
 
 enum iwl_scan_ebs_status {
-    IWL_SCAN_EBS_SUCCESS,
-    IWL_SCAN_EBS_FAILED,
-    IWL_SCAN_EBS_CHAN_NOT_FOUND,
-    IWL_SCAN_EBS_INACTIVE,
+  IWL_SCAN_EBS_SUCCESS,
+  IWL_SCAN_EBS_FAILED,
+  IWL_SCAN_EBS_CHAN_NOT_FOUND,
+  IWL_SCAN_EBS_INACTIVE,
 };
 
 /**
@@ -186,15 +186,15 @@
  * @reserved: for alignment and future use
  */
 struct iwl_scan_req_tx_cmd {
-    __le32 tx_flags;
-    __le32 rate_n_flags;
-    uint8_t sta_id;
-    uint8_t reserved[3];
+  __le32 tx_flags;
+  __le32 rate_n_flags;
+  uint8_t sta_id;
+  uint8_t reserved[3];
 } __packed;
 
 enum iwl_scan_channel_flags_lmac {
-    IWL_UNIFIED_SCAN_CHANNEL_FULL = BIT(27),
-    IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = BIT(28),
+  IWL_UNIFIED_SCAN_CHANNEL_FULL = BIT(27),
+  IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = BIT(28),
 };
 
 /**
@@ -206,10 +206,10 @@
  * @iter_interval:  interval in seconds between iterations on one channel
  */
 struct iwl_scan_channel_cfg_lmac {
-    __le32 flags;
-    __le16 channel_num;
-    __le16 iter_count;
-    __le32 iter_interval;
+  __le32 flags;
+  __le16 channel_num;
+  __le16 iter_count;
+  __le32 iter_interval;
 } __packed;
 
 /*
@@ -218,8 +218,8 @@
  * @len: length of the segment
  */
 struct iwl_scan_probe_segment {
-    __le16 offset;
-    __le16 len;
+  __le16 offset;
+  __le16 len;
 } __packed;
 
 /* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
@@ -229,17 +229,17 @@
  * @buf: raw data block
  */
 struct iwl_scan_probe_req {
-    struct iwl_scan_probe_segment mac_header;
-    struct iwl_scan_probe_segment band_data[2];
-    struct iwl_scan_probe_segment common_data;
-    uint8_t buf[SCAN_OFFLOAD_PROBE_REQ_SIZE];
+  struct iwl_scan_probe_segment mac_header;
+  struct iwl_scan_probe_segment band_data[2];
+  struct iwl_scan_probe_segment common_data;
+  uint8_t buf[SCAN_OFFLOAD_PROBE_REQ_SIZE];
 } __packed;
 
 enum iwl_scan_channel_flags {
-    IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0),
-    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1),
-    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2),
-    IWL_SCAN_CHANNEL_FLAG_EBS_FRAG = BIT(3),
+  IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0),
+  IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1),
+  IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2),
+  IWL_SCAN_CHANNEL_FLAG_EBS_FRAG = BIT(3),
 };
 
 /* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
@@ -250,8 +250,8 @@
  *  2 - every second scan will be full scan(and so on).
  */
 struct iwl_scan_channel_opt {
-    __le16 flags;
-    __le16 non_ebs_ratio;
+  __le16 flags;
+  __le16 non_ebs_ratio;
 } __packed;
 
 /**
@@ -270,32 +270,32 @@
  * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
  */
 enum iwl_mvm_lmac_scan_flags {
-    IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
-    IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = BIT(1),
-    IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = BIT(2),
-    IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3),
-    IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
-    IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
-    IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
-    IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL = BIT(7),
-    IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
+  IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
+  IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = BIT(1),
+  IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = BIT(2),
+  IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3),
+  IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
+  IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
+  IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
+  IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL = BIT(7),
+  IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
 };
 
 enum iwl_scan_priority {
-    IWL_SCAN_PRIORITY_LOW,
-    IWL_SCAN_PRIORITY_MEDIUM,
-    IWL_SCAN_PRIORITY_HIGH,
+  IWL_SCAN_PRIORITY_LOW,
+  IWL_SCAN_PRIORITY_MEDIUM,
+  IWL_SCAN_PRIORITY_HIGH,
 };
 
 enum iwl_scan_priority_ext {
-    IWL_SCAN_PRIORITY_EXT_0_LOWEST,
-    IWL_SCAN_PRIORITY_EXT_1,
-    IWL_SCAN_PRIORITY_EXT_2,
-    IWL_SCAN_PRIORITY_EXT_3,
-    IWL_SCAN_PRIORITY_EXT_4,
-    IWL_SCAN_PRIORITY_EXT_5,
-    IWL_SCAN_PRIORITY_EXT_6,
-    IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
+  IWL_SCAN_PRIORITY_EXT_0_LOWEST,
+  IWL_SCAN_PRIORITY_EXT_1,
+  IWL_SCAN_PRIORITY_EXT_2,
+  IWL_SCAN_PRIORITY_EXT_3,
+  IWL_SCAN_PRIORITY_EXT_4,
+  IWL_SCAN_PRIORITY_EXT_5,
+  IWL_SCAN_PRIORITY_EXT_6,
+  IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
 };
 
 /**
@@ -324,30 +324,30 @@
  * @data: channel configuration and probe request packet.
  */
 struct iwl_scan_req_lmac {
-    /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
-    __le32 reserved1;
-    uint8_t n_channels;
-    uint8_t active_dwell;
-    uint8_t passive_dwell;
-    uint8_t fragmented_dwell;
-    uint8_t extended_dwell;
-    uint8_t reserved2;
-    __le16 rx_chain_select;
-    __le32 scan_flags;
-    __le32 max_out_time;
-    __le32 suspend_time;
-    /* RX_ON_FLAGS_API_S_VER_1 */
-    __le32 flags;
-    __le32 filter_flags;
-    struct iwl_scan_req_tx_cmd tx_cmd[2];
-    struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
-    __le32 scan_prio;
-    /* SCAN_REQ_PERIODIC_PARAMS_API_S */
-    __le32 iter_num;
-    __le32 delay;
-    struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS];
-    struct iwl_scan_channel_opt channel_opt[2];
-    uint8_t data[];
+  /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
+  __le32 reserved1;
+  uint8_t n_channels;
+  uint8_t active_dwell;
+  uint8_t passive_dwell;
+  uint8_t fragmented_dwell;
+  uint8_t extended_dwell;
+  uint8_t reserved2;
+  __le16 rx_chain_select;
+  __le32 scan_flags;
+  __le32 max_out_time;
+  __le32 suspend_time;
+  /* RX_ON_FLAGS_API_S_VER_1 */
+  __le32 flags;
+  __le32 filter_flags;
+  struct iwl_scan_req_tx_cmd tx_cmd[2];
+  struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+  __le32 scan_prio;
+  /* SCAN_REQ_PERIODIC_PARAMS_API_S */
+  __le32 iter_num;
+  __le32 delay;
+  struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS];
+  struct iwl_scan_channel_opt channel_opt[2];
+  uint8_t data[];
 } __packed;
 
 /**
@@ -360,11 +360,11 @@
  * @duration: duration spent in channel, in usecs
  */
 struct iwl_scan_results_notif {
-    uint8_t channel;
-    uint8_t band;
-    uint8_t probe_status;
-    uint8_t num_probe_not_sent;
-    __le32 duration;
+  uint8_t channel;
+  uint8_t band;
+  uint8_t probe_status;
+  uint8_t num_probe_not_sent;
+  __le32 duration;
 } __packed;
 
 /**
@@ -379,13 +379,13 @@
  * @results: an array of scan results, only "scanned_channels" of them are valid
  */
 struct iwl_lmac_scan_complete_notif {
-    uint8_t scanned_channels;
-    uint8_t status;
-    uint8_t bt_status;
-    uint8_t last_channel;
-    __le32 tsf_low;
-    __le32 tsf_high;
-    struct iwl_scan_results_notif results[];
+  uint8_t scanned_channels;
+  uint8_t status;
+  uint8_t bt_status;
+  uint8_t last_channel;
+  __le32 tsf_low;
+  __le32 tsf_high;
+  struct iwl_scan_results_notif results[];
 } __packed;
 
 /**
@@ -398,12 +398,12 @@
  * @reserved: reserved
  */
 struct iwl_periodic_scan_complete {
-    uint8_t last_schedule_line;
-    uint8_t last_schedule_iteration;
-    uint8_t status;
-    uint8_t ebs_status;
-    __le32 time_after_last_iter;
-    __le32 reserved;
+  uint8_t last_schedule_line;
+  uint8_t last_schedule_iteration;
+  uint8_t status;
+  uint8_t ebs_status;
+  __le32 time_after_last_iter;
+  __le32 reserved;
 } __packed;
 
 /* UMAC Scan API */
@@ -415,56 +415,56 @@
 #define IWL_MVM_MAX_LMAC_SCANS 1
 
 enum scan_config_flags {
-    SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
-    SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1),
-    SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2),
-    SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3),
-    SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8),
-    SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9),
-    SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10),
-    SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11),
-    SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12),
-    SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13),
-    SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14),
-    SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15),
-    SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16),
-    SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17),
-    SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18),
-    SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19),
-    SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20),
-    SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21),
-    SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22),
-    SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23),
+  SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
+  SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1),
+  SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2),
+  SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3),
+  SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8),
+  SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9),
+  SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10),
+  SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11),
+  SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12),
+  SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13),
+  SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14),
+  SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15),
+  SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16),
+  SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17),
+  SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18),
+  SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19),
+  SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20),
+  SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21),
+  SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22),
+  SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23),
 
 /* Bits 26-31 are for num of channels in channel_array */
 #define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
 };
 
 enum scan_config_rates {
-    /* OFDM basic rates */
-    SCAN_CONFIG_RATE_6M = BIT(0),
-    SCAN_CONFIG_RATE_9M = BIT(1),
-    SCAN_CONFIG_RATE_12M = BIT(2),
-    SCAN_CONFIG_RATE_18M = BIT(3),
-    SCAN_CONFIG_RATE_24M = BIT(4),
-    SCAN_CONFIG_RATE_36M = BIT(5),
-    SCAN_CONFIG_RATE_48M = BIT(6),
-    SCAN_CONFIG_RATE_54M = BIT(7),
-    /* CCK basic rates */
-    SCAN_CONFIG_RATE_1M = BIT(8),
-    SCAN_CONFIG_RATE_2M = BIT(9),
-    SCAN_CONFIG_RATE_5M = BIT(10),
-    SCAN_CONFIG_RATE_11M = BIT(11),
+  /* OFDM basic rates */
+  SCAN_CONFIG_RATE_6M = BIT(0),
+  SCAN_CONFIG_RATE_9M = BIT(1),
+  SCAN_CONFIG_RATE_12M = BIT(2),
+  SCAN_CONFIG_RATE_18M = BIT(3),
+  SCAN_CONFIG_RATE_24M = BIT(4),
+  SCAN_CONFIG_RATE_36M = BIT(5),
+  SCAN_CONFIG_RATE_48M = BIT(6),
+  SCAN_CONFIG_RATE_54M = BIT(7),
+  /* CCK basic rates */
+  SCAN_CONFIG_RATE_1M = BIT(8),
+  SCAN_CONFIG_RATE_2M = BIT(9),
+  SCAN_CONFIG_RATE_5M = BIT(10),
+  SCAN_CONFIG_RATE_11M = BIT(11),
 
 /* Bits 16-27 are for supported rates */
 #define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)
 };
 
 enum iwl_channel_flags {
-    IWL_CHANNEL_FLAG_EBS = BIT(0),
-    IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1),
-    IWL_CHANNEL_FLAG_EBS_ADD = BIT(2),
-    IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3),
+  IWL_CHANNEL_FLAG_EBS = BIT(0),
+  IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1),
+  IWL_CHANNEL_FLAG_EBS_ADD = BIT(2),
+  IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3),
 };
 
 /**
@@ -475,10 +475,10 @@
  * @extended:       default dwell time for channels 1, 6 and 11
  */
 struct iwl_scan_dwell {
-    uint8_t active;
-    uint8_t passive;
-    uint8_t fragmented;
-    uint8_t extended;
+  uint8_t active;
+  uint8_t passive;
+  uint8_t fragmented;
+  uint8_t extended;
 } __packed;
 
 /**
@@ -497,17 +497,17 @@
  * @channel_array:      default supported channels
  */
 struct iwl_scan_config_v1 {
-    __le32 flags;
-    __le32 tx_chains;
-    __le32 rx_chains;
-    __le32 legacy_rates;
-    __le32 out_of_channel_time;
-    __le32 suspend_time;
-    struct iwl_scan_dwell dwell;
-    uint8_t mac_addr[ETH_ALEN];
-    uint8_t bcast_sta_id;
-    uint8_t channel_flags;
-    uint8_t channel_array[];
+  __le32 flags;
+  __le32 tx_chains;
+  __le32 rx_chains;
+  __le32 legacy_rates;
+  __le32 out_of_channel_time;
+  __le32 suspend_time;
+  struct iwl_scan_dwell dwell;
+  uint8_t mac_addr[ETH_ALEN];
+  uint8_t bcast_sta_id;
+  uint8_t channel_flags;
+  uint8_t channel_array[];
 } __packed; /* SCAN_CONFIG_DB_CMD_API_S */
 
 #define SCAN_TWO_LMACS 2
@@ -515,17 +515,17 @@
 #define SCAN_HB_LMAC_IDX 1
 
 struct iwl_scan_config {
-    __le32 flags;
-    __le32 tx_chains;
-    __le32 rx_chains;
-    __le32 legacy_rates;
-    __le32 out_of_channel_time[SCAN_TWO_LMACS];
-    __le32 suspend_time[SCAN_TWO_LMACS];
-    struct iwl_scan_dwell dwell;
-    uint8_t mac_addr[ETH_ALEN];
-    uint8_t bcast_sta_id;
-    uint8_t channel_flags;
-    uint8_t channel_array[];
+  __le32 flags;
+  __le32 tx_chains;
+  __le32 rx_chains;
+  __le32 legacy_rates;
+  __le32 out_of_channel_time[SCAN_TWO_LMACS];
+  __le32 suspend_time[SCAN_TWO_LMACS];
+  struct iwl_scan_dwell dwell;
+  uint8_t mac_addr[ETH_ALEN];
+  uint8_t bcast_sta_id;
+  uint8_t channel_flags;
+  uint8_t channel_array[];
 } __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
 
 /**
@@ -538,35 +538,35 @@
  *  when scan starts.
  */
 enum iwl_umac_scan_flags {
-    IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0),
-    IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1),
+  IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0),
+  IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1),
 };
 
 enum iwl_umac_scan_uid_offsets {
-    IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0,
-    IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8,
+  IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0,
+  IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8,
 };
 
 enum iwl_umac_scan_general_flags {
-    IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
-    IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
-    IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
-    IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
-    IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
-    IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
-    IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
-    IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
-    IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
-    IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
-    IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
-    /* Extended dwell is obselete when adaptive dwell is used, making this
-     * bit reusable. Hence, probe request defer is used only when adaptive
-     * dwell is supported. */
-    IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP = BIT(10),
-    IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
-    IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13),
-    IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME = BIT(14),
-    IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE = BIT(15),
+  IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
+  IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
+  IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
+  IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
+  IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
+  IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
+  IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
+  IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
+  IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
+  IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
+  IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
+  /* Extended dwell is obsolete when adaptive dwell is used, making this
+   * bit reusable. Hence, probe request defer is used only when adaptive
+   * dwell is supported. */
+  IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP = BIT(10),
+  IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
+  IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13),
+  IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME = BIT(14),
+  IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE = BIT(15),
 };
 
 /**
@@ -577,8 +577,8 @@
  *  reorder optimization or not.
  */
 enum iwl_umac_scan_general_flags2 {
-    IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
-    IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1),
+  IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
+  IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1),
 };
 
 /**
@@ -589,10 +589,10 @@
  * @iter_interval:  interval between two scan iterations on one channel.
  */
 struct iwl_scan_channel_cfg_umac {
-    __le32 flags;
-    uint8_t channel_num;
-    uint8_t iter_count;
-    __le16 iter_interval;
+  __le32 flags;
+  uint8_t channel_num;
+  uint8_t iter_count;
+  __le16 iter_interval;
 } __packed; /* SCAN_CHANNEL_CFG_S_VER2 */
 
 /**
@@ -602,9 +602,9 @@
  * @reserved: for alignment and future use
  */
 struct iwl_scan_umac_schedule {
-    __le16 interval;
-    uint8_t iter_count;
-    uint8_t reserved;
+  __le16 interval;
+  uint8_t iter_count;
+  uint8_t reserved;
 } __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
 
 /**
@@ -617,13 +617,13 @@
  * @direct_scan: list of SSIDs for directed active scan
  */
 struct iwl_scan_req_umac_tail {
-    /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
-    struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
-    __le16 delay;
-    __le16 reserved;
-    /* SCAN_PROBE_PARAMS_API_S_VER_1 */
-    struct iwl_scan_probe_req preq;
-    struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+  /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+  struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
+  __le16 delay;
+  __le16 reserved;
+  /* SCAN_PROBE_PARAMS_API_S_VER_1 */
+  struct iwl_scan_probe_req preq;
+  struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
 } __packed;
 
 /**
@@ -633,9 +633,9 @@
  * @reserved: for future use and alignment
  */
 struct iwl_scan_umac_chan_param {
-    uint8_t flags;
-    uint8_t count;
-    __le16 reserved;
+  uint8_t flags;
+  uint8_t count;
+  __le16 reserved;
 } __packed; /*SCAN_CHANNEL_PARAMS_API_S_VER_1 */
 
 /**
@@ -669,65 +669,65 @@
  *  &struct iwl_scan_req_umac_tail
  */
 struct iwl_scan_req_umac {
-    __le32 flags;
-    __le32 uid;
-    __le32 ooc_priority;
-    __le16 general_flags;
-    uint8_t reserved;
-    uint8_t scan_start_mac_id;
-    union {
-        struct {
-            uint8_t extended_dwell;
-            uint8_t active_dwell;
-            uint8_t passive_dwell;
-            uint8_t fragmented_dwell;
-            __le32 max_out_time;
-            __le32 suspend_time;
-            __le32 scan_priority;
-            struct iwl_scan_umac_chan_param channel;
-            uint8_t data[];
-        } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
-        struct {
-            uint8_t extended_dwell;
-            uint8_t active_dwell;
-            uint8_t passive_dwell;
-            uint8_t fragmented_dwell;
-            __le32 max_out_time[SCAN_TWO_LMACS];
-            __le32 suspend_time[SCAN_TWO_LMACS];
-            __le32 scan_priority;
-            struct iwl_scan_umac_chan_param channel;
-            uint8_t data[];
-        } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
-        struct {
-            uint8_t active_dwell;
-            uint8_t passive_dwell;
-            uint8_t fragmented_dwell;
-            uint8_t adwell_default_n_aps;
-            uint8_t adwell_default_n_aps_social;
-            uint8_t reserved3;
-            __le16 adwell_max_budget;
-            __le32 max_out_time[SCAN_TWO_LMACS];
-            __le32 suspend_time[SCAN_TWO_LMACS];
-            __le32 scan_priority;
-            struct iwl_scan_umac_chan_param channel;
-            uint8_t data[];
-        } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
-        struct {
-            uint8_t active_dwell[SCAN_TWO_LMACS];
-            uint8_t reserved2;
-            uint8_t adwell_default_n_aps;
-            uint8_t adwell_default_n_aps_social;
-            uint8_t general_flags2;
-            __le16 adwell_max_budget;
-            __le32 max_out_time[SCAN_TWO_LMACS];
-            __le32 suspend_time[SCAN_TWO_LMACS];
-            __le32 scan_priority;
-            uint8_t passive_dwell[SCAN_TWO_LMACS];
-            uint8_t num_of_fragments[SCAN_TWO_LMACS];
-            struct iwl_scan_umac_chan_param channel;
-            uint8_t data[];
-        } v8; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_8 */
-    };
+  __le32 flags;
+  __le32 uid;
+  __le32 ooc_priority;
+  __le16 general_flags;
+  uint8_t reserved;
+  uint8_t scan_start_mac_id;
+  union {
+    struct {
+      uint8_t extended_dwell;
+      uint8_t active_dwell;
+      uint8_t passive_dwell;
+      uint8_t fragmented_dwell;
+      __le32 max_out_time;
+      __le32 suspend_time;
+      __le32 scan_priority;
+      struct iwl_scan_umac_chan_param channel;
+      uint8_t data[];
+    } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+    struct {
+      uint8_t extended_dwell;
+      uint8_t active_dwell;
+      uint8_t passive_dwell;
+      uint8_t fragmented_dwell;
+      __le32 max_out_time[SCAN_TWO_LMACS];
+      __le32 suspend_time[SCAN_TWO_LMACS];
+      __le32 scan_priority;
+      struct iwl_scan_umac_chan_param channel;
+      uint8_t data[];
+    } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+    struct {
+      uint8_t active_dwell;
+      uint8_t passive_dwell;
+      uint8_t fragmented_dwell;
+      uint8_t adwell_default_n_aps;
+      uint8_t adwell_default_n_aps_social;
+      uint8_t reserved3;
+      __le16 adwell_max_budget;
+      __le32 max_out_time[SCAN_TWO_LMACS];
+      __le32 suspend_time[SCAN_TWO_LMACS];
+      __le32 scan_priority;
+      struct iwl_scan_umac_chan_param channel;
+      uint8_t data[];
+    } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
+    struct {
+      uint8_t active_dwell[SCAN_TWO_LMACS];
+      uint8_t reserved2;
+      uint8_t adwell_default_n_aps;
+      uint8_t adwell_default_n_aps_social;
+      uint8_t general_flags2;
+      __le16 adwell_max_budget;
+      __le32 max_out_time[SCAN_TWO_LMACS];
+      __le32 suspend_time[SCAN_TWO_LMACS];
+      __le32 scan_priority;
+      uint8_t passive_dwell[SCAN_TWO_LMACS];
+      uint8_t num_of_fragments[SCAN_TWO_LMACS];
+      struct iwl_scan_umac_chan_param channel;
+      uint8_t data[];
+    } v8; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_8 */
+  };
 } __packed;
 
 #define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac)
@@ -741,8 +741,8 @@
  * @flags: reserved
  */
 struct iwl_umac_scan_abort {
-    __le32 uid;
-    __le32 flags;
+  __le32 uid;
+  __le32 flags;
 } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
 
 /**
@@ -756,13 +756,13 @@
  * @reserved: for future use
  */
 struct iwl_umac_scan_complete {
-    __le32 uid;
-    uint8_t last_schedule;
-    uint8_t last_iter;
-    uint8_t status;
-    uint8_t ebs_status;
-    __le32 time_from_last_iter;
-    __le32 reserved;
+  __le32 uid;
+  uint8_t last_schedule;
+  uint8_t last_iter;
+  uint8_t status;
+  uint8_t ebs_status;
+  __le32 time_from_last_iter;
+  __le32 reserved;
 } __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
 
 #define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
@@ -777,12 +777,12 @@
  *  the channels passed in tue scan offload request
  */
 struct iwl_scan_offload_profile_match {
-    uint8_t bssid[ETH_ALEN];
-    __le16 reserved;
-    uint8_t channel;
-    uint8_t energy;
-    uint8_t matching_feature;
-    uint8_t matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
+  uint8_t bssid[ETH_ALEN];
+  __le16 reserved;
+  uint8_t channel;
+  uint8_t energy;
+  uint8_t matching_feature;
+  uint8_t matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
 } __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
 
 /**
@@ -799,15 +799,15 @@
  * @matches: array of match information, one for each match
  */
 struct iwl_scan_offload_profiles_query {
-    __le32 matched_profiles;
-    __le32 last_scan_age;
-    __le32 n_scans_done;
-    __le32 gp2_d0u;
-    __le32 gp2_invoked;
-    uint8_t resume_while_scanning;
-    uint8_t self_recovery;
-    __le16 reserved;
-    struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+  __le32 matched_profiles;
+  __le32 last_scan_age;
+  __le32 n_scans_done;
+  __le32 gp2_d0u;
+  __le32 gp2_invoked;
+  uint8_t resume_while_scanning;
+  uint8_t self_recovery;
+  __le16 reserved;
+  struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
 } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
 
 /**
@@ -823,13 +823,13 @@
  * @results: array of scan results, length in @scanned_channels
  */
 struct iwl_umac_scan_iter_complete_notif {
-    __le32 uid;
-    uint8_t scanned_channels;
-    uint8_t status;
-    uint8_t bt_status;
-    uint8_t last_channel;
-    __le64 start_tsf;
-    struct iwl_scan_results_notif results[];
+  __le32 uid;
+  uint8_t scanned_channels;
+  uint8_t status;
+  uint8_t bt_status;
+  uint8_t last_channel;
+  __le64 start_tsf;
+  struct iwl_scan_results_notif results[];
 } __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_SCAN_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/sf.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/sf.h
index c5cfde0f..d73f9db 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/sf.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/sf.h
@@ -38,21 +38,21 @@
 
 /* Smart Fifo state */
 enum iwl_sf_state {
-    SF_LONG_DELAY_ON = 0, /* should never be called by driver */
-    SF_FULL_ON,
-    SF_UNINIT,
-    SF_INIT_OFF,
-    SF_HW_NUM_STATES
+  SF_LONG_DELAY_ON = 0, /* should never be called by driver */
+  SF_FULL_ON,
+  SF_UNINIT,
+  SF_INIT_OFF,
+  SF_HW_NUM_STATES
 };
 
 /* Smart Fifo possible scenario */
 enum iwl_sf_scenario {
-    SF_SCENARIO_SINGLE_UNICAST,
-    SF_SCENARIO_AGG_UNICAST,
-    SF_SCENARIO_MULTICAST,
-    SF_SCENARIO_BA_RESP,
-    SF_SCENARIO_TX_RESP,
-    SF_NUM_SCENARIO
+  SF_SCENARIO_SINGLE_UNICAST,
+  SF_SCENARIO_AGG_UNICAST,
+  SF_SCENARIO_MULTICAST,
+  SF_SCENARIO_BA_RESP,
+  SF_SCENARIO_TX_RESP,
+  SF_NUM_SCENARIO
 };
 
 #define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */
@@ -102,10 +102,10 @@
  * @full_on_timeouts: timer values for each scenario in full on state.
  */
 struct iwl_sf_cfg_cmd {
-    __le32 state;
-    __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
-    __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
-    __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+  __le32 state;
+  __le32 watermark[SF_TRANSIENT_STATES_NUMBER];
+  __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+  __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
 } __packed; /* SF_CFG_API_S_VER_2 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_SF_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/soc.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/soc.h
index e0ee0d8..0542cb9 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/soc.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/soc.h
@@ -38,8 +38,8 @@
 
 /* type of devices for defining SOC latency */
 enum iwl_soc_device_types {
-    SOC_CONFIG_CMD_INTEGRATED = 0x0,
-    SOC_CONFIG_CMD_DISCRETE = 0x1,
+  SOC_CONFIG_CMD_INTEGRATED = 0x0,
+  SOC_CONFIG_CMD_DISCRETE = 0x1,
 };
 
 /**
@@ -49,8 +49,8 @@
  * @soc_latency: time for SOC to ensure stable power & XTAL
  */
 struct iwl_soc_configuration_cmd {
-    __le32 device_type;
-    __le32 soc_latency;
+  __le32 device_type;
+  __le32 soc_latency;
 } __packed; /* SOC_CONFIGURATION_CMD_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_SOC_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/sta.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/sta.h
index 839d282..118461a 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/sta.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/sta.h
@@ -79,46 +79,46 @@
  * @STA_FLG_AGG_MPDU_DENS_16US: A-MPDU density (16 usec gap)
  */
 enum iwl_sta_flags {
-    STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3),
-    STA_FLG_REDUCED_TX_PWR_DATA = BIT(6),
+  STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3),
+  STA_FLG_REDUCED_TX_PWR_DATA = BIT(6),
 
-    STA_FLG_DISABLE_TX = BIT(4),
+  STA_FLG_DISABLE_TX = BIT(4),
 
-    STA_FLG_PS = BIT(8),
-    STA_FLG_DRAIN_FLOW = BIT(12),
-    STA_FLG_PAN = BIT(13),
-    STA_FLG_CLASS_AUTH = BIT(14),
-    STA_FLG_CLASS_ASSOC = BIT(15),
-    STA_FLG_RTS_MIMO_PROT = BIT(17),
+  STA_FLG_PS = BIT(8),
+  STA_FLG_DRAIN_FLOW = BIT(12),
+  STA_FLG_PAN = BIT(13),
+  STA_FLG_CLASS_AUTH = BIT(14),
+  STA_FLG_CLASS_ASSOC = BIT(15),
+  STA_FLG_RTS_MIMO_PROT = BIT(17),
 
-    STA_FLG_MAX_AGG_SIZE_SHIFT = 19,
-    STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT),
-    STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT),
-    STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT),
-    STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT),
-    STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT),
-    STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
-    STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
-    STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
-    STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+  STA_FLG_MAX_AGG_SIZE_SHIFT = 19,
+  STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+  STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+  STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+  STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+  STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+  STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+  STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+  STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+  STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
 
-    STA_FLG_AGG_MPDU_DENS_SHIFT = 23,
-    STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT),
-    STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT),
-    STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT),
-    STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
-    STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+  STA_FLG_AGG_MPDU_DENS_SHIFT = 23,
+  STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+  STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+  STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+  STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+  STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
 
-    STA_FLG_FAT_EN_20MHZ = (0 << 26),
-    STA_FLG_FAT_EN_40MHZ = (1 << 26),
-    STA_FLG_FAT_EN_80MHZ = (2 << 26),
-    STA_FLG_FAT_EN_160MHZ = (3 << 26),
-    STA_FLG_FAT_EN_MSK = (3 << 26),
+  STA_FLG_FAT_EN_20MHZ = (0 << 26),
+  STA_FLG_FAT_EN_40MHZ = (1 << 26),
+  STA_FLG_FAT_EN_80MHZ = (2 << 26),
+  STA_FLG_FAT_EN_160MHZ = (3 << 26),
+  STA_FLG_FAT_EN_MSK = (3 << 26),
 
-    STA_FLG_MIMO_EN_SISO = (0 << 28),
-    STA_FLG_MIMO_EN_MIMO2 = (1 << 28),
-    STA_FLG_MIMO_EN_MIMO3 = (2 << 28),
-    STA_FLG_MIMO_EN_MSK = (3 << 28),
+  STA_FLG_MIMO_EN_SISO = (0 << 28),
+  STA_FLG_MIMO_EN_MIMO2 = (1 << 28),
+  STA_FLG_MIMO_EN_MIMO3 = (2 << 28),
+  STA_FLG_MIMO_EN_MSK = (3 << 28),
 };
 
 /**
@@ -143,24 +143,24 @@
  * @STA_KEY_MFP: key is used for Management Frame Protection
  */
 enum iwl_sta_key_flag {
-    STA_KEY_FLG_NO_ENC = (0 << 0),
-    STA_KEY_FLG_WEP = (1 << 0),
-    STA_KEY_FLG_CCM = (2 << 0),
-    STA_KEY_FLG_TKIP = (3 << 0),
-    STA_KEY_FLG_EXT = (4 << 0),
-    STA_KEY_FLG_GCMP = (5 << 0),
-    STA_KEY_FLG_CMAC = (6 << 0),
-    STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
-    STA_KEY_FLG_EN_MSK = (7 << 0),
+  STA_KEY_FLG_NO_ENC = (0 << 0),
+  STA_KEY_FLG_WEP = (1 << 0),
+  STA_KEY_FLG_CCM = (2 << 0),
+  STA_KEY_FLG_TKIP = (3 << 0),
+  STA_KEY_FLG_EXT = (4 << 0),
+  STA_KEY_FLG_GCMP = (5 << 0),
+  STA_KEY_FLG_CMAC = (6 << 0),
+  STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
+  STA_KEY_FLG_EN_MSK = (7 << 0),
 
-    STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
-    STA_KEY_FLG_KEYID_POS = 8,
-    STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
-    STA_KEY_NOT_VALID = BIT(11),
-    STA_KEY_FLG_WEP_13BYTES = BIT(12),
-    STA_KEY_FLG_KEY_32BYTES = BIT(12),
-    STA_KEY_MULTICAST = BIT(14),
-    STA_KEY_MFP = BIT(15),
+  STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
+  STA_KEY_FLG_KEYID_POS = 8,
+  STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
+  STA_KEY_NOT_VALID = BIT(11),
+  STA_KEY_FLG_WEP_13BYTES = BIT(12),
+  STA_KEY_FLG_KEY_32BYTES = BIT(12),
+  STA_KEY_MULTICAST = BIT(14),
+  STA_KEY_MFP = BIT(15),
 };
 
 /**
@@ -175,14 +175,14 @@
  * @STA_MODIFY_QUEUES: modify the queues used by this station
  */
 enum iwl_sta_modify_flag {
-    STA_MODIFY_QUEUE_REMOVAL = BIT(0),
-    STA_MODIFY_TID_DISABLE_TX = BIT(1),
-    STA_MODIFY_UAPSD_ACS = BIT(2),
-    STA_MODIFY_ADD_BA_TID = BIT(3),
-    STA_MODIFY_REMOVE_BA_TID = BIT(4),
-    STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5),
-    STA_MODIFY_PROT_TH = BIT(6),
-    STA_MODIFY_QUEUES = BIT(7),
+  STA_MODIFY_QUEUE_REMOVAL = BIT(0),
+  STA_MODIFY_TID_DISABLE_TX = BIT(1),
+  STA_MODIFY_UAPSD_ACS = BIT(2),
+  STA_MODIFY_ADD_BA_TID = BIT(3),
+  STA_MODIFY_REMOVE_BA_TID = BIT(4),
+  STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5),
+  STA_MODIFY_PROT_TH = BIT(6),
+  STA_MODIFY_QUEUES = BIT(7),
 };
 
 /**
@@ -191,8 +191,8 @@
  * @STA_MODE_MODIFY: modify the station
  */
 enum iwl_sta_mode {
-    STA_MODE_ADD = 0,
-    STA_MODE_MODIFY = 1,
+  STA_MODE_ADD = 0,
+  STA_MODE_MODIFY = 1,
 };
 
 /**
@@ -204,10 +204,10 @@
  *  (last) released frame
  */
 enum iwl_sta_sleep_flag {
-    STA_SLEEP_STATE_AWAKE = 0,
-    STA_SLEEP_STATE_PS_POLL = BIT(0),
-    STA_SLEEP_STATE_UAPSD = BIT(1),
-    STA_SLEEP_STATE_MOREDATA = BIT(2),
+  STA_SLEEP_STATE_AWAKE = 0,
+  STA_SLEEP_STATE_PS_POLL = BIT(0),
+  STA_SLEEP_STATE_UAPSD = BIT(1),
+  STA_SLEEP_STATE_MOREDATA = BIT(2),
 };
 
 #define STA_KEY_MAX_NUM (16)
@@ -231,16 +231,16 @@
  * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
  */
 struct iwl_mvm_keyinfo {
-    __le16 key_flags;
-    uint8_t tkip_rx_tsc_byte2;
-    uint8_t reserved1;
-    __le16 tkip_rx_ttak[5];
-    uint8_t key_offset;
-    uint8_t reserved2;
-    uint8_t key[16];
-    __le64 tx_secur_seq_cnt;
-    __le64 hw_tkip_mic_rx_key;
-    __le64 hw_tkip_mic_tx_key;
+  __le16 key_flags;
+  uint8_t tkip_rx_tsc_byte2;
+  uint8_t reserved1;
+  __le16 tkip_rx_ttak[5];
+  uint8_t key_offset;
+  uint8_t reserved2;
+  uint8_t key[16];
+  __le64 tx_secur_seq_cnt;
+  __le64 hw_tkip_mic_rx_key;
+  __le64 hw_tkip_mic_tx_key;
 } __packed;
 
 #define IWL_ADD_STA_STATUS_MASK 0xFF
@@ -290,25 +290,25 @@
  * entry, or modifying a pre-existing one.
  */
 struct iwl_mvm_add_sta_cmd_v7 {
-    uint8_t add_modify;
-    uint8_t awake_acs;
-    __le16 tid_disable_tx;
-    __le32 mac_id_n_color;
-    uint8_t addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
-    __le16 reserved2;
-    uint8_t sta_id;
-    uint8_t modify_mask;
-    __le16 reserved3;
-    __le32 station_flags;
-    __le32 station_flags_msk;
-    uint8_t add_immediate_ba_tid;
-    uint8_t remove_immediate_ba_tid;
-    __le16 add_immediate_ba_ssn;
-    __le16 sleep_tx_count;
-    __le16 sleep_state_flags;
-    __le16 assoc_id;
-    __le16 beamform_flags;
-    __le32 tfd_queue_msk;
+  uint8_t add_modify;
+  uint8_t awake_acs;
+  __le16 tid_disable_tx;
+  __le32 mac_id_n_color;
+  uint8_t addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+  __le16 reserved2;
+  uint8_t sta_id;
+  uint8_t modify_mask;
+  __le16 reserved3;
+  __le32 station_flags;
+  __le32 station_flags_msk;
+  uint8_t add_immediate_ba_tid;
+  uint8_t remove_immediate_ba_tid;
+  __le16 add_immediate_ba_ssn;
+  __le16 sleep_tx_count;
+  __le16 sleep_state_flags;
+  __le16 assoc_id;
+  __le16 beamform_flags;
+  __le32 tfd_queue_msk;
 } __packed; /* ADD_STA_CMD_API_S_VER_7 */
 
 /**
@@ -322,11 +322,11 @@
  * @IWL_STA_AUX_ACTIVITY: auxilary station (scan, ROC and so on).
  */
 enum iwl_sta_type {
-    IWL_STA_LINK,
-    IWL_STA_GENERAL_PURPOSE,
-    IWL_STA_MULTICAST,
-    IWL_STA_TDLS_LINK,
-    IWL_STA_AUX_ACTIVITY,
+  IWL_STA_LINK,
+  IWL_STA_GENERAL_PURPOSE,
+  IWL_STA_MULTICAST,
+  IWL_STA_TDLS_LINK,
+  IWL_STA_AUX_ACTIVITY,
 };
 
 /**
@@ -377,29 +377,29 @@
  * entry, or modifying a pre-existing one.
  */
 struct iwl_mvm_add_sta_cmd {
-    uint8_t add_modify;
-    uint8_t awake_acs;
-    __le16 tid_disable_tx;
-    __le32 mac_id_n_color;
-    uint8_t addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
-    __le16 reserved2;
-    uint8_t sta_id;
-    uint8_t modify_mask;
-    __le16 reserved3;
-    __le32 station_flags;
-    __le32 station_flags_msk;
-    uint8_t add_immediate_ba_tid;
-    uint8_t remove_immediate_ba_tid;
-    __le16 add_immediate_ba_ssn;
-    __le16 sleep_tx_count;
-    uint8_t sleep_state_flags;
-    uint8_t station_type;
-    __le16 assoc_id;
-    __le16 beamform_flags;
-    __le32 tfd_queue_msk;
-    __le16 rx_ba_window;
-    uint8_t sp_length;
-    uint8_t uapsd_acs;
+  uint8_t add_modify;
+  uint8_t awake_acs;
+  __le16 tid_disable_tx;
+  __le32 mac_id_n_color;
+  uint8_t addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+  __le16 reserved2;
+  uint8_t sta_id;
+  uint8_t modify_mask;
+  __le16 reserved3;
+  __le32 station_flags;
+  __le32 station_flags_msk;
+  uint8_t add_immediate_ba_tid;
+  uint8_t remove_immediate_ba_tid;
+  __le16 add_immediate_ba_ssn;
+  __le16 sleep_tx_count;
+  uint8_t sleep_state_flags;
+  uint8_t station_type;
+  __le16 assoc_id;
+  __le16 beamform_flags;
+  __le32 tfd_queue_msk;
+  __le16 rx_ba_window;
+  uint8_t sp_length;
+  uint8_t uapsd_acs;
 } __packed; /* ADD_STA_CMD_API_S_VER_10 */
 
 /**
@@ -412,11 +412,11 @@
  * @rx_secur_seq_cnt: RX security sequence counter for the key
  */
 struct iwl_mvm_add_sta_key_common {
-    uint8_t sta_id;
-    uint8_t key_offset;
-    __le16 key_flags;
-    uint8_t key[32];
-    uint8_t rx_secur_seq_cnt[16];
+  uint8_t sta_id;
+  uint8_t key_offset;
+  __le16 key_flags;
+  uint8_t key[32];
+  uint8_t rx_secur_seq_cnt[16];
 } __packed;
 
 /**
@@ -427,10 +427,10 @@
  * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
  */
 struct iwl_mvm_add_sta_key_cmd_v1 {
-    struct iwl_mvm_add_sta_key_common common;
-    uint8_t tkip_rx_tsc_byte2;
-    uint8_t reserved;
-    __le16 tkip_rx_ttak[5];
+  struct iwl_mvm_add_sta_key_common common;
+  uint8_t tkip_rx_tsc_byte2;
+  uint8_t reserved;
+  __le16 tkip_rx_ttak[5];
 } __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
 
 /**
@@ -441,10 +441,10 @@
  * @transmit_seq_cnt: TSC, transmit packet number
  */
 struct iwl_mvm_add_sta_key_cmd {
-    struct iwl_mvm_add_sta_key_common common;
-    __le64 rx_mic_key;
-    __le64 tx_mic_key;
-    __le64 transmit_seq_cnt;
+  struct iwl_mvm_add_sta_key_common common;
+  __le64 rx_mic_key;
+  __le64 tx_mic_key;
+  __le64 transmit_seq_cnt;
 } __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */
 
 /**
@@ -456,10 +456,10 @@
  *  doesn't exist.
  */
 enum iwl_mvm_add_sta_rsp_status {
-    ADD_STA_SUCCESS = 0x1,
-    ADD_STA_STATIONS_OVERLOAD = 0x2,
-    ADD_STA_IMMEDIATE_BA_FAILURE = 0x4,
-    ADD_STA_MODIFY_NON_EXISTING_STA = 0x8,
+  ADD_STA_SUCCESS = 0x1,
+  ADD_STA_STATIONS_OVERLOAD = 0x2,
+  ADD_STA_IMMEDIATE_BA_FAILURE = 0x4,
+  ADD_STA_MODIFY_NON_EXISTING_STA = 0x8,
 };
 
 /**
@@ -469,8 +469,8 @@
  * @reserved: reserved
  */
 struct iwl_mvm_rm_sta_cmd {
-    uint8_t sta_id;
-    uint8_t reserved[3];
+  uint8_t sta_id;
+  uint8_t reserved[3];
 } __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
 
 /**
@@ -485,13 +485,13 @@
  * @receive_seq_cnt: initial RSC/PN needed for replay check
  */
 struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
-    __le32 ctrl_flags;
-    uint8_t igtk[16];
-    uint8_t k1[16];
-    uint8_t k2[16];
-    __le32 key_id;
-    __le32 sta_id;
-    __le64 receive_seq_cnt;
+  __le32 ctrl_flags;
+  uint8_t igtk[16];
+  uint8_t k1[16];
+  uint8_t k2[16];
+  __le32 key_id;
+  __le32 sta_id;
+  __le64 receive_seq_cnt;
 } __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
 
 /**
@@ -504,29 +504,29 @@
  * @receive_seq_cnt: initial RSC/PN needed for replay check
  */
 struct iwl_mvm_mgmt_mcast_key_cmd {
-    __le32 ctrl_flags;
-    uint8_t igtk[32];
-    __le32 key_id;
-    __le32 sta_id;
-    __le64 receive_seq_cnt;
+  __le32 ctrl_flags;
+  uint8_t igtk[32];
+  __le32 key_id;
+  __le32 sta_id;
+  __le64 receive_seq_cnt;
 } __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */
 
 struct iwl_mvm_wep_key {
-    uint8_t key_index;
-    uint8_t key_offset;
-    __le16 reserved1;
-    uint8_t key_size;
-    uint8_t reserved2[3];
-    uint8_t key[16];
+  uint8_t key_index;
+  uint8_t key_offset;
+  __le16 reserved1;
+  uint8_t key_size;
+  uint8_t reserved2[3];
+  uint8_t key[16];
 } __packed;
 
 struct iwl_mvm_wep_key_cmd {
-    __le32 mac_id_n_color;
-    uint8_t num_keys;
-    uint8_t decryption_type;
-    uint8_t flags;
-    uint8_t reserved;
-    struct iwl_mvm_wep_key wep_key[0];
+  __le32 mac_id_n_color;
+  uint8_t num_keys;
+  uint8_t decryption_type;
+  uint8_t flags;
+  uint8_t reserved;
+  struct iwl_mvm_wep_key wep_key[0];
 } __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
 
 /**
@@ -536,8 +536,8 @@
  * @sta_id: station ID
  */
 struct iwl_mvm_eosp_notification {
-    __le32 remain_frame_count;
-    __le32 sta_id;
+  __le32 remain_frame_count;
+  __le32 sta_id;
 } __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_STA_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/stats.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/stats.h
index 1fffe1d..f88d73d1 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/stats.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/stats.h
@@ -38,19 +38,19 @@
 #include "mac.h"
 
 struct mvm_statistics_dbg {
-    __le32 burst_check;
-    __le32 burst_count;
-    __le32 wait_for_silence_timeout_cnt;
-    uint8_t reserved[12];
+  __le32 burst_check;
+  __le32 burst_count;
+  __le32 wait_for_silence_timeout_cnt;
+  uint8_t reserved[12];
 } __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
 
 struct mvm_statistics_div {
-    __le32 tx_on_a;
-    __le32 tx_on_b;
-    __le32 exec_time;
-    __le32 probe_time;
-    __le32 rssi_ant;
-    __le32 reserved2;
+  __le32 tx_on_a;
+  __le32 tx_on_b;
+  __le32 exec_time;
+  __le32 probe_time;
+  __le32 rssi_ant;
+  __le32 reserved2;
 } __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
 
 /**
@@ -79,268 +79,268 @@
  * @mac_id: mac ID
  */
 struct mvm_statistics_rx_non_phy {
-    __le32 bogus_cts;
-    __le32 bogus_ack;
-    __le32 non_channel_beacons;
-    __le32 channel_beacons;
-    __le32 num_missed_bcon;
-    __le32 adc_rx_saturation_time;
-    __le32 ina_detection_search_time;
-    __le32 beacon_silence_rssi_a;
-    __le32 beacon_silence_rssi_b;
-    __le32 beacon_silence_rssi_c;
-    __le32 interference_data_flag;
-    __le32 channel_load;
-    __le32 beacon_rssi_a;
-    __le32 beacon_rssi_b;
-    __le32 beacon_rssi_c;
-    __le32 beacon_energy_a;
-    __le32 beacon_energy_b;
-    __le32 beacon_energy_c;
-    __le32 num_bt_kills;
-    __le32 mac_id;
+  __le32 bogus_cts;
+  __le32 bogus_ack;
+  __le32 non_channel_beacons;
+  __le32 channel_beacons;
+  __le32 num_missed_bcon;
+  __le32 adc_rx_saturation_time;
+  __le32 ina_detection_search_time;
+  __le32 beacon_silence_rssi_a;
+  __le32 beacon_silence_rssi_b;
+  __le32 beacon_silence_rssi_c;
+  __le32 interference_data_flag;
+  __le32 channel_load;
+  __le32 beacon_rssi_a;
+  __le32 beacon_rssi_b;
+  __le32 beacon_rssi_c;
+  __le32 beacon_energy_a;
+  __le32 beacon_energy_b;
+  __le32 beacon_energy_c;
+  __le32 num_bt_kills;
+  __le32 mac_id;
 } __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_4 */
 
 struct mvm_statistics_rx_non_phy_v3 {
-    __le32 bogus_cts;                 /* CTS received when not expecting CTS */
-    __le32 bogus_ack;                 /* ACK received when not expecting ACK */
-    __le32 non_bssid_frames;          /* number of frames with BSSID that
-                                       * doesn't belong to the STA BSSID */
-    __le32 filtered_frames;           /* count frames that were dumped in the
-                                       * filtering process */
-    __le32 non_channel_beacons;       /* beacons with our bss id but not on
-                                       * our serving channel */
-    __le32 channel_beacons;           /* beacons with our bss id and in our
-                                       * serving channel */
-    __le32 num_missed_bcon;           /* number of missed beacons */
-    __le32 adc_rx_saturation_time;    /* count in 0.8us units the time the
-                                       * ADC was in saturation */
-    __le32 ina_detection_search_time; /* total time (in 0.8us) searched
-                                       * for INA */
-    __le32 beacon_silence_rssi_a;     /* RSSI silence after beacon frame */
-    __le32 beacon_silence_rssi_b;     /* RSSI silence after beacon frame */
-    __le32 beacon_silence_rssi_c;     /* RSSI silence after beacon frame */
-    __le32 interference_data_flag;    /* flag for interference data
-                                       * availability. 1 when data is
-                                       * available. */
-    __le32 channel_load;              /* counts RX Enable time in uSec */
-    __le32 dsp_false_alarms;          /* DSP false alarm (both OFDM
-                                       * and CCK) counter */
-    __le32 beacon_rssi_a;
-    __le32 beacon_rssi_b;
-    __le32 beacon_rssi_c;
-    __le32 beacon_energy_a;
-    __le32 beacon_energy_b;
-    __le32 beacon_energy_c;
-    __le32 num_bt_kills;
-    __le32 mac_id;
-    __le32 directed_data_mpdu;
+  __le32 bogus_cts;                 /* CTS received when not expecting CTS */
+  __le32 bogus_ack;                 /* ACK received when not expecting ACK */
+  __le32 non_bssid_frames;          /* number of frames with BSSID that
+                                     * doesn't belong to the STA BSSID */
+  __le32 filtered_frames;           /* count frames that were dumped in the
+                                     * filtering process */
+  __le32 non_channel_beacons;       /* beacons with our bss id but not on
+                                     * our serving channel */
+  __le32 channel_beacons;           /* beacons with our bss id and in our
+                                     * serving channel */
+  __le32 num_missed_bcon;           /* number of missed beacons */
+  __le32 adc_rx_saturation_time;    /* count in 0.8us units the time the
+                                     * ADC was in saturation */
+  __le32 ina_detection_search_time; /* total time (in 0.8us) searched
+                                     * for INA */
+  __le32 beacon_silence_rssi_a;     /* RSSI silence after beacon frame */
+  __le32 beacon_silence_rssi_b;     /* RSSI silence after beacon frame */
+  __le32 beacon_silence_rssi_c;     /* RSSI silence after beacon frame */
+  __le32 interference_data_flag;    /* flag for interference data
+                                     * availability. 1 when data is
+                                     * available. */
+  __le32 channel_load;              /* counts RX Enable time in uSec */
+  __le32 dsp_false_alarms;          /* DSP false alarm (both OFDM
+                                     * and CCK) counter */
+  __le32 beacon_rssi_a;
+  __le32 beacon_rssi_b;
+  __le32 beacon_rssi_c;
+  __le32 beacon_energy_a;
+  __le32 beacon_energy_b;
+  __le32 beacon_energy_c;
+  __le32 num_bt_kills;
+  __le32 mac_id;
+  __le32 directed_data_mpdu;
 } __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
 
 struct mvm_statistics_rx_phy {
-    __le32 unresponded_rts;
-    __le32 rxe_frame_lmt_overrun;
-    __le32 sent_ba_rsp_cnt;
-    __le32 dsp_self_kill;
-    __le32 reserved;
+  __le32 unresponded_rts;
+  __le32 rxe_frame_lmt_overrun;
+  __le32 sent_ba_rsp_cnt;
+  __le32 dsp_self_kill;
+  __le32 reserved;
 } __packed; /* STATISTICS_RX_PHY_API_S_VER_3 */
 
 struct mvm_statistics_rx_phy_v2 {
-    __le32 ina_cnt;
-    __le32 fina_cnt;
-    __le32 plcp_err;
-    __le32 crc32_err;
-    __le32 overrun_err;
-    __le32 early_overrun_err;
-    __le32 crc32_good;
-    __le32 false_alarm_cnt;
-    __le32 fina_sync_err_cnt;
-    __le32 sfd_timeout;
-    __le32 fina_timeout;
-    __le32 unresponded_rts;
-    __le32 rxe_frame_lmt_overrun;
-    __le32 sent_ack_cnt;
-    __le32 sent_cts_cnt;
-    __le32 sent_ba_rsp_cnt;
-    __le32 dsp_self_kill;
-    __le32 mh_format_err;
-    __le32 re_acq_main_rssi_sum;
-    __le32 reserved;
+  __le32 ina_cnt;
+  __le32 fina_cnt;
+  __le32 plcp_err;
+  __le32 crc32_err;
+  __le32 overrun_err;
+  __le32 early_overrun_err;
+  __le32 crc32_good;
+  __le32 false_alarm_cnt;
+  __le32 fina_sync_err_cnt;
+  __le32 sfd_timeout;
+  __le32 fina_timeout;
+  __le32 unresponded_rts;
+  __le32 rxe_frame_lmt_overrun;
+  __le32 sent_ack_cnt;
+  __le32 sent_cts_cnt;
+  __le32 sent_ba_rsp_cnt;
+  __le32 dsp_self_kill;
+  __le32 mh_format_err;
+  __le32 re_acq_main_rssi_sum;
+  __le32 reserved;
 } __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
 
 struct mvm_statistics_rx_ht_phy_v1 {
-    __le32 plcp_err;
-    __le32 overrun_err;
-    __le32 early_overrun_err;
-    __le32 crc32_good;
-    __le32 crc32_err;
-    __le32 mh_format_err;
-    __le32 agg_crc32_good;
-    __le32 agg_mpdu_cnt;
-    __le32 agg_cnt;
-    __le32 unsupport_mcs;
+  __le32 plcp_err;
+  __le32 overrun_err;
+  __le32 early_overrun_err;
+  __le32 crc32_good;
+  __le32 crc32_err;
+  __le32 mh_format_err;
+  __le32 agg_crc32_good;
+  __le32 agg_mpdu_cnt;
+  __le32 agg_cnt;
+  __le32 unsupport_mcs;
 } __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
 
 struct mvm_statistics_rx_ht_phy {
-    __le32 mh_format_err;
-    __le32 agg_mpdu_cnt;
-    __le32 agg_cnt;
-    __le32 unsupport_mcs;
+  __le32 mh_format_err;
+  __le32 agg_mpdu_cnt;
+  __le32 agg_cnt;
+  __le32 unsupport_mcs;
 } __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_2 */
 
 struct mvm_statistics_tx_non_phy_v3 {
-    __le32 preamble_cnt;
-    __le32 rx_detected_cnt;
-    __le32 bt_prio_defer_cnt;
-    __le32 bt_prio_kill_cnt;
-    __le32 few_bytes_cnt;
-    __le32 cts_timeout;
-    __le32 ack_timeout;
-    __le32 expected_ack_cnt;
-    __le32 actual_ack_cnt;
-    __le32 dump_msdu_cnt;
-    __le32 burst_abort_next_frame_mismatch_cnt;
-    __le32 burst_abort_missing_next_frame_cnt;
-    __le32 cts_timeout_collision;
-    __le32 ack_or_ba_timeout_collision;
+  __le32 preamble_cnt;
+  __le32 rx_detected_cnt;
+  __le32 bt_prio_defer_cnt;
+  __le32 bt_prio_kill_cnt;
+  __le32 few_bytes_cnt;
+  __le32 cts_timeout;
+  __le32 ack_timeout;
+  __le32 expected_ack_cnt;
+  __le32 actual_ack_cnt;
+  __le32 dump_msdu_cnt;
+  __le32 burst_abort_next_frame_mismatch_cnt;
+  __le32 burst_abort_missing_next_frame_cnt;
+  __le32 cts_timeout_collision;
+  __le32 ack_or_ba_timeout_collision;
 } __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */
 
 struct mvm_statistics_tx_non_phy {
-    __le32 bt_prio_defer_cnt;
-    __le32 bt_prio_kill_cnt;
-    __le32 few_bytes_cnt;
-    __le32 cts_timeout;
-    __le32 ack_timeout;
-    __le32 dump_msdu_cnt;
-    __le32 burst_abort_next_frame_mismatch_cnt;
-    __le32 burst_abort_missing_next_frame_cnt;
-    __le32 cts_timeout_collision;
-    __le32 ack_or_ba_timeout_collision;
+  __le32 bt_prio_defer_cnt;
+  __le32 bt_prio_kill_cnt;
+  __le32 few_bytes_cnt;
+  __le32 cts_timeout;
+  __le32 ack_timeout;
+  __le32 dump_msdu_cnt;
+  __le32 burst_abort_next_frame_mismatch_cnt;
+  __le32 burst_abort_missing_next_frame_cnt;
+  __le32 cts_timeout_collision;
+  __le32 ack_or_ba_timeout_collision;
 } __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_4 */
 
 #define MAX_CHAINS 3
 
 struct mvm_statistics_tx_non_phy_agg {
-    __le32 ba_timeout;
-    __le32 ba_reschedule_frames;
-    __le32 scd_query_agg_frame_cnt;
-    __le32 scd_query_no_agg;
-    __le32 scd_query_agg;
-    __le32 scd_query_mismatch;
-    __le32 frame_not_ready;
-    __le32 underrun;
-    __le32 bt_prio_kill;
-    __le32 rx_ba_rsp_cnt;
-    __s8 txpower[MAX_CHAINS];
-    __s8 reserved;
-    __le32 reserved2;
+  __le32 ba_timeout;
+  __le32 ba_reschedule_frames;
+  __le32 scd_query_agg_frame_cnt;
+  __le32 scd_query_no_agg;
+  __le32 scd_query_agg;
+  __le32 scd_query_mismatch;
+  __le32 frame_not_ready;
+  __le32 underrun;
+  __le32 bt_prio_kill;
+  __le32 rx_ba_rsp_cnt;
+  __s8 txpower[MAX_CHAINS];
+  __s8 reserved;
+  __le32 reserved2;
 } __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
 
 struct mvm_statistics_tx_channel_width {
-    __le32 ext_cca_narrow_ch20[1];
-    __le32 ext_cca_narrow_ch40[2];
-    __le32 ext_cca_narrow_ch80[3];
-    __le32 ext_cca_narrow_ch160[4];
-    __le32 last_tx_ch_width_indx;
-    __le32 rx_detected_per_ch_width[4];
-    __le32 success_per_ch_width[4];
-    __le32 fail_per_ch_width[4];
+  __le32 ext_cca_narrow_ch20[1];
+  __le32 ext_cca_narrow_ch40[2];
+  __le32 ext_cca_narrow_ch80[3];
+  __le32 ext_cca_narrow_ch160[4];
+  __le32 last_tx_ch_width_indx;
+  __le32 rx_detected_per_ch_width[4];
+  __le32 success_per_ch_width[4];
+  __le32 fail_per_ch_width[4];
 }; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
 
 struct mvm_statistics_tx_v4 {
-    struct mvm_statistics_tx_non_phy_v3 general;
-    struct mvm_statistics_tx_non_phy_agg agg;
-    struct mvm_statistics_tx_channel_width channel_width;
+  struct mvm_statistics_tx_non_phy_v3 general;
+  struct mvm_statistics_tx_non_phy_agg agg;
+  struct mvm_statistics_tx_channel_width channel_width;
 } __packed; /* STATISTICS_TX_API_S_VER_4 */
 
 struct mvm_statistics_tx {
-    struct mvm_statistics_tx_non_phy general;
-    struct mvm_statistics_tx_non_phy_agg agg;
-    struct mvm_statistics_tx_channel_width channel_width;
+  struct mvm_statistics_tx_non_phy general;
+  struct mvm_statistics_tx_non_phy_agg agg;
+  struct mvm_statistics_tx_channel_width channel_width;
 } __packed; /* STATISTICS_TX_API_S_VER_5 */
 
 struct mvm_statistics_bt_activity {
-    __le32 hi_priority_tx_req_cnt;
-    __le32 hi_priority_tx_denied_cnt;
-    __le32 lo_priority_tx_req_cnt;
-    __le32 lo_priority_tx_denied_cnt;
-    __le32 hi_priority_rx_req_cnt;
-    __le32 hi_priority_rx_denied_cnt;
-    __le32 lo_priority_rx_req_cnt;
-    __le32 lo_priority_rx_denied_cnt;
+  __le32 hi_priority_tx_req_cnt;
+  __le32 hi_priority_tx_denied_cnt;
+  __le32 lo_priority_tx_req_cnt;
+  __le32 lo_priority_tx_denied_cnt;
+  __le32 hi_priority_rx_req_cnt;
+  __le32 hi_priority_rx_denied_cnt;
+  __le32 lo_priority_rx_req_cnt;
+  __le32 lo_priority_rx_denied_cnt;
 } __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
 
 struct mvm_statistics_general_common_v19 {
-    __le32 radio_temperature;
-    __le32 radio_voltage;
-    struct mvm_statistics_dbg dbg;
-    __le32 sleep_time;
-    __le32 slots_out;
-    __le32 slots_idle;
-    __le32 ttl_timestamp;
-    struct mvm_statistics_div slow_div;
-    __le32 rx_enable_counter;
-    /*
-     * num_of_sos_states:
-     *  count the number of times we have to re-tune
-     *  in order to get out of bad PHY status
-     */
-    __le32 num_of_sos_states;
-    __le32 beacon_filtered;
-    __le32 missed_beacons;
-    uint8_t beacon_filter_average_energy;
-    uint8_t beacon_filter_reason;
-    uint8_t beacon_filter_current_energy;
-    uint8_t beacon_filter_reserved;
-    __le32 beacon_filter_delta_time;
-    struct mvm_statistics_bt_activity bt_activity;
-    __le64 rx_time;
-    __le64 on_time_rf;
-    __le64 on_time_scan;
-    __le64 tx_time;
+  __le32 radio_temperature;
+  __le32 radio_voltage;
+  struct mvm_statistics_dbg dbg;
+  __le32 sleep_time;
+  __le32 slots_out;
+  __le32 slots_idle;
+  __le32 ttl_timestamp;
+  struct mvm_statistics_div slow_div;
+  __le32 rx_enable_counter;
+  /*
+   * num_of_sos_states:
+   *  count the number of times we have to re-tune
+   *  in order to get out of bad PHY status
+   */
+  __le32 num_of_sos_states;
+  __le32 beacon_filtered;
+  __le32 missed_beacons;
+  uint8_t beacon_filter_average_energy;
+  uint8_t beacon_filter_reason;
+  uint8_t beacon_filter_current_energy;
+  uint8_t beacon_filter_reserved;
+  __le32 beacon_filter_delta_time;
+  struct mvm_statistics_bt_activity bt_activity;
+  __le64 rx_time;
+  __le64 on_time_rf;
+  __le64 on_time_scan;
+  __le64 tx_time;
 } __packed;
 
 struct mvm_statistics_general_common {
-    __le32 radio_temperature;
-    struct mvm_statistics_dbg dbg;
-    __le32 sleep_time;
-    __le32 slots_out;
-    __le32 slots_idle;
-    __le32 ttl_timestamp;
-    struct mvm_statistics_div slow_div;
-    __le32 rx_enable_counter;
-    /*
-     * num_of_sos_states:
-     *  count the number of times we have to re-tune
-     *  in order to get out of bad PHY status
-     */
-    __le32 num_of_sos_states;
-    __le32 beacon_filtered;
-    __le32 missed_beacons;
-    uint8_t beacon_filter_average_energy;
-    uint8_t beacon_filter_reason;
-    uint8_t beacon_filter_current_energy;
-    uint8_t beacon_filter_reserved;
-    __le32 beacon_filter_delta_time;
-    struct mvm_statistics_bt_activity bt_activity;
-    __le64 rx_time;
-    __le64 on_time_rf;
-    __le64 on_time_scan;
-    __le64 tx_time;
+  __le32 radio_temperature;
+  struct mvm_statistics_dbg dbg;
+  __le32 sleep_time;
+  __le32 slots_out;
+  __le32 slots_idle;
+  __le32 ttl_timestamp;
+  struct mvm_statistics_div slow_div;
+  __le32 rx_enable_counter;
+  /*
+   * num_of_sos_states:
+   *  count the number of times we have to re-tune
+   *  in order to get out of bad PHY status
+   */
+  __le32 num_of_sos_states;
+  __le32 beacon_filtered;
+  __le32 missed_beacons;
+  uint8_t beacon_filter_average_energy;
+  uint8_t beacon_filter_reason;
+  uint8_t beacon_filter_current_energy;
+  uint8_t beacon_filter_reserved;
+  __le32 beacon_filter_delta_time;
+  struct mvm_statistics_bt_activity bt_activity;
+  __le64 rx_time;
+  __le64 on_time_rf;
+  __le64 on_time_scan;
+  __le64 tx_time;
 } __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
 
 struct mvm_statistics_general_v8 {
-    struct mvm_statistics_general_common_v19 common;
-    __le32 beacon_counter[NUM_MAC_INDEX];
-    uint8_t beacon_average_energy[NUM_MAC_INDEX];
-    uint8_t reserved[4 - (NUM_MAC_INDEX % 4)];
+  struct mvm_statistics_general_common_v19 common;
+  __le32 beacon_counter[NUM_MAC_INDEX];
+  uint8_t beacon_average_energy[NUM_MAC_INDEX];
+  uint8_t reserved[4 - (NUM_MAC_INDEX % 4)];
 } __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
 
 struct mvm_statistics_general {
-    struct mvm_statistics_general_common common;
-    __le32 beacon_counter[MAC_INDEX_AUX];
-    uint8_t beacon_average_energy[MAC_INDEX_AUX];
-    uint8_t reserved[8 - MAC_INDEX_AUX];
+  struct mvm_statistics_general_common common;
+  __le32 beacon_counter[MAC_INDEX_AUX];
+  uint8_t beacon_average_energy[MAC_INDEX_AUX];
+  uint8_t reserved[8 - MAC_INDEX_AUX];
 } __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
 
 /**
@@ -351,31 +351,31 @@
  * @avg_energy: average RSSI, per station
  */
 struct mvm_statistics_load {
-    __le32 air_time[MAC_INDEX_AUX];
-    __le32 byte_count[MAC_INDEX_AUX];
-    __le32 pkt_count[MAC_INDEX_AUX];
-    uint8_t avg_energy[IWL_MVM_STATION_COUNT];
+  __le32 air_time[MAC_INDEX_AUX];
+  __le32 byte_count[MAC_INDEX_AUX];
+  __le32 pkt_count[MAC_INDEX_AUX];
+  uint8_t avg_energy[IWL_MVM_STATION_COUNT];
 } __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */
 
 struct mvm_statistics_load_v1 {
-    __le32 air_time[NUM_MAC_INDEX];
-    __le32 byte_count[NUM_MAC_INDEX];
-    __le32 pkt_count[NUM_MAC_INDEX];
-    uint8_t avg_energy[IWL_MVM_STATION_COUNT];
+  __le32 air_time[NUM_MAC_INDEX];
+  __le32 byte_count[NUM_MAC_INDEX];
+  __le32 pkt_count[NUM_MAC_INDEX];
+  uint8_t avg_energy[IWL_MVM_STATION_COUNT];
 } __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
 
 struct mvm_statistics_rx {
-    struct mvm_statistics_rx_phy ofdm;
-    struct mvm_statistics_rx_phy cck;
-    struct mvm_statistics_rx_non_phy general;
-    struct mvm_statistics_rx_ht_phy ofdm_ht;
+  struct mvm_statistics_rx_phy ofdm;
+  struct mvm_statistics_rx_phy cck;
+  struct mvm_statistics_rx_non_phy general;
+  struct mvm_statistics_rx_ht_phy ofdm_ht;
 } __packed; /* STATISTICS_RX_API_S_VER_4 */
 
 struct mvm_statistics_rx_v3 {
-    struct mvm_statistics_rx_phy_v2 ofdm;
-    struct mvm_statistics_rx_phy_v2 cck;
-    struct mvm_statistics_rx_non_phy_v3 general;
-    struct mvm_statistics_rx_ht_phy_v1 ofdm_ht;
+  struct mvm_statistics_rx_phy_v2 ofdm;
+  struct mvm_statistics_rx_phy_v2 cck;
+  struct mvm_statistics_rx_non_phy_v3 general;
+  struct mvm_statistics_rx_ht_phy_v1 ofdm_ht;
 } __packed; /* STATISTICS_RX_API_S_VER_3 */
 
 /*
@@ -387,26 +387,26 @@
  */
 
 struct iwl_notif_statistics_v10 {
-    __le32 flag;
-    struct mvm_statistics_rx_v3 rx;
-    struct mvm_statistics_tx_v4 tx;
-    struct mvm_statistics_general_v8 general;
+  __le32 flag;
+  struct mvm_statistics_rx_v3 rx;
+  struct mvm_statistics_tx_v4 tx;
+  struct mvm_statistics_general_v8 general;
 } __packed; /* STATISTICS_NTFY_API_S_VER_10 */
 
 struct iwl_notif_statistics_v11 {
-    __le32 flag;
-    struct mvm_statistics_rx_v3 rx;
-    struct mvm_statistics_tx_v4 tx;
-    struct mvm_statistics_general_v8 general;
-    struct mvm_statistics_load_v1 load_stats;
+  __le32 flag;
+  struct mvm_statistics_rx_v3 rx;
+  struct mvm_statistics_tx_v4 tx;
+  struct mvm_statistics_general_v8 general;
+  struct mvm_statistics_load_v1 load_stats;
 } __packed; /* STATISTICS_NTFY_API_S_VER_11 */
 
 struct iwl_notif_statistics {
-    __le32 flag;
-    struct mvm_statistics_rx rx;
-    struct mvm_statistics_tx tx;
-    struct mvm_statistics_general general;
-    struct mvm_statistics_load load_stats;
+  __le32 flag;
+  struct mvm_statistics_rx rx;
+  struct mvm_statistics_tx tx;
+  struct mvm_statistics_general general;
+  struct mvm_statistics_load load_stats;
 } __packed; /* STATISTICS_NTFY_API_S_VER_13 */
 
 /**
@@ -414,7 +414,7 @@
  * @IWL_STATISTICS_REPLY_FLG_CLEAR: statistics were cleared after this report
  */
 enum iwl_statistics_notif_flags {
-    IWL_STATISTICS_REPLY_FLG_CLEAR = 0x1,
+  IWL_STATISTICS_REPLY_FLG_CLEAR = 0x1,
 };
 
 /**
@@ -425,8 +425,8 @@
  *  notifications
  */
 enum iwl_statistics_cmd_flags {
-    IWL_STATISTICS_FLG_CLEAR = 0x1,
-    IWL_STATISTICS_FLG_DISABLE_NOTIF = 0x2,
+  IWL_STATISTICS_FLG_CLEAR = 0x1,
+  IWL_STATISTICS_FLG_DISABLE_NOTIF = 0x2,
 };
 
 /**
@@ -434,7 +434,7 @@
  * @flags: flags from &enum iwl_statistics_cmd_flags
  */
 struct iwl_statistics_cmd {
-    __le32 flags;
+  __le32 flags;
 } __packed; /* STATISTICS_CMD_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_STATS_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tdls.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tdls.h
index 14ce8c2..f7464f0 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tdls.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tdls.h
@@ -43,9 +43,9 @@
 
 /* Type of TDLS request */
 enum iwl_tdls_channel_switch_type {
-    TDLS_SEND_CHAN_SW_REQ = 0,
-    TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH,
-    TDLS_MOVE_CH,
+  TDLS_SEND_CHAN_SW_REQ = 0,
+  TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH,
+  TDLS_MOVE_CH,
 }; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
 
 /**
@@ -60,11 +60,11 @@
  * @switch_timeout: switch timeout the peer sent in its channel switch timing IE
  */
 struct iwl_tdls_channel_switch_timing {
-    __le32 frame_timestamp;      /* GP2 time of peer packet Rx */
-    __le32 max_offchan_duration; /* given in micro-seconds */
-    __le32 switch_time;          /* given in micro-seconds */
-    __le32 switch_timeout;       /* given in micro-seconds */
-} __packed;                      /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
+  __le32 frame_timestamp;      /* GP2 time of peer packet Rx */
+  __le32 max_offchan_duration; /* given in micro-seconds */
+  __le32 switch_time;          /* given in micro-seconds */
+  __le32 switch_timeout;       /* given in micro-seconds */
+} __packed;                    /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
 
 #define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
 
@@ -78,9 +78,9 @@
  * @data: frame data
  */
 struct iwl_tdls_channel_switch_frame {
-    __le32 switch_time_offset;
-    struct iwl_tx_cmd tx_cmd;
-    uint8_t data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
+  __le32 switch_time_offset;
+  struct iwl_tx_cmd tx_cmd;
+  uint8_t data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
 } __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
 
 /**
@@ -96,11 +96,11 @@
  * @frame: channel-switch request/response template, depending to switch_type
  */
 struct iwl_tdls_channel_switch_cmd {
-    uint8_t switch_type;
-    __le32 peer_sta_id;
-    struct iwl_fw_channel_info ci;
-    struct iwl_tdls_channel_switch_timing timing;
-    struct iwl_tdls_channel_switch_frame frame;
+  uint8_t switch_type;
+  __le32 peer_sta_id;
+  struct iwl_fw_channel_info ci;
+  struct iwl_tdls_channel_switch_timing timing;
+  struct iwl_tdls_channel_switch_frame frame;
 } __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
 
 /**
@@ -111,9 +111,9 @@
  * @sta_id: peer currently performing the channel-switch with
  */
 struct iwl_tdls_channel_switch_notif {
-    __le32 status;
-    __le32 offchannel_duration;
-    __le32 sta_id;
+  __le32 status;
+  __le32 offchannel_duration;
+  __le32 sta_id;
 } __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
 
 /**
@@ -125,10 +125,10 @@
  * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise
  */
 struct iwl_tdls_sta_info {
-    uint8_t sta_id;
-    uint8_t tx_to_peer_tid;
-    __le16 tx_to_peer_ssn;
-    __le32 is_initiator;
+  uint8_t sta_id;
+  uint8_t tx_to_peer_tid;
+  __le16 tx_to_peer_ssn;
+  __le32 is_initiator;
 } __packed; /* TDLS_STA_INFO_VER_1 */
 
 /**
@@ -144,15 +144,15 @@
  * @pti_req_template: PTI request template data
  */
 struct iwl_tdls_config_cmd {
-    __le32 id_and_color; /* mac id and color */
-    uint8_t tdls_peer_count;
-    uint8_t tx_to_ap_tid;
-    __le16 tx_to_ap_ssn;
-    struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
+  __le32 id_and_color; /* mac id and color */
+  uint8_t tdls_peer_count;
+  uint8_t tx_to_ap_tid;
+  __le16 tx_to_ap_ssn;
+  struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
 
-    __le32 pti_req_data_offset;
-    struct iwl_tx_cmd pti_req_tx_cmd;
-    uint8_t pti_req_template[0];
+  __le32 pti_req_data_offset;
+  struct iwl_tx_cmd pti_req_tx_cmd;
+  uint8_t pti_req_template[0];
 } __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
 
 /**
@@ -163,8 +163,8 @@
  *  the peer
  */
 struct iwl_tdls_config_sta_info_res {
-    __le16 sta_id;
-    __le16 tx_to_peer_last_seq;
+  __le16 sta_id;
+  __le16 tx_to_peer_last_seq;
 } __packed; /* TDLS_STA_INFO_RSP_VER_1 */
 
 /**
@@ -174,8 +174,8 @@
  * @sta_info: per-station TDLS config information
  */
 struct iwl_tdls_config_res {
-    __le32 tx_to_ap_last_seq;
-    struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
+  __le32 tx_to_ap_last_seq;
+  struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
 } __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_TDLS_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/testing.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/testing.h
index 6a02fc5..08b3bc7 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/testing.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/testing.h
@@ -55,11 +55,11 @@
  *  256 bit key. Otherwise 128 bit key is used.
  */
 enum iwl_fips_test_vector_flags {
-    IWL_FIPS_TEST_VECTOR_FLAGS_AES = BIT(0),
-    IWL_FIPS_TEST_VECTOR_FLAGS_CCM = BIT(1),
-    IWL_FIPS_TEST_VECTOR_FLAGS_GCM = BIT(2),
-    IWL_FIPS_TEST_VECTOR_FLAGS_ENC = BIT(3),
-    IWL_FIPS_TEST_VECTOR_FLAGS_KEY_256 = BIT(5),
+  IWL_FIPS_TEST_VECTOR_FLAGS_AES = BIT(0),
+  IWL_FIPS_TEST_VECTOR_FLAGS_CCM = BIT(1),
+  IWL_FIPS_TEST_VECTOR_FLAGS_GCM = BIT(2),
+  IWL_FIPS_TEST_VECTOR_FLAGS_ENC = BIT(3),
+  IWL_FIPS_TEST_VECTOR_FLAGS_KEY_256 = BIT(5),
 };
 
 /**
@@ -77,15 +77,15 @@
  * @payload: the plaintext to encrypt or the cipher text to decrypt + MIC.
  */
 struct iwl_fips_test_cmd {
-    __le32 flags;
-    __le32 payload_len;
-    __le32 aad_len;
-    uint8_t key[FIPS_MAX_KEY_LEN];
-    uint8_t aad[FIPS_MAX_AAD_LEN];
-    __le16 reserved;
-    uint8_t nonce[FIPS_MAX_NONCE_LEN];
-    uint8_t reserved2[3];
-    uint8_t payload[0];
+  __le32 flags;
+  __le32 payload_len;
+  __le32 aad_len;
+  uint8_t key[FIPS_MAX_KEY_LEN];
+  uint8_t aad[FIPS_MAX_AAD_LEN];
+  __le16 reserved;
+  uint8_t nonce[FIPS_MAX_NONCE_LEN];
+  uint8_t reserved2[3];
+  uint8_t payload[0];
 } __packed; /* AES_SEC_TEST_VECTOR_HDR_API_S_VER_1 */
 
 /**
@@ -95,8 +95,8 @@
  *  successfully. The result buffer is valid.
  */
 enum iwl_fips_test_status {
-    IWL_FIPS_TEST_STATUS_FAILURE,
-    IWL_FIPS_TEST_STATUS_SUCCESS,
+  IWL_FIPS_TEST_STATUS_FAILURE,
+  IWL_FIPS_TEST_STATUS_SUCCESS,
 };
 
 /**
@@ -106,8 +106,8 @@
  *  &enum iwl_fips_test_status).
  */
 struct iwl_fips_test_resp {
-    __le32 len;
-    uint8_t payload[0];
+  __le32 len;
+  uint8_t payload[0];
 } __packed; /* AES_SEC_TEST_VECTOR_RESP_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_TESTING_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/time-event.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/time-event.h
index 27dbce7..8372d9f 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/time-event.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/time-event.h
@@ -40,35 +40,35 @@
 
 /* Time Event types, according to MAC type */
 enum iwl_time_event_type {
-    /* BSS Station Events */
-    TE_BSS_STA_AGGRESSIVE_ASSOC,
-    TE_BSS_STA_ASSOC,
-    TE_BSS_EAP_DHCP_PROT,
-    TE_BSS_QUIET_PERIOD,
+  /* BSS Station Events */
+  TE_BSS_STA_AGGRESSIVE_ASSOC,
+  TE_BSS_STA_ASSOC,
+  TE_BSS_EAP_DHCP_PROT,
+  TE_BSS_QUIET_PERIOD,
 
-    /* P2P Device Events */
-    TE_P2P_DEVICE_DISCOVERABLE,
-    TE_P2P_DEVICE_LISTEN,
-    TE_P2P_DEVICE_ACTION_SCAN,
-    TE_P2P_DEVICE_FULL_SCAN,
+  /* P2P Device Events */
+  TE_P2P_DEVICE_DISCOVERABLE,
+  TE_P2P_DEVICE_LISTEN,
+  TE_P2P_DEVICE_ACTION_SCAN,
+  TE_P2P_DEVICE_FULL_SCAN,
 
-    /* P2P Client Events */
-    TE_P2P_CLIENT_AGGRESSIVE_ASSOC,
-    TE_P2P_CLIENT_ASSOC,
-    TE_P2P_CLIENT_QUIET_PERIOD,
+  /* P2P Client Events */
+  TE_P2P_CLIENT_AGGRESSIVE_ASSOC,
+  TE_P2P_CLIENT_ASSOC,
+  TE_P2P_CLIENT_QUIET_PERIOD,
 
-    /* P2P GO Events */
-    TE_P2P_GO_ASSOC_PROT,
-    TE_P2P_GO_REPETITIVET_NOA,
-    TE_P2P_GO_CT_WINDOW,
+  /* P2P GO Events */
+  TE_P2P_GO_ASSOC_PROT,
+  TE_P2P_GO_REPETITIVET_NOA,
+  TE_P2P_GO_CT_WINDOW,
 
-    /* WiDi Sync Events */
-    TE_WIDI_TX_SYNC,
+  /* WiDi Sync Events */
+  TE_WIDI_TX_SYNC,
 
-    /* Channel Switch NoA */
-    TE_CHANNEL_SWITCH_PERIOD,
+  /* Channel Switch NoA */
+  TE_CHANNEL_SWITCH_PERIOD,
 
-    TE_MAX
+  TE_MAX
 }; /* MAC_EVENT_TYPE_API_E_VER_1 */
 
 /* Time event - defines for command API v1 */
@@ -87,10 +87,10 @@
  * scheduled.
  */
 enum {
-    TE_V1_FRAG_NONE = 0,
-    TE_V1_FRAG_SINGLE = 1,
-    TE_V1_FRAG_DUAL = 2,
-    TE_V1_FRAG_ENDLESS = 0xffffffff
+  TE_V1_FRAG_NONE = 0,
+  TE_V1_FRAG_SINGLE = 1,
+  TE_V1_FRAG_DUAL = 2,
+  TE_V1_FRAG_ENDLESS = 0xffffffff
 };
 
 /* If a Time Event can be fragmented, this is the max number of fragments */
@@ -102,10 +102,10 @@
 
 /* Time Event dependencies: none, on another TE, or in a specific time */
 enum {
-    TE_V1_INDEPENDENT = 0,
-    TE_V1_DEP_OTHER = BIT(0),
-    TE_V1_DEP_TSF = BIT(1),
-    TE_V1_EVENT_SOCIOPATHIC = BIT(2),
+  TE_V1_INDEPENDENT = 0,
+  TE_V1_DEP_OTHER = BIT(0),
+  TE_V1_DEP_TSF = BIT(1),
+  TE_V1_EVENT_SOCIOPATHIC = BIT(2),
 }; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
 
 /*
@@ -126,15 +126,15 @@
  * notification for monolithic events.
  */
 enum {
-    TE_V1_NOTIF_NONE = 0,
-    TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
-    TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
-    TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
-    TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
-    TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
-    TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
-    TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
-    TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
+  TE_V1_NOTIF_NONE = 0,
+  TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
+  TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
+  TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
+  TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
+  TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
+  TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
+  TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
+  TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
 }; /* MAC_EVENT_ACTION_API_E_VER_2 */
 
 /* Time event - defines for command API */
@@ -153,11 +153,11 @@
  * scheduled.
  */
 enum {
-    TE_V2_FRAG_NONE = 0,
-    TE_V2_FRAG_SINGLE = 1,
-    TE_V2_FRAG_DUAL = 2,
-    TE_V2_FRAG_MAX = 0xfe,
-    TE_V2_FRAG_ENDLESS = 0xff
+  TE_V2_FRAG_NONE = 0,
+  TE_V2_FRAG_SINGLE = 1,
+  TE_V2_FRAG_DUAL = 2,
+  TE_V2_FRAG_MAX = 0xfe,
+  TE_V2_FRAG_ENDLESS = 0xff
 };
 
 /* Repeat the time event endlessly (until removed) */
@@ -191,27 +191,27 @@
  * @TE_V2_ABSENCE: are we present or absent during the Time Event.
  */
 enum iwl_time_event_policy {
-    TE_V2_DEFAULT_POLICY = 0x0,
+  TE_V2_DEFAULT_POLICY = 0x0,
 
-    /* notifications (event start/stop, fragment start/stop) */
-    TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
-    TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
-    TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
-    TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
+  /* notifications (event start/stop, fragment start/stop) */
+  TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
+  TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
+  TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
+  TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
 
-    TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
-    TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
-    TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
-    TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
-    TE_V2_START_IMMEDIATELY = BIT(11),
+  TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
+  TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
+  TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
+  TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
+  TE_V2_START_IMMEDIATELY = BIT(11),
 
-    /* placement characteristics */
-    TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
-    TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
-    TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
+  /* placement characteristics */
+  TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
+  TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
+  TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
 
-    /* are we present or absent during the Time Event. */
-    TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
+  /* are we present or absent during the Time Event. */
+  TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
 };
 
 /**
@@ -241,19 +241,19 @@
  *  &enum iwl_time_event_policy
  */
 struct iwl_time_event_cmd {
-    /* COMMON_INDEX_HDR_API_S_VER_1 */
-    __le32 id_and_color;
-    __le32 action;
-    __le32 id;
-    /* MAC_TIME_EVENT_DATA_API_S_VER_2 */
-    __le32 apply_time;
-    __le32 max_delay;
-    __le32 depends_on;
-    __le32 interval;
-    __le32 duration;
-    uint8_t repeat;
-    uint8_t max_frags;
-    __le16 policy;
+  /* COMMON_INDEX_HDR_API_S_VER_1 */
+  __le32 id_and_color;
+  __le32 action;
+  __le32 id;
+  /* MAC_TIME_EVENT_DATA_API_S_VER_2 */
+  __le32 apply_time;
+  __le32 max_delay;
+  __le32 depends_on;
+  __le32 interval;
+  __le32 duration;
+  uint8_t repeat;
+  uint8_t max_frags;
+  __le16 policy;
 } __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
 
 /**
@@ -265,10 +265,10 @@
  *  &enum iwl_ctxt_id_and_color
  */
 struct iwl_time_event_resp {
-    __le32 status;
-    __le32 id;
-    __le32 unique_id;
-    __le32 id_and_color;
+  __le32 status;
+  __le32 id;
+  __le32 unique_id;
+  __le32 id_and_color;
 } __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */
 
 /**
@@ -282,12 +282,12 @@
  * @status: true if scheduled, false otherwise (not executed)
  */
 struct iwl_time_event_notif {
-    __le32 timestamp;
-    __le32 session_id;
-    __le32 unique_id;
-    __le32 id_and_color;
-    __le32 action;
-    __le32 status;
+  __le32 timestamp;
+  __le32 session_id;
+  __le32 unique_id;
+  __le32 id_and_color;
+  __le32 action;
+  __le32 status;
 } __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
 
 /*
@@ -318,26 +318,26 @@
  *  timeEventDuration = min(duration, remainingQuota)
  */
 struct iwl_hs20_roc_req {
-    /* COMMON_INDEX_HDR_API_S_VER_1 hdr */
-    __le32 id_and_color;
-    __le32 action;
-    __le32 event_unique_id;
-    __le32 sta_id_and_color;
-    struct iwl_fw_channel_info channel_info;
-    uint8_t node_addr[ETH_ALEN];
-    __le16 reserved;
-    __le32 apply_time;
-    __le32 apply_time_max_delay;
-    __le32 duration;
+  /* COMMON_INDEX_HDR_API_S_VER_1 hdr */
+  __le32 id_and_color;
+  __le32 action;
+  __le32 event_unique_id;
+  __le32 sta_id_and_color;
+  struct iwl_fw_channel_info channel_info;
+  uint8_t node_addr[ETH_ALEN];
+  __le16 reserved;
+  __le32 apply_time;
+  __le32 apply_time_max_delay;
+  __le32 duration;
 } __packed; /* HOT_SPOT_CMD_API_S_VER_1 */
 
 /*
  * values for AUX ROC result values
  */
 enum iwl_mvm_hot_spot {
-    HOT_SPOT_RSP_STATUS_OK,
-    HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS,
-    HOT_SPOT_MAX_NUM_OF_SESSIONS,
+  HOT_SPOT_RSP_STATUS_OK,
+  HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS,
+  HOT_SPOT_MAX_NUM_OF_SESSIONS,
 };
 
 /*
@@ -352,8 +352,8 @@
  * @status: Return status 0 is success, all the rest used for specific errors
  */
 struct iwl_hs20_roc_res {
-    __le32 event_unique_id;
-    __le32 status;
+  __le32 event_unique_id;
+  __le32 status;
 } __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_TIME_EVENT_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tof.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tof.h
index 0b5711a..e7f8007 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tof.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tof.h
@@ -37,17 +37,17 @@
 
 /* ToF sub-group command IDs */
 enum iwl_mvm_tof_sub_grp_ids {
-    TOF_RANGE_REQ_CMD = 0x1,
-    TOF_CONFIG_CMD = 0x2,
-    TOF_RANGE_ABORT_CMD = 0x3,
-    TOF_RANGE_REQ_EXT_CMD = 0x4,
-    TOF_RESPONDER_CONFIG_CMD = 0x5,
-    TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
-    TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
-    TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
-    TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
-    TOF_RANGE_RESPONSE_NOTIF = 0xFE,
-    TOF_MCSI_DEBUG_NOTIF = 0xFB,
+  TOF_RANGE_REQ_CMD = 0x1,
+  TOF_CONFIG_CMD = 0x2,
+  TOF_RANGE_ABORT_CMD = 0x3,
+  TOF_RANGE_REQ_EXT_CMD = 0x4,
+  TOF_RESPONDER_CONFIG_CMD = 0x5,
+  TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
+  TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
+  TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
+  TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
+  TOF_RANGE_RESPONSE_NOTIF = 0xFE,
+  TOF_MCSI_DEBUG_NOTIF = 0xFB,
 };
 
 /**
@@ -59,11 +59,11 @@
  * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise
  */
 struct iwl_tof_config_cmd {
-    __le32 sub_grp_cmd_id;
-    uint8_t tof_disabled;
-    uint8_t one_sided_disabled;
-    uint8_t is_debug_mode;
-    uint8_t is_buf_required;
+  __le32 sub_grp_cmd_id;
+  uint8_t tof_disabled;
+  uint8_t one_sided_disabled;
+  uint8_t is_debug_mode;
+  uint8_t is_buf_required;
 } __packed;
 
 /**
@@ -113,25 +113,25 @@
  * @bssid: Current AP BSSID
  */
 struct iwl_tof_responder_config_cmd {
-    __le32 sub_grp_cmd_id;
-    __le16 burst_period;
-    uint8_t min_delta_ftm;
-    uint8_t burst_duration;
-    uint8_t num_of_burst_exp;
-    uint8_t get_ch_est;
-    uint8_t abort_responder;
-    uint8_t recv_sta_req_params;
-    uint8_t channel_num;
-    uint8_t bandwidth;
-    uint8_t rate;
-    uint8_t ctrl_ch_position;
-    uint8_t ftm_per_burst;
-    uint8_t ftm_resp_ts_avail;
-    uint8_t asap_mode;
-    uint8_t sta_id;
-    __le16 tsf_timer_offset_msecs;
-    __le16 toa_offset;
-    uint8_t bssid[ETH_ALEN];
+  __le32 sub_grp_cmd_id;
+  __le16 burst_period;
+  uint8_t min_delta_ftm;
+  uint8_t burst_duration;
+  uint8_t num_of_burst_exp;
+  uint8_t get_ch_est;
+  uint8_t abort_responder;
+  uint8_t recv_sta_req_params;
+  uint8_t channel_num;
+  uint8_t bandwidth;
+  uint8_t rate;
+  uint8_t ctrl_ch_position;
+  uint8_t ftm_per_burst;
+  uint8_t ftm_resp_ts_avail;
+  uint8_t asap_mode;
+  uint8_t sta_id;
+  __le16 tsf_timer_offset_msecs;
+  __le16 toa_offset;
+  uint8_t bssid[ETH_ALEN];
 } __packed;
 
 /**
@@ -149,13 +149,13 @@
  *          value to be sent to the AP
  */
 struct iwl_tof_range_req_ext_cmd {
-    __le32 sub_grp_cmd_id;
-    __le16 tsf_timer_offset_msec;
-    __le16 reserved;
-    uint8_t min_delta_ftm;
-    uint8_t ftm_format_and_bw20M;
-    uint8_t ftm_format_and_bw40M;
-    uint8_t ftm_format_and_bw80M;
+  __le32 sub_grp_cmd_id;
+  __le16 tsf_timer_offset_msec;
+  __le16 reserved;
+  uint8_t min_delta_ftm;
+  uint8_t ftm_format_and_bw20M;
+  uint8_t ftm_format_and_bw40M;
+  uint8_t ftm_format_and_bw80M;
 } __packed;
 
 #define IWL_MVM_TOF_MAX_APS 21
@@ -192,21 +192,21 @@
  *    leagal values: -128-0 (0x7f). above 0x0 indicating an invalid value.
  */
 struct iwl_tof_range_req_ap_entry {
-    uint8_t channel_num;
-    uint8_t bandwidth;
-    uint8_t tsf_delta_direction;
-    uint8_t ctrl_ch_position;
-    uint8_t bssid[ETH_ALEN];
-    uint8_t measure_type;
-    uint8_t num_of_bursts;
-    __le16 burst_period;
-    uint8_t samples_per_burst;
-    uint8_t retries_per_sample;
-    __le32 tsf_delta;
-    uint8_t location_req;
-    uint8_t asap_mode;
-    uint8_t enable_dyn_ack;
-    int8_t rssi;
+  uint8_t channel_num;
+  uint8_t bandwidth;
+  uint8_t tsf_delta_direction;
+  uint8_t ctrl_ch_position;
+  uint8_t bssid[ETH_ALEN];
+  uint8_t measure_type;
+  uint8_t num_of_bursts;
+  __le16 burst_period;
+  uint8_t samples_per_burst;
+  uint8_t retries_per_sample;
+  __le32 tsf_delta;
+  uint8_t location_req;
+  uint8_t asap_mode;
+  uint8_t enable_dyn_ack;
+  int8_t rssi;
 } __packed;
 
 /**
@@ -220,9 +220,9 @@
  *                expiration.
  */
 enum iwl_tof_response_mode {
-    IWL_MVM_TOF_RESPOSE_ASAP = 1,
-    IWL_MVM_TOF_RESPOSE_TIMEOUT,
-    IWL_MVM_TOF_RESPOSE_COMPLETE,
+  IWL_MVM_TOF_RESPOSE_ASAP = 1,
+  IWL_MVM_TOF_RESPOSE_TIMEOUT,
+  IWL_MVM_TOF_RESPOSE_COMPLETE,
 };
 
 /**
@@ -250,18 +250,18 @@
  * @ap: per-AP request data
  */
 struct iwl_tof_range_req_cmd {
-    __le32 sub_grp_cmd_id;
-    uint8_t request_id;
-    uint8_t initiator;
-    uint8_t one_sided_los_disable;
-    uint8_t req_timeout;
-    uint8_t report_policy;
-    uint8_t los_det_disable;
-    uint8_t num_of_ap;
-    uint8_t macaddr_random;
-    uint8_t macaddr_template[ETH_ALEN];
-    uint8_t macaddr_mask[ETH_ALEN];
-    struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+  __le32 sub_grp_cmd_id;
+  uint8_t request_id;
+  uint8_t initiator;
+  uint8_t one_sided_los_disable;
+  uint8_t req_timeout;
+  uint8_t report_policy;
+  uint8_t los_det_disable;
+  uint8_t num_of_ap;
+  uint8_t macaddr_random;
+  uint8_t macaddr_template[ETH_ALEN];
+  uint8_t macaddr_mask[ETH_ALEN];
+  struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
 } __packed;
 
 /**
@@ -270,8 +270,8 @@
  * @data: response data
  */
 struct iwl_tof_gen_resp_cmd {
-    __le32 sub_grp_cmd_id;
-    uint8_t data[];
+  __le32 sub_grp_cmd_id;
+  uint8_t data[];
 } __packed;
 
 /**
@@ -295,18 +295,18 @@
  *         uploaded by the LMAC
  */
 struct iwl_tof_range_rsp_ap_entry_ntfy {
-    uint8_t bssid[ETH_ALEN];
-    uint8_t measure_status;
-    uint8_t measure_bw;
-    __le32 rtt;
-    __le32 rtt_variance;
-    __le32 rtt_spread;
-    int8_t rssi;
-    uint8_t rssi_spread;
-    __le16 reserved;
-    __le32 range;
-    __le32 range_variance;
-    __le32 timestamp;
+  uint8_t bssid[ETH_ALEN];
+  uint8_t measure_status;
+  uint8_t measure_bw;
+  __le32 rtt;
+  __le32 rtt_variance;
+  __le32 rtt_spread;
+  int8_t rssi;
+  uint8_t rssi_spread;
+  __le16 reserved;
+  __le32 range;
+  __le32 range_variance;
+  __le32 timestamp;
 } __packed;
 
 /**
@@ -318,11 +318,11 @@
  * @ap: per-AP data
  */
 struct iwl_tof_range_rsp_ntfy {
-    uint8_t request_id;
-    uint8_t request_status;
-    uint8_t last_in_batch;
-    uint8_t num_of_aps;
-    struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+  uint8_t request_id;
+  uint8_t request_status;
+  uint8_t last_in_batch;
+  uint8_t num_of_aps;
+  struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
 } __packed;
 
 #define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
@@ -336,12 +336,12 @@
  * @mcsi_buffer: debug data
  */
 struct iwl_tof_mcsi_notif {
-    uint8_t token;
-    uint8_t role;
-    __le16 reserved;
-    uint8_t initiator_bssid[ETH_ALEN];
-    uint8_t responder_bssid[ETH_ALEN];
-    uint8_t mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+  uint8_t token;
+  uint8_t role;
+  __le16 reserved;
+  uint8_t initiator_bssid[ETH_ALEN];
+  uint8_t responder_bssid[ETH_ALEN];
+  uint8_t mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
 } __packed;
 
 /**
@@ -353,11 +353,11 @@
  * @data: the IEs
  */
 struct iwl_tof_neighbor_report {
-    uint8_t bssid[ETH_ALEN];
-    uint8_t request_token;
-    uint8_t status;
-    __le16 report_ie_len;
-    uint8_t data[];
+  uint8_t bssid[ETH_ALEN];
+  uint8_t request_token;
+  uint8_t status;
+  __le16 report_ie_len;
+  uint8_t data[];
 } __packed;
 
 /**
@@ -367,9 +367,9 @@
  * @reserved: reserved
  */
 struct iwl_tof_range_abort_cmd {
-    __le32 sub_grp_cmd_id;
-    uint8_t request_id;
-    uint8_t reserved[3];
+  __le32 sub_grp_cmd_id;
+  uint8_t request_id;
+  uint8_t reserved[3];
 } __packed;
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_TOF_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tx.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tx.h
index 20d2355..dab400a 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tx.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tx.h
@@ -69,30 +69,30 @@
  * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk
  */
 enum iwl_tx_flags {
-    TX_CMD_FLG_PROT_REQUIRE = BIT(0),
-    TX_CMD_FLG_WRITE_TX_POWER = BIT(1),
-    TX_CMD_FLG_ACK = BIT(3),
-    TX_CMD_FLG_STA_RATE = BIT(4),
-    TX_CMD_FLG_BAR = BIT(6),
-    TX_CMD_FLG_TXOP_PROT = BIT(7),
-    TX_CMD_FLG_VHT_NDPA = BIT(8),
-    TX_CMD_FLG_HT_NDPA = BIT(9),
-    TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
-    TX_CMD_FLG_BT_PRIO_POS = 11,
-    TX_CMD_FLG_BT_DIS = BIT(12),
-    TX_CMD_FLG_SEQ_CTL = BIT(13),
-    TX_CMD_FLG_MORE_FRAG = BIT(14),
-    TX_CMD_FLG_TSF = BIT(16),
-    TX_CMD_FLG_CALIB = BIT(17),
-    TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18),
-    TX_CMD_FLG_MH_PAD = BIT(20),
-    TX_CMD_FLG_RESP_TO_DRV = BIT(21),
-    TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
-    TX_CMD_FLG_DUR = BIT(25),
-    TX_CMD_FLG_FW_DROP = BIT(26),
-    TX_CMD_FLG_EXEC_PAPD = BIT(27),
-    TX_CMD_FLG_PAPD_TYPE = BIT(28),
-    TX_CMD_FLG_HCCA_CHUNK = BIT(31)
+  TX_CMD_FLG_PROT_REQUIRE = BIT(0),
+  TX_CMD_FLG_WRITE_TX_POWER = BIT(1),
+  TX_CMD_FLG_ACK = BIT(3),
+  TX_CMD_FLG_STA_RATE = BIT(4),
+  TX_CMD_FLG_BAR = BIT(6),
+  TX_CMD_FLG_TXOP_PROT = BIT(7),
+  TX_CMD_FLG_VHT_NDPA = BIT(8),
+  TX_CMD_FLG_HT_NDPA = BIT(9),
+  TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
+  TX_CMD_FLG_BT_PRIO_POS = 11,
+  TX_CMD_FLG_BT_DIS = BIT(12),
+  TX_CMD_FLG_SEQ_CTL = BIT(13),
+  TX_CMD_FLG_MORE_FRAG = BIT(14),
+  TX_CMD_FLG_TSF = BIT(16),
+  TX_CMD_FLG_CALIB = BIT(17),
+  TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18),
+  TX_CMD_FLG_MH_PAD = BIT(20),
+  TX_CMD_FLG_RESP_TO_DRV = BIT(21),
+  TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
+  TX_CMD_FLG_DUR = BIT(25),
+  TX_CMD_FLG_FW_DROP = BIT(26),
+  TX_CMD_FLG_EXEC_PAPD = BIT(27),
+  TX_CMD_FLG_PAPD_TYPE = BIT(28),
+  TX_CMD_FLG_HCCA_CHUNK = BIT(31)
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
 /**
@@ -104,9 +104,9 @@
  *  selection, retry limits and BT kill
  */
 enum iwl_tx_cmd_flags {
-    IWL_TX_FLAGS_CMD_RATE = BIT(0),
-    IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1),
-    IWL_TX_FLAGS_HIGH_PRI = BIT(2),
+  IWL_TX_FLAGS_CMD_RATE = BIT(0),
+  IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1),
+  IWL_TX_FLAGS_HIGH_PRI = BIT(2),
 }; /* TX_FLAGS_BITS_API_S_VER_3 */
 
 /**
@@ -116,9 +116,9 @@
  * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
  */
 enum iwl_tx_pm_timeouts {
-    PM_FRAME_NONE = 0,
-    PM_FRAME_MGMT = 2,
-    PM_FRAME_ASSOC = 3,
+  PM_FRAME_NONE = 0,
+  PM_FRAME_MGMT = 2,
+  PM_FRAME_ASSOC = 3,
 };
 
 #define TX_CMD_SEC_MSK 0x07
@@ -139,13 +139,13 @@
  *  first byte of the TX command key field.
  */
 enum iwl_tx_cmd_sec_ctrl {
-    TX_CMD_SEC_WEP = 0x01,
-    TX_CMD_SEC_CCM = 0x02,
-    TX_CMD_SEC_TKIP = 0x03,
-    TX_CMD_SEC_EXT = 0x04,
-    TX_CMD_SEC_GCMP = 0x05,
-    TX_CMD_SEC_KEY128 = 0x08,
-    TX_CMD_SEC_KEY_FROM_TABLE = 0x10,
+  TX_CMD_SEC_WEP = 0x01,
+  TX_CMD_SEC_CCM = 0x02,
+  TX_CMD_SEC_TKIP = 0x03,
+  TX_CMD_SEC_EXT = 0x04,
+  TX_CMD_SEC_GCMP = 0x05,
+  TX_CMD_SEC_KEY128 = 0x08,
+  TX_CMD_SEC_KEY_FROM_TABLE = 0x10,
 };
 
 /*
@@ -188,12 +188,12 @@
  * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU
  */
 enum iwl_tx_offload_assist_flags_pos {
-    TX_CMD_OFFLD_IP_HDR = 0,
-    TX_CMD_OFFLD_L4_EN = 6,
-    TX_CMD_OFFLD_L3_EN = 7,
-    TX_CMD_OFFLD_MH_SIZE = 8,
-    TX_CMD_OFFLD_PAD = 13,
-    TX_CMD_OFFLD_AMSDU = 14,
+  TX_CMD_OFFLD_IP_HDR = 0,
+  TX_CMD_OFFLD_L4_EN = 6,
+  TX_CMD_OFFLD_L3_EN = 7,
+  TX_CMD_OFFLD_MH_SIZE = 8,
+  TX_CMD_OFFLD_PAD = 13,
+  TX_CMD_OFFLD_AMSDU = 14,
 };
 
 #define IWL_TX_CMD_OFFLD_MH_MASK 0x1f
@@ -242,37 +242,37 @@
  * and then the actial payload.
  */
 struct iwl_tx_cmd {
-    __le16 len;
-    __le16 offload_assist;
-    __le32 tx_flags;
-    struct {
-        uint8_t try_cnt;
-        uint8_t btkill_cnt;
-        __le16 reserved;
-    } scratch; /* DRAM_SCRATCH_API_U_VER_1 */
-    __le32 rate_n_flags;
-    uint8_t sta_id;
-    uint8_t sec_ctl;
-    uint8_t initial_rate_index;
-    uint8_t reserved2;
-    uint8_t key[16];
-    __le32 reserved3;
-    __le32 life_time;
-    __le32 dram_lsb_ptr;
-    uint8_t dram_msb_ptr;
-    uint8_t rts_retry_limit;
-    uint8_t data_retry_limit;
-    uint8_t tid_tspec;
-    __le16 pm_frame_timeout;
-    __le16 reserved4;
-    uint8_t payload[0];
-    struct ieee80211_hdr hdr[0];
+  __le16 len;
+  __le16 offload_assist;
+  __le32 tx_flags;
+  struct {
+    uint8_t try_cnt;
+    uint8_t btkill_cnt;
+    __le16 reserved;
+  } scratch; /* DRAM_SCRATCH_API_U_VER_1 */
+  __le32 rate_n_flags;
+  uint8_t sta_id;
+  uint8_t sec_ctl;
+  uint8_t initial_rate_index;
+  uint8_t reserved2;
+  uint8_t key[16];
+  __le32 reserved3;
+  __le32 life_time;
+  __le32 dram_lsb_ptr;
+  uint8_t dram_msb_ptr;
+  uint8_t rts_retry_limit;
+  uint8_t data_retry_limit;
+  uint8_t tid_tspec;
+  __le16 pm_frame_timeout;
+  __le16 reserved4;
+  uint8_t payload[0];
+  struct ieee80211_hdr hdr[0];
 } __packed; /* TX_CMD_API_S_VER_6 */
 
 struct iwl_dram_sec_info {
-    __le32 pn_low;
-    __le16 pn_high;
-    __le16 aux_info;
+  __le32 pn_low;
+  __le16 pn_high;
+  __le16 aux_info;
 } __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
 
 /**
@@ -287,12 +287,12 @@
  * @hdr: 802.11 header
  */
 struct iwl_tx_cmd_gen2 {
-    __le16 len;
-    __le16 offload_assist;
-    __le32 flags;
-    struct iwl_dram_sec_info dram_info;
-    __le32 rate_n_flags;
-    struct ieee80211_hdr hdr[0];
+  __le16 len;
+  __le16 offload_assist;
+  __le32 flags;
+  struct iwl_dram_sec_info dram_info;
+  __le32 rate_n_flags;
+  struct ieee80211_hdr hdr[0];
 } __packed; /* TX_CMD_API_S_VER_7 */
 
 /**
@@ -309,13 +309,13 @@
  * @hdr: 802.11 header
  */
 struct iwl_tx_cmd_gen3 {
-    __le16 len;
-    __le16 flags;
-    __le32 offload_assist;
-    struct iwl_dram_sec_info dram_info;
-    __le32 rate_n_flags;
-    __le64 ttl;
-    struct ieee80211_hdr hdr[0];
+  __le16 len;
+  __le16 flags;
+  __le32 offload_assist;
+  struct iwl_dram_sec_info dram_info;
+  __le32 rate_n_flags;
+  __le64 ttl;
+  struct ieee80211_hdr hdr[0];
 } __packed; /* TX_CMD_API_S_VER_8 */
 
 /*
@@ -360,43 +360,43 @@
  * TODO: complete documentation
  */
 enum iwl_tx_status {
-    TX_STATUS_MSK = 0x000000ff,
-    TX_STATUS_SUCCESS = 0x01,
-    TX_STATUS_DIRECT_DONE = 0x02,
-    /* postpone TX */
-    TX_STATUS_POSTPONE_DELAY = 0x40,
-    TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
-    TX_STATUS_POSTPONE_BT_PRIO = 0x42,
-    TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
-    TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
-    /* abort TX */
-    TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
-    TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
-    TX_STATUS_FAIL_LONG_LIMIT = 0x83,
-    TX_STATUS_FAIL_UNDERRUN = 0x84,
-    TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
-    TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
-    TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
-    TX_STATUS_FAIL_DEST_PS = 0x88,
-    TX_STATUS_FAIL_HOST_ABORTED = 0x89,
-    TX_STATUS_FAIL_BT_RETRY = 0x8a,
-    TX_STATUS_FAIL_STA_INVALID = 0x8b,
-    TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
-    TX_STATUS_FAIL_TID_DISABLE = 0x8d,
-    TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
-    TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f,
-    TX_STATUS_FAIL_FW_DROP = 0x90,
-    TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91,
-    TX_STATUS_INTERNAL_ABORT = 0x92,
-    TX_MODE_MSK = 0x00000f00,
-    TX_MODE_NO_BURST = 0x00000000,
-    TX_MODE_IN_BURST_SEQ = 0x00000100,
-    TX_MODE_FIRST_IN_BURST = 0x00000200,
-    TX_QUEUE_NUM_MSK = 0x0001f000,
-    TX_NARROW_BW_MSK = 0x00060000,
-    TX_NARROW_BW_1DIV2 = 0x00020000,
-    TX_NARROW_BW_1DIV4 = 0x00040000,
-    TX_NARROW_BW_1DIV8 = 0x00060000,
+  TX_STATUS_MSK = 0x000000ff,
+  TX_STATUS_SUCCESS = 0x01,
+  TX_STATUS_DIRECT_DONE = 0x02,
+  /* postpone TX */
+  TX_STATUS_POSTPONE_DELAY = 0x40,
+  TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+  TX_STATUS_POSTPONE_BT_PRIO = 0x42,
+  TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+  TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+  /* abort TX */
+  TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+  TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+  TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+  TX_STATUS_FAIL_UNDERRUN = 0x84,
+  TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+  TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+  TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+  TX_STATUS_FAIL_DEST_PS = 0x88,
+  TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+  TX_STATUS_FAIL_BT_RETRY = 0x8a,
+  TX_STATUS_FAIL_STA_INVALID = 0x8b,
+  TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+  TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+  TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+  TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f,
+  TX_STATUS_FAIL_FW_DROP = 0x90,
+  TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91,
+  TX_STATUS_INTERNAL_ABORT = 0x92,
+  TX_MODE_MSK = 0x00000f00,
+  TX_MODE_NO_BURST = 0x00000000,
+  TX_MODE_IN_BURST_SEQ = 0x00000100,
+  TX_MODE_FIRST_IN_BURST = 0x00000200,
+  TX_QUEUE_NUM_MSK = 0x0001f000,
+  TX_NARROW_BW_MSK = 0x00060000,
+  TX_NARROW_BW_1DIV2 = 0x00020000,
+  TX_NARROW_BW_1DIV4 = 0x00040000,
+  TX_NARROW_BW_1DIV8 = 0x00060000,
 };
 
 /*
@@ -426,22 +426,22 @@
  * TODO: complete documentation
  */
 enum iwl_tx_agg_status {
-    AGG_TX_STATE_STATUS_MSK = 0x00fff,
-    AGG_TX_STATE_TRANSMITTED = 0x000,
-    AGG_TX_STATE_UNDERRUN = 0x001,
-    AGG_TX_STATE_BT_PRIO = 0x002,
-    AGG_TX_STATE_FEW_BYTES = 0x004,
-    AGG_TX_STATE_ABORT = 0x008,
-    AGG_TX_STATE_TX_ON_AIR_DROP = 0x010,
-    AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020,
-    AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040,
-    AGG_TX_STATE_SCD_QUERY = 0x080,
-    AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100,
-    AGG_TX_STATE_RESPONSE = 0x1ff,
-    AGG_TX_STATE_DUMP_TX = 0x200,
-    AGG_TX_STATE_DELAY_TX = 0x400,
-    AGG_TX_STATE_TRY_CNT_POS = 12,
-    AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS,
+  AGG_TX_STATE_STATUS_MSK = 0x00fff,
+  AGG_TX_STATE_TRANSMITTED = 0x000,
+  AGG_TX_STATE_UNDERRUN = 0x001,
+  AGG_TX_STATE_BT_PRIO = 0x002,
+  AGG_TX_STATE_FEW_BYTES = 0x004,
+  AGG_TX_STATE_ABORT = 0x008,
+  AGG_TX_STATE_TX_ON_AIR_DROP = 0x010,
+  AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020,
+  AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040,
+  AGG_TX_STATE_SCD_QUERY = 0x080,
+  AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100,
+  AGG_TX_STATE_RESPONSE = 0x1ff,
+  AGG_TX_STATE_DUMP_TX = 0x200,
+  AGG_TX_STATE_DELAY_TX = 0x400,
+  AGG_TX_STATE_TRY_CNT_POS = 12,
+  AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS,
 };
 
 /*
@@ -450,7 +450,7 @@
  * written the bytes to the TXE, but we know nothing about what the DSP did.
  */
 #define AGG_TX_STAT_FRAME_NOT_SENT \
-    (AGG_TX_STATE_FEW_BYTES | AGG_TX_STATE_ABORT | AGG_TX_STATE_SCD_QUERY)
+  (AGG_TX_STATE_FEW_BYTES | AGG_TX_STATE_ABORT | AGG_TX_STATE_SCD_QUERY)
 
 /*
  * REPLY_TX = 0x1c (response)
@@ -481,8 +481,8 @@
  * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
  */
 struct agg_tx_status {
-    __le16 status;
-    __le16 sequence;
+  __le16 status;
+  __le16 sequence;
 } __packed;
 
 /*
@@ -496,7 +496,7 @@
 #define TX_RES_RATE_TABLE_COLOR_MSK 0x70
 #define TX_RES_INV_RATE_INDEX_MSK 0x80
 #define TX_RES_RATE_TABLE_COL_GET(_f) \
-    (((_f)&TX_RES_RATE_TABLE_COLOR_MSK) >> TX_RES_RATE_TABLE_COLOR_POS)
+  (((_f)&TX_RES_RATE_TABLE_COLOR_MSK) >> TX_RES_RATE_TABLE_COLOR_POS)
 
 #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid)&0x0f)
 #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
@@ -534,28 +534,28 @@
  * %iwl_mvm_get_scd_ssn for more details.
  */
 struct iwl_mvm_tx_resp_v3 {
-    uint8_t frame_count;
-    uint8_t bt_kill_count;
-    uint8_t failure_rts;
-    uint8_t failure_frame;
-    __le32 initial_rate;
-    __le16 wireless_media_time;
+  uint8_t frame_count;
+  uint8_t bt_kill_count;
+  uint8_t failure_rts;
+  uint8_t failure_frame;
+  __le32 initial_rate;
+  __le16 wireless_media_time;
 
-    uint8_t pa_status;
-    uint8_t pa_integ_res_a[3];
-    uint8_t pa_integ_res_b[3];
-    uint8_t pa_integ_res_c[3];
-    __le16 measurement_req_id;
-    uint8_t reduced_tpc;
-    uint8_t reserved;
+  uint8_t pa_status;
+  uint8_t pa_integ_res_a[3];
+  uint8_t pa_integ_res_b[3];
+  uint8_t pa_integ_res_c[3];
+  __le16 measurement_req_id;
+  uint8_t reduced_tpc;
+  uint8_t reserved;
 
-    __le32 tfd_info;
-    __le16 seq_ctl;
-    __le16 byte_cnt;
-    uint8_t tlc_info;
-    uint8_t ra_tid;
-    __le16 frame_ctrl;
-    struct agg_tx_status status[];
+  __le32 tfd_info;
+  __le16 seq_ctl;
+  __le16 byte_cnt;
+  uint8_t tlc_info;
+  uint8_t ra_tid;
+  __le16 frame_ctrl;
+  struct agg_tx_status status[];
 } __packed; /* TX_RSP_API_S_VER_3 */
 
 /**
@@ -592,30 +592,30 @@
  * %iwl_mvm_get_scd_ssn for more details.
  */
 struct iwl_mvm_tx_resp {
-    uint8_t frame_count;
-    uint8_t bt_kill_count;
-    uint8_t failure_rts;
-    uint8_t failure_frame;
-    __le32 initial_rate;
-    __le16 wireless_media_time;
+  uint8_t frame_count;
+  uint8_t bt_kill_count;
+  uint8_t failure_rts;
+  uint8_t failure_frame;
+  __le32 initial_rate;
+  __le16 wireless_media_time;
 
-    uint8_t pa_status;
-    uint8_t pa_integ_res_a[3];
-    uint8_t pa_integ_res_b[3];
-    uint8_t pa_integ_res_c[3];
-    __le16 measurement_req_id;
-    uint8_t reduced_tpc;
-    uint8_t reserved;
+  uint8_t pa_status;
+  uint8_t pa_integ_res_a[3];
+  uint8_t pa_integ_res_b[3];
+  uint8_t pa_integ_res_c[3];
+  __le16 measurement_req_id;
+  uint8_t reduced_tpc;
+  uint8_t reserved;
 
-    __le32 tfd_info;
-    __le16 seq_ctl;
-    __le16 byte_cnt;
-    uint8_t tlc_info;
-    uint8_t ra_tid;
-    __le16 frame_ctrl;
-    __le16 tx_queue;
-    __le16 reserved2;
-    struct agg_tx_status status;
+  __le32 tfd_info;
+  __le16 seq_ctl;
+  __le16 byte_cnt;
+  uint8_t tlc_info;
+  uint8_t ra_tid;
+  __le16 frame_ctrl;
+  __le16 tx_queue;
+  __le16 reserved2;
+  struct agg_tx_status status;
 } __packed; /* TX_RSP_API_S_VER_6 */
 
 /**
@@ -637,19 +637,19 @@
  * @reserved1: reserved
  */
 struct iwl_mvm_ba_notif {
-    uint8_t sta_addr[ETH_ALEN];
-    __le16 reserved;
+  uint8_t sta_addr[ETH_ALEN];
+  __le16 reserved;
 
-    uint8_t sta_id;
-    uint8_t tid;
-    __le16 seq_ctl;
-    __le64 bitmap;
-    __le16 scd_flow;
-    __le16 scd_ssn;
-    uint8_t txed;
-    uint8_t txed_2_done;
-    uint8_t reduced_txp;
-    uint8_t reserved1;
+  uint8_t sta_id;
+  uint8_t tid;
+  __le16 seq_ctl;
+  __le64 bitmap;
+  __le16 scd_flow;
+  __le16 scd_ssn;
+  uint8_t txed;
+  uint8_t txed_2_done;
+  uint8_t reduced_txp;
+  uint8_t reserved1;
 } __packed;
 
 /**
@@ -661,11 +661,11 @@
  * @reserved: reserved for alignment
  */
 struct iwl_mvm_compressed_ba_tfd {
-    __le16 q_num;
-    __le16 tfd_index;
-    uint8_t scd_queue;
-    uint8_t tid;
-    uint8_t reserved[2];
+  __le16 q_num;
+  __le16 tfd_index;
+  uint8_t scd_queue;
+  uint8_t tid;
+  uint8_t reserved[2];
 } __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
 
 /**
@@ -675,9 +675,9 @@
  * @ssn: BA window current SSN
  */
 struct iwl_mvm_compressed_ba_ratid {
-    uint8_t q_num;
-    uint8_t tid;
-    __le16 ssn;
+  uint8_t q_num;
+  uint8_t tid;
+  __le16 ssn;
 } __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */
 
 /*
@@ -691,12 +691,12 @@
  *  expected time
  */
 enum iwl_mvm_ba_resp_flags {
-    IWL_MVM_BA_RESP_TX_AGG,
-    IWL_MVM_BA_RESP_TX_BAR,
-    IWL_MVM_BA_RESP_TX_AGG_FAIL,
-    IWL_MVM_BA_RESP_TX_UNDERRUN,
-    IWL_MVM_BA_RESP_TX_BT_KILL,
-    IWL_MVM_BA_RESP_TX_DSP_TIMEOUT
+  IWL_MVM_BA_RESP_TX_AGG,
+  IWL_MVM_BA_RESP_TX_BAR,
+  IWL_MVM_BA_RESP_TX_AGG_FAIL,
+  IWL_MVM_BA_RESP_TX_UNDERRUN,
+  IWL_MVM_BA_RESP_TX_BT_KILL,
+  IWL_MVM_BA_RESP_TX_DSP_TIMEOUT
 };
 
 /**
@@ -724,22 +724,22 @@
  *  &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt.
  */
 struct iwl_mvm_compressed_ba_notif {
-    __le32 flags;
-    uint8_t sta_id;
-    uint8_t reduced_txp;
-    uint8_t tlc_rate_info;
-    uint8_t retry_cnt;
-    __le32 query_byte_cnt;
-    __le16 query_frame_cnt;
-    __le16 txed;
-    __le16 done;
-    __le16 reserved;
-    __le32 wireless_time;
-    __le32 tx_rate;
-    __le16 tfd_cnt;
-    __le16 ra_tid_cnt;
-    struct iwl_mvm_compressed_ba_tfd tfd[0];
-    struct iwl_mvm_compressed_ba_ratid ra_tid[0];
+  __le32 flags;
+  uint8_t sta_id;
+  uint8_t reduced_txp;
+  uint8_t tlc_rate_info;
+  uint8_t retry_cnt;
+  __le32 query_byte_cnt;
+  __le16 query_frame_cnt;
+  __le16 txed;
+  __le16 done;
+  __le16 reserved;
+  __le32 wireless_time;
+  __le32 tx_rate;
+  __le16 tfd_cnt;
+  __le16 ra_tid_cnt;
+  struct iwl_mvm_compressed_ba_tfd tfd[0];
+  struct iwl_mvm_compressed_ba_ratid ra_tid[0];
 } __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
 
 /**
@@ -752,11 +752,11 @@
  * @frame: the template of the beacon frame
  */
 struct iwl_mac_beacon_cmd_v6 {
-    struct iwl_tx_cmd tx;
-    __le32 template_id;
-    __le32 tim_idx;
-    __le32 tim_size;
-    struct ieee80211_hdr frame[0];
+  struct iwl_tx_cmd tx;
+  __le32 template_id;
+  __le32 tim_idx;
+  __le32 tim_size;
+  struct ieee80211_hdr frame[0];
 } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */
 
 /**
@@ -771,20 +771,20 @@
  * @frame: the template of the beacon frame
  */
 struct iwl_mac_beacon_cmd_v7 {
-    struct iwl_tx_cmd tx;
-    __le32 template_id;
-    __le32 tim_idx;
-    __le32 tim_size;
-    __le32 ecsa_offset;
-    __le32 csa_offset;
-    struct ieee80211_hdr frame[0];
+  struct iwl_tx_cmd tx;
+  __le32 template_id;
+  __le32 tim_idx;
+  __le32 tim_size;
+  __le32 ecsa_offset;
+  __le32 csa_offset;
+  struct ieee80211_hdr frame[0];
 } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */
 
 enum iwl_mac_beacon_flags {
-    IWL_MAC_BEACON_CCK = BIT(8),
-    IWL_MAC_BEACON_ANT_A = BIT(9),
-    IWL_MAC_BEACON_ANT_B = BIT(10),
-    IWL_MAC_BEACON_ANT_C = BIT(11),
+  IWL_MAC_BEACON_CCK = BIT(8),
+  IWL_MAC_BEACON_ANT_A = BIT(9),
+  IWL_MAC_BEACON_ANT_B = BIT(10),
+  IWL_MAC_BEACON_ANT_C = BIT(11),
 };
 
 /**
@@ -801,21 +801,21 @@
  * @frame: the template of the beacon frame
  */
 struct iwl_mac_beacon_cmd {
-    __le16 byte_cnt;
-    __le16 flags;
-    __le64 reserved;
-    __le32 template_id;
-    __le32 tim_idx;
-    __le32 tim_size;
-    __le32 ecsa_offset;
-    __le32 csa_offset;
-    struct ieee80211_hdr frame[0];
+  __le16 byte_cnt;
+  __le16 flags;
+  __le64 reserved;
+  __le32 template_id;
+  __le32 tim_idx;
+  __le32 tim_size;
+  __le32 ecsa_offset;
+  __le32 csa_offset;
+  struct ieee80211_hdr frame[0];
 } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_9 */
 
 struct iwl_beacon_notif {
-    struct iwl_mvm_tx_resp beacon_notify_hdr;
-    __le64 tsf;
-    __le32 ibss_mgr_status;
+  struct iwl_mvm_tx_resp beacon_notify_hdr;
+  __le64 tsf;
+  __le32 ibss_mgr_status;
 } __packed;
 
 /**
@@ -826,10 +826,10 @@
  * @gp2: last beacon time in gp2
  */
 struct iwl_extended_beacon_notif {
-    struct iwl_mvm_tx_resp beacon_notify_hdr;
-    __le64 tsf;
-    __le32 ibss_mgr_status;
-    __le32 gp2;
+  struct iwl_mvm_tx_resp beacon_notify_hdr;
+  __le64 tsf;
+  __le32 ibss_mgr_status;
+  __le32 gp2;
 } __packed; /* BEACON_NTFY_API_S_VER_5 */
 
 /**
@@ -838,7 +838,7 @@
  *  and the TFD queues are empty.
  */
 enum iwl_dump_control {
-    DUMP_TX_FIFO_FLUSH = BIT(1),
+  DUMP_TX_FIFO_FLUSH = BIT(1),
 };
 
 /**
@@ -848,9 +848,9 @@
  * @reserved: reserved
  */
 struct iwl_tx_path_flush_cmd_v1 {
-    __le32 queues_ctl;
-    __le16 flush_ctl;
-    __le16 reserved;
+  __le32 queues_ctl;
+  __le16 flush_ctl;
+  __le16 reserved;
 } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
 
 /**
@@ -860,16 +860,16 @@
  * @reserved: reserved
  */
 struct iwl_tx_path_flush_cmd {
-    __le32 sta_id;
-    __le16 tid_mask;
-    __le16 reserved;
+  __le32 sta_id;
+  __le16 tid_mask;
+  __le16 reserved;
 } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
 
 /* Available options for the SCD_QUEUE_CFG HCMD */
 enum iwl_scd_cfg_actions {
-    SCD_CFG_DISABLE_QUEUE = 0x0,
-    SCD_CFG_ENABLE_QUEUE = 0x1,
-    SCD_CFG_UPDATE_QUEUE_TID = 0x2,
+  SCD_CFG_DISABLE_QUEUE = 0x0,
+  SCD_CFG_ENABLE_QUEUE = 0x1,
+  SCD_CFG_UPDATE_QUEUE_TID = 0x2,
 };
 
 /**
@@ -887,16 +887,16 @@
  * @reserved: reserved
  */
 struct iwl_scd_txq_cfg_cmd {
-    uint8_t token;
-    uint8_t sta_id;
-    uint8_t tid;
-    uint8_t scd_queue;
-    uint8_t action;
-    uint8_t aggregate;
-    uint8_t tx_fifo;
-    uint8_t window;
-    __le16 ssn;
-    __le16 reserved;
+  uint8_t token;
+  uint8_t sta_id;
+  uint8_t tid;
+  uint8_t scd_queue;
+  uint8_t action;
+  uint8_t aggregate;
+  uint8_t tx_fifo;
+  uint8_t window;
+  __le16 ssn;
+  __le16 reserved;
 } __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
 
 /**
@@ -907,10 +907,10 @@
  * @scd_queue: scd_queue from the command
  */
 struct iwl_scd_txq_cfg_rsp {
-    uint8_t token;
-    uint8_t sta_id;
-    uint8_t tid;
-    uint8_t scd_queue;
+  uint8_t token;
+  uint8_t sta_id;
+  uint8_t tid;
+  uint8_t scd_queue;
 } __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_TX_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/txq.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/txq.h
index 9d7fa93..fd2aae2 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/txq.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/txq.h
@@ -60,38 +60,38 @@
  * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
  */
 enum iwl_mvm_dqa_txq {
-    IWL_MVM_DQA_CMD_QUEUE = 0,
-    IWL_MVM_DQA_AUX_QUEUE = 1,
-    IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
-    IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,
-    IWL_MVM_DQA_GCAST_QUEUE = 3,
-    IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
-    IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
-    IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
-    IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
-    IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
-    IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
+  IWL_MVM_DQA_CMD_QUEUE = 0,
+  IWL_MVM_DQA_AUX_QUEUE = 1,
+  IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
+  IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,
+  IWL_MVM_DQA_GCAST_QUEUE = 3,
+  IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
+  IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
+  IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+  IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
+  IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
+  IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
 };
 
 enum iwl_mvm_tx_fifo {
-    IWL_MVM_TX_FIFO_BK = 0,
-    IWL_MVM_TX_FIFO_BE,
-    IWL_MVM_TX_FIFO_VI,
-    IWL_MVM_TX_FIFO_VO,
-    IWL_MVM_TX_FIFO_MCAST = 5,
-    IWL_MVM_TX_FIFO_CMD = 7,
+  IWL_MVM_TX_FIFO_BK = 0,
+  IWL_MVM_TX_FIFO_BE,
+  IWL_MVM_TX_FIFO_VI,
+  IWL_MVM_TX_FIFO_VO,
+  IWL_MVM_TX_FIFO_MCAST = 5,
+  IWL_MVM_TX_FIFO_CMD = 7,
 };
 
 enum iwl_gen2_tx_fifo {
-    IWL_GEN2_TX_FIFO_CMD = 0,
-    IWL_GEN2_EDCA_TX_FIFO_BK,
-    IWL_GEN2_EDCA_TX_FIFO_BE,
-    IWL_GEN2_EDCA_TX_FIFO_VI,
-    IWL_GEN2_EDCA_TX_FIFO_VO,
-    IWL_GEN2_TRIG_TX_FIFO_BK,
-    IWL_GEN2_TRIG_TX_FIFO_BE,
-    IWL_GEN2_TRIG_TX_FIFO_VI,
-    IWL_GEN2_TRIG_TX_FIFO_VO,
+  IWL_GEN2_TX_FIFO_CMD = 0,
+  IWL_GEN2_EDCA_TX_FIFO_BK,
+  IWL_GEN2_EDCA_TX_FIFO_BE,
+  IWL_GEN2_EDCA_TX_FIFO_VI,
+  IWL_GEN2_EDCA_TX_FIFO_VO,
+  IWL_GEN2_TRIG_TX_FIFO_BK,
+  IWL_GEN2_TRIG_TX_FIFO_BE,
+  IWL_GEN2_TRIG_TX_FIFO_VI,
+  IWL_GEN2_TRIG_TX_FIFO_VO,
 };
 
 /**
@@ -100,8 +100,8 @@
  * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
  */
 enum iwl_tx_queue_cfg_actions {
-    TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
-    TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
+  TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
+  TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
 };
 
 #define IWL_DEFAULT_QUEUE_SIZE 256
@@ -117,12 +117,12 @@
  * @tfdq_addr: address of TFD circular buffer
  */
 struct iwl_tx_queue_cfg_cmd {
-    uint8_t sta_id;
-    uint8_t tid;
-    __le16 flags;
-    __le32 cb_size;
-    __le64 byte_cnt_addr;
-    __le64 tfdq_addr;
+  uint8_t sta_id;
+  uint8_t tid;
+  __le16 flags;
+  __le32 cb_size;
+  __le64 byte_cnt_addr;
+  __le64 tfdq_addr;
 } __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
 
 /**
@@ -133,10 +133,10 @@
  * @reserved: reserved
  */
 struct iwl_tx_queue_cfg_rsp {
-    __le16 queue_number;
-    __le16 flags;
-    __le16 write_pointer;
-    __le16 reserved;
+  __le16 queue_number;
+  __le16 flags;
+  __le16 write_pointer;
+  __le16 reserved;
 } __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_API_TXQ_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/dbg.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/dbg.c
index 2ee4c00..a244b79 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/dbg.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/dbg.c
@@ -34,7 +34,9 @@
  *
  *****************************************************************************/
 #include "dbg.h"
+
 #include <linux/devcoredump.h>
+
 #include "debugfs.h"
 #include "iwl-csr.h"
 #include "iwl-drv.h"
@@ -52,230 +54,240 @@
  * @fwrt_len: length of the valid data in fwrt_ptr
  */
 struct iwl_fw_dump_ptrs {
-    struct iwl_trans_dump_data* trans_ptr;
-    void* fwrt_ptr;
-    uint32_t fwrt_len;
+  struct iwl_trans_dump_data* trans_ptr;
+  void* fwrt_ptr;
+  uint32_t fwrt_len;
 };
 
 #define RADIO_REG_MAX_READ 0x2ad
 static void iwl_read_radio_regs(struct iwl_fw_runtime* fwrt,
                                 struct iwl_fw_error_dump_data** dump_data) {
-    uint8_t* pos = (void*)(*dump_data)->data;
-    unsigned long flags;
-    int i;
+  uint8_t* pos = (void*)(*dump_data)->data;
+  unsigned long flags;
+  int i;
 
-    IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");
+  IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");
 
-    if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) { return; }
+  if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
+    return;
+  }
 
-    (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
-    (*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);
+  (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
+  (*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);
 
-    for (i = 0; i < RADIO_REG_MAX_READ; i++) {
-        uint32_t rd_cmd = RADIO_RSP_RD_CMD;
+  for (i = 0; i < RADIO_REG_MAX_READ; i++) {
+    uint32_t rd_cmd = RADIO_RSP_RD_CMD;
 
-        rd_cmd |= i << RADIO_RSP_ADDR_POS;
-        iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
-        *pos = (uint8_t)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
+    rd_cmd |= i << RADIO_RSP_ADDR_POS;
+    iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
+    *pos = (uint8_t)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
 
-        pos++;
-    }
+    pos++;
+  }
 
-    *dump_data = iwl_fw_error_next_data(*dump_data);
+  *dump_data = iwl_fw_error_next_data(*dump_data);
 
-    iwl_trans_release_nic_access(fwrt->trans, &flags);
+  iwl_trans_release_nic_access(fwrt->trans, &flags);
 }
 
 static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime* fwrt,
                               struct iwl_fw_error_dump_data** dump_data, int size, uint32_t offset,
                               int fifo_num) {
-    struct iwl_fw_error_dump_fifo* fifo_hdr;
-    uint32_t* fifo_data;
-    uint32_t fifo_len;
-    int i;
+  struct iwl_fw_error_dump_fifo* fifo_hdr;
+  uint32_t* fifo_data;
+  uint32_t fifo_len;
+  int i;
 
-    fifo_hdr = (void*)(*dump_data)->data;
-    fifo_data = (void*)fifo_hdr->data;
-    fifo_len = size;
+  fifo_hdr = (void*)(*dump_data)->data;
+  fifo_data = (void*)fifo_hdr->data;
+  fifo_len = size;
 
-    /* No need to try to read the data if the length is 0 */
-    if (fifo_len == 0) { return; }
+  /* No need to try to read the data if the length is 0 */
+  if (fifo_len == 0) {
+    return;
+  }
 
-    /* Add a TLV for the RXF */
-    (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
-    (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+  /* Add a TLV for the RXF */
+  (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+  (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
 
-    fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
-    fifo_hdr->available_bytes =
-        cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_D_SPACE + offset));
-    fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_WR_PTR + offset));
-    fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_RD_PTR + offset));
-    fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_FENCE_PTR + offset));
-    fifo_hdr->fence_mode =
-        cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset));
+  fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+  fifo_hdr->available_bytes =
+      cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_D_SPACE + offset));
+  fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_WR_PTR + offset));
+  fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_RD_PTR + offset));
+  fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_FENCE_PTR + offset));
+  fifo_hdr->fence_mode = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset));
 
-    /* Lock fence */
-    iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
-    /* Set fence pointer to the same place like WR pointer */
-    iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
-    /* Set fence offset */
-    iwl_trans_write_prph(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
+  /* Lock fence */
+  iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
+  /* Set fence pointer to the same place like WR pointer */
+  iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
+  /* Set fence offset */
+  iwl_trans_write_prph(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
 
-    /* Read FIFO */
-    fifo_len /= sizeof(uint32_t); /* Size in DWORDS */
-    for (i = 0; i < fifo_len; i++) {
-        fifo_data[i] = iwl_trans_read_prph(fwrt->trans, RXF_FIFO_RD_FENCE_INC + offset);
-    }
-    *dump_data = iwl_fw_error_next_data(*dump_data);
+  /* Read FIFO */
+  fifo_len /= sizeof(uint32_t); /* Size in DWORDS */
+  for (i = 0; i < fifo_len; i++) {
+    fifo_data[i] = iwl_trans_read_prph(fwrt->trans, RXF_FIFO_RD_FENCE_INC + offset);
+  }
+  *dump_data = iwl_fw_error_next_data(*dump_data);
 }
 
 static void iwl_fwrt_dump_txf(struct iwl_fw_runtime* fwrt,
                               struct iwl_fw_error_dump_data** dump_data, int size, uint32_t offset,
                               int fifo_num) {
-    struct iwl_fw_error_dump_fifo* fifo_hdr;
-    uint32_t* fifo_data;
-    uint32_t fifo_len;
-    int i;
+  struct iwl_fw_error_dump_fifo* fifo_hdr;
+  uint32_t* fifo_data;
+  uint32_t fifo_len;
+  int i;
 
-    fifo_hdr = (void*)(*dump_data)->data;
-    fifo_data = (void*)fifo_hdr->data;
-    fifo_len = size;
+  fifo_hdr = (void*)(*dump_data)->data;
+  fifo_data = (void*)fifo_hdr->data;
+  fifo_len = size;
 
-    /* No need to try to read the data if the length is 0 */
-    if (fifo_len == 0) { return; }
+  /* No need to try to read the data if the length is 0 */
+  if (fifo_len == 0) {
+    return;
+  }
 
-    /* Add a TLV for the FIFO */
-    (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
-    (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+  /* Add a TLV for the FIFO */
+  (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
+  (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
 
-    fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
-    fifo_hdr->available_bytes =
-        cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FIFO_ITEM_CNT + offset));
-    fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_WR_PTR + offset));
-    fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_RD_PTR + offset));
-    fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FENCE_PTR + offset));
-    fifo_hdr->fence_mode = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_LOCK_FENCE + offset));
+  fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+  fifo_hdr->available_bytes =
+      cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FIFO_ITEM_CNT + offset));
+  fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_WR_PTR + offset));
+  fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_RD_PTR + offset));
+  fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FENCE_PTR + offset));
+  fifo_hdr->fence_mode = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_LOCK_FENCE + offset));
 
-    /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
-    iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset, TXF_WR_PTR + offset);
+  /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
+  iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset, TXF_WR_PTR + offset);
 
-    /* Dummy-read to advance the read pointer to the head */
-    iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
+  /* Dummy-read to advance the read pointer to the head */
+  iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
 
-    /* Read FIFO */
-    fifo_len /= sizeof(uint32_t); /* Size in DWORDS */
-    for (i = 0; i < fifo_len; i++) {
-        fifo_data[i] = iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
-    }
-    *dump_data = iwl_fw_error_next_data(*dump_data);
+  /* Read FIFO */
+  fifo_len /= sizeof(uint32_t); /* Size in DWORDS */
+  for (i = 0; i < fifo_len; i++) {
+    fifo_data[i] = iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
+  }
+  *dump_data = iwl_fw_error_next_data(*dump_data);
 }
 
 static void iwl_fw_dump_rxf(struct iwl_fw_runtime* fwrt,
                             struct iwl_fw_error_dump_data** dump_data) {
-    struct iwl_fwrt_shared_mem_cfg* cfg = &fwrt->smem_cfg;
-    unsigned long flags;
+  struct iwl_fwrt_shared_mem_cfg* cfg = &fwrt->smem_cfg;
+  unsigned long flags;
 
-    IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n");
+  IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n");
 
-    if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) { return; }
+  if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
+    return;
+  }
 
-    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
-        /* Pull RXF1 */
-        iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
-        /* Pull RXF2 */
-        iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, RXF_DIFF_FROM_PREV, 1);
-        /* Pull LMAC2 RXF1 */
-        if (fwrt->smem_cfg.num_lmacs > 1) {
-            iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, LMAC2_PRPH_OFFSET, 2);
-        }
+  if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
+    /* Pull RXF1 */
+    iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
+    /* Pull RXF2 */
+    iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, RXF_DIFF_FROM_PREV, 1);
+    /* Pull LMAC2 RXF1 */
+    if (fwrt->smem_cfg.num_lmacs > 1) {
+      iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, LMAC2_PRPH_OFFSET, 2);
     }
+  }
 
-    iwl_trans_release_nic_access(fwrt->trans, &flags);
+  iwl_trans_release_nic_access(fwrt->trans, &flags);
 }
 
 static void iwl_fw_dump_txf(struct iwl_fw_runtime* fwrt,
                             struct iwl_fw_error_dump_data** dump_data) {
-    struct iwl_fw_error_dump_fifo* fifo_hdr;
-    struct iwl_fwrt_shared_mem_cfg* cfg = &fwrt->smem_cfg;
-    uint32_t* fifo_data;
-    uint32_t fifo_len;
-    unsigned long flags;
-    int i, j;
+  struct iwl_fw_error_dump_fifo* fifo_hdr;
+  struct iwl_fwrt_shared_mem_cfg* cfg = &fwrt->smem_cfg;
+  uint32_t* fifo_data;
+  uint32_t fifo_len;
+  unsigned long flags;
+  int i, j;
 
-    IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n");
+  IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n");
 
-    if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) { return; }
+  if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
+    return;
+  }
 
-    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
-        /* Pull TXF data from LMAC1 */
-        for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
-            /* Mark the number of TXF we're pulling now */
-            iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
-            iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i], 0, i);
-        }
-
-        /* Pull TXF data from LMAC2 */
-        if (fwrt->smem_cfg.num_lmacs > 1) {
-            for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
-                /* Mark the number of TXF we're pulling now */
-                iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM + LMAC2_PRPH_OFFSET, i);
-                iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[1].txfifo_size[i], LMAC2_PRPH_OFFSET,
-                                  i + cfg->num_txfifo_entries);
-            }
-        }
+  if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
+    /* Pull TXF data from LMAC1 */
+    for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
+      /* Mark the number of TXF we're pulling now */
+      iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
+      iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i], 0, i);
     }
 
-    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
-        fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
-        /* Pull UMAC internal TXF data from all TXFs */
-        for (i = 0; i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) {
-            fifo_hdr = (void*)(*dump_data)->data;
-            fifo_data = (void*)fifo_hdr->data;
-            fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];
-
-            /* No need to try to read the data if the length is 0 */
-            if (fifo_len == 0) { continue; }
-
-            /* Add a TLV for the internal FIFOs */
-            (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
-            (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
-
-            fifo_hdr->fifo_num = cpu_to_le32(i);
-
-            /* Mark the number of TXF we're pulling now */
-            iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i + fwrt->smem_cfg.num_txfifo_entries);
-
-            fifo_hdr->available_bytes =
-                cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FIFO_ITEM_CNT));
-            fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_WR_PTR));
-            fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_RD_PTR));
-            fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FENCE_PTR));
-            fifo_hdr->fence_mode =
-                cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_LOCK_FENCE));
-
-            /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
-            iwl_trans_write_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_ADDR, TXF_CPU2_WR_PTR);
-
-            /* Dummy-read to advance the read pointer to head */
-            iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA);
-
-            /* Read FIFO */
-            fifo_len /= sizeof(uint32_t); /* Size in DWORDS */
-            for (j = 0; j < fifo_len; j++) {
-                fifo_data[j] = iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA);
-            }
-            *dump_data = iwl_fw_error_next_data(*dump_data);
-        }
+    /* Pull TXF data from LMAC2 */
+    if (fwrt->smem_cfg.num_lmacs > 1) {
+      for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
+        /* Mark the number of TXF we're pulling now */
+        iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM + LMAC2_PRPH_OFFSET, i);
+        iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[1].txfifo_size[i], LMAC2_PRPH_OFFSET,
+                          i + cfg->num_txfifo_entries);
+      }
     }
+  }
 
-    iwl_trans_release_nic_access(fwrt->trans, &flags);
+  if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+      fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+    /* Pull UMAC internal TXF data from all TXFs */
+    for (i = 0; i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) {
+      fifo_hdr = (void*)(*dump_data)->data;
+      fifo_data = (void*)fifo_hdr->data;
+      fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];
+
+      /* No need to try to read the data if the length is 0 */
+      if (fifo_len == 0) {
+        continue;
+      }
+
+      /* Add a TLV for the internal FIFOs */
+      (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
+      (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+      fifo_hdr->fifo_num = cpu_to_le32(i);
+
+      /* Mark the number of TXF we're pulling now */
+      iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i + fwrt->smem_cfg.num_txfifo_entries);
+
+      fifo_hdr->available_bytes =
+          cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FIFO_ITEM_CNT));
+      fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_WR_PTR));
+      fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_RD_PTR));
+      fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FENCE_PTR));
+      fifo_hdr->fence_mode = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_LOCK_FENCE));
+
+      /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
+      iwl_trans_write_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_ADDR, TXF_CPU2_WR_PTR);
+
+      /* Dummy-read to advance the read pointer to head */
+      iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA);
+
+      /* Read FIFO */
+      fifo_len /= sizeof(uint32_t); /* Size in DWORDS */
+      for (j = 0; j < fifo_len; j++) {
+        fifo_data[j] = iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA);
+      }
+      *dump_data = iwl_fw_error_next_data(*dump_data);
+    }
+  }
+
+  iwl_trans_release_nic_access(fwrt->trans, &flags);
 }
 
 #define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
 #define IWL8260_ICCM_LEN 0xC000     /* Only for B-step */
 
 struct iwl_prph_range {
-    uint32_t start, end;
+  uint32_t start, end;
 };
 
 static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
@@ -382,46 +394,50 @@
 
 static void iwl_read_prph_block(struct iwl_trans* trans, uint32_t start, uint32_t len_bytes,
                                 __le32* data) {
-    uint32_t i;
+  uint32_t i;
 
-    for (i = 0; i < len_bytes; i += 4) {
-        *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
-    }
+  for (i = 0; i < len_bytes; i += 4) {
+    *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
+  }
 }
 
 static void iwl_dump_prph(struct iwl_fw_runtime* fwrt,
                           const struct iwl_prph_range* iwl_prph_dump_addr, uint32_t range_len,
                           void* ptr) {
-    struct iwl_fw_error_dump_prph* prph;
-    struct iwl_trans* trans = fwrt->trans;
-    struct iwl_fw_error_dump_data** data = (struct iwl_fw_error_dump_data**)ptr;
-    unsigned long flags;
-    uint32_t i;
+  struct iwl_fw_error_dump_prph* prph;
+  struct iwl_trans* trans = fwrt->trans;
+  struct iwl_fw_error_dump_data** data = (struct iwl_fw_error_dump_data**)ptr;
+  unsigned long flags;
+  uint32_t i;
 
-    if (!data) { return; }
+  if (!data) {
+    return;
+  }
 
-    IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
+  IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
 
-    if (!iwl_trans_grab_nic_access(trans, &flags)) { return; }
+  if (!iwl_trans_grab_nic_access(trans, &flags)) {
+    return;
+  }
 
-    for (i = 0; i < range_len; i++) {
-        /* The range includes both boundaries */
-        int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4;
+  for (i = 0; i < range_len; i++) {
+    /* The range includes both boundaries */
+    int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4;
 
-        (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
-        (*data)->len = cpu_to_le32(sizeof(*prph) + num_bytes_in_chunk);
-        prph = (void*)(*data)->data;
-        prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
+    (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
+    (*data)->len = cpu_to_le32(sizeof(*prph) + num_bytes_in_chunk);
+    prph = (void*)(*data)->data;
+    prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
 
-        iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
-                            /* our range is inclusive, hence + 4 */
-                            iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4,
-                            (void*)prph->data);
+    iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
+                        /* our range is inclusive, hence + 4 */
+                        iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4,
+                        (void*)prph->data);
 
-        *data = iwl_fw_error_next_data(*data);
-    }
+    *data = iwl_fw_error_next_data(*data);
+  }
 
-    iwl_trans_release_nic_access(trans, &flags);
+  iwl_trans_release_nic_access(trans, &flags);
 }
 
 /*
@@ -430,656 +446,712 @@
  * @size: the size (in bytes) of the table
  */
 static struct scatterlist* alloc_sgtable(int size) {
-    int alloc_size, nents, i;
-    struct page* new_page;
-    struct scatterlist* iter;
-    struct scatterlist* table;
+  int alloc_size, nents, i;
+  struct page* new_page;
+  struct scatterlist* iter;
+  struct scatterlist* table;
 
-    nents = DIV_ROUND_UP(size, PAGE_SIZE);
-    table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
-    if (!table) { return NULL; }
-    sg_init_table(table, nents);
-    iter = table;
-    for_each_sg(table, iter, sg_nents(table), i) {
-        new_page = alloc_page(GFP_KERNEL);
-        if (!new_page) {
-            /* release all previous allocated pages in the table */
-            iter = table;
-            for_each_sg(table, iter, sg_nents(table), i) {
-                new_page = sg_page(iter);
-                if (new_page) { __free_page(new_page); }
-            }
-            return NULL;
+  nents = DIV_ROUND_UP(size, PAGE_SIZE);
+  table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
+  if (!table) {
+    return NULL;
+  }
+  sg_init_table(table, nents);
+  iter = table;
+  for_each_sg(table, iter, sg_nents(table), i) {
+    new_page = alloc_page(GFP_KERNEL);
+    if (!new_page) {
+      /* release all previous allocated pages in the table */
+      iter = table;
+      for_each_sg(table, iter, sg_nents(table), i) {
+        new_page = sg_page(iter);
+        if (new_page) {
+          __free_page(new_page);
         }
-        alloc_size = min_t(int, size, PAGE_SIZE);
-        size -= PAGE_SIZE;
-        sg_set_page(iter, new_page, alloc_size, 0);
+      }
+      return NULL;
     }
-    return table;
+    alloc_size = min_t(int, size, PAGE_SIZE);
+    size -= PAGE_SIZE;
+    sg_set_page(iter, new_page, alloc_size, 0);
+  }
+  return table;
 }
 
 static void iwl_fw_get_prph_len(struct iwl_fw_runtime* fwrt,
                                 const struct iwl_prph_range* iwl_prph_dump_addr, uint32_t range_len,
                                 void* ptr) {
-    uint32_t* prph_len = (uint32_t*)ptr;
-    int i, num_bytes_in_chunk;
+  uint32_t* prph_len = (uint32_t*)ptr;
+  int i, num_bytes_in_chunk;
 
-    if (!prph_len) { return; }
+  if (!prph_len) {
+    return;
+  }
 
-    for (i = 0; i < range_len; i++) {
-        /* The range includes both boundaries */
-        num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4;
+  for (i = 0; i < range_len; i++) {
+    /* The range includes both boundaries */
+    num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4;
 
-        *prph_len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_prph) +
-                     num_bytes_in_chunk;
-    }
+    *prph_len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_prph) +
+                 num_bytes_in_chunk;
+  }
 }
 
 static void iwl_fw_prph_handler(struct iwl_fw_runtime* fwrt, void* ptr,
                                 void (*handler)(struct iwl_fw_runtime*,
                                                 const struct iwl_prph_range*, uint32_t, void*)) {
-    uint32_t range_len;
+  uint32_t range_len;
 
-    if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
-        range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
-        handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
-    } else {
-        range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm);
-        handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr);
+  if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+    range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
+    handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
+  } else {
+    range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm);
+    handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr);
 
-        if (fwrt->trans->cfg->mq_rx_supported) {
-            range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000);
-            handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr);
-        }
+    if (fwrt->trans->cfg->mq_rx_supported) {
+      range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000);
+      handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr);
     }
+  }
 }
 
 static void iwl_fw_dump_mem(struct iwl_fw_runtime* fwrt, struct iwl_fw_error_dump_data** dump_data,
                             uint32_t len, uint32_t ofs, uint32_t type) {
-    struct iwl_fw_error_dump_mem* dump_mem;
+  struct iwl_fw_error_dump_mem* dump_mem;
 
-    if (!len) { return; }
+  if (!len) {
+    return;
+  }
 
-    (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
-    (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
-    dump_mem = (void*)(*dump_data)->data;
-    dump_mem->type = cpu_to_le32(type);
-    dump_mem->offset = cpu_to_le32(ofs);
-    iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
-    *dump_data = iwl_fw_error_next_data(*dump_data);
+  (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+  (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
+  dump_mem = (void*)(*dump_data)->data;
+  dump_mem->type = cpu_to_le32(type);
+  dump_mem->offset = cpu_to_le32(ofs);
+  iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
+  *dump_data = iwl_fw_error_next_data(*dump_data);
 
-    IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
+  IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
 }
 
 static void iwl_fw_dump_named_mem(struct iwl_fw_runtime* fwrt,
                                   struct iwl_fw_error_dump_data** dump_data, uint32_t len,
                                   uint32_t ofs, uint8_t* name, uint8_t name_len) {
-    struct iwl_fw_error_dump_named_mem* dump_mem;
+  struct iwl_fw_error_dump_named_mem* dump_mem;
 
-    if (!len) { return; }
+  if (!len) {
+    return;
+  }
 
-    (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
-    (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
-    dump_mem = (void*)(*dump_data)->data;
-    dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_NAMED_MEM);
-    dump_mem->offset = cpu_to_le32(ofs);
-    dump_mem->name_len = name_len;
-    memcpy(dump_mem->name, name, name_len);
-    iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
-    *dump_data = iwl_fw_error_next_data(*dump_data);
+  (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+  (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
+  dump_mem = (void*)(*dump_data)->data;
+  dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_NAMED_MEM);
+  dump_mem->offset = cpu_to_le32(ofs);
+  dump_mem->name_len = name_len;
+  memcpy(dump_mem->name, name, name_len);
+  iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
+  *dump_data = iwl_fw_error_next_data(*dump_data);
 
-    IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
+  IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
 }
 
-#define ADD_LEN(len, item_len, const_len)   \
-    do {                                    \
-        size_t item = item_len;             \
-        len += (!!item) * const_len + item; \
-    } while (0)
+#define ADD_LEN(len, item_len, const_len) \
+  do {                                    \
+    size_t item = item_len;               \
+    len += (!!item) * const_len + item;   \
+  } while (0)
 
 static int iwl_fw_rxf_len(struct iwl_fw_runtime* fwrt, struct iwl_fwrt_shared_mem_cfg* mem_cfg) {
-    size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo);
-    uint32_t fifo_len = 0;
-    int i;
+  size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo);
+  uint32_t fifo_len = 0;
+  int i;
 
-    if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) { return 0; }
+  if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
+    return 0;
+  }
 
-    /* Count RXF2 size */
-    ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
+  /* Count RXF2 size */
+  ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
 
-    /* Count RXF1 sizes */
-    if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) { mem_cfg->num_lmacs = MAX_NUM_LMAC; }
+  /* Count RXF1 sizes */
+  if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) {
+    mem_cfg->num_lmacs = MAX_NUM_LMAC;
+  }
 
-    for (i = 0; i < mem_cfg->num_lmacs; i++) {
-        ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
-    }
+  for (i = 0; i < mem_cfg->num_lmacs; i++) {
+    ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
+  }
 
-    return fifo_len;
+  return fifo_len;
 }
 
 static int iwl_fw_txf_len(struct iwl_fw_runtime* fwrt, struct iwl_fwrt_shared_mem_cfg* mem_cfg) {
-    size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo);
-    uint32_t fifo_len = 0;
-    int i;
+  size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo);
+  uint32_t fifo_len = 0;
+  int i;
 
-    if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) { goto dump_internal_txf; }
+  if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
+    goto dump_internal_txf;
+  }
 
-    /* Count TXF sizes */
-    if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) { mem_cfg->num_lmacs = MAX_NUM_LMAC; }
+  /* Count TXF sizes */
+  if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) {
+    mem_cfg->num_lmacs = MAX_NUM_LMAC;
+  }
 
-    for (i = 0; i < mem_cfg->num_lmacs; i++) {
-        int j;
+  for (i = 0; i < mem_cfg->num_lmacs; i++) {
+    int j;
 
-        for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
-            ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j], hdr_len);
-        }
+    for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
+      ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j], hdr_len);
     }
+  }
 
 dump_internal_txf:
-    if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
-          fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))) {
-        goto out;
-    }
+  if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+        fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))) {
+    goto out;
+  }
 
-    for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++) {
-        ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
-    }
+  for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++) {
+    ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
+  }
 
 out:
-    return fifo_len;
+  return fifo_len;
 }
 
 static void iwl_dump_paging(struct iwl_fw_runtime* fwrt, struct iwl_fw_error_dump_data** data) {
-    int i;
+  int i;
 
-    IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
-    for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
-        struct iwl_fw_error_dump_paging* paging;
-        struct page* pages = fwrt->fw_paging_db[i].fw_paging_block;
-        dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
+  IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
+  for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
+    struct iwl_fw_error_dump_paging* paging;
+    struct page* pages = fwrt->fw_paging_db[i].fw_paging_block;
+    dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
 
-        (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
-        (*data)->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE);
-        paging = (void*)(*data)->data;
-        paging->index = cpu_to_le32(i);
-        dma_sync_single_for_cpu(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL);
-        memcpy(paging->data, page_address(pages), PAGING_BLOCK_SIZE);
-        (*data) = iwl_fw_error_next_data(*data);
-    }
+    (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
+    (*data)->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE);
+    paging = (void*)(*data)->data;
+    paging->index = cpu_to_le32(i);
+    dma_sync_single_for_cpu(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL);
+    memcpy(paging->data, page_address(pages), PAGING_BLOCK_SIZE);
+    (*data) = iwl_fw_error_next_data(*data);
+  }
 }
 
 static struct iwl_fw_error_dump_file* _iwl_fw_error_dump(struct iwl_fw_runtime* fwrt,
                                                          struct iwl_fw_dump_ptrs* fw_error_dump) {
-    struct iwl_fw_error_dump_file* dump_file;
-    struct iwl_fw_error_dump_data* dump_data;
-    struct iwl_fw_error_dump_info* dump_info;
-    struct iwl_fw_error_dump_smem_cfg* dump_smem_cfg;
-    struct iwl_fw_error_dump_trigger_desc* dump_trig;
-    uint32_t sram_len, sram_ofs;
-    const struct iwl_fw_dbg_mem_seg_tlv* fw_mem = fwrt->fw->dbg.mem_tlv;
-    struct iwl_fwrt_shared_mem_cfg* mem_cfg = &fwrt->smem_cfg;
-    uint32_t file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
-    uint32_t smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
-    uint32_t sram2_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->dccm2_len;
-    int i;
+  struct iwl_fw_error_dump_file* dump_file;
+  struct iwl_fw_error_dump_data* dump_data;
+  struct iwl_fw_error_dump_info* dump_info;
+  struct iwl_fw_error_dump_smem_cfg* dump_smem_cfg;
+  struct iwl_fw_error_dump_trigger_desc* dump_trig;
+  uint32_t sram_len, sram_ofs;
+  const struct iwl_fw_dbg_mem_seg_tlv* fw_mem = fwrt->fw->dbg.mem_tlv;
+  struct iwl_fwrt_shared_mem_cfg* mem_cfg = &fwrt->smem_cfg;
+  uint32_t file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
+  uint32_t smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
+  uint32_t sram2_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->dccm2_len;
+  int i;
 
-    /* SRAM - include stack CCM if driver knows the values for it */
-    if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
-        const struct fw_img* img;
+  /* SRAM - include stack CCM if driver knows the values for it */
+  if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
+    const struct fw_img* img;
 
-        if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX) { return NULL; }
-        img = &fwrt->fw->img[fwrt->cur_fw_img];
-        sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
-        sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
-    } else {
-        sram_ofs = fwrt->trans->cfg->dccm_offset;
-        sram_len = fwrt->trans->cfg->dccm_len;
+    if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX) {
+      return NULL;
+    }
+    img = &fwrt->fw->img[fwrt->cur_fw_img];
+    sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+    sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+  } else {
+    sram_ofs = fwrt->trans->cfg->dccm_offset;
+    sram_len = fwrt->trans->cfg->dccm_len;
+  }
+
+  /* reading RXF/TXF sizes */
+  if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
+    fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg);
+    fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
+
+    /* Make room for PRPH registers */
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) {
+      iwl_fw_prph_handler(fwrt, &prph_len, iwl_fw_get_prph_len);
     }
 
-    /* reading RXF/TXF sizes */
-    if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
-        fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg);
-        fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
+    if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
+        iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) {
+      radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
+    }
+  }
 
-        /* Make room for PRPH registers */
-        if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) {
-            iwl_fw_prph_handler(fwrt, &prph_len, iwl_fw_get_prph_len);
-        }
+  file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
 
-        if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
-            iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) {
-            radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
-        }
+  if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+    file_len += sizeof(*dump_data) + sizeof(*dump_info);
+  }
+  if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) {
+    file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
+  }
+
+  if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
+    size_t hdr_len = sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_mem);
+
+    /* Dump SRAM only if no mem_tlvs */
+    if (!fwrt->fw->dbg.n_mem_tlv) {
+      ADD_LEN(file_len, sram_len, hdr_len);
     }
 
-    file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
+    /* Make room for all mem types that exist */
+    ADD_LEN(file_len, smem_len, hdr_len);
+    ADD_LEN(file_len, sram2_len, hdr_len);
 
-    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
-        file_len += sizeof(*dump_data) + sizeof(*dump_info);
+    for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
+      ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len);
     }
-    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) {
-        file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
+  }
+
+  /* Make room for fw's virtual image pages, if it exists */
+  if (iwl_fw_dbg_is_paging_enabled(fwrt))
+    file_len += fwrt->num_of_paging_blk *
+                (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE);
+
+  if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
+    file_len += sizeof(*dump_data) + fwrt->trans->cfg->d3_debug_data_length * 2;
+  }
+
+  /* If we only want a monitor dump, reset the file length */
+  if (fwrt->dump.monitor_only) {
+    file_len =
+        sizeof(*dump_file) + sizeof(*dump_data) * 2 + sizeof(*dump_info) + sizeof(*dump_smem_cfg);
+  }
+
+  if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) {
+    file_len += sizeof(*dump_data) + sizeof(*dump_trig) + fwrt->dump.desc->len;
+  }
+
+  dump_file = vzalloc(file_len);
+  if (!dump_file) {
+    return NULL;
+  }
+
+  fw_error_dump->fwrt_ptr = dump_file;
+
+  dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
+  dump_data = (void*)dump_file->data;
+
+  if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+    dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
+    dump_data->len = cpu_to_le32(sizeof(*dump_info));
+    dump_info = (void*)dump_data->data;
+    dump_info->device_family = fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000
+                                   ? cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7)
+                                   : cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+    dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+    memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
+           sizeof(dump_info->fw_human_readable));
+    strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+            sizeof(dump_info->dev_human_readable) - 1);
+    strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
+            sizeof(dump_info->bus_human_readable) - 1);
+    dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
+    dump_info->lmac_err_id[0] = cpu_to_le32(fwrt->dump.lmac_err_id[0]);
+    if (fwrt->smem_cfg.num_lmacs > 1) {
+      dump_info->lmac_err_id[1] = cpu_to_le32(fwrt->dump.lmac_err_id[1]);
+    }
+    dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id);
+
+    dump_data = iwl_fw_error_next_data(dump_data);
+  }
+
+  if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) {
+    /* Dump shared memory configuration */
+    dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
+    dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
+    dump_smem_cfg = (void*)dump_data->data;
+    dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
+    dump_smem_cfg->num_txfifo_entries = cpu_to_le32(mem_cfg->num_txfifo_entries);
+    for (i = 0; i < MAX_NUM_LMAC; i++) {
+      int j;
+      uint32_t* txf_size = mem_cfg->lmac[i].txfifo_size;
+
+      for (j = 0; j < TX_FIFO_MAX_NUM; j++) {
+        dump_smem_cfg->lmac[i].txfifo_size[j] = cpu_to_le32(txf_size[j]);
+      }
+      dump_smem_cfg->lmac[i].rxfifo1_size = cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
+    }
+    dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size);
+    dump_smem_cfg->internal_txfifo_addr = cpu_to_le32(mem_cfg->internal_txfifo_addr);
+    for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
+      dump_smem_cfg->internal_txfifo_size[i] = cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
     }
 
-    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
-        size_t hdr_len = sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_mem);
+    dump_data = iwl_fw_error_next_data(dump_data);
+  }
 
-        /* Dump SRAM only if no mem_tlvs */
-        if (!fwrt->fw->dbg.n_mem_tlv) { ADD_LEN(file_len, sram_len, hdr_len); }
+  /* We only dump the FIFOs if the FW is in error state */
+  if (fifo_len) {
+    iwl_fw_dump_rxf(fwrt, &dump_data);
+    iwl_fw_dump_txf(fwrt, &dump_data);
+    if (radio_len) {
+      iwl_read_radio_regs(fwrt, &dump_data);
+    }
+  }
 
-        /* Make room for all mem types that exist */
-        ADD_LEN(file_len, smem_len, hdr_len);
-        ADD_LEN(file_len, sram2_len, hdr_len);
+  if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) {
+    dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
+    dump_data->len = cpu_to_le32(sizeof(*dump_trig) + fwrt->dump.desc->len);
+    dump_trig = (void*)dump_data->data;
+    memcpy(dump_trig, &fwrt->dump.desc->trig_desc, sizeof(*dump_trig) + fwrt->dump.desc->len);
 
-        for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
-            ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len);
-        }
+    dump_data = iwl_fw_error_next_data(dump_data);
+  }
+
+  /* In case we only want monitor dump, skip to dump transport data */
+  if (fwrt->dump.monitor_only) {
+    goto out;
+  }
+
+  if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
+    const struct iwl_fw_dbg_mem_seg_tlv* fw_dbg_mem = fwrt->fw->dbg.mem_tlv;
+
+    if (!fwrt->fw->dbg.n_mem_tlv) {
+      iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, IWL_FW_ERROR_DUMP_MEM_SRAM);
     }
 
-    /* Make room for fw's virtual image pages, if it exists */
-    if (iwl_fw_dbg_is_paging_enabled(fwrt))
-        file_len +=
-            fwrt->num_of_paging_blk *
-            (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE);
+    for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
+      uint32_t len = le32_to_cpu(fw_dbg_mem[i].len);
+      uint32_t ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
 
-    if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
-        file_len += sizeof(*dump_data) + fwrt->trans->cfg->d3_debug_data_length * 2;
+      iwl_fw_dump_mem(fwrt, &dump_data, len, ofs, le32_to_cpu(fw_dbg_mem[i].data_type));
     }
 
-    /* If we only want a monitor dump, reset the file length */
-    if (fwrt->dump.monitor_only) {
-        file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + sizeof(*dump_info) +
-                   sizeof(*dump_smem_cfg);
-    }
+    iwl_fw_dump_mem(fwrt, &dump_data, smem_len, fwrt->trans->cfg->smem_offset,
+                    IWL_FW_ERROR_DUMP_MEM_SMEM);
 
-    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) {
-        file_len += sizeof(*dump_data) + sizeof(*dump_trig) + fwrt->dump.desc->len;
-    }
+    iwl_fw_dump_mem(fwrt, &dump_data, sram2_len, fwrt->trans->cfg->dccm2_offset,
+                    IWL_FW_ERROR_DUMP_MEM_SRAM);
+  }
 
-    dump_file = vzalloc(file_len);
-    if (!dump_file) { return NULL; }
+  if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
+    uint32_t addr = fwrt->trans->cfg->d3_debug_data_base_addr;
+    size_t data_size = fwrt->trans->cfg->d3_debug_data_length;
 
-    fw_error_dump->fwrt_ptr = dump_file;
+    dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+    dump_data->len = cpu_to_le32(data_size * 2);
 
-    dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
-    dump_data = (void*)dump_file->data;
+    memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size);
 
-    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
-        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
-        dump_data->len = cpu_to_le32(sizeof(*dump_info));
-        dump_info = (void*)dump_data->data;
-        dump_info->device_family = fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000
-                                       ? cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7)
-                                       : cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
-        dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
-        memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
-               sizeof(dump_info->fw_human_readable));
-        strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
-                sizeof(dump_info->dev_human_readable) - 1);
-        strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
-                sizeof(dump_info->bus_human_readable) - 1);
-        dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
-        dump_info->lmac_err_id[0] = cpu_to_le32(fwrt->dump.lmac_err_id[0]);
-        if (fwrt->smem_cfg.num_lmacs > 1) {
-            dump_info->lmac_err_id[1] = cpu_to_le32(fwrt->dump.lmac_err_id[1]);
-        }
-        dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id);
+    kfree(fwrt->dump.d3_debug_data);
+    fwrt->dump.d3_debug_data = NULL;
 
-        dump_data = iwl_fw_error_next_data(dump_data);
-    }
+    iwl_trans_read_mem_bytes(fwrt->trans, addr, dump_data->data + data_size, data_size);
 
-    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) {
-        /* Dump shared memory configuration */
-        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
-        dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
-        dump_smem_cfg = (void*)dump_data->data;
-        dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
-        dump_smem_cfg->num_txfifo_entries = cpu_to_le32(mem_cfg->num_txfifo_entries);
-        for (i = 0; i < MAX_NUM_LMAC; i++) {
-            int j;
-            uint32_t* txf_size = mem_cfg->lmac[i].txfifo_size;
+    dump_data = iwl_fw_error_next_data(dump_data);
+  }
 
-            for (j = 0; j < TX_FIFO_MAX_NUM; j++) {
-                dump_smem_cfg->lmac[i].txfifo_size[j] = cpu_to_le32(txf_size[j]);
-            }
-            dump_smem_cfg->lmac[i].rxfifo1_size = cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
-        }
-        dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size);
-        dump_smem_cfg->internal_txfifo_addr = cpu_to_le32(mem_cfg->internal_txfifo_addr);
-        for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
-            dump_smem_cfg->internal_txfifo_size[i] = cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
-        }
+  /* Dump fw's virtual image */
+  if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
+    iwl_dump_paging(fwrt, &dump_data);
+  }
 
-        dump_data = iwl_fw_error_next_data(dump_data);
-    }
-
-    /* We only dump the FIFOs if the FW is in error state */
-    if (fifo_len) {
-        iwl_fw_dump_rxf(fwrt, &dump_data);
-        iwl_fw_dump_txf(fwrt, &dump_data);
-        if (radio_len) { iwl_read_radio_regs(fwrt, &dump_data); }
-    }
-
-    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) {
-        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
-        dump_data->len = cpu_to_le32(sizeof(*dump_trig) + fwrt->dump.desc->len);
-        dump_trig = (void*)dump_data->data;
-        memcpy(dump_trig, &fwrt->dump.desc->trig_desc, sizeof(*dump_trig) + fwrt->dump.desc->len);
-
-        dump_data = iwl_fw_error_next_data(dump_data);
-    }
-
-    /* In case we only want monitor dump, skip to dump trasport data */
-    if (fwrt->dump.monitor_only) { goto out; }
-
-    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
-        const struct iwl_fw_dbg_mem_seg_tlv* fw_dbg_mem = fwrt->fw->dbg.mem_tlv;
-
-        if (!fwrt->fw->dbg.n_mem_tlv) {
-            iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, IWL_FW_ERROR_DUMP_MEM_SRAM);
-        }
-
-        for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
-            uint32_t len = le32_to_cpu(fw_dbg_mem[i].len);
-            uint32_t ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
-
-            iwl_fw_dump_mem(fwrt, &dump_data, len, ofs, le32_to_cpu(fw_dbg_mem[i].data_type));
-        }
-
-        iwl_fw_dump_mem(fwrt, &dump_data, smem_len, fwrt->trans->cfg->smem_offset,
-                        IWL_FW_ERROR_DUMP_MEM_SMEM);
-
-        iwl_fw_dump_mem(fwrt, &dump_data, sram2_len, fwrt->trans->cfg->dccm2_offset,
-                        IWL_FW_ERROR_DUMP_MEM_SRAM);
-    }
-
-    if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
-        uint32_t addr = fwrt->trans->cfg->d3_debug_data_base_addr;
-        size_t data_size = fwrt->trans->cfg->d3_debug_data_length;
-
-        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
-        dump_data->len = cpu_to_le32(data_size * 2);
-
-        memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size);
-
-        kfree(fwrt->dump.d3_debug_data);
-        fwrt->dump.d3_debug_data = NULL;
-
-        iwl_trans_read_mem_bytes(fwrt->trans, addr, dump_data->data + data_size, data_size);
-
-        dump_data = iwl_fw_error_next_data(dump_data);
-    }
-
-    /* Dump fw's virtual image */
-    if (iwl_fw_dbg_is_paging_enabled(fwrt)) { iwl_dump_paging(fwrt, &dump_data); }
-
-    if (prph_len) { iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph); }
+  if (prph_len) {
+    iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph);
+  }
 
 out:
-    dump_file->file_len = cpu_to_le32(file_len);
-    return dump_file;
+  dump_file->file_len = cpu_to_le32(file_len);
+  return dump_file;
 }
 
 static void iwl_dump_prph_ini(struct iwl_trans* trans, struct iwl_fw_error_dump_data** data,
                               struct iwl_fw_ini_region_cfg* reg) {
-    struct iwl_fw_error_dump_prph* prph;
-    unsigned long flags;
-    uint32_t i, size = le32_to_cpu(reg->num_regions);
+  struct iwl_fw_error_dump_prph* prph;
+  unsigned long flags;
+  uint32_t i, size = le32_to_cpu(reg->num_regions);
 
-    IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
+  IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
 
-    if (!iwl_trans_grab_nic_access(trans, &flags)) { return; }
+  if (!iwl_trans_grab_nic_access(trans, &flags)) {
+    return;
+  }
 
-    for (i = 0; i < size; i++) {
-        (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
-        (*data)->len = cpu_to_le32(le32_to_cpu(reg->size) + sizeof(*prph));
-        prph = (void*)(*data)->data;
-        prph->prph_start = reg->start_addr[i];
-        prph->data[0] = cpu_to_le32(iwl_read_prph_no_grab(trans, le32_to_cpu(prph->prph_start)));
-        *data = iwl_fw_error_next_data(*data);
-    }
-    iwl_trans_release_nic_access(trans, &flags);
+  for (i = 0; i < size; i++) {
+    (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
+    (*data)->len = cpu_to_le32(le32_to_cpu(reg->size) + sizeof(*prph));
+    prph = (void*)(*data)->data;
+    prph->prph_start = reg->start_addr[i];
+    prph->data[0] = cpu_to_le32(iwl_read_prph_no_grab(trans, le32_to_cpu(prph->prph_start)));
+    *data = iwl_fw_error_next_data(*data);
+  }
+  iwl_trans_release_nic_access(trans, &flags);
 }
 
 static void iwl_dump_csr_ini(struct iwl_trans* trans, struct iwl_fw_error_dump_data** data,
                              struct iwl_fw_ini_region_cfg* reg) {
-    int i, num = le32_to_cpu(reg->num_regions);
-    uint32_t size = le32_to_cpu(reg->size);
+  int i, num = le32_to_cpu(reg->num_regions);
+  uint32_t size = le32_to_cpu(reg->size);
 
-    IWL_DEBUG_INFO(trans, "WRT CSR dump\n");
+  IWL_DEBUG_INFO(trans, "WRT CSR dump\n");
 
-    for (i = 0; i < num; i++) {
-        uint32_t add = le32_to_cpu(reg->start_addr[i]);
-        __le32* val;
-        int j;
+  for (i = 0; i < num; i++) {
+    uint32_t add = le32_to_cpu(reg->start_addr[i]);
+    __le32* val;
+    int j;
 
-        (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
-        (*data)->len = cpu_to_le32(size);
-        val = (void*)(*data)->data;
+    (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
+    (*data)->len = cpu_to_le32(size);
+    val = (void*)(*data)->data;
 
-        for (j = 0; j < size; j += 4) {
-            *val++ = cpu_to_le32(iwl_trans_read32(trans, j + add));
-        }
-
-        *data = iwl_fw_error_next_data(*data);
+    for (j = 0; j < size; j += 4) {
+      *val++ = cpu_to_le32(iwl_trans_read32(trans, j + add));
     }
+
+    *data = iwl_fw_error_next_data(*data);
+  }
 }
 
 static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime* fwrt,
                                       struct iwl_fw_ini_trigger* trigger) {
-    int i, num, size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data);
+  int i, num, size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data);
 
-    if (!trigger || !trigger->num_regions) { return 0; }
+  if (!trigger || !trigger->num_regions) {
+    return 0;
+  }
 
-    num = le32_to_cpu(trigger->num_regions);
-    for (i = 0; i < num; i++) {
-        uint32_t reg_id = le32_to_cpu(trigger->data[i]);
-        struct iwl_fw_ini_region_cfg* reg;
-        enum iwl_fw_ini_region_type type;
-        uint32_t num_entries;
+  num = le32_to_cpu(trigger->num_regions);
+  for (i = 0; i < num; i++) {
+    uint32_t reg_id = le32_to_cpu(trigger->data[i]);
+    struct iwl_fw_ini_region_cfg* reg;
+    enum iwl_fw_ini_region_type type;
+    uint32_t num_entries;
 
-        if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs))) { continue; }
-
-        reg = fwrt->dump.active_regs[reg_id].reg;
-        if (WARN(!reg, "Unassigned region %d\n", reg_id)) { continue; }
-
-        type = le32_to_cpu(reg->region_type);
-        num_entries = le32_to_cpu(reg->num_regions);
-
-        switch (type) {
-        case IWL_FW_INI_REGION_DEVICE_MEMORY:
-            size += hdr_len + sizeof(struct iwl_fw_error_dump_named_mem) + le32_to_cpu(reg->size);
-            break;
-        case IWL_FW_INI_REGION_PERIPHERY_MAC:
-        case IWL_FW_INI_REGION_PERIPHERY_PHY:
-        case IWL_FW_INI_REGION_PERIPHERY_AUX:
-            size +=
-                num_entries * (hdr_len + sizeof(struct iwl_fw_error_dump_prph) + sizeof(uint32_t));
-            break;
-        case IWL_FW_INI_REGION_TXF:
-            size += iwl_fw_txf_len(fwrt, &fwrt->smem_cfg);
-            break;
-        case IWL_FW_INI_REGION_RXF:
-            size += iwl_fw_rxf_len(fwrt, &fwrt->smem_cfg);
-            break;
-        case IWL_FW_INI_REGION_PAGING:
-            if (!iwl_fw_dbg_is_paging_enabled(fwrt)) { break; }
-            size += fwrt->num_of_paging_blk *
-                    (hdr_len + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE);
-            break;
-        case IWL_FW_INI_REGION_CSR:
-            size += num_entries * (hdr_len + le32_to_cpu(reg->size));
-            break;
-        case IWL_FW_INI_REGION_DRAM_BUFFER:
-        /* Transport takes care of DRAM dumping */
-        case IWL_FW_INI_REGION_INTERNAL_BUFFER:
-        case IWL_FW_INI_REGION_DRAM_IMR:
-        /* Undefined yet */
-        default:
-            break;
-        }
+    if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs))) {
+      continue;
     }
-    return size;
+
+    reg = fwrt->dump.active_regs[reg_id].reg;
+    if (WARN(!reg, "Unassigned region %d\n", reg_id)) {
+      continue;
+    }
+
+    type = le32_to_cpu(reg->region_type);
+    num_entries = le32_to_cpu(reg->num_regions);
+
+    switch (type) {
+      case IWL_FW_INI_REGION_DEVICE_MEMORY:
+        size += hdr_len + sizeof(struct iwl_fw_error_dump_named_mem) + le32_to_cpu(reg->size);
+        break;
+      case IWL_FW_INI_REGION_PERIPHERY_MAC:
+      case IWL_FW_INI_REGION_PERIPHERY_PHY:
+      case IWL_FW_INI_REGION_PERIPHERY_AUX:
+        size += num_entries * (hdr_len + sizeof(struct iwl_fw_error_dump_prph) + sizeof(uint32_t));
+        break;
+      case IWL_FW_INI_REGION_TXF:
+        size += iwl_fw_txf_len(fwrt, &fwrt->smem_cfg);
+        break;
+      case IWL_FW_INI_REGION_RXF:
+        size += iwl_fw_rxf_len(fwrt, &fwrt->smem_cfg);
+        break;
+      case IWL_FW_INI_REGION_PAGING:
+        if (!iwl_fw_dbg_is_paging_enabled(fwrt)) {
+          break;
+        }
+        size += fwrt->num_of_paging_blk *
+                (hdr_len + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE);
+        break;
+      case IWL_FW_INI_REGION_CSR:
+        size += num_entries * (hdr_len + le32_to_cpu(reg->size));
+        break;
+      case IWL_FW_INI_REGION_DRAM_BUFFER:
+      /* Transport takes care of DRAM dumping */
+      case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+      case IWL_FW_INI_REGION_DRAM_IMR:
+      /* Undefined yet */
+      default:
+        break;
+    }
+  }
+  return size;
 }
 
 static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime* fwrt, struct iwl_fw_ini_trigger* trigger,
                                     struct iwl_fw_error_dump_data** data, uint32_t* dump_mask) {
-    int i, num = le32_to_cpu(trigger->num_regions);
+  int i, num = le32_to_cpu(trigger->num_regions);
 
-    for (i = 0; i < num; i++) {
-        uint32_t reg_id = le32_to_cpu(trigger->data[i]);
-        enum iwl_fw_ini_region_type type;
-        struct iwl_fw_ini_region_cfg* reg;
+  for (i = 0; i < num; i++) {
+    uint32_t reg_id = le32_to_cpu(trigger->data[i]);
+    enum iwl_fw_ini_region_type type;
+    struct iwl_fw_ini_region_cfg* reg;
 
-        if (reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)) { continue; }
-
-        reg = fwrt->dump.active_regs[reg_id].reg;
-        /* Don't warn, get_trigger_len already warned */
-        if (!reg) { continue; }
-
-        type = le32_to_cpu(reg->region_type);
-        switch (type) {
-        case IWL_FW_INI_REGION_DEVICE_MEMORY:
-            if (WARN_ON(le32_to_cpu(reg->num_regions) > 1)) { continue; }
-            iwl_fw_dump_named_mem(fwrt, data, le32_to_cpu(reg->size),
-                                  le32_to_cpu(reg->start_addr[0]), reg->name,
-                                  le32_to_cpu(reg->name_len));
-            break;
-        case IWL_FW_INI_REGION_PERIPHERY_MAC:
-        case IWL_FW_INI_REGION_PERIPHERY_PHY:
-        case IWL_FW_INI_REGION_PERIPHERY_AUX:
-            iwl_dump_prph_ini(fwrt->trans, data, reg);
-            break;
-        case IWL_FW_INI_REGION_DRAM_BUFFER:
-            *dump_mask |= IWL_FW_ERROR_DUMP_FW_MONITOR;
-            break;
-        case IWL_FW_INI_REGION_PAGING:
-            if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
-                iwl_dump_paging(fwrt, data);
-            } else {
-                *dump_mask |= IWL_FW_ERROR_DUMP_PAGING;
-            }
-            break;
-        case IWL_FW_INI_REGION_TXF:
-            iwl_fw_dump_txf(fwrt, data);
-            break;
-        case IWL_FW_INI_REGION_RXF:
-            iwl_fw_dump_rxf(fwrt, data);
-            break;
-        case IWL_FW_INI_REGION_CSR:
-            iwl_dump_csr_ini(fwrt->trans, data, reg);
-            break;
-        case IWL_FW_INI_REGION_DRAM_IMR:
-        case IWL_FW_INI_REGION_INTERNAL_BUFFER:
-        /* This is undefined yet */
-        default:
-            break;
-        }
+    if (reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)) {
+      continue;
     }
+
+    reg = fwrt->dump.active_regs[reg_id].reg;
+    /* Don't warn, get_trigger_len already warned */
+    if (!reg) {
+      continue;
+    }
+
+    type = le32_to_cpu(reg->region_type);
+    switch (type) {
+      case IWL_FW_INI_REGION_DEVICE_MEMORY:
+        if (WARN_ON(le32_to_cpu(reg->num_regions) > 1)) {
+          continue;
+        }
+        iwl_fw_dump_named_mem(fwrt, data, le32_to_cpu(reg->size), le32_to_cpu(reg->start_addr[0]),
+                              reg->name, le32_to_cpu(reg->name_len));
+        break;
+      case IWL_FW_INI_REGION_PERIPHERY_MAC:
+      case IWL_FW_INI_REGION_PERIPHERY_PHY:
+      case IWL_FW_INI_REGION_PERIPHERY_AUX:
+        iwl_dump_prph_ini(fwrt->trans, data, reg);
+        break;
+      case IWL_FW_INI_REGION_DRAM_BUFFER:
+        *dump_mask |= IWL_FW_ERROR_DUMP_FW_MONITOR;
+        break;
+      case IWL_FW_INI_REGION_PAGING:
+        if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
+          iwl_dump_paging(fwrt, data);
+        } else {
+          *dump_mask |= IWL_FW_ERROR_DUMP_PAGING;
+        }
+        break;
+      case IWL_FW_INI_REGION_TXF:
+        iwl_fw_dump_txf(fwrt, data);
+        break;
+      case IWL_FW_INI_REGION_RXF:
+        iwl_fw_dump_rxf(fwrt, data);
+        break;
+      case IWL_FW_INI_REGION_CSR:
+        iwl_dump_csr_ini(fwrt->trans, data, reg);
+        break;
+      case IWL_FW_INI_REGION_DRAM_IMR:
+      case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+      /* This is undefined yet */
+      default:
+        break;
+    }
+  }
 }
 
 static struct iwl_fw_error_dump_file* _iwl_fw_error_ini_dump(struct iwl_fw_runtime* fwrt,
                                                              struct iwl_fw_dump_ptrs* fw_error_dump,
                                                              uint32_t* dump_mask) {
-    int size, id = le32_to_cpu(fwrt->dump.desc->trig_desc.type);
-    struct iwl_fw_error_dump_data* dump_data;
-    struct iwl_fw_error_dump_file* dump_file;
-    struct iwl_fw_ini_trigger *trigger, *ext;
+  int size, id = le32_to_cpu(fwrt->dump.desc->trig_desc.type);
+  struct iwl_fw_error_dump_data* dump_data;
+  struct iwl_fw_error_dump_file* dump_file;
+  struct iwl_fw_ini_trigger *trigger, *ext;
 
-    if (id == FW_DBG_TRIGGER_FW_ASSERT) {
-        id = IWL_FW_TRIGGER_ID_FW_ASSERT;
-    } else if (id == FW_DBG_TRIGGER_USER) {
-        id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
-    } else if (id < FW_DBG_TRIGGER_MAX) {
-        return NULL;
-    }
+  if (id == FW_DBG_TRIGGER_FW_ASSERT) {
+    id = IWL_FW_TRIGGER_ID_FW_ASSERT;
+  } else if (id == FW_DBG_TRIGGER_USER) {
+    id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
+  } else if (id < FW_DBG_TRIGGER_MAX) {
+    return NULL;
+  }
 
-    if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs))) { return NULL; }
+  if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs))) {
+    return NULL;
+  }
 
-    trigger = fwrt->dump.active_trigs[id].conf;
-    ext = fwrt->dump.active_trigs[id].conf_ext;
+  trigger = fwrt->dump.active_trigs[id].conf;
+  ext = fwrt->dump.active_trigs[id].conf_ext;
 
-    size = sizeof(*dump_file);
-    size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
-    size += iwl_fw_ini_get_trigger_len(fwrt, ext);
+  size = sizeof(*dump_file);
+  size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
+  size += iwl_fw_ini_get_trigger_len(fwrt, ext);
 
-    if (!size) { return NULL; }
+  if (!size) {
+    return NULL;
+  }
 
-    dump_file = vzalloc(size);
-    if (!dump_file) { return NULL; }
+  dump_file = vzalloc(size);
+  if (!dump_file) {
+    return NULL;
+  }
 
-    fw_error_dump->fwrt_ptr = dump_file;
+  fw_error_dump->fwrt_ptr = dump_file;
 
-    dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
-    dump_data = (void*)dump_file->data;
-    dump_file->file_len = cpu_to_le32(size);
+  dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
+  dump_data = (void*)dump_file->data;
+  dump_file->file_len = cpu_to_le32(size);
 
-    *dump_mask = 0;
-    if (trigger) { iwl_fw_ini_dump_trigger(fwrt, trigger, &dump_data, dump_mask); }
-    if (ext) { iwl_fw_ini_dump_trigger(fwrt, ext, &dump_data, dump_mask); }
+  *dump_mask = 0;
+  if (trigger) {
+    iwl_fw_ini_dump_trigger(fwrt, trigger, &dump_data, dump_mask);
+  }
+  if (ext) {
+    iwl_fw_ini_dump_trigger(fwrt, ext, &dump_data, dump_mask);
+  }
 
-    return dump_file;
+  return dump_file;
 }
 
 void iwl_fw_error_dump(struct iwl_fw_runtime* fwrt) {
-    struct iwl_fw_dump_ptrs* fw_error_dump;
-    struct iwl_fw_error_dump_file* dump_file;
-    struct scatterlist* sg_dump_data;
-    uint32_t file_len;
-    uint32_t dump_mask = fwrt->fw->dbg.dump_mask;
+  struct iwl_fw_dump_ptrs* fw_error_dump;
+  struct iwl_fw_error_dump_file* dump_file;
+  struct scatterlist* sg_dump_data;
+  uint32_t file_len;
+  uint32_t dump_mask = fwrt->fw->dbg.dump_mask;
 
-    IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
+  IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
 
-    /* there's no point in fw dump if the bus is dead */
-    if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
-        IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
-        goto out;
-    }
+  /* there's no point in fw dump if the bus is dead */
+  if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
+    IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
+    goto out;
+  }
 
-    fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
-    if (!fw_error_dump) { goto out; }
+  fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
+  if (!fw_error_dump) {
+    goto out;
+  }
 
-    if (fwrt->trans->ini_valid) {
-        dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump, &dump_mask);
-    } else {
-        dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
-    }
+  if (fwrt->trans->ini_valid) {
+    dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump, &dump_mask);
+  } else {
+    dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
+  }
 
-    if (!dump_file) {
-        kfree(fw_error_dump);
-        goto out;
-    }
-
-    if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only) {
-        dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
-    }
-
-    fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
-    file_len = le32_to_cpu(dump_file->file_len);
-    fw_error_dump->fwrt_len = file_len;
-    if (fw_error_dump->trans_ptr) {
-        file_len += fw_error_dump->trans_ptr->len;
-        dump_file->file_len = cpu_to_le32(file_len);
-    }
-
-    sg_dump_data = alloc_sgtable(file_len);
-    if (sg_dump_data) {
-        sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump->fwrt_ptr,
-                             fw_error_dump->fwrt_len, 0);
-        if (fw_error_dump->trans_ptr)
-            sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data),
-                                 fw_error_dump->trans_ptr->data, fw_error_dump->trans_ptr->len,
-                                 fw_error_dump->fwrt_len);
-        dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL);
-    }
-    vfree(fw_error_dump->fwrt_ptr);
-    vfree(fw_error_dump->trans_ptr);
+  if (!dump_file) {
     kfree(fw_error_dump);
+    goto out;
+  }
+
+  if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only) {
+    dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
+  }
+
+  fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
+  file_len = le32_to_cpu(dump_file->file_len);
+  fw_error_dump->fwrt_len = file_len;
+  if (fw_error_dump->trans_ptr) {
+    file_len += fw_error_dump->trans_ptr->len;
+    dump_file->file_len = cpu_to_le32(file_len);
+  }
+
+  sg_dump_data = alloc_sgtable(file_len);
+  if (sg_dump_data) {
+    sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump->fwrt_ptr,
+                         fw_error_dump->fwrt_len, 0);
+    if (fw_error_dump->trans_ptr)
+      sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump->trans_ptr->data,
+                           fw_error_dump->trans_ptr->len, fw_error_dump->fwrt_len);
+    dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL);
+  }
+  vfree(fw_error_dump->fwrt_ptr);
+  vfree(fw_error_dump->trans_ptr);
+  kfree(fw_error_dump);
 
 out:
-    iwl_fw_free_dump_desc(fwrt);
-    clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
-    IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
+  iwl_fw_free_dump_desc(fwrt);
+  clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
+  IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
 }
 IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
 
@@ -1092,214 +1164,248 @@
 IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
 
 void iwl_fw_assert_error_dump(struct iwl_fw_runtime* fwrt) {
-    IWL_INFO(fwrt, "error dump due to fw assert\n");
-    fwrt->dump.desc = &iwl_dump_desc_assert;
-    iwl_fw_error_dump(fwrt);
+  IWL_INFO(fwrt, "error dump due to fw assert\n");
+  fwrt->dump.desc = &iwl_dump_desc_assert;
+  iwl_fw_error_dump(fwrt);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_assert_error_dump);
 
 void iwl_fw_alive_error_dump(struct iwl_fw_runtime* fwrt) {
-    struct iwl_fw_dump_desc* iwl_dump_desc_no_alive =
-        kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL);
+  struct iwl_fw_dump_desc* iwl_dump_desc_no_alive =
+      kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL);
 
-    if (!iwl_dump_desc_no_alive) { return; }
+  if (!iwl_dump_desc_no_alive) {
+    return;
+  }
 
-    iwl_dump_desc_no_alive->trig_desc.type = cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE);
-    iwl_dump_desc_no_alive->len = 0;
+  iwl_dump_desc_no_alive->trig_desc.type = cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE);
+  iwl_dump_desc_no_alive->len = 0;
 
-    if (WARN_ON(fwrt->dump.desc)) { iwl_fw_free_dump_desc(fwrt); }
+  if (WARN_ON(fwrt->dump.desc)) {
+    iwl_fw_free_dump_desc(fwrt);
+  }
 
-    IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", FW_DBG_TRIGGER_NO_ALIVE);
+  IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", FW_DBG_TRIGGER_NO_ALIVE);
 
-    fwrt->dump.desc = iwl_dump_desc_no_alive;
-    iwl_fw_error_dump(fwrt);
-    clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status);
+  fwrt->dump.desc = iwl_dump_desc_no_alive;
+  iwl_fw_error_dump(fwrt);
+  clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
 
 int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime* fwrt, const struct iwl_fw_dump_desc* desc,
                             bool monitor_only, unsigned int delay) {
-    /*
-     * If the loading of the FW completed successfully, the next step is to
-     * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non
-     * zero, the FW was already loaded successully. If the state is "NO_FW"
-     * in such a case - exit, since FW may be dead. Otherwise, we
-     * can try to collect the data, since FW might just not be fully
-     * loaded (no "ALIVE" yet), and the debug data is accessible.
-     *
-     * Corner case: got the FW alive but crashed before getting the SMEM
-     *  config. In such a case, due to HW access problems, we might
-     *  collect garbage.
-     */
-    if (fwrt->trans->state == IWL_TRANS_NO_FW && fwrt->smem_cfg.num_lmacs) { return -EIO; }
+  /*
+   * If the loading of the FW completed successfully, the next step is to
+   * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non
+   * zero, the FW was already loaded successully. If the state is "NO_FW"
+   * in such a case - exit, since FW may be dead. Otherwise, we
+   * can try to collect the data, since FW might just not be fully
+   * loaded (no "ALIVE" yet), and the debug data is accessible.
+   *
+   * Corner case: got the FW alive but crashed before getting the SMEM
+   *  config. In such a case, due to HW access problems, we might
+   *  collect garbage.
+   */
+  if (fwrt->trans->state == IWL_TRANS_NO_FW && fwrt->smem_cfg.num_lmacs) {
+    return -EIO;
+  }
 
-    if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) ||
-        test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status)) {
-        return -EBUSY;
-    }
+  if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) ||
+      test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status)) {
+    return -EBUSY;
+  }
 
-    if (WARN_ON(fwrt->dump.desc)) { iwl_fw_free_dump_desc(fwrt); }
+  if (WARN_ON(fwrt->dump.desc)) {
+    iwl_fw_free_dump_desc(fwrt);
+  }
 
-    IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", le32_to_cpu(desc->trig_desc.type));
+  IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", le32_to_cpu(desc->trig_desc.type));
 
-    fwrt->dump.desc = desc;
-    fwrt->dump.monitor_only = monitor_only;
+  fwrt->dump.desc = desc;
+  fwrt->dump.monitor_only = monitor_only;
 
-    schedule_delayed_work(&fwrt->dump.wk, delay);
+  schedule_delayed_work(&fwrt->dump.wk, delay);
 
-    return 0;
+  return 0;
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
 
 int _iwl_fw_dbg_collect(struct iwl_fw_runtime* fwrt, enum iwl_fw_dbg_trigger trig, const char* str,
                         size_t len, struct iwl_fw_dbg_trigger_tlv* trigger) {
-    struct iwl_fw_dump_desc* desc;
-    unsigned int delay = 0;
-    bool monitor_only = false;
+  struct iwl_fw_dump_desc* desc;
+  unsigned int delay = 0;
+  bool monitor_only = false;
 
-    if (trigger) {
-        uint16_t occurrences = le16_to_cpu(trigger->occurrences) - 1;
+  if (trigger) {
+    uint16_t occurrences = le16_to_cpu(trigger->occurrences) - 1;
 
-        if (!le16_to_cpu(trigger->occurrences)) { return 0; }
-
-        if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
-            IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig);
-            iwl_force_nmi(fwrt->trans);
-            return 0;
-        }
-
-        trigger->occurrences = cpu_to_le16(occurrences);
-        delay = le16_to_cpu(trigger->trig_dis_ms);
-        monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
+    if (!le16_to_cpu(trigger->occurrences)) {
+      return 0;
     }
 
-    desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
-    if (!desc) { return -ENOMEM; }
+    if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
+      IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig);
+      iwl_force_nmi(fwrt->trans);
+      return 0;
+    }
 
-    desc->len = len;
-    desc->trig_desc.type = cpu_to_le32(trig);
-    memcpy(desc->trig_desc.data, str, len);
+    trigger->occurrences = cpu_to_le16(occurrences);
+    delay = le16_to_cpu(trigger->trig_dis_ms);
+    monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
+  }
 
-    return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
+  desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+  if (!desc) {
+    return -ENOMEM;
+  }
+
+  desc->len = len;
+  desc->trig_desc.type = cpu_to_le32(trig);
+  memcpy(desc->trig_desc.data, str, len);
+
+  return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
 }
 IWL_EXPORT_SYMBOL(_iwl_fw_dbg_collect);
 
 int iwl_fw_dbg_collect(struct iwl_fw_runtime* fwrt, uint32_t id, const char* str, size_t len) {
-    struct iwl_fw_dump_desc* desc;
-    uint32_t occur, delay;
+  struct iwl_fw_dump_desc* desc;
+  uint32_t occur, delay;
 
-    if (!fwrt->trans->ini_valid) { return _iwl_fw_dbg_collect(fwrt, id, str, len, NULL); }
+  if (!fwrt->trans->ini_valid) {
+    return _iwl_fw_dbg_collect(fwrt, id, str, len, NULL);
+  }
 
-    if (id == FW_DBG_TRIGGER_USER) { id = IWL_FW_TRIGGER_ID_USER_TRIGGER; }
+  if (id == FW_DBG_TRIGGER_USER) {
+    id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
+  }
 
-    if (WARN_ON(!fwrt->dump.active_trigs[id].active)) { return -EINVAL; }
+  if (WARN_ON(!fwrt->dump.active_trigs[id].active)) {
+    return -EINVAL;
+  }
 
-    delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->ignore_consec);
-    occur = le32_to_cpu(fwrt->dump.active_trigs[id].conf->occurrences);
-    if (!occur) { return 0; }
+  delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->ignore_consec);
+  occur = le32_to_cpu(fwrt->dump.active_trigs[id].conf->occurrences);
+  if (!occur) {
+    return 0;
+  }
 
-    if (le32_to_cpu(fwrt->dump.active_trigs[id].conf->force_restart)) {
-        IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id);
-        iwl_force_nmi(fwrt->trans);
-        return 0;
-    }
+  if (le32_to_cpu(fwrt->dump.active_trigs[id].conf->force_restart)) {
+    IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id);
+    iwl_force_nmi(fwrt->trans);
+    return 0;
+  }
 
-    desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
-    if (!desc) { return -ENOMEM; }
+  desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+  if (!desc) {
+    return -ENOMEM;
+  }
 
-    occur--;
-    fwrt->dump.active_trigs[id].conf->occurrences = cpu_to_le32(occur);
+  occur--;
+  fwrt->dump.active_trigs[id].conf->occurrences = cpu_to_le32(occur);
 
-    desc->len = len;
-    desc->trig_desc.type = cpu_to_le32(id);
-    memcpy(desc->trig_desc.data, str, len);
+  desc->len = len;
+  desc->trig_desc.type = cpu_to_le32(id);
+  memcpy(desc->trig_desc.data, str, len);
 
-    return iwl_fw_dbg_collect_desc(fwrt, desc, true, delay);
+  return iwl_fw_dbg_collect_desc(fwrt, desc, true, delay);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
 
 int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime* fwrt, struct iwl_fw_dbg_trigger_tlv* trigger,
                             const char* fmt, ...) {
-    int ret, len = 0;
-    char buf[64];
+  int ret, len = 0;
+  char buf[64];
 
-    if (fwrt->trans->ini_valid) { return 0; }
+  if (fwrt->trans->ini_valid) {
+    return 0;
+  }
 
-    if (fmt) {
-        va_list ap;
+  if (fmt) {
+    va_list ap;
 
-        buf[sizeof(buf) - 1] = '\0';
+    buf[sizeof(buf) - 1] = '\0';
 
-        va_start(ap, fmt);
-        vsnprintf(buf, sizeof(buf), fmt, ap);
-        va_end(ap);
+    va_start(ap, fmt);
+    vsnprintf(buf, sizeof(buf), fmt, ap);
+    va_end(ap);
 
-        /* check for truncation */
-        if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) { buf[sizeof(buf) - 1] = '\0'; }
-
-        len = strlen(buf) + 1;
+    /* check for truncation */
+    if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) {
+      buf[sizeof(buf) - 1] = '\0';
     }
 
-    ret = _iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, trigger);
+    len = strlen(buf) + 1;
+  }
 
-    if (ret) { return ret; }
+  ret = _iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, trigger);
 
-    return 0;
+  if (ret) {
+    return ret;
+  }
+
+  return 0;
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
 
 int iwl_fw_start_dbg_conf(struct iwl_fw_runtime* fwrt, uint8_t conf_id) {
-    uint8_t* ptr;
-    int ret;
-    int i;
+  uint8_t* ptr;
+  int ret;
+  int i;
 
-    if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv), "Invalid configuration %d\n",
-                  conf_id)) {
-        return -EINVAL;
-    }
+  if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv), "Invalid configuration %d\n",
+                conf_id)) {
+    return -EINVAL;
+  }
 
-    /* EARLY START - firmware's configuration is hard coded */
-    if ((!fwrt->fw->dbg.conf_tlv[conf_id] || !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
-        conf_id == FW_DBG_START_FROM_ALIVE) {
-        return 0;
-    }
+  /* EARLY START - firmware's configuration is hard coded */
+  if ((!fwrt->fw->dbg.conf_tlv[conf_id] || !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
+      conf_id == FW_DBG_START_FROM_ALIVE) {
+    return 0;
+  }
 
-    if (!fwrt->fw->dbg.conf_tlv[conf_id]) { return -EINVAL; }
+  if (!fwrt->fw->dbg.conf_tlv[conf_id]) {
+    return -EINVAL;
+  }
 
-    if (fwrt->dump.conf != FW_DBG_INVALID) {
-        IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n", fwrt->dump.conf);
-    }
+  if (fwrt->dump.conf != FW_DBG_INVALID) {
+    IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n", fwrt->dump.conf);
+  }
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    /* start default config marker cmd for syncing logs */
-    if (fwrt->trans->dbg_cfg.enable_timestamp_marker_cmd) { iwl_fw_trigger_timestamp(fwrt, 1); }
+  /* start default config marker cmd for syncing logs */
+  if (fwrt->trans->dbg_cfg.enable_timestamp_marker_cmd) {
+    iwl_fw_trigger_timestamp(fwrt, 1);
+  }
 #endif
-    /* Send all HCMDs for configuring the FW debug */
-    ptr = (void*)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
-    for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
-        struct iwl_fw_dbg_conf_hcmd* cmd = (void*)ptr;
-        struct iwl_host_cmd hcmd = {
-            .id = cmd->id,
-            .len =
-                {
-                    le16_to_cpu(cmd->len),
-                },
-            .data =
-                {
-                    cmd->data,
-                },
-        };
+  /* Send all HCMDs for configuring the FW debug */
+  ptr = (void*)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
+  for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
+    struct iwl_fw_dbg_conf_hcmd* cmd = (void*)ptr;
+    struct iwl_host_cmd hcmd = {
+        .id = cmd->id,
+        .len =
+            {
+                le16_to_cpu(cmd->len),
+            },
+        .data =
+            {
+                cmd->data,
+            },
+    };
 
-        ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
-        if (ret) { return ret; }
-
-        ptr += sizeof(*cmd);
-        ptr += le16_to_cpu(cmd->len);
+    ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+    if (ret) {
+      return ret;
     }
 
-    fwrt->dump.conf = conf_id;
+    ptr += sizeof(*cmd);
+    ptr += le16_to_cpu(cmd->len);
+  }
 
-    return 0;
+  fwrt->dump.conf = conf_id;
+
+  return 0;
 }
 IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
 
@@ -1307,251 +1413,270 @@
  * called afterwards
  */
 void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime* fwrt) {
-    struct iwl_fw_dbg_params params = {0};
+  struct iwl_fw_dbg_params params = {0};
 
-    if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) { return; }
+  if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) {
+    return;
+  }
 
-    if (fwrt->ops && fwrt->ops->fw_running && !fwrt->ops->fw_running(fwrt->ops_ctx)) {
-        IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
-        iwl_fw_free_dump_desc(fwrt);
-        clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
-        return;
-    }
+  if (fwrt->ops && fwrt->ops->fw_running && !fwrt->ops->fw_running(fwrt->ops_ctx)) {
+    IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
+    iwl_fw_free_dump_desc(fwrt);
+    clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
+    return;
+  }
 
-    iwl_fw_dbg_stop_recording(fwrt, &params);
+  iwl_fw_dbg_stop_recording(fwrt, &params);
 
-    iwl_fw_error_dump(fwrt);
+  iwl_fw_error_dump(fwrt);
 
-    /* start recording again if the firmware is not crashed */
-    if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && fwrt->fw->dbg.dest_tlv) {
-        /* wait before we collect the data till the DBGC stop */
-        udelay(500);
-        iwl_fw_dbg_restart_recording(fwrt, &params);
-    }
+  /* start recording again if the firmware is not crashed */
+  if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && fwrt->fw->dbg.dest_tlv) {
+    /* wait before we collect the data till the DBGC stop */
+    udelay(500);
+    iwl_fw_dbg_restart_recording(fwrt, &params);
+  }
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
 
 void iwl_fw_error_dump_wk(struct work_struct* work) {
-    struct iwl_fw_runtime* fwrt = container_of(work, struct iwl_fw_runtime, dump.wk.work);
+  struct iwl_fw_runtime* fwrt = container_of(work, struct iwl_fw_runtime, dump.wk.work);
 
-    if (fwrt->ops && fwrt->ops->dump_start && fwrt->ops->dump_start(fwrt->ops_ctx)) { return; }
+  if (fwrt->ops && fwrt->ops->dump_start && fwrt->ops->dump_start(fwrt->ops_ctx)) {
+    return;
+  }
 
-    iwl_fw_dbg_collect_sync(fwrt);
+  iwl_fw_dbg_collect_sync(fwrt);
 
-    if (fwrt->ops && fwrt->ops->dump_end) { fwrt->ops->dump_end(fwrt->ops_ctx); }
+  if (fwrt->ops && fwrt->ops->dump_end) {
+    fwrt->ops->dump_end(fwrt->ops_ctx);
+  }
 }
 
 void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime* fwrt) {
-    const struct iwl_cfg* cfg = fwrt->trans->cfg;
+  const struct iwl_cfg* cfg = fwrt->trans->cfg;
 
-    if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt)) { return; }
+  if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt)) {
+    return;
+  }
 
+  if (!fwrt->dump.d3_debug_data) {
+    fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length, GFP_KERNEL);
     if (!fwrt->dump.d3_debug_data) {
-        fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length, GFP_KERNEL);
-        if (!fwrt->dump.d3_debug_data) {
-            IWL_ERR(fwrt, "failed to allocate memory for D3 debug data\n");
-            return;
-        }
+      IWL_ERR(fwrt, "failed to allocate memory for D3 debug data\n");
+      return;
     }
+  }
 
-    /* if the buffer holds previous debug data it is overwritten */
-    iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr, fwrt->dump.d3_debug_data,
-                             cfg->d3_debug_data_length);
+  /* if the buffer holds previous debug data it is overwritten */
+  iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr, fwrt->dump.d3_debug_data,
+                           cfg->d3_debug_data_length);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
 
 static void iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime* fwrt,
                                          struct iwl_fw_ini_allocation_tlv* alloc) {
-    struct iwl_trans* trans = fwrt->trans;
-    struct iwl_continuous_record_cmd cont_rec = {};
-    struct iwl_buffer_allocation_cmd* cmd = (void*)&cont_rec.pad[0];
-    struct iwl_host_cmd hcmd = {
-        .id = LDBG_CONFIG_CMD,
-        .flags = CMD_ASYNC,
-        .data[0] = &cont_rec,
-        .len[0] = sizeof(cont_rec),
-    };
-    void* virtual_addr = NULL;
-    uint32_t size = le32_to_cpu(alloc->size);
-    dma_addr_t phys_addr;
+  struct iwl_trans* trans = fwrt->trans;
+  struct iwl_continuous_record_cmd cont_rec = {};
+  struct iwl_buffer_allocation_cmd* cmd = (void*)&cont_rec.pad[0];
+  struct iwl_host_cmd hcmd = {
+      .id = LDBG_CONFIG_CMD,
+      .flags = CMD_ASYNC,
+      .data[0] = &cont_rec,
+      .len[0] = sizeof(cont_rec),
+  };
+  void* virtual_addr = NULL;
+  uint32_t size = le32_to_cpu(alloc->size);
+  dma_addr_t phys_addr;
 
-    cont_rec.record_mode.enable_recording = cpu_to_le16(BUFFER_ALLOCATION);
+  cont_rec.record_mode.enable_recording = cpu_to_le16(BUFFER_ALLOCATION);
 
-    if (!trans->num_blocks &&
-        le32_to_cpu(alloc->buffer_location) != IWL_FW_INI_LOCATION_DRAM_PATH) {
-        return;
-    }
+  if (!trans->num_blocks && le32_to_cpu(alloc->buffer_location) != IWL_FW_INI_LOCATION_DRAM_PATH) {
+    return;
+  }
 
-    virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size, &phys_addr, GFP_KERNEL);
+  virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size, &phys_addr, GFP_KERNEL);
 
-    /* TODO: alloc fragments if needed */
-    if (!virtual_addr) { IWL_ERR(fwrt, "Failed to allocate debug memory\n"); }
+  /* TODO: alloc fragments if needed */
+  if (!virtual_addr) {
+    IWL_ERR(fwrt, "Failed to allocate debug memory\n");
+  }
 
-    if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon))) { return; }
+  if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon))) {
+    return;
+  }
 
-    trans->fw_mon[trans->num_blocks].block = virtual_addr;
-    trans->fw_mon[trans->num_blocks].physical = phys_addr;
-    trans->fw_mon[trans->num_blocks].size = size;
-    trans->num_blocks++;
+  trans->fw_mon[trans->num_blocks].block = virtual_addr;
+  trans->fw_mon[trans->num_blocks].physical = phys_addr;
+  trans->fw_mon[trans->num_blocks].size = size;
+  trans->num_blocks++;
 
-    IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size);
+  IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size);
 
-    /* First block is assigned via registers / context info */
-    if (trans->num_blocks == 1) { return; }
+  /* First block is assigned via registers / context info */
+  if (trans->num_blocks == 1) {
+    return;
+  }
 
-    cmd->num_frags = cpu_to_le32(1);
-    cmd->fragments[0].address = cpu_to_le64(phys_addr);
-    cmd->fragments[0].size = alloc->size;
-    cmd->allocation_id = alloc->allocation_id;
-    cmd->buffer_location = alloc->buffer_location;
+  cmd->num_frags = cpu_to_le32(1);
+  cmd->fragments[0].address = cpu_to_le64(phys_addr);
+  cmd->fragments[0].size = alloc->size;
+  cmd->allocation_id = alloc->allocation_id;
+  cmd->buffer_location = alloc->buffer_location;
 
-    iwl_trans_send_cmd(trans, &hcmd);
+  iwl_trans_send_cmd(trans, &hcmd);
 }
 
 static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime* fwrt, struct iwl_ucode_tlv* tlv) {
-    struct iwl_fw_ini_hcmd_tlv* hcmd_tlv = (void*)&tlv->data[0];
-    struct iwl_fw_ini_hcmd* data = &hcmd_tlv->hcmd;
-    uint16_t len = le32_to_cpu(tlv->length) - sizeof(*hcmd_tlv);
+  struct iwl_fw_ini_hcmd_tlv* hcmd_tlv = (void*)&tlv->data[0];
+  struct iwl_fw_ini_hcmd* data = &hcmd_tlv->hcmd;
+  uint16_t len = le32_to_cpu(tlv->length) - sizeof(*hcmd_tlv);
 
-    struct iwl_host_cmd hcmd = {
-        .id = WIDE_ID(data->group, data->id),
-        .len =
-            {
-                len,
-            },
-        .data =
-            {
-                data->data,
-            },
-    };
+  struct iwl_host_cmd hcmd = {
+      .id = WIDE_ID(data->group, data->id),
+      .len =
+          {
+              len,
+          },
+      .data =
+          {
+              data->data,
+          },
+  };
 
-    iwl_trans_send_cmd(fwrt->trans, &hcmd);
+  iwl_trans_send_cmd(fwrt->trans, &hcmd);
 }
 
 static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime* fwrt,
                                       struct iwl_fw_ini_region_tlv* tlv, bool ext,
                                       enum iwl_fw_ini_apply_point pnt) {
-    void* iter = (void*)tlv->region_config;
-    int i, size = le32_to_cpu(tlv->num_regions);
+  void* iter = (void*)tlv->region_config;
+  int i, size = le32_to_cpu(tlv->num_regions);
 
-    for (i = 0; i < size; i++) {
-        struct iwl_fw_ini_region_cfg* reg = iter;
-        int id = le32_to_cpu(reg->region_id);
-        struct iwl_fw_ini_active_regs* active;
+  for (i = 0; i < size; i++) {
+    struct iwl_fw_ini_region_cfg* reg = iter;
+    int id = le32_to_cpu(reg->region_id);
+    struct iwl_fw_ini_active_regs* active;
 
-        if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs),
-                 "Invalid region id %d for apply point %d\n", id, pnt)) {
-            break;
-        }
-
-        active = &fwrt->dump.active_regs[id];
-
-        if (ext && active->apply_point == pnt) {
-            IWL_WARN(fwrt->trans, "External region TLV overrides FW default %x\n", id);
-        }
-
-        IWL_DEBUG_FW(fwrt, "%s: apply point %d, activating region ID %d\n", __func__, pnt, id);
-
-        active->reg = reg;
-        active->apply_point = pnt;
-
-        if (le32_to_cpu(reg->region_type) != IWL_FW_INI_REGION_DRAM_BUFFER) {
-            iter += le32_to_cpu(reg->num_regions) * sizeof(__le32);
-        }
-
-        iter += sizeof(*reg);
+    if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs), "Invalid region id %d for apply point %d\n",
+             id, pnt)) {
+      break;
     }
+
+    active = &fwrt->dump.active_regs[id];
+
+    if (ext && active->apply_point == pnt) {
+      IWL_WARN(fwrt->trans, "External region TLV overrides FW default %x\n", id);
+    }
+
+    IWL_DEBUG_FW(fwrt, "%s: apply point %d, activating region ID %d\n", __func__, pnt, id);
+
+    active->reg = reg;
+    active->apply_point = pnt;
+
+    if (le32_to_cpu(reg->region_type) != IWL_FW_INI_REGION_DRAM_BUFFER) {
+      iter += le32_to_cpu(reg->num_regions) * sizeof(__le32);
+    }
+
+    iter += sizeof(*reg);
+  }
 }
 
 static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime* fwrt,
                                        struct iwl_fw_ini_trigger_tlv* tlv, bool ext,
                                        enum iwl_fw_ini_apply_point apply_point) {
-    int i, size = le32_to_cpu(tlv->num_triggers);
-    void* iter = (void*)tlv->trigger_config;
+  int i, size = le32_to_cpu(tlv->num_triggers);
+  void* iter = (void*)tlv->trigger_config;
 
-    for (i = 0; i < size; i++) {
-        struct iwl_fw_ini_trigger* trig = iter;
-        struct iwl_fw_ini_active_triggers* active;
-        int id = le32_to_cpu(trig->trigger_id);
-        uint32_t num;
+  for (i = 0; i < size; i++) {
+    struct iwl_fw_ini_trigger* trig = iter;
+    struct iwl_fw_ini_active_triggers* active;
+    int id = le32_to_cpu(trig->trigger_id);
+    uint32_t num;
 
-        if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs))) { break; }
-
-        active = &fwrt->dump.active_trigs[id];
-
-        if (active->apply_point != apply_point) {
-            active->conf = NULL;
-            active->conf_ext = NULL;
-        }
-
-        num = le32_to_cpu(trig->num_regions);
-
-        if (ext && active->apply_point == apply_point) {
-            num += le32_to_cpu(active->conf->num_regions);
-            if (trig->ignore_default) {
-                active->conf_ext = active->conf;
-                active->conf = trig;
-            } else {
-                active->conf_ext = trig;
-            }
-        } else {
-            active->conf = trig;
-        }
-
-        /* Since zero means infinity - just set to -1 */
-        if (!le32_to_cpu(trig->occurrences)) { trig->occurrences = cpu_to_le32(-1); }
-        if (!le32_to_cpu(trig->ignore_consec)) { trig->ignore_consec = cpu_to_le32(-1); }
-
-        iter += sizeof(*trig) + le32_to_cpu(trig->num_regions) * sizeof(__le32);
-
-        active->active = num;
-        active->apply_point = apply_point;
+    if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs))) {
+      break;
     }
+
+    active = &fwrt->dump.active_trigs[id];
+
+    if (active->apply_point != apply_point) {
+      active->conf = NULL;
+      active->conf_ext = NULL;
+    }
+
+    num = le32_to_cpu(trig->num_regions);
+
+    if (ext && active->apply_point == apply_point) {
+      num += le32_to_cpu(active->conf->num_regions);
+      if (trig->ignore_default) {
+        active->conf_ext = active->conf;
+        active->conf = trig;
+      } else {
+        active->conf_ext = trig;
+      }
+    } else {
+      active->conf = trig;
+    }
+
+    /* Since zero means infinity - just set to -1 */
+    if (!le32_to_cpu(trig->occurrences)) {
+      trig->occurrences = cpu_to_le32(-1);
+    }
+    if (!le32_to_cpu(trig->ignore_consec)) {
+      trig->ignore_consec = cpu_to_le32(-1);
+    }
+
+    iter += sizeof(*trig) + le32_to_cpu(trig->num_regions) * sizeof(__le32);
+
+    active->active = num;
+    active->apply_point = apply_point;
+  }
 }
 
 static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime* fwrt, struct iwl_apply_point_data* data,
                                     enum iwl_fw_ini_apply_point pnt, bool ext) {
-    void* iter = data->data;
+  void* iter = data->data;
 
-    while (iter && iter < data->data + data->size) {
-        struct iwl_ucode_tlv* tlv = iter;
-        void* ini_tlv = (void*)tlv->data;
-        uint32_t type = le32_to_cpu(tlv->type);
+  while (iter && iter < data->data + data->size) {
+    struct iwl_ucode_tlv* tlv = iter;
+    void* ini_tlv = (void*)tlv->data;
+    uint32_t type = le32_to_cpu(tlv->type);
 
-        switch (type) {
-        case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
-            iwl_fw_dbg_buffer_allocation(fwrt, ini_tlv);
-            break;
-        case IWL_UCODE_TLV_TYPE_HCMD:
-            if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
-                IWL_ERR(fwrt, "Invalid apply point %x for host command\n", pnt);
-                goto next;
-            }
-            iwl_fw_dbg_send_hcmd(fwrt, tlv);
-            break;
-        case IWL_UCODE_TLV_TYPE_REGIONS:
-            iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt);
-            break;
-        case IWL_UCODE_TLV_TYPE_TRIGGERS:
-            iwl_fw_dbg_update_triggers(fwrt, ini_tlv, ext, pnt);
-            break;
-        case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
-            break;
-        default:
-            WARN_ONCE(1, "Invalid TLV %x for apply point\n", type);
-            break;
+    switch (type) {
+      case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
+        iwl_fw_dbg_buffer_allocation(fwrt, ini_tlv);
+        break;
+      case IWL_UCODE_TLV_TYPE_HCMD:
+        if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
+          IWL_ERR(fwrt, "Invalid apply point %x for host command\n", pnt);
+          goto next;
         }
-    next:
-        iter += sizeof(*tlv) + le32_to_cpu(tlv->length);
+        iwl_fw_dbg_send_hcmd(fwrt, tlv);
+        break;
+      case IWL_UCODE_TLV_TYPE_REGIONS:
+        iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt);
+        break;
+      case IWL_UCODE_TLV_TYPE_TRIGGERS:
+        iwl_fw_dbg_update_triggers(fwrt, ini_tlv, ext, pnt);
+        break;
+      case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
+        break;
+      default:
+        WARN_ONCE(1, "Invalid TLV %x for apply point\n", type);
+        break;
     }
+  next:
+    iter += sizeof(*tlv) + le32_to_cpu(tlv->length);
+  }
 }
 
 void iwl_fw_dbg_apply_point(struct iwl_fw_runtime* fwrt, enum iwl_fw_ini_apply_point apply_point) {
-    void* data = &fwrt->trans->apply_points[apply_point];
+  void* data = &fwrt->trans->apply_points[apply_point];
 
-    _iwl_fw_dbg_apply_point(fwrt, data, apply_point, false);
+  _iwl_fw_dbg_apply_point(fwrt, data, apply_point, false);
 
-    data = &fwrt->trans->apply_points_ext[apply_point];
-    _iwl_fw_dbg_apply_point(fwrt, data, apply_point, true);
+  data = &fwrt->trans->apply_points_ext[apply_point];
+  _iwl_fw_dbg_apply_point(fwrt, data, apply_point, true);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/dbg.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/dbg.h
index f9485e2..45e3c11 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/dbg.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/dbg.h
@@ -51,9 +51,9 @@
  * @trig_desc: the description of the dump
  */
 struct iwl_fw_dump_desc {
-    size_t len;
-    /* must be last */
-    struct iwl_fw_error_dump_trigger_desc trig_desc;
+  size_t len;
+  /* must be last */
+  struct iwl_fw_error_dump_trigger_desc trig_desc;
 };
 
 /**
@@ -62,18 +62,22 @@
  * @out_ctrl: DBGC_OUT_CTRL value
  */
 struct iwl_fw_dbg_params {
-    uint32_t in_sample;
-    uint32_t out_ctrl;
+  uint32_t in_sample;
+  uint32_t out_ctrl;
 };
 
 extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
 
 static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime* fwrt) {
-    if (fwrt->dump.desc != &iwl_dump_desc_assert) { kfree((void*)fwrt->dump.desc); }
-    fwrt->dump.desc = NULL;
-    fwrt->dump.lmac_err_id[0] = 0;
-    if (fwrt->smem_cfg.num_lmacs > 1) { fwrt->dump.lmac_err_id[1] = 0; }
-    fwrt->dump.umac_err_id = 0;
+  if (fwrt->dump.desc != &iwl_dump_desc_assert) {
+    kfree((void*)fwrt->dump.desc);
+  }
+  fwrt->dump.desc = NULL;
+  fwrt->dump.lmac_err_id[0] = 0;
+  if (fwrt->smem_cfg.num_lmacs > 1) {
+    fwrt->dump.lmac_err_id[1] = 0;
+  }
+  fwrt->dump.umac_err_id = 0;
 }
 
 void iwl_fw_error_dump(struct iwl_fw_runtime* fwrt);
@@ -86,39 +90,39 @@
                             const char* fmt, ...);
 int iwl_fw_start_dbg_conf(struct iwl_fw_runtime* fwrt, uint8_t id);
 
-#define iwl_fw_dbg_trigger_enabled(fw, id)                 \
-    ({                                                     \
-        void* __dbg_trigger = (fw)->dbg.trigger_tlv[(id)]; \
-        unlikely(__dbg_trigger);                           \
-    })
+#define iwl_fw_dbg_trigger_enabled(fw, id)             \
+  ({                                                   \
+    void* __dbg_trigger = (fw)->dbg.trigger_tlv[(id)]; \
+    unlikely(__dbg_trigger);                           \
+  })
 
 static inline struct iwl_fw_dbg_trigger_tlv* _iwl_fw_dbg_get_trigger(const struct iwl_fw* fw,
                                                                      enum iwl_fw_dbg_trigger id) {
-    return fw->dbg.trigger_tlv[id];
+  return fw->dbg.trigger_tlv[id];
 }
 
-#define iwl_fw_dbg_get_trigger(fw, id)            \
-    ({                                            \
-        BUILD_BUG_ON(!__builtin_constant_p(id));  \
-        BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \
-        _iwl_fw_dbg_get_trigger((fw), (id));      \
-    })
+#define iwl_fw_dbg_get_trigger(fw, id)        \
+  ({                                          \
+    BUILD_BUG_ON(!__builtin_constant_p(id));  \
+    BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \
+    _iwl_fw_dbg_get_trigger((fw), (id));      \
+  })
 
 static inline bool iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv* trig,
                                                 struct wireless_dev* wdev) {
-    uint32_t trig_vif = le32_to_cpu(trig->vif_type);
+  uint32_t trig_vif = le32_to_cpu(trig->vif_type);
 
-    return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || wdev->iftype == trig_vif;
+  return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || wdev->iftype == trig_vif;
 }
 
 static inline bool iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime* fwrt,
                                                       struct iwl_fw_dbg_trigger_tlv* trig) {
-    return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) &&
-            (fwrt->dump.conf == FW_DBG_INVALID ||
-             (BIT(fwrt->dump.conf) & le32_to_cpu(trig->stop_conf_ids))));
+  return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) &&
+          (fwrt->dump.conf == FW_DBG_INVALID ||
+           (BIT(fwrt->dump.conf) & le32_to_cpu(trig->stop_conf_ids))));
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static inline bool iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime* fwrt, uint32_t id,
                                              uint32_t dis_ms) {
     unsigned long wind_jiff = msecs_to_jiffies(dis_ms);
@@ -161,12 +165,12 @@
     return trig;
 }
 
-#define iwl_fw_dbg_trigger_on(fwrt, wdev, id)         \
-    ({                                                \
-        BUILD_BUG_ON(!__builtin_constant_p(id));      \
-        BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX);     \
-        _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \
-    })
+#define iwl_fw_dbg_trigger_on(fwrt, wdev, id)     \
+  ({                                              \
+    BUILD_BUG_ON(!__builtin_constant_p(id));      \
+    BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX);     \
+    _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \
+  })
 
 static inline bool _iwl_fw_ini_trigger_on(struct iwl_fw_runtime* fwrt,
                                           const enum iwl_fw_dbg_trigger id) {
@@ -188,12 +192,12 @@
     return true;
 }
 
-#define iwl_fw_ini_trigger_on(fwrt, wdev, id)         \
-    ({                                                \
-        BUILD_BUG_ON(!__builtin_constant_p(id));      \
-        BUILD_BUG_ON((id) >= IWL_FW_TRIGGER_ID_NUM);  \
-        _iwl_fw_ini_trigger_on((fwrt), (wdev), (id)); \
-    })
+#define iwl_fw_ini_trigger_on(fwrt, wdev, id)     \
+  ({                                              \
+    BUILD_BUG_ON(!__builtin_constant_p(id));      \
+    BUILD_BUG_ON((id) >= IWL_FW_TRIGGER_ID_NUM);  \
+    _iwl_fw_ini_trigger_on((fwrt), (wdev), (id)); \
+  })
 
 static inline void _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime* fwrt,
                                                    struct wireless_dev* wdev,
@@ -206,7 +210,7 @@
 }
 
 #define iwl_fw_dbg_trigger_simple_stop(fwrt, wdev, trig) \
-    _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), iwl_fw_dbg_get_trigger((fwrt)->fw, (trig)))
+  _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), iwl_fw_dbg_get_trigger((fwrt)->fw, (trig)))
 
 static int iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime* fwrt, bool start) {
     struct iwl_continuous_record_cmd cont_rec = {};
@@ -289,7 +293,7 @@
 #endif  // NEEDS_PORTING
 
 static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime* fwrt) {
-    fwrt->dump.conf = FW_DBG_INVALID;
+  fwrt->dump.conf = FW_DBG_INVALID;
 }
 
 #if 0   // NEEDS_PORTING
@@ -301,7 +305,7 @@
 #endif  // NEEDS_PORTING
 
 static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime* fwrt) {
-    return false;
+  return false;
 #if 0   // NEEDS_PORTING
     return fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
            fwrt->trans->cfg->d3_debug_data_length &&
@@ -328,20 +332,22 @@
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
 static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime* fwrt) {
-    fwrt->timestamp.delay = 0;
-    cancel_delayed_work_sync(&fwrt->timestamp.wk);
+  fwrt->timestamp.delay = 0;
+  cancel_delayed_work_sync(&fwrt->timestamp.wk);
 }
 
 void iwl_fw_trigger_timestamp(struct iwl_fw_runtime* fwrt, uint32_t delay);
 
 static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime* fwrt) {
-    cancel_delayed_work_sync(&fwrt->timestamp.wk);
+  cancel_delayed_work_sync(&fwrt->timestamp.wk);
 }
 
 static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime* fwrt) {
-    if (!fwrt->timestamp.delay) { return; }
+  if (!fwrt->timestamp.delay) {
+    return;
+  }
 
-    schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(fwrt->timestamp.delay));
+  schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(fwrt->timestamp.delay));
 }
 
 #else
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/debugfs.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/debugfs.c
index e377480..9547a2e6 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/debugfs.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/debugfs.c
@@ -34,6 +34,7 @@
  *
  *****************************************************************************/
 #include "debugfs.h"
+
 #include "api/commands.h"
 #include "dbg.h"
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
@@ -41,477 +42,498 @@
 #endif
 #include "api/rs.h"
 
-#define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)                          \
-    struct dbgfs_##name##_data {                                                  \
-        argtype* arg;                                                             \
-        bool read_done;                                                           \
-        ssize_t rlen;                                                             \
-        char rbuf[buflen];                                                        \
-    };                                                                            \
-    static int _iwl_dbgfs_##name##_open(struct inode* inode, struct file* file) { \
-        struct dbgfs_##name##_data* data;                                         \
-                                                                                  \
-        data = kzalloc(sizeof(*data), GFP_KERNEL);                                \
-        if (!data) return -ENOMEM;                                                \
-                                                                                  \
-        data->read_done = false;                                                  \
-        data->arg = inode->i_private;                                             \
-        file->private_data = data;                                                \
-                                                                                  \
-        return 0;                                                                 \
-    }
+#define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)                        \
+  struct dbgfs_##name##_data {                                                  \
+    argtype* arg;                                                               \
+    bool read_done;                                                             \
+    ssize_t rlen;                                                               \
+    char rbuf[buflen];                                                          \
+  };                                                                            \
+  static int _iwl_dbgfs_##name##_open(struct inode* inode, struct file* file) { \
+    struct dbgfs_##name##_data* data;                                           \
+                                                                                \
+    data = kzalloc(sizeof(*data), GFP_KERNEL);                                  \
+    if (!data)                                                                  \
+      return -ENOMEM;                                                           \
+                                                                                \
+    data->read_done = false;                                                    \
+    data->arg = inode->i_private;                                               \
+    file->private_data = data;                                                  \
+                                                                                \
+    return 0;                                                                   \
+  }
 
-#define FWRT_DEBUGFS_READ_WRAPPER(name)                                                      \
-    static ssize_t _iwl_dbgfs_##name##_read(struct file* file, char __user* user_buf,        \
-                                            size_t count, loff_t* ppos) {                    \
-        struct dbgfs_##name##_data* data = file->private_data;                               \
-                                                                                             \
-        if (!data->read_done) {                                                              \
-            data->read_done = true;                                                          \
-            data->rlen = iwl_dbgfs_##name##_read(data->arg, sizeof(data->rbuf), data->rbuf); \
-        }                                                                                    \
-                                                                                             \
-        if (data->rlen < 0) return data->rlen;                                               \
-        return simple_read_from_buffer(user_buf, count, ppos, data->rbuf, data->rlen);       \
-    }
+#define FWRT_DEBUGFS_READ_WRAPPER(name)                                                           \
+  static ssize_t _iwl_dbgfs_##name##_read(struct file* file, char __user* user_buf, size_t count, \
+                                          loff_t* ppos) {                                         \
+    struct dbgfs_##name##_data* data = file->private_data;                                        \
+                                                                                                  \
+    if (!data->read_done) {                                                                       \
+      data->read_done = true;                                                                     \
+      data->rlen = iwl_dbgfs_##name##_read(data->arg, sizeof(data->rbuf), data->rbuf);            \
+    }                                                                                             \
+                                                                                                  \
+    if (data->rlen < 0)                                                                           \
+      return data->rlen;                                                                          \
+    return simple_read_from_buffer(user_buf, count, ppos, data->rbuf, data->rlen);                \
+  }
 
 static int _iwl_dbgfs_release(struct inode* inode, struct file* file) {
-    kfree(file->private_data);
+  kfree(file->private_data);
 
-    return 0;
+  return 0;
 }
 
-#define _FWRT_DEBUGFS_READ_FILE_OPS(name, buflen, argtype)         \
-    FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)               \
-    FWRT_DEBUGFS_READ_WRAPPER(name)                                \
-    static const struct file_operations iwl_dbgfs_##name##_ops = { \
-        .read = _iwl_dbgfs_##name##_read,                          \
-        .open = _iwl_dbgfs_##name##_open,                          \
-        .llseek = generic_file_llseek,                             \
-        .release = _iwl_dbgfs_release,                             \
-    }
+#define _FWRT_DEBUGFS_READ_FILE_OPS(name, buflen, argtype)       \
+  FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)               \
+  FWRT_DEBUGFS_READ_WRAPPER(name)                                \
+  static const struct file_operations iwl_dbgfs_##name##_ops = { \
+      .read = _iwl_dbgfs_##name##_read,                          \
+      .open = _iwl_dbgfs_##name##_open,                          \
+      .llseek = generic_file_llseek,                             \
+      .release = _iwl_dbgfs_release,                             \
+  }
 
-#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)                                    \
-    static ssize_t _iwl_dbgfs_##name##_write(struct file* file, const char __user* user_buf, \
-                                             size_t count, loff_t* ppos) {                   \
-        argtype* arg = ((struct dbgfs_##name##_data*)file->private_data)->arg;               \
-        char buf[buflen] = {};                                                               \
-        size_t buf_size = min(count, sizeof(buf) - 1);                                       \
-                                                                                             \
-        if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT;                         \
-                                                                                             \
-        return iwl_dbgfs_##name##_write(arg, buf, buf_size);                                 \
-    }
+#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)                                  \
+  static ssize_t _iwl_dbgfs_##name##_write(struct file* file, const char __user* user_buf, \
+                                           size_t count, loff_t* ppos) {                   \
+    argtype* arg = ((struct dbgfs_##name##_data*)file->private_data)->arg;                 \
+    char buf[buflen] = {};                                                                 \
+    size_t buf_size = min(count, sizeof(buf) - 1);                                         \
+                                                                                           \
+    if (copy_from_user(buf, user_buf, buf_size))                                           \
+      return -EFAULT;                                                                      \
+                                                                                           \
+    return iwl_dbgfs_##name##_write(arg, buf, buf_size);                                   \
+  }
 
-#define _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype)   \
-    FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)               \
-    FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)              \
-    FWRT_DEBUGFS_READ_WRAPPER(name)                                \
-    static const struct file_operations iwl_dbgfs_##name##_ops = { \
-        .write = _iwl_dbgfs_##name##_write,                        \
-        .read = _iwl_dbgfs_##name##_read,                          \
-        .open = _iwl_dbgfs_##name##_open,                          \
-        .llseek = generic_file_llseek,                             \
-        .release = _iwl_dbgfs_release,                             \
-    }
+#define _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
+  FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)               \
+  FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)              \
+  FWRT_DEBUGFS_READ_WRAPPER(name)                                \
+  static const struct file_operations iwl_dbgfs_##name##_ops = { \
+      .write = _iwl_dbgfs_##name##_write,                        \
+      .read = _iwl_dbgfs_##name##_read,                          \
+      .open = _iwl_dbgfs_##name##_open,                          \
+      .llseek = generic_file_llseek,                             \
+      .release = _iwl_dbgfs_release,                             \
+  }
 
-#define _FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype)        \
-    FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)               \
-    FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)              \
-    static const struct file_operations iwl_dbgfs_##name##_ops = { \
-        .write = _iwl_dbgfs_##name##_write,                        \
-        .open = _iwl_dbgfs_##name##_open,                          \
-        .llseek = generic_file_llseek,                             \
-        .release = _iwl_dbgfs_release,                             \
-    }
+#define _FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype)      \
+  FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)               \
+  FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)              \
+  static const struct file_operations iwl_dbgfs_##name##_ops = { \
+      .write = _iwl_dbgfs_##name##_write,                        \
+      .open = _iwl_dbgfs_##name##_open,                          \
+      .llseek = generic_file_llseek,                             \
+      .release = _iwl_dbgfs_release,                             \
+  }
 
 #define FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz) \
-    _FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+  _FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
 
 #define FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
-    _FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+  _FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
 
 #define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
-    _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+  _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
 
-#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode)                                  \
-    do {                                                                                        \
-        if (!debugfs_create_file(alias, mode, parent, fwrt, &iwl_dbgfs_##name##_ops)) goto err; \
-    } while (0)
+#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode)                    \
+  do {                                                                            \
+    if (!debugfs_create_file(alias, mode, parent, fwrt, &iwl_dbgfs_##name##_ops)) \
+      goto err;                                                                   \
+  } while (0)
 #define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
-    FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+  FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
 
 static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime* fwrt) {
-    struct iwl_mvm_marker marker = {
-        .dw_len = sizeof(struct iwl_mvm_marker) / 4,
-        .marker_id = MARKER_ID_SYNC_CLOCK,
+  struct iwl_mvm_marker marker = {
+      .dw_len = sizeof(struct iwl_mvm_marker) / 4,
+      .marker_id = MARKER_ID_SYNC_CLOCK,
 
-        /* the real timestamp is taken from the ftrace clock
-         * this is for finding the match between fw and kernel logs
-         */
-        .timestamp = cpu_to_le64(fwrt->timestamp.seq++),
-    };
+      /* the real timestamp is taken from the ftrace clock
+       * this is for finding the match between fw and kernel logs
+       */
+      .timestamp = cpu_to_le64(fwrt->timestamp.seq++),
+  };
 
-    struct iwl_host_cmd hcmd = {
-        .id = MARKER_CMD,
-        .flags = CMD_ASYNC,
-        .data[0] = &marker,
-        .len[0] = sizeof(marker),
-    };
+  struct iwl_host_cmd hcmd = {
+      .id = MARKER_CMD,
+      .flags = CMD_ASYNC,
+      .data[0] = &marker,
+      .len[0] = sizeof(marker),
+  };
 
-    return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+  return iwl_trans_send_cmd(fwrt->trans, &hcmd);
 }
 
 static void iwl_fw_timestamp_marker_wk(struct work_struct* work) {
-    int ret;
-    struct iwl_fw_runtime* fwrt = container_of(work, struct iwl_fw_runtime, timestamp.wk.work);
-    unsigned long delay = fwrt->timestamp.delay;
+  int ret;
+  struct iwl_fw_runtime* fwrt = container_of(work, struct iwl_fw_runtime, timestamp.wk.work);
+  unsigned long delay = fwrt->timestamp.delay;
 
-    ret = iwl_fw_send_timestamp_marker_cmd(fwrt);
-    if (!ret && delay) {
-        schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(delay));
-    } else
-        IWL_INFO(fwrt, "stopping timestamp_marker, ret: %d, delay: %u\n", ret,
-                 jiffies_to_msecs(delay) / 1000);
+  ret = iwl_fw_send_timestamp_marker_cmd(fwrt);
+  if (!ret && delay) {
+    schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(delay));
+  } else
+    IWL_INFO(fwrt, "stopping timestamp_marker, ret: %d, delay: %u\n", ret,
+             jiffies_to_msecs(delay) / 1000);
 }
 
 void iwl_fw_trigger_timestamp(struct iwl_fw_runtime* fwrt, uint32_t delay) {
-    IWL_INFO(fwrt, "starting timestamp_marker trigger with delay: %us\n", delay);
+  IWL_INFO(fwrt, "starting timestamp_marker trigger with delay: %us\n", delay);
 
-    iwl_fw_cancel_timestamp(fwrt);
+  iwl_fw_cancel_timestamp(fwrt);
 
-    fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000);
+  fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000);
 
-    schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(fwrt->timestamp.delay));
+  schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(fwrt->timestamp.delay));
 }
 
 static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime* fwrt, char* buf,
                                                 size_t count) {
-    int ret;
-    uint32_t delay;
+  int ret;
+  uint32_t delay;
 
-    ret = kstrtou32(buf, 10, &delay);
-    if (ret < 0) { return ret; }
+  ret = kstrtou32(buf, 10, &delay);
+  if (ret < 0) {
+    return ret;
+  }
 
-    iwl_fw_trigger_timestamp(fwrt, delay);
+  iwl_fw_trigger_timestamp(fwrt, delay);
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime* fwrt, size_t size,
                                                char* buf) {
-    uint32_t delay_secs = jiffies_to_msecs(fwrt->timestamp.delay) / 1000;
+  uint32_t delay_secs = jiffies_to_msecs(fwrt->timestamp.delay) / 1000;
 
-    return scnprintf(buf, size, "%d\n", delay_secs);
+  return scnprintf(buf, size, "%d\n", delay_secs);
 }
 
 FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16);
 
 struct hcmd_write_data {
-    __be32 cmd_id;
-    __be32 flags;
-    __be16 length;
-    uint8_t data[0];
+  __be32 cmd_id;
+  __be32 flags;
+  __be16 length;
+  uint8_t data[0];
 } __packed;
 
 static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime* fwrt, char* buf, size_t count) {
-    size_t header_size = (sizeof(uint32_t) * 2 + sizeof(uint16_t)) * 2;
-    size_t data_size = (count - 1) / 2;
-    int ret;
-    struct hcmd_write_data* data;
-    struct iwl_host_cmd hcmd = {
-        .len =
-            {
-                0,
-            },
-        .data =
-            {
-                NULL,
-            },
-    };
+  size_t header_size = (sizeof(uint32_t) * 2 + sizeof(uint16_t)) * 2;
+  size_t data_size = (count - 1) / 2;
+  int ret;
+  struct hcmd_write_data* data;
+  struct iwl_host_cmd hcmd = {
+      .len =
+          {
+              0,
+          },
+      .data =
+          {
+              NULL,
+          },
+  };
 
-    if (fwrt->ops && fwrt->ops->fw_running && !fwrt->ops->fw_running(fwrt->ops_ctx)) {
-        return -EIO;
-    }
+  if (fwrt->ops && fwrt->ops->fw_running && !fwrt->ops->fw_running(fwrt->ops_ctx)) {
+    return -EIO;
+  }
 
-    if (count < header_size + 1 || count > 1024 * 4) { return -EINVAL; }
+  if (count < header_size + 1 || count > 1024 * 4) {
+    return -EINVAL;
+  }
 
-    data = kmalloc(data_size, GFP_KERNEL);
-    if (!data) { return -ENOMEM; }
+  data = kmalloc(data_size, GFP_KERNEL);
+  if (!data) {
+    return -ENOMEM;
+  }
 
-    ret = hex2bin((uint8_t*)data, buf, data_size);
-    if (ret) { goto out; }
+  ret = hex2bin((uint8_t*)data, buf, data_size);
+  if (ret) {
+    goto out;
+  }
 
-    hcmd.id = be32_to_cpu(data->cmd_id);
-    hcmd.flags = be32_to_cpu(data->flags);
-    hcmd.len[0] = be16_to_cpu(data->length);
-    hcmd.data[0] = data->data;
+  hcmd.id = be32_to_cpu(data->cmd_id);
+  hcmd.flags = be32_to_cpu(data->flags);
+  hcmd.len[0] = be16_to_cpu(data->length);
+  hcmd.data[0] = data->data;
 
-    if (count != header_size + hcmd.len[0] * 2 + 1) {
-        IWL_ERR(fwrt, "host command data size does not match header length\n");
-        ret = -EINVAL;
-        goto out;
-    }
+  if (count != header_size + hcmd.len[0] * 2 + 1) {
+    IWL_ERR(fwrt, "host command data size does not match header length\n");
+    ret = -EINVAL;
+    goto out;
+  }
 
-    if (fwrt->ops && fwrt->ops->send_hcmd) {
-        ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
-    } else {
-        ret = -EPERM;
-    }
+  if (fwrt->ops && fwrt->ops->send_hcmd) {
+    ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
+  } else {
+    ret = -EPERM;
+  }
 
-    if (ret < 0) { goto out; }
+  if (ret < 0) {
+    goto out;
+  }
 
-    if (hcmd.flags & CMD_WANT_SKB) { iwl_free_resp(&hcmd); }
+  if (hcmd.flags & CMD_WANT_SKB) {
+    iwl_free_resp(&hcmd);
+  }
 out:
-    kfree(data);
-    return ret ?: count;
+  kfree(data);
+  return ret ?: count;
 }
 
 FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
 
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
 struct iwl_dhc_write_data {
-    __be32 length;
-    __be32 index_and_mask;
-    __be32 data[0];
+  __be32 length;
+  __be32 index_and_mask;
+  __be32 data[0];
 } __packed;
 
 static ssize_t iwl_dbgfs_send_dhc_write(struct iwl_fw_runtime* fwrt, char* buf, size_t count) {
-    int ret, i;
-    struct iwl_dhc_write_data* data;
-    uint32_t length;
-    size_t header_size = sizeof(uint32_t) * 2 * 2;
-    size_t data_size = (count - 1) / 2, cmd_size;
-    struct iwl_dhc_cmd* dhc_cmd = NULL;
-    struct iwl_host_cmd hcmd = {
-        .id = iwl_cmd_id(DEBUG_HOST_COMMAND, LEGACY_GROUP, 0),
-        .flags = CMD_ASYNC,
-        .len =
-            {
-                0,
-            },
-        .data =
-            {
-                NULL,
-            },
-    };
+  int ret, i;
+  struct iwl_dhc_write_data* data;
+  uint32_t length;
+  size_t header_size = sizeof(uint32_t) * 2 * 2;
+  size_t data_size = (count - 1) / 2, cmd_size;
+  struct iwl_dhc_cmd* dhc_cmd = NULL;
+  struct iwl_host_cmd hcmd = {
+      .id = iwl_cmd_id(DEBUG_HOST_COMMAND, LEGACY_GROUP, 0),
+      .flags = CMD_ASYNC,
+      .len =
+          {
+              0,
+          },
+      .data =
+          {
+              NULL,
+          },
+  };
 
-    if (fwrt->ops && fwrt->ops->fw_running && !fwrt->ops->fw_running(fwrt->ops_ctx)) {
-        return -EIO;
-    }
+  if (fwrt->ops && fwrt->ops->fw_running && !fwrt->ops->fw_running(fwrt->ops_ctx)) {
+    return -EIO;
+  }
 
-    if (count < header_size + 1 || count > 1024 * 4) { return -EINVAL; }
+  if (count < header_size + 1 || count > 1024 * 4) {
+    return -EINVAL;
+  }
 
-    data = kmalloc(data_size, GFP_KERNEL);
-    if (!data) { return -ENOMEM; }
+  data = kmalloc(data_size, GFP_KERNEL);
+  if (!data) {
+    return -ENOMEM;
+  }
 
-    ret = hex2bin((uint8_t*)data, buf, data_size);
-    if (ret) { goto out; }
+  ret = hex2bin((uint8_t*)data, buf, data_size);
+  if (ret) {
+    goto out;
+  }
 
-    length = be32_to_cpu(data->length);
+  length = be32_to_cpu(data->length);
 
-    if (count != header_size + sizeof(uint32_t) * length * 2 + 1) {
-        IWL_ERR(fwrt, "DHC data size does not match length header\n");
-        ret = -EINVAL;
-        goto out;
-    }
+  if (count != header_size + sizeof(uint32_t) * length * 2 + 1) {
+    IWL_ERR(fwrt, "DHC data size does not match length header\n");
+    ret = -EINVAL;
+    goto out;
+  }
 
-    cmd_size = sizeof(*dhc_cmd) + length * sizeof(uint32_t);
-    dhc_cmd = kzalloc(cmd_size, GFP_KERNEL);
-    if (!dhc_cmd) {
-        ret = -ENOMEM;
-        goto out;
-    }
+  cmd_size = sizeof(*dhc_cmd) + length * sizeof(uint32_t);
+  dhc_cmd = kzalloc(cmd_size, GFP_KERNEL);
+  if (!dhc_cmd) {
+    ret = -ENOMEM;
+    goto out;
+  }
 
-    dhc_cmd->length = cpu_to_le32(length);
-    dhc_cmd->index_and_mask = cpu_to_le32(be32_to_cpu(data->index_and_mask));
-    for (i = 0; i < length; i++) {
-        dhc_cmd->data[i] = cpu_to_le32(be32_to_cpu(data->data[i]));
-    }
+  dhc_cmd->length = cpu_to_le32(length);
+  dhc_cmd->index_and_mask = cpu_to_le32(be32_to_cpu(data->index_and_mask));
+  for (i = 0; i < length; i++) {
+    dhc_cmd->data[i] = cpu_to_le32(be32_to_cpu(data->data[i]));
+  }
 
-    hcmd.len[0] = cmd_size;
-    hcmd.data[0] = dhc_cmd;
+  hcmd.len[0] = cmd_size;
+  hcmd.data[0] = dhc_cmd;
 
-    if (fwrt->ops && fwrt->ops->send_hcmd) {
-        ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
-    } else {
-        ret = -EPERM;
-    }
+  if (fwrt->ops && fwrt->ops->send_hcmd) {
+    ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
+  } else {
+    ret = -EPERM;
+  }
 out:
-    kfree(dhc_cmd);
-    kfree(data);
-    return ret ?: count;
+  kfree(dhc_cmd);
+  kfree(data);
+  return ret ?: count;
 }
 
 FWRT_DEBUGFS_WRITE_FILE_OPS(send_dhc, 512);
 
 struct iwl_dhc_tlc_whole_cmd {
-    struct iwl_dhc_cmd dhc;
-    struct iwl_dhc_tlc_cmd tlc_data;
+  struct iwl_dhc_cmd dhc;
+  struct iwl_dhc_tlc_cmd tlc_data;
 } __packed;
 
 static void iwl_fw_build_dhc_tlc_cmd(struct iwl_dhc_tlc_whole_cmd* cmd,
                                      enum iwl_tlc_debug_flags flag, uint32_t data) {
-    cmd->dhc.length = cpu_to_le32(sizeof(cmd->tlc_data) >> 2);
-    cmd->dhc.index_and_mask =
-        cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC | DHC_INTEGRATION_TLC_DEBUG_CONFIG);
+  cmd->dhc.length = cpu_to_le32(sizeof(cmd->tlc_data) >> 2);
+  cmd->dhc.index_and_mask =
+      cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC | DHC_INTEGRATION_TLC_DEBUG_CONFIG);
 
-    cmd->tlc_data.flags = cpu_to_le32(BIT(flag));
-    cmd->tlc_data.data[flag] = cpu_to_le32(data);
+  cmd->tlc_data.flags = cpu_to_le32(BIT(flag));
+  cmd->tlc_data.data[flag] = cpu_to_le32(data);
 }
 
 static ssize_t iwl_dbgfs_tpc_enable_write(struct iwl_fw_runtime* fwrt, char* buf, size_t count) {
-    struct iwl_dhc_tlc_whole_cmd dhc_cmd = {{0}};
-    struct iwl_host_cmd hcmd = {
-        .id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0),
-        .data[0] = &dhc_cmd,
-        .len[0] = sizeof(dhc_cmd),
-    };
-    bool enabled;
-    int ret;
+  struct iwl_dhc_tlc_whole_cmd dhc_cmd = {{0}};
+  struct iwl_host_cmd hcmd = {
+      .id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0),
+      .data[0] = &dhc_cmd,
+      .len[0] = sizeof(dhc_cmd),
+  };
+  bool enabled;
+  int ret;
 
-    ret = kstrtobool(buf, &enabled);
-    iwl_fw_build_dhc_tlc_cmd(&dhc_cmd, IWL_TLC_DEBUG_TPC_ENABLED, enabled);
+  ret = kstrtobool(buf, &enabled);
+  iwl_fw_build_dhc_tlc_cmd(&dhc_cmd, IWL_TLC_DEBUG_TPC_ENABLED, enabled);
 
-    ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
-    if (ret) {
-        IWL_ERR(fwrt, "Failed to send TLC Debug command: %d\n", ret);
-        return ret;
-    }
+  ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+  if (ret) {
+    IWL_ERR(fwrt, "Failed to send TLC Debug command: %d\n", ret);
+    return ret;
+  }
 
-    fwrt->tpc_enabled = enabled;
+  fwrt->tpc_enabled = enabled;
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_tpc_enable_read(struct iwl_fw_runtime* fwrt, size_t size, char* buf) {
-    return scnprintf(buf, size, "tpc is currently %s\n",
-                     fwrt->tpc_enabled ? "enabled" : "disabled");
+  return scnprintf(buf, size, "tpc is currently %s\n", fwrt->tpc_enabled ? "enabled" : "disabled");
 }
 
 FWRT_DEBUGFS_READ_WRITE_FILE_OPS(tpc_enable, 30);
 
 static ssize_t iwl_dbgfs_tpc_stats_read(struct iwl_fw_runtime* fwrt, size_t size, char* buf) {
-    struct iwl_dhc_tlc_whole_cmd dhc_cmd = {{0}};
-    struct iwl_host_cmd hcmd = {
-        .id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0),
-        .flags = CMD_WANT_SKB,
-        .data[0] = &dhc_cmd,
-        .len[0] = sizeof(dhc_cmd),
-    };
-    struct iwl_dhc_cmd_resp* resp;
-    struct iwl_tpc_stats* stats;
-    int ret = 0;
+  struct iwl_dhc_tlc_whole_cmd dhc_cmd = {{0}};
+  struct iwl_host_cmd hcmd = {
+      .id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0),
+      .flags = CMD_WANT_SKB,
+      .data[0] = &dhc_cmd,
+      .len[0] = sizeof(dhc_cmd),
+  };
+  struct iwl_dhc_cmd_resp* resp;
+  struct iwl_tpc_stats* stats;
+  int ret = 0;
 
-    iwl_fw_build_dhc_tlc_cmd(&dhc_cmd, IWL_TLC_DEBUG_TPC_STATS, 0);
+  iwl_fw_build_dhc_tlc_cmd(&dhc_cmd, IWL_TLC_DEBUG_TPC_STATS, 0);
 
-    ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
-    if (ret) {
-        IWL_ERR(fwrt, "Failed to send TLC Debug command: %d\n", ret);
-        goto err;
-    }
+  ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+  if (ret) {
+    IWL_ERR(fwrt, "Failed to send TLC Debug command: %d\n", ret);
+    goto err;
+  }
 
-    if (!hcmd.resp_pkt) {
-        IWL_ERR(fwrt, "Response expected\n");
-        goto err;
-    }
+  if (!hcmd.resp_pkt) {
+    IWL_ERR(fwrt, "Response expected\n");
+    goto err;
+  }
 
-    if (iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*resp) + sizeof(*stats)) {
-        IWL_ERR(fwrt, "Invalid size for TPC stats request response (%u instead of %lu)\n",
-                iwl_rx_packet_payload_len(hcmd.resp_pkt), sizeof(*resp) + sizeof(*stats));
-        ret = -EINVAL;
-        goto err;
-    }
+  if (iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*resp) + sizeof(*stats)) {
+    IWL_ERR(fwrt, "Invalid size for TPC stats request response (%u instead of %lu)\n",
+            iwl_rx_packet_payload_len(hcmd.resp_pkt), sizeof(*resp) + sizeof(*stats));
+    ret = -EINVAL;
+    goto err;
+  }
 
-    resp = (struct iwl_dhc_cmd_resp*)hcmd.resp_pkt->data;
-    if (le32_to_cpu(resp->status) != 1) {
-        IWL_ERR(fwrt, "response status is not success: %d\n", resp->status);
-        ret = -EINVAL;
-        goto err;
-    }
+  resp = (struct iwl_dhc_cmd_resp*)hcmd.resp_pkt->data;
+  if (le32_to_cpu(resp->status) != 1) {
+    IWL_ERR(fwrt, "response status is not success: %d\n", resp->status);
+    ret = -EINVAL;
+    goto err;
+  }
 
-    stats = (struct iwl_tpc_stats*)resp->data;
+  stats = (struct iwl_tpc_stats*)resp->data;
 
-    return scnprintf(
-        buf, size, "tpc stats: no-tpc %u, step1 %u, step2 %u, step3 %u, step4 %u, step5 %u\n",
-        le32_to_cpu(stats->no_tpc), le32_to_cpu(stats->step[0]), le32_to_cpu(stats->step[1]),
-        le32_to_cpu(stats->step[2]), le32_to_cpu(stats->step[3]), le32_to_cpu(stats->step[4]));
+  return scnprintf(
+      buf, size, "tpc stats: no-tpc %u, step1 %u, step2 %u, step3 %u, step4 %u, step5 %u\n",
+      le32_to_cpu(stats->no_tpc), le32_to_cpu(stats->step[0]), le32_to_cpu(stats->step[1]),
+      le32_to_cpu(stats->step[2]), le32_to_cpu(stats->step[3]), le32_to_cpu(stats->step[4]));
 
 err:
-    return ret ?: -EIO;
+  return ret ?: -EIO;
 }
 
 FWRT_DEBUGFS_READ_FILE_OPS(tpc_stats, 150);
 
 static ssize_t iwl_dbgfs_ps_report_read(struct iwl_fw_runtime* fwrt, size_t size, char* buf,
                                         int mac_mask) {
-    __le32 cmd_data;
+  __le32 cmd_data;
 
-    struct iwl_dhc_cmd cmd = {
-        .length = cpu_to_le32(1),
-        .index_and_mask =
-            cpu_to_le32(DHC_TABLE_AUTOMATION | mac_mask | DHC_AUTO_UMAC_REPORT_POWER_STATISTICS),
-    };
+  struct iwl_dhc_cmd cmd = {
+      .length = cpu_to_le32(1),
+      .index_and_mask =
+          cpu_to_le32(DHC_TABLE_AUTOMATION | mac_mask | DHC_AUTO_UMAC_REPORT_POWER_STATISTICS),
+  };
 
-    struct iwl_host_cmd hcmd = {
-        .id = iwl_cmd_id(DEBUG_HOST_COMMAND, LEGACY_GROUP, 0),
-        .flags = CMD_WANT_SKB,
-        .data = {&cmd, &cmd_data},
-        .len = {sizeof(cmd), sizeof(cmd_data)},
-    };
-    struct iwl_dhc_cmd_resp* resp;
-    struct iwl_ps_report* report;
-    int ret = 0;
+  struct iwl_host_cmd hcmd = {
+      .id = iwl_cmd_id(DEBUG_HOST_COMMAND, LEGACY_GROUP, 0),
+      .flags = CMD_WANT_SKB,
+      .data = {&cmd, &cmd_data},
+      .len = {sizeof(cmd), sizeof(cmd_data)},
+  };
+  struct iwl_dhc_cmd_resp* resp;
+  struct iwl_ps_report* report;
+  int ret = 0;
 
-    ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
-    if (ret) {
-        IWL_ERR(fwrt, "Failed to send power-save report command: %d\n", ret);
-        goto err;
-    }
+  ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+  if (ret) {
+    IWL_ERR(fwrt, "Failed to send power-save report command: %d\n", ret);
+    goto err;
+  }
 
-    if (!hcmd.resp_pkt) {
-        IWL_ERR(fwrt, "Response expected\n");
-        goto err;
-    }
+  if (!hcmd.resp_pkt) {
+    IWL_ERR(fwrt, "Response expected\n");
+    goto err;
+  }
 
-    if (iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*resp) + sizeof(*report)) {
-        IWL_ERR(fwrt, "Invalid size for power-save report response (%u instead of %lu)\n",
-                iwl_rx_packet_payload_len(hcmd.resp_pkt), sizeof(*resp) + sizeof(*report));
-        ret = -EINVAL;
-        goto err;
-    }
+  if (iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*resp) + sizeof(*report)) {
+    IWL_ERR(fwrt, "Invalid size for power-save report response (%u instead of %lu)\n",
+            iwl_rx_packet_payload_len(hcmd.resp_pkt), sizeof(*resp) + sizeof(*report));
+    ret = -EINVAL;
+    goto err;
+  }
 
-    resp = (struct iwl_dhc_cmd_resp*)hcmd.resp_pkt->data;
-    if (le32_to_cpu(resp->status) != 1) {
-        IWL_ERR(fwrt, "response status is not success: %d\n", resp->status);
-        ret = -EINVAL;
-        goto err;
-    }
+  resp = (struct iwl_dhc_cmd_resp*)hcmd.resp_pkt->data;
+  if (le32_to_cpu(resp->status) != 1) {
+    IWL_ERR(fwrt, "response status is not success: %d\n", resp->status);
+    ret = -EINVAL;
+    goto err;
+  }
 
-    report = (struct iwl_ps_report*)resp->data;
+  report = (struct iwl_ps_report*)resp->data;
 
-    return scnprintf(
-        buf, size, "power-save report:\n%s %u\n%s %u\n%s %u\n%s %u\n%s %u\n%s %u\n%s %u\n",
-        "sleep_allowed_count", le32_to_cpu(report->sleep_allowed_count), "sleep_time",
-        le32_to_cpu(report->sleep_time), "max_sleep_time", le32_to_cpu(report->max_sleep_time),
-        "missed_beacon_count", le32_to_cpu(report->missed_beacon_count),
-        "missed_3_consecutive_beacon_count", le32_to_cpu(report->missed_3_consecutive_beacon_count),
-        "ps_flags", le32_to_cpu(report->ps_flags), "max_active_duration",
-        le32_to_cpu(report->max_active_duration));
+  return scnprintf(
+      buf, size, "power-save report:\n%s %u\n%s %u\n%s %u\n%s %u\n%s %u\n%s %u\n%s %u\n",
+      "sleep_allowed_count", le32_to_cpu(report->sleep_allowed_count), "sleep_time",
+      le32_to_cpu(report->sleep_time), "max_sleep_time", le32_to_cpu(report->max_sleep_time),
+      "missed_beacon_count", le32_to_cpu(report->missed_beacon_count),
+      "missed_3_consecutive_beacon_count", le32_to_cpu(report->missed_3_consecutive_beacon_count),
+      "ps_flags", le32_to_cpu(report->ps_flags), "max_active_duration",
+      le32_to_cpu(report->max_active_duration));
 
 err:
-    return ret ?: -EIO;
+  return ret ?: -EIO;
 }
 
 static ssize_t iwl_dbgfs_ps_report_umac_read(struct iwl_fw_runtime* fwrt, size_t size, char* buf) {
-    return iwl_dbgfs_ps_report_read(fwrt, size, buf, DHC_TARGET_UMAC);
+  return iwl_dbgfs_ps_report_read(fwrt, size, buf, DHC_TARGET_UMAC);
 }
 
 FWRT_DEBUGFS_READ_FILE_OPS(ps_report_umac, 224);
 
 static ssize_t iwl_dbgfs_ps_report_lmac_read(struct iwl_fw_runtime* fwrt, size_t size, char* buf) {
-    // LMAC value is 0 for backwards compatibility
-    return iwl_dbgfs_ps_report_read(fwrt, size, buf, 0);
+  // LMAC value is 0 for backwards compatibility
+  return iwl_dbgfs_ps_report_read(fwrt, size, buf, 0);
 }
 
 FWRT_DEBUGFS_READ_FILE_OPS(ps_report_lmac, 224);
@@ -519,20 +541,20 @@
 #endif
 
 int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime* fwrt, struct dentry* dbgfs_dir) {
-    INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
-    FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
-    FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
+  INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
+  FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
+  FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
-    if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)) {
-        FWRT_DEBUGFS_ADD_FILE(tpc_enable, dbgfs_dir, 0600);
-        FWRT_DEBUGFS_ADD_FILE(tpc_stats, dbgfs_dir, 0400);
-    }
-    FWRT_DEBUGFS_ADD_FILE(ps_report_umac, dbgfs_dir, 0400);
-    FWRT_DEBUGFS_ADD_FILE(ps_report_lmac, dbgfs_dir, 0400);
-    FWRT_DEBUGFS_ADD_FILE(send_dhc, dbgfs_dir, 0200);
+  if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)) {
+    FWRT_DEBUGFS_ADD_FILE(tpc_enable, dbgfs_dir, 0600);
+    FWRT_DEBUGFS_ADD_FILE(tpc_stats, dbgfs_dir, 0400);
+  }
+  FWRT_DEBUGFS_ADD_FILE(ps_report_umac, dbgfs_dir, 0400);
+  FWRT_DEBUGFS_ADD_FILE(ps_report_lmac, dbgfs_dir, 0400);
+  FWRT_DEBUGFS_ADD_FILE(send_dhc, dbgfs_dir, 0200);
 #endif
-    return 0;
+  return 0;
 err:
-    IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");
-    return -ENOMEM;
+  IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");
+  return -ENOMEM;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/debugfs.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/debugfs.h
index a69a5b5a..342cda6 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/debugfs.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/debugfs.h
@@ -43,7 +43,7 @@
 
 #else
 static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime* fwrt, struct dentry* dbgfs_dir) {
-    return 0;
+  return 0;
 }
 
 #endif /* CPTCFG_IWLWIFI_DEBUGFS */
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/file.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/file.h
index 9338bb5..b8636cf 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/file.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/file.h
@@ -38,26 +38,26 @@
 
 /* v1/v2 uCode file layout */
 struct iwl_ucode_header {
-    __le32 ver; /* major/minor/API/serial */
-    union {
-        struct {
-            __le32 inst_size;      /* bytes of runtime code */
-            __le32 data_size;      /* bytes of runtime data */
-            __le32 init_size;      /* bytes of init code */
-            __le32 init_data_size; /* bytes of init data */
-            __le32 boot_size;      /* bytes of bootstrap code */
-            uint8_t data[0];       /* in same order as sizes */
-        } v1;
-        struct {
-            __le32 build;          /* build number */
-            __le32 inst_size;      /* bytes of runtime code */
-            __le32 data_size;      /* bytes of runtime data */
-            __le32 init_size;      /* bytes of init code */
-            __le32 init_data_size; /* bytes of init data */
-            __le32 boot_size;      /* bytes of bootstrap code */
-            uint8_t data[0];       /* in same order as sizes */
-        } v2;
-    } u;
+  __le32 ver; /* major/minor/API/serial */
+  union {
+    struct {
+      __le32 inst_size;      /* bytes of runtime code */
+      __le32 data_size;      /* bytes of runtime data */
+      __le32 init_size;      /* bytes of init code */
+      __le32 init_data_size; /* bytes of init data */
+      __le32 boot_size;      /* bytes of bootstrap code */
+      uint8_t data[0];       /* in same order as sizes */
+    } v1;
+    struct {
+      __le32 build;          /* build number */
+      __le32 inst_size;      /* bytes of runtime code */
+      __le32 data_size;      /* bytes of runtime data */
+      __le32 init_size;      /* bytes of init code */
+      __le32 init_data_size; /* bytes of init data */
+      __le32 boot_size;      /* bytes of bootstrap code */
+      uint8_t data[0];       /* in same order as sizes */
+    } v2;
+  } u;
 };
 
 #define IWL_UCODE_INI_TLV_GROUP BIT(24)
@@ -70,64 +70,64 @@
  */
 
 enum iwl_ucode_tlv_type {
-    IWL_UCODE_TLV_INVALID = 0, /* unused */
-    IWL_UCODE_TLV_INST = 1,
-    IWL_UCODE_TLV_DATA = 2,
-    IWL_UCODE_TLV_INIT = 3,
-    IWL_UCODE_TLV_INIT_DATA = 4,
-    IWL_UCODE_TLV_BOOT = 5,
-    IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a uint32_t value */
-    IWL_UCODE_TLV_PAN = 7,
-    IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
-    IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
-    IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
-    IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
-    IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
-    IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
-    IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
-    IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
-    IWL_UCODE_TLV_WOWLAN_INST = 16,
-    IWL_UCODE_TLV_WOWLAN_DATA = 17,
-    IWL_UCODE_TLV_FLAGS = 18,
-    IWL_UCODE_TLV_SEC_RT = 19,
-    IWL_UCODE_TLV_SEC_INIT = 20,
-    IWL_UCODE_TLV_SEC_WOWLAN = 21,
-    IWL_UCODE_TLV_DEF_CALIB = 22,
-    IWL_UCODE_TLV_PHY_SKU = 23,
-    IWL_UCODE_TLV_SECURE_SEC_RT = 24,
-    IWL_UCODE_TLV_SECURE_SEC_INIT = 25,
-    IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
-    IWL_UCODE_TLV_NUM_OF_CPU = 27,
-    IWL_UCODE_TLV_CSCHEME = 28,
-    IWL_UCODE_TLV_API_CHANGES_SET = 29,
-    IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
-    IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
-    IWL_UCODE_TLV_PAGING = 32,
-    IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
-    /* 35 is unused */
-    IWL_UCODE_TLV_FW_VERSION = 36,
-    IWL_UCODE_TLV_FW_DBG_DEST = 38,
-    IWL_UCODE_TLV_FW_DBG_CONF = 39,
-    IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
-    IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
-    IWL_UCODE_TLV_FW_MEM_SEG = 51,
-    IWL_UCODE_TLV_IML = 52,
-    IWL_UCODE_TLV_FW_FMAC_API_VERSION = 53,
-    IWL_UCODE_TLV_FW_FMAC_RECOVERY_INFO = 59,
-    IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1,
-    IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2,
-    IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3,
-    IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP | 0x4,
-    IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP | 0x5,
+  IWL_UCODE_TLV_INVALID = 0, /* unused */
+  IWL_UCODE_TLV_INST = 1,
+  IWL_UCODE_TLV_DATA = 2,
+  IWL_UCODE_TLV_INIT = 3,
+  IWL_UCODE_TLV_INIT_DATA = 4,
+  IWL_UCODE_TLV_BOOT = 5,
+  IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a uint32_t value */
+  IWL_UCODE_TLV_PAN = 7,
+  IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
+  IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
+  IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
+  IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
+  IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
+  IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
+  IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
+  IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
+  IWL_UCODE_TLV_WOWLAN_INST = 16,
+  IWL_UCODE_TLV_WOWLAN_DATA = 17,
+  IWL_UCODE_TLV_FLAGS = 18,
+  IWL_UCODE_TLV_SEC_RT = 19,
+  IWL_UCODE_TLV_SEC_INIT = 20,
+  IWL_UCODE_TLV_SEC_WOWLAN = 21,
+  IWL_UCODE_TLV_DEF_CALIB = 22,
+  IWL_UCODE_TLV_PHY_SKU = 23,
+  IWL_UCODE_TLV_SECURE_SEC_RT = 24,
+  IWL_UCODE_TLV_SECURE_SEC_INIT = 25,
+  IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
+  IWL_UCODE_TLV_NUM_OF_CPU = 27,
+  IWL_UCODE_TLV_CSCHEME = 28,
+  IWL_UCODE_TLV_API_CHANGES_SET = 29,
+  IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
+  IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
+  IWL_UCODE_TLV_PAGING = 32,
+  IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
+  /* 35 is unused */
+  IWL_UCODE_TLV_FW_VERSION = 36,
+  IWL_UCODE_TLV_FW_DBG_DEST = 38,
+  IWL_UCODE_TLV_FW_DBG_CONF = 39,
+  IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
+  IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
+  IWL_UCODE_TLV_FW_MEM_SEG = 51,
+  IWL_UCODE_TLV_IML = 52,
+  IWL_UCODE_TLV_FW_FMAC_API_VERSION = 53,
+  IWL_UCODE_TLV_FW_FMAC_RECOVERY_INFO = 59,
+  IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1,
+  IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2,
+  IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3,
+  IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP | 0x4,
+  IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP | 0x5,
 
-    /* TLVs 0x1000-0x2000 are for internal driver usage */
-    IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
+  /* TLVs 0x1000-0x2000 are for internal driver usage */
+  IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
 };
 
 struct iwl_ucode_tlv {
-    __le32 type;   /* see above */
-    __le32 length; /* not including type/length fields */
-    uint8_t data[0];
+  __le32 type;   /* see above */
+  __le32 length; /* not including type/length fields */
+  uint8_t data[0];
 };
 
 #define IWL_TLV_FW_DBG_MAGIC 0xb5221389
@@ -135,26 +135,26 @@
 #define FW_VER_HUMAN_READABLE_SZ 64
 
 struct iwl_tlv_ucode_header {
-    /*
-     * The TLV style ucode header is distinguished from
-     * the v1/v2 style header by first four bytes being
-     * zero, as such is an invalid combination of
-     * major/minor/API/serial versions.
-     */
-    __le32 zero;
-    __le32 magic;
-    uint8_t human_readable[FW_VER_HUMAN_READABLE_SZ];
-    /* major/minor/API/serial or major in new format */
-    __le32 ver;
-    __le32 build;
-    __le64 ignore;
-    /*
-     * The data contained herein has a TLV layout,
-     * see above for the TLV header and types.
-     * Note that each TLV is padded to a length
-     * that is a multiple of 4 for alignment.
-     */
-    uint8_t data[0];
+  /*
+   * The TLV style ucode header is distinguished from
+   * the v1/v2 style header by first four bytes being
+   * zero, as such is an invalid combination of
+   * major/minor/API/serial versions.
+   */
+  __le32 zero;
+  __le32 magic;
+  uint8_t human_readable[FW_VER_HUMAN_READABLE_SZ];
+  /* major/minor/API/serial or major in new format */
+  __le32 ver;
+  __le32 build;
+  __le64 ignore;
+  /*
+   * The data contained herein has a TLV layout,
+   * see above for the TLV header and types.
+   * Note that each TLV is padded to a length
+   * that is a multiple of 4 for alignment.
+   */
+  uint8_t data[0];
 };
 
 /*
@@ -163,13 +163,13 @@
  * ability to get extension for: flags & capabilities from ucode binaries files
  */
 struct iwl_ucode_api {
-    __le32 api_index;
-    __le32 api_flags;
+  __le32 api_index;
+  __le32 api_flags;
 } __packed;
 
 struct iwl_ucode_capa {
-    __le32 api_index;
-    __le32 api_capa;
+  __le32 api_index;
+  __le32 api_capa;
 } __packed;
 
 /**
@@ -194,20 +194,20 @@
  * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
  */
 enum iwl_ucode_tlv_flag {
-    IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
-    IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
-    IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
-    IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
-    IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
-    IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
-    IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
-    IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
-    IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
-    IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
-    IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
-    IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
+  IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
+  IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
+  IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
+  IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
+  IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
+  IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
+  IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
+  IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
+  IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
+  IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
+  IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
+  IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
 #ifdef CPTCFG_IWLWIFI_LTE_COEX
-    IWL_UCODE_TLV_FLAGS_LTE_COEX = BIT(31),
+  IWL_UCODE_TLV_FLAGS_LTE_COEX = BIT(31),
 #endif
 };
 
@@ -244,32 +244,32 @@
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
 enum iwl_ucode_tlv_api {
-    /* API Set 0 */
-    IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
-    IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
-    IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
-    IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
-    IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
-    IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29,
-    IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
-    IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
-    /* API Set 1 */
-    IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
-    IWL_UCODE_TLV_API_OCE = (__force iwl_ucode_tlv_api_t)33,
-    IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
-    IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
-    IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL = (__force iwl_ucode_tlv_api_t)36,
-    IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38,
-    IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41,
-    IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = (__force iwl_ucode_tlv_api_t)42,
-    IWL_UCODE_TLV_API_NAN_NOTIF_V2 = (__force iwl_ucode_tlv_api_t)43,
-    IWL_UCODE_TLV_API_FRAG_EBS = (__force iwl_ucode_tlv_api_t)44,
-    IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45,
+  /* API Set 0 */
+  IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
+  IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
+  IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
+  IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
+  IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
+  IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29,
+  IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
+  IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
+  /* API Set 1 */
+  IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
+  IWL_UCODE_TLV_API_OCE = (__force iwl_ucode_tlv_api_t)33,
+  IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
+  IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
+  IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL = (__force iwl_ucode_tlv_api_t)36,
+  IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38,
+  IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41,
+  IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = (__force iwl_ucode_tlv_api_t)42,
+  IWL_UCODE_TLV_API_NAN_NOTIF_V2 = (__force iwl_ucode_tlv_api_t)43,
+  IWL_UCODE_TLV_API_FRAG_EBS = (__force iwl_ucode_tlv_api_t)44,
+  IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45,
 
-    NUM_IWL_UCODE_TLV_API
+  NUM_IWL_UCODE_TLV_API
 #ifdef __CHECKER__
-    /* sparse says it cannot increment the previous enum member */
-    = 128
+  /* sparse says it cannot increment the previous enum member */
+  = 128
 #endif
 };
 
@@ -357,68 +357,68 @@
  * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
  */
 enum iwl_ucode_tlv_capa {
-    IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
-    IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
-    IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
-    IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
-    IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5,
-    IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
-    IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
-    IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
-    IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10,
-    IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11,
-    IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12,
-    IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
-    IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17,
-    IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
-    IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
-    IWL_UCODE_TLV_CAPA_2G_COEX_SUPPORT = (__force iwl_ucode_tlv_capa_t)20,
-    IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
-    IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
-    IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
-    IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
-    IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
-    IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
-    IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
-    IWL_UCODE_TLV_CAPA_NAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)34,
-    IWL_UCODE_TLV_CAPA_UMAC_UPLOAD = (__force iwl_ucode_tlv_capa_t)35,
-    IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT = (__force iwl_ucode_tlv_capa_t)37,
-    IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
-    IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
-    IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
-    IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41,
-    IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43,
-    IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44,
-    IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45,
-    IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
-    IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
-    IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
-    IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68,
-    IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70,
-    IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71,
-    IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72,
-    IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = (__force iwl_ucode_tlv_capa_t)73,
-    IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74,
-    IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75,
-    IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76,
-    IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77,
-    IWL_UCODE_TLV_CAPA_LMAC_UPLOAD = (__force iwl_ucode_tlv_capa_t)79,
-    IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
-    IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
-    IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
+  IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
+  IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
+  IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
+  IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
+  IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5,
+  IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
+  IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
+  IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
+  IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10,
+  IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11,
+  IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12,
+  IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
+  IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17,
+  IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
+  IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
+  IWL_UCODE_TLV_CAPA_2G_COEX_SUPPORT = (__force iwl_ucode_tlv_capa_t)20,
+  IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
+  IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
+  IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
+  IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
+  IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
+  IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
+  IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
+  IWL_UCODE_TLV_CAPA_NAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)34,
+  IWL_UCODE_TLV_CAPA_UMAC_UPLOAD = (__force iwl_ucode_tlv_capa_t)35,
+  IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT = (__force iwl_ucode_tlv_capa_t)37,
+  IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
+  IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
+  IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
+  IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41,
+  IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43,
+  IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44,
+  IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45,
+  IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
+  IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
+  IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
+  IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68,
+  IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70,
+  IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71,
+  IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72,
+  IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = (__force iwl_ucode_tlv_capa_t)73,
+  IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74,
+  IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75,
+  IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76,
+  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77,
+  IWL_UCODE_TLV_CAPA_LMAC_UPLOAD = (__force iwl_ucode_tlv_capa_t)79,
+  IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
+  IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
+  IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
 #ifdef CPTCFG_IWLMVM_AX_SOFTAP_TESTMODE
-    IWL_UCODE_TLV_CAPA_AX_SAP_TM = (__force iwl_ucode_tlv_capa_t)85,
-    IWL_UCODE_TLV_CAPA_AX_SAP_TM_V2 = (__force iwl_ucode_tlv_capa_t)86,
+  IWL_UCODE_TLV_CAPA_AX_SAP_TM = (__force iwl_ucode_tlv_capa_t)85,
+  IWL_UCODE_TLV_CAPA_AX_SAP_TM_V2 = (__force iwl_ucode_tlv_capa_t)86,
 #endif
-    IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87,
-    IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88,
-    IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89,
-    IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
+  IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87,
+  IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88,
+  IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89,
+  IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
 
-    NUM_IWL_UCODE_TLV_CAPA
+  NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
-    /* sparse says it cannot increment the previous enum member */
-    = 128
+  /* sparse says it cannot increment the previous enum member */
+  = 128
 #endif
 };
 
@@ -452,22 +452,22 @@
  *      event triggers.
  */
 struct iwl_tlv_calib_ctrl {
-    __le32 flow_trigger;
-    __le32 event_trigger;
+  __le32 flow_trigger;
+  __le32 event_trigger;
 } __packed;
 
 enum iwl_fw_phy_cfg {
-    FW_PHY_CFG_RADIO_TYPE_POS = 0,
-    FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
-    FW_PHY_CFG_RADIO_STEP_POS = 2,
-    FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
-    FW_PHY_CFG_RADIO_DASH_POS = 4,
-    FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
-    FW_PHY_CFG_TX_CHAIN_POS = 16,
-    FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
-    FW_PHY_CFG_RX_CHAIN_POS = 20,
-    FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
-    FW_PHY_CFG_SHARED_CLK = BIT(31),
+  FW_PHY_CFG_RADIO_TYPE_POS = 0,
+  FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
+  FW_PHY_CFG_RADIO_STEP_POS = 2,
+  FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
+  FW_PHY_CFG_RADIO_DASH_POS = 4,
+  FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
+  FW_PHY_CFG_TX_CHAIN_POS = 16,
+  FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
+  FW_PHY_CFG_RX_CHAIN_POS = 20,
+  FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
+  FW_PHY_CFG_SHARED_CLK = BIT(31),
 };
 
 #define IWL_UCODE_MAX_CS 1
@@ -486,32 +486,32 @@
  * @hw_cipher: a HW cipher index used in host commands
  */
 struct iwl_fw_cipher_scheme {
-    __le32 cipher;
-    uint8_t flags;
-    uint8_t hdr_len;
-    uint8_t pn_len;
-    uint8_t pn_off;
-    uint8_t key_idx_off;
-    uint8_t key_idx_mask;
-    uint8_t key_idx_shift;
-    uint8_t mic_len;
-    uint8_t hw_cipher;
+  __le32 cipher;
+  uint8_t flags;
+  uint8_t hdr_len;
+  uint8_t pn_len;
+  uint8_t pn_off;
+  uint8_t key_idx_off;
+  uint8_t key_idx_mask;
+  uint8_t key_idx_shift;
+  uint8_t mic_len;
+  uint8_t hw_cipher;
 } __packed;
 
 enum iwl_fw_dbg_reg_operator {
-    CSR_ASSIGN,
-    CSR_SETBIT,
-    CSR_CLEARBIT,
+  CSR_ASSIGN,
+  CSR_SETBIT,
+  CSR_CLEARBIT,
 
-    PRPH_ASSIGN,
-    PRPH_SETBIT,
-    PRPH_CLEARBIT,
+  PRPH_ASSIGN,
+  PRPH_SETBIT,
+  PRPH_CLEARBIT,
 
-    INDIRECT_ASSIGN,
-    INDIRECT_SETBIT,
-    INDIRECT_CLEARBIT,
+  INDIRECT_ASSIGN,
+  INDIRECT_SETBIT,
+  INDIRECT_CLEARBIT,
 
-    PRPH_BLOCKBIT,
+  PRPH_BLOCKBIT,
 };
 
 /**
@@ -522,10 +522,10 @@
  * @val: value
  */
 struct iwl_fw_dbg_reg_op {
-    uint8_t op;
-    uint8_t reserved[3];
-    __le32 addr;
-    __le32 val;
+  uint8_t op;
+  uint8_t reserved[3];
+  __le32 addr;
+  __le32 val;
 } __packed;
 
 /**
@@ -537,10 +537,10 @@
  * @MIPI_MODE: monitor outputs the data through the MIPI interface
  */
 enum iwl_fw_dbg_monitor_mode {
-    SMEM_MODE = 0,
-    EXTERNAL_MODE = 1,
-    MARBH_MODE = 2,
-    MIPI_MODE = 3,
+  SMEM_MODE = 0,
+  EXTERNAL_MODE = 1,
+  MARBH_MODE = 2,
+  MIPI_MODE = 3,
 };
 
 /**
@@ -553,9 +553,9 @@
  * This parses IWL_UCODE_TLV_FW_MEM_SEG
  */
 struct iwl_fw_dbg_mem_seg_tlv {
-    __le32 data_type;
-    __le32 ofs;
-    __le32 len;
+  __le32 data_type;
+  __le32 ofs;
+  __le32 len;
 } __packed;
 
 /**
@@ -575,17 +575,17 @@
  * This parses IWL_UCODE_TLV_FW_DBG_DEST
  */
 struct iwl_fw_dbg_dest_tlv_v1 {
-    uint8_t version;
-    uint8_t monitor_mode;
-    uint8_t size_power;
-    uint8_t reserved;
-    __le32 base_reg;
-    __le32 end_reg;
-    __le32 write_ptr_reg;
-    __le32 wrap_count;
-    uint8_t base_shift;
-    uint8_t end_shift;
-    struct iwl_fw_dbg_reg_op reg_ops[0];
+  uint8_t version;
+  uint8_t monitor_mode;
+  uint8_t size_power;
+  uint8_t reserved;
+  __le32 base_reg;
+  __le32 end_reg;
+  __le32 write_ptr_reg;
+  __le32 wrap_count;
+  uint8_t base_shift;
+  uint8_t end_shift;
+  struct iwl_fw_dbg_reg_op reg_ops[0];
 } __packed;
 
 /* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
@@ -596,23 +596,23 @@
 #define IWL_M2S_UNIT_SIZE 0x100
 
 struct iwl_fw_dbg_dest_tlv {
-    uint8_t version;
-    uint8_t monitor_mode;
-    uint8_t size_power;
-    uint8_t reserved;
-    __le32 cfg_reg;
-    __le32 write_ptr_reg;
-    __le32 wrap_count;
-    uint8_t base_shift;
-    uint8_t size_shift;
-    struct iwl_fw_dbg_reg_op reg_ops[0];
+  uint8_t version;
+  uint8_t monitor_mode;
+  uint8_t size_power;
+  uint8_t reserved;
+  __le32 cfg_reg;
+  __le32 write_ptr_reg;
+  __le32 wrap_count;
+  uint8_t base_shift;
+  uint8_t size_shift;
+  struct iwl_fw_dbg_reg_op reg_ops[0];
 } __packed;
 
 struct iwl_fw_dbg_conf_hcmd {
-    uint8_t id;
-    uint8_t reserved;
-    __le16 len;
-    uint8_t data[0];
+  uint8_t id;
+  uint8_t reserved;
+  __le16 len;
+  uint8_t data[0];
 } __packed;
 
 /**
@@ -624,9 +624,9 @@
  *  collect only monitor data
  */
 enum iwl_fw_dbg_trigger_mode {
-    IWL_FW_DBG_TRIGGER_START = BIT(0),
-    IWL_FW_DBG_TRIGGER_STOP = BIT(1),
-    IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
+  IWL_FW_DBG_TRIGGER_START = BIT(0),
+  IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+  IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
 };
 
 /**
@@ -634,7 +634,7 @@
  * @IWL_FW_DBG_FORCE_RESTART: force a firmware restart
  */
 enum iwl_fw_dbg_trigger_flags {
-    IWL_FW_DBG_FORCE_RESTART = BIT(0),
+  IWL_FW_DBG_FORCE_RESTART = BIT(0),
 };
 
 /**
@@ -649,14 +649,14 @@
  * @IWL_FW_DBG_CONF_VIF_NAN: NAN device
  */
 enum iwl_fw_dbg_trigger_vif_type {
-    IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED,
-    IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC,
-    IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION,
-    IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP,
-    IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT,
-    IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO,
-    IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE,
-    IWL_FW_DBG_CONF_VIF_NAN = NL80211_IFTYPE_NAN,
+  IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED,
+  IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC,
+  IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION,
+  IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP,
+  IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT,
+  IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO,
+  IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE,
+  IWL_FW_DBG_CONF_VIF_NAN = NL80211_IFTYPE_NAN,
 };
 
 /**
@@ -678,18 +678,18 @@
  * @flags: &enum iwl_fw_dbg_trigger_flags
  */
 struct iwl_fw_dbg_trigger_tlv {
-    __le32 id;
-    __le32 vif_type;
-    __le32 stop_conf_ids;
-    __le32 stop_delay;
-    uint8_t mode;
-    uint8_t start_conf_id;
-    __le16 occurrences;
-    __le16 trig_dis_ms;
-    uint8_t flags;
-    uint8_t reserved[5];
+  __le32 id;
+  __le32 vif_type;
+  __le32 stop_conf_ids;
+  __le32 stop_delay;
+  uint8_t mode;
+  uint8_t start_conf_id;
+  __le16 occurrences;
+  __le16 trig_dis_ms;
+  uint8_t flags;
+  uint8_t reserved[5];
 
-    uint8_t data[0];
+  uint8_t data[0];
 } __packed;
 
 #define FW_DBG_START_FROM_ALIVE 0
@@ -706,12 +706,12 @@
  * @reserved2: reserved
  */
 struct iwl_fw_dbg_trigger_missed_bcon {
-    __le32 stop_consec_missed_bcon;
-    __le32 stop_consec_missed_bcon_since_rx;
-    __le32 reserved2[2];
-    __le32 start_consec_missed_bcon;
-    __le32 start_consec_missed_bcon_since_rx;
-    __le32 reserved1[2];
+  __le32 stop_consec_missed_bcon;
+  __le32 stop_consec_missed_bcon_since_rx;
+  __le32 reserved2[2];
+  __le32 start_consec_missed_bcon;
+  __le32 start_consec_missed_bcon_since_rx;
+  __le32 reserved1[2];
 } __packed;
 
 /**
@@ -719,10 +719,10 @@
  * cmds: the list of commands to trigger the collection on
  */
 struct iwl_fw_dbg_trigger_cmd {
-    struct cmd {
-        uint8_t cmd_id;
-        uint8_t group_id;
-    } __packed cmds[16];
+  struct cmd {
+    uint8_t cmd_id;
+    uint8_t group_id;
+  } __packed cmds[16];
 } __packed;
 
 /**
@@ -733,10 +733,10 @@
  * @start_threshold: the threshold above which to start recording
  */
 struct iwl_fw_dbg_trigger_stats {
-    __le32 stop_offset;
-    __le32 stop_threshold;
-    __le32 start_offset;
-    __le32 start_threshold;
+  __le32 stop_offset;
+  __le32 stop_threshold;
+  __le32 start_offset;
+  __le32 start_threshold;
 } __packed;
 
 /**
@@ -744,7 +744,7 @@
  * @rssi: RSSI value to trigger at
  */
 struct iwl_fw_dbg_trigger_low_rssi {
-    __le32 rssi;
+  __le32 rssi;
 } __packed;
 
 /**
@@ -765,25 +765,25 @@
  * @start_connection_loss: number of connection loss to start recording
  */
 struct iwl_fw_dbg_trigger_mlme {
-    uint8_t stop_auth_denied;
-    uint8_t stop_auth_timeout;
-    uint8_t stop_rx_deauth;
-    uint8_t stop_tx_deauth;
+  uint8_t stop_auth_denied;
+  uint8_t stop_auth_timeout;
+  uint8_t stop_rx_deauth;
+  uint8_t stop_tx_deauth;
 
-    uint8_t stop_assoc_denied;
-    uint8_t stop_assoc_timeout;
-    uint8_t stop_connection_loss;
-    uint8_t reserved;
+  uint8_t stop_assoc_denied;
+  uint8_t stop_assoc_timeout;
+  uint8_t stop_connection_loss;
+  uint8_t reserved;
 
-    uint8_t start_auth_denied;
-    uint8_t start_auth_timeout;
-    uint8_t start_rx_deauth;
-    uint8_t start_tx_deauth;
+  uint8_t start_auth_denied;
+  uint8_t start_auth_timeout;
+  uint8_t start_rx_deauth;
+  uint8_t start_tx_deauth;
 
-    uint8_t start_assoc_denied;
-    uint8_t start_assoc_timeout;
-    uint8_t start_connection_loss;
-    uint8_t reserved2;
+  uint8_t start_assoc_denied;
+  uint8_t start_assoc_timeout;
+  uint8_t start_connection_loss;
+  uint8_t reserved2;
 } __packed;
 
 /**
@@ -798,15 +798,15 @@
  * @tdls: timeout for the queues of a TDLS station in ms
  */
 struct iwl_fw_dbg_trigger_txq_timer {
-    __le32 command_queue;
-    __le32 bss;
-    __le32 softap;
-    __le32 p2p_go;
-    __le32 p2p_client;
-    __le32 p2p_device;
-    __le32 ibss;
-    __le32 tdls;
-    __le32 reserved[4];
+  __le32 command_queue;
+  __le32 bss;
+  __le32 softap;
+  __le32 p2p_go;
+  __le32 p2p_client;
+  __le32 p2p_device;
+  __le32 ibss;
+  __le32 tdls;
+  __le32 reserved[4];
 } __packed;
 
 /**
@@ -818,11 +818,11 @@
  *
  */
 struct iwl_fw_dbg_trigger_time_event {
-    struct {
-        __le32 id;
-        __le32 action_bitmap;
-        __le32 status_bitmap;
-    } __packed time_events[16];
+  struct {
+    __le32 id;
+    __le32 action_bitmap;
+    __le32 status_bitmap;
+  } __packed time_events[16];
 } __packed;
 
 /**
@@ -843,13 +843,13 @@
  *  when a frame times out in the reodering buffer.
  */
 struct iwl_fw_dbg_trigger_ba {
-    __le16 rx_ba_start;
-    __le16 rx_ba_stop;
-    __le16 tx_ba_start;
-    __le16 tx_ba_stop;
-    __le16 rx_bar;
-    __le16 tx_bar;
-    __le16 frame_timeout;
+  __le16 rx_ba_start;
+  __le16 rx_ba_stop;
+  __le16 tx_ba_start;
+  __le16 tx_ba_stop;
+  __le16 rx_bar;
+  __le16 tx_bar;
+  __le16 frame_timeout;
 } __packed;
 
 #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS
@@ -862,11 +862,11 @@
  * @reserved: reserved.
  */
 struct iwl_fw_dbg_trigger_tx_latency {
-    __le32 thrshold;
-    __le16 tid_bitmap;
-    __le16 mode;
-    __le32 window;
-    __le32 reserved[4];
+  __le32 thrshold;
+  __le16 tid_bitmap;
+  __le16 mode;
+  __le32 window;
+  __le32 reserved[4];
 } __packed;
 #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */
 
@@ -877,10 +877,10 @@
  * @peer: the TDLS peer to trigger the collection on
  */
 struct iwl_fw_dbg_trigger_tdls {
-    uint8_t action_bitmap;
-    uint8_t peer_mode;
-    uint8_t peer[ETH_ALEN];
-    uint8_t reserved[4];
+  uint8_t action_bitmap;
+  uint8_t peer_mode;
+  uint8_t peer[ETH_ALEN];
+  uint8_t reserved[4];
 } __packed;
 
 /**
@@ -889,11 +889,11 @@
  * @statuses: the list of statuses to trigger the collection on
  */
 struct iwl_fw_dbg_trigger_tx_status {
-    struct tx_status {
-        uint8_t status;
-        uint8_t reserved[3];
-    } __packed statuses[16];
-    __le32 reserved[2];
+  struct tx_status {
+    uint8_t status;
+    uint8_t reserved[3];
+  } __packed statuses[16];
+  __le32 reserved[2];
 } __packed;
 
 /**
@@ -908,11 +908,11 @@
  * %FW_DBG_CONF_MAX configuration per run.
  */
 struct iwl_fw_dbg_conf_tlv {
-    uint8_t id;
-    uint8_t usniffer;
-    uint8_t reserved;
-    uint8_t num_of_hcmds;
-    struct iwl_fw_dbg_conf_hcmd hcmd;
+  uint8_t id;
+  uint8_t usniffer;
+  uint8_t reserved;
+  uint8_t num_of_hcmds;
+  struct iwl_fw_dbg_conf_hcmd hcmd;
 } __packed;
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_FILE_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/img.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/img.h
index cc562e3..4381d78 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/img.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/img.h
@@ -37,7 +37,6 @@
 #define SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_IMG_H_
 
 #include "api/dbg-tlv.h"
-
 #include "error-dump.h"
 #include "file.h"
 
@@ -52,11 +51,11 @@
  * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image
  */
 enum iwl_ucode_type {
-    IWL_UCODE_REGULAR,
-    IWL_UCODE_INIT,
-    IWL_UCODE_WOWLAN,
-    IWL_UCODE_REGULAR_USNIFFER,
-    IWL_UCODE_TYPE_MAX,
+  IWL_UCODE_REGULAR,
+  IWL_UCODE_INIT,
+  IWL_UCODE_WOWLAN,
+  IWL_UCODE_REGULAR_USNIFFER,
+  IWL_UCODE_TYPE_MAX,
 };
 
 /*
@@ -67,46 +66,46 @@
  * some debugging code accesses that.
  */
 enum iwl_ucode_sec {
-    IWL_UCODE_SECTION_DATA,
-    IWL_UCODE_SECTION_INST,
+  IWL_UCODE_SECTION_DATA,
+  IWL_UCODE_SECTION_INST,
 };
 
 struct iwl_ucode_capabilities {
-    uint32_t max_probe_length;
-    uint32_t n_scan_channels;
-    uint32_t standard_phy_calibration_size;
-    uint32_t flags;
+  uint32_t max_probe_length;
+  uint32_t n_scan_channels;
+  uint32_t standard_phy_calibration_size;
+  uint32_t flags;
 #if IS_ENABLED(CPTCFG_IWLFMAC)
-    uint32_t fmac_api_version;
-    uint32_t fmac_error_log_addr;
-    uint32_t fmac_error_log_size;
+  uint32_t fmac_api_version;
+  uint32_t fmac_error_log_addr;
+  uint32_t fmac_error_log_size;
 #endif
-    unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
-    unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
+  unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
+  unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
 };
 
 static inline bool fw_has_api(const struct iwl_ucode_capabilities* capabilities,
                               iwl_ucode_tlv_api_t api) {
-    return test_bit((__force long)api, capabilities->_api);
+  return test_bit((__force long)api, capabilities->_api);
 }
 
 static inline bool fw_has_capa(const struct iwl_ucode_capabilities* capabilities,
                                iwl_ucode_tlv_capa_t capa) {
-    return test_bit((__force long)capa, capabilities->_capa);
+  return test_bit((__force long)capa, capabilities->_capa);
 }
 
 /* one for each uCode image (inst/data, init/runtime/wowlan) */
 struct fw_desc {
-    const void* data; /* vmalloc'ed data */
-    uint32_t len;     /* size in bytes */
-    uint32_t offset;  /* offset in the device */
+  const void* data; /* vmalloc'ed data */
+  uint32_t len;     /* size in bytes */
+  uint32_t offset;  /* offset in the device */
 };
 
 struct fw_img {
-    struct fw_desc* sec;
-    int num_sec;
-    bool is_dual_cpus;
-    uint32_t paging_mem_size;
+  struct fw_desc* sec;
+  int num_sec;
+  bool is_dual_cpus;
+  uint32_t paging_mem_size;
 };
 
 /*
@@ -146,9 +145,9 @@
  * @fw_paging_size: page size
  */
 struct iwl_fw_paging {
-    dma_addr_t fw_paging_phys;
-    struct page* fw_paging_block;
-    uint32_t fw_paging_size;
+  dma_addr_t fw_paging_phys;
+  struct page* fw_paging_block;
+  uint32_t fw_paging_size;
 };
 
 /**
@@ -157,8 +156,8 @@
  * @cs: cipher scheme entries
  */
 struct iwl_fw_cscheme_list {
-    uint8_t size;
-    struct iwl_fw_cipher_scheme cs[];
+  uint8_t size;
+  struct iwl_fw_cipher_scheme cs[];
 } __packed;
 
 /**
@@ -167,10 +166,10 @@
  * @IWL_FW_MVM: MVM firmware
  */
 enum iwl_fw_type {
-    IWL_FW_DVM,
-    IWL_FW_MVM,
+  IWL_FW_DVM,
+  IWL_FW_MVM,
 #if IS_ENABLED(CPTCFG_IWLFMAC)
-    IWL_FW_FMAC,
+  IWL_FW_FMAC,
 #endif
 };
 
@@ -187,14 +186,14 @@
  * @dump_mask: bitmask of dump regions
  */
 struct iwl_fw_dbg {
-    struct iwl_fw_dbg_dest_tlv_v1* dest_tlv;
-    uint8_t n_dest_reg;
-    struct iwl_fw_dbg_conf_tlv* conf_tlv[FW_DBG_CONF_MAX];
-    struct iwl_fw_dbg_trigger_tlv* trigger_tlv[FW_DBG_TRIGGER_MAX];
-    size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX];
-    struct iwl_fw_dbg_mem_seg_tlv* mem_tlv;
-    size_t n_mem_tlv;
-    uint32_t dump_mask;
+  struct iwl_fw_dbg_dest_tlv_v1* dest_tlv;
+  uint8_t n_dest_reg;
+  struct iwl_fw_dbg_conf_tlv* conf_tlv[FW_DBG_CONF_MAX];
+  struct iwl_fw_dbg_trigger_tlv* trigger_tlv[FW_DBG_TRIGGER_MAX];
+  size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+  struct iwl_fw_dbg_mem_seg_tlv* mem_tlv;
+  size_t n_mem_tlv;
+  uint32_t dump_mask;
 };
 
 /**
@@ -205,10 +204,10 @@
  * @conf_ext: second trigger, contains extra regions to dump
  */
 struct iwl_fw_ini_active_triggers {
-    bool active;
-    enum iwl_fw_ini_apply_point apply_point;
-    struct iwl_fw_ini_trigger* conf;
-    struct iwl_fw_ini_trigger* conf_ext;
+  bool active;
+  enum iwl_fw_ini_apply_point apply_point;
+  struct iwl_fw_ini_trigger* conf;
+  struct iwl_fw_ini_trigger* conf_ext;
 };
 
 /**
@@ -217,8 +216,8 @@
  * @apply_point: apply point where it became active
  */
 struct iwl_fw_ini_active_regs {
-    struct iwl_fw_ini_region_cfg* reg;
-    enum iwl_fw_ini_apply_point apply_point;
+  struct iwl_fw_ini_region_cfg* reg;
+  enum iwl_fw_ini_apply_point apply_point;
 };
 
 /**
@@ -243,62 +242,66 @@
  *  we get the ALIVE from the uCode
  */
 struct iwl_fw {
-    uint32_t ucode_ver;
+  uint32_t ucode_ver;
 
-    char fw_version[ETHTOOL_FWVERS_LEN];
+  char fw_version[ETHTOOL_FWVERS_LEN];
 
-    /* ucode images */
-    struct fw_img img[IWL_UCODE_TYPE_MAX];
-    size_t iml_len;
-    uint8_t* iml;
+  /* ucode images */
+  struct fw_img img[IWL_UCODE_TYPE_MAX];
+  size_t iml_len;
+  uint8_t* iml;
 
-    struct iwl_ucode_capabilities ucode_capa;
-    bool enhance_sensitivity_table;
+  struct iwl_ucode_capabilities ucode_capa;
+  bool enhance_sensitivity_table;
 
-    uint32_t init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
-    uint32_t inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+  uint32_t init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+  uint32_t inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
 
-    struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
-    uint32_t phy_config;
-    uint8_t valid_tx_ant;
-    uint8_t valid_rx_ant;
+  struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
+  uint32_t phy_config;
+  uint8_t valid_tx_ant;
+  uint8_t valid_rx_ant;
 
-    enum iwl_fw_type type;
+  enum iwl_fw_type type;
 
-    struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
-    uint8_t human_readable[FW_VER_HUMAN_READABLE_SZ];
+  struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
+  uint8_t human_readable[FW_VER_HUMAN_READABLE_SZ];
 
-    struct iwl_fw_dbg dbg;
+  struct iwl_fw_dbg dbg;
 };
 
 static inline const char* get_fw_dbg_mode_string(int mode) {
-    switch (mode) {
+  switch (mode) {
     case SMEM_MODE:
-        return "SMEM";
+      return "SMEM";
     case EXTERNAL_MODE:
-        return "EXTERNAL_DRAM";
+      return "EXTERNAL_DRAM";
     case MARBH_MODE:
-        return "MARBH";
+      return "MARBH";
     case MIPI_MODE:
-        return "MIPI";
+      return "MIPI";
     default:
-        return "UNKNOWN";
-    }
+      return "UNKNOWN";
+  }
 }
 
 static inline bool iwl_fw_dbg_conf_usniffer(const struct iwl_fw* fw, uint8_t id) {
-    const struct iwl_fw_dbg_conf_tlv* conf_tlv = fw->dbg.conf_tlv[id];
+  const struct iwl_fw_dbg_conf_tlv* conf_tlv = fw->dbg.conf_tlv[id];
 
-    if (!conf_tlv) { return false; }
+  if (!conf_tlv) {
+    return false;
+  }
 
-    return conf_tlv->usniffer;
+  return conf_tlv->usniffer;
 }
 
 static inline const struct fw_img* iwl_get_ucode_image(const struct iwl_fw* fw,
                                                        enum iwl_ucode_type ucode_type) {
-    if (ucode_type >= IWL_UCODE_TYPE_MAX) { return NULL; }
+  if (ucode_type >= IWL_UCODE_TYPE_MAX) {
+    return NULL;
+  }
 
-    return &fw->img[ucode_type];
+  return &fw->img[ucode_type];
 }
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_FW_IMG_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/init.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/init.c
index b7bb60e..61f8df8 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/init.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/init.c
@@ -38,23 +38,19 @@
 void iwl_fw_runtime_init(struct iwl_fw_runtime* fwrt, struct iwl_trans* trans,
                          const struct iwl_fw* fw, const struct iwl_fw_runtime_ops* ops,
                          void* ops_ctx, struct dentry* dbgfs_dir) {
-    memset(fwrt, 0, sizeof(*fwrt));
-    fwrt->trans = trans;
-    fwrt->fw = fw;
-    fwrt->dev = trans->dev;
-    fwrt->dump.conf = FW_DBG_INVALID;
-    fwrt->ops = ops;
-    fwrt->ops_ctx = ops_ctx;
+  memset(fwrt, 0, sizeof(*fwrt));
+  fwrt->trans = trans;
+  fwrt->fw = fw;
+  fwrt->dev = trans->dev;
+  fwrt->dump.conf = FW_DBG_INVALID;
+  fwrt->ops = ops;
+  fwrt->ops_ctx = ops_ctx;
 #if 0   // NEEDS_PORTING
     INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
 #endif  // NEEDS_PORTING
-    iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
+  iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
 }
 
-void iwl_fw_runtime_suspend(struct iwl_fw_runtime* fwrt) {
-    iwl_fw_suspend_timestamp(fwrt);
-}
+void iwl_fw_runtime_suspend(struct iwl_fw_runtime* fwrt) { iwl_fw_suspend_timestamp(fwrt); }
 
-void iwl_fw_runtime_resume(struct iwl_fw_runtime* fwrt) {
-    iwl_fw_resume_timestamp(fwrt);
-}
+void iwl_fw_runtime_resume(struct iwl_fw_runtime* fwrt) { iwl_fw_resume_timestamp(fwrt); }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/notif-wait.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/notif-wait.c
index 3e2666a..3e39f50 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/notif-wait.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/notif-wait.c
@@ -31,69 +31,74 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include "notif-wait.h"
+
 #include <linux/export.h>
 #include <linux/sched.h>
 
 #include "iwl-drv.h"
-#include "notif-wait.h"
 
 void iwl_notification_wait_init(struct iwl_notif_wait_data* notif_wait) {
-    spin_lock_init(&notif_wait->notif_wait_lock);
-    INIT_LIST_HEAD(&notif_wait->notif_waits);
-    init_waitqueue_head(&notif_wait->notif_waitq);
+  spin_lock_init(&notif_wait->notif_wait_lock);
+  INIT_LIST_HEAD(&notif_wait->notif_waits);
+  init_waitqueue_head(&notif_wait->notif_waitq);
 }
 IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
 
 bool iwl_notification_wait(struct iwl_notif_wait_data* notif_wait, struct iwl_rx_packet* pkt) {
-    bool triggered = false;
+  bool triggered = false;
 
-    if (!list_empty(&notif_wait->notif_waits)) {
-        struct iwl_notification_wait* w;
+  if (!list_empty(&notif_wait->notif_waits)) {
+    struct iwl_notification_wait* w;
 
-        spin_lock(&notif_wait->notif_wait_lock);
-        list_for_each_entry(w, &notif_wait->notif_waits, list) {
-            int i;
-            bool found = false;
+    spin_lock(&notif_wait->notif_wait_lock);
+    list_for_each_entry(w, &notif_wait->notif_waits, list) {
+      int i;
+      bool found = false;
 
-            /*
-             * If it already finished (triggered) or has been
-             * aborted then don't evaluate it again to avoid races,
-             * Otherwise the function could be called again even
-             * though it returned true before
-             */
-            if (w->triggered || w->aborted) { continue; }
+      /*
+       * If it already finished (triggered) or has been
+       * aborted then don't evaluate it again to avoid races,
+       * Otherwise the function could be called again even
+       * though it returned true before
+       */
+      if (w->triggered || w->aborted) {
+        continue;
+      }
 
-            for (i = 0; i < w->n_cmds; i++) {
-                uint16_t rec_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+      for (i = 0; i < w->n_cmds; i++) {
+        uint16_t rec_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
 
-                if (w->cmds[i] == rec_id ||
-                    (!iwl_cmd_groupid(w->cmds[i]) && DEF_ID(w->cmds[i]) == rec_id)) {
-                    found = true;
-                    break;
-                }
-            }
-            if (!found) { continue; }
-
-            if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) {
-                w->triggered = true;
-                triggered = true;
-            }
+        if (w->cmds[i] == rec_id ||
+            (!iwl_cmd_groupid(w->cmds[i]) && DEF_ID(w->cmds[i]) == rec_id)) {
+          found = true;
+          break;
         }
-        spin_unlock(&notif_wait->notif_wait_lock);
-    }
+      }
+      if (!found) {
+        continue;
+      }
 
-    return triggered;
+      if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) {
+        w->triggered = true;
+        triggered = true;
+      }
+    }
+    spin_unlock(&notif_wait->notif_wait_lock);
+  }
+
+  return triggered;
 }
 IWL_EXPORT_SYMBOL(iwl_notification_wait);
 
 void iwl_abort_notification_waits(struct iwl_notif_wait_data* notif_wait) {
-    struct iwl_notification_wait* wait_entry;
+  struct iwl_notification_wait* wait_entry;
 
-    spin_lock(&notif_wait->notif_wait_lock);
-    list_for_each_entry(wait_entry, &notif_wait->notif_waits, list) wait_entry->aborted = true;
-    spin_unlock(&notif_wait->notif_wait_lock);
+  spin_lock(&notif_wait->notif_wait_lock);
+  list_for_each_entry(wait_entry, &notif_wait->notif_waits, list) wait_entry->aborted = true;
+  spin_unlock(&notif_wait->notif_wait_lock);
 
-    wake_up_all(&notif_wait->notif_waitq);
+  wake_up_all(&notif_wait->notif_waitq);
 }
 IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
 
@@ -103,42 +108,48 @@
                                 bool (*fn)(struct iwl_notif_wait_data* notif_wait,
                                            struct iwl_rx_packet* pkt, void* data),
                                 void* fn_data) {
-    if (WARN_ON(n_cmds > MAX_NOTIF_CMDS)) { n_cmds = MAX_NOTIF_CMDS; }
+  if (WARN_ON(n_cmds > MAX_NOTIF_CMDS)) {
+    n_cmds = MAX_NOTIF_CMDS;
+  }
 
-    wait_entry->fn = fn;
-    wait_entry->fn_data = fn_data;
-    wait_entry->n_cmds = n_cmds;
-    memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(uint16_t));
-    wait_entry->triggered = false;
-    wait_entry->aborted = false;
+  wait_entry->fn = fn;
+  wait_entry->fn_data = fn_data;
+  wait_entry->n_cmds = n_cmds;
+  memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(uint16_t));
+  wait_entry->triggered = false;
+  wait_entry->aborted = false;
 
-    spin_lock_bh(&notif_wait->notif_wait_lock);
-    list_add(&wait_entry->list, &notif_wait->notif_waits);
-    spin_unlock_bh(&notif_wait->notif_wait_lock);
+  spin_lock_bh(&notif_wait->notif_wait_lock);
+  list_add(&wait_entry->list, &notif_wait->notif_waits);
+  spin_unlock_bh(&notif_wait->notif_wait_lock);
 }
 IWL_EXPORT_SYMBOL(iwl_init_notification_wait);
 
 void iwl_remove_notification(struct iwl_notif_wait_data* notif_wait,
                              struct iwl_notification_wait* wait_entry) {
-    spin_lock_bh(&notif_wait->notif_wait_lock);
-    list_del(&wait_entry->list);
-    spin_unlock_bh(&notif_wait->notif_wait_lock);
+  spin_lock_bh(&notif_wait->notif_wait_lock);
+  list_del(&wait_entry->list);
+  spin_unlock_bh(&notif_wait->notif_wait_lock);
 }
 IWL_EXPORT_SYMBOL(iwl_remove_notification);
 
 int iwl_wait_notification(struct iwl_notif_wait_data* notif_wait,
                           struct iwl_notification_wait* wait_entry, unsigned long timeout) {
-    int ret;
+  int ret;
 
-    ret = wait_event_timeout(notif_wait->notif_waitq, wait_entry->triggered || wait_entry->aborted,
-                             timeout);
+  ret = wait_event_timeout(notif_wait->notif_waitq, wait_entry->triggered || wait_entry->aborted,
+                           timeout);
 
-    iwl_remove_notification(notif_wait, wait_entry);
+  iwl_remove_notification(notif_wait, wait_entry);
 
-    if (wait_entry->aborted) { return -EIO; }
+  if (wait_entry->aborted) {
+    return -EIO;
+  }
 
-    /* return value is always >= 0 */
-    if (ret <= 0) { return -ETIMEDOUT; }
-    return 0;
+  /* return value is always >= 0 */
+  if (ret <= 0) {
+    return -ETIMEDOUT;
+  }
+  return 0;
 }
 IWL_EXPORT_SYMBOL(iwl_wait_notification);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/notif-wait.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/notif-wait.h
index 66a791e..c1172c6 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/notif-wait.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/notif-wait.h
@@ -41,8 +41,8 @@
 // NEEDS_PORTING: seems that this is exactly what sync_completion_t is doing.
 
 struct iwl_notif_wait_data {
-    struct list_node notif_waits;
-    mtx_t notif_wait_lock;
+  struct list_node notif_waits;
+  mtx_t notif_wait_lock;
 #if 0   // NEEDS_PORTING
     wait_queue_head_t notif_waitq;
 #endif  // NEEDS_PORTING
@@ -75,14 +75,14 @@
  * the code for them.
  */
 struct iwl_notification_wait {
-    list_node_t list;
+  list_node_t list;
 
-    bool (*fn)(struct iwl_notif_wait_data* notif_data, struct iwl_rx_packet* pkt, void* data);
-    void* fn_data;
+  bool (*fn)(struct iwl_notif_wait_data* notif_data, struct iwl_rx_packet* pkt, void* data);
+  void* fn_data;
 
-    uint16_t cmds[MAX_NOTIF_CMDS];
-    uint8_t n_cmds;
-    bool triggered, aborted;
+  uint16_t cmds[MAX_NOTIF_CMDS];
+  uint8_t n_cmds;
+  bool triggered, aborted;
 };
 
 /* caller functions */
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/paging.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/paging.c
index d5263d5..052a3ff0 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/paging.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/paging.c
@@ -38,253 +38,261 @@
 #include "runtime.h"
 
 void iwl_free_fw_paging(struct iwl_fw_runtime* fwrt) {
-    int i;
+  int i;
 
-    if (!fwrt->fw_paging_db[0].fw_paging_block) { return; }
+  if (!fwrt->fw_paging_db[0].fw_paging_block) {
+    return;
+  }
 
-    for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
-        struct iwl_fw_paging* paging = &fwrt->fw_paging_db[i];
+  for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
+    struct iwl_fw_paging* paging = &fwrt->fw_paging_db[i];
 
-        if (!paging->fw_paging_block) {
-            IWL_DEBUG_FW(fwrt, "Paging: block %d already freed, continue to next page\n", i);
+    if (!paging->fw_paging_block) {
+      IWL_DEBUG_FW(fwrt, "Paging: block %d already freed, continue to next page\n", i);
 
-            continue;
-        }
-        dma_unmap_page(fwrt->trans->dev, paging->fw_paging_phys, paging->fw_paging_size,
-                       DMA_BIDIRECTIONAL);
-
-        __free_pages(paging->fw_paging_block, get_order(paging->fw_paging_size));
-        paging->fw_paging_block = NULL;
+      continue;
     }
+    dma_unmap_page(fwrt->trans->dev, paging->fw_paging_phys, paging->fw_paging_size,
+                   DMA_BIDIRECTIONAL);
 
-    memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db));
+    __free_pages(paging->fw_paging_block, get_order(paging->fw_paging_size));
+    paging->fw_paging_block = NULL;
+  }
+
+  memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db));
 }
 IWL_EXPORT_SYMBOL(iwl_free_fw_paging);
 
 static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime* fwrt, const struct fw_img* image) {
-    struct page* block;
-    dma_addr_t phys = 0;
-    int blk_idx, order, num_of_pages, size;
+  struct page* block;
+  dma_addr_t phys = 0;
+  int blk_idx, order, num_of_pages, size;
 
-    if (fwrt->fw_paging_db[0].fw_paging_block) { return 0; }
+  if (fwrt->fw_paging_db[0].fw_paging_block) {
+    return 0;
+  }
 
-    /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
-    BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
+  /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
+  BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
 
-    num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
-    fwrt->num_of_paging_blk = DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
-    fwrt->num_of_pages_in_last_blk =
-        num_of_pages - NUM_OF_PAGE_PER_GROUP * (fwrt->num_of_paging_blk - 1);
+  num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
+  fwrt->num_of_paging_blk = DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
+  fwrt->num_of_pages_in_last_blk =
+      num_of_pages - NUM_OF_PAGE_PER_GROUP * (fwrt->num_of_paging_blk - 1);
 
-    IWL_DEBUG_FW(fwrt,
-                 "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last "
-                 "block holds %d pages\n",
-                 fwrt->num_of_paging_blk, fwrt->num_of_pages_in_last_blk);
+  IWL_DEBUG_FW(fwrt,
+               "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last "
+               "block holds %d pages\n",
+               fwrt->num_of_paging_blk, fwrt->num_of_pages_in_last_blk);
 
-    /*
-     * Allocate CSS and paging blocks in dram.
-     */
-    for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
-        /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
-        size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
-        order = get_order(size);
-        block = alloc_pages(GFP_KERNEL, order);
-        if (!block) {
-            /* free all the previous pages since we failed */
-            iwl_free_fw_paging(fwrt);
-            return -ENOMEM;
-        }
-
-        fwrt->fw_paging_db[blk_idx].fw_paging_block = block;
-        fwrt->fw_paging_db[blk_idx].fw_paging_size = size;
-
-        phys = dma_map_page(fwrt->trans->dev, block, 0, PAGE_SIZE << order, DMA_BIDIRECTIONAL);
-        if (dma_mapping_error(fwrt->trans->dev, phys)) {
-            /*
-             * free the previous pages and the current one
-             * since we failed to map_page.
-             */
-            iwl_free_fw_paging(fwrt);
-            return -ENOMEM;
-        }
-        fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys;
-
-        if (!blk_idx)
-            IWL_DEBUG_FW(fwrt, "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
-                         order);
-        else
-            IWL_DEBUG_FW(fwrt, "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
-                         order);
+  /*
+   * Allocate CSS and paging blocks in dram.
+   */
+  for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
+    /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
+    size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
+    order = get_order(size);
+    block = alloc_pages(GFP_KERNEL, order);
+    if (!block) {
+      /* free all the previous pages since we failed */
+      iwl_free_fw_paging(fwrt);
+      return -ENOMEM;
     }
 
-    return 0;
+    fwrt->fw_paging_db[blk_idx].fw_paging_block = block;
+    fwrt->fw_paging_db[blk_idx].fw_paging_size = size;
+
+    phys = dma_map_page(fwrt->trans->dev, block, 0, PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+    if (dma_mapping_error(fwrt->trans->dev, phys)) {
+      /*
+       * free the previous pages and the current one
+       * since we failed to map_page.
+       */
+      iwl_free_fw_paging(fwrt);
+      return -ENOMEM;
+    }
+    fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys;
+
+    if (!blk_idx)
+      IWL_DEBUG_FW(fwrt, "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+                   order);
+    else
+      IWL_DEBUG_FW(fwrt, "Paging: allocated 32K bytes (order %d) for firmware paging.\n", order);
+  }
+
+  return 0;
 }
 
 static int iwl_fill_paging_mem(struct iwl_fw_runtime* fwrt, const struct fw_img* image) {
-    int sec_idx, idx, ret;
-    uint32_t offset = 0;
+  int sec_idx, idx, ret;
+  uint32_t offset = 0;
 
-    /*
-     * find where is the paging image start point:
-     * if CPU2 exist and it's in paging format, then the image looks like:
-     * CPU1 sections (2 or more)
-     * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
-     * CPU2 sections (not paged)
-     * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
-     * non paged to CPU2 paging sec
-     * CPU2 paging CSS
-     * CPU2 paging image (including instruction and data)
-     */
-    for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
-        if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
-            sec_idx++;
-            break;
-        }
+  /*
+   * find where is the paging image start point:
+   * if CPU2 exist and it's in paging format, then the image looks like:
+   * CPU1 sections (2 or more)
+   * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
+   * CPU2 sections (not paged)
+   * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
+   * non paged to CPU2 paging sec
+   * CPU2 paging CSS
+   * CPU2 paging image (including instruction and data)
+   */
+  for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
+    if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
+      sec_idx++;
+      break;
     }
+  }
+
+  /*
+   * If paging is enabled there should be at least 2 more sections left
+   * (one for CSS and one for Paging data)
+   */
+  if (sec_idx >= image->num_sec - 1) {
+    IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
+    ret = -EINVAL;
+    goto err;
+  }
+
+  /* copy the CSS block to the dram */
+  IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n", sec_idx);
+
+  if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
+    IWL_ERR(fwrt, "CSS block is larger than paging size\n");
+    ret = -EINVAL;
+    goto err;
+  }
+
+  memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), image->sec[sec_idx].data,
+         image->sec[sec_idx].len);
+  dma_sync_single_for_device(fwrt->trans->dev, fwrt->fw_paging_db[0].fw_paging_phys,
+                             fwrt->fw_paging_db[0].fw_paging_size, DMA_BIDIRECTIONAL);
+
+  IWL_DEBUG_FW(fwrt, "Paging: copied %d CSS bytes to first block\n",
+               fwrt->fw_paging_db[0].fw_paging_size);
+
+  sec_idx++;
+
+  /*
+   * Copy the paging blocks to the dram.  The loop index starts
+   * from 1 since the CSS block (index 0) was already copied to
+   * dram.  We use num_of_paging_blk + 1 to account for that.
+   */
+  for (idx = 1; idx < fwrt->num_of_paging_blk + 1; idx++) {
+    struct iwl_fw_paging* block = &fwrt->fw_paging_db[idx];
+    int remaining = image->sec[sec_idx].len - offset;
+    int len = block->fw_paging_size;
 
     /*
-     * If paging is enabled there should be at least 2 more sections left
-     * (one for CSS and one for Paging data)
-     */
-    if (sec_idx >= image->num_sec - 1) {
-        IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
+     * For the last block, we copy all that is remaining,
+     * for all other blocks, we copy fw_paging_size at a
+     * time. */
+    if (idx == fwrt->num_of_paging_blk) {
+      len = remaining;
+      if (remaining != fwrt->num_of_pages_in_last_blk * FW_PAGING_SIZE) {
+        IWL_ERR(fwrt, "Paging: last block contains more data than expected %d\n", remaining);
         ret = -EINVAL;
         goto err;
+      }
+    } else if (block->fw_paging_size > remaining) {
+      IWL_ERR(fwrt, "Paging: not enough data in other in block %d (%d)\n", idx, remaining);
+      ret = -EINVAL;
+      goto err;
     }
 
-    /* copy the CSS block to the dram */
-    IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n", sec_idx);
+    memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, len);
+    dma_sync_single_for_device(fwrt->trans->dev, block->fw_paging_phys, block->fw_paging_size,
+                               DMA_BIDIRECTIONAL);
 
-    if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
-        IWL_ERR(fwrt, "CSS block is larger than paging size\n");
-        ret = -EINVAL;
-        goto err;
-    }
+    IWL_DEBUG_FW(fwrt, "Paging: copied %d paging bytes to block %d\n", len, idx);
 
-    memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), image->sec[sec_idx].data,
-           image->sec[sec_idx].len);
-    dma_sync_single_for_device(fwrt->trans->dev, fwrt->fw_paging_db[0].fw_paging_phys,
-                               fwrt->fw_paging_db[0].fw_paging_size, DMA_BIDIRECTIONAL);
+    offset += block->fw_paging_size;
+  }
 
-    IWL_DEBUG_FW(fwrt, "Paging: copied %d CSS bytes to first block\n",
-                 fwrt->fw_paging_db[0].fw_paging_size);
-
-    sec_idx++;
-
-    /*
-     * Copy the paging blocks to the dram.  The loop index starts
-     * from 1 since the CSS block (index 0) was already copied to
-     * dram.  We use num_of_paging_blk + 1 to account for that.
-     */
-    for (idx = 1; idx < fwrt->num_of_paging_blk + 1; idx++) {
-        struct iwl_fw_paging* block = &fwrt->fw_paging_db[idx];
-        int remaining = image->sec[sec_idx].len - offset;
-        int len = block->fw_paging_size;
-
-        /*
-         * For the last block, we copy all that is remaining,
-         * for all other blocks, we copy fw_paging_size at a
-         * time. */
-        if (idx == fwrt->num_of_paging_blk) {
-            len = remaining;
-            if (remaining != fwrt->num_of_pages_in_last_blk * FW_PAGING_SIZE) {
-                IWL_ERR(fwrt, "Paging: last block contains more data than expected %d\n",
-                        remaining);
-                ret = -EINVAL;
-                goto err;
-            }
-        } else if (block->fw_paging_size > remaining) {
-            IWL_ERR(fwrt, "Paging: not enough data in other in block %d (%d)\n", idx, remaining);
-            ret = -EINVAL;
-            goto err;
-        }
-
-        memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, len);
-        dma_sync_single_for_device(fwrt->trans->dev, block->fw_paging_phys, block->fw_paging_size,
-                                   DMA_BIDIRECTIONAL);
-
-        IWL_DEBUG_FW(fwrt, "Paging: copied %d paging bytes to block %d\n", len, idx);
-
-        offset += block->fw_paging_size;
-    }
-
-    return 0;
+  return 0;
 
 err:
-    iwl_free_fw_paging(fwrt);
-    return ret;
+  iwl_free_fw_paging(fwrt);
+  return ret;
 }
 
 static int iwl_save_fw_paging(struct iwl_fw_runtime* fwrt, const struct fw_img* fw) {
-    int ret;
+  int ret;
 
-    ret = iwl_alloc_fw_paging_mem(fwrt, fw);
-    if (ret) { return ret; }
+  ret = iwl_alloc_fw_paging_mem(fwrt, fw);
+  if (ret) {
+    return ret;
+  }
 
-    return iwl_fill_paging_mem(fwrt, fw);
+  return iwl_fill_paging_mem(fwrt, fw);
 }
 
 /* send paging cmd to FW in case CPU2 has paging image */
 static int iwl_send_paging_cmd(struct iwl_fw_runtime* fwrt, const struct fw_img* fw) {
-    struct iwl_fw_paging_cmd paging_cmd = {
-        .flags = cpu_to_le32(
-            PAGING_CMD_IS_SECURED | PAGING_CMD_IS_ENABLED |
-            (fwrt->num_of_pages_in_last_blk << PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
-        .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
-        .block_num = cpu_to_le32(fwrt->num_of_paging_blk),
-    };
-    struct iwl_host_cmd hcmd = {
-        .id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
-        .len =
-            {
-                sizeof(paging_cmd),
-            },
-        .data =
-            {
-                &paging_cmd,
-            },
-    };
-    int blk_idx;
+  struct iwl_fw_paging_cmd paging_cmd = {
+      .flags =
+          cpu_to_le32(PAGING_CMD_IS_SECURED | PAGING_CMD_IS_ENABLED |
+                      (fwrt->num_of_pages_in_last_blk << PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+      .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+      .block_num = cpu_to_le32(fwrt->num_of_paging_blk),
+  };
+  struct iwl_host_cmd hcmd = {
+      .id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+      .len =
+          {
+              sizeof(paging_cmd),
+          },
+      .data =
+          {
+              &paging_cmd,
+          },
+  };
+  int blk_idx;
 
-    /* loop for for all paging blocks + CSS block */
-    for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
-        dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys;
-        __le32 phy_addr;
+  /* loop for all paging blocks + CSS block */
+  for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
+    dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys;
+    __le32 phy_addr;
 
-        addr = addr >> PAGE_2_EXP_SIZE;
-        phy_addr = cpu_to_le32(addr);
-        paging_cmd.device_phy_addr[blk_idx] = phy_addr;
-    }
+    addr = addr >> PAGE_2_EXP_SIZE;
+    phy_addr = cpu_to_le32(addr);
+    paging_cmd.device_phy_addr[blk_idx] = phy_addr;
+  }
 
-    return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+  return iwl_trans_send_cmd(fwrt->trans, &hcmd);
 }
 
 int iwl_init_paging(struct iwl_fw_runtime* fwrt, enum iwl_ucode_type type) {
-    const struct fw_img* fw = &fwrt->fw->img[type];
-    int ret;
+  const struct fw_img* fw = &fwrt->fw->img[type];
+  int ret;
 
-    if (fwrt->trans->cfg->gen2) { return 0; }
-
-    /*
-     * Configure and operate fw paging mechanism.
-     * The driver configures the paging flow only once.
-     * The CPU2 paging image is included in the IWL_UCODE_INIT image.
-     */
-    if (!fw->paging_mem_size) { return 0; }
-
-    ret = iwl_save_fw_paging(fwrt, fw);
-    if (ret) {
-        IWL_ERR(fwrt, "failed to save the FW paging image\n");
-        return ret;
-    }
-
-    ret = iwl_send_paging_cmd(fwrt, fw);
-    if (ret) {
-        IWL_ERR(fwrt, "failed to send the paging cmd\n");
-        iwl_free_fw_paging(fwrt);
-        return ret;
-    }
-
+  if (fwrt->trans->cfg->gen2) {
     return 0;
+  }
+
+  /*
+   * Configure and operate fw paging mechanism.
+   * The driver configures the paging flow only once.
+   * The CPU2 paging image is included in the IWL_UCODE_INIT image.
+   */
+  if (!fw->paging_mem_size) {
+    return 0;
+  }
+
+  ret = iwl_save_fw_paging(fwrt, fw);
+  if (ret) {
+    IWL_ERR(fwrt, "failed to save the FW paging image\n");
+    return ret;
+  }
+
+  ret = iwl_send_paging_cmd(fwrt, fw);
+  if (ret) {
+    IWL_ERR(fwrt, "failed to send the paging cmd\n");
+    iwl_free_fw_paging(fwrt);
+    return ret;
+  }
+
+  return 0;
 }
 IWL_EXPORT_SYMBOL(iwl_init_paging);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/runtime.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/runtime.h
index 4302884..11df11f 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/runtime.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/runtime.h
@@ -43,28 +43,28 @@
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h"
 
 struct iwl_fw_runtime_ops {
-    int (*dump_start)(void* ctx);
-    void (*dump_end)(void* ctx);
-    bool (*fw_running)(void* ctx);
-    int (*send_hcmd)(void* ctx, struct iwl_host_cmd* host_cmd);
+  int (*dump_start)(void* ctx);
+  void (*dump_end)(void* ctx);
+  bool (*fw_running)(void* ctx);
+  int (*send_hcmd)(void* ctx, struct iwl_host_cmd* host_cmd);
 };
 
 #define MAX_NUM_LMAC 2
 struct iwl_fwrt_shared_mem_cfg {
-    int num_lmacs;
-    int num_txfifo_entries;
-    struct {
-        uint32_t txfifo_size[TX_FIFO_MAX_NUM];
-        uint32_t rxfifo1_size;
-    } lmac[MAX_NUM_LMAC];
-    uint32_t rxfifo2_size;
-    uint32_t internal_txfifo_addr;
-    uint32_t internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+  int num_lmacs;
+  int num_txfifo_entries;
+  struct {
+    uint32_t txfifo_size[TX_FIFO_MAX_NUM];
+    uint32_t rxfifo1_size;
+  } lmac[MAX_NUM_LMAC];
+  uint32_t rxfifo2_size;
+  uint32_t internal_txfifo_addr;
+  uint32_t internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
 };
 
 enum iwl_fw_runtime_status {
-    IWL_FWRT_STATUS_DUMPING = 0,
-    IWL_FWRT_STATUS_WAIT_ALIVE,
+  IWL_FWRT_STATUS_DUMPING = 0,
+  IWL_FWRT_STATUS_WAIT_ALIVE,
 };
 
 /**
@@ -84,48 +84,48 @@
  * @dump: debug dump data
  */
 struct iwl_fw_runtime {
-    struct iwl_trans* trans;
-    const struct iwl_fw* fw;
-    struct device* dev;
+  struct iwl_trans* trans;
+  const struct iwl_fw* fw;
+  struct device* dev;
 
-    const struct iwl_fw_runtime_ops* ops;
-    void* ops_ctx;
+  const struct iwl_fw_runtime_ops* ops;
+  void* ops_ctx;
 
-    unsigned long status;
+  unsigned long status;
 
-    /* Paging */
-    struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
-    uint16_t num_of_paging_blk;
-    uint16_t num_of_pages_in_last_blk;
+  /* Paging */
+  struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
+  uint16_t num_of_paging_blk;
+  uint16_t num_of_pages_in_last_blk;
 
-    enum iwl_ucode_type cur_fw_img;
+  enum iwl_ucode_type cur_fw_img;
 
-    /* memory configuration */
-    struct iwl_fwrt_shared_mem_cfg smem_cfg;
+  /* memory configuration */
+  struct iwl_fwrt_shared_mem_cfg smem_cfg;
 
-    /* debug */
-    struct {
-        const struct iwl_fw_dump_desc* desc;
-        bool monitor_only;
-        struct delayed_work wk;
+  /* debug */
+  struct {
+    const struct iwl_fw_dump_desc* desc;
+    bool monitor_only;
+    struct delayed_work wk;
 
-        uint8_t conf;
+    uint8_t conf;
 
-        /* ts of the beginning of a non-collect fw dbg data period */
-        unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM - 1];
-        uint32_t* d3_debug_data;
-        struct iwl_fw_ini_active_regs active_regs[IWL_FW_INI_MAX_REGION_ID];
-        struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
-        uint32_t lmac_err_id[MAX_NUM_LMAC];
-        uint32_t umac_err_id;
-    } dump;
+    /* ts of the beginning of a non-collect fw dbg data period */
+    unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM - 1];
+    uint32_t* d3_debug_data;
+    struct iwl_fw_ini_active_regs active_regs[IWL_FW_INI_MAX_REGION_ID];
+    struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
+    uint32_t lmac_err_id[MAX_NUM_LMAC];
+    uint32_t umac_err_id;
+  } dump;
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    struct {
-        struct delayed_work wk;
-        uint32_t delay;
-        uint64_t seq;
-    } timestamp;
-    bool tpc_enabled;
+  struct {
+    struct delayed_work wk;
+    uint32_t delay;
+    uint64_t seq;
+  } timestamp;
+  bool tpc_enabled;
 #endif /* CPTCFG_IWLWIFI_DEBUGFS */
 };
 
@@ -134,8 +134,8 @@
                          void* ops_ctx, struct dentry* dbgfs_dir);
 
 static inline void iwl_fw_runtime_free(struct iwl_fw_runtime* fwrt) {
-    kfree(fwrt->dump.d3_debug_data);
-    fwrt->dump.d3_debug_data = NULL;
+  kfree(fwrt->dump.d3_debug_data);
+  fwrt->dump.d3_debug_data = NULL;
 }
 
 void iwl_fw_runtime_suspend(struct iwl_fw_runtime* fwrt);
@@ -144,7 +144,7 @@
 
 static inline void iwl_fw_set_current_image(struct iwl_fw_runtime* fwrt,
                                             enum iwl_ucode_type cur_fw_img) {
-    fwrt->cur_fw_img = cur_fw_img;
+  fwrt->cur_fw_img = cur_fw_img;
 }
 
 int iwl_init_paging(struct iwl_fw_runtime* fwrt, enum iwl_ucode_type type);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/smem.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/smem.c
index 4a4c3ba..bbb1309 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/smem.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/smem.c
@@ -38,84 +38,88 @@
 #include "runtime.h"
 
 static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime* fwrt, struct iwl_rx_packet* pkt) {
-    struct iwl_shared_mem_cfg* mem_cfg = (void*)pkt->data;
-    int i, lmac;
-    int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
+  struct iwl_shared_mem_cfg* mem_cfg = (void*)pkt->data;
+  int i, lmac;
+  int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
 
-    if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) { return; }
+  if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) {
+    return;
+  }
 
-    fwrt->smem_cfg.num_lmacs = lmac_num;
-    fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
-    fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
+  fwrt->smem_cfg.num_lmacs = lmac_num;
+  fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
+  fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
 
-    for (lmac = 0; lmac < lmac_num; lmac++) {
-        struct iwl_shared_mem_lmac_cfg* lmac_cfg = &mem_cfg->lmac_smem[lmac];
+  for (lmac = 0; lmac < lmac_num; lmac++) {
+    struct iwl_shared_mem_lmac_cfg* lmac_cfg = &mem_cfg->lmac_smem[lmac];
 
-        for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++) {
-            fwrt->smem_cfg.lmac[lmac].txfifo_size[i] = le32_to_cpu(lmac_cfg->txfifo_size[i]);
-        }
-        fwrt->smem_cfg.lmac[lmac].rxfifo1_size = le32_to_cpu(lmac_cfg->rxfifo1_size);
+    for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++) {
+      fwrt->smem_cfg.lmac[lmac].txfifo_size[i] = le32_to_cpu(lmac_cfg->txfifo_size[i]);
     }
+    fwrt->smem_cfg.lmac[lmac].rxfifo1_size = le32_to_cpu(lmac_cfg->rxfifo1_size);
+  }
 }
 
 static void iwl_parse_shared_mem(struct iwl_fw_runtime* fwrt, struct iwl_rx_packet* pkt) {
-    struct iwl_shared_mem_cfg_v2* mem_cfg = (void*)pkt->data;
-    int i;
+  struct iwl_shared_mem_cfg_v2* mem_cfg = (void*)pkt->data;
+  int i;
 
-    fwrt->smem_cfg.num_lmacs = 1;
+  fwrt->smem_cfg.num_lmacs = 1;
 
-    fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
-    for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
-        fwrt->smem_cfg.lmac[0].txfifo_size[i] = le32_to_cpu(mem_cfg->txfifo_size[i]);
+  fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
+  for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
+    fwrt->smem_cfg.lmac[0].txfifo_size[i] = le32_to_cpu(mem_cfg->txfifo_size[i]);
+  }
+
+  fwrt->smem_cfg.lmac[0].rxfifo1_size = le32_to_cpu(mem_cfg->rxfifo_size[0]);
+  fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
+
+  /* new API has more data, from rxfifo_addr field and on */
+  if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+    BUILD_BUG_ON(sizeof(fwrt->smem_cfg.internal_txfifo_size) !=
+                 sizeof(mem_cfg->internal_txfifo_size));
+
+    fwrt->smem_cfg.internal_txfifo_addr = le32_to_cpu(mem_cfg->internal_txfifo_addr);
+
+    for (i = 0; i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) {
+      fwrt->smem_cfg.internal_txfifo_size[i] = le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
     }
-
-    fwrt->smem_cfg.lmac[0].rxfifo1_size = le32_to_cpu(mem_cfg->rxfifo_size[0]);
-    fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
-
-    /* new API has more data, from rxfifo_addr field and on */
-    if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
-        BUILD_BUG_ON(sizeof(fwrt->smem_cfg.internal_txfifo_size) !=
-                     sizeof(mem_cfg->internal_txfifo_size));
-
-        fwrt->smem_cfg.internal_txfifo_addr = le32_to_cpu(mem_cfg->internal_txfifo_addr);
-
-        for (i = 0; i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) {
-            fwrt->smem_cfg.internal_txfifo_size[i] = le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
-        }
-    }
+  }
 }
 
 void iwl_get_shared_mem_conf(struct iwl_fw_runtime* fwrt) {
-    struct iwl_host_cmd cmd = {
-        .flags = CMD_WANT_SKB,
-        .data =
-            {
-                NULL,
-            },
-        .len =
-            {
-                0,
-            },
-    };
-    struct iwl_rx_packet* pkt;
+  struct iwl_host_cmd cmd = {
+      .flags = CMD_WANT_SKB,
+      .data =
+          {
+              NULL,
+          },
+      .len =
+          {
+              0,
+          },
+  };
+  struct iwl_rx_packet* pkt;
 
-    if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
-        cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
-    } else {
-        cmd.id = SHARED_MEM_CFG;
-    }
+  if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+    cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+  } else {
+    cmd.id = SHARED_MEM_CFG;
+  }
 
-    if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd))) { return; }
+  if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd))) {
+    return;
+  }
 
-    pkt = cmd.resp_pkt;
-    if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
-        iwl_parse_shared_mem_22000(fwrt, pkt);
-    } else {
-        iwl_parse_shared_mem(fwrt, pkt);
-    }
+  pkt = cmd.resp_pkt;
+  if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+    iwl_parse_shared_mem_22000(fwrt, pkt);
+  } else {
+    iwl_parse_shared_mem(fwrt, pkt);
+  }
 
-    IWL_DEBUG_INFO(fwrt, "SHARED MEM CFG: got memory offsets/sizes\n");
+  IWL_DEBUG_INFO(fwrt, "SHARED MEM CFG: got memory offsets/sizes\n");
 
-    iwl_free_resp(&cmd);
+  iwl_free_resp(&cmd);
 }
 IWL_EXPORT_SYMBOL(iwl_get_shared_mem_conf);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/testmode.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/testmode.c
index 1b7ab6f..dd6013c 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/testmode.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/testmode.c
@@ -41,257 +41,279 @@
 
 static int iwl_tm_send_hcmd(struct iwl_testmode* testmode, struct iwl_tm_data* data_in,
                             struct iwl_tm_data* data_out) {
-    struct iwl_tm_cmd_request* hcmd_req = data_in->data;
-    struct iwl_tm_cmd_request* cmd_resp;
-    uint32_t reply_len, resp_size;
-    struct iwl_rx_packet* pkt;
-    struct iwl_host_cmd host_cmd = {
-        .id = hcmd_req->id,
-        .data[0] = hcmd_req->data,
-        .len[0] = hcmd_req->len,
-        .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-    };
-    int ret;
+  struct iwl_tm_cmd_request* hcmd_req = data_in->data;
+  struct iwl_tm_cmd_request* cmd_resp;
+  uint32_t reply_len, resp_size;
+  struct iwl_rx_packet* pkt;
+  struct iwl_host_cmd host_cmd = {
+      .id = hcmd_req->id,
+      .data[0] = hcmd_req->data,
+      .len[0] = hcmd_req->len,
+      .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+  };
+  int ret;
 
-    if (!testmode->send_hcmd) { return -EOPNOTSUPP; }
+  if (!testmode->send_hcmd) {
+    return -EOPNOTSUPP;
+  }
 
-    if (hcmd_req->want_resp) { host_cmd.flags |= CMD_WANT_SKB; }
+  if (hcmd_req->want_resp) {
+    host_cmd.flags |= CMD_WANT_SKB;
+  }
 
-    mutex_lock(testmode->mutex);
-    ret = testmode->send_hcmd(testmode->op_mode, &host_cmd);
-    mutex_unlock(testmode->mutex);
-    if (ret) { return ret; }
-    /* if no reply is required, we are done */
-    if (!(host_cmd.flags & CMD_WANT_SKB)) { return 0; }
+  mutex_lock(testmode->mutex);
+  ret = testmode->send_hcmd(testmode->op_mode, &host_cmd);
+  mutex_unlock(testmode->mutex);
+  if (ret) {
+    return ret;
+  }
+  /* if no reply is required, we are done */
+  if (!(host_cmd.flags & CMD_WANT_SKB)) {
+    return 0;
+  }
 
-    /* Retrieve response packet */
-    pkt = host_cmd.resp_pkt;
-    reply_len = iwl_rx_packet_len(pkt);
+  /* Retrieve response packet */
+  pkt = host_cmd.resp_pkt;
+  reply_len = iwl_rx_packet_len(pkt);
 
-    /* Set response data */
-    resp_size = sizeof(struct iwl_tm_cmd_request) + reply_len;
-    cmd_resp = kzalloc(resp_size, GFP_KERNEL);
-    if (!cmd_resp) {
-        ret = -ENOMEM;
-        goto out;
-    }
-    cmd_resp->id = hcmd_req->id;
-    cmd_resp->len = reply_len;
-    memcpy(cmd_resp->data, &(pkt->hdr), reply_len);
+  /* Set response data */
+  resp_size = sizeof(struct iwl_tm_cmd_request) + reply_len;
+  cmd_resp = kzalloc(resp_size, GFP_KERNEL);
+  if (!cmd_resp) {
+    ret = -ENOMEM;
+    goto out;
+  }
+  cmd_resp->id = hcmd_req->id;
+  cmd_resp->len = reply_len;
+  memcpy(cmd_resp->data, &(pkt->hdr), reply_len);
 
-    data_out->data = cmd_resp;
-    data_out->len = resp_size;
-    ret = 0;
+  data_out->data = cmd_resp;
+  data_out->len = resp_size;
+  ret = 0;
 
 out:
-    iwl_free_resp(&host_cmd);
-    return ret;
+  iwl_free_resp(&host_cmd);
+  return ret;
 }
 
 static void iwl_tm_execute_reg_ops(struct iwl_testmode* testmode,
                                    struct iwl_tm_regs_request* request,
                                    struct iwl_tm_regs_request* result) {
-    struct iwl_tm_reg_op* cur_op;
-    uint32_t idx, read_idx;
+  struct iwl_tm_reg_op* cur_op;
+  uint32_t idx, read_idx;
 
-    for (idx = 0, read_idx = 0; idx < request->num; idx++) {
-        cur_op = &request->reg_ops[idx];
+  for (idx = 0, read_idx = 0; idx < request->num; idx++) {
+    cur_op = &request->reg_ops[idx];
 
-        if (cur_op->op_type == IWL_TM_REG_OP_READ) {
-            cur_op->value = iwl_read32(testmode->trans, cur_op->address);
-            memcpy(&result->reg_ops[read_idx], cur_op, sizeof(*cur_op));
-            read_idx++;
-        } else {
-            /* IWL_TM_REG_OP_WRITE is the only possible option */
-            iwl_write32(testmode->trans, cur_op->address, cur_op->value);
-        }
+    if (cur_op->op_type == IWL_TM_REG_OP_READ) {
+      cur_op->value = iwl_read32(testmode->trans, cur_op->address);
+      memcpy(&result->reg_ops[read_idx], cur_op, sizeof(*cur_op));
+      read_idx++;
+    } else {
+      /* IWL_TM_REG_OP_WRITE is the only possible option */
+      iwl_write32(testmode->trans, cur_op->address, cur_op->value);
     }
+  }
 }
 
 static int iwl_tm_reg_ops(struct iwl_testmode* testmode, struct iwl_tm_data* data_in,
                           struct iwl_tm_data* data_out) {
-    struct iwl_tm_reg_op* cur_op;
-    struct iwl_tm_regs_request* request = data_in->data;
-    struct iwl_tm_regs_request* result;
-    uint32_t result_size;
-    uint32_t idx, read_idx;
-    bool is_grab_nic_access_required = true;
-    unsigned long flags;
+  struct iwl_tm_reg_op* cur_op;
+  struct iwl_tm_regs_request* request = data_in->data;
+  struct iwl_tm_regs_request* result;
+  uint32_t result_size;
+  uint32_t idx, read_idx;
+  bool is_grab_nic_access_required = true;
+  unsigned long flags;
 
-    /* Calculate result size (result is returned only for read ops) */
-    for (idx = 0, read_idx = 0; idx < request->num; idx++) {
-        if (request->reg_ops[idx].op_type == IWL_TM_REG_OP_READ) { read_idx++; }
-        /* check if there is an operation that it is not */
-        /* in the CSR range (0x00000000 - 0x000003FF)    */
-        /* and not in the AL range           */
-        cur_op = &request->reg_ops[idx];
-        if (IS_AL_ADDR(cur_op->address) || cur_op->address < HBUS_BASE) {
-            is_grab_nic_access_required = false;
-        }
+  /* Calculate result size (result is returned only for read ops) */
+  for (idx = 0, read_idx = 0; idx < request->num; idx++) {
+    if (request->reg_ops[idx].op_type == IWL_TM_REG_OP_READ) {
+      read_idx++;
     }
-    result_size = sizeof(struct iwl_tm_regs_request) + read_idx * sizeof(struct iwl_tm_reg_op);
-
-    result = kzalloc(result_size, GFP_KERNEL);
-    if (!result) { return -ENOMEM; }
-    result->num = read_idx;
-    if (is_grab_nic_access_required) {
-        if (!iwl_trans_grab_nic_access(testmode->trans, &flags)) {
-            kfree(result);
-            return -EBUSY;
-        }
-        iwl_tm_execute_reg_ops(testmode, request, result);
-        iwl_trans_release_nic_access(testmode->trans, &flags);
-    } else {
-        iwl_tm_execute_reg_ops(testmode, request, result);
+    /* check if there is an operation that is not    */
+    /* in the CSR range (0x00000000 - 0x000003FF)    */
+    /* and not in the AL range           */
+    cur_op = &request->reg_ops[idx];
+    if (IS_AL_ADDR(cur_op->address) || cur_op->address < HBUS_BASE) {
+      is_grab_nic_access_required = false;
     }
+  }
+  result_size = sizeof(struct iwl_tm_regs_request) + read_idx * sizeof(struct iwl_tm_reg_op);
 
-    data_out->data = result;
-    data_out->len = result_size;
+  result = kzalloc(result_size, GFP_KERNEL);
+  if (!result) {
+    return -ENOMEM;
+  }
+  result->num = read_idx;
+  if (is_grab_nic_access_required) {
+    if (!iwl_trans_grab_nic_access(testmode->trans, &flags)) {
+      kfree(result);
+      return -EBUSY;
+    }
+    iwl_tm_execute_reg_ops(testmode, request, result);
+    iwl_trans_release_nic_access(testmode->trans, &flags);
+  } else {
+    iwl_tm_execute_reg_ops(testmode, request, result);
+  }
 
-    return 0;
+  data_out->data = result;
+  data_out->len = result_size;
+
+  return 0;
 }
 
 static int iwl_tm_get_dev_info(struct iwl_testmode* testmode, struct iwl_tm_data* data_out) {
-    struct iwl_tm_dev_info* dev_info;
-    const uint8_t driver_ver[] = BACKPORTS_GIT_TRACKED;
+  struct iwl_tm_dev_info* dev_info;
+  const uint8_t driver_ver[] = BACKPORTS_GIT_TRACKED;
 
-    dev_info = kzalloc(sizeof(*dev_info) + (strlen(driver_ver) + 1) * sizeof(uint8_t), GFP_KERNEL);
-    if (!dev_info) { return -ENOMEM; }
+  dev_info = kzalloc(sizeof(*dev_info) + (strlen(driver_ver) + 1) * sizeof(uint8_t), GFP_KERNEL);
+  if (!dev_info) {
+    return -ENOMEM;
+  }
 
-    dev_info->dev_id = testmode->trans->hw_id;
-    dev_info->fw_ver = testmode->fw->ucode_ver;
-    dev_info->vendor_id = PCI_VENDOR_ID_INTEL;
-    dev_info->silicon_step = CSR_HW_REV_STEP(testmode->trans->hw_rev);
+  dev_info->dev_id = testmode->trans->hw_id;
+  dev_info->fw_ver = testmode->fw->ucode_ver;
+  dev_info->vendor_id = PCI_VENDOR_ID_INTEL;
+  dev_info->silicon_step = CSR_HW_REV_STEP(testmode->trans->hw_rev);
 
-    /* TODO: Assign real value when feature is implemented */
-    dev_info->build_ver = 0x00;
+  /* TODO: Assign real value when feature is implemented */
+  dev_info->build_ver = 0x00;
 
-    strcpy(dev_info->driver_ver, driver_ver);
+  strcpy(dev_info->driver_ver, driver_ver);
 
-    data_out->data = dev_info;
-    data_out->len = sizeof(*dev_info);
+  data_out->data = dev_info;
+  data_out->len = sizeof(*dev_info);
 
-    return 0;
+  return 0;
 }
 
 static int iwl_tm_indirect_read(struct iwl_testmode* testmode, struct iwl_tm_data* data_in,
                                 struct iwl_tm_data* data_out) {
-    struct iwl_trans* trans = testmode->trans;
-    struct iwl_tm_sram_read_request* cmd_in = data_in->data;
-    uint32_t addr = cmd_in->offset;
-    uint32_t size = cmd_in->length;
-    uint32_t *buf32, size32, i;
-    unsigned long flags;
+  struct iwl_trans* trans = testmode->trans;
+  struct iwl_tm_sram_read_request* cmd_in = data_in->data;
+  uint32_t addr = cmd_in->offset;
+  uint32_t size = cmd_in->length;
+  uint32_t *buf32, size32, i;
+  unsigned long flags;
 
-    if (size & (sizeof(uint32_t) - 1)) { return -EINVAL; }
+  if (size & (sizeof(uint32_t) - 1)) {
+    return -EINVAL;
+  }
 
-    data_out->data = kmalloc(size, GFP_KERNEL);
-    if (!data_out->data) { return -ENOMEM; }
+  data_out->data = kmalloc(size, GFP_KERNEL);
+  if (!data_out->data) {
+    return -ENOMEM;
+  }
 
-    data_out->len = size;
+  data_out->len = size;
 
-    size32 = size / sizeof(uint32_t);
-    buf32 = data_out->data;
+  size32 = size / sizeof(uint32_t);
+  buf32 = data_out->data;
 
-    mutex_lock(testmode->mutex);
+  mutex_lock(testmode->mutex);
 
-    /* Hard-coded periphery absolute address */
-    if (addr >= IWL_ABS_PRPH_START && addr < IWL_ABS_PRPH_START + PRPH_END) {
-        if (!iwl_trans_grab_nic_access(trans, &flags)) {
-            mutex_unlock(testmode->mutex);
-            return -EBUSY;
-        }
-        for (i = 0; i < size32; i++) {
-            buf32[i] = iwl_trans_read_prph(trans, addr + i * sizeof(uint32_t));
-        }
-        iwl_trans_release_nic_access(trans, &flags);
-    } else {
-        /* target memory (SRAM) */
-        iwl_trans_read_mem(trans, addr, buf32, size32);
+  /* Hard-coded periphery absolute address */
+  if (addr >= IWL_ABS_PRPH_START && addr < IWL_ABS_PRPH_START + PRPH_END) {
+    if (!iwl_trans_grab_nic_access(trans, &flags)) {
+      mutex_unlock(testmode->mutex);
+      return -EBUSY;
     }
+    for (i = 0; i < size32; i++) {
+      buf32[i] = iwl_trans_read_prph(trans, addr + i * sizeof(uint32_t));
+    }
+    iwl_trans_release_nic_access(trans, &flags);
+  } else {
+    /* target memory (SRAM) */
+    iwl_trans_read_mem(trans, addr, buf32, size32);
+  }
 
-    mutex_unlock(testmode->mutex);
-    return 0;
+  mutex_unlock(testmode->mutex);
+  return 0;
 }
 
 static int iwl_tm_indirect_write(struct iwl_testmode* testmode, struct iwl_tm_data* data_in) {
-    struct iwl_trans* trans = testmode->trans;
-    struct iwl_tm_sram_write_request* cmd_in = data_in->data;
-    uint32_t addr = cmd_in->offset;
-    uint32_t size = cmd_in->len;
-    uint8_t* buf = cmd_in->buffer;
-    uint32_t *buf32 = (uint32_t*)buf, size32 = size / sizeof(uint32_t);
-    unsigned long flags;
-    uint32_t val, i;
+  struct iwl_trans* trans = testmode->trans;
+  struct iwl_tm_sram_write_request* cmd_in = data_in->data;
+  uint32_t addr = cmd_in->offset;
+  uint32_t size = cmd_in->len;
+  uint8_t* buf = cmd_in->buffer;
+  uint32_t *buf32 = (uint32_t*)buf, size32 = size / sizeof(uint32_t);
+  unsigned long flags;
+  uint32_t val, i;
 
-    mutex_lock(testmode->mutex);
-    if (addr >= IWL_ABS_PRPH_START && addr < IWL_ABS_PRPH_START + PRPH_END) {
-        /* Periphery writes can be 1-3 bytes long, or DWORDs */
-        if (size < 4) {
-            memcpy(&val, buf, size);
-            if (!iwl_trans_grab_nic_access(trans, &flags)) {
-                mutex_unlock(testmode->mutex);
-                return -EBUSY;
-            }
-            iwl_write32(trans, HBUS_TARG_PRPH_WADDR, (addr & 0x000FFFFF) | ((size - 1) << 24));
-            iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
-            iwl_trans_release_nic_access(trans, &flags);
-        } else {
-            if (size % sizeof(uint32_t)) {
-                mutex_unlock(testmode->mutex);
-                return -EINVAL;
-            }
-
-            for (i = 0; i < size32; i++) {
-                iwl_write_prph(trans, addr + i * sizeof(uint32_t), buf32[i]);
-            }
-        }
+  mutex_lock(testmode->mutex);
+  if (addr >= IWL_ABS_PRPH_START && addr < IWL_ABS_PRPH_START + PRPH_END) {
+    /* Periphery writes can be 1-3 bytes long, or DWORDs */
+    if (size < 4) {
+      memcpy(&val, buf, size);
+      if (!iwl_trans_grab_nic_access(trans, &flags)) {
+        mutex_unlock(testmode->mutex);
+        return -EBUSY;
+      }
+      iwl_write32(trans, HBUS_TARG_PRPH_WADDR, (addr & 0x000FFFFF) | ((size - 1) << 24));
+      iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+      iwl_trans_release_nic_access(trans, &flags);
     } else {
-        iwl_trans_write_mem(trans, addr, buf32, size32);
-    }
-    mutex_unlock(testmode->mutex);
+      if (size % sizeof(uint32_t)) {
+        mutex_unlock(testmode->mutex);
+        return -EINVAL;
+      }
 
-    return 0;
+      for (i = 0; i < size32; i++) {
+        iwl_write_prph(trans, addr + i * sizeof(uint32_t), buf32[i]);
+      }
+    }
+  } else {
+    iwl_trans_write_mem(trans, addr, buf32, size32);
+  }
+  mutex_unlock(testmode->mutex);
+
+  return 0;
 }
 
 static int iwl_tm_get_fw_info(struct iwl_testmode* testmode, struct iwl_tm_data* data_out) {
-    struct iwl_tm_get_fw_info* fw_info;
-    uint32_t api_len, capa_len;
-    uint32_t* bitmap;
-    int i;
+  struct iwl_tm_get_fw_info* fw_info;
+  uint32_t api_len, capa_len;
+  uint32_t* bitmap;
+  int i;
 
-    if (!testmode->fw_major_ver || !testmode->fw_minor_ver) { return -EOPNOTSUPP; }
+  if (!testmode->fw_major_ver || !testmode->fw_minor_ver) {
+    return -EOPNOTSUPP;
+  }
 
-    api_len = 4 * DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32);
-    capa_len = 4 * DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32);
+  api_len = 4 * DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32);
+  capa_len = 4 * DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32);
 
-    fw_info = kzalloc(sizeof(*fw_info) + api_len + capa_len, GFP_KERNEL);
-    if (!fw_info) { return -ENOMEM; }
+  fw_info = kzalloc(sizeof(*fw_info) + api_len + capa_len, GFP_KERNEL);
+  if (!fw_info) {
+    return -ENOMEM;
+  }
 
-    fw_info->fw_major_ver = testmode->fw_major_ver;
-    fw_info->fw_minor_ver = testmode->fw_minor_ver;
-    fw_info->fw_capa_api_len = api_len;
-    fw_info->fw_capa_flags = testmode->fw->ucode_capa.flags;
-    fw_info->fw_capa_len = capa_len;
+  fw_info->fw_major_ver = testmode->fw_major_ver;
+  fw_info->fw_minor_ver = testmode->fw_minor_ver;
+  fw_info->fw_capa_api_len = api_len;
+  fw_info->fw_capa_flags = testmode->fw->ucode_capa.flags;
+  fw_info->fw_capa_len = capa_len;
 
-    bitmap = (uint32_t*)fw_info->data;
-    for (i = 0; i < NUM_IWL_UCODE_TLV_API; i++) {
-        if (fw_has_api(&testmode->fw->ucode_capa, (__force iwl_ucode_tlv_api_t)i)) {
-            bitmap[i / 32] |= BIT(i % 32);
-        }
+  bitmap = (uint32_t*)fw_info->data;
+  for (i = 0; i < NUM_IWL_UCODE_TLV_API; i++) {
+    if (fw_has_api(&testmode->fw->ucode_capa, (__force iwl_ucode_tlv_api_t)i)) {
+      bitmap[i / 32] |= BIT(i % 32);
     }
+  }
 
-    bitmap = (uint32_t*)(fw_info->data + api_len);
-    for (i = 0; i < NUM_IWL_UCODE_TLV_CAPA; i++) {
-        if (fw_has_capa(&testmode->fw->ucode_capa, (__force iwl_ucode_tlv_capa_t)i)) {
-            bitmap[i / 32] |= BIT(i % 32);
-        }
+  bitmap = (uint32_t*)(fw_info->data + api_len);
+  for (i = 0; i < NUM_IWL_UCODE_TLV_CAPA; i++) {
+    if (fw_has_capa(&testmode->fw->ucode_capa, (__force iwl_ucode_tlv_capa_t)i)) {
+      bitmap[i / 32] |= BIT(i % 32);
     }
+  }
 
-    data_out->data = fw_info;
-    data_out->len = sizeof(*fw_info) + api_len + capa_len;
+  data_out->data = fw_info;
+  data_out->len = sizeof(*fw_info) + api_len + capa_len;
 
-    return 0;
+  return 0;
 }
 
 /**
@@ -307,77 +329,85 @@
  */
 int iwl_tm_execute_cmd(struct iwl_testmode* testmode, uint32_t cmd, struct iwl_tm_data* data_in,
                        struct iwl_tm_data* data_out) {
-    const struct iwl_test_ops* test_ops;
-    bool cmd_supported = false;
-    int ret;
+  const struct iwl_test_ops* test_ops;
+  bool cmd_supported = false;
+  int ret;
 
-    if (!testmode->trans->op_mode) {
-        IWL_ERR(testmode->trans, "No op_mode!\n");
-        return -ENODEV;
+  if (!testmode->trans->op_mode) {
+    IWL_ERR(testmode->trans, "No op_mode!\n");
+    return -ENODEV;
+  }
+  if (WARN_ON_ONCE(!testmode->op_mode || !data_in)) {
+    return -EINVAL;
+  }
+
+  test_ops = &testmode->trans->op_mode->ops->test_ops;
+
+  if (test_ops->cmd_exec_start) {
+    ret = test_ops->cmd_exec_start(testmode);
+    if (ret) {
+      return ret;
     }
-    if (WARN_ON_ONCE(!testmode->op_mode || !data_in)) { return -EINVAL; }
+  }
 
-    test_ops = &testmode->trans->op_mode->ops->test_ops;
+  if (test_ops->cmd_exec) {
+    ret = test_ops->cmd_exec(testmode, cmd, data_in, data_out, &cmd_supported);
+  }
 
-    if (test_ops->cmd_exec_start) {
-        ret = test_ops->cmd_exec_start(testmode);
-        if (ret) { return ret; }
-    }
+  if (cmd_supported) {
+    goto out;
+  }
 
-    if (test_ops->cmd_exec) {
-        ret = test_ops->cmd_exec(testmode, cmd, data_in, data_out, &cmd_supported);
-    }
-
-    if (cmd_supported) { goto out; }
-
-    switch (cmd) {
+  switch (cmd) {
     case IWL_TM_USER_CMD_HCMD:
-        ret = iwl_tm_send_hcmd(testmode, data_in, data_out);
-        break;
+      ret = iwl_tm_send_hcmd(testmode, data_in, data_out);
+      break;
     case IWL_TM_USER_CMD_REG_ACCESS:
-        ret = iwl_tm_reg_ops(testmode, data_in, data_out);
-        break;
+      ret = iwl_tm_reg_ops(testmode, data_in, data_out);
+      break;
     case IWL_TM_USER_CMD_SRAM_WRITE:
-        ret = iwl_tm_indirect_write(testmode, data_in);
-        break;
+      ret = iwl_tm_indirect_write(testmode, data_in);
+      break;
     case IWL_TM_USER_CMD_SRAM_READ:
-        ret = iwl_tm_indirect_read(testmode, data_in, data_out);
-        break;
+      ret = iwl_tm_indirect_read(testmode, data_in, data_out);
+      break;
     case IWL_TM_USER_CMD_GET_DEVICE_INFO:
-        ret = iwl_tm_get_dev_info(testmode, data_out);
-        break;
+      ret = iwl_tm_get_dev_info(testmode, data_out);
+      break;
     case IWL_TM_USER_CMD_GET_FW_INFO:
-        ret = iwl_tm_get_fw_info(testmode, data_out);
-        break;
+      ret = iwl_tm_get_fw_info(testmode, data_out);
+      break;
     default:
-        ret = -EOPNOTSUPP;
-        break;
-    }
+      ret = -EOPNOTSUPP;
+      break;
+  }
 
 out:
-    if (test_ops->cmd_exec_end) { test_ops->cmd_exec_end(testmode); }
-    return ret;
+  if (test_ops->cmd_exec_end) {
+    test_ops->cmd_exec_end(testmode);
+  }
+  return ret;
 }
 
 void iwl_tm_init(struct iwl_trans* trans, const struct iwl_fw* fw, struct mutex* mutex,
                  void* op_mode) {
-    struct iwl_testmode* testmode = &trans->testmode;
+  struct iwl_testmode* testmode = &trans->testmode;
 
-    testmode->trans = trans;
-    testmode->fw = fw;
-    testmode->mutex = mutex;
-    testmode->op_mode = op_mode;
+  testmode->trans = trans;
+  testmode->fw = fw;
+  testmode->mutex = mutex;
+  testmode->op_mode = op_mode;
 
-    if (trans->op_mode->ops->test_ops.send_hcmd) {
-        testmode->send_hcmd = trans->op_mode->ops->test_ops.send_hcmd;
-    }
+  if (trans->op_mode->ops->test_ops.send_hcmd) {
+    testmode->send_hcmd = trans->op_mode->ops->test_ops.send_hcmd;
+  }
 }
 IWL_EXPORT_SYMBOL(iwl_tm_init);
 
 void iwl_tm_set_fw_ver(struct iwl_trans* trans, uint32_t fw_major_ver, uint32_t fw_minor_var) {
-    struct iwl_testmode* testmode = &trans->testmode;
+  struct iwl_testmode* testmode = &trans->testmode;
 
-    testmode->fw_major_ver = fw_major_ver;
-    testmode->fw_minor_ver = fw_minor_var;
+  testmode->fw_major_ver = fw_major_ver;
+  testmode->fw_minor_ver = fw_minor_var;
 }
 IWL_EXPORT_SYMBOL(iwl_tm_set_fw_ver);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/testmode.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/testmode.h
index 7d980d1..ab5ff11 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/testmode.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/testmode.h
@@ -48,14 +48,14 @@
  * @IWL_TM_ATTR_MAX: max amount of attributes
  */
 enum iwl_testmode_attrs {
-    IWL_TM_ATTR_UNSPEC,
-    IWL_TM_ATTR_CMD,
-    IWL_TM_ATTR_NOA_DURATION,
-    IWL_TM_ATTR_BEACON_FILTER_STATE,
+  IWL_TM_ATTR_UNSPEC,
+  IWL_TM_ATTR_CMD,
+  IWL_TM_ATTR_NOA_DURATION,
+  IWL_TM_ATTR_BEACON_FILTER_STATE,
 
-    /* keep last */
-    NUM_IWL_TM_ATTRS,
-    IWL_TM_ATTR_MAX = NUM_IWL_TM_ATTRS - 1,
+  /* keep last */
+  NUM_IWL_TM_ATTRS,
+  IWL_TM_ATTR_MAX = NUM_IWL_TM_ATTRS - 1,
 };
 
 /**
@@ -64,8 +64,8 @@
  * @IWL_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
  */
 enum iwl_testmode_commands {
-    IWL_TM_CMD_SET_NOA,
-    IWL_TM_CMD_SET_BEACON_FILTER,
+  IWL_TM_CMD_SET_NOA,
+  IWL_TM_CMD_SET_BEACON_FILTER,
 };
 #endif
 
@@ -74,14 +74,14 @@
 struct iwl_rx_cmd_buffer;
 
 struct iwl_testmode {
-    struct iwl_trans* trans;
-    const struct iwl_fw* fw;
-    /* the mutex of the op_mode */
-    struct mutex* mutex;
-    void* op_mode;
-    int (*send_hcmd)(void* op_mode, struct iwl_host_cmd* host_cmd);
-    uint32_t fw_major_ver;
-    uint32_t fw_minor_ver;
+  struct iwl_trans* trans;
+  const struct iwl_fw* fw;
+  /* the mutex of the op_mode */
+  struct mutex* mutex;
+  void* op_mode;
+  int (*send_hcmd)(void* op_mode, struct iwl_host_cmd* host_cmd);
+  uint32_t fw_major_ver;
+  uint32_t fw_minor_ver;
 };
 
 /**
@@ -94,8 +94,8 @@
  * between internal testmode interfaces
  */
 struct iwl_tm_data {
-    void* data;
-    uint32_t len;
+  void* data;
+  uint32_t len;
 };
 
 void iwl_tm_init(struct iwl_trans* trans, const struct iwl_fw* fw, struct mutex* mutex,
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/ieee80211.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/ieee80211.h
index d12422b..ffad848 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/ieee80211.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/ieee80211.h
@@ -192,8 +192,8 @@
   void* drv_priv;
 };
 
-static inline struct ieee80211_hw* ieee80211_alloc_hw(
-    size_t priv_data_len, const struct ieee80211_ops* ops) {
+static inline struct ieee80211_hw* ieee80211_alloc_hw(size_t priv_data_len,
+                                                      const struct ieee80211_ops* ops) {
   return NULL;  // NEEDS_PORTING
 }
 
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-config.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-config.h
index b35f4d2..aa07423 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-config.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-config.h
@@ -41,26 +41,26 @@
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-csr.h"
 
 enum iwl_device_family {
-    IWL_DEVICE_FAMILY_UNDEFINED,
-    IWL_DEVICE_FAMILY_1000,
-    IWL_DEVICE_FAMILY_100,
-    IWL_DEVICE_FAMILY_2000,
-    IWL_DEVICE_FAMILY_2030,
-    IWL_DEVICE_FAMILY_105,
-    IWL_DEVICE_FAMILY_135,
-    IWL_DEVICE_FAMILY_5000,
-    IWL_DEVICE_FAMILY_5150,
-    IWL_DEVICE_FAMILY_6000,
-    IWL_DEVICE_FAMILY_6000i,
-    IWL_DEVICE_FAMILY_6005,
-    IWL_DEVICE_FAMILY_6030,
-    IWL_DEVICE_FAMILY_6050,
-    IWL_DEVICE_FAMILY_6150,
-    IWL_DEVICE_FAMILY_7000,
-    IWL_DEVICE_FAMILY_8000,
-    IWL_DEVICE_FAMILY_9000,
-    IWL_DEVICE_FAMILY_22000,
-    IWL_DEVICE_FAMILY_22560,
+  IWL_DEVICE_FAMILY_UNDEFINED,
+  IWL_DEVICE_FAMILY_1000,
+  IWL_DEVICE_FAMILY_100,
+  IWL_DEVICE_FAMILY_2000,
+  IWL_DEVICE_FAMILY_2030,
+  IWL_DEVICE_FAMILY_105,
+  IWL_DEVICE_FAMILY_135,
+  IWL_DEVICE_FAMILY_5000,
+  IWL_DEVICE_FAMILY_5150,
+  IWL_DEVICE_FAMILY_6000,
+  IWL_DEVICE_FAMILY_6000i,
+  IWL_DEVICE_FAMILY_6005,
+  IWL_DEVICE_FAMILY_6030,
+  IWL_DEVICE_FAMILY_6050,
+  IWL_DEVICE_FAMILY_6150,
+  IWL_DEVICE_FAMILY_7000,
+  IWL_DEVICE_FAMILY_8000,
+  IWL_DEVICE_FAMILY_9000,
+  IWL_DEVICE_FAMILY_22000,
+  IWL_DEVICE_FAMILY_22560,
 };
 
 /*
@@ -73,10 +73,10 @@
  *    IWL_LED_DISABLE:  led disabled
  */
 enum iwl_led_mode {
-    IWL_LED_DEFAULT,
-    IWL_LED_RF_STATE,
-    IWL_LED_BLINK,
-    IWL_LED_DISABLE,
+  IWL_LED_DEFAULT,
+  IWL_LED_RF_STATE,
+  IWL_LED_BLINK,
+  IWL_LED_DISABLE,
 };
 
 /**
@@ -86,9 +86,9 @@
  * @IWL_NVM_SDP: NVM format used by 3168 series
  */
 enum iwl_nvm_type {
-    IWL_NVM,
-    IWL_NVM_EXT,
-    IWL_NVM_SDP,
+  IWL_NVM,
+  IWL_NVM_EXT,
+  IWL_NVM_SDP,
 };
 
 /*
@@ -124,7 +124,7 @@
 #define MAX_ANT_NUM 3
 
 static inline uint8_t num_of_ant(uint8_t mask) {
-    return !!((mask)&ANT_A) + !!((mask)&ANT_B) + !!((mask)&ANT_C);
+  return !!((mask)&ANT_A) + !!((mask)&ANT_B) + !!((mask)&ANT_C);
 }
 
 /*
@@ -142,20 +142,20 @@
  * @max_tfd_queue_size: max number of entries in tfd queue.
  */
 struct iwl_base_params {
-    unsigned int wd_timeout;
+  unsigned int wd_timeout;
 
-    uint16_t eeprom_size;
-    uint16_t max_event_log_size;
+  uint16_t eeprom_size;
+  uint16_t max_event_log_size;
 
-    uint8_t pll_cfg : 1, /* for iwl_pcie_apm_init() */
-        shadow_ram_support : 1, shadow_reg_enable : 1, pcie_l1_allowed : 1, apmg_wake_up_wa : 1,
-        scd_chain_ext_wa : 1;
+  uint8_t pll_cfg : 1, /* for iwl_pcie_apm_init() */
+      shadow_ram_support : 1, shadow_reg_enable : 1, pcie_l1_allowed : 1, apmg_wake_up_wa : 1,
+      scd_chain_ext_wa : 1;
 
-    uint16_t num_of_queues;      /* def: HW dependent */
-    uint32_t max_tfd_queue_size; /* def: HW dependent */
+  uint16_t num_of_queues;      /* def: HW dependent */
+  uint32_t max_tfd_queue_size; /* def: HW dependent */
 
-    uint8_t max_ll_items;
-    uint8_t led_compensation;
+  uint8_t max_ll_items;
+  uint8_t led_compensation;
 };
 
 /*
@@ -165,8 +165,8 @@
  * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40
  */
 struct iwl_ht_params {
-    uint8_t ht_greenfield_support : 1, stbc : 1, ldpc : 1, use_rts_for_aggregation : 1;
-    uint8_t ht40_bands;
+  uint8_t ht_greenfield_support : 1, stbc : 1, ldpc : 1, use_rts_for_aggregation : 1;
+  uint8_t ht40_bands;
 };
 
 /*
@@ -175,8 +175,8 @@
  * @backoff: The tx-backoff in uSec
  */
 struct iwl_tt_tx_backoff {
-    int32_t temperature;
-    uint32_t backoff;
+  int32_t temperature;
+  uint32_t backoff;
 };
 
 #define TT_TX_BACKOFF_SIZE 6
@@ -198,16 +198,16 @@
  * @support_tx_backoff: Support tx-backoff?
  */
 struct iwl_tt_params {
-    uint32_t ct_kill_entry;
-    uint32_t ct_kill_exit;
-    uint32_t ct_kill_duration;
-    uint32_t dynamic_smps_entry;
-    uint32_t dynamic_smps_exit;
-    uint32_t tx_protection_entry;
-    uint32_t tx_protection_exit;
-    struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
-    uint8_t support_ct_kill : 1, support_dynamic_smps : 1, support_tx_protection : 1,
-        support_tx_backoff : 1;
+  uint32_t ct_kill_entry;
+  uint32_t ct_kill_exit;
+  uint32_t ct_kill_duration;
+  uint32_t dynamic_smps_entry;
+  uint32_t dynamic_smps_exit;
+  uint32_t tx_protection_entry;
+  uint32_t tx_protection_exit;
+  struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
+  uint8_t support_ct_kill : 1, support_dynamic_smps : 1, support_tx_protection : 1,
+      support_tx_backoff : 1;
 };
 
 /*
@@ -229,8 +229,8 @@
 #define OTP_LOW_IMAGE_SIZE_32K (32 * 512 * sizeof(uint16_t)) /* 32 KB */
 
 struct iwl_eeprom_params {
-    const uint8_t regulatory_bands[7];
-    bool enhanced_txpower;
+  const uint8_t regulatory_bands[7];
+  bool enhanced_txpower;
 };
 
 /* Tx-backoff power threshold
@@ -238,8 +238,8 @@
  * @backoff: The tx-backoff in uSec
  */
 struct iwl_pwr_tx_backoff {
-    uint32_t pwr;
-    uint32_t backoff;
+  uint32_t pwr;
+  uint32_t backoff;
 };
 
 /**
@@ -274,18 +274,18 @@
  * @mac_addr1_strap: second part of MAC address from strap
  */
 struct iwl_csr_params {
-    uint8_t flag_sw_reset;
-    uint8_t flag_mac_clock_ready;
-    uint8_t flag_init_done;
-    uint8_t flag_mac_access_req;
-    uint8_t flag_val_mac_access_en;
-    uint8_t flag_master_dis;
-    uint8_t flag_stop_master;
-    uint8_t addr_sw_reset;
-    uint32_t mac_addr0_otp;
-    uint32_t mac_addr1_otp;
-    uint32_t mac_addr0_strap;
-    uint32_t mac_addr1_strap;
+  uint8_t flag_sw_reset;
+  uint8_t flag_mac_clock_ready;
+  uint8_t flag_init_done;
+  uint8_t flag_mac_access_req;
+  uint8_t flag_val_mac_access_en;
+  uint8_t flag_master_dis;
+  uint8_t flag_stop_master;
+  uint8_t addr_sw_reset;
+  uint32_t mac_addr0_otp;
+  uint32_t mac_addr1_otp;
+  uint32_t mac_addr0_strap;
+  uint32_t mac_addr1_strap;
 };
 
 /**
@@ -344,52 +344,52 @@
  * and/or the uCode API version instead.
  */
 struct iwl_cfg {
-    /* params specific to an individual device within a device family */
-    const char* name;
-    const char* fw_name_pre;
-    /* params not likely to change within a device family */
-    const struct iwl_base_params* base_params;
-    /* params likely to change within a device family */
-    const struct iwl_ht_params* ht_params;
-    const struct iwl_eeprom_params* eeprom_params;
-    const struct iwl_pwr_tx_backoff* pwr_tx_backoffs;
-    const char* default_nvm_file_C_step;
-    const struct iwl_tt_params* thermal_params;
-    const struct iwl_csr_params* csr;
-    enum iwl_device_family device_family;
-    enum iwl_led_mode led_mode;
-    enum iwl_nvm_type nvm_type;
-    uint32_t max_data_size;
-    uint32_t max_inst_size;
-    netdev_features_t features;
-    uint32_t dccm_offset;
-    uint32_t dccm_len;
-    uint32_t dccm2_offset;
-    uint32_t dccm2_len;
-    uint32_t smem_offset;
-    uint32_t smem_len;
-    uint32_t soc_latency;
-    uint16_t nvm_ver;
-    uint16_t nvm_calib_ver;
-    uint32_t rx_with_siso_diversity : 1, bt_shared_single_ant : 1, internal_wimax_coex : 1,
-        host_interrupt_operation_mode : 1, high_temp : 1, mac_addr_from_csr : 1,
-        lp_xtal_workaround : 1, disable_dummy_notification : 1, apmg_not_supported : 1,
-        mq_rx_supported : 1, vht_mu_mimo_supported : 1, rf_id : 1, integrated : 1, use_tfh : 1,
-        gen2 : 1, cdb : 1, dbgc_supported : 1;
-    uint8_t valid_tx_ant;
-    uint8_t valid_rx_ant;
-    uint8_t non_shared_ant;
-    uint8_t nvm_hw_section_num;
-    uint8_t max_rx_agg_size;
-    uint8_t max_tx_agg_size;
-    uint8_t max_ht_ampdu_exponent;
-    uint8_t max_vht_ampdu_exponent;
-    uint8_t ucode_api_max;
-    uint8_t ucode_api_min;
-    uint32_t min_umac_error_event_table;
-    uint32_t extra_phy_cfg_flags;
-    uint32_t d3_debug_data_base_addr;
-    uint32_t d3_debug_data_length;
+  /* params specific to an individual device within a device family */
+  const char* name;
+  const char* fw_name_pre;
+  /* params not likely to change within a device family */
+  const struct iwl_base_params* base_params;
+  /* params likely to change within a device family */
+  const struct iwl_ht_params* ht_params;
+  const struct iwl_eeprom_params* eeprom_params;
+  const struct iwl_pwr_tx_backoff* pwr_tx_backoffs;
+  const char* default_nvm_file_C_step;
+  const struct iwl_tt_params* thermal_params;
+  const struct iwl_csr_params* csr;
+  enum iwl_device_family device_family;
+  enum iwl_led_mode led_mode;
+  enum iwl_nvm_type nvm_type;
+  uint32_t max_data_size;
+  uint32_t max_inst_size;
+  netdev_features_t features;
+  uint32_t dccm_offset;
+  uint32_t dccm_len;
+  uint32_t dccm2_offset;
+  uint32_t dccm2_len;
+  uint32_t smem_offset;
+  uint32_t smem_len;
+  uint32_t soc_latency;
+  uint16_t nvm_ver;
+  uint16_t nvm_calib_ver;
+  uint32_t rx_with_siso_diversity : 1, bt_shared_single_ant : 1, internal_wimax_coex : 1,
+      host_interrupt_operation_mode : 1, high_temp : 1, mac_addr_from_csr : 1,
+      lp_xtal_workaround : 1, disable_dummy_notification : 1, apmg_not_supported : 1,
+      mq_rx_supported : 1, vht_mu_mimo_supported : 1, rf_id : 1, integrated : 1, use_tfh : 1,
+      gen2 : 1, cdb : 1, dbgc_supported : 1;
+  uint8_t valid_tx_ant;
+  uint8_t valid_rx_ant;
+  uint8_t non_shared_ant;
+  uint8_t nvm_hw_section_num;
+  uint8_t max_rx_agg_size;
+  uint8_t max_tx_agg_size;
+  uint8_t max_ht_ampdu_exponent;
+  uint8_t max_vht_ampdu_exponent;
+  uint8_t ucode_api_max;
+  uint8_t ucode_api_min;
+  uint32_t min_umac_error_event_table;
+  uint32_t extra_phy_cfg_flags;
+  uint32_t d3_debug_data_base_addr;
+  uint32_t d3_debug_data_length;
 };
 
 static const struct iwl_csr_params iwl_csr_v1 = {.flag_mac_clock_ready = 0,
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-constants.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-constants.h
index 4132792..7801770 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-constants.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-constants.h
@@ -37,8 +37,8 @@
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h"
 
 enum {
-    IWL_D0I3_DBG_KEEP_BUS = BIT(0),
-    IWL_D0I3_DBG_KEEP_WAKE_LOCK = BIT(1),
+  IWL_D0I3_DBG_KEEP_BUS = BIT(0),
+  IWL_D0I3_DBG_KEEP_WAKE_LOCK = BIT(1),
 };
 
 #ifndef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-context-info-gen3.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-context-info-gen3.h
index aac8cc3..edea4d5 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-context-info-gen3.h
@@ -54,10 +54,10 @@
  * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd
  */
 enum iwl_prph_scratch_mtr_format {
-    IWL_PRPH_MTR_FORMAT_16B = 0x0,
-    IWL_PRPH_MTR_FORMAT_32B = 0x40000,
-    IWL_PRPH_MTR_FORMAT_64B = 0x80000,
-    IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
+  IWL_PRPH_MTR_FORMAT_16B = 0x0,
+  IWL_PRPH_MTR_FORMAT_32B = 0x40000,
+  IWL_PRPH_MTR_FORMAT_64B = 0x80000,
+  IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
 };
 
 /**
@@ -77,14 +77,14 @@
  *  3: 256 bit.
  */
 enum iwl_prph_scratch_flags {
-    IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
-    IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
-    IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
-    IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10),
-    IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11),
-    IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
-    IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
-    IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
+  IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
+  IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
+  IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
+  IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10),
+  IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11),
+  IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
+  IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
+  IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
 };
 
 /*
@@ -95,10 +95,10 @@
  * @reserved: reserved
  */
 struct iwl_prph_scratch_version {
-    __le16 mac_id;
-    __le16 version;
-    __le16 size;
-    __le16 reserved;
+  __le16 mac_id;
+  __le16 version;
+  __le16 size;
+  __le16 reserved;
 } __packed; /* PERIPH_SCRATCH_VERSION_S */
 
 /*
@@ -107,8 +107,8 @@
  * @reserved: reserved
  */
 struct iwl_prph_scratch_control {
-    __le32 control_flags;
-    __le32 reserved;
+  __le32 control_flags;
+  __le32 reserved;
 } __packed; /* PERIPH_SCRATCH_CONTROL_S */
 
 /*
@@ -118,9 +118,9 @@
  * @reserved: reserved
  */
 struct iwl_prph_scratch_ror_cfg {
-    __le64 ror_base_addr;
-    __le32 ror_size;
-    __le32 reserved;
+  __le64 ror_base_addr;
+  __le32 ror_size;
+  __le32 reserved;
 } __packed; /* PERIPH_SCRATCH_ROR_CFG_S */
 
 /*
@@ -130,9 +130,9 @@
  * @reserved: reserved
  */
 struct iwl_prph_scratch_hwm_cfg {
-    __le64 hwm_base_addr;
-    __le32 hwm_size;
-    __le32 reserved;
+  __le64 hwm_base_addr;
+  __le32 hwm_size;
+  __le32 reserved;
 } __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
 
 /*
@@ -141,8 +141,8 @@
  * @reserved: reserved
  */
 struct iwl_prph_scratch_rbd_cfg {
-    __le64 free_rbd_addr;
-    __le32 reserved;
+  __le64 free_rbd_addr;
+  __le32 reserved;
 } __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
 
 /*
@@ -154,11 +154,11 @@
  * @rbd_cfg: default RX queue configuration
  */
 struct iwl_prph_scratch_ctrl_cfg {
-    struct iwl_prph_scratch_version version;
-    struct iwl_prph_scratch_control control;
-    struct iwl_prph_scratch_ror_cfg ror_cfg;
-    struct iwl_prph_scratch_hwm_cfg hwm_cfg;
-    struct iwl_prph_scratch_rbd_cfg rbd_cfg;
+  struct iwl_prph_scratch_version version;
+  struct iwl_prph_scratch_control control;
+  struct iwl_prph_scratch_ror_cfg ror_cfg;
+  struct iwl_prph_scratch_hwm_cfg hwm_cfg;
+  struct iwl_prph_scratch_rbd_cfg rbd_cfg;
 } __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
 
 /*
@@ -168,9 +168,9 @@
  * @reserved: reserved
  */
 struct iwl_prph_scratch {
-    struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
-    __le32 reserved[16];
-    struct iwl_context_info_dram dram;
+  struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
+  __le32 reserved[16];
+  struct iwl_context_info_dram dram;
 } __packed; /* PERIPH_SCRATCH_S */
 
 /*
@@ -181,10 +181,10 @@
  * @reserved: reserved
  */
 struct iwl_prph_info {
-    __le32 boot_stage_mirror;
-    __le32 ipc_status_mirror;
-    __le32 sleep_notif;
-    __le32 reserved;
+  __le32 boot_stage_mirror;
+  __le32 ipc_status_mirror;
+  __le32 sleep_notif;
+  __le32 reserved;
 } __packed; /* PERIPH_INFO_S */
 
 /*
@@ -232,33 +232,33 @@
  * @reserved: reserved
  */
 struct iwl_context_info_gen3 {
-    __le16 version;
-    __le16 size;
-    __le32 config;
-    __le64 prph_info_base_addr;
-    __le64 cr_head_idx_arr_base_addr;
-    __le64 tr_tail_idx_arr_base_addr;
-    __le64 cr_tail_idx_arr_base_addr;
-    __le64 tr_head_idx_arr_base_addr;
-    __le16 cr_idx_arr_size;
-    __le16 tr_idx_arr_size;
-    __le64 mtr_base_addr;
-    __le64 mcr_base_addr;
-    __le16 mtr_size;
-    __le16 mcr_size;
-    __le16 mtr_doorbell_vec;
-    __le16 mcr_doorbell_vec;
-    __le16 mtr_msi_vec;
-    __le16 mcr_msi_vec;
-    uint8_t mtr_opt_header_size;
-    uint8_t mtr_opt_footer_size;
-    uint8_t mcr_opt_header_size;
-    uint8_t mcr_opt_footer_size;
-    __le16 msg_rings_ctrl_flags;
-    __le16 prph_info_msi_vec;
-    __le64 prph_scratch_base_addr;
-    __le32 prph_scratch_size;
-    __le32 reserved;
+  __le16 version;
+  __le16 size;
+  __le32 config;
+  __le64 prph_info_base_addr;
+  __le64 cr_head_idx_arr_base_addr;
+  __le64 tr_tail_idx_arr_base_addr;
+  __le64 cr_tail_idx_arr_base_addr;
+  __le64 tr_head_idx_arr_base_addr;
+  __le16 cr_idx_arr_size;
+  __le16 tr_idx_arr_size;
+  __le64 mtr_base_addr;
+  __le64 mcr_base_addr;
+  __le16 mtr_size;
+  __le16 mcr_size;
+  __le16 mtr_doorbell_vec;
+  __le16 mcr_doorbell_vec;
+  __le16 mtr_msi_vec;
+  __le16 mcr_msi_vec;
+  uint8_t mtr_opt_header_size;
+  uint8_t mtr_opt_footer_size;
+  uint8_t mcr_opt_header_size;
+  uint8_t mcr_opt_footer_size;
+  __le16 msg_rings_ctrl_flags;
+  __le16 prph_info_msi_vec;
+  __le64 prph_scratch_base_addr;
+  __le32 prph_scratch_size;
+  __le32 reserved;
 } __packed; /* IPC_CONTEXT_INFO_S */
 
 int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans* trans, const struct fw_img* fw);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-context-info.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-context-info.h
index 9766c5d..f1348d3 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-context-info.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-context-info.h
@@ -64,22 +64,22 @@
  * @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size
  */
 enum iwl_context_info_flags {
-    IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0),
-    IWL_CTXT_INFO_EARLY_DEBUG = BIT(1),
-    IWL_CTXT_INFO_ENABLE_CDMP = BIT(2),
-    IWL_CTXT_INFO_RB_CB_SIZE_POS = 4,
-    IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8),
-    IWL_CTXT_INFO_RB_SIZE_POS = 9,
-    IWL_CTXT_INFO_RB_SIZE_1K = 0x1,
-    IWL_CTXT_INFO_RB_SIZE_2K = 0x2,
-    IWL_CTXT_INFO_RB_SIZE_4K = 0x4,
-    IWL_CTXT_INFO_RB_SIZE_8K = 0x8,
-    IWL_CTXT_INFO_RB_SIZE_12K = 0x9,
-    IWL_CTXT_INFO_RB_SIZE_16K = 0xa,
-    IWL_CTXT_INFO_RB_SIZE_20K = 0xb,
-    IWL_CTXT_INFO_RB_SIZE_24K = 0xc,
-    IWL_CTXT_INFO_RB_SIZE_28K = 0xd,
-    IWL_CTXT_INFO_RB_SIZE_32K = 0xe,
+  IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0),
+  IWL_CTXT_INFO_EARLY_DEBUG = BIT(1),
+  IWL_CTXT_INFO_ENABLE_CDMP = BIT(2),
+  IWL_CTXT_INFO_RB_CB_SIZE_POS = 4,
+  IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8),
+  IWL_CTXT_INFO_RB_SIZE_POS = 9,
+  IWL_CTXT_INFO_RB_SIZE_1K = 0x1,
+  IWL_CTXT_INFO_RB_SIZE_2K = 0x2,
+  IWL_CTXT_INFO_RB_SIZE_4K = 0x4,
+  IWL_CTXT_INFO_RB_SIZE_8K = 0x8,
+  IWL_CTXT_INFO_RB_SIZE_12K = 0x9,
+  IWL_CTXT_INFO_RB_SIZE_16K = 0xa,
+  IWL_CTXT_INFO_RB_SIZE_20K = 0xb,
+  IWL_CTXT_INFO_RB_SIZE_24K = 0xc,
+  IWL_CTXT_INFO_RB_SIZE_28K = 0xd,
+  IWL_CTXT_INFO_RB_SIZE_32K = 0xe,
 };
 
 /*
@@ -89,10 +89,10 @@
  * @size: the size of the context information in DWs
  */
 struct iwl_context_info_version {
-    __le16 mac_id;
-    __le16 version;
-    __le16 size;
-    __le16 reserved;
+  __le16 mac_id;
+  __le16 version;
+  __le16 size;
+  __le16 reserved;
 } __packed;
 
 /*
@@ -100,8 +100,8 @@
  * @control_flags: context information flags see &enum iwl_context_info_flags
  */
 struct iwl_context_info_control {
-    __le32 control_flags;
-    __le32 reserved;
+  __le32 control_flags;
+  __le32 reserved;
 } __packed;
 
 /*
@@ -112,9 +112,9 @@
  * @virtual_img: paged image DRAM map
  */
 struct iwl_context_info_dram {
-    __le64 umac_img[IWL_MAX_DRAM_ENTRY];
-    __le64 lmac_img[IWL_MAX_DRAM_ENTRY];
-    __le64 virtual_img[IWL_MAX_DRAM_ENTRY];
+  __le64 umac_img[IWL_MAX_DRAM_ENTRY];
+  __le64 lmac_img[IWL_MAX_DRAM_ENTRY];
+  __le64 virtual_img[IWL_MAX_DRAM_ENTRY];
 } __packed;
 
 /*
@@ -124,9 +124,9 @@
  * @status_wr_ptr: default queue used RB status write pointer
  */
 struct iwl_context_info_rbd_cfg {
-    __le64 free_rbd_addr;
-    __le64 used_rbd_addr;
-    __le64 status_wr_ptr;
+  __le64 free_rbd_addr;
+  __le64 used_rbd_addr;
+  __le64 status_wr_ptr;
 } __packed;
 
 /*
@@ -135,9 +135,9 @@
  * @cmd_queue_size: number of entries
  */
 struct iwl_context_info_hcmd_cfg {
-    __le64 cmd_queue_addr;
-    uint8_t cmd_queue_size;
-    uint8_t reserved[7];
+  __le64 cmd_queue_addr;
+  uint8_t cmd_queue_size;
+  uint8_t reserved[7];
 } __packed;
 
 /*
@@ -146,9 +146,9 @@
  * @core_dump_size: size, in DWs
  */
 struct iwl_context_info_dump_cfg {
-    __le64 core_dump_addr;
-    __le32 core_dump_size;
-    __le32 reserved;
+  __le64 core_dump_addr;
+  __le32 core_dump_size;
+  __le32 reserved;
 } __packed;
 
 /*
@@ -157,9 +157,9 @@
  * @platform_nvm_size: size in DWs
  */
 struct iwl_context_info_pnvm_cfg {
-    __le64 platform_nvm_addr;
-    __le32 platform_nvm_size;
-    __le32 reserved;
+  __le64 platform_nvm_addr;
+  __le32 platform_nvm_size;
+  __le32 reserved;
 } __packed;
 
 /*
@@ -169,9 +169,9 @@
  * @early_debug_size: size in DWs
  */
 struct iwl_context_info_early_dbg_cfg {
-    __le64 early_debug_addr;
-    __le32 early_debug_size;
-    __le32 reserved;
+  __le64 early_debug_addr;
+  __le32 early_debug_size;
+  __le32 reserved;
 } __packed;
 
 /*
@@ -186,18 +186,18 @@
  * @dram: firmware image addresses in DRAM
  */
 struct iwl_context_info {
-    struct iwl_context_info_version version;
-    struct iwl_context_info_control control;
-    __le64 reserved0;
-    struct iwl_context_info_rbd_cfg rbd_cfg;
-    struct iwl_context_info_hcmd_cfg hcmd_cfg;
-    __le32 reserved1[4];
-    struct iwl_context_info_dump_cfg dump_cfg;
-    struct iwl_context_info_early_dbg_cfg edbg_cfg;
-    struct iwl_context_info_pnvm_cfg pnvm_cfg;
-    __le32 reserved2[16];
-    struct iwl_context_info_dram dram;
-    __le32 reserved3[16];
+  struct iwl_context_info_version version;
+  struct iwl_context_info_control control;
+  __le64 reserved0;
+  struct iwl_context_info_rbd_cfg rbd_cfg;
+  struct iwl_context_info_hcmd_cfg hcmd_cfg;
+  __le32 reserved1[4];
+  struct iwl_context_info_dump_cfg dump_cfg;
+  struct iwl_context_info_early_dbg_cfg edbg_cfg;
+  struct iwl_context_info_pnvm_cfg pnvm_cfg;
+  __le32 reserved2[16];
+  struct iwl_context_info_dram dram;
+  __le32 reserved3[16];
 } __packed;
 
 int iwl_pcie_ctxt_info_init(struct iwl_trans* trans, const struct fw_img* fw);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-csr.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-csr.h
index a03b453..0897995 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-csr.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-csr.h
@@ -197,10 +197,10 @@
 #define CSR_INT_BIT_WAKEUP (1 << 1)       /* NIC controller waking up (pwr mgmt) */
 #define CSR_INT_BIT_ALIVE (1 << 0)        /* uCode interrupts once it initializes */
 
-#define CSR_INI_SET_MASK                                                                \
-    (CSR_INT_BIT_FH_RX | CSR_INT_BIT_HW_ERR | CSR_INT_BIT_FH_TX | CSR_INT_BIT_SW_ERR |  \
-     CSR_INT_BIT_RF_KILL | CSR_INT_BIT_SW_RX | CSR_INT_BIT_WAKEUP | CSR_INT_BIT_ALIVE | \
-     CSR_INT_BIT_RX_PERIODIC)
+#define CSR_INI_SET_MASK                                                              \
+  (CSR_INT_BIT_FH_RX | CSR_INT_BIT_HW_ERR | CSR_INT_BIT_FH_TX | CSR_INT_BIT_SW_ERR |  \
+   CSR_INT_BIT_RF_KILL | CSR_INT_BIT_SW_RX | CSR_INT_BIT_WAKEUP | CSR_INT_BIT_ALIVE | \
+   CSR_INT_BIT_RX_PERIODIC)
 
 /* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
 #define CSR_FH_INT_BIT_ERR (1 << 31)      /* Error */
@@ -211,7 +211,7 @@
 #define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0)  /* Tx channel 0 */
 
 #define CSR_FH_INT_RX_MASK \
-    (CSR_FH_INT_BIT_HI_PRIOR | CSR_FH_INT_BIT_RX_CHNL1 | CSR_FH_INT_BIT_RX_CHNL0)
+  (CSR_FH_INT_BIT_HI_PRIOR | CSR_FH_INT_BIT_RX_CHNL1 | CSR_FH_INT_BIT_RX_CHNL0)
 
 #define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | CSR_FH_INT_BIT_TX_CHNL0)
 
@@ -268,9 +268,9 @@
  *  hw_rev values
  */
 enum {
-    SILICON_A_STEP = 0,
-    SILICON_B_STEP,
-    SILICON_C_STEP,
+  SILICON_A_STEP = 0,
+  SILICON_B_STEP,
+  SILICON_C_STEP,
 };
 
 #define CSR_HW_REV_TYPE_MSK (0x000FFF0)
@@ -510,17 +510,17 @@
 
 /* Diode Results Register Structure: */
 enum dtd_diode_reg {
-    DTS_DIODE_REG_DIG_VAL = 0x000000FF,   /* bits [7:0] */
-    DTS_DIODE_REG_VREF_LOW = 0x0000FF00,  /* bits [15:8] */
-    DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */
-    DTS_DIODE_REG_VREF_ID = 0x03000000,   /* bits [25:24] */
-    DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */
-    DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */
-    /* Those are the masks INSIDE the flags bit-field: */
-    DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0,
-    DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */
-    DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7,
-    DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */
+  DTS_DIODE_REG_DIG_VAL = 0x000000FF,   /* bits [7:0] */
+  DTS_DIODE_REG_VREF_LOW = 0x0000FF00,  /* bits [15:8] */
+  DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */
+  DTS_DIODE_REG_VREF_ID = 0x03000000,   /* bits [25:24] */
+  DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */
+  DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */
+  /* Those are the masks INSIDE the flags bit-field: */
+  DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0,
+  DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */
+  DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7,
+  DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */
 };
 
 /*****************************************************************************
@@ -545,30 +545,30 @@
  * Causes for the FH register interrupts
  */
 enum msix_fh_int_causes {
-    MSIX_FH_INT_CAUSES_Q0 = BIT(0),
-    MSIX_FH_INT_CAUSES_Q1 = BIT(1),
-    MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16),
-    MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17),
-    MSIX_FH_INT_CAUSES_S2D = BIT(19),
-    MSIX_FH_INT_CAUSES_FH_ERR = BIT(21),
+  MSIX_FH_INT_CAUSES_Q0 = BIT(0),
+  MSIX_FH_INT_CAUSES_Q1 = BIT(1),
+  MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16),
+  MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17),
+  MSIX_FH_INT_CAUSES_S2D = BIT(19),
+  MSIX_FH_INT_CAUSES_FH_ERR = BIT(21),
 };
 
 /*
  * Causes for the HW register interrupts
  */
 enum msix_hw_int_causes {
-    MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
-    MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
-    MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
-    MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
-    MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
-    MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
-    MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
-    MSIX_HW_INT_CAUSES_REG_SW_ERR = BIT(25),
-    MSIX_HW_INT_CAUSES_REG_SCD = BIT(26),
-    MSIX_HW_INT_CAUSES_REG_FH_TX = BIT(27),
-    MSIX_HW_INT_CAUSES_REG_HW_ERR = BIT(29),
-    MSIX_HW_INT_CAUSES_REG_HAP = BIT(30),
+  MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
+  MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
+  MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
+  MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
+  MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
+  MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
+  MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
+  MSIX_HW_INT_CAUSES_REG_SW_ERR = BIT(25),
+  MSIX_HW_INT_CAUSES_REG_SCD = BIT(26),
+  MSIX_HW_INT_CAUSES_REG_FH_TX = BIT(27),
+  MSIX_HW_INT_CAUSES_REG_HW_ERR = BIT(29),
+  MSIX_HW_INT_CAUSES_REG_HAP = BIT(30),
 };
 
 #define MSIX_MIN_INTERRUPT_VECTORS 2
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-cfg.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-cfg.c
index 1242010..94591d0 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-cfg.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-cfg.c
@@ -33,10 +33,12 @@
  *
  *****************************************************************************/
 #include "iwl-dbg-cfg.h"
+
 #include <linux/export.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+
 #include "iwl-modparams.h"
 
 /* grab default values */
@@ -72,25 +74,25 @@
 
 static const char dbg_cfg_magic[] = "[IWL DEBUG CONFIG DATA]";
 
-#define DBG_CFG_LOADER(_type)                                                                     \
-    static void __maybe_unused dbg_cfg_load_##_type(const char* name, const char* val, void* out, \
-                                                    s64 min, s64 max) {                           \
-        _type r;                                                                                  \
-                                                                                                  \
-        if (kstrto##_type(val, 0, &r)) {                                                          \
-            printk(KERN_INFO "iwlwifi debug config: Invalid data for %s: %s\n", name, val);       \
-            return;                                                                               \
-        }                                                                                         \
-                                                                                                  \
-        if (min && max && (r < min || r > max)) {                                                 \
-            printk(KERN_INFO "iwlwifi debug config: value %u for %s out of range [%lld,%lld]\n",  \
-                   r, name, min, max);                                                            \
-            return;                                                                               \
-        }                                                                                         \
-                                                                                                  \
-        *(_type*)out = r;                                                                         \
-        printk(KERN_INFO "iwlwifi debug config: %s=%d\n", name, *(_type*)out);                    \
-    }
+#define DBG_CFG_LOADER(_type)                                                                   \
+  static void __maybe_unused dbg_cfg_load_##_type(const char* name, const char* val, void* out, \
+                                                  s64 min, s64 max) {                           \
+    _type r;                                                                                    \
+                                                                                                \
+    if (kstrto##_type(val, 0, &r)) {                                                            \
+      printk(KERN_INFO "iwlwifi debug config: Invalid data for %s: %s\n", name, val);           \
+      return;                                                                                   \
+    }                                                                                           \
+                                                                                                \
+    if (min && max && (r < min || r > max)) {                                                   \
+      printk(KERN_INFO "iwlwifi debug config: value %u for %s out of range [%lld,%lld]\n", r,   \
+             name, min, max);                                                                   \
+      return;                                                                                   \
+    }                                                                                           \
+                                                                                                \
+    *(_type*)out = r;                                                                           \
+    printk(KERN_INFO "iwlwifi debug config: %s=%d\n", name, *(_type*)out);                      \
+  }
 
 DBG_CFG_LOADER(uint8_t)
 DBG_CFG_LOADER(uint16_t)
@@ -100,70 +102,74 @@
 
 static void __maybe_unused dbg_cfg_load_bool(const char* name, const char* val, void* out, s64 min,
                                              s64 max) {
-    uint8_t v;
+  uint8_t v;
 
-    if (kstrtou8(val, 0, &v)) {
-        printk(KERN_INFO "iwlwifi debug config: Invalid data for %s: %s\n", name, val);
-    } else {
-        *(bool*)out = v;
-        printk(KERN_INFO "iwlwifi debug config: %s=%d\n", name, *(bool*)out);
-    }
+  if (kstrtou8(val, 0, &v)) {
+    printk(KERN_INFO "iwlwifi debug config: Invalid data for %s: %s\n", name, val);
+  } else {
+    *(bool*)out = v;
+    printk(KERN_INFO "iwlwifi debug config: %s=%d\n", name, *(bool*)out);
+  }
 }
 
 static int __maybe_unused dbg_cfg_load_bin(const char* name, const char* val,
                                            struct iwl_dbg_cfg_bin* out) {
-    int len = strlen(val);
-    uint8_t* data;
+  int len = strlen(val);
+  uint8_t* data;
 
-    if (len % 2) { goto error; }
-    len /= 2;
+  if (len % 2) {
+    goto error;
+  }
+  len /= 2;
 
-    data = kzalloc(len, GFP_KERNEL);
-    if (!data) { return -ENOMEM; }
-    if (hex2bin(data, val, len)) {
-        kfree(data);
-        goto error;
-    }
-    out->data = data;
-    out->len = len;
-    printk(KERN_INFO "iwlwifi debug config: %d bytes for %s\n", len, name);
-    return 0;
+  data = kzalloc(len, GFP_KERNEL);
+  if (!data) {
+    return -ENOMEM;
+  }
+  if (hex2bin(data, val, len)) {
+    kfree(data);
+    goto error;
+  }
+  out->data = data;
+  out->len = len;
+  printk(KERN_INFO "iwlwifi debug config: %d bytes for %s\n", len, name);
+  return 0;
 error:
-    printk(KERN_INFO "iwlwifi debug config: Invalid data for %s\n", name);
-    return -EINVAL;
+  printk(KERN_INFO "iwlwifi debug config: Invalid data for %s\n", name);
+  return -EINVAL;
 }
 
 static __maybe_unused void dbg_cfg_load_str(const char* name, const char* val, void* out, s64 min,
                                             s64 max) {
-    if (strlen(val) == 0) {
-        printk(KERN_INFO "iwlwifi debug config: Invalid data for %s\n", name);
-    } else {
-        *(char**)out = kstrdup(val, GFP_KERNEL);
-        printk(KERN_INFO "iwlwifi debug config: %s=%s\n", name, *(char**)out);
-    }
+  if (strlen(val) == 0) {
+    printk(KERN_INFO "iwlwifi debug config: Invalid data for %s\n", name);
+  } else {
+    *(char**)out = kstrdup(val, GFP_KERNEL);
+    printk(KERN_INFO "iwlwifi debug config: %s=%s\n", name, *(char**)out);
+  }
 }
 
 void iwl_dbg_cfg_free(struct iwl_dbg_cfg* dbgcfg) {
 #define IWL_DBG_CFG(t, n) /* nothing */
 #define IWL_DBG_CFG_STR(n) kfree(dbgcfg->n);
 #define IWL_DBG_CFG_NODEF(t, n) /* nothing */
-#define IWL_DBG_CFG_BIN(n)     \
-    do {                       \
-        kfree(dbgcfg->n.data); \
-        dbgcfg->n.data = NULL; \
-        dbgcfg->n.len = 0;     \
-    } while (0);
-#define IWL_DBG_CFG_BINA(n, max)      \
-    do {                              \
-        int i;                        \
-                                      \
-        for (i = 0; i < max; i++) {   \
-            kfree(dbgcfg->n[i].data); \
-            dbgcfg->n[i].data = NULL; \
-            dbgcfg->n[i].len = 0;     \
-        }                             \
-        dbgcfg->n_##n = 0;            \
-    } while (0);
+#define IWL_DBG_CFG_BIN(n) \
+  do {                     \
+    kfree(dbgcfg->n.data); \
+    dbgcfg->n.data = NULL; \
+    dbgcfg->n.len = 0;     \
+  } while (0);
+#define IWL_DBG_CFG_BINA(n, max) \
+  do {                           \
+    int i;                       \
+                                 \
+    for (i = 0; i < max; i++) {  \
+      kfree(dbgcfg->n[i].data);  \
+      dbgcfg->n[i].data = NULL;  \
+      dbgcfg->n[i].len = 0;      \
+    }                            \
+    dbgcfg->n_##n = 0;           \
+  } while (0);
 #define IWL_DBG_CFG_RANGE(t, n, min, max) /* nothing */
 #define IWL_MOD_PARAM(t, n)               /* nothing */
 #define IWL_MVM_MOD_PARAM(t, n)           /* nothing */
@@ -179,36 +185,36 @@
 }
 
 struct iwl_dbg_cfg_loader {
-    const char* name;
-    s64 min, max;
-    void (*loader)(const char* name, const char* val, void* out, s64 min, s64 max);
-    uint32_t offset;
+  const char* name;
+  s64 min, max;
+  void (*loader)(const char* name, const char* val, void* out, s64 min, s64 max);
+  uint32_t offset;
 };
 
 static const struct iwl_dbg_cfg_loader iwl_dbg_cfg_loaders[] = {
-#define IWL_DBG_CFG(t, n)                          \
-    {                                              \
-        .name = #n,                                \
-        .offset = offsetof(struct iwl_dbg_cfg, n), \
-        .loader = dbg_cfg_load_##t,                \
-    },
-#define IWL_DBG_CFG_STR(n)                         \
-    {                                              \
-        .name = #n,                                \
-        .offset = offsetof(struct iwl_dbg_cfg, n), \
-        .loader = dbg_cfg_load_str,                \
-    },
+#define IWL_DBG_CFG(t, n)                        \
+  {                                              \
+      .name = #n,                                \
+      .offset = offsetof(struct iwl_dbg_cfg, n), \
+      .loader = dbg_cfg_load_##t,                \
+  },
+#define IWL_DBG_CFG_STR(n)                       \
+  {                                              \
+      .name = #n,                                \
+      .offset = offsetof(struct iwl_dbg_cfg, n), \
+      .loader = dbg_cfg_load_str,                \
+  },
 #define IWL_DBG_CFG_NODEF(t, n) IWL_DBG_CFG(t, n)
 #define IWL_DBG_CFG_BIN(n)       /* not using this */
 #define IWL_DBG_CFG_BINA(n, max) /* not using this */
-#define IWL_DBG_CFG_RANGE(t, n, _min, _max)        \
-    {                                              \
-        .name = #n,                                \
-        .offset = offsetof(struct iwl_dbg_cfg, n), \
-        .min = _min,                               \
-        .max = _max,                               \
-        .loader = dbg_cfg_load_##t,                \
-    },
+#define IWL_DBG_CFG_RANGE(t, n, _min, _max)      \
+  {                                              \
+      .name = #n,                                \
+      .offset = offsetof(struct iwl_dbg_cfg, n), \
+      .min = _min,                               \
+      .max = _max,                               \
+      .loader = dbg_cfg_load_##t,                \
+  },
 #define IWL_MOD_PARAM(t, n)     /* no using this */
 #define IWL_MVM_MOD_PARAM(t, n) /* no using this */
 #include "iwl-dbg-cfg.h"
@@ -223,101 +229,115 @@
 };
 
 void iwl_dbg_cfg_load_ini(struct device* dev, struct iwl_dbg_cfg* dbgcfg) {
-    const struct firmware* fw;
-    char *data, *end, *pos;
-    int err;
+  const struct firmware* fw;
+  char *data, *end, *pos;
+  int err;
 
-    if (dbgcfg->loaded) { return; }
+  if (dbgcfg->loaded) {
+    return;
+  }
 
-    /* TODO: maybe add a per-device file? */
-    err = request_firmware(&fw, "iwl-dbg-cfg.ini", dev);
-    if (err) { return; }
+  /* TODO: maybe add a per-device file? */
+  err = request_firmware(&fw, "iwl-dbg-cfg.ini", dev);
+  if (err) {
+    return;
+  }
 
-    /* must be ini file style with magic section header */
-    if (fw->size < strlen(dbg_cfg_magic)) { goto release; }
-    if (memcmp(fw->data, dbg_cfg_magic, strlen(dbg_cfg_magic))) {
-        printk(KERN_INFO "iwlwifi debug config: file is malformed\n");
-        goto release;
+  /* must be ini file style with magic section header */
+  if (fw->size < strlen(dbg_cfg_magic)) {
+    goto release;
+  }
+  if (memcmp(fw->data, dbg_cfg_magic, strlen(dbg_cfg_magic))) {
+    printk(KERN_INFO "iwlwifi debug config: file is malformed\n");
+    goto release;
+  }
+
+  /* +1 guarantees the last line gets NUL-terminated even without \n */
+  data = kzalloc(fw->size - strlen(dbg_cfg_magic) + 1, GFP_KERNEL);
+  if (!data) {
+    goto release;
+  }
+  memcpy(data, fw->data + strlen(dbg_cfg_magic), fw->size - strlen(dbg_cfg_magic));
+  end = data + fw->size - strlen(dbg_cfg_magic);
+  /* replace CR/LF with NULs to make parsing easier */
+  for (pos = data; pos < end; pos++) {
+    if (*pos == '\n' || *pos == '\r') {
+      *pos = '\0';
+    }
+  }
+
+  pos = data;
+  while (pos < end) {
+    const char* line = pos;
+    bool loaded = false;
+    int idx;
+
+    /* skip to next line */
+    while (pos < end && *pos) {
+      pos++;
+    }
+    /* skip to start of next line, over empty ones if any */
+    while (pos < end && !*pos) {
+      pos++;
     }
 
-    /* +1 guarantees the last line gets NUL-terminated even without \n */
-    data = kzalloc(fw->size - strlen(dbg_cfg_magic) + 1, GFP_KERNEL);
-    if (!data) { goto release; }
-    memcpy(data, fw->data + strlen(dbg_cfg_magic), fw->size - strlen(dbg_cfg_magic));
-    end = data + fw->size - strlen(dbg_cfg_magic);
-    /* replace CR/LF with NULs to make parsing easier */
-    for (pos = data; pos < end; pos++) {
-        if (*pos == '\n' || *pos == '\r') { *pos = '\0'; }
+    /* skip empty lines and comments */
+    if (!*line || *line == '#') {
+      continue;
     }
 
-    pos = data;
-    while (pos < end) {
-        const char* line = pos;
-        bool loaded = false;
-        int idx;
+    for (idx = 0; idx < ARRAY_SIZE(iwl_dbg_cfg_loaders); idx++) {
+      const struct iwl_dbg_cfg_loader* l;
 
-        /* skip to next line */
-        while (pos < end && *pos) {
-            pos++;
-        }
-        /* skip to start of next line, over empty ones if any */
-        while (pos < end && !*pos) {
-            pos++;
-        }
+      l = &iwl_dbg_cfg_loaders[idx];
 
-        /* skip empty lines and comments */
-        if (!*line || *line == '#') { continue; }
+      if (strncmp(l->name, line, strlen(l->name)) == 0 && line[strlen(l->name)] == '=') {
+        l->loader(l->name, line + strlen(l->name) + 1, (void*)((uint8_t*)dbgcfg + l->offset),
+                  l->min, l->max);
+        loaded = true;
+      }
+    }
 
-        for (idx = 0; idx < ARRAY_SIZE(iwl_dbg_cfg_loaders); idx++) {
-            const struct iwl_dbg_cfg_loader* l;
-
-            l = &iwl_dbg_cfg_loaders[idx];
-
-            if (strncmp(l->name, line, strlen(l->name)) == 0 && line[strlen(l->name)] == '=') {
-                l->loader(l->name, line + strlen(l->name) + 1,
-                          (void*)((uint8_t*)dbgcfg + l->offset), l->min, l->max);
-                loaded = true;
-            }
-        }
-
-        /*
-         * if it was loaded by the loaders, don't bother checking
-         * more or printing an error message below
-         */
-        if (loaded) { continue; }
+    /*
+     * if it was loaded by the loaders, don't bother checking
+     * more or printing an error message below
+     */
+    if (loaded) {
+      continue;
+    }
 
 #define IWL_DBG_CFG(t, n)       /* handled above */
 #define IWL_DBG_CFG_NODEF(t, n) /* handled above */
-#define IWL_DBG_CFG_BIN(n)                                               \
-    if (strncmp(#n, line, strlen(#n)) == 0 && line[strlen(#n)] == '=') { \
-        dbg_cfg_load_bin(#n, line + strlen(#n) + 1, &dbgcfg->n);         \
-        continue;                                                        \
-    }
-#define IWL_DBG_CFG_BINA(n, max)                                                     \
-    if (strncmp(#n, line, strlen(#n)) == 0 && line[strlen(#n)] == '=') {             \
-        if (dbgcfg->n_##n >= max) {                                                  \
-            printk(KERN_INFO "iwlwifi debug config: " #n " given too many times\n"); \
-            continue;                                                                \
-        }                                                                            \
-        if (!dbg_cfg_load_bin(#n, line + strlen(#n) + 1, &dbgcfg->n[dbgcfg->n_##n])) \
-            dbgcfg->n_##n++;                                                         \
-        continue;                                                                    \
-    }
+#define IWL_DBG_CFG_BIN(n)                                             \
+  if (strncmp(#n, line, strlen(#n)) == 0 && line[strlen(#n)] == '=') { \
+    dbg_cfg_load_bin(#n, line + strlen(#n) + 1, &dbgcfg->n);           \
+    continue;                                                          \
+  }
+#define IWL_DBG_CFG_BINA(n, max)                                                 \
+  if (strncmp(#n, line, strlen(#n)) == 0 && line[strlen(#n)] == '=') {           \
+    if (dbgcfg->n_##n >= max) {                                                  \
+      printk(KERN_INFO "iwlwifi debug config: " #n " given too many times\n");   \
+      continue;                                                                  \
+    }                                                                            \
+    if (!dbg_cfg_load_bin(#n, line + strlen(#n) + 1, &dbgcfg->n[dbgcfg->n_##n])) \
+      dbgcfg->n_##n++;                                                           \
+    continue;                                                                    \
+  }
 #define IWL_DBG_CFG_RANGE(t, n, min, max) /* handled above */
 #define IWL_DBG_CFG_STR(n)                /* handled above */
-#define IWL_MOD_PARAM(t, n)                                                       \
-    if (strncmp(#n, line, strlen(#n)) == 0 && line[strlen(#n)] == '=') {          \
-        dbg_cfg_load_##t(#n, line + strlen(#n) + 1, &iwlwifi_mod_params.n, 0, 0); \
-        continue;                                                                 \
-    }
-#define IWL_MVM_MOD_PARAM(t, n)                                                                   \
-    {                                                                                             \
-        if (strncmp("mvm." #n, line, strlen("mvm." #n)) == 0 && line[strlen("mvm." #n)] == '=') { \
-            dbg_cfg_load_##t("mvm." #n, line + strlen("mvm." #n) + 1, &dbgcfg->mvm_##n, 0, 0);    \
-            dbgcfg->__mvm_mod_param_##n = true;                                                   \
-            continue;                                                                             \
-        }                                                                                         \
-    }
+#define IWL_MOD_PARAM(t, n)                                                   \
+  if (strncmp(#n, line, strlen(#n)) == 0 && line[strlen(#n)] == '=') {        \
+    dbg_cfg_load_##t(#n, line + strlen(#n) + 1, &iwlwifi_mod_params.n, 0, 0); \
+    continue;                                                                 \
+  }
+#define IWL_MVM_MOD_PARAM(t, n)                                                               \
+  {                                                                                           \
+    if (strncmp("mvm." #n, line, strlen("mvm." #n)) == 0 && line[strlen("mvm." #n)] == '=') { \
+      dbg_cfg_load_##t("mvm." #n, line + strlen("mvm." #n) + 1, &dbgcfg->mvm_##n, 0, 0);      \
+      dbgcfg->__mvm_mod_param_##n = true;                                                     \
+      continue;                                                                               \
+    }                                                                                         \
+  }
 #include "iwl-dbg-cfg.h"
 #undef IWL_DBG_CFG
 #undef IWL_DBG_CFG_STR
@@ -327,11 +347,11 @@
 #undef IWL_DBG_CFG_RANGE
 #undef IWL_MOD_PARAM
 #undef IWL_MVM_MOD_PARAM
-        printk(KERN_INFO "iwlwifi debug config: failed to load line \"%s\"\n", line);
-    }
+    printk(KERN_INFO "iwlwifi debug config: failed to load line \"%s\"\n", line);
+  }
 
-    kfree(data);
+  kfree(data);
 release:
-    release_firmware(fw);
-    dbgcfg->loaded = true;
+  release_firmware(fw);
+  dbgcfg->loaded = true;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-cfg.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-cfg.h
index c44bc9e..9c45136 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-cfg.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-cfg.h
@@ -45,233 +45,233 @@
 #include <linux/types.h>
 
 struct iwl_dbg_cfg_bin {
-    const void* data;
-    unsigned int len;
+  const void* data;
+  unsigned int len;
 };
 
 struct iwl_dbg_cfg {
-    bool loaded;
+  bool loaded;
 
 #define IWL_DBG_CFG(type, name) type name;
 #define IWL_DBG_CFG_NODEF(type, name) type name;
 #define IWL_DBG_CFG_BIN(name) struct iwl_dbg_cfg_bin name;
 #define IWL_DBG_CFG_STR(name) const char* name;
-#define IWL_DBG_CFG_BINA(name, max)   \
-    struct iwl_dbg_cfg_bin name[max]; \
-    int n_##name;
+#define IWL_DBG_CFG_BINA(name, max) \
+  struct iwl_dbg_cfg_bin name[max]; \
+  int n_##name;
 #define IWL_DBG_CFG_RANGE(type, name, min, max) IWL_DBG_CFG(type, name)
 #define IWL_MOD_PARAM(type, name) /* do nothing */
 #define IWL_MVM_MOD_PARAM(type, name) \
-    type mvm_##name;                  \
-    bool __mvm_mod_param_##name;
+  type mvm_##name;                    \
+  bool __mvm_mod_param_##name;
 
 #endif /* DBG_CFG_REINCLUDE */
 #if IS_ENABLED(CPTCFG_IWLXVT)
-    IWL_DBG_CFG(uint32_t, XVT_DEFAULT_DBGM_MEM_POWER)
-    IWL_DBG_CFG(uint32_t, XVT_DEFAULT_DBGM_LMAC_MASK)
-    IWL_DBG_CFG(uint32_t, XVT_DEFAULT_DBGM_PRPH_MASK)
-    IWL_MOD_PARAM(bool, xvt_default_mode)
+  IWL_DBG_CFG(uint32_t, XVT_DEFAULT_DBGM_MEM_POWER)
+  IWL_DBG_CFG(uint32_t, XVT_DEFAULT_DBGM_LMAC_MASK)
+  IWL_DBG_CFG(uint32_t, XVT_DEFAULT_DBGM_PRPH_MASK)
+  IWL_MOD_PARAM(bool, xvt_default_mode)
 #endif
-    IWL_DBG_CFG_NODEF(bool, disable_52GHz)
-    IWL_DBG_CFG_NODEF(bool, disable_24GHz)
+  IWL_DBG_CFG_NODEF(bool, disable_52GHz)
+  IWL_DBG_CFG_NODEF(bool, disable_24GHz)
 #if IS_ENABLED(CPTCFG_IWLMVM) || IS_ENABLED(CPTCFG_IWLFMAC)
-    IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_OVERRIDE_CONTROL)
-    IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_INIT_FLOW)
-    IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_INIT_EVENT)
-    IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_D0_FLOW)
-    IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_D0_EVENT)
-    IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_D3_FLOW)
-    IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_D3_EVENT)
-    IWL_DBG_CFG_NODEF(bool, enable_timestamp_marker_cmd)
+  IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_OVERRIDE_CONTROL)
+  IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_INIT_FLOW)
+  IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_INIT_EVENT)
+  IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_D0_FLOW)
+  IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_D0_EVENT)
+  IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_D3_FLOW)
+  IWL_DBG_CFG_NODEF(uint32_t, MVM_CALIB_D3_EVENT)
+  IWL_DBG_CFG_NODEF(bool, enable_timestamp_marker_cmd)
 #endif
 #if IS_ENABLED(CPTCFG_IWLMVM)
-    IWL_DBG_CFG(uint32_t, MVM_DEFAULT_PS_TX_DATA_TIMEOUT)
-    IWL_DBG_CFG(uint32_t, MVM_DEFAULT_PS_RX_DATA_TIMEOUT)
-    IWL_DBG_CFG(uint32_t, MVM_WOWLAN_PS_TX_DATA_TIMEOUT)
-    IWL_DBG_CFG(uint32_t, MVM_WOWLAN_PS_RX_DATA_TIMEOUT)
-    IWL_DBG_CFG(uint32_t, MVM_SHORT_PS_TX_DATA_TIMEOUT)
-    IWL_DBG_CFG(uint32_t, MVM_SHORT_PS_RX_DATA_TIMEOUT)
-    IWL_DBG_CFG(uint32_t, MVM_UAPSD_TX_DATA_TIMEOUT)
-    IWL_DBG_CFG(uint32_t, MVM_UAPSD_RX_DATA_TIMEOUT)
-    IWL_DBG_CFG(uint32_t, MVM_UAPSD_QUEUES)
-    IWL_DBG_CFG_NODEF(bool, MVM_USE_PS_POLL)
-    IWL_DBG_CFG(uint8_t, MVM_PS_HEAVY_TX_THLD_PACKETS)
-    IWL_DBG_CFG(uint8_t, MVM_PS_HEAVY_RX_THLD_PACKETS)
-    IWL_DBG_CFG(uint8_t, MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS)
-    IWL_DBG_CFG(uint8_t, MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS)
-    IWL_DBG_CFG(uint8_t, MVM_PS_HEAVY_TX_THLD_PERCENT)
-    IWL_DBG_CFG(uint8_t, MVM_PS_HEAVY_RX_THLD_PERCENT)
-    IWL_DBG_CFG(uint16_t, MVM_PS_SNOOZE_INTERVAL)
-    IWL_DBG_CFG(uint16_t, MVM_PS_SNOOZE_WINDOW)
-    IWL_DBG_CFG(uint16_t, MVM_WOWLAN_PS_SNOOZE_WINDOW)
-    IWL_DBG_CFG(uint8_t, MVM_LOWLAT_QUOTA_MIN_PERCENT)
-    IWL_DBG_CFG(uint16_t, MVM_BT_COEX_EN_RED_TXP_THRESH)
-    IWL_DBG_CFG(uint16_t, MVM_BT_COEX_DIS_RED_TXP_THRESH)
-    IWL_DBG_CFG(uint32_t, MVM_BT_COEX_ANTENNA_COUPLING_THRS)
-    IWL_DBG_CFG(uint32_t, MVM_BT_COEX_MPLUT_REG0)
-    IWL_DBG_CFG(uint32_t, MVM_BT_COEX_MPLUT_REG1)
-    IWL_DBG_CFG(bool, MVM_BT_COEX_SYNC2SCO)
-    IWL_DBG_CFG(bool, MVM_BT_COEX_MPLUT)
-    IWL_DBG_CFG(bool, MVM_BT_COEX_TTC)
-    IWL_DBG_CFG(bool, MVM_BT_COEX_RRC)
-    IWL_DBG_CFG(bool, MVM_FW_MCAST_FILTER_PASS_ALL)
-    IWL_DBG_CFG(bool, MVM_FW_BCAST_FILTER_PASS_ALL)
-    IWL_DBG_CFG(bool, MVM_TOF_IS_RESPONDER)
-    IWL_DBG_CFG(bool, MVM_P2P_LOWLATENCY_PS_ENABLE)
-    IWL_DBG_CFG(bool, MVM_SW_TX_CSUM_OFFLOAD)
-    IWL_DBG_CFG(bool, MVM_HW_CSUM_DISABLE)
-    IWL_DBG_CFG(bool, MVM_PARSE_NVM)
-    IWL_DBG_CFG(bool, MVM_ADWELL_ENABLE)
-    IWL_DBG_CFG(uint16_t, MVM_ADWELL_MAX_BUDGET)
-    IWL_DBG_CFG(uint32_t, MVM_TCM_LOAD_MEDIUM_THRESH)
-    IWL_DBG_CFG(uint32_t, MVM_TCM_LOAD_HIGH_THRESH)
-    IWL_DBG_CFG(uint32_t, MVM_TCM_LOWLAT_ENABLE_THRESH)
-    IWL_DBG_CFG(uint32_t, MVM_UAPSD_NONAGG_PERIOD)
-    IWL_DBG_CFG_RANGE(uint8_t, MVM_UAPSD_NOAGG_LIST_LEN, 1, IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM)
+  IWL_DBG_CFG(uint32_t, MVM_DEFAULT_PS_TX_DATA_TIMEOUT)
+  IWL_DBG_CFG(uint32_t, MVM_DEFAULT_PS_RX_DATA_TIMEOUT)
+  IWL_DBG_CFG(uint32_t, MVM_WOWLAN_PS_TX_DATA_TIMEOUT)
+  IWL_DBG_CFG(uint32_t, MVM_WOWLAN_PS_RX_DATA_TIMEOUT)
+  IWL_DBG_CFG(uint32_t, MVM_SHORT_PS_TX_DATA_TIMEOUT)
+  IWL_DBG_CFG(uint32_t, MVM_SHORT_PS_RX_DATA_TIMEOUT)
+  IWL_DBG_CFG(uint32_t, MVM_UAPSD_TX_DATA_TIMEOUT)
+  IWL_DBG_CFG(uint32_t, MVM_UAPSD_RX_DATA_TIMEOUT)
+  IWL_DBG_CFG(uint32_t, MVM_UAPSD_QUEUES)
+  IWL_DBG_CFG_NODEF(bool, MVM_USE_PS_POLL)
+  IWL_DBG_CFG(uint8_t, MVM_PS_HEAVY_TX_THLD_PACKETS)
+  IWL_DBG_CFG(uint8_t, MVM_PS_HEAVY_RX_THLD_PACKETS)
+  IWL_DBG_CFG(uint8_t, MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS)
+  IWL_DBG_CFG(uint8_t, MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS)
+  IWL_DBG_CFG(uint8_t, MVM_PS_HEAVY_TX_THLD_PERCENT)
+  IWL_DBG_CFG(uint8_t, MVM_PS_HEAVY_RX_THLD_PERCENT)
+  IWL_DBG_CFG(uint16_t, MVM_PS_SNOOZE_INTERVAL)
+  IWL_DBG_CFG(uint16_t, MVM_PS_SNOOZE_WINDOW)
+  IWL_DBG_CFG(uint16_t, MVM_WOWLAN_PS_SNOOZE_WINDOW)
+  IWL_DBG_CFG(uint8_t, MVM_LOWLAT_QUOTA_MIN_PERCENT)
+  IWL_DBG_CFG(uint16_t, MVM_BT_COEX_EN_RED_TXP_THRESH)
+  IWL_DBG_CFG(uint16_t, MVM_BT_COEX_DIS_RED_TXP_THRESH)
+  IWL_DBG_CFG(uint32_t, MVM_BT_COEX_ANTENNA_COUPLING_THRS)
+  IWL_DBG_CFG(uint32_t, MVM_BT_COEX_MPLUT_REG0)
+  IWL_DBG_CFG(uint32_t, MVM_BT_COEX_MPLUT_REG1)
+  IWL_DBG_CFG(bool, MVM_BT_COEX_SYNC2SCO)
+  IWL_DBG_CFG(bool, MVM_BT_COEX_MPLUT)
+  IWL_DBG_CFG(bool, MVM_BT_COEX_TTC)
+  IWL_DBG_CFG(bool, MVM_BT_COEX_RRC)
+  IWL_DBG_CFG(bool, MVM_FW_MCAST_FILTER_PASS_ALL)
+  IWL_DBG_CFG(bool, MVM_FW_BCAST_FILTER_PASS_ALL)
+  IWL_DBG_CFG(bool, MVM_TOF_IS_RESPONDER)
+  IWL_DBG_CFG(bool, MVM_P2P_LOWLATENCY_PS_ENABLE)
+  IWL_DBG_CFG(bool, MVM_SW_TX_CSUM_OFFLOAD)
+  IWL_DBG_CFG(bool, MVM_HW_CSUM_DISABLE)
+  IWL_DBG_CFG(bool, MVM_PARSE_NVM)
+  IWL_DBG_CFG(bool, MVM_ADWELL_ENABLE)
+  IWL_DBG_CFG(uint16_t, MVM_ADWELL_MAX_BUDGET)
+  IWL_DBG_CFG(uint32_t, MVM_TCM_LOAD_MEDIUM_THRESH)
+  IWL_DBG_CFG(uint32_t, MVM_TCM_LOAD_HIGH_THRESH)
+  IWL_DBG_CFG(uint32_t, MVM_TCM_LOWLAT_ENABLE_THRESH)
+  IWL_DBG_CFG(uint32_t, MVM_UAPSD_NONAGG_PERIOD)
+  IWL_DBG_CFG_RANGE(uint8_t, MVM_UAPSD_NOAGG_LIST_LEN, 1, IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM)
 #ifdef CPTCFG_IWLMVM_ADVANCED_QUOTA_MGMT
-    IWL_DBG_CFG(bool, MVM_DYNQUOTA_DISABLED)
-    IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_MIN_PERCENT, 0, 100)
-    IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_GUARD_PERCENT, 0, 100)
-    IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_HIGH_WM_PERCENT, 0, 100)
-    IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_LOW_WM_PERCENT, 0, 100)
-    IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_START_PERCENT, 0, 100)
-    IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_INC_HIGH_PERCENT, 0, 100)
-    IWL_DBG_CFG_RANGE(uint8_t, MVM_LOWLAT_QUOTA_MIN_PCT_P2PCLIENT, 0, 100)
-    IWL_DBG_CFG_RANGE(uint8_t, MVM_LOWLAT_QUOTA_MIN_PCT_P2PGO, 0, 100)
+  IWL_DBG_CFG(bool, MVM_DYNQUOTA_DISABLED)
+  IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_MIN_PERCENT, 0, 100)
+  IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_GUARD_PERCENT, 0, 100)
+  IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_HIGH_WM_PERCENT, 0, 100)
+  IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_LOW_WM_PERCENT, 0, 100)
+  IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_START_PERCENT, 0, 100)
+  IWL_DBG_CFG_RANGE(uint8_t, MVM_DYNQUOTA_INC_HIGH_PERCENT, 0, 100)
+  IWL_DBG_CFG_RANGE(uint8_t, MVM_LOWLAT_QUOTA_MIN_PCT_P2PCLIENT, 0, 100)
+  IWL_DBG_CFG_RANGE(uint8_t, MVM_LOWLAT_QUOTA_MIN_PCT_P2PGO, 0, 100)
 #endif /* CPTCFG_IWLMVM_ADVANCED_QUOTA_MGMT */
-    IWL_DBG_CFG(uint8_t, MVM_QUOTA_THRESHOLD)
-    IWL_DBG_CFG(uint8_t, MVM_RS_RSSI_BASED_INIT_RATE)
-    IWL_DBG_CFG(uint8_t, MVM_RS_80_20_FAR_RANGE_TWEAK)
-    IWL_DBG_CFG(uint8_t, MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE)
-    IWL_DBG_CFG(uint8_t, MVM_RS_HT_VHT_RETRIES_PER_RATE)
-    IWL_DBG_CFG(uint8_t, MVM_RS_HT_VHT_RETRIES_PER_RATE_TW)
-    IWL_DBG_CFG(uint8_t, MVM_RS_INITIAL_MIMO_NUM_RATES)
-    IWL_DBG_CFG(uint8_t, MVM_RS_INITIAL_SISO_NUM_RATES)
-    IWL_DBG_CFG(uint8_t, MVM_RS_INITIAL_LEGACY_NUM_RATES)
-    IWL_DBG_CFG(uint8_t, MVM_RS_INITIAL_LEGACY_RETRIES)
-    IWL_DBG_CFG(uint8_t, MVM_RS_SECONDARY_LEGACY_RETRIES)
-    IWL_DBG_CFG(uint8_t, MVM_RS_SECONDARY_LEGACY_NUM_RATES)
-    IWL_DBG_CFG(uint8_t, MVM_RS_SECONDARY_SISO_NUM_RATES)
-    IWL_DBG_CFG(uint8_t, MVM_RS_SECONDARY_SISO_RETRIES)
-    IWL_DBG_CFG(uint8_t, MVM_RS_RATE_MIN_FAILURE_TH)
-    IWL_DBG_CFG(uint8_t, MVM_RS_RATE_MIN_SUCCESS_TH)
-    IWL_DBG_CFG(uint8_t, MVM_RS_STAY_IN_COLUMN_TIMEOUT)
-    IWL_DBG_CFG(uint8_t, MVM_RS_IDLE_TIMEOUT)
-    IWL_DBG_CFG(uint8_t, MVM_RS_MISSED_RATE_MAX)
-    IWL_DBG_CFG(uint16_t, MVM_RS_LEGACY_FAILURE_LIMIT)
-    IWL_DBG_CFG(uint16_t, MVM_RS_LEGACY_SUCCESS_LIMIT)
-    IWL_DBG_CFG(uint16_t, MVM_RS_LEGACY_TABLE_COUNT)
-    IWL_DBG_CFG(uint16_t, MVM_RS_NON_LEGACY_FAILURE_LIMIT)
-    IWL_DBG_CFG(uint16_t, MVM_RS_NON_LEGACY_SUCCESS_LIMIT)
-    IWL_DBG_CFG(uint16_t, MVM_RS_NON_LEGACY_TABLE_COUNT)
-    IWL_DBG_CFG(uint16_t, MVM_RS_SR_FORCE_DECREASE)
-    IWL_DBG_CFG(uint16_t, MVM_RS_SR_NO_DECREASE)
-    IWL_DBG_CFG(uint16_t, MVM_RS_AGG_TIME_LIMIT)
-    IWL_DBG_CFG(uint8_t, MVM_RS_AGG_DISABLE_START)
-    IWL_DBG_CFG(uint8_t, MVM_RS_AGG_START_THRESHOLD)
-    IWL_DBG_CFG(uint16_t, MVM_RS_TPC_SR_FORCE_INCREASE)
-    IWL_DBG_CFG(uint16_t, MVM_RS_TPC_SR_NO_INCREASE)
-    IWL_DBG_CFG(uint8_t, MVM_RS_TPC_TX_POWER_STEP)
-    IWL_DBG_CFG(bool, MVM_ENABLE_EBS)
-    IWL_MVM_MOD_PARAM(int, power_scheme)
-    IWL_MVM_MOD_PARAM(bool, init_dbg)
-    IWL_MVM_MOD_PARAM(bool, tfd_q_hang_detect)
+  IWL_DBG_CFG(uint8_t, MVM_QUOTA_THRESHOLD)
+  IWL_DBG_CFG(uint8_t, MVM_RS_RSSI_BASED_INIT_RATE)
+  IWL_DBG_CFG(uint8_t, MVM_RS_80_20_FAR_RANGE_TWEAK)
+  IWL_DBG_CFG(uint8_t, MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE)
+  IWL_DBG_CFG(uint8_t, MVM_RS_HT_VHT_RETRIES_PER_RATE)
+  IWL_DBG_CFG(uint8_t, MVM_RS_HT_VHT_RETRIES_PER_RATE_TW)
+  IWL_DBG_CFG(uint8_t, MVM_RS_INITIAL_MIMO_NUM_RATES)
+  IWL_DBG_CFG(uint8_t, MVM_RS_INITIAL_SISO_NUM_RATES)
+  IWL_DBG_CFG(uint8_t, MVM_RS_INITIAL_LEGACY_NUM_RATES)
+  IWL_DBG_CFG(uint8_t, MVM_RS_INITIAL_LEGACY_RETRIES)
+  IWL_DBG_CFG(uint8_t, MVM_RS_SECONDARY_LEGACY_RETRIES)
+  IWL_DBG_CFG(uint8_t, MVM_RS_SECONDARY_LEGACY_NUM_RATES)
+  IWL_DBG_CFG(uint8_t, MVM_RS_SECONDARY_SISO_NUM_RATES)
+  IWL_DBG_CFG(uint8_t, MVM_RS_SECONDARY_SISO_RETRIES)
+  IWL_DBG_CFG(uint8_t, MVM_RS_RATE_MIN_FAILURE_TH)
+  IWL_DBG_CFG(uint8_t, MVM_RS_RATE_MIN_SUCCESS_TH)
+  IWL_DBG_CFG(uint8_t, MVM_RS_STAY_IN_COLUMN_TIMEOUT)
+  IWL_DBG_CFG(uint8_t, MVM_RS_IDLE_TIMEOUT)
+  IWL_DBG_CFG(uint8_t, MVM_RS_MISSED_RATE_MAX)
+  IWL_DBG_CFG(uint16_t, MVM_RS_LEGACY_FAILURE_LIMIT)
+  IWL_DBG_CFG(uint16_t, MVM_RS_LEGACY_SUCCESS_LIMIT)
+  IWL_DBG_CFG(uint16_t, MVM_RS_LEGACY_TABLE_COUNT)
+  IWL_DBG_CFG(uint16_t, MVM_RS_NON_LEGACY_FAILURE_LIMIT)
+  IWL_DBG_CFG(uint16_t, MVM_RS_NON_LEGACY_SUCCESS_LIMIT)
+  IWL_DBG_CFG(uint16_t, MVM_RS_NON_LEGACY_TABLE_COUNT)
+  IWL_DBG_CFG(uint16_t, MVM_RS_SR_FORCE_DECREASE)
+  IWL_DBG_CFG(uint16_t, MVM_RS_SR_NO_DECREASE)
+  IWL_DBG_CFG(uint16_t, MVM_RS_AGG_TIME_LIMIT)
+  IWL_DBG_CFG(uint8_t, MVM_RS_AGG_DISABLE_START)
+  IWL_DBG_CFG(uint8_t, MVM_RS_AGG_START_THRESHOLD)
+  IWL_DBG_CFG(uint16_t, MVM_RS_TPC_SR_FORCE_INCREASE)
+  IWL_DBG_CFG(uint16_t, MVM_RS_TPC_SR_NO_INCREASE)
+  IWL_DBG_CFG(uint8_t, MVM_RS_TPC_TX_POWER_STEP)
+  IWL_DBG_CFG(bool, MVM_ENABLE_EBS)
+  IWL_MVM_MOD_PARAM(int, power_scheme)
+  IWL_MVM_MOD_PARAM(bool, init_dbg)
+  IWL_MVM_MOD_PARAM(bool, tfd_q_hang_detect)
 #endif /* CPTCFG_IWLMVM */
 #ifdef CPTCFG_IWLWIFI_FRQ_MGR_TEST
-    IWL_DBG_CFG_NODEF(uint8_t, fm_debug_mode)
+  IWL_DBG_CFG_NODEF(uint8_t, fm_debug_mode)
 #endif
 #if IS_ENABLED(CPTCFG_IWLTEST)
-    IWL_MOD_PARAM(bool, trans_test)
+  IWL_MOD_PARAM(bool, trans_test)
 #endif
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
-    IWL_DBG_CFG_NODEF(uint32_t, dnt_out_mode)
-    /* XXX: should be dbgm_ or dbg_mon_ for consistency? */
-    IWL_DBG_CFG_NODEF(uint32_t, dbm_destination_path)
-    /* XXX: should be dbg_mon_ for consistency? */
-    IWL_DBG_CFG_NODEF(uint32_t, dbgm_enable_mode)
-    IWL_DBG_CFG_NODEF(uint32_t, dbgm_mem_power)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_flags)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_ctl_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_ctl_val)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_buff_base_addr_reg_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_buff_end_addr_reg_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_data_sel_ctl_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_data_sel_ctl_val)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_mc_msk_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_mc_msk_val)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_mask_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_mask_val)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_start_mask_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_start_mask_val)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_end_mask_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_end_mask_val)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_end_threshold_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_end_threshold_val)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_period_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_period_val)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_wr_ptr_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_cyc_cnt_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_dmarb_rd_ctl_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_dmarb_rd_data_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_marbh_conf_reg)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_marbh_conf_mask)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_marbh_access_type)
-    IWL_DBG_CFG_NODEF(uint32_t, dbgc_hb_base_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbgc_hb_end_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbgc_dram_wrptr_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbgc_wrap_count_addr)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mipi_conf_reg)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mipi_conf_mask)
-    IWL_DBG_CFG_NODEF(uint32_t, dbgc_hb_base_val_smem)
-    IWL_DBG_CFG_NODEF(uint32_t, dbgc_hb_end_val_smem)
-    IWL_DBG_CFG_BIN(dbg_conf_monitor_host_command)
-    IWL_DBG_CFG_BIN(log_level_cmd)
-    IWL_DBG_CFG_BINA(ldbg_cmd, 32)
-    IWL_DBG_CFG_NODEF(uint8_t, log_level_cmd_id)
-    IWL_DBG_CFG_NODEF(uint8_t, dbg_conf_monitor_cmd_id)
-    IWL_DBG_CFG_NODEF(uint8_t, ldbg_cmd_nums)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_buff_base_addr_reg_addr_b_step)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_buff_end_addr_reg_addr_b_step)
-    IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_wr_ptr_addr_b_step)
+  IWL_DBG_CFG_NODEF(uint32_t, dnt_out_mode)
+  /* XXX: should be dbgm_ or dbg_mon_ for consistency? */
+  IWL_DBG_CFG_NODEF(uint32_t, dbm_destination_path)
+  /* XXX: should be dbg_mon_ for consistency? */
+  IWL_DBG_CFG_NODEF(uint32_t, dbgm_enable_mode)
+  IWL_DBG_CFG_NODEF(uint32_t, dbgm_mem_power)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_flags)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_ctl_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_ctl_val)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_buff_base_addr_reg_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_buff_end_addr_reg_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_data_sel_ctl_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_data_sel_ctl_val)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_mc_msk_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_mc_msk_val)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_mask_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_mask_val)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_start_mask_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_start_mask_val)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_end_mask_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_end_mask_val)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_end_threshold_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_end_threshold_val)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_period_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_sample_period_val)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_wr_ptr_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_cyc_cnt_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_dmarb_rd_ctl_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_dmarb_rd_data_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_marbh_conf_reg)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_marbh_conf_mask)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_marbh_access_type)
+  IWL_DBG_CFG_NODEF(uint32_t, dbgc_hb_base_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbgc_hb_end_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbgc_dram_wrptr_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbgc_wrap_count_addr)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mipi_conf_reg)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mipi_conf_mask)
+  IWL_DBG_CFG_NODEF(uint32_t, dbgc_hb_base_val_smem)
+  IWL_DBG_CFG_NODEF(uint32_t, dbgc_hb_end_val_smem)
+  IWL_DBG_CFG_BIN(dbg_conf_monitor_host_command)
+  IWL_DBG_CFG_BIN(log_level_cmd)
+  IWL_DBG_CFG_BINA(ldbg_cmd, 32)
+  IWL_DBG_CFG_NODEF(uint8_t, log_level_cmd_id)
+  IWL_DBG_CFG_NODEF(uint8_t, dbg_conf_monitor_cmd_id)
+  IWL_DBG_CFG_NODEF(uint8_t, ldbg_cmd_nums)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_buff_base_addr_reg_addr_b_step)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_buff_end_addr_reg_addr_b_step)
+  IWL_DBG_CFG_NODEF(uint32_t, dbg_mon_wr_ptr_addr_b_step)
 #endif /* CPTCFG_IWLWIFI_DEVICE_TESTMODE */
-    IWL_DBG_CFG_BIN(hw_address)
-    IWL_DBG_CFG_STR(fw_dbg_conf)
-    IWL_DBG_CFG_STR(nvm_file)
-    IWL_DBG_CFG_STR(fw_file_pre)
-    IWL_DBG_CFG_NODEF(uint32_t, d0i3_debug)
-    IWL_DBG_CFG_NODEF(uint32_t, valid_ants)
-    IWL_DBG_CFG_NODEF(uint32_t, no_ack_en)
-    IWL_DBG_CFG_NODEF(bool, no_ldpc)
-    IWL_DBG_CFG_NODEF(uint16_t, rx_mcs_80)
-    IWL_DBG_CFG_NODEF(uint16_t, tx_mcs_80)
-    IWL_DBG_CFG_NODEF(uint16_t, rx_mcs_160)
-    IWL_DBG_CFG_NODEF(uint16_t, tx_mcs_160)
-    IWL_DBG_CFG_NODEF(uint32_t, secure_boot_cfg)
-    IWL_MOD_PARAM(uint32_t, uapsd_disable)
-    IWL_MOD_PARAM(bool, d0i3_disable)
-    IWL_MOD_PARAM(bool, lar_disable)
-    IWL_MOD_PARAM(bool, fw_monitor)
-    IWL_MOD_PARAM(bool, fw_restart)
-    IWL_MOD_PARAM(bool, power_save)
-    IWL_MOD_PARAM(bool, bt_coex_active)
-    IWL_MOD_PARAM(int, antenna_coupling)
-    IWL_MOD_PARAM(int, power_level)
-    IWL_MOD_PARAM(int, led_mode)
-    IWL_MOD_PARAM(int, amsdu_size)
-    IWL_MOD_PARAM(int, swcrypto)
-    IWL_MOD_PARAM(uint, disable_11n)
-    IWL_MOD_PARAM(uint, d0i3_timeout)
-    IWL_DBG_CFG_BIN(he_ppe_thres)
-    IWL_DBG_CFG_NODEF(uint8_t, he_chan_width_dis)
-    IWL_DBG_CFG_NODEF(uint32_t, vht_cap_flip)
+  IWL_DBG_CFG_BIN(hw_address)
+  IWL_DBG_CFG_STR(fw_dbg_conf)
+  IWL_DBG_CFG_STR(nvm_file)
+  IWL_DBG_CFG_STR(fw_file_pre)
+  IWL_DBG_CFG_NODEF(uint32_t, d0i3_debug)
+  IWL_DBG_CFG_NODEF(uint32_t, valid_ants)
+  IWL_DBG_CFG_NODEF(uint32_t, no_ack_en)
+  IWL_DBG_CFG_NODEF(bool, no_ldpc)
+  IWL_DBG_CFG_NODEF(uint16_t, rx_mcs_80)
+  IWL_DBG_CFG_NODEF(uint16_t, tx_mcs_80)
+  IWL_DBG_CFG_NODEF(uint16_t, rx_mcs_160)
+  IWL_DBG_CFG_NODEF(uint16_t, tx_mcs_160)
+  IWL_DBG_CFG_NODEF(uint32_t, secure_boot_cfg)
+  IWL_MOD_PARAM(uint32_t, uapsd_disable)
+  IWL_MOD_PARAM(bool, d0i3_disable)
+  IWL_MOD_PARAM(bool, lar_disable)
+  IWL_MOD_PARAM(bool, fw_monitor)
+  IWL_MOD_PARAM(bool, fw_restart)
+  IWL_MOD_PARAM(bool, power_save)
+  IWL_MOD_PARAM(bool, bt_coex_active)
+  IWL_MOD_PARAM(int, antenna_coupling)
+  IWL_MOD_PARAM(int, power_level)
+  IWL_MOD_PARAM(int, led_mode)
+  IWL_MOD_PARAM(int, amsdu_size)
+  IWL_MOD_PARAM(int, swcrypto)
+  IWL_MOD_PARAM(uint, disable_11n)
+  IWL_MOD_PARAM(uint, d0i3_timeout)
+  IWL_DBG_CFG_BIN(he_ppe_thres)
+  IWL_DBG_CFG_NODEF(uint8_t, he_chan_width_dis)
+  IWL_DBG_CFG_NODEF(uint32_t, vht_cap_flip)
 #ifdef CPTCFG_IWLWIFI_DEBUG
-    IWL_MOD_PARAM(uint32_t, debug_level)
+  IWL_MOD_PARAM(uint32_t, debug_level)
 #endif /* CPTCFG_IWLWIFI_DEBUG */
 #ifdef CPTCFG_IWLWIFI_DISALLOW_OLDER_FW
-    IWL_DBG_CFG_NODEF(bool, load_old_fw)
+  IWL_DBG_CFG_NODEF(bool, load_old_fw)
 #endif /* CPTCFG_IWLWIFI_DISALLOW_OLDER_FW */
 #if IS_ENABLED(CPTCFG_IWLFMAC)
-    IWL_DBG_CFG_NODEF(bool, intcmd_dbg)
+  IWL_DBG_CFG_NODEF(bool, intcmd_dbg)
 #endif /* CPTCFG_IWLFMAC */
 #undef IWL_DBG_CFG
 #undef IWL_DBG_CFG_STR
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-tlv.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-tlv.c
index 0304736..cf986eb 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-tlv.c
@@ -32,157 +32,170 @@
  *****************************************************************************/
 
 #include "iwl-dbg-tlv.h"
+
 #include <linux/firmware.h>
+
 #include "iwl-trans.h"
 
 void iwl_fw_dbg_copy_tlv(struct iwl_trans* trans, struct iwl_ucode_tlv* tlv, bool ext) {
-    struct iwl_apply_point_data* data;
-    struct iwl_fw_ini_header* header = (void*)&tlv->data[0];
-    uint32_t apply_point = le32_to_cpu(header->apply_point);
+  struct iwl_apply_point_data* data;
+  struct iwl_fw_ini_header* header = (void*)&tlv->data[0];
+  uint32_t apply_point = le32_to_cpu(header->apply_point);
 
-    int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv);
+  int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv);
 
-    if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM, "Invalid apply point id %d\n",
-                  apply_point)) {
-        return;
-    }
+  if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM, "Invalid apply point id %d\n", apply_point)) {
+    return;
+  }
 
-    if (ext) {
-        data = &trans->apply_points_ext[apply_point];
-    } else {
-        data = &trans->apply_points[apply_point];
-    }
+  if (ext) {
+    data = &trans->apply_points_ext[apply_point];
+  } else {
+    data = &trans->apply_points[apply_point];
+  }
 
-    /*
-     * Make sure we still have room to copy this TLV. Offset points to the
-     * location the last copy ended.
-     */
-    if (WARN_ONCE(data->offset + copy_size > data->size, "Not enough memory for apply point %d\n",
-                  apply_point)) {
-        return;
-    }
+  /*
+   * Make sure we still have room to copy this TLV. Offset points to the
+   * location the last copy ended.
+   */
+  if (WARN_ONCE(data->offset + copy_size > data->size, "Not enough memory for apply point %d\n",
+                apply_point)) {
+    return;
+  }
 
-    memcpy(data->data + data->offset, (void*)tlv, copy_size);
-    data->offset += copy_size;
+  memcpy(data->data + data->offset, (void*)tlv, copy_size);
+  data->offset += copy_size;
 }
 
 void iwl_alloc_dbg_tlv(struct iwl_trans* trans, size_t len, const uint8_t* data, bool ext) {
-    struct iwl_ucode_tlv* tlv;
-    uint32_t size[IWL_FW_INI_APPLY_NUM] = {0};
-    int i;
+  struct iwl_ucode_tlv* tlv;
+  uint32_t size[IWL_FW_INI_APPLY_NUM] = {0};
+  int i;
 
-    while (len >= sizeof(*tlv)) {
-        uint32_t tlv_len, tlv_type, apply;
-        struct iwl_fw_ini_header* hdr;
+  while (len >= sizeof(*tlv)) {
+    uint32_t tlv_len, tlv_type, apply;
+    struct iwl_fw_ini_header* hdr;
 
-        len -= sizeof(*tlv);
-        tlv = (void*)data;
+    len -= sizeof(*tlv);
+    tlv = (void*)data;
 
-        tlv_len = le32_to_cpu(tlv->length);
-        tlv_type = le32_to_cpu(tlv->type);
+    tlv_len = le32_to_cpu(tlv->length);
+    tlv_type = le32_to_cpu(tlv->type);
 
-        if (len < tlv_len) { return; }
-
-        len -= ALIGN(tlv_len, 4);
-        data += sizeof(*tlv) + ALIGN(tlv_len, 4);
-
-        if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP)) { continue; }
-
-        hdr = (void*)&tlv->data[0];
-        apply = le32_to_cpu(hdr->apply_point);
-
-        IWL_DEBUG_FW(trans, "Read TLV %x, apply point %d\n", le32_to_cpu(tlv->type), apply);
-
-        if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM)) { continue; }
-
-        size[apply] += sizeof(*tlv) + tlv_len;
+    if (len < tlv_len) {
+      return;
     }
 
-    for (i = 0; i < ARRAY_SIZE(size); i++) {
-        void* mem;
+    len -= ALIGN(tlv_len, 4);
+    data += sizeof(*tlv) + ALIGN(tlv_len, 4);
 
-        if (!size[i]) { continue; }
-
-        mem = kzalloc(size[i], GFP_KERNEL);
-
-        if (!mem) {
-            IWL_ERR(trans, "No memory for apply point %d\n", i);
-            return;
-        }
-
-        if (ext) {
-            trans->apply_points_ext[i].data = mem;
-            trans->apply_points_ext[i].size = size[i];
-        } else {
-            trans->apply_points[i].data = mem;
-            trans->apply_points[i].size = size[i];
-        }
-
-        trans->ini_valid = true;
+    if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP)) {
+      continue;
     }
+
+    hdr = (void*)&tlv->data[0];
+    apply = le32_to_cpu(hdr->apply_point);
+
+    IWL_DEBUG_FW(trans, "Read TLV %x, apply point %d\n", le32_to_cpu(tlv->type), apply);
+
+    if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM)) {
+      continue;
+    }
+
+    size[apply] += sizeof(*tlv) + tlv_len;
+  }
+
+  for (i = 0; i < ARRAY_SIZE(size); i++) {
+    void* mem;
+
+    if (!size[i]) {
+      continue;
+    }
+
+    mem = kzalloc(size[i], GFP_KERNEL);
+
+    if (!mem) {
+      IWL_ERR(trans, "No memory for apply point %d\n", i);
+      return;
+    }
+
+    if (ext) {
+      trans->apply_points_ext[i].data = mem;
+      trans->apply_points_ext[i].size = size[i];
+    } else {
+      trans->apply_points[i].data = mem;
+      trans->apply_points[i].size = size[i];
+    }
+
+    trans->ini_valid = true;
+  }
 }
 
 void iwl_fw_dbg_free(struct iwl_trans* trans) {
-    int i;
+  int i;
 
-    for (i = 0; i < ARRAY_SIZE(trans->apply_points); i++) {
-        kfree(trans->apply_points[i].data);
-        trans->apply_points[i].size = 0;
-        trans->apply_points[i].offset = 0;
+  for (i = 0; i < ARRAY_SIZE(trans->apply_points); i++) {
+    kfree(trans->apply_points[i].data);
+    trans->apply_points[i].size = 0;
+    trans->apply_points[i].offset = 0;
 
-        kfree(trans->apply_points_ext[i].data);
-        trans->apply_points_ext[i].size = 0;
-        trans->apply_points_ext[i].offset = 0;
-    }
+    kfree(trans->apply_points_ext[i].data);
+    trans->apply_points_ext[i].size = 0;
+    trans->apply_points_ext[i].offset = 0;
+  }
 }
 
 static int iwl_parse_fw_dbg_tlv(struct iwl_trans* trans, const uint8_t* data, size_t len) {
-    struct iwl_ucode_tlv* tlv;
-    enum iwl_ucode_tlv_type tlv_type;
-    uint32_t tlv_len;
+  struct iwl_ucode_tlv* tlv;
+  enum iwl_ucode_tlv_type tlv_type;
+  uint32_t tlv_len;
 
-    while (len >= sizeof(*tlv)) {
-        len -= sizeof(*tlv);
-        tlv = (void*)data;
+  while (len >= sizeof(*tlv)) {
+    len -= sizeof(*tlv);
+    tlv = (void*)data;
 
-        tlv_len = le32_to_cpu(tlv->length);
-        tlv_type = le32_to_cpu(tlv->type);
+    tlv_len = le32_to_cpu(tlv->length);
+    tlv_type = le32_to_cpu(tlv->type);
 
-        if (len < tlv_len) {
-            IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len);
-            return -EINVAL;
-        }
-        len -= ALIGN(tlv_len, 4);
-        data += sizeof(*tlv) + ALIGN(tlv_len, 4);
-
-        switch (tlv_type) {
-        case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
-        case IWL_UCODE_TLV_TYPE_HCMD:
-        case IWL_UCODE_TLV_TYPE_REGIONS:
-        case IWL_UCODE_TLV_TYPE_TRIGGERS:
-        case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
-            iwl_fw_dbg_copy_tlv(trans, tlv, true);
-        default:
-            WARN_ONCE(1, "Invalid TLV %x\n", tlv_type);
-            break;
-        }
+    if (len < tlv_len) {
+      IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len);
+      return -EINVAL;
     }
+    len -= ALIGN(tlv_len, 4);
+    data += sizeof(*tlv) + ALIGN(tlv_len, 4);
 
-    return 0;
+    switch (tlv_type) {
+      case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
+      case IWL_UCODE_TLV_TYPE_HCMD:
+      case IWL_UCODE_TLV_TYPE_REGIONS:
+      case IWL_UCODE_TLV_TYPE_TRIGGERS:
+      case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
+        iwl_fw_dbg_copy_tlv(trans, tlv, true);
+        break;
+      default:
+        WARN_ONCE(1, "Invalid TLV %x\n", tlv_type);
+    }
+  }
+
+  return 0;
 }
 
 void iwl_load_fw_dbg_tlv(struct device* dev, struct iwl_trans* trans) {
-    const struct firmware* fw;
-    int res;
+  const struct firmware* fw;
+  int res;
 
-    if (trans->external_ini_loaded || !iwlwifi_mod_params.enable_ini) { return; }
+  if (trans->external_ini_loaded || !iwlwifi_mod_params.enable_ini) {
+    return;
+  }
 
-    res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev);
-    if (res) { return; }
+  res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev);
+  if (res) {
+    return;
+  }
 
-    iwl_alloc_dbg_tlv(trans, fw->size, fw->data, true);
-    iwl_parse_fw_dbg_tlv(trans, fw->data, fw->size);
+  iwl_alloc_dbg_tlv(trans, fw->size, fw->data, true);
+  iwl_parse_fw_dbg_tlv(trans, fw->data, fw->size);
 
-    trans->external_ini_loaded = true;
-    release_firmware(fw);
+  trans->external_ini_loaded = true;
+  release_firmware(fw);
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-tlv.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-tlv.h
index cb33ee1..a5e1922 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dbg-tlv.h
@@ -40,9 +40,9 @@
  * @offset: current offset of the copied data
  */
 struct iwl_apply_point_data {
-    void* data;
-    int size;
-    int offset;
+  void* data;
+  int size;
+  int offset;
 };
 
 struct iwl_trans;
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-debug.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-debug.h
index ae636fd..e5e84a0 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-debug.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-debug.h
@@ -96,10 +96,9 @@
 #define __iwl_dbg(dev, level, limit, function, fmt, args...) \
   zxlogf(DEBUG1, "iwlwifi (%s): " fmt, function, ##args)
 
-#define iwl_print_hex_error(m, p, len)                                        \
-  do {                                                                        \
-    print_hex_dump(KERN_ERR, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1, p, len, \
-                   1);                                                        \
+#define iwl_print_hex_error(m, p, len)                                            \
+  do {                                                                            \
+    print_hex_dump(KERN_ERR, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
   } while (0)
 
 #define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...) \
@@ -107,19 +106,15 @@
     CHECK_FOR_NEWLINE(fmt);                              \
     __iwl_dbg(dev, level, limit, __func__, fmt, ##args); \
   } while (0)
-#define IWL_DEBUG(m, level, fmt, args...) \
-  __IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args)
-#define IWL_DEBUG_DEV(dev, level, fmt, args...) \
-  __IWL_DEBUG_DEV(dev, level, false, fmt, ##args)
-#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
-  __IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args)
+#define IWL_DEBUG(m, level, fmt, args...) __IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args)
+#define IWL_DEBUG_DEV(dev, level, fmt, args...) __IWL_DEBUG_DEV(dev, level, false, fmt, ##args)
+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) __IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args)
 
 #ifdef CPTCFG_IWLWIFI_DEBUG
-#define iwl_print_hex_dump(m, level, p, len)                                 \
-  do {                                                                       \
-    if (iwl_have_debug_level(level))                                         \
-      print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1, p, \
-                     len, 1);                                                \
+#define iwl_print_hex_dump(m, level, p, len)                                          \
+  do {                                                                                \
+    if (iwl_have_debug_level(level))                                                  \
+      print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
   } while (0)
 #else
 #define iwl_print_hex_dump(m, level, p, len)
@@ -214,14 +209,11 @@
 #define IWL_DEBUG_COEX(p, f, a...) IWL_DEBUG(p, IWL_DL_COEX, f, ##a)
 #define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ##a)
 #define IWL_DEBUG_RATE_LIMIT(p, f, a...) IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ##a)
-#define IWL_DEBUG_ASSOC(p, f, a...) \
-  IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ##a)
-#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
-  IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ##a)
+#define IWL_DEBUG_ASSOC(p, f, a...) IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ##a)
+#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ##a)
 #define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ##a)
 #define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ##a)
-#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
-  IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ##a)
+#define IWL_DEBUG_STATS_LIMIT(p, f, a...) IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ##a)
 #define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ##a)
 #define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ##a)
 #define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ##a)
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-cfg.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-cfg.c
index c2a7f1d..f653bc1 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-cfg.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-cfg.c
@@ -32,6 +32,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include "iwl-dnt-cfg.h"
+
 #include <linux/export.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -39,7 +41,6 @@
 
 #include "iwl-config.h"
 #include "iwl-debug.h"
-#include "iwl-dnt-cfg.h"
 #include "iwl-dnt-dev-if.h"
 #include "iwl-dnt-dispatch.h"
 #include "iwl-drv.h"
@@ -55,50 +56,52 @@
  */
 static ssize_t iwl_dnt_debugfs_log_read(struct file* file, char __user* user_buf, size_t count,
                                         loff_t* ppos) {
-    struct iwl_trans* trans = file->private_data;
-    struct iwl_dnt* dnt = trans->tmdev->dnt;
-    struct dnt_collect_db* db = dnt->dispatch.um_db;
-    unsigned char* temp_buf;
-    int ret = 0;
+  struct iwl_trans* trans = file->private_data;
+  struct iwl_dnt* dnt = trans->tmdev->dnt;
+  struct dnt_collect_db* db = dnt->dispatch.um_db;
+  unsigned char* temp_buf;
+  int ret = 0;
 
-    temp_buf = kzalloc(count, GFP_KERNEL);
-    if (!temp_buf) { return -ENOMEM; }
+  temp_buf = kzalloc(count, GFP_KERNEL);
+  if (!temp_buf) {
+    return -ENOMEM;
+  }
 
-    dnt->debugfs_counter++;
-    do {
-        /* wait for new logs */
-        ret = wait_event_interruptible_timeout(db->waitq,
-                                               (!trans->op_mode || db->read_ptr != db->wr_ptr), HZ);
-        if (ret < 0 || !trans->op_mode) {
-            /* we reached EOF */
-            ret = 0;
-            break;
-        }
-        if (ret == 0) {
-            /*
-             * temp_buf is zeroed at this point, so if we set the
-             * size to non-zero we'll return zeroes to userspace,
-             * which the trace viewer will ignore (outside of a
-             * specific trace item/event)
-             */
-            ret = sizeof(uint32_t);
-            break;
-        }
+  dnt->debugfs_counter++;
+  do {
+    /* wait for new logs */
+    ret = wait_event_interruptible_timeout(db->waitq,
+                                           (!trans->op_mode || db->read_ptr != db->wr_ptr), HZ);
+    if (ret < 0 || !trans->op_mode) {
+      /* we reached EOF */
+      ret = 0;
+      break;
+    }
+    if (ret == 0) {
+      /*
+       * temp_buf is zeroed at this point, so if we set the
+       * size to non-zero we'll return zeroes to userspace,
+       * which the trace viewer will ignore (outside of a
+       * specific trace item/event)
+       */
+      ret = sizeof(uint32_t);
+      break;
+    }
 
-        ret = iwl_dnt_dispatch_pull(trans, temp_buf, count, UCODE_MESSAGES);
-        if (ret < 0) {
-            IWL_DEBUG_INFO(trans, "Failed to retrieve debug data\n");
-            goto free_buf;
-        }
-    } while (!ret);
+    ret = iwl_dnt_dispatch_pull(trans, temp_buf, count, UCODE_MESSAGES);
+    if (ret < 0) {
+      IWL_DEBUG_INFO(trans, "Failed to retrieve debug data\n");
+      goto free_buf;
+    }
+  } while (!ret);
 
-    *ppos = 0;
-    ret = simple_read_from_buffer(user_buf, ret, ppos, temp_buf, count);
+  *ppos = 0;
+  ret = simple_read_from_buffer(user_buf, ret, ppos, temp_buf, count);
 free_buf:
-    kfree(temp_buf);
-    dnt->debugfs_counter--;
-    wake_up(&dnt->debugfs_waitq);
-    return ret;
+  kfree(temp_buf);
+  dnt->debugfs_counter--;
+  wake_up(&dnt->debugfs_waitq);
+  return ret;
 }
 
 static const struct file_operations iwl_dnt_debugfs_log_ops = {
@@ -108,210 +111,231 @@
 };
 
 static bool iwl_dnt_register_debugfs_entries(struct iwl_trans* trans, struct dentry* dbgfs_dir) {
-    struct iwl_dnt* dnt = trans->tmdev->dnt;
+  struct iwl_dnt* dnt = trans->tmdev->dnt;
 
-    dnt->debugfs_entry = debugfs_create_dir("dbgm", dbgfs_dir);
-    if (!dnt->debugfs_entry) { return false; }
+  dnt->debugfs_entry = debugfs_create_dir("dbgm", dbgfs_dir);
+  if (!dnt->debugfs_entry) {
+    return false;
+  }
 
-    if (!debugfs_create_file("log", S_IRUSR, dnt->debugfs_entry, trans, &iwl_dnt_debugfs_log_ops)) {
-        return false;
-    }
-    return true;
+  if (!debugfs_create_file("log", S_IRUSR, dnt->debugfs_entry, trans, &iwl_dnt_debugfs_log_ops)) {
+    return false;
+  }
+  return true;
 }
 #endif
 
 static bool iwl_dnt_configure_prepare_dma(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
+  struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
 
-    if (dbg_cfg->dbm_destination_path != DMA || !dbg_cfg->dbgm_mem_power) { return true; }
-
-    dnt->mon_buf_size = 0x800 << dbg_cfg->dbgm_mem_power;
-    dnt->mon_buf_cpu_addr =
-        dma_alloc_coherent(trans->dev, dnt->mon_buf_size, &dnt->mon_dma_addr, GFP_KERNEL);
-    if (!dnt->mon_buf_cpu_addr) { return false; }
-
-    dnt->mon_base_addr = (uint64_t)dnt->mon_dma_addr;
-    dnt->mon_end_addr = dnt->mon_base_addr + dnt->mon_buf_size;
-    dnt->iwl_dnt_status |= IWL_DNT_STATUS_DMA_BUFFER_ALLOCATED;
-
+  if (dbg_cfg->dbm_destination_path != DMA || !dbg_cfg->dbgm_mem_power) {
     return true;
+  }
+
+  dnt->mon_buf_size = 0x800 << dbg_cfg->dbgm_mem_power;
+  dnt->mon_buf_cpu_addr =
+      dma_alloc_coherent(trans->dev, dnt->mon_buf_size, &dnt->mon_dma_addr, GFP_KERNEL);
+  if (!dnt->mon_buf_cpu_addr) {
+    return false;
+  }
+
+  dnt->mon_base_addr = (uint64_t)dnt->mon_dma_addr;
+  dnt->mon_end_addr = dnt->mon_base_addr + dnt->mon_buf_size;
+  dnt->iwl_dnt_status |= IWL_DNT_STATUS_DMA_BUFFER_ALLOCATED;
+
+  return true;
 }
 
 static bool iwl_dnt_validate_configuration(struct iwl_trans* trans) {
-    struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
+  struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
 
-    if (!strcmp(trans->dev->bus->name, BUS_TYPE_PCI))
-        return dbg_cfg->dbm_destination_path == DMA || dbg_cfg->dbm_destination_path == MARBH_ADC ||
-               dbg_cfg->dbm_destination_path == MARBH_DBG || dbg_cfg->dbm_destination_path == MIPI;
-    else if (!strcmp(trans->dev->bus->name, BUS_TYPE_SDIO))
-        return dbg_cfg->dbm_destination_path == MARBH_ADC ||
-               dbg_cfg->dbm_destination_path == MARBH_DBG || dbg_cfg->dbm_destination_path == MIPI;
+  if (!strcmp(trans->dev->bus->name, BUS_TYPE_PCI))
+    return dbg_cfg->dbm_destination_path == DMA || dbg_cfg->dbm_destination_path == MARBH_ADC ||
+           dbg_cfg->dbm_destination_path == MARBH_DBG || dbg_cfg->dbm_destination_path == MIPI;
+  else if (!strcmp(trans->dev->bus->name, BUS_TYPE_SDIO))
+    return dbg_cfg->dbm_destination_path == MARBH_ADC ||
+           dbg_cfg->dbm_destination_path == MARBH_DBG || dbg_cfg->dbm_destination_path == MIPI;
 
-    return false;
+  return false;
 }
 
 static int iwl_dnt_conf_monitor(struct iwl_trans* trans, uint32_t output, uint32_t monitor_type,
                                 uint32_t target_mon_mode) {
-    struct iwl_dnt* dnt = trans->tmdev->dnt;
+  struct iwl_dnt* dnt = trans->tmdev->dnt;
 
-    if (dnt->cur_input_mask & MONITOR_INPUT_MODE_MASK) {
-        IWL_INFO(trans, "DNT: Resetting deivce configuration\n");
-        return iwl_dnt_dev_if_configure_monitor(dnt, trans);
-    }
-
-    dnt->cur_input_mask |= MONITOR;
-    dnt->dispatch.mon_output = output;
-    dnt->cur_mon_type = monitor_type;
-    dnt->cur_mon_mode = target_mon_mode;
-    if (monitor_type == INTERFACE) {
-        if (output == NETLINK || output == FTRACE) {
-            /* setting PUSH out mode */
-            dnt->dispatch.mon_out_mode = PUSH;
-            dnt->dispatch.mon_in_mode = COLLECT;
-        } else {
-            dnt->dispatch.dbgm_db = iwl_dnt_dispatch_allocate_collect_db(dnt);
-            if (!dnt->dispatch.dbgm_db) { return -ENOMEM; }
-            dnt->dispatch.mon_in_mode = RETRIEVE;
-        }
-    } else {
-        dnt->dispatch.mon_out_mode = PULL;
-        dnt->dispatch.mon_in_mode = RETRIEVE;
-
-        /*
-         * If we're running a device that supports DBGC and monitor
-         * was given value as MARBH, it should be interpreted as SMEM
-         */
-        if (trans->cfg->dbgc_supported &&
-            (monitor_type == MARBH_ADC || monitor_type == MARBH_DBG)) {
-            dnt->cur_mon_type = SMEM;
-        }
-    }
+  if (dnt->cur_input_mask & MONITOR_INPUT_MODE_MASK) {
+    IWL_INFO(trans, "DNT: Resetting deivce configuration\n");
     return iwl_dnt_dev_if_configure_monitor(dnt, trans);
+  }
+
+  dnt->cur_input_mask |= MONITOR;
+  dnt->dispatch.mon_output = output;
+  dnt->cur_mon_type = monitor_type;
+  dnt->cur_mon_mode = target_mon_mode;
+  if (monitor_type == INTERFACE) {
+    if (output == NETLINK || output == FTRACE) {
+      /* setting PUSH out mode */
+      dnt->dispatch.mon_out_mode = PUSH;
+      dnt->dispatch.mon_in_mode = COLLECT;
+    } else {
+      dnt->dispatch.dbgm_db = iwl_dnt_dispatch_allocate_collect_db(dnt);
+      if (!dnt->dispatch.dbgm_db) {
+        return -ENOMEM;
+      }
+      dnt->dispatch.mon_in_mode = RETRIEVE;
+    }
+  } else {
+    dnt->dispatch.mon_out_mode = PULL;
+    dnt->dispatch.mon_in_mode = RETRIEVE;
+
+    /*
+     * If we're running a device that supports DBGC and monitor
+     * was given value as MARBH, it should be interpreted as SMEM
+     */
+    if (trans->cfg->dbgc_supported && (monitor_type == MARBH_ADC || monitor_type == MARBH_DBG)) {
+      dnt->cur_mon_type = SMEM;
+    }
+  }
+  return iwl_dnt_dev_if_configure_monitor(dnt, trans);
 }
 
 void iwl_dnt_start(struct iwl_trans* trans) {
-    struct iwl_dnt* dnt = trans->tmdev->dnt;
-    struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
+  struct iwl_dnt* dnt = trans->tmdev->dnt;
+  struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
 
-    if (!dnt) { return; }
+  if (!dnt) {
+    return;
+  }
 
-    if ((dnt->iwl_dnt_status & IWL_DNT_STATUS_MON_CONFIGURED) && dbg_cfg->dbg_conf_monitor_cmd_id) {
-        iwl_dnt_dev_if_start_monitor(dnt, trans);
-    }
+  if ((dnt->iwl_dnt_status & IWL_DNT_STATUS_MON_CONFIGURED) && dbg_cfg->dbg_conf_monitor_cmd_id) {
+    iwl_dnt_dev_if_start_monitor(dnt, trans);
+  }
 
-    if ((dnt->iwl_dnt_status & IWL_DNT_STATUS_UCODE_MSGS_CONFIGURED) && dbg_cfg->log_level_cmd_id) {
-        iwl_dnt_dev_if_set_log_level(dnt, trans);
-    }
+  if ((dnt->iwl_dnt_status & IWL_DNT_STATUS_UCODE_MSGS_CONFIGURED) && dbg_cfg->log_level_cmd_id) {
+    iwl_dnt_dev_if_set_log_level(dnt, trans);
+  }
 }
 IWL_EXPORT_SYMBOL(iwl_dnt_start);
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
 static int iwl_dnt_conf_ucode_msgs_via_rx(struct iwl_trans* trans, uint32_t output) {
-    struct iwl_dnt* dnt = trans->tmdev->dnt;
+  struct iwl_dnt* dnt = trans->tmdev->dnt;
 
-    dnt->cur_input_mask |= UCODE_MESSAGES;
-    dnt->dispatch.ucode_msgs_output = output;
+  dnt->cur_input_mask |= UCODE_MESSAGES;
+  dnt->dispatch.ucode_msgs_output = output;
 
-    if (output == NETLINK || output == FTRACE) {
-        /* setting PUSH out mode */
-        dnt->dispatch.ucode_msgs_out_mode = PUSH;
-    } else {
-        dnt->dispatch.um_db = iwl_dnt_dispatch_allocate_collect_db(dnt);
-        if (!dnt->dispatch.um_db) { return -ENOMEM; }
-        dnt->dispatch.ucode_msgs_out_mode = RETRIEVE;
+  if (output == NETLINK || output == FTRACE) {
+    /* setting PUSH out mode */
+    dnt->dispatch.ucode_msgs_out_mode = PUSH;
+  } else {
+    dnt->dispatch.um_db = iwl_dnt_dispatch_allocate_collect_db(dnt);
+    if (!dnt->dispatch.um_db) {
+      return -ENOMEM;
     }
-    /* setting COLLECT in mode */
-    dnt->dispatch.ucode_msgs_in_mode = COLLECT;
-    dnt->iwl_dnt_status |= IWL_DNT_STATUS_UCODE_MSGS_CONFIGURED;
+    dnt->dispatch.ucode_msgs_out_mode = RETRIEVE;
+  }
+  /* setting COLLECT in mode */
+  dnt->dispatch.ucode_msgs_in_mode = COLLECT;
+  dnt->iwl_dnt_status |= IWL_DNT_STATUS_UCODE_MSGS_CONFIGURED;
 
-    return 0;
+  return 0;
 }
 #endif
 
 void iwl_dnt_init(struct iwl_trans* trans, struct dentry* dbgfs_dir) {
-    struct iwl_dnt* dnt;
-    bool __maybe_unused ret;
-    int __maybe_unused err;
+  struct iwl_dnt* dnt;
+  bool __maybe_unused ret;
+  int __maybe_unused err;
 
-    dnt = kzalloc(sizeof(struct iwl_dnt), GFP_KERNEL);
-    if (!dnt) { return; }
+  dnt = kzalloc(sizeof(struct iwl_dnt), GFP_KERNEL);
+  if (!dnt) {
+    return;
+  }
 
-    trans->tmdev->dnt = dnt;
+  trans->tmdev->dnt = dnt;
 
-    dnt->dev = trans->dev;
+  dnt->dev = trans->dev;
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    ret = iwl_dnt_register_debugfs_entries(trans, dbgfs_dir);
-    if (!ret) {
-        IWL_ERR(trans, "Failed to create dnt debugfs entries\n");
-        return;
-    }
-    err = iwl_dnt_conf_ucode_msgs_via_rx(trans, DEBUGFS);
-    if (err) { IWL_DEBUG_INFO(trans, "Failed to configure uCodeMessages\n"); }
-    init_waitqueue_head(&dnt->debugfs_waitq);
+  ret = iwl_dnt_register_debugfs_entries(trans, dbgfs_dir);
+  if (!ret) {
+    IWL_ERR(trans, "Failed to create dnt debugfs entries\n");
+    return;
+  }
+  err = iwl_dnt_conf_ucode_msgs_via_rx(trans, DEBUGFS);
+  if (err) {
+    IWL_DEBUG_INFO(trans, "Failed to configure uCodeMessages\n");
+  }
+  init_waitqueue_head(&dnt->debugfs_waitq);
 #endif
 
-    if (!iwl_dnt_validate_configuration(trans)) {
-        dnt->iwl_dnt_status |= IWL_DNT_STATUS_INVALID_MONITOR_CONF;
-        return;
-    }
-    /* allocate DMA if needed */
-    if (!iwl_dnt_configure_prepare_dma(dnt, trans)) {
-        IWL_ERR(trans, "Failed to prepare DMA\n");
-        dnt->iwl_dnt_status |= IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DMA;
-    }
+  if (!iwl_dnt_validate_configuration(trans)) {
+    dnt->iwl_dnt_status |= IWL_DNT_STATUS_INVALID_MONITOR_CONF;
+    return;
+  }
+  /* allocate DMA if needed */
+  if (!iwl_dnt_configure_prepare_dma(dnt, trans)) {
+    IWL_ERR(trans, "Failed to prepare DMA\n");
+    dnt->iwl_dnt_status |= IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DMA;
+  }
 }
 IWL_EXPORT_SYMBOL(iwl_dnt_init);
 
 void iwl_dnt_free(struct iwl_trans* trans) {
-    struct iwl_dnt* dnt = trans->tmdev->dnt;
+  struct iwl_dnt* dnt = trans->tmdev->dnt;
 
-    if (!dnt) { return; }
+  if (!dnt) {
+    return;
+  }
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    debugfs_remove_recursive(dnt->debugfs_entry);
-    if (dnt->debugfs_counter) {
-        IWL_INFO(trans, "waiting for dnt debugfs release (cnt=%d)\n", dnt->debugfs_counter);
-        wake_up_interruptible(&dnt->dispatch.um_db->waitq);
-        wait_event(dnt->debugfs_waitq, dnt->debugfs_counter == 0);
-    }
+  debugfs_remove_recursive(dnt->debugfs_entry);
+  if (dnt->debugfs_counter) {
+    IWL_INFO(trans, "waiting for dnt debugfs release (cnt=%d)\n", dnt->debugfs_counter);
+    wake_up_interruptible(&dnt->dispatch.um_db->waitq);
+    wait_event(dnt->debugfs_waitq, dnt->debugfs_counter == 0);
+  }
 #endif
-    iwl_dnt_dispatch_free(dnt, trans);
-    kfree(dnt);
+  iwl_dnt_dispatch_free(dnt, trans);
+  kfree(dnt);
 }
 IWL_EXPORT_SYMBOL(iwl_dnt_free);
 
 void iwl_dnt_configure(struct iwl_trans* trans, const struct fw_img* image) {
-    struct iwl_dnt* dnt = trans->tmdev->dnt;
-    struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
-    bool is_conf_invalid;
+  struct iwl_dnt* dnt = trans->tmdev->dnt;
+  struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
+  bool is_conf_invalid;
 
-    if (!dnt) { return; }
+  if (!dnt) {
+    return;
+  }
 
-    dnt->image = image;
+  dnt->image = image;
 
-    is_conf_invalid = (dnt->iwl_dnt_status & IWL_DNT_STATUS_INVALID_MONITOR_CONF);
+  is_conf_invalid = (dnt->iwl_dnt_status & IWL_DNT_STATUS_INVALID_MONITOR_CONF);
 
-    if (is_conf_invalid) { return; }
+  if (is_conf_invalid) {
+    return;
+  }
 
-    switch (dbg_cfg->dbm_destination_path) {
+  switch (dbg_cfg->dbm_destination_path) {
     case DMA:
-        if (!dnt->mon_buf_cpu_addr) {
-            IWL_ERR(trans, "DMA buffer wasn't allocated\n");
-            return;
-        }
+      if (!dnt->mon_buf_cpu_addr) {
+        IWL_ERR(trans, "DMA buffer wasn't allocated\n");
+        return;
+      }
     case NO_MONITOR:
     case MIPI:
     case INTERFACE:
     case MARBH_ADC:
     case MARBH_DBG:
-        iwl_dnt_conf_monitor(trans, dbg_cfg->dnt_out_mode, dbg_cfg->dbm_destination_path,
-                             dbg_cfg->dbgm_enable_mode);
-        break;
+      iwl_dnt_conf_monitor(trans, dbg_cfg->dnt_out_mode, dbg_cfg->dbm_destination_path,
+                           dbg_cfg->dbgm_enable_mode);
+      break;
     default:
-        IWL_INFO(trans, "Invalid monitor type\n");
-        return;
-    }
+      IWL_INFO(trans, "Invalid monitor type\n");
+      return;
+  }
 
-    dnt->dispatch.crash_out_mode |= dbg_cfg->dnt_out_mode;
+  dnt->dispatch.crash_out_mode |= dbg_cfg->dnt_out_mode;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-cfg.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-cfg.h
index 71ead00..6a51af5 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-cfg.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-cfg.h
@@ -48,21 +48,21 @@
 #define BUS_TYPE_SDIO "sdio"
 
 #define GET_RX_PACKET_SIZE(pkt) \
-    ((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK) - sizeof(struct iwl_cmd_header))
+  ((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK) - sizeof(struct iwl_cmd_header))
 
 #define MONITOR_INPUT_MODE_MASK 0x01
 #define UCODE_MSGS_INPUT_MODE_MASK 0x02
 
 /* DnT status */
 enum {
-    IWL_DNT_STATUS_MON_CONFIGURED = BIT(0),
-    IWL_DNT_STATUS_UCODE_MSGS_CONFIGURED = BIT(1),
-    IWL_DNT_STATUS_DMA_BUFFER_ALLOCATED = BIT(2),
-    IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DMA = BIT(3),
-    IWL_DNT_STATUS_FAILED_START_MONITOR = BIT(4),
-    IWL_DNT_STATUS_INVALID_MONITOR_CONF = BIT(5),
-    IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DB = BIT(6),
-    IWL_DNT_STATUS_FW_CRASH = BIT(7),
+  IWL_DNT_STATUS_MON_CONFIGURED = BIT(0),
+  IWL_DNT_STATUS_UCODE_MSGS_CONFIGURED = BIT(1),
+  IWL_DNT_STATUS_DMA_BUFFER_ALLOCATED = BIT(2),
+  IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DMA = BIT(3),
+  IWL_DNT_STATUS_FAILED_START_MONITOR = BIT(4),
+  IWL_DNT_STATUS_INVALID_MONITOR_CONF = BIT(5),
+  IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DB = BIT(6),
+  IWL_DNT_STATUS_FW_CRASH = BIT(7),
 };
 
 /* input modes */
@@ -73,13 +73,13 @@
 
 /* monitor types */
 enum {
-    NO_MONITOR = 0,
-    MIPI = BIT(0),
-    INTERFACE = BIT(1),
-    DMA = BIT(2),
-    MARBH_ADC = BIT(3),
-    MARBH_DBG = BIT(4),
-    SMEM = BIT(5)
+  NO_MONITOR = 0,
+  MIPI = BIT(0),
+  INTERFACE = BIT(1),
+  DMA = BIT(2),
+  MARBH_ADC = BIT(3),
+  MARBH_DBG = BIT(4),
+  SMEM = BIT(5)
 };
 
 /* monitor modes */
@@ -93,17 +93,17 @@
 
 /* crash data */
 enum {
-    NONE = 0,
-    SRAM = BIT(0),
-    DBGM = BIT(1),
-    TX_FIFO = BIT(2),
-    RX_FIFO = BIT(3),
-    PERIPHERY = BIT(4)
+  NONE = 0,
+  SRAM = BIT(0),
+  DBGM = BIT(1),
+  TX_FIFO = BIT(2),
+  RX_FIFO = BIT(3),
+  PERIPHERY = BIT(4)
 };
 
 struct dnt_collect_entry {
-    uint8_t* data;
-    uint32_t size;
+  uint8_t* data;
+  uint32_t size;
 };
 
 /**
@@ -113,11 +113,11 @@
  * @db_lock: lock for the list
  */
 struct dnt_collect_db {
-    struct dnt_collect_entry collect_array[IWL_DNT_ARRAY_SIZE];
-    unsigned int read_ptr;
-    unsigned int wr_ptr;
-    wait_queue_head_t waitq;
-    spinlock_t db_lock; /*locks the array */
+  struct dnt_collect_entry collect_array[IWL_DNT_ARRAY_SIZE];
+  unsigned int read_ptr;
+  unsigned int wr_ptr;
+  wait_queue_head_t waitq;
+  spinlock_t db_lock; /*locks the array */
 };
 
 /**
@@ -129,16 +129,16 @@
  * @periph: periphery registers data pointer
  */
 struct dnt_crash_data {
-    uint8_t* sram;
-    uint32_t sram_buf_size;
-    uint8_t* dbgm;
-    uint32_t dbgm_buf_size;
-    uint8_t* rx;
-    uint32_t rx_buf_size;
-    uint8_t* tx;
-    uint32_t tx_buf_size;
-    uint8_t* periph;
-    uint32_t periph_buf_size;
+  uint8_t* sram;
+  uint32_t sram_buf_size;
+  uint8_t* dbgm;
+  uint32_t dbgm_buf_size;
+  uint8_t* rx;
+  uint32_t rx_buf_size;
+  uint8_t* tx;
+  uint32_t tx_buf_size;
+  uint8_t* periph;
+  uint32_t periph_buf_size;
 };
 
 /**
@@ -150,20 +150,20 @@
  * @um_list: uCodeMessages link list
  */
 struct iwl_dnt_dispatch {
-    uint32_t mon_in_mode;
-    uint32_t mon_out_mode;
-    uint32_t mon_output;
+  uint32_t mon_in_mode;
+  uint32_t mon_out_mode;
+  uint32_t mon_output;
 
-    uint32_t ucode_msgs_in_mode;
-    uint32_t ucode_msgs_out_mode;
-    uint32_t ucode_msgs_output;
+  uint32_t ucode_msgs_in_mode;
+  uint32_t ucode_msgs_out_mode;
+  uint32_t ucode_msgs_output;
 
-    uint32_t crash_out_mode;
+  uint32_t crash_out_mode;
 
-    struct dnt_collect_db* dbgm_db;
-    struct dnt_collect_db* um_db;
+  struct dnt_collect_db* dbgm_db;
+  struct dnt_collect_db* um_db;
 
-    struct dnt_crash_data crash;
+  struct dnt_crash_data crash;
 };
 
 /**
@@ -186,27 +186,27 @@
  * @dispatch: a pointer to dispatch
  */
 struct iwl_dnt {
-    struct device* dev;
-    const struct fw_img* image;
+  struct device* dev;
+  const struct fw_img* image;
 
-    uint32_t iwl_dnt_status;
-    bool is_configuration_valid;
-    uint8_t cur_input_mask;
-    uint8_t cur_output_mask;
+  uint32_t iwl_dnt_status;
+  bool is_configuration_valid;
+  uint8_t cur_input_mask;
+  uint8_t cur_output_mask;
 
-    uint32_t cur_mon_type;
-    uint8_t* mon_buf_cpu_addr;
-    dma_addr_t mon_dma_addr;
-    uint64_t mon_base_addr;
-    uint64_t mon_end_addr;
-    uint32_t mon_buf_size;
-    uint32_t cur_mon_mode;
+  uint32_t cur_mon_type;
+  uint8_t* mon_buf_cpu_addr;
+  dma_addr_t mon_dma_addr;
+  uint64_t mon_base_addr;
+  uint64_t mon_end_addr;
+  uint32_t mon_buf_size;
+  uint32_t cur_mon_mode;
 
-    struct iwl_dnt_dispatch dispatch;
+  struct iwl_dnt_dispatch dispatch;
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    uint8_t debugfs_counter;
-    wait_queue_head_t debugfs_waitq;
-    struct dentry* debugfs_entry;
+  uint8_t debugfs_counter;
+  wait_queue_head_t debugfs_waitq;
+  struct dentry* debugfs_entry;
 #endif
 };
 
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dev-if.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dev-if.c
index 16a7a49..f95ba53 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dev-if.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dev-if.c
@@ -31,6 +31,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include "iwl-dnt-dev-if.h"
+
 #include <linux/export.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
@@ -38,498 +40,507 @@
 #include "iwl-csr.h"
 #include "iwl-debug.h"
 #include "iwl-dnt-cfg.h"
-#include "iwl-dnt-dev-if.h"
 #include "iwl-io.h"
 #include "iwl-prph.h"
 #include "iwl-tm-gnl.h"
 #include "iwl-trans.h"
 
 static void iwl_dnt_dev_if_configure_mipi(struct iwl_trans* trans) {
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
-        iwl_trans_set_bits_mask(trans, trans->dbg_cfg.dbg_mipi_conf_reg,
-                                trans->dbg_cfg.dbg_mipi_conf_mask,
-                                trans->dbg_cfg.dbg_mipi_conf_mask);
-        return;
-    }
+  if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
+    iwl_trans_set_bits_mask(trans, trans->dbg_cfg.dbg_mipi_conf_reg,
+                            trans->dbg_cfg.dbg_mipi_conf_mask, trans->dbg_cfg.dbg_mipi_conf_mask);
+    return;
+  }
 
-    /* ABB_CguDTClkCtrl - set system trace and mtm clock souce as PLLA */
-    iowrite32(0x30303, (void __force __iomem*)0xe640110c);
+  /* ABB_CguDTClkCtrl - set system trace and mtm clock source as PLLA */
+  iowrite32(0x30303, (void __force __iomem*)0xe640110c);
 
-    /* ABB_SpcuMemPower - set the power of the trace memory */
-    iowrite32(0x1, (void __force __iomem*)0xe640201c);
+  /* ABB_SpcuMemPower - set the power of the trace memory */
+  iowrite32(0x1, (void __force __iomem*)0xe640201c);
 
-    /* set MIPI2 PCL, PCL_26 - PCL_30 */
-    iowrite32(0x10, (void __force __iomem*)0xe6300274);
-    iowrite32(0x10, (void __force __iomem*)0xe6300278);
-    iowrite32(0x10, (void __force __iomem*)0xe630027c);
-    iowrite32(0x10, (void __force __iomem*)0xe6300280);
-    iowrite32(0x10, (void __force __iomem*)0xe6300284);
+  /* set MIPI2 PCL, PCL_26 - PCL_30 */
+  iowrite32(0x10, (void __force __iomem*)0xe6300274);
+  iowrite32(0x10, (void __force __iomem*)0xe6300278);
+  iowrite32(0x10, (void __force __iomem*)0xe630027c);
+  iowrite32(0x10, (void __force __iomem*)0xe6300280);
+  iowrite32(0x10, (void __force __iomem*)0xe6300284);
 
-    /* ARB0_CNF - enable generic arbiter */
-    iowrite32(0xc0000000, (void __force __iomem*)0xe6700108);
+  /* ARB0_CNF - enable generic arbiter */
+  iowrite32(0xc0000000, (void __force __iomem*)0xe6700108);
 
-    /* enable WLAN arbiter */
-    iowrite32(0x80000006, (void __force __iomem*)0xe6700140);
+  /* enable WLAN arbiter */
+  iowrite32(0x80000006, (void __force __iomem*)0xe6700140);
 }
 
 static void iwl_dnt_dev_if_configure_marbh(struct iwl_trans* trans) {
-    struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
-    uint32_t ret, reg_val = 0;
+  struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
+  uint32_t ret, reg_val = 0;
 
-    if (cfg->dbg_marbh_access_type == ACCESS_TYPE_DIRECT) {
-        iwl_trans_set_bits_mask(trans, cfg->dbg_marbh_conf_reg, cfg->dbg_marbh_conf_mask,
-                                cfg->dbg_marbh_conf_mask);
-    } else if (cfg->dbg_marbh_access_type == ACCESS_TYPE_INDIRECT) {
-        ret = iwl_trans_read_mem(trans, cfg->dbg_marbh_conf_reg, &reg_val, 1);
-        if (ret) {
-            IWL_ERR(trans, "Failed to read MARBH conf reg\n");
-            return;
-        }
-        reg_val |= cfg->dbg_marbh_conf_mask;
-        ret = iwl_trans_write_mem(trans, cfg->dbg_marbh_conf_reg, &reg_val, 1);
-        if (ret) {
-            IWL_ERR(trans, "Failed to write MARBH conf reg\n");
-            return;
-        }
-    } else {
-        IWL_ERR(trans, "Invalid MARBH access type\n");
+  if (cfg->dbg_marbh_access_type == ACCESS_TYPE_DIRECT) {
+    iwl_trans_set_bits_mask(trans, cfg->dbg_marbh_conf_reg, cfg->dbg_marbh_conf_mask,
+                            cfg->dbg_marbh_conf_mask);
+  } else if (cfg->dbg_marbh_access_type == ACCESS_TYPE_INDIRECT) {
+    ret = iwl_trans_read_mem(trans, cfg->dbg_marbh_conf_reg, &reg_val, 1);
+    if (ret) {
+      IWL_ERR(trans, "Failed to read MARBH conf reg\n");
+      return;
     }
+    reg_val |= cfg->dbg_marbh_conf_mask;
+    ret = iwl_trans_write_mem(trans, cfg->dbg_marbh_conf_reg, &reg_val, 1);
+    if (ret) {
+      IWL_ERR(trans, "Failed to write MARBH conf reg\n");
+      return;
+    }
+  } else {
+    IWL_ERR(trans, "Invalid MARBH access type\n");
+  }
 }
 
 static void iwl_dnt_dev_if_configure_dbgc_registers(struct iwl_trans* trans, uint32_t base_addr,
                                                     uint32_t end_addr) {
-    struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
+  struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
 
-    switch (trans->tmdev->dnt->cur_mon_type) {
+  switch (trans->tmdev->dnt->cur_mon_type) {
     case SMEM:
-        iwl_write_prph(trans, cfg->dbgc_hb_base_addr, cfg->dbgc_hb_base_val_smem);
-        iwl_write_prph(trans, cfg->dbgc_hb_end_addr, cfg->dbgc_hb_end_val_smem);
+      iwl_write_prph(trans, cfg->dbgc_hb_base_addr, cfg->dbgc_hb_base_val_smem);
+      iwl_write_prph(trans, cfg->dbgc_hb_end_addr, cfg->dbgc_hb_end_val_smem);
 
-        /*
-         * SMEM requires the same internal configuration as MARBH,
-         * which preceded it.
-         */
-        iwl_dnt_dev_if_configure_marbh(trans);
-        break;
+      /*
+       * SMEM requires the same internal configuration as MARBH,
+       * which preceded it.
+       */
+      iwl_dnt_dev_if_configure_marbh(trans);
+      break;
 
     case DMA:
     default:
-        /*
-         * The given addresses are already shifted by 4 places so we
-         * need to shift by another 4.
-         * Note that in SfP the end addr points to the last block of
-         * data that the DBGC can write to, so when setting the end
-         * register we need to set it to 1 block before.
-         */
-        iwl_write_prph(trans, cfg->dbgc_hb_base_addr, base_addr >> 4);
-        iwl_write_prph(trans, cfg->dbgc_hb_end_addr, (end_addr >> 4) - 1);
-        break;
-    };
+      /*
+       * The given addresses are already shifted by 4 places so we
+       * need to shift by another 4.
+       * Note that in SfP the end addr points to the last block of
+       * data that the DBGC can write to, so when setting the end
+       * register we need to set it to 1 block before.
+       */
+      iwl_write_prph(trans, cfg->dbgc_hb_base_addr, base_addr >> 4);
+      iwl_write_prph(trans, cfg->dbgc_hb_end_addr, (end_addr >> 4) - 1);
+      break;
+  };
 }
 
 static void iwl_dnt_dev_if_configure_dbgm_registers(struct iwl_trans* trans, uint32_t base_addr,
                                                     uint32_t end_addr) {
-    struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
+  struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
 
-    /* If we're running a device that supports DBGC - use it */
-    if (trans->cfg->dbgc_supported) {
-        iwl_dnt_dev_if_configure_dbgc_registers(trans, base_addr, end_addr);
-        return;
-    }
+  /* If we're running a device that supports DBGC - use it */
+  if (trans->cfg->dbgc_supported) {
+    iwl_dnt_dev_if_configure_dbgc_registers(trans, base_addr, end_addr);
+    return;
+  }
 
-    /* configuring monitor */
-    iwl_write_prph(trans, cfg->dbg_mon_buff_base_addr_reg_addr, base_addr);
-    iwl_write_prph(trans, cfg->dbg_mon_buff_end_addr_reg_addr, end_addr);
-    iwl_write_prph(trans, cfg->dbg_mon_data_sel_ctl_addr, cfg->dbg_mon_data_sel_ctl_val);
-    iwl_write_prph(trans, cfg->dbg_mon_mc_msk_addr, cfg->dbg_mon_mc_msk_val);
-    iwl_write_prph(trans, cfg->dbg_mon_sample_mask_addr, cfg->dbg_mon_sample_mask_val);
-    iwl_write_prph(trans, cfg->dbg_mon_start_mask_addr, cfg->dbg_mon_start_mask_val);
-    iwl_write_prph(trans, cfg->dbg_mon_end_threshold_addr, cfg->dbg_mon_end_threshold_val);
-    iwl_write_prph(trans, cfg->dbg_mon_end_mask_addr, cfg->dbg_mon_end_mask_val);
-    iwl_write_prph(trans, cfg->dbg_mon_sample_period_addr, cfg->dbg_mon_sample_period_val);
-    /* starting monitor */
-    iwl_write_prph(trans, cfg->dbg_mon_sample_ctl_addr, cfg->dbg_mon_sample_ctl_val);
+  /* configuring monitor */
+  iwl_write_prph(trans, cfg->dbg_mon_buff_base_addr_reg_addr, base_addr);
+  iwl_write_prph(trans, cfg->dbg_mon_buff_end_addr_reg_addr, end_addr);
+  iwl_write_prph(trans, cfg->dbg_mon_data_sel_ctl_addr, cfg->dbg_mon_data_sel_ctl_val);
+  iwl_write_prph(trans, cfg->dbg_mon_mc_msk_addr, cfg->dbg_mon_mc_msk_val);
+  iwl_write_prph(trans, cfg->dbg_mon_sample_mask_addr, cfg->dbg_mon_sample_mask_val);
+  iwl_write_prph(trans, cfg->dbg_mon_start_mask_addr, cfg->dbg_mon_start_mask_val);
+  iwl_write_prph(trans, cfg->dbg_mon_end_threshold_addr, cfg->dbg_mon_end_threshold_val);
+  iwl_write_prph(trans, cfg->dbg_mon_end_mask_addr, cfg->dbg_mon_end_mask_val);
+  iwl_write_prph(trans, cfg->dbg_mon_sample_period_addr, cfg->dbg_mon_sample_period_val);
+  /* starting monitor */
+  iwl_write_prph(trans, cfg->dbg_mon_sample_ctl_addr, cfg->dbg_mon_sample_ctl_val);
 }
 
 static int iwl_dnt_dev_if_retrieve_dma_monitor_data(struct iwl_dnt* dnt, struct iwl_trans* trans,
                                                     void* buffer, uint32_t buffer_size) {
-    struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
-    uint32_t wr_ptr, wrap_cnt;
-    bool dont_reorder = false;
-    /* FIXME send stop command to FW */
-    if (WARN_ON_ONCE(!dnt->mon_buf_cpu_addr)) {
-        IWL_ERR(trans, "Can't retrieve data - DMA wasn't allocated\n");
-        return -ENOMEM;
-    }
+  struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
+  uint32_t wr_ptr, wrap_cnt;
+  bool dont_reorder = false;
+  /* FIXME send stop command to FW */
+  if (WARN_ON_ONCE(!dnt->mon_buf_cpu_addr)) {
+    IWL_ERR(trans, "Can't retrieve data - DMA wasn't allocated\n");
+    return -ENOMEM;
+  }
 
-    /* If we're running a device that supports DBGC - use it */
-    if (trans->cfg->dbgc_supported) {
-        wr_ptr = iwl_read_prph(trans, cfg->dbgc_dram_wrptr_addr);
-    } else {
-        wr_ptr = iwl_read_prph(trans, cfg->dbg_mon_wr_ptr_addr);
-    }
-    /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */
-    if (wr_ptr == 0x5a5a5a5a) {
-        IWL_ERR(trans, "Can't read write pointer - not reordering buffer\n");
-        dont_reorder = true;
-    }
+  /* If we're running a device that supports DBGC - use it */
+  if (trans->cfg->dbgc_supported) {
+    wr_ptr = iwl_read_prph(trans, cfg->dbgc_dram_wrptr_addr);
+  } else {
+    wr_ptr = iwl_read_prph(trans, cfg->dbg_mon_wr_ptr_addr);
+  }
+  /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */
+  if (wr_ptr == 0x5a5a5a5a) {
+    IWL_ERR(trans, "Can't read write pointer - not reordering buffer\n");
+    dont_reorder = true;
+  }
 
-    /* If we're running a device that supports DBGC.... */
-    if (trans->cfg->dbgc_supported)
-    /*
-     * wr_ptr is given relative to the base address, in
-     * DWORD granularity, and points to the next chunk to
-     * write to - i.e., the oldest data in the buffer.
-     */
-    {
-        wr_ptr <<= 2;
-    } else {
-        wr_ptr = (wr_ptr << 4) - dnt->mon_base_addr;
-    }
+  /* If we're running a device that supports DBGC.... */
+  if (trans->cfg->dbgc_supported)
+  /*
+   * wr_ptr is given relative to the base address, in
+   * DWORD granularity, and points to the next chunk to
+   * write to - i.e., the oldest data in the buffer.
+   */
+  {
+    wr_ptr <<= 2;
+  } else {
+    wr_ptr = (wr_ptr << 4) - dnt->mon_base_addr;
+  }
 
-    /* Misunderstanding wr_ptr can cause a page fault, so validate it... */
-    if (wr_ptr > dnt->mon_buf_size) {
-        IWL_ERR(trans,
-                "Write pointer DMA monitor register points to invalid data - setting to 0\n");
-        dont_reorder = true;
-    }
+  /* Misunderstanding wr_ptr can cause a page fault, so validate it... */
+  if (wr_ptr > dnt->mon_buf_size) {
+    IWL_ERR(trans, "Write pointer DMA monitor register points to invalid data - setting to 0\n");
+    dont_reorder = true;
+  }
 
-    /* We have a problem with the wr_ptr, so just return the memory as-is */
-    if (dont_reorder) { wr_ptr = 0; }
+  /* We have a problem with the wr_ptr, so just return the memory as-is */
+  if (dont_reorder) {
+    wr_ptr = 0;
+  }
 
-    if (cfg->dbgc_wrap_count_addr) {
-        wrap_cnt = iwl_read_prph(trans, cfg->dbgc_wrap_count_addr);
-    } else {
-        wrap_cnt = 1;
-    }
+  if (cfg->dbgc_wrap_count_addr) {
+    wrap_cnt = iwl_read_prph(trans, cfg->dbgc_wrap_count_addr);
+  } else {
+    wrap_cnt = 1;
+  }
 
-    if (wrap_cnt) {
-        memcpy(buffer, dnt->mon_buf_cpu_addr + wr_ptr, dnt->mon_buf_size - wr_ptr);
-        memcpy(buffer + dnt->mon_buf_size - wr_ptr, dnt->mon_buf_cpu_addr, wr_ptr);
-    } else {
-        memcpy(buffer, dnt->mon_buf_cpu_addr, wr_ptr);
-        memset(buffer + wr_ptr, 0, dnt->mon_buf_size - wr_ptr);
-    }
+  if (wrap_cnt) {
+    memcpy(buffer, dnt->mon_buf_cpu_addr + wr_ptr, dnt->mon_buf_size - wr_ptr);
+    memcpy(buffer + dnt->mon_buf_size - wr_ptr, dnt->mon_buf_cpu_addr, wr_ptr);
+  } else {
+    memcpy(buffer, dnt->mon_buf_cpu_addr, wr_ptr);
+    memset(buffer + wr_ptr, 0, dnt->mon_buf_size - wr_ptr);
+  }
 
-    return dnt->mon_buf_size;
+  return dnt->mon_buf_size;
 }
 
 static int iwl_dnt_dev_if_retrieve_marbh_monitor_data(struct iwl_dnt* dnt, struct iwl_trans* trans,
                                                       uint8_t* buffer, uint32_t buffer_size) {
-    struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
-    int buf_size_in_dwords, buf_index, i;
-    uint32_t wr_ptr, read_val;
+  struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
+  int buf_size_in_dwords, buf_index, i;
+  uint32_t wr_ptr, read_val;
 
-    /* FIXME send stop command to FW */
+  /* FIXME send stop command to FW */
 
-    wr_ptr = iwl_read_prph(trans, cfg->dbg_mon_wr_ptr_addr);
-    /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */
-    if (wr_ptr == 0x5a5a5a5a) {
-        IWL_ERR(trans, "Can't read write pointer\n");
-        return -ENODEV;
-    }
+  wr_ptr = iwl_read_prph(trans, cfg->dbg_mon_wr_ptr_addr);
+  /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */
+  if (wr_ptr == 0x5a5a5a5a) {
+    IWL_ERR(trans, "Can't read write pointer\n");
+    return -ENODEV;
+  }
 
-    read_val = iwl_read_prph(trans, cfg->dbg_mon_buff_base_addr_reg_addr);
-    if (read_val == 0x5a5a5a5a) {
-        IWL_ERR(trans, "Can't read monitor base address\n");
-        return -ENODEV;
-    }
-    dnt->mon_base_addr = read_val;
+  read_val = iwl_read_prph(trans, cfg->dbg_mon_buff_base_addr_reg_addr);
+  if (read_val == 0x5a5a5a5a) {
+    IWL_ERR(trans, "Can't read monitor base address\n");
+    return -ENODEV;
+  }
+  dnt->mon_base_addr = read_val;
 
-    read_val = iwl_read_prph(trans, cfg->dbg_mon_buff_end_addr_reg_addr);
-    if (read_val == 0x5a5a5a5a) {
-        IWL_ERR(trans, "Can't read monitor end address\n");
-        return -ENODEV;
-    }
-    dnt->mon_end_addr = read_val;
+  read_val = iwl_read_prph(trans, cfg->dbg_mon_buff_end_addr_reg_addr);
+  if (read_val == 0x5a5a5a5a) {
+    IWL_ERR(trans, "Can't read monitor end address\n");
+    return -ENODEV;
+  }
+  dnt->mon_end_addr = read_val;
 
-    wr_ptr = wr_ptr - dnt->mon_base_addr;
-    iwl_write_prph(trans, cfg->dbg_mon_dmarb_rd_ctl_addr, 0x00000001);
+  wr_ptr = wr_ptr - dnt->mon_base_addr;
+  iwl_write_prph(trans, cfg->dbg_mon_dmarb_rd_ctl_addr, 0x00000001);
 
-    /* buf size includes the end_addr as well */
-    buf_size_in_dwords = dnt->mon_end_addr - dnt->mon_base_addr + 1;
-    for (i = 0; i < buf_size_in_dwords; i++) {
-        /* reordering cyclic buffer */
-        buf_index = (i + (buf_size_in_dwords - wr_ptr)) % buf_size_in_dwords;
-        read_val = iwl_read_prph(trans, cfg->dbg_mon_dmarb_rd_data_addr);
-        memcpy(&buffer[buf_index * sizeof(uint32_t)], &read_val, sizeof(uint32_t));
-    }
-    iwl_write_prph(trans, cfg->dbg_mon_dmarb_rd_ctl_addr, 0x00000000);
+  /* buf size includes the end_addr as well */
+  buf_size_in_dwords = dnt->mon_end_addr - dnt->mon_base_addr + 1;
+  for (i = 0; i < buf_size_in_dwords; i++) {
+    /* reordering cyclic buffer */
+    buf_index = (i + (buf_size_in_dwords - wr_ptr)) % buf_size_in_dwords;
+    read_val = iwl_read_prph(trans, cfg->dbg_mon_dmarb_rd_data_addr);
+    memcpy(&buffer[buf_index * sizeof(uint32_t)], &read_val, sizeof(uint32_t));
+  }
+  iwl_write_prph(trans, cfg->dbg_mon_dmarb_rd_ctl_addr, 0x00000000);
 
-    return buf_size_in_dwords * sizeof(uint32_t);
+  return buf_size_in_dwords * sizeof(uint32_t);
 }
 
 static int iwl_dnt_dev_if_retrieve_smem_monitor_data(struct iwl_dnt* dnt, struct iwl_trans* trans,
                                                      uint8_t* buffer, uint32_t buffer_size) {
-    struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
-    uint32_t i, bytes_to_end, calc_size;
-    uint32_t base_addr, end_addr, wr_ptr_addr, wr_ptr_shift;
-    uint32_t base, end, wr_ptr, pos, chunks_num, wr_ptr_offset, wrap_cnt;
-    uint8_t* temp_buffer;
+  struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
+  uint32_t i, bytes_to_end, calc_size;
+  uint32_t base_addr, end_addr, wr_ptr_addr, wr_ptr_shift;
+  uint32_t base, end, wr_ptr, pos, chunks_num, wr_ptr_offset, wrap_cnt;
+  uint8_t* temp_buffer;
 
-    /* assuming B-step or C-step */
-    base_addr = cfg->dbg_mon_buff_base_addr_reg_addr_b_step;
-    end_addr = cfg->dbg_mon_buff_end_addr_reg_addr_b_step;
-    wr_ptr_addr = cfg->dbg_mon_wr_ptr_addr_b_step;
-    wr_ptr_shift = 2;
+  /* assuming B-step or C-step */
+  base_addr = cfg->dbg_mon_buff_base_addr_reg_addr_b_step;
+  end_addr = cfg->dbg_mon_buff_end_addr_reg_addr_b_step;
+  wr_ptr_addr = cfg->dbg_mon_wr_ptr_addr_b_step;
+  wr_ptr_shift = 2;
 
-    base = iwl_read_prph(trans, base_addr);
-    /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */
-    if (base == 0x5a5a5a5a) {
-        IWL_ERR(trans, "Can't read base addr\n");
-        return -ENODEV;
-    }
+  base = iwl_read_prph(trans, base_addr);
+  /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */
+  if (base == 0x5a5a5a5a) {
+    IWL_ERR(trans, "Can't read base addr\n");
+    return -ENODEV;
+  }
 
-    end = iwl_read_prph(trans, end_addr);
-    /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */
-    if (end == 0x5a5a5a5a) {
-        IWL_ERR(trans, "Can't read end addr\n");
-        return -ENODEV;
-    }
+  end = iwl_read_prph(trans, end_addr);
+  /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */
+  if (end == 0x5a5a5a5a) {
+    IWL_ERR(trans, "Can't read end addr\n");
+    return -ENODEV;
+  }
 
-    if (base == end) {
-        IWL_ERR(trans, "Invalid base and end values\n");
-        return -ENODEV;
-    }
+  if (base == end) {
+    IWL_ERR(trans, "Invalid base and end values\n");
+    return -ENODEV;
+  }
 
-    wr_ptr = iwl_read_prph(trans, wr_ptr_addr);
-    /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */
-    if (wr_ptr == 0x5a5a5a5a) {
-        IWL_ERR(trans, "Can't read write pointer, not re-aligning\n");
-        wr_ptr = base << 8;
-    }
+  wr_ptr = iwl_read_prph(trans, wr_ptr_addr);
+  /* iwl_read_prph returns 0x5a5a5a5a when it fails to grab nic access */
+  if (wr_ptr == 0x5a5a5a5a) {
+    IWL_ERR(trans, "Can't read write pointer, not re-aligning\n");
+    wr_ptr = base << 8;
+  }
 
-    pos = base << 8;
-    calc_size = (end - base + 1) << 8;
-    wr_ptr <<= wr_ptr_shift;
-    bytes_to_end = ((end + 1) << 8) - wr_ptr;
-    chunks_num = calc_size / DNT_CHUNK_SIZE;
-    wr_ptr_offset = wr_ptr - pos;
+  pos = base << 8;
+  calc_size = (end - base + 1) << 8;
+  wr_ptr <<= wr_ptr_shift;
+  bytes_to_end = ((end + 1) << 8) - wr_ptr;
+  chunks_num = calc_size / DNT_CHUNK_SIZE;
+  wr_ptr_offset = wr_ptr - pos;
 
-    if (wr_ptr_offset > calc_size) {
-        IWL_ERR(trans, "Invalid wr_ptr value, not re-aligning\n");
-        wr_ptr_offset = 0;
-    }
+  if (wr_ptr_offset > calc_size) {
+    IWL_ERR(trans, "Invalid wr_ptr value, not re-aligning\n");
+    wr_ptr_offset = 0;
+  }
 
-    if (calc_size > buffer_size) {
-        IWL_ERR(trans, "Invalid buffer size\n");
-        return -EINVAL;
-    }
+  if (calc_size > buffer_size) {
+    IWL_ERR(trans, "Invalid buffer size\n");
+    return -EINVAL;
+  }
 
-    temp_buffer = kzalloc(calc_size, GFP_KERNEL);
-    if (!temp_buffer) { return -ENOMEM; }
+  temp_buffer = kzalloc(calc_size, GFP_KERNEL);
+  if (!temp_buffer) {
+    return -ENOMEM;
+  }
 
-    for (i = 0; i < chunks_num; i++)
-        iwl_trans_read_mem(trans, pos + (i * DNT_CHUNK_SIZE), temp_buffer + (i * DNT_CHUNK_SIZE),
-                           DNT_CHUNK_SIZE / sizeof(uint32_t));
+  for (i = 0; i < chunks_num; i++)
+    iwl_trans_read_mem(trans, pos + (i * DNT_CHUNK_SIZE), temp_buffer + (i * DNT_CHUNK_SIZE),
+                       DNT_CHUNK_SIZE / sizeof(uint32_t));
 
-    if (calc_size % DNT_CHUNK_SIZE)
-        iwl_trans_read_mem(trans, pos + (chunks_num * DNT_CHUNK_SIZE),
-                           temp_buffer + (chunks_num * DNT_CHUNK_SIZE),
-                           (calc_size - (chunks_num * DNT_CHUNK_SIZE)) / sizeof(uint32_t));
+  if (calc_size % DNT_CHUNK_SIZE)
+    iwl_trans_read_mem(trans, pos + (chunks_num * DNT_CHUNK_SIZE),
+                       temp_buffer + (chunks_num * DNT_CHUNK_SIZE),
+                       (calc_size - (chunks_num * DNT_CHUNK_SIZE)) / sizeof(uint32_t));
 
-    if (cfg->dbgc_wrap_count_addr) {
-        wrap_cnt = iwl_read_prph(trans, cfg->dbgc_wrap_count_addr);
-    } else {
-        wrap_cnt = 1;
-    }
+  if (cfg->dbgc_wrap_count_addr) {
+    wrap_cnt = iwl_read_prph(trans, cfg->dbgc_wrap_count_addr);
+  } else {
+    wrap_cnt = 1;
+  }
 
-    if (wrap_cnt) {
-        memcpy(buffer, temp_buffer + wr_ptr_offset, bytes_to_end);
-        memcpy(buffer + bytes_to_end, temp_buffer, wr_ptr_offset);
-    } else {
-        memcpy(buffer, temp_buffer, wr_ptr_offset);
-        memset(buffer + wr_ptr_offset, 0, bytes_to_end);
-    }
+  if (wrap_cnt) {
+    memcpy(buffer, temp_buffer + wr_ptr_offset, bytes_to_end);
+    memcpy(buffer + bytes_to_end, temp_buffer, wr_ptr_offset);
+  } else {
+    memcpy(buffer, temp_buffer, wr_ptr_offset);
+    memset(buffer + wr_ptr_offset, 0, bytes_to_end);
+  }
 
-    kfree(temp_buffer);
+  kfree(temp_buffer);
 
-    return calc_size;
+  return calc_size;
 }
 
 int iwl_dnt_dev_if_configure_monitor(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    uint32_t base_addr, end_addr;
+  uint32_t base_addr, end_addr;
 
-    switch (dnt->cur_mon_type) {
+  switch (dnt->cur_mon_type) {
     case NO_MONITOR:
-        IWL_INFO(trans, "Monitor is disabled\n");
-        dnt->iwl_dnt_status &= ~IWL_DNT_STATUS_MON_CONFIGURED;
-        break;
+      IWL_INFO(trans, "Monitor is disabled\n");
+      dnt->iwl_dnt_status &= ~IWL_DNT_STATUS_MON_CONFIGURED;
+      break;
     case MARBH_ADC:
     case MARBH_DBG:
-        iwl_dnt_dev_if_configure_marbh(trans);
-        dnt->mon_buf_size = DNT_MARBH_BUF_SIZE;
-        break;
+      iwl_dnt_dev_if_configure_marbh(trans);
+      dnt->mon_buf_size = DNT_MARBH_BUF_SIZE;
+      break;
     case DMA:
-        if (!dnt->mon_buf_cpu_addr) {
-            IWL_ERR(trans, "Can't configure DMA monitor: no cpu addr\n");
-            return -ENOMEM;
-        }
-        base_addr = dnt->mon_base_addr >> 4;
-        end_addr = dnt->mon_end_addr >> 4;
-        iwl_dnt_dev_if_configure_dbgm_registers(trans, base_addr, end_addr);
-        break;
+      if (!dnt->mon_buf_cpu_addr) {
+        IWL_ERR(trans, "Can't configure DMA monitor: no cpu addr\n");
+        return -ENOMEM;
+      }
+      base_addr = dnt->mon_base_addr >> 4;
+      end_addr = dnt->mon_end_addr >> 4;
+      iwl_dnt_dev_if_configure_dbgm_registers(trans, base_addr, end_addr);
+      break;
     case MIPI:
-        iwl_dnt_dev_if_configure_mipi(trans);
-        break;
+      iwl_dnt_dev_if_configure_mipi(trans);
+      break;
     case SMEM:
-        base_addr = 0;
-        end_addr = 0;
-        iwl_dnt_dev_if_configure_dbgm_registers(trans, base_addr, end_addr);
-        dnt->mon_buf_size = DNT_SMEM_BUF_SIZE;
-        break;
+      base_addr = 0;
+      end_addr = 0;
+      iwl_dnt_dev_if_configure_dbgm_registers(trans, base_addr, end_addr);
+      dnt->mon_buf_size = DNT_SMEM_BUF_SIZE;
+      break;
     case INTERFACE:
-        base_addr = 0;
-        end_addr = 0x400;
-        iwl_dnt_dev_if_configure_dbgm_registers(trans, base_addr, end_addr);
-        break;
+      base_addr = 0;
+      end_addr = 0x400;
+      iwl_dnt_dev_if_configure_dbgm_registers(trans, base_addr, end_addr);
+      break;
     default:
-        dnt->iwl_dnt_status &= ~IWL_DNT_STATUS_MON_CONFIGURED;
-        IWL_INFO(trans, "Invalid monitor type\n");
-        return -EINVAL;
-    }
+      dnt->iwl_dnt_status &= ~IWL_DNT_STATUS_MON_CONFIGURED;
+      IWL_INFO(trans, "Invalid monitor type\n");
+      return -EINVAL;
+  }
 
-    dnt->iwl_dnt_status |= IWL_DNT_STATUS_MON_CONFIGURED;
+  dnt->iwl_dnt_status |= IWL_DNT_STATUS_MON_CONFIGURED;
 
-    return 0;
+  return 0;
 }
 
 static int iwl_dnt_dev_if_send_dbgm(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
-    struct iwl_host_cmd host_cmd = {
-        .id = cfg->dbg_conf_monitor_cmd_id,
-        .data[0] = cfg->dbg_conf_monitor_host_command.data,
-        .len[0] = cfg->dbg_conf_monitor_host_command.len,
-        .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-    };
-    int ret;
+  struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
+  struct iwl_host_cmd host_cmd = {
+      .id = cfg->dbg_conf_monitor_cmd_id,
+      .data[0] = cfg->dbg_conf_monitor_host_command.data,
+      .len[0] = cfg->dbg_conf_monitor_host_command.len,
+      .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+  };
+  int ret;
 
-    ret = iwl_trans_send_cmd(trans, &host_cmd);
-    if (ret) {
-        IWL_ERR(trans, "Failed to send monitor command\n");
-        dnt->iwl_dnt_status |= IWL_DNT_STATUS_FAILED_START_MONITOR;
-    }
+  ret = iwl_trans_send_cmd(trans, &host_cmd);
+  if (ret) {
+    IWL_ERR(trans, "Failed to send monitor command\n");
+    dnt->iwl_dnt_status |= IWL_DNT_STATUS_FAILED_START_MONITOR;
+  }
 
-    return ret;
+  return ret;
 }
 
 static int iwl_dnt_dev_if_send_ldbg(struct iwl_dnt* dnt, struct iwl_trans* trans, int cmd_index) {
-    struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
-    struct iwl_host_cmd host_cmd = {
-        .id = cfg->dbg_conf_monitor_cmd_id,
-        .data[0] = cfg->ldbg_cmd[cmd_index].data,
-        .len[0] = DNT_LDBG_CMD_SIZE,
-        .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-    };
+  struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
+  struct iwl_host_cmd host_cmd = {
+      .id = cfg->dbg_conf_monitor_cmd_id,
+      .data[0] = cfg->ldbg_cmd[cmd_index].data,
+      .len[0] = DNT_LDBG_CMD_SIZE,
+      .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+  };
 
-    return iwl_trans_send_cmd(trans, &host_cmd);
+  return iwl_trans_send_cmd(trans, &host_cmd);
 }
 
 int iwl_dnt_dev_if_start_monitor(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
-    int i, ret;
+  struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
+  int i, ret;
 
-    switch (cfg->dbgm_enable_mode) {
+  switch (cfg->dbgm_enable_mode) {
     case DEBUG:
-        return iwl_dnt_dev_if_send_dbgm(dnt, trans);
+      return iwl_dnt_dev_if_send_dbgm(dnt, trans);
     case SNIFFER:
-        ret = 0;
-        for (i = 0; i < cfg->ldbg_cmd_nums; i++) {
-            ret = iwl_dnt_dev_if_send_ldbg(dnt, trans, i);
-            if (ret) {
-                IWL_ERR(trans, "Failed to send ldbg command\n");
-                break;
-            }
+      ret = 0;
+      for (i = 0; i < cfg->ldbg_cmd_nums; i++) {
+        ret = iwl_dnt_dev_if_send_ldbg(dnt, trans, i);
+        if (ret) {
+          IWL_ERR(trans, "Failed to send ldbg command\n");
+          break;
         }
-        return ret;
+      }
+      return ret;
     default:
-        WARN_ONCE(1, "invalid option: %d\n", cfg->dbgm_enable_mode);
-        return -EINVAL;
-    }
+      WARN_ONCE(1, "invalid option: %d\n", cfg->dbgm_enable_mode);
+      return -EINVAL;
+  }
 }
 
 int iwl_dnt_dev_if_set_log_level(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
-    struct iwl_host_cmd host_cmd = {
-        .id = cfg->log_level_cmd_id,
-        .data[0] = cfg->log_level_cmd.data,
-        .len[0] = cfg->log_level_cmd.len,
-        .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-    };
-    int ret;
+  struct iwl_dbg_cfg* cfg = &trans->dbg_cfg;
+  struct iwl_host_cmd host_cmd = {
+      .id = cfg->log_level_cmd_id,
+      .data[0] = cfg->log_level_cmd.data,
+      .len[0] = cfg->log_level_cmd.len,
+      .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+  };
+  int ret;
 
-    ret = iwl_trans_send_cmd(trans, &host_cmd);
-    if (ret) { IWL_ERR(trans, "Failed to send log level cmd\n"); }
+  ret = iwl_trans_send_cmd(trans, &host_cmd);
+  if (ret) {
+    IWL_ERR(trans, "Failed to send log level cmd\n");
+  }
 
-    return ret;
+  return ret;
 }
 
 int iwl_dnt_dev_if_retrieve_monitor_data(struct iwl_dnt* dnt, struct iwl_trans* trans,
                                          uint8_t* buffer, uint32_t buffer_size) {
-    switch (dnt->cur_mon_type) {
+  switch (dnt->cur_mon_type) {
     case DMA:
-        return iwl_dnt_dev_if_retrieve_dma_monitor_data(dnt, trans, buffer, buffer_size);
+      return iwl_dnt_dev_if_retrieve_dma_monitor_data(dnt, trans, buffer, buffer_size);
     case MARBH_ADC:
     case MARBH_DBG:
-        return iwl_dnt_dev_if_retrieve_marbh_monitor_data(dnt, trans, buffer, buffer_size);
+      return iwl_dnt_dev_if_retrieve_marbh_monitor_data(dnt, trans, buffer, buffer_size);
     case SMEM:
-        return iwl_dnt_dev_if_retrieve_smem_monitor_data(dnt, trans, buffer, buffer_size);
+      return iwl_dnt_dev_if_retrieve_smem_monitor_data(dnt, trans, buffer, buffer_size);
     case INTERFACE:
     default:
-        WARN_ONCE(1, "invalid option: %d\n", dnt->cur_mon_type);
-        return -EINVAL;
-    }
+      WARN_ONCE(1, "invalid option: %d\n", dnt->cur_mon_type);
+      return -EINVAL;
+  }
 }
 
 int iwl_dnt_dev_if_read_sram(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    struct dnt_crash_data* crash = &dnt->dispatch.crash;
-    int ofs, len = 0;
+  struct dnt_crash_data* crash = &dnt->dispatch.crash;
+  int ofs, len = 0;
 
-    ofs = dnt->image->sec[IWL_UCODE_SECTION_DATA].offset;
-    len = dnt->image->sec[IWL_UCODE_SECTION_DATA].len;
+  ofs = dnt->image->sec[IWL_UCODE_SECTION_DATA].offset;
+  len = dnt->image->sec[IWL_UCODE_SECTION_DATA].len;
 
-    crash->sram = vmalloc(len);
-    if (!crash->sram) { return -ENOMEM; }
+  crash->sram = vmalloc(len);
+  if (!crash->sram) {
+    return -ENOMEM;
+  }
 
-    crash->sram_buf_size = len;
-    return iwl_trans_read_mem(trans, ofs, crash->sram, len / sizeof(uint32_t));
+  crash->sram_buf_size = len;
+  return iwl_trans_read_mem(trans, ofs, crash->sram, len / sizeof(uint32_t));
 }
 IWL_EXPORT_SYMBOL(iwl_dnt_dev_if_read_sram);
 
 int iwl_dnt_dev_if_read_rx(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    struct dnt_crash_data* crash = &dnt->dispatch.crash;
-    int i, reg_val;
-    uint32_t buf32_size, offset = 0;
-    uint32_t* buf32;
-    unsigned long flags;
+  struct dnt_crash_data* crash = &dnt->dispatch.crash;
+  int i, reg_val;
+  uint32_t buf32_size, offset = 0;
+  uint32_t* buf32;
+  unsigned long flags;
 
-    /* reading buffer size */
-    reg_val = iwl_trans_read_prph(trans, RXF_SIZE_ADDR);
-    crash->rx_buf_size = (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
+  /* reading buffer size */
+  reg_val = iwl_trans_read_prph(trans, RXF_SIZE_ADDR);
+  crash->rx_buf_size = (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
 
-    /* the register holds the value divided by 128 */
-    crash->rx_buf_size = crash->rx_buf_size << 7;
+  /* the register holds the value divided by 128 */
+  crash->rx_buf_size = crash->rx_buf_size << 7;
 
-    if (!crash->rx_buf_size) { return -ENOMEM; }
+  if (!crash->rx_buf_size) {
+    return -ENOMEM;
+  }
 
-    buf32_size = crash->rx_buf_size / sizeof(uint32_t);
+  buf32_size = crash->rx_buf_size / sizeof(uint32_t);
 
-    crash->rx = vmalloc(crash->rx_buf_size);
-    if (!crash->rx) { return -ENOMEM; }
+  crash->rx = vmalloc(crash->rx_buf_size);
+  if (!crash->rx) {
+    return -ENOMEM;
+  }
 
-    buf32 = (uint32_t*)crash->rx;
+  buf32 = (uint32_t*)crash->rx;
 
-    if (!iwl_trans_grab_nic_access(trans, &flags)) {
-        vfree(crash->rx);
-        return -EBUSY;
-    }
-    for (i = 0; i < buf32_size; i++) {
-        iwl_trans_write_prph(trans, RXF_LD_FENCE_OFFSET_ADDR, offset);
-        offset += sizeof(uint32_t);
-        buf32[i] = iwl_trans_read_prph(trans, RXF_FIFO_RD_FENCE_ADDR);
-    }
-    iwl_trans_release_nic_access(trans, &flags);
+  if (!iwl_trans_grab_nic_access(trans, &flags)) {
+    vfree(crash->rx);
+    return -EBUSY;
+  }
+  for (i = 0; i < buf32_size; i++) {
+    iwl_trans_write_prph(trans, RXF_LD_FENCE_OFFSET_ADDR, offset);
+    offset += sizeof(uint32_t);
+    buf32[i] = iwl_trans_read_prph(trans, RXF_FIFO_RD_FENCE_ADDR);
+  }
+  iwl_trans_release_nic_access(trans, &flags);
 
-    return 0;
+  return 0;
 }
 IWL_EXPORT_SYMBOL(iwl_dnt_dev_if_read_rx);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dev-if.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dev-if.h
index b94031c..a5a8a48 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dev-if.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dev-if.h
@@ -49,8 +49,8 @@
 
 /* marbh access types */
 enum {
-    ACCESS_TYPE_DIRECT = 0,
-    ACCESS_TYPE_INDIRECT,
+  ACCESS_TYPE_DIRECT = 0,
+  ACCESS_TYPE_INDIRECT,
 };
 
 /**
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dispatch.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dispatch.c
index 225d19a..742963a 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dispatch.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-dispatch.c
@@ -33,6 +33,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include "iwl-dnt-dispatch.h"
+
 #include <linux/export.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
@@ -41,56 +43,57 @@
 #include "iwl-debug.h"
 #include "iwl-dnt-cfg.h"
 #include "iwl-dnt-dev-if.h"
-#include "iwl-dnt-dispatch.h"
 #include "iwl-io.h"
 #include "iwl-tm-gnl.h"
 #include "iwl-tm-infc.h"
 
 struct dnt_collect_db* iwl_dnt_dispatch_allocate_collect_db(struct iwl_dnt* dnt) {
-    struct dnt_collect_db* db;
+  struct dnt_collect_db* db;
 
-    db = kzalloc(sizeof(struct dnt_collect_db), GFP_KERNEL);
-    if (!db) {
-        dnt->iwl_dnt_status |= IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DB;
-        return NULL;
-    }
+  db = kzalloc(sizeof(struct dnt_collect_db), GFP_KERNEL);
+  if (!db) {
+    dnt->iwl_dnt_status |= IWL_DNT_STATUS_FAILED_TO_ALLOCATE_DB;
+    return NULL;
+  }
 
-    spin_lock_init(&db->db_lock);
-    init_waitqueue_head(&db->waitq);
+  spin_lock_init(&db->db_lock);
+  init_waitqueue_head(&db->waitq);
 
-    return db;
+  return db;
 }
 
 static void iwl_dnt_dispatch_free_collect_db(struct dnt_collect_db* db) {
-    int i;
+  int i;
 
-    for (i = 0; i < ARRAY_SIZE(db->collect_array); i++) {
-        kfree(db->collect_array[i].data);
-    }
+  for (i = 0; i < ARRAY_SIZE(db->collect_array); i++) {
+    kfree(db->collect_array[i].data);
+  }
 
-    kfree(db);
+  kfree(db);
 }
 
 static int iwl_dnt_dispatch_get_list_data(struct dnt_collect_db* db, uint8_t* buffer,
                                           uint32_t buffer_size) {
-    struct dnt_collect_entry* cur_entry;
-    int data_offset = 0;
+  struct dnt_collect_entry* cur_entry;
+  int data_offset = 0;
 
-    spin_lock_bh(&db->db_lock);
-    while (db->read_ptr != db->wr_ptr) {
-        cur_entry = &db->collect_array[db->read_ptr];
-        if (data_offset + cur_entry->size > buffer_size) { break; }
-        memcpy(buffer + data_offset, cur_entry->data, cur_entry->size);
-        data_offset += cur_entry->size;
-        cur_entry->size = 0;
-        kfree(cur_entry->data);
-        cur_entry->data = NULL;
-
-        /* increment read_ptr */
-        db->read_ptr = (db->read_ptr + 1) % IWL_DNT_ARRAY_SIZE;
+  spin_lock_bh(&db->db_lock);
+  while (db->read_ptr != db->wr_ptr) {
+    cur_entry = &db->collect_array[db->read_ptr];
+    if (data_offset + cur_entry->size > buffer_size) {
+      break;
     }
-    spin_unlock_bh(&db->db_lock);
-    return data_offset;
+    memcpy(buffer + data_offset, cur_entry->data, cur_entry->size);
+    data_offset += cur_entry->size;
+    cur_entry->size = 0;
+    kfree(cur_entry->data);
+    cur_entry->data = NULL;
+
+    /* increment read_ptr */
+    db->read_ptr = (db->read_ptr + 1) % IWL_DNT_ARRAY_SIZE;
+  }
+  spin_unlock_bh(&db->db_lock);
+  return data_offset;
 }
 
 /**
@@ -99,7 +102,7 @@
  */
 static void iwl_dnt_dispatch_push_ftrace_handler(struct iwl_dnt* dnt, uint8_t* buffer,
                                                  uint32_t buffer_size) {
-    trace_iwlwifi_dev_dnt_data(dnt->dev, buffer, buffer_size);
+  trace_iwlwifi_dev_dnt_data(dnt->dev, buffer, buffer_size);
 }
 
 /**
@@ -109,306 +112,340 @@
 static int iwl_dnt_dispatch_push_netlink_handler(struct iwl_dnt* dnt, struct iwl_trans* trans,
                                                  unsigned int cmd_id, uint8_t* buffer,
                                                  uint32_t buffer_size) {
-    return iwl_tm_gnl_send_msg(trans, cmd_id, false, buffer, buffer_size, GFP_ATOMIC);
+  return iwl_tm_gnl_send_msg(trans, cmd_id, false, buffer, buffer_size, GFP_ATOMIC);
 }
 
 static int iwl_dnt_dispatch_pull_monitor(struct iwl_dnt* dnt, struct iwl_trans* trans,
                                          uint8_t* buffer, uint32_t buffer_size) {
-    int ret = 0;
+  int ret = 0;
 
-    if (dnt->cur_mon_type == INTERFACE) {
-        ret = iwl_dnt_dispatch_get_list_data(dnt->dispatch.dbgm_db, buffer, buffer_size);
-    } else {
-        ret = iwl_dnt_dev_if_retrieve_monitor_data(dnt, trans, buffer, buffer_size);
-    }
-    return ret;
+  if (dnt->cur_mon_type == INTERFACE) {
+    ret = iwl_dnt_dispatch_get_list_data(dnt->dispatch.dbgm_db, buffer, buffer_size);
+  } else {
+    ret = iwl_dnt_dev_if_retrieve_monitor_data(dnt, trans, buffer, buffer_size);
+  }
+  return ret;
 }
 
 int iwl_dnt_dispatch_pull(struct iwl_trans* trans, uint8_t* buffer, uint32_t buffer_size,
                           uint32_t input) {
-    struct iwl_dnt* dnt = trans->tmdev->dnt;
-    int ret = 0;
+  struct iwl_dnt* dnt = trans->tmdev->dnt;
+  int ret = 0;
 
-    if (!trans->op_mode) { return -EINVAL; }
+  if (!trans->op_mode) {
+    return -EINVAL;
+  }
 
-    switch (input) {
+  switch (input) {
     case MONITOR:
-        ret = iwl_dnt_dispatch_pull_monitor(dnt, trans, buffer, buffer_size);
-        break;
+      ret = iwl_dnt_dispatch_pull_monitor(dnt, trans, buffer, buffer_size);
+      break;
     case UCODE_MESSAGES:
-        ret = iwl_dnt_dispatch_get_list_data(dnt->dispatch.um_db, buffer, buffer_size);
-        break;
+      ret = iwl_dnt_dispatch_get_list_data(dnt->dispatch.um_db, buffer, buffer_size);
+      break;
     default:
-        WARN_ONCE(1, "Invalid input mode %d\n", input);
-        return -EINVAL;
-    }
+      WARN_ONCE(1, "Invalid input mode %d\n", input);
+      return -EINVAL;
+  }
 
-    return ret;
+  return ret;
 }
 
 static int iwl_dnt_dispatch_collect_data(struct iwl_dnt* dnt, struct dnt_collect_db* db,
                                          struct iwl_rx_packet* pkt) {
-    struct dnt_collect_entry* wr_entry;
-    uint32_t data_size;
+  struct dnt_collect_entry* wr_entry;
+  uint32_t data_size;
 
-    data_size = GET_RX_PACKET_SIZE(pkt);
-    spin_lock(&db->db_lock);
-    wr_entry = &db->collect_array[db->wr_ptr];
+  data_size = GET_RX_PACKET_SIZE(pkt);
+  spin_lock(&db->db_lock);
+  wr_entry = &db->collect_array[db->wr_ptr];
 
-    /*
-     * cheking if wr_ptr is already in use
-     * if so it means that we complete a cycle in the array
-     * hence replacing data in wr_ptr
-     */
-    if (WARN_ON_ONCE(wr_entry->data)) {
-        spin_unlock(&db->db_lock);
-        return -ENOMEM;
-    }
-
-    wr_entry->size = data_size;
-    wr_entry->data = kzalloc(data_size, GFP_ATOMIC);
-    if (!wr_entry->data) {
-        spin_unlock(&db->db_lock);
-        return -ENOMEM;
-    }
-
-    memcpy(wr_entry->data, pkt->data, wr_entry->size);
-    db->wr_ptr = (db->wr_ptr + 1) % IWL_DNT_ARRAY_SIZE;
-
-    if (db->wr_ptr == db->read_ptr) {
-        /*
-         * since we overrun oldest data we should update read
-         * ptr to the next oldest data
-         */
-        struct dnt_collect_entry* rd_entry = &db->collect_array[db->read_ptr];
-
-        kfree(rd_entry->data);
-        rd_entry->data = NULL;
-        db->read_ptr = (db->read_ptr + 1) % IWL_DNT_ARRAY_SIZE;
-    }
-    wake_up_interruptible(&db->waitq);
+  /*
+   * checking if wr_ptr is already in use
+   * if so it means that we complete a cycle in the array
+   * hence replacing data in wr_ptr
+   */
+  if (WARN_ON_ONCE(wr_entry->data)) {
     spin_unlock(&db->db_lock);
+    return -ENOMEM;
+  }
 
-    return 0;
+  wr_entry->size = data_size;
+  wr_entry->data = kzalloc(data_size, GFP_ATOMIC);
+  if (!wr_entry->data) {
+    spin_unlock(&db->db_lock);
+    return -ENOMEM;
+  }
+
+  memcpy(wr_entry->data, pkt->data, wr_entry->size);
+  db->wr_ptr = (db->wr_ptr + 1) % IWL_DNT_ARRAY_SIZE;
+
+  if (db->wr_ptr == db->read_ptr) {
+    /*
+     * since we overrun oldest data we should update read
+     * ptr to the next oldest data
+     */
+    struct dnt_collect_entry* rd_entry = &db->collect_array[db->read_ptr];
+
+    kfree(rd_entry->data);
+    rd_entry->data = NULL;
+    db->read_ptr = (db->read_ptr + 1) % IWL_DNT_ARRAY_SIZE;
+  }
+  wake_up_interruptible(&db->waitq);
+  spin_unlock(&db->db_lock);
+
+  return 0;
 }
 
 int iwl_dnt_dispatch_collect_ucode_message(struct iwl_trans* trans, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_dnt* dnt = trans->tmdev->dnt;
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_dnt_dispatch* dispatch;
-    struct dnt_collect_db* db;
-    int data_size;
+  struct iwl_dnt* dnt = trans->tmdev->dnt;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_dnt_dispatch* dispatch;
+  struct dnt_collect_db* db;
+  int data_size;
 
-    dispatch = &dnt->dispatch;
-    db = dispatch->um_db;
+  dispatch = &dnt->dispatch;
+  db = dispatch->um_db;
 
-    if (dispatch->ucode_msgs_in_mode != COLLECT) { return 0; }
-
-    if (dispatch->ucode_msgs_out_mode != PUSH) {
-        return iwl_dnt_dispatch_collect_data(dnt, db, pkt);
-    }
-
-    data_size = GET_RX_PACKET_SIZE(pkt);
-    if (dispatch->ucode_msgs_output == FTRACE) {
-        iwl_dnt_dispatch_push_ftrace_handler(dnt, pkt->data, data_size);
-    } else if (dispatch->ucode_msgs_output == NETLINK)
-        iwl_dnt_dispatch_push_netlink_handler(dnt, trans, IWL_TM_USER_CMD_NOTIF_UCODE_MSGS_DATA,
-                                              pkt->data, data_size);
-
+  if (dispatch->ucode_msgs_in_mode != COLLECT) {
     return 0;
+  }
+
+  if (dispatch->ucode_msgs_out_mode != PUSH) {
+    return iwl_dnt_dispatch_collect_data(dnt, db, pkt);
+  }
+
+  data_size = GET_RX_PACKET_SIZE(pkt);
+  if (dispatch->ucode_msgs_output == FTRACE) {
+    iwl_dnt_dispatch_push_ftrace_handler(dnt, pkt->data, data_size);
+  } else if (dispatch->ucode_msgs_output == NETLINK)
+    iwl_dnt_dispatch_push_netlink_handler(dnt, trans, IWL_TM_USER_CMD_NOTIF_UCODE_MSGS_DATA,
+                                          pkt->data, data_size);
+
+  return 0;
 }
 IWL_EXPORT_SYMBOL(iwl_dnt_dispatch_collect_ucode_message);
 
 void iwl_dnt_dispatch_free(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    struct iwl_dnt_dispatch* dispatch = &dnt->dispatch;
-    struct dnt_crash_data* crash = &dispatch->crash;
+  struct iwl_dnt_dispatch* dispatch = &dnt->dispatch;
+  struct dnt_crash_data* crash = &dispatch->crash;
 
-    if (dispatch->dbgm_db) { iwl_dnt_dispatch_free_collect_db(dispatch->dbgm_db); }
-    if (dispatch->um_db) { iwl_dnt_dispatch_free_collect_db(dispatch->um_db); }
+  if (dispatch->dbgm_db) {
+    iwl_dnt_dispatch_free_collect_db(dispatch->dbgm_db);
+  }
+  if (dispatch->um_db) {
+    iwl_dnt_dispatch_free_collect_db(dispatch->um_db);
+  }
 
-    if (dnt->mon_buf_cpu_addr) {
-        dma_free_coherent(trans->dev, dnt->mon_buf_size, dnt->mon_buf_cpu_addr, dnt->mon_dma_addr);
-    }
+  if (dnt->mon_buf_cpu_addr) {
+    dma_free_coherent(trans->dev, dnt->mon_buf_size, dnt->mon_buf_cpu_addr, dnt->mon_dma_addr);
+  }
 
-    if (crash->sram) { vfree(crash->sram); }
-    if (crash->rx) { vfree(crash->rx); }
-    if (crash->dbgm) { vfree(crash->dbgm); }
+  if (crash->sram) {
+    vfree(crash->sram);
+  }
+  if (crash->rx) {
+    vfree(crash->rx);
+  }
+  if (crash->dbgm) {
+    vfree(crash->dbgm);
+  }
 
-    memset(dispatch, 0, sizeof(*dispatch));
+  memset(dispatch, 0, sizeof(*dispatch));
 }
 
 static void iwl_dnt_dispatch_retrieve_crash_sram(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    int ret;
-    struct dnt_crash_data* crash = &dnt->dispatch.crash;
+  int ret;
+  struct dnt_crash_data* crash = &dnt->dispatch.crash;
 
-    if (crash->sram) {
-        crash->sram_buf_size = 0;
-        vfree(crash->sram);
-    }
+  if (crash->sram) {
+    crash->sram_buf_size = 0;
+    vfree(crash->sram);
+  }
 
-    ret = iwl_dnt_dev_if_read_sram(dnt, trans);
-    if (ret) {
-        IWL_ERR(dnt, "Failed to read sram\n");
-        return;
-    }
+  ret = iwl_dnt_dev_if_read_sram(dnt, trans);
+  if (ret) {
+    IWL_ERR(dnt, "Failed to read sram\n");
+    return;
+  }
 }
 
 static void iwl_dnt_dispatch_retrieve_crash_rx(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    int ret;
-    struct dnt_crash_data* crash = &dnt->dispatch.crash;
+  int ret;
+  struct dnt_crash_data* crash = &dnt->dispatch.crash;
 
-    if (crash->rx) {
-        crash->rx_buf_size = 0;
-        vfree(crash->rx);
-    }
+  if (crash->rx) {
+    crash->rx_buf_size = 0;
+    vfree(crash->rx);
+  }
 
-    ret = iwl_dnt_dev_if_read_rx(dnt, trans);
-    if (ret) {
-        IWL_ERR(dnt, "Failed to read rx\n");
-        return;
-    }
+  ret = iwl_dnt_dev_if_read_rx(dnt, trans);
+  if (ret) {
+    IWL_ERR(dnt, "Failed to read rx\n");
+    return;
+  }
 }
 
 static void iwl_dnt_dispatch_retrieve_crash_dbgm(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    int ret;
-    uint32_t buf_size;
-    struct dnt_crash_data* crash = &dnt->dispatch.crash;
+  int ret;
+  uint32_t buf_size;
+  struct dnt_crash_data* crash = &dnt->dispatch.crash;
 
-    if (crash->dbgm) {
-        crash->dbgm_buf_size = 0;
-        vfree(crash->dbgm);
-    }
+  if (crash->dbgm) {
+    crash->dbgm_buf_size = 0;
+    vfree(crash->dbgm);
+  }
 
-    switch (dnt->cur_mon_type) {
+  switch (dnt->cur_mon_type) {
     case DMA:
-        buf_size = dnt->mon_buf_size;
-        break;
+      buf_size = dnt->mon_buf_size;
+      break;
     case MARBH_ADC:
     case MARBH_DBG:
-        buf_size = 0x2000 * sizeof(uint32_t);
-        break;
+      buf_size = 0x2000 * sizeof(uint32_t);
+      break;
     case INTERFACE:
-        if (dnt->dispatch.mon_output == NETLINK) { return; }
-        buf_size = ARRAY_SIZE(dnt->dispatch.dbgm_db->collect_array);
-        break;
-    default:
+      if (dnt->dispatch.mon_output == NETLINK) {
         return;
-    }
-    crash->dbgm = vmalloc(buf_size);
-    if (!crash->dbgm) { return; }
+      }
+      buf_size = ARRAY_SIZE(dnt->dispatch.dbgm_db->collect_array);
+      break;
+    default:
+      return;
+  }
+  crash->dbgm = vmalloc(buf_size);
+  if (!crash->dbgm) {
+    return;
+  }
 
-    if (dnt->cur_mon_type == INTERFACE) {
-        iwl_dnt_dispatch_get_list_data(dnt->dispatch.dbgm_db, crash->dbgm, buf_size);
+  if (dnt->cur_mon_type == INTERFACE) {
+    iwl_dnt_dispatch_get_list_data(dnt->dispatch.dbgm_db, crash->dbgm, buf_size);
 
-    } else {
-        ret = iwl_dnt_dev_if_retrieve_monitor_data(dnt, trans, crash->dbgm, buf_size);
-        if (ret != buf_size) {
-            IWL_ERR(dnt, "Failed to read DBGM\n");
-            vfree(crash->dbgm);
-            return;
-        }
+  } else {
+    ret = iwl_dnt_dev_if_retrieve_monitor_data(dnt, trans, crash->dbgm, buf_size);
+    if (ret != buf_size) {
+      IWL_ERR(dnt, "Failed to read DBGM\n");
+      vfree(crash->dbgm);
+      return;
     }
-    crash->dbgm_buf_size = buf_size;
+  }
+  crash->dbgm_buf_size = buf_size;
 }
 
 static void iwl_dnt_dispatch_create_tlv(struct iwl_fw_error_dump_data* tlv, uint32_t type,
                                         uint32_t len, uint8_t* value) {
-    tlv->type = cpu_to_le32(type);
-    tlv->len = cpu_to_le32(len);
-    memcpy(tlv->data, value, len);
+  tlv->type = cpu_to_le32(type);
+  tlv->len = cpu_to_le32(len);
+  memcpy(tlv->data, value, len);
 }
 
 static uint32_t iwl_dnt_dispatch_create_crash_tlv(struct iwl_trans* trans, uint8_t** tlv_buf) {
-    struct iwl_fw_error_dump_file* dump_file;
-    struct iwl_fw_error_dump_data* cur_tlv;
-    struct iwl_dnt* dnt = trans->tmdev->dnt;
-    struct dnt_crash_data* crash;
-    uint32_t total_size;
+  struct iwl_fw_error_dump_file* dump_file;
+  struct iwl_fw_error_dump_data* cur_tlv;
+  struct iwl_dnt* dnt = trans->tmdev->dnt;
+  struct dnt_crash_data* crash;
+  uint32_t total_size;
 
-    if (!dnt) {
-        IWL_DEBUG_INFO(trans, "DnT is not intialized\n");
-        return 0;
-    }
+  if (!dnt) {
+    IWL_DEBUG_INFO(trans, "DnT is not intialized\n");
+    return 0;
+  }
 
-    crash = &dnt->dispatch.crash;
+  crash = &dnt->dispatch.crash;
 
-    /*
-     * data will be represented as TLV - each buffer is represented as
-     * follow:
-     * uint32_t - type (SRAM/DBGM/RX/TX/PERIPHERY)
-     * uint32_t - length
-     * uint8_t[] - data
-     */
-    total_size = sizeof(*dump_file) + crash->sram_buf_size + crash->dbgm_buf_size +
-                 crash->rx_buf_size + crash->tx_buf_size + crash->periph_buf_size +
-                 sizeof(uint32_t) * 10;
-    dump_file = vmalloc(total_size);
-    if (!dump_file) { return 0; }
+  /*
+   * data will be represented as TLV - each buffer is represented as
+   * follow:
+   * uint32_t - type (SRAM/DBGM/RX/TX/PERIPHERY)
+   * uint32_t - length
+   * uint8_t[] - data
+   */
+  total_size = sizeof(*dump_file) + crash->sram_buf_size + crash->dbgm_buf_size +
+               crash->rx_buf_size + crash->tx_buf_size + crash->periph_buf_size +
+               sizeof(uint32_t) * 10;
+  dump_file = vmalloc(total_size);
+  if (!dump_file) {
+    return 0;
+  }
 
-    dump_file->file_len = cpu_to_le32(total_size);
-    dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
-    *tlv_buf = (uint8_t*)dump_file;
+  dump_file->file_len = cpu_to_le32(total_size);
+  dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
+  *tlv_buf = (uint8_t*)dump_file;
 
-    cur_tlv = (void*)dump_file->data;
-    if (crash->sram_buf_size) {
-        /* TODO: Convert to the new SMEM format */
-        iwl_dnt_dispatch_create_tlv(cur_tlv, 0, crash->sram_buf_size, crash->sram);
-        cur_tlv = iwl_fw_error_next_data(cur_tlv);
-    }
-    if (crash->dbgm_buf_size) {
-        iwl_dnt_dispatch_create_tlv(cur_tlv, IWL_FW_ERROR_DUMP_FW_MONITOR, crash->dbgm_buf_size,
-                                    crash->dbgm);
-        cur_tlv = iwl_fw_error_next_data(cur_tlv);
-    }
-    if (crash->tx_buf_size) {
-        iwl_dnt_dispatch_create_tlv(cur_tlv, IWL_FW_ERROR_DUMP_TXF, crash->tx_buf_size, crash->tx);
-        cur_tlv = iwl_fw_error_next_data(cur_tlv);
-    }
-    if (crash->rx_buf_size) {
-        iwl_dnt_dispatch_create_tlv(cur_tlv, IWL_FW_ERROR_DUMP_RXF, crash->rx_buf_size, crash->rx);
-        cur_tlv = iwl_fw_error_next_data(cur_tlv);
-    }
+  cur_tlv = (void*)dump_file->data;
+  if (crash->sram_buf_size) {
+    /* TODO: Convert to the new SMEM format */
+    iwl_dnt_dispatch_create_tlv(cur_tlv, 0, crash->sram_buf_size, crash->sram);
+    cur_tlv = iwl_fw_error_next_data(cur_tlv);
+  }
+  if (crash->dbgm_buf_size) {
+    iwl_dnt_dispatch_create_tlv(cur_tlv, IWL_FW_ERROR_DUMP_FW_MONITOR, crash->dbgm_buf_size,
+                                crash->dbgm);
+    cur_tlv = iwl_fw_error_next_data(cur_tlv);
+  }
+  if (crash->tx_buf_size) {
+    iwl_dnt_dispatch_create_tlv(cur_tlv, IWL_FW_ERROR_DUMP_TXF, crash->tx_buf_size, crash->tx);
+    cur_tlv = iwl_fw_error_next_data(cur_tlv);
+  }
+  if (crash->rx_buf_size) {
+    iwl_dnt_dispatch_create_tlv(cur_tlv, IWL_FW_ERROR_DUMP_RXF, crash->rx_buf_size, crash->rx);
+    cur_tlv = iwl_fw_error_next_data(cur_tlv);
+  }
 
-    return total_size;
+  return total_size;
 }
 
 static void iwl_dnt_dispatch_handle_crash_netlink(struct iwl_dnt* dnt, struct iwl_trans* trans) {
-    int ret;
-    uint8_t* tlv_buf;
-    uint32_t tlv_buf_size;
-    struct iwl_tm_crash_data* crash_notif;
+  int ret;
+  uint8_t* tlv_buf;
+  uint32_t tlv_buf_size;
+  struct iwl_tm_crash_data* crash_notif;
 
-    tlv_buf_size = iwl_dnt_dispatch_create_crash_tlv(trans, &tlv_buf);
-    if (!tlv_buf_size) { return; }
+  tlv_buf_size = iwl_dnt_dispatch_create_crash_tlv(trans, &tlv_buf);
+  if (!tlv_buf_size) {
+    return;
+  }
 
-    crash_notif = vmalloc(sizeof(struct iwl_tm_crash_data) + tlv_buf_size);
-    if (!crash_notif) { return; }
+  crash_notif = vmalloc(sizeof(struct iwl_tm_crash_data) + tlv_buf_size);
+  if (!crash_notif) {
+    return;
+  }
 
-    crash_notif->size = tlv_buf_size;
-    memcpy(crash_notif->data, tlv_buf, tlv_buf_size);
-    ret = iwl_tm_gnl_send_msg(trans, IWL_TM_USER_CMD_NOTIF_CRASH_DATA, false, crash_notif,
-                              sizeof(struct iwl_tm_crash_data) + tlv_buf_size, GFP_ATOMIC);
+  crash_notif->size = tlv_buf_size;
+  memcpy(crash_notif->data, tlv_buf, tlv_buf_size);
+  ret = iwl_tm_gnl_send_msg(trans, IWL_TM_USER_CMD_NOTIF_CRASH_DATA, false, crash_notif,
+                            sizeof(struct iwl_tm_crash_data) + tlv_buf_size, GFP_ATOMIC);
 
-    if (ret) { IWL_ERR(dnt, "Failed to send crash data notification\n"); }
+  if (ret) {
+    IWL_ERR(dnt, "Failed to send crash data notification\n");
+  }
 
-    vfree(crash_notif);
-    vfree(tlv_buf);
+  vfree(crash_notif);
+  vfree(tlv_buf);
 }
 
 void iwl_dnt_dispatch_handle_nic_err(struct iwl_trans* trans) {
-    struct iwl_dnt* dnt = trans->tmdev->dnt;
-    struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
+  struct iwl_dnt* dnt = trans->tmdev->dnt;
+  struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
 
-    trans->tmdev->dnt->iwl_dnt_status |= IWL_DNT_STATUS_FW_CRASH;
+  trans->tmdev->dnt->iwl_dnt_status |= IWL_DNT_STATUS_FW_CRASH;
 
-    if (!dbg_cfg->dbg_flags) { return; }
+  if (!dbg_cfg->dbg_flags) {
+    return;
+  }
 
-    if (dbg_cfg->dbg_flags & SRAM) { iwl_dnt_dispatch_retrieve_crash_sram(dnt, trans); }
-    if (dbg_cfg->dbg_flags & RX_FIFO) { iwl_dnt_dispatch_retrieve_crash_rx(dnt, trans); }
-    if (dbg_cfg->dbg_flags & DBGM) { iwl_dnt_dispatch_retrieve_crash_dbgm(dnt, trans); }
+  if (dbg_cfg->dbg_flags & SRAM) {
+    iwl_dnt_dispatch_retrieve_crash_sram(dnt, trans);
+  }
+  if (dbg_cfg->dbg_flags & RX_FIFO) {
+    iwl_dnt_dispatch_retrieve_crash_rx(dnt, trans);
+  }
+  if (dbg_cfg->dbg_flags & DBGM) {
+    iwl_dnt_dispatch_retrieve_crash_dbgm(dnt, trans);
+  }
 
-    if (dnt->dispatch.crash_out_mode & NETLINK) {
-        iwl_dnt_dispatch_handle_crash_netlink(dnt, trans);
-    }
+  if (dnt->dispatch.crash_out_mode & NETLINK) {
+    iwl_dnt_dispatch_handle_crash_netlink(dnt, trans);
+  }
 }
 IWL_EXPORT_SYMBOL(iwl_dnt_dispatch_handle_nic_err);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-drv.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-drv.c
index 6ce4fc4..4680f55 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-drv.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-drv.c
@@ -148,8 +148,7 @@
  * folder in the sysfs */
 static struct kobject* iwl_kobj;
 
-static struct iwl_op_mode* _iwl_op_mode_start(struct iwl_drv* drv,
-                                              struct iwlwifi_opmode_table* op);
+static struct iwl_op_mode* _iwl_op_mode_start(struct iwl_drv* drv, struct iwlwifi_opmode_table* op);
 static void _iwl_op_mode_stop(struct iwl_drv* drv);
 
 /*
@@ -162,8 +161,7 @@
 
   /* Going over all drivers, looking for the one that holds dev */
   for (i = 0; (i < ARRAY_SIZE(iwlwifi_opmode_table)); i++) {
-    list_for_each_entry(drv_itr, &iwlwifi_opmode_table[i].drv,
-                        list) if (drv_itr->dev == dev) {
+    list_for_each_entry(drv_itr, &iwlwifi_opmode_table[i].drv, list) if (drv_itr->dev == dev) {
       return drv_itr;
     }
   }
@@ -186,8 +184,7 @@
 
   /* Going over all drivers, looking for the list that holds it */
   for (i = 0; (i < ARRAY_SIZE(iwlwifi_opmode_table)); i++) {
-    list_for_each_entry(drv_itr, &iwlwifi_opmode_table[i].drv,
-                        list) if (drv_itr->dev == drv->dev) {
+    list_for_each_entry(drv_itr, &iwlwifi_opmode_table[i].drv, list) if (drv_itr->dev == drv->dev) {
       return i;
     }
   }
@@ -226,8 +223,7 @@
  * is supported by the device. Stops the current op mode
  * and starts the desired mode.
  */
-zx_status_t iwl_drv_switch_op_mode(struct iwl_drv* drv,
-                                   const char* new_op_name) {
+zx_status_t iwl_drv_switch_op_mode(struct iwl_drv* drv, const char* new_op_name) {
   struct iwlwifi_opmode_table* new_op = NULL;
   int idx;
 
@@ -288,8 +284,7 @@
 /*
  * iwl_drv_sysfs_show - Returns device information to user
  */
-static ssize_t iwl_drv_sysfs_show(struct device* dev,
-                                  struct device_attribute* attr, char* buf) {
+static ssize_t iwl_drv_sysfs_show(struct device* dev, struct device_attribute* attr, char* buf) {
   struct iwl_drv* drv;
   int op_mode_idx = 0, itr;
   int ret = 0;
@@ -305,8 +300,7 @@
 
   /* Constructing output */
   for (itr = 0; itr < ARRAY_SIZE(iwlwifi_opmode_table); itr++) {
-    ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%-s\n",
-                     (itr == op_mode_idx) ? "* " : "  ",
+    ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%-s\n", (itr == op_mode_idx) ? "* " : "  ",
                      iwlwifi_opmode_table[itr].name);
   }
 
@@ -416,8 +410,7 @@
   }
 }
 
-static void iwl_req_fw_callback(struct firmware* ucode_raw,
-                                struct iwl_drv* drv);
+static void iwl_req_fw_callback(struct firmware* ucode_raw, struct iwl_drv* drv);
 
 static zx_status_t iwl_request_firmware(struct iwl_drv* drv, bool first) {
   const struct iwl_cfg* cfg = drv->trans->cfg;
@@ -429,8 +422,7 @@
   if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
       (CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP &&
        CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) {
-    IWL_ERR(drv, "Only HW steps B and C are currently supported (0x%0x)\n",
-            drv->trans->hw_rev);
+    IWL_ERR(drv, "Only HW steps B and C are currently supported (0x%0x)\n", drv->trans->hw_rev);
     return ZX_ERR_INVALID_ARGS;
   }
 
@@ -461,10 +453,8 @@
     if (cfg->ucode_api_min == cfg->ucode_api_max) {
       IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre, cfg->ucode_api_max);
     } else {
-      IWL_ERR(drv, "minimum version required: %s%d\n", cfg->fw_name_pre,
-              cfg->ucode_api_min);
-      IWL_ERR(drv, "maximum version supported: %s%d\n", cfg->fw_name_pre,
-              cfg->ucode_api_max);
+      IWL_ERR(drv, "minimum version required: %s%d\n", cfg->fw_name_pre, cfg->ucode_api_min);
+      IWL_ERR(drv, "maximum version supported: %s%d\n", cfg->fw_name_pre, cfg->ucode_api_max);
     }
 
     IWL_ERR(drv,
@@ -474,13 +464,13 @@
     return ZX_ERR_NOT_FOUND;
   }
 
-  snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s/%s%s.ucode",
-           FIRMWARE_DIR, cfg->fw_name_pre, tag);
+  snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s/%s%s.ucode", FIRMWARE_DIR,
+           cfg->fw_name_pre, tag);
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
   if (drv->trans->dbg_cfg.fw_file_pre) {
-    snprintf(fw_name_temp, sizeof(fw_name_temp), "%s%s",
-             drv->trans->dbg_cfg.fw_file_pre, drv->firmware_name);
+    snprintf(fw_name_temp, sizeof(fw_name_temp), "%s%s", drv->trans->dbg_cfg.fw_file_pre,
+             drv->firmware_name);
     strncpy(drv->firmware_name, fw_name_temp, sizeof(drv->firmware_name));
   }
 #endif /* CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES */
@@ -496,8 +486,7 @@
   // to test the zx_vmar_map() below.
   status = ZX_OK;
 #else
-  status = load_firmware(drv->zxdev, drv->firmware_name, &firmware.vmo,
-                         &firmware.size);
+  status = load_firmware(drv->zxdev, drv->firmware_name, &firmware.vmo, &firmware.size);
   if (status != ZX_OK) {
     IWL_ERR(drv, "Failed to load firmware: %s\n", zx_status_get_string(status));
     return status;
@@ -505,11 +494,10 @@
 #endif
 
   uintptr_t vaddr;
-  status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ, 0, firmware.vmo, 0,
-                       firmware.size, &vaddr);
+  status =
+      zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ, 0, firmware.vmo, 0, firmware.size, &vaddr);
   if (status != ZX_OK) {
-    IWL_ERR(drv, "Failed to map firmware VMO: %s",
-            zx_status_get_string(status));
+    IWL_ERR(drv, "Failed to map firmware VMO: %s", zx_status_get_string(status));
     zx_handle_close(firmware.vmo);
     return status;
   }
@@ -570,13 +558,12 @@
  * These functions are just to extract uCode section data from the pieces
  * structure.
  */
-static struct fw_sec* get_sec(struct iwl_firmware_pieces* pieces,
-                              enum iwl_ucode_type type, int sec) {
+static struct fw_sec* get_sec(struct iwl_firmware_pieces* pieces, enum iwl_ucode_type type,
+                              int sec) {
   return &pieces->img[type].sec[sec];
 }
 
-static void alloc_sec_data(struct iwl_firmware_pieces* pieces,
-                           enum iwl_ucode_type type, int sec) {
+static void alloc_sec_data(struct iwl_firmware_pieces* pieces, enum iwl_ucode_type type, int sec) {
   struct fw_img_parsing* img = &pieces->img[type];
   struct fw_sec* sec_memory;
   int size = sec + 1;
@@ -595,34 +582,32 @@
   img->sec_counter = size;
 }
 
-static void set_sec_data(struct iwl_firmware_pieces* pieces,
-                         enum iwl_ucode_type type, int sec, const void* data) {
+static void set_sec_data(struct iwl_firmware_pieces* pieces, enum iwl_ucode_type type, int sec,
+                         const void* data) {
   alloc_sec_data(pieces, type, sec);
 
   pieces->img[type].sec[sec].data = data;
 }
 
-static void set_sec_size(struct iwl_firmware_pieces* pieces,
-                         enum iwl_ucode_type type, int sec, size_t size) {
+static void set_sec_size(struct iwl_firmware_pieces* pieces, enum iwl_ucode_type type, int sec,
+                         size_t size) {
   alloc_sec_data(pieces, type, sec);
 
   pieces->img[type].sec[sec].size = size;
 }
 
-static size_t get_sec_size(struct iwl_firmware_pieces* pieces,
-                           enum iwl_ucode_type type, int sec) {
+static size_t get_sec_size(struct iwl_firmware_pieces* pieces, enum iwl_ucode_type type, int sec) {
   return pieces->img[type].sec[sec].size;
 }
 
-static void set_sec_offset(struct iwl_firmware_pieces* pieces,
-                           enum iwl_ucode_type type, int sec, uint32_t offset) {
+static void set_sec_offset(struct iwl_firmware_pieces* pieces, enum iwl_ucode_type type, int sec,
+                           uint32_t offset) {
   alloc_sec_data(pieces, type, sec);
 
   pieces->img[type].sec[sec].offset = offset;
 }
 
-static zx_status_t iwl_store_cscheme(struct iwl_fw* fw, const uint8_t* data,
-                                     const uint32_t len) {
+static zx_status_t iwl_store_cscheme(struct iwl_fw* fw, const uint8_t* data, const uint32_t len) {
   int i, j;
   struct iwl_fw_cscheme_list* l = (struct iwl_fw_cscheme_list*)data;
   struct iwl_fw_cipher_scheme* fwcs;
@@ -648,9 +633,8 @@
 /*
  * Gets uCode section from tlv.
  */
-static int iwl_store_ucode_sec(struct iwl_firmware_pieces* pieces,
-                               const void* data, enum iwl_ucode_type type,
-                               int size) {
+static int iwl_store_ucode_sec(struct iwl_firmware_pieces* pieces, const void* data,
+                               enum iwl_ucode_type type, int size) {
   struct fw_img_parsing* img;
   struct fw_sec* sec;
   struct fw_sec_parsing* sec_parse;
@@ -682,18 +666,15 @@
   return 0;
 }
 
-static zx_status_t iwl_set_default_calib(struct iwl_drv* drv,
-                                         const uint8_t* data) {
+static zx_status_t iwl_set_default_calib(struct iwl_drv* drv, const uint8_t* data) {
   struct iwl_tlv_calib_data* def_calib = (struct iwl_tlv_calib_data*)data;
   uint32_t ucode_type = le32_to_cpu(def_calib->ucode_type);
   if (ucode_type >= IWL_UCODE_TYPE_MAX) {
     IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n", ucode_type);
     return ZX_ERR_INVALID_ARGS;
   }
-  drv->fw.default_calib[ucode_type].flow_trigger =
-      def_calib->calib.flow_trigger;
-  drv->fw.default_calib[ucode_type].event_trigger =
-      def_calib->calib.event_trigger;
+  drv->fw.default_calib[ucode_type].flow_trigger = def_calib->calib.flow_trigger;
+  drv->fw.default_calib[ucode_type].event_trigger = def_calib->calib.event_trigger;
 
   return 0;
 }
@@ -706,8 +687,7 @@
   int i;
 
   if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) {
-    IWL_WARN(drv, "api flags index %d larger than supported by driver\n",
-             api_index);
+    IWL_WARN(drv, "api flags index %d larger than supported by driver\n", api_index);
     return;
   }
 
@@ -726,8 +706,7 @@
   int i;
 
   if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) {
-    IWL_WARN(drv, "capa flags index %d larger than supported by driver\n",
-             api_index);
+    IWL_WARN(drv, "capa flags index %d larger than supported by driver\n", api_index);
     return;
   }
 
@@ -738,9 +717,8 @@
   }
 }
 
-static zx_status_t iwl_parse_v1_v2_firmware(
-    struct iwl_drv* drv, const struct firmware* ucode_raw,
-    struct iwl_firmware_pieces* pieces) {
+static zx_status_t iwl_parse_v1_v2_firmware(struct iwl_drv* drv, const struct firmware* ucode_raw,
+                                            struct iwl_firmware_pieces* pieces) {
   struct iwl_ucode_header* ucode = (void*)ucode_raw->data;
   uint32_t api_ver, hdr_size, build;
   char buildstr[25];
@@ -795,44 +773,36 @@
   }
 
   snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%u.%u%s",
-           IWL_UCODE_MAJOR(drv->fw.ucode_ver),
-           IWL_UCODE_MINOR(drv->fw.ucode_ver), IWL_UCODE_API(drv->fw.ucode_ver),
-           IWL_UCODE_SERIAL(drv->fw.ucode_ver), buildstr);
+           IWL_UCODE_MAJOR(drv->fw.ucode_ver), IWL_UCODE_MINOR(drv->fw.ucode_ver),
+           IWL_UCODE_API(drv->fw.ucode_ver), IWL_UCODE_SERIAL(drv->fw.ucode_ver), buildstr);
 
   /* Verify size of file vs. image size info in file's header */
 
-  if (ucode_raw->size !=
-      hdr_size +
-          get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) +
-          get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) +
-          get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) +
-          get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) {
-    IWL_ERR(drv, "uCode file size %d does not match expected size\n",
-            (int)ucode_raw->size);
+  if (ucode_raw->size != hdr_size +
+                             get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) +
+                             get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) +
+                             get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) +
+                             get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) {
+    IWL_ERR(drv, "uCode file size %d does not match expected size\n", (int)ucode_raw->size);
     return ZX_ERR_INVALID_ARGS;
   }
 
   set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src);
   src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST);
-  set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
-                 IWLAGN_RTC_INST_LOWER_BOUND);
+  set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND);
   set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src);
   src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA);
-  set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
-                 IWLAGN_RTC_DATA_LOWER_BOUND);
+  set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND);
   set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src);
   src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST);
-  set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
-                 IWLAGN_RTC_INST_LOWER_BOUND);
+  set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND);
   set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src);
   src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA);
-  set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
-                 IWLAGN_RTC_DATA_LOWER_BOUND);
+  set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND);
   return 0;
 }
 
-static zx_status_t iwl_parse_tlv_firmware(struct iwl_drv* drv,
-                                          const struct firmware* ucode_raw,
+static zx_status_t iwl_parse_tlv_firmware(struct iwl_drv* drv, const struct firmware* ucode_raw,
                                           struct iwl_firmware_pieces* pieces,
                                           struct iwl_ucode_capabilities* capa,
                                           bool* usniffer_images) {
@@ -870,8 +840,7 @@
   }
 
   drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
-  memcpy(drv->fw.human_readable, ucode->human_readable,
-         sizeof(drv->fw.human_readable));
+  memcpy(drv->fw.human_readable, ucode->human_readable, sizeof(drv->fw.human_readable));
   build = le32_to_cpu(ucode->build);
 
   if (build) {
@@ -881,9 +850,8 @@
   }
 
   snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%u.%u%s",
-           IWL_UCODE_MAJOR(drv->fw.ucode_ver),
-           IWL_UCODE_MINOR(drv->fw.ucode_ver), IWL_UCODE_API(drv->fw.ucode_ver),
-           IWL_UCODE_SERIAL(drv->fw.ucode_ver), buildstr);
+           IWL_UCODE_MAJOR(drv->fw.ucode_ver), IWL_UCODE_MINOR(drv->fw.ucode_ver),
+           IWL_UCODE_API(drv->fw.ucode_ver), IWL_UCODE_SERIAL(drv->fw.ucode_ver), buildstr);
 
   data = ucode->data;
 
@@ -916,32 +884,26 @@
 
     switch (tlv_type) {
       case IWL_UCODE_TLV_INST:
-        set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
-                     tlv_data);
-        set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
-                     tlv_len);
+        set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, tlv_data);
+        set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, tlv_len);
         set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
                        IWLAGN_RTC_INST_LOWER_BOUND);
         break;
       case IWL_UCODE_TLV_DATA:
-        set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
-                     tlv_data);
-        set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
-                     tlv_len);
+        set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, tlv_data);
+        set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, tlv_len);
         set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
                        IWLAGN_RTC_DATA_LOWER_BOUND);
         break;
       case IWL_UCODE_TLV_INIT:
         set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, tlv_data);
         set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, tlv_len);
-        set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
-                       IWLAGN_RTC_INST_LOWER_BOUND);
+        set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND);
         break;
       case IWL_UCODE_TLV_INIT_DATA:
         set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, tlv_data);
         set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, tlv_len);
-        set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
-                       IWLAGN_RTC_DATA_LOWER_BOUND);
+        set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND);
         break;
       case IWL_UCODE_TLV_BOOT:
         IWL_ERR(drv, "Found unexpected BOOT ucode\n");
@@ -1031,15 +993,13 @@
         drv->fw.enhance_sensitivity_table = true;
         break;
       case IWL_UCODE_TLV_WOWLAN_INST:
-        set_sec_data(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST,
-                     tlv_data);
+        set_sec_data(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST, tlv_data);
         set_sec_size(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST, tlv_len);
         set_sec_offset(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST,
                        IWLAGN_RTC_INST_LOWER_BOUND);
         break;
       case IWL_UCODE_TLV_WOWLAN_DATA:
-        set_sec_data(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA,
-                     tlv_data);
+        set_sec_data(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA, tlv_data);
         set_sec_size(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA, tlv_len);
         set_sec_offset(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA,
                        IWLAGN_RTC_DATA_LOWER_BOUND);
@@ -1077,26 +1037,23 @@
         drv->fw.phy_config = le32_to_cpup((__le32*)tlv_data);
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
         if (drv->trans->dbg_cfg.valid_ants & ~ANT_ABC) {
-          IWL_ERR(drv, "Invalid value for antennas: 0x%x\n",
-                  drv->trans->dbg_cfg.valid_ants);
+          IWL_ERR(drv, "Invalid value for antennas: 0x%x\n", drv->trans->dbg_cfg.valid_ants);
         }
         /* Make sure value stays in range */
         drv->trans->dbg_cfg.valid_ants &= ANT_ABC;
         if (drv->trans->dbg_cfg.valid_ants) {
           uint32_t phy_config = ~(FW_PHY_CFG_TX_CHAIN | FW_PHY_CFG_RX_CHAIN);
 
-          phy_config |=
-              (drv->trans->dbg_cfg.valid_ants << FW_PHY_CFG_TX_CHAIN_POS);
-          phy_config |=
-              (drv->trans->dbg_cfg.valid_ants << FW_PHY_CFG_RX_CHAIN_POS);
+          phy_config |= (drv->trans->dbg_cfg.valid_ants << FW_PHY_CFG_TX_CHAIN_POS);
+          phy_config |= (drv->trans->dbg_cfg.valid_ants << FW_PHY_CFG_RX_CHAIN_POS);
 
           drv->fw.phy_config &= phy_config;
         }
 #endif
-        drv->fw.valid_tx_ant = (drv->fw.phy_config & FW_PHY_CFG_TX_CHAIN) >>
-                               FW_PHY_CFG_TX_CHAIN_POS;
-        drv->fw.valid_rx_ant = (drv->fw.phy_config & FW_PHY_CFG_RX_CHAIN) >>
-                               FW_PHY_CFG_RX_CHAIN_POS;
+        drv->fw.valid_tx_ant =
+            (drv->fw.phy_config & FW_PHY_CFG_TX_CHAIN) >> FW_PHY_CFG_TX_CHAIN_POS;
+        drv->fw.valid_rx_ant =
+            (drv->fw.phy_config & FW_PHY_CFG_RX_CHAIN) >> FW_PHY_CFG_RX_CHAIN_POS;
         break;
       case IWL_UCODE_TLV_SECURE_SEC_RT:
         iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, tlv_len);
@@ -1150,11 +1107,11 @@
         local_comp = le32_to_cpup(ptr);
 
         if (strncmp((const char*)drv->fw.human_readable, "stream:", 7))
-          snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version),
-                   "%u.%08x.%hhu", major, minor, local_comp);
+          snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%08x.%hhu", major, minor,
+                   local_comp);
         else
-          snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%hhu",
-                   major, minor, local_comp);
+          snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%hhu", major, minor,
+                   local_comp);
         break;
       }
       case IWL_UCODE_TLV_FW_DBG_DEST: {
@@ -1168,8 +1125,7 @@
         } else if (*pieces->dbg_dest_ver == 0) {
           dest_v1 = (void*)tlv_data;
         } else {
-          IWL_ERR(drv, "The version is %d, and it is invalid\n",
-                  *pieces->dbg_dest_ver);
+          IWL_ERR(drv, "The version is %d, and it is invalid\n", *pieces->dbg_dest_ver);
           break;
         }
 
@@ -1197,13 +1153,11 @@
           mon_mode = dest->monitor_mode;
         }
 
-        IWL_INFO(drv, "Found debug destination: %s\n",
-                 get_fw_dbg_mode_string(mon_mode));
+        IWL_INFO(drv, "Found debug destination: %s\n", get_fw_dbg_mode_string(mon_mode));
 
-        drv->fw.dbg.n_dest_reg =
-            (dest_v1)
-                ? tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv_v1, reg_ops)
-                : tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv, reg_ops);
+        drv->fw.dbg.n_dest_reg = (dest_v1)
+                                     ? tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv_v1, reg_ops)
+                                     : tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv, reg_ops);
 
         drv->fw.dbg.n_dest_reg /= sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]);
 
@@ -1213,8 +1167,7 @@
         struct iwl_fw_dbg_conf_tlv* conf = (void*)tlv_data;
 
         if (!pieces->dbg_dest_tlv_init) {
-          IWL_ERR(drv, "Ignore dbg config %d - no destination configured\n",
-                  conf->id);
+          IWL_ERR(drv, "Ignore dbg config %d - no destination configured\n", conf->id);
           break;
         }
 
@@ -1269,8 +1222,7 @@
       }
       case IWL_UCODE_TLV_SEC_RT_USNIFFER:
         *usniffer_images = true;
-        iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR_USNIFFER,
-                            tlv_len);
+        iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR_USNIFFER, tlv_len);
         break;
       case IWL_UCODE_TLV_PAGING:
         if (tlv_len != sizeof(uint32_t)) {
@@ -1278,12 +1230,10 @@
         }
         paging_mem_size = le32_to_cpup((__le32*)tlv_data);
 
-        IWL_DEBUG_FW(drv, "Paging: paging enabled (size = %u bytes)\n",
-                     paging_mem_size);
+        IWL_DEBUG_FW(drv, "Paging: paging enabled (size = %u bytes)\n", paging_mem_size);
 
         if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
-          IWL_ERR(drv,
-                  "Paging: driver supports up to %u bytes for paging image\n",
+          IWL_ERR(drv, "Paging: driver supports up to %u bytes for paging image\n",
                   MAX_PAGING_IMAGE_SIZE);
           return ZX_ERR_INVALID_ARGS;
         }
@@ -1309,8 +1259,7 @@
           goto invalid_tlv_len;
         }
 
-        IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
-                       dbg_mem->data_type);
+        IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n", dbg_mem->data_type);
 
         size = sizeof(*pieces->dbg_mem_tlv) * (pieces->n_mem_tlv + 1);
         n = realloc(pieces->dbg_mem_tlv, size);
@@ -1397,8 +1346,7 @@
   return ZX_ERR_INVALID_ARGS;
 }
 
-static int iwl_alloc_ucode(struct iwl_drv* drv,
-                           struct iwl_firmware_pieces* pieces,
+static int iwl_alloc_ucode(struct iwl_drv* drv, struct iwl_firmware_pieces* pieces,
                            enum iwl_ucode_type type) {
   int i;
   struct fw_desc* sec;
@@ -1418,44 +1366,37 @@
   return 0;
 }
 
-static int validate_sec_sizes(struct iwl_drv* drv,
-                              struct iwl_firmware_pieces* pieces,
+static int validate_sec_sizes(struct iwl_drv* drv, struct iwl_firmware_pieces* pieces,
                               const struct iwl_cfg* cfg) {
-  IWL_DEBUG_INFO(
-      drv, "f/w package hdr runtime inst size = %zd\n",
-      get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST));
-  IWL_DEBUG_INFO(
-      drv, "f/w package hdr runtime data size = %zd\n",
-      get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA));
+  IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %zd\n",
+                 get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST));
+  IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %zd\n",
+                 get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA));
   IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %zd\n",
                  get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
   IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %zd\n",
                  get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA));
 
   /* Verify that uCode images will fit in card's SRAM. */
-  if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
-      cfg->max_inst_size) {
+  if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) > cfg->max_inst_size) {
     IWL_ERR(drv, "uCode instr len %zd too large to fit in\n",
             get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST));
     return -1;
   }
 
-  if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
-      cfg->max_data_size) {
+  if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) > cfg->max_data_size) {
     IWL_ERR(drv, "uCode data len %zd too large to fit in\n",
             get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA));
     return -1;
   }
 
-  if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
-      cfg->max_inst_size) {
+  if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) > cfg->max_inst_size) {
     IWL_ERR(drv, "uCode init instr len %zd too large to fit in\n",
             get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
     return -1;
   }
 
-  if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
-      cfg->max_data_size) {
+  if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) > cfg->max_data_size) {
     IWL_ERR(drv, "uCode init data len %zd too large to fit in\n",
             get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA));
     return -1;
@@ -1510,8 +1451,7 @@
  * If loaded successfully, copies the firmware into buffers
  * for the card to fetch (via DMA).
  */
-static void iwl_req_fw_callback(struct firmware* ucode_raw,
-                                struct iwl_drv* drv) {
+static void iwl_req_fw_callback(struct firmware* ucode_raw, struct iwl_drv* drv) {
   struct iwl_fw* fw = &drv->fw;
   struct iwl_ucode_header* ucode;
   struct iwlwifi_opmode_table* op;
@@ -1531,8 +1471,7 @@
 #endif
 
   fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
-  fw->ucode_capa.standard_phy_calibration_size =
-      IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+  fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
   fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
   /* dump all fw memory areas by default except d3 debug data */
   fw->dbg.dump_mask = 0xfffdffff;
@@ -1546,8 +1485,8 @@
     goto try_again;
   }
 
-  IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
-                 drv->firmware_name, ucode_raw->size);
+  IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n", drv->firmware_name,
+                 ucode_raw->size);
 
   /* Make sure that we got at least the API version number */
   if (ucode_raw->size < 4) {
@@ -1561,8 +1500,7 @@
   if (ucode->ver) {
     err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
   } else {
-    err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces, &fw->ucode_capa,
-                                 &usniffer_images);
+    err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces, &fw->ucode_capa, &usniffer_images);
   }
 
   if (err) {
@@ -1571,11 +1509,10 @@
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
   if (!ucode->ver && drv->trans->dbg_cfg.fw_dbg_conf) {
-    load_fw_dbg_err = request_firmware(
-        &fw_dbg_config, drv->trans->dbg_cfg.fw_dbg_conf, drv->trans->dev);
+    load_fw_dbg_err =
+        request_firmware(&fw_dbg_config, drv->trans->dbg_cfg.fw_dbg_conf, drv->trans->dev);
     if (!load_fw_dbg_err) {
-      err = iwl_parse_tlv_firmware(drv, fw_dbg_config, pieces, &fw->ucode_capa,
-                                   &usniffer_images);
+      err = iwl_parse_tlv_firmware(drv, fw_dbg_config, pieces, &fw->ucode_capa, &usniffer_images);
       if (err) {
         IWL_ERR(drv, "Failed to configure FW DBG data!\n");
       }
@@ -1606,8 +1543,7 @@
    * In mvm uCode there is no difference between data and instructions
    * sections.
    */
-  if (fw->type == IWL_FW_DVM &&
-      validate_sec_sizes(drv, pieces, drv->trans->cfg)) {
+  if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->trans->cfg)) {
     goto try_again;
   }
 
@@ -1623,9 +1559,8 @@
     }
 
   if (pieces->dbg_dest_tlv_init) {
-    size_t dbg_dest_size =
-        sizeof(*drv->fw.dbg.dest_tlv) +
-        sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * drv->fw.dbg.n_dest_reg;
+    size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) +
+                           sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * drv->fw.dbg.n_dest_reg;
 
     drv->fw.dbg.dest_tlv = malloc(dbg_dest_size);
 
@@ -1662,8 +1597,7 @@
 
   for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) {
     if (pieces->dbg_conf_tlv[i]) {
-      drv->fw.dbg.conf_tlv[i] =
-          kmemdup(pieces->dbg_conf_tlv[i], pieces->dbg_conf_tlv_len[i]);
+      drv->fw.dbg.conf_tlv[i] = kmemdup(pieces->dbg_conf_tlv[i], pieces->dbg_conf_tlv_len[i]);
       if (!pieces->dbg_conf_tlv_len[i]) {
         goto out_free_fw;
       }
@@ -1672,24 +1606,17 @@
 
   memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz));
 
-  trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] =
-      sizeof(struct iwl_fw_dbg_trigger_missed_bcon);
+  trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] = sizeof(struct iwl_fw_dbg_trigger_missed_bcon);
   trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0;
-  trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] =
-      sizeof(struct iwl_fw_dbg_trigger_cmd);
+  trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] = sizeof(struct iwl_fw_dbg_trigger_cmd);
   trigger_tlv_sz[FW_DBG_TRIGGER_MLME] = sizeof(struct iwl_fw_dbg_trigger_mlme);
-  trigger_tlv_sz[FW_DBG_TRIGGER_STATS] =
-      sizeof(struct iwl_fw_dbg_trigger_stats);
-  trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] =
-      sizeof(struct iwl_fw_dbg_trigger_low_rssi);
-  trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] =
-      sizeof(struct iwl_fw_dbg_trigger_txq_timer);
-  trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
-      sizeof(struct iwl_fw_dbg_trigger_time_event);
+  trigger_tlv_sz[FW_DBG_TRIGGER_STATS] = sizeof(struct iwl_fw_dbg_trigger_stats);
+  trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] = sizeof(struct iwl_fw_dbg_trigger_low_rssi);
+  trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] = sizeof(struct iwl_fw_dbg_trigger_txq_timer);
+  trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] = sizeof(struct iwl_fw_dbg_trigger_time_event);
   trigger_tlv_sz[FW_DBG_TRIGGER_BA] = sizeof(struct iwl_fw_dbg_trigger_ba);
 #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS
-  trigger_tlv_sz[FW_DBG_TRIGGER_TX_LATENCY] =
-      sizeof(struct iwl_fw_dbg_trigger_tx_latency);
+  trigger_tlv_sz[FW_DBG_TRIGGER_TX_LATENCY] = sizeof(struct iwl_fw_dbg_trigger_tx_latency);
 #endif /* CPTCFG_MAC80211_LATENCY_MEASUREMENTS */
   trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] = sizeof(struct iwl_fw_dbg_trigger_tdls);
 
@@ -1702,9 +1629,8 @@
        * We'd better be noisy to be sure he knows what's
        * going on.
        */
-      if (WARN_ON(
-              pieces->dbg_trigger_tlv_len[i] <
-              (trigger_tlv_sz[i] + sizeof(struct iwl_fw_dbg_trigger_tlv)))) {
+      if (WARN_ON(pieces->dbg_trigger_tlv_len[i] <
+                  (trigger_tlv_sz[i] + sizeof(struct iwl_fw_dbg_trigger_tlv)))) {
         goto out_free_fw;
       }
       drv->fw.dbg.trigger_tlv_len[i] = pieces->dbg_trigger_tlv_len[i];
@@ -1746,10 +1672,8 @@
    * figure out the offset of chain noise reset and gain commands
    * base on the size of standard phy calibration commands table size
    */
-  if (fw->ucode_capa.standard_phy_calibration_size >
-      IWL_MAX_PHY_CALIBRATE_TBL_SIZE) {
-    fw->ucode_capa.standard_phy_calibration_size =
-        IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+  if (fw->ucode_capa.standard_phy_calibration_size > IWL_MAX_PHY_CALIBRATE_TBL_SIZE) {
+    fw->ucode_capa.standard_phy_calibration_size = IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
   }
 
   /* We have our copies now, allow OS release its copies */
@@ -1792,8 +1716,7 @@
     op = &iwlwifi_opmode_table[TRANS_TEST_OP_MODE];
   }
 #endif
-  IWL_INFO(drv, "loaded firmware version %s op_mode %s\n", drv->fw.fw_version,
-           op->name);
+  IWL_INFO(drv, "loaded firmware version %s op_mode %s\n", drv->fw.fw_version, op->name);
 
   /* add this device to the list of devices using this op_mode */
   list_add_tail(&drv->list, &op->drv);
@@ -1985,8 +1908,7 @@
     /* the rest are 0 by default */
 };
 
-zx_status_t iwl_opmode_register(const char* name,
-                                const struct iwl_op_mode_ops* ops) {
+zx_status_t iwl_opmode_register(const char* name, const struct iwl_op_mode_ops* ops) {
   size_t i;
   struct iwl_drv* drv;
   struct iwlwifi_opmode_table* op;
@@ -1999,7 +1921,7 @@
     }
     op->ops = ops;
     /* TODO: need to handle exceptional case */
-    list_for_every_entry(&op->drv, drv, struct iwl_drv, list) {
+    list_for_every_entry (&op->drv, drv, struct iwl_drv, list) {
       drv->op_mode = _iwl_op_mode_start(drv, op);
     }
 
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-parse.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-parse.c
index 1ae2038..41b0db1 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-parse.c
@@ -32,9 +32,11 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
 #include "iwl-eeprom-parse.h"
+
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+
 #include "iwl-drv.h"
 #include "iwl-modparams.h"
 
@@ -77,9 +79,9 @@
 
 /* calibration */
 struct iwl_eeprom_calib_hdr {
-    uint8_t version;
-    uint8_t pa_type;
-    __le16 voltage;
+  uint8_t version;
+  uint8_t pa_type;
+  __le16 voltage;
 } __packed;
 
 #define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
@@ -91,11 +93,11 @@
 
 /* SKU Capabilities (actual values from EEPROM definition) */
 enum eeprom_sku_bits {
-    EEPROM_SKU_CAP_BAND_24GHZ = BIT(4),
-    EEPROM_SKU_CAP_BAND_52GHZ = BIT(5),
-    EEPROM_SKU_CAP_11N_ENABLE = BIT(6),
-    EEPROM_SKU_CAP_AMT_ENABLE = BIT(7),
-    EEPROM_SKU_CAP_IPAN_ENABLE = BIT(8)
+  EEPROM_SKU_CAP_BAND_24GHZ = BIT(4),
+  EEPROM_SKU_CAP_BAND_52GHZ = BIT(5),
+  EEPROM_SKU_CAP_11N_ENABLE = BIT(6),
+  EEPROM_SKU_CAP_AMT_ENABLE = BIT(7),
+  EEPROM_SKU_CAP_IPAN_ENABLE = BIT(8)
 };
 
 /* radio config bits (actual values from EEPROM definition) */
@@ -135,10 +137,9 @@
 static const uint8_t iwl_eeprom_band_7[] = {/* 5.2 ht40 channel */
                                             36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157};
 
-#define IWL_NUM_CHANNELS                                             \
-    (ARRAY_SIZE(iwl_eeprom_band_1) + ARRAY_SIZE(iwl_eeprom_band_2) + \
-     ARRAY_SIZE(iwl_eeprom_band_3) + ARRAY_SIZE(iwl_eeprom_band_4) + \
-     ARRAY_SIZE(iwl_eeprom_band_5))
+#define IWL_NUM_CHANNELS                                                                           \
+  (ARRAY_SIZE(iwl_eeprom_band_1) + ARRAY_SIZE(iwl_eeprom_band_2) + ARRAY_SIZE(iwl_eeprom_band_3) + \
+   ARRAY_SIZE(iwl_eeprom_band_4) + ARRAY_SIZE(iwl_eeprom_band_5))
 
 /* rate data (static) */
 static struct ieee80211_rate iwl_cfg80211_rates[] = {
@@ -214,69 +215,77 @@
 /* EEPROM reading functions */
 
 static uint16_t iwl_eeprom_query16(const uint8_t* eeprom, size_t eeprom_size, int offset) {
-    if (WARN_ON(offset + sizeof(uint16_t) > eeprom_size)) { return 0; }
-    return le16_to_cpup((__le16*)(eeprom + offset));
+  if (WARN_ON(offset + sizeof(uint16_t) > eeprom_size)) {
+    return 0;
+  }
+  return le16_to_cpup((__le16*)(eeprom + offset));
 }
 
 static uint32_t eeprom_indirect_address(const uint8_t* eeprom, size_t eeprom_size,
                                         uint32_t address) {
-    uint16_t offset = 0;
+  uint16_t offset = 0;
 
-    if ((address & INDIRECT_ADDRESS) == 0) { return address; }
+  if ((address & INDIRECT_ADDRESS) == 0) {
+    return address;
+  }
 
-    switch (address & INDIRECT_TYPE_MSK) {
+  switch (address & INDIRECT_TYPE_MSK) {
     case INDIRECT_HOST:
-        offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_HOST);
-        break;
+      offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_HOST);
+      break;
     case INDIRECT_GENERAL:
-        offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_GENERAL);
-        break;
+      offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_GENERAL);
+      break;
     case INDIRECT_REGULATORY:
-        offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_REGULATORY);
-        break;
+      offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_REGULATORY);
+      break;
     case INDIRECT_TXP_LIMIT:
-        offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_TXP_LIMIT);
-        break;
+      offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_TXP_LIMIT);
+      break;
     case INDIRECT_TXP_LIMIT_SIZE:
-        offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_TXP_LIMIT_SIZE);
-        break;
+      offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_TXP_LIMIT_SIZE);
+      break;
     case INDIRECT_CALIBRATION:
-        offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_CALIBRATION);
-        break;
+      offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_CALIBRATION);
+      break;
     case INDIRECT_PROCESS_ADJST:
-        offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_PROCESS_ADJST);
-        break;
+      offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_PROCESS_ADJST);
+      break;
     case INDIRECT_OTHERS:
-        offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_OTHERS);
-        break;
+      offset = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_LINK_OTHERS);
+      break;
     default:
-        WARN_ON(1);
-        break;
-    }
+      WARN_ON(1);
+      break;
+  }
 
-    /* translate the offset from words to byte */
-    return (address & ADDRESS_MSK) + (offset << 1);
+  /* translate the offset from words to byte */
+  return (address & ADDRESS_MSK) + (offset << 1);
 }
 
 static const uint8_t* iwl_eeprom_query_addr(const uint8_t* eeprom, size_t eeprom_size,
                                             uint32_t offset) {
-    uint32_t address = eeprom_indirect_address(eeprom, eeprom_size, offset);
+  uint32_t address = eeprom_indirect_address(eeprom, eeprom_size, offset);
 
-    if (WARN_ON(address >= eeprom_size)) { return NULL; }
+  if (WARN_ON(address >= eeprom_size)) {
+    return NULL;
+  }
 
-    return &eeprom[address];
+  return &eeprom[address];
 }
 
 static int iwl_eeprom_read_calib(const uint8_t* eeprom, size_t eeprom_size,
                                  struct iwl_nvm_data* data) {
-    struct iwl_eeprom_calib_hdr* hdr;
+  struct iwl_eeprom_calib_hdr* hdr;
 
-    hdr = (void*)iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_CALIB_ALL);
-    if (!hdr) { return -ENODATA; }
-    data->calib_version = hdr->version;
-    data->calib_voltage = hdr->voltage;
+  hdr = (void*)iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_CALIB_ALL);
+  if (!hdr) {
+    return -ENODATA;
+  }
+  data->calib_version = hdr->version;
+  data->calib_voltage = hdr->voltage;
 
-    return 0;
+  return 0;
 }
 
 /**
@@ -289,12 +298,12 @@
  * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate
  */
 enum iwl_eeprom_channel_flags {
-    EEPROM_CHANNEL_VALID = BIT(0),
-    EEPROM_CHANNEL_IBSS = BIT(1),
-    EEPROM_CHANNEL_ACTIVE = BIT(3),
-    EEPROM_CHANNEL_RADAR = BIT(4),
-    EEPROM_CHANNEL_WIDE = BIT(5),
-    EEPROM_CHANNEL_DFS = BIT(7),
+  EEPROM_CHANNEL_VALID = BIT(0),
+  EEPROM_CHANNEL_IBSS = BIT(1),
+  EEPROM_CHANNEL_ACTIVE = BIT(3),
+  EEPROM_CHANNEL_RADAR = BIT(4),
+  EEPROM_CHANNEL_WIDE = BIT(5),
+  EEPROM_CHANNEL_DFS = BIT(7),
 };
 
 /**
@@ -303,19 +312,19 @@
  * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm
  */
 struct iwl_eeprom_channel {
-    uint8_t flags;
-    int8_t max_power_avg;
+  uint8_t flags;
+  int8_t max_power_avg;
 } __packed;
 
 enum iwl_eeprom_enhanced_txpwr_flags {
-    IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
-    IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
-    IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
-    IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
-    IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
-    IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
-    IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
-    IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
+  IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
+  IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
+  IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
+  IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
+  IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
+  IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
+  IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
+  IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
 };
 
 /**
@@ -333,36 +342,44 @@
  * in an EEPROM image.
  */
 struct iwl_eeprom_enhanced_txpwr {
-    uint8_t flags;
-    uint8_t channel;
-    int8_t chain_a_max;
-    int8_t chain_b_max;
-    int8_t chain_c_max;
-    uint8_t delta_20_in_40;
-    int8_t mimo2_max;
-    int8_t mimo3_max;
+  uint8_t flags;
+  uint8_t channel;
+  int8_t chain_a_max;
+  int8_t chain_b_max;
+  int8_t chain_c_max;
+  uint8_t delta_20_in_40;
+  int8_t mimo2_max;
+  int8_t mimo3_max;
 } __packed;
 
 static int8_t iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data* data,
                                          struct iwl_eeprom_enhanced_txpwr* txp) {
-    int8_t result = 0; /* (.5 dBm) */
+  int8_t result = 0; /* (.5 dBm) */
 
-    /* Take the highest tx power from any valid chains */
-    if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result) { result = txp->chain_a_max; }
+  /* Take the highest tx power from any valid chains */
+  if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result) {
+    result = txp->chain_a_max;
+  }
 
-    if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result) { result = txp->chain_b_max; }
+  if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result) {
+    result = txp->chain_b_max;
+  }
 
-    if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result) { result = txp->chain_c_max; }
+  if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result) {
+    result = txp->chain_c_max;
+  }
 
-    if ((data->valid_tx_ant == ANT_AB || data->valid_tx_ant == ANT_BC ||
-         data->valid_tx_ant == ANT_AC) &&
-        txp->mimo2_max > result) {
-        result = txp->mimo2_max;
-    }
+  if ((data->valid_tx_ant == ANT_AB || data->valid_tx_ant == ANT_BC ||
+       data->valid_tx_ant == ANT_AC) &&
+      txp->mimo2_max > result) {
+    result = txp->mimo2_max;
+  }
 
-    if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result) { result = txp->mimo3_max; }
+  if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result) {
+    result = txp->mimo3_max;
+  }
 
-    return result;
+  return result;
 }
 
 #define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
@@ -374,116 +391,120 @@
 static void iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data* data,
                                             struct iwl_eeprom_enhanced_txpwr* txp, int n_channels,
                                             int8_t max_txpower_avg) {
-    int ch_idx;
-    enum nl80211_band band;
+  int ch_idx;
+  enum nl80211_band band;
 
-    band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
+  band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
 
-    for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
-        struct ieee80211_channel* chan = &data->channels[ch_idx];
+  for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
+    struct ieee80211_channel* chan = &data->channels[ch_idx];
 
-        /* update matching channel or from common data only */
-        if (txp->channel != 0 && chan->hw_value != txp->channel) { continue; }
-
-        /* update matching band only */
-        if (band != chan->band) { continue; }
-
-        if (chan->max_power < max_txpower_avg && !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ)) {
-            chan->max_power = max_txpower_avg;
-        }
+    /* update matching channel or from common data only */
+    if (txp->channel != 0 && chan->hw_value != txp->channel) {
+      continue;
     }
+
+    /* update matching band only */
+    if (band != chan->band) {
+      continue;
+    }
+
+    if (chan->max_power < max_txpower_avg && !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ)) {
+      chan->max_power = max_txpower_avg;
+    }
+  }
 }
 
 static void iwl_eeprom_enhanced_txpower(struct device* dev, struct iwl_nvm_data* data,
                                         const uint8_t* eeprom, size_t eeprom_size, int n_channels) {
-    struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
-    int idx, entries;
-    __le16* txp_len;
-    int8_t max_txp_avg_halfdbm;
+  struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
+  int idx, entries;
+  __le16* txp_len;
+  int8_t max_txp_avg_halfdbm;
 
-    BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
+  BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
 
-    /* the length is in 16-bit words, but we want entries */
-    txp_len = (__le16*)iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_SZ_OFFS);
-    entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
+  /* the length is in 16-bit words, but we want entries */
+  txp_len = (__le16*)iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_SZ_OFFS);
+  entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
 
-    txp_array = (void*)iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_OFFS);
+  txp_array = (void*)iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_OFFS);
 
-    for (idx = 0; idx < entries; idx++) {
-        txp = &txp_array[idx];
-        /* skip invalid entries */
-        if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID)) { continue; }
-
-        IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
-                         (txp->channel && (txp->flags & IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE))
-                             ? "Common "
-                             : (txp->channel) ? "Channel" : "Common",
-                         (txp->channel), TXP_CHECK_AND_PRINT(VALID), TXP_CHECK_AND_PRINT(BAND_52G),
-                         TXP_CHECK_AND_PRINT(OFDM), TXP_CHECK_AND_PRINT(40MHZ),
-                         TXP_CHECK_AND_PRINT(HT_AP), TXP_CHECK_AND_PRINT(RES1),
-                         TXP_CHECK_AND_PRINT(RES2), TXP_CHECK_AND_PRINT(COMMON_TYPE), txp->flags);
-        IWL_DEBUG_EEPROM(dev, "\t\t chain_A: %d chain_B: %d chain_C: %d\n", txp->chain_a_max,
-                         txp->chain_b_max, txp->chain_c_max);
-        IWL_DEBUG_EEPROM(dev,
-                         "\t\t MIMO2: %d MIMO3: %d High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n",
-                         txp->mimo2_max, txp->mimo3_max, ((txp->delta_20_in_40 & 0xf0) >> 4),
-                         (txp->delta_20_in_40 & 0x0f));
-
-        max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);
-
-        iwl_eeprom_enh_txp_read_element(data, txp, n_channels,
-                                        DIV_ROUND_UP(max_txp_avg_halfdbm, 2));
-
-        if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm) {
-            data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm;
-        }
+  for (idx = 0; idx < entries; idx++) {
+    txp = &txp_array[idx];
+    /* skip invalid entries */
+    if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID)) {
+      continue;
     }
+
+    IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
+                     (txp->channel && (txp->flags & IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE))
+                         ? "Common "
+                         : (txp->channel) ? "Channel" : "Common",
+                     (txp->channel), TXP_CHECK_AND_PRINT(VALID), TXP_CHECK_AND_PRINT(BAND_52G),
+                     TXP_CHECK_AND_PRINT(OFDM), TXP_CHECK_AND_PRINT(40MHZ),
+                     TXP_CHECK_AND_PRINT(HT_AP), TXP_CHECK_AND_PRINT(RES1),
+                     TXP_CHECK_AND_PRINT(RES2), TXP_CHECK_AND_PRINT(COMMON_TYPE), txp->flags);
+    IWL_DEBUG_EEPROM(dev, "\t\t chain_A: %d chain_B: %d chain_C: %d\n", txp->chain_a_max,
+                     txp->chain_b_max, txp->chain_c_max);
+    IWL_DEBUG_EEPROM(dev, "\t\t MIMO2: %d MIMO3: %d High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n",
+                     txp->mimo2_max, txp->mimo3_max, ((txp->delta_20_in_40 & 0xf0) >> 4),
+                     (txp->delta_20_in_40 & 0x0f));
+
+    max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);
+
+    iwl_eeprom_enh_txp_read_element(data, txp, n_channels, DIV_ROUND_UP(max_txp_avg_halfdbm, 2));
+
+    if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm) {
+      data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm;
+    }
+  }
 }
 
 static void iwl_init_band_reference(const struct iwl_cfg* cfg, const uint8_t* eeprom,
                                     size_t eeprom_size, int eeprom_band, int* eeprom_ch_count,
                                     const struct iwl_eeprom_channel** ch_info,
                                     const uint8_t** eeprom_ch_array) {
-    uint32_t offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1];
+  uint32_t offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1];
 
-    offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
+  offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
 
-    *ch_info = (void*)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
+  *ch_info = (void*)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
 
-    switch (eeprom_band) {
+  switch (eeprom_band) {
     case 1: /* 2.4GHz band */
-        *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
-        *eeprom_ch_array = iwl_eeprom_band_1;
-        break;
+      *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
+      *eeprom_ch_array = iwl_eeprom_band_1;
+      break;
     case 2: /* 4.9GHz band */
-        *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
-        *eeprom_ch_array = iwl_eeprom_band_2;
-        break;
+      *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
+      *eeprom_ch_array = iwl_eeprom_band_2;
+      break;
     case 3: /* 5.2GHz band */
-        *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
-        *eeprom_ch_array = iwl_eeprom_band_3;
-        break;
+      *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
+      *eeprom_ch_array = iwl_eeprom_band_3;
+      break;
     case 4: /* 5.5GHz band */
-        *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
-        *eeprom_ch_array = iwl_eeprom_band_4;
-        break;
+      *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
+      *eeprom_ch_array = iwl_eeprom_band_4;
+      break;
     case 5: /* 5.7GHz band */
-        *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
-        *eeprom_ch_array = iwl_eeprom_band_5;
-        break;
+      *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
+      *eeprom_ch_array = iwl_eeprom_band_5;
+      break;
     case 6: /* 2.4GHz ht40 channels */
-        *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
-        *eeprom_ch_array = iwl_eeprom_band_6;
-        break;
+      *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
+      *eeprom_ch_array = iwl_eeprom_band_6;
+      break;
     case 7: /* 5 GHz ht40 channels */
-        *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
-        *eeprom_ch_array = iwl_eeprom_band_7;
-        break;
+      *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
+      *eeprom_ch_array = iwl_eeprom_band_7;
+      break;
     default:
-        *eeprom_ch_count = 0;
-        *eeprom_ch_array = NULL;
-        WARN_ON(1);
-    }
+      *eeprom_ch_count = 0;
+      *eeprom_ch_array = NULL;
+      WARN_ON(1);
+  }
 }
 
 #define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? #x " " : "")
@@ -492,28 +513,36 @@
                                    enum nl80211_band band, uint16_t channel,
                                    const struct iwl_eeprom_channel* eeprom_ch,
                                    uint8_t clear_ht40_extension_channel) {
-    struct ieee80211_channel* chan = NULL;
-    int i;
+  struct ieee80211_channel* chan = NULL;
+  int i;
 
-    for (i = 0; i < n_channels; i++) {
-        if (data->channels[i].band != band) { continue; }
-        if (data->channels[i].hw_value != channel) { continue; }
-        chan = &data->channels[i];
-        break;
+  for (i = 0; i < n_channels; i++) {
+    if (data->channels[i].band != band) {
+      continue;
     }
+    if (data->channels[i].hw_value != channel) {
+      continue;
+    }
+    chan = &data->channels[i];
+    break;
+  }
 
-    if (!chan) { return; }
+  if (!chan) {
+    return;
+  }
 
-    IWL_DEBUG_EEPROM(
-        dev, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", channel,
-        band == NL80211_BAND_5GHZ ? "5.2" : "2.4", CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
-        CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE), CHECK_AND_PRINT(DFS), eeprom_ch->flags,
-        eeprom_ch->max_power_avg,
-        ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR))
-            ? ""
-            : "not ");
+  IWL_DEBUG_EEPROM(
+      dev, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", channel,
+      band == NL80211_BAND_5GHZ ? "5.2" : "2.4", CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
+      CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE), CHECK_AND_PRINT(DFS), eeprom_ch->flags,
+      eeprom_ch->max_power_avg,
+      ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR))
+          ? ""
+          : "not ");
 
-    if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) { chan->flags &= ~clear_ht40_extension_channel; }
+  if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) {
+    chan->flags &= ~clear_ht40_extension_channel;
+  }
 }
 
 #define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? #x " " : "")
@@ -521,138 +550,138 @@
 static int iwl_init_channel_map(struct device* dev, const struct iwl_cfg* cfg,
                                 struct iwl_nvm_data* data, const uint8_t* eeprom,
                                 size_t eeprom_size) {
-    int band, ch_idx;
-    const struct iwl_eeprom_channel* eeprom_ch_info;
-    const uint8_t* eeprom_ch_array;
-    int eeprom_ch_count;
-    int n_channels = 0;
+  int band, ch_idx;
+  const struct iwl_eeprom_channel* eeprom_ch_info;
+  const uint8_t* eeprom_ch_array;
+  int eeprom_ch_count;
+  int n_channels = 0;
 
+  /*
+   * Loop through the 5 EEPROM bands and add them to the parse list
+   */
+  for (band = 1; band <= 5; band++) {
+    struct ieee80211_channel* channel;
+
+    iwl_init_band_reference(cfg, eeprom, eeprom_size, band, &eeprom_ch_count, &eeprom_ch_info,
+                            &eeprom_ch_array);
+
+    /* Loop through each band adding each of the channels */
+    for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
+      const struct iwl_eeprom_channel* eeprom_ch;
+
+      eeprom_ch = &eeprom_ch_info[ch_idx];
+
+      if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) {
+        IWL_DEBUG_EEPROM(dev, "Ch. %d Flags %x [%sGHz] - No traffic\n", eeprom_ch_array[ch_idx],
+                         eeprom_ch_info[ch_idx].flags, (band != 1) ? "5.2" : "2.4");
+        continue;
+      }
+
+      channel = &data->channels[n_channels];
+      n_channels++;
+
+      channel->hw_value = eeprom_ch_array[ch_idx];
+      channel->band = (band == 1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+      channel->center_freq = ieee80211_channel_to_frequency(channel->hw_value, channel->band);
+
+      /* set no-HT40, will enable as appropriate later */
+      channel->flags = IEEE80211_CHAN_NO_HT40;
+
+      if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS)) {
+        channel->flags |= IEEE80211_CHAN_NO_IR;
+      }
+
+      if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE)) {
+        channel->flags |= IEEE80211_CHAN_NO_IR;
+      }
+
+      if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR) {
+        channel->flags |= IEEE80211_CHAN_RADAR;
+      }
+
+      /* Initialize regulatory-based run-time data */
+      channel->max_power = eeprom_ch_info[ch_idx].max_power_avg;
+      IWL_DEBUG_EEPROM(dev, "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+                       channel->hw_value, (band != 1) ? "5.2" : "2.4", CHECK_AND_PRINT_I(VALID),
+                       CHECK_AND_PRINT_I(IBSS), CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR),
+                       CHECK_AND_PRINT_I(WIDE), CHECK_AND_PRINT_I(DFS),
+                       eeprom_ch_info[ch_idx].flags, eeprom_ch_info[ch_idx].max_power_avg,
+                       ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_IBSS) &&
+                        !(eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_RADAR))
+                           ? ""
+                           : "not ");
+    }
+  }
+
+  if (cfg->eeprom_params->enhanced_txpower) {
     /*
-     * Loop through the 5 EEPROM bands and add them to the parse list
+     * for newer device (6000 series and up)
+     * EEPROM contain enhanced tx power information
+     * driver need to process addition information
+     * to determine the max channel tx power limits
      */
-    for (band = 1; band <= 5; band++) {
-        struct ieee80211_channel* channel;
+    iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size, n_channels);
+  } else {
+    /* All others use data from channel map */
+    int i;
 
-        iwl_init_band_reference(cfg, eeprom, eeprom_size, band, &eeprom_ch_count, &eeprom_ch_info,
-                                &eeprom_ch_array);
+    data->max_tx_pwr_half_dbm = -128;
 
-        /* Loop through each band adding each of the channels */
-        for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
-            const struct iwl_eeprom_channel* eeprom_ch;
+    for (i = 0; i < n_channels; i++)
+      data->max_tx_pwr_half_dbm =
+          max_t(int8_t, data->max_tx_pwr_half_dbm, data->channels[i].max_power * 2);
+  }
 
-            eeprom_ch = &eeprom_ch_info[ch_idx];
-
-            if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) {
-                IWL_DEBUG_EEPROM(dev, "Ch. %d Flags %x [%sGHz] - No traffic\n",
-                                 eeprom_ch_array[ch_idx], eeprom_ch_info[ch_idx].flags,
-                                 (band != 1) ? "5.2" : "2.4");
-                continue;
-            }
-
-            channel = &data->channels[n_channels];
-            n_channels++;
-
-            channel->hw_value = eeprom_ch_array[ch_idx];
-            channel->band = (band == 1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
-            channel->center_freq = ieee80211_channel_to_frequency(channel->hw_value, channel->band);
-
-            /* set no-HT40, will enable as appropriate later */
-            channel->flags = IEEE80211_CHAN_NO_HT40;
-
-            if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS)) {
-                channel->flags |= IEEE80211_CHAN_NO_IR;
-            }
-
-            if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE)) {
-                channel->flags |= IEEE80211_CHAN_NO_IR;
-            }
-
-            if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR) { channel->flags |= IEEE80211_CHAN_RADAR; }
-
-            /* Initialize regulatory-based run-time data */
-            channel->max_power = eeprom_ch_info[ch_idx].max_power_avg;
-            IWL_DEBUG_EEPROM(dev, "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
-                             channel->hw_value, (band != 1) ? "5.2" : "2.4",
-                             CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(IBSS),
-                             CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR),
-                             CHECK_AND_PRINT_I(WIDE), CHECK_AND_PRINT_I(DFS),
-                             eeprom_ch_info[ch_idx].flags, eeprom_ch_info[ch_idx].max_power_avg,
-                             ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_IBSS) &&
-                              !(eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_RADAR))
-                                 ? ""
-                                 : "not ");
-        }
-    }
-
-    if (cfg->eeprom_params->enhanced_txpower) {
-        /*
-         * for newer device (6000 series and up)
-         * EEPROM contain enhanced tx power information
-         * driver need to process addition information
-         * to determine the max channel tx power limits
-         */
-        iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size, n_channels);
-    } else {
-        /* All others use data from channel map */
-        int i;
-
-        data->max_tx_pwr_half_dbm = -128;
-
-        for (i = 0; i < n_channels; i++)
-            data->max_tx_pwr_half_dbm =
-                max_t(int8_t, data->max_tx_pwr_half_dbm, data->channels[i].max_power * 2);
-    }
-
-    /* Check if we do have HT40 channels */
-    if (cfg->eeprom_params->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
-        cfg->eeprom_params->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40) {
-        return n_channels;
-    }
-
-    /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
-    for (band = 6; band <= 7; band++) {
-        enum nl80211_band ieeeband;
-
-        iwl_init_band_reference(cfg, eeprom, eeprom_size, band, &eeprom_ch_count, &eeprom_ch_info,
-                                &eeprom_ch_array);
-
-        /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
-        ieeeband = (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
-
-        /* Loop through each band adding each of the channels */
-        for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
-            /* Set up driver's info for lower half */
-            iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband, eeprom_ch_array[ch_idx],
-                                   &eeprom_ch_info[ch_idx], IEEE80211_CHAN_NO_HT40PLUS);
-
-            /* Set up driver's info for upper half */
-            iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband, eeprom_ch_array[ch_idx] + 4,
-                                   &eeprom_ch_info[ch_idx], IEEE80211_CHAN_NO_HT40MINUS);
-        }
-    }
-
+  /* Check if we do have HT40 channels */
+  if (cfg->eeprom_params->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
+      cfg->eeprom_params->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40) {
     return n_channels;
+  }
+
+  /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+  for (band = 6; band <= 7; band++) {
+    enum nl80211_band ieeeband;
+
+    iwl_init_band_reference(cfg, eeprom, eeprom_size, band, &eeprom_ch_count, &eeprom_ch_info,
+                            &eeprom_ch_array);
+
+    /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+    ieeeband = (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+
+    /* Loop through each band adding each of the channels */
+    for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
+      /* Set up driver's info for lower half */
+      iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband, eeprom_ch_array[ch_idx],
+                             &eeprom_ch_info[ch_idx], IEEE80211_CHAN_NO_HT40PLUS);
+
+      /* Set up driver's info for upper half */
+      iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband, eeprom_ch_array[ch_idx] + 4,
+                             &eeprom_ch_info[ch_idx], IEEE80211_CHAN_NO_HT40MINUS);
+    }
+  }
+
+  return n_channels;
 }
 
 int iwl_init_sband_channels(struct iwl_nvm_data* data, struct ieee80211_supported_band* sband,
                             int n_channels, enum nl80211_band band) {
-    struct ieee80211_channel* chan = &data->channels[0];
-    int n = 0, idx = 0;
+  struct ieee80211_channel* chan = &data->channels[0];
+  int n = 0, idx = 0;
 
-    while (idx < n_channels && chan->band != band) {
-        chan = &data->channels[++idx];
-    }
+  while (idx < n_channels && chan->band != band) {
+    chan = &data->channels[++idx];
+  }
 
-    sband->channels = &data->channels[idx];
+  sband->channels = &data->channels[idx];
 
-    while (idx < n_channels && chan->band == band) {
-        chan = &data->channels[++idx];
-        n++;
-    }
+  while (idx < n_channels && chan->band == band) {
+    chan = &data->channels[++idx];
+    n++;
+  }
 
-    sband->n_channels = n;
+  sband->n_channels = n;
 
-    return n;
+  return n;
 }
 
 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
@@ -661,163 +690,195 @@
 void iwl_init_ht_hw_capab(const struct iwl_cfg* cfg, struct iwl_nvm_data* data,
                           struct ieee80211_sta_ht_cap* ht_info, enum nl80211_band band,
                           uint8_t tx_chains, uint8_t rx_chains) {
-    int max_bit_rate = 0;
+  int max_bit_rate = 0;
 
-    tx_chains = hweight8(tx_chains);
-    if (cfg->rx_with_siso_diversity) {
-        rx_chains = 1;
-    } else {
-        rx_chains = hweight8(rx_chains);
+  tx_chains = hweight8(tx_chains);
+  if (cfg->rx_with_siso_diversity) {
+    rx_chains = 1;
+  } else {
+    rx_chains = hweight8(rx_chains);
+  }
+
+  if (!(data->sku_cap_11n_enable) || (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
+      !cfg->ht_params) {
+    ht_info->ht_supported = false;
+    return;
+  }
+
+  if (data->sku_cap_mimo_disabled) {
+    rx_chains = 1;
+  }
+
+  ht_info->ht_supported = true;
+  ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+
+  if (cfg->ht_params->stbc) {
+    ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+    if (tx_chains > 1) {
+      ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
     }
+  }
 
-    if (!(data->sku_cap_11n_enable) || (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
-        !cfg->ht_params) {
-        ht_info->ht_supported = false;
-        return;
-    }
+  if (cfg->ht_params->ldpc) {
+    ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+  }
 
-    if (data->sku_cap_mimo_disabled) { rx_chains = 1; }
+  if ((cfg->mq_rx_supported && iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
+      iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) {
+    ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+  }
 
-    ht_info->ht_supported = true;
-    ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+  ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
+  ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
 
-    if (cfg->ht_params->stbc) {
-        ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+  ht_info->mcs.rx_mask[0] = 0xFF;
+  if (rx_chains >= 2) {
+    ht_info->mcs.rx_mask[1] = 0xFF;
+  }
+  if (rx_chains >= 3) {
+    ht_info->mcs.rx_mask[2] = 0xFF;
+  }
 
-        if (tx_chains > 1) { ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; }
-    }
+  if (cfg->ht_params->ht_greenfield_support) {
+    ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
+  }
+  ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
 
-    if (cfg->ht_params->ldpc) { ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; }
+  max_bit_rate = MAX_BIT_RATE_20_MHZ;
 
-    if ((cfg->mq_rx_supported && iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
-        iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) {
-        ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-    }
+  if (cfg->ht_params->ht40_bands & BIT(band)) {
+    ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+    ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+    max_bit_rate = MAX_BIT_RATE_40_MHZ;
+  }
 
-    ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
-    ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+  /* Highest supported Rx data rate */
+  max_bit_rate *= rx_chains;
+  WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+  ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
 
-    ht_info->mcs.rx_mask[0] = 0xFF;
-    if (rx_chains >= 2) { ht_info->mcs.rx_mask[1] = 0xFF; }
-    if (rx_chains >= 3) { ht_info->mcs.rx_mask[2] = 0xFF; }
-
-    if (cfg->ht_params->ht_greenfield_support) { ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; }
-    ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
-
-    max_bit_rate = MAX_BIT_RATE_20_MHZ;
-
-    if (cfg->ht_params->ht40_bands & BIT(band)) {
-        ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-        ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
-        max_bit_rate = MAX_BIT_RATE_40_MHZ;
-    }
-
-    /* Highest supported Rx data rate */
-    max_bit_rate *= rx_chains;
-    WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
-    ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
-    /* Tx MCS capabilities */
-    ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
-    if (tx_chains != rx_chains) {
-        ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
-        ht_info->mcs.tx_params |= ((tx_chains - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
-    }
+  /* Tx MCS capabilities */
+  ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+  if (tx_chains != rx_chains) {
+    ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+    ht_info->mcs.tx_params |= ((tx_chains - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+  }
 }
 
 static void iwl_init_sbands(struct device* dev, const struct iwl_cfg* cfg,
                             struct iwl_nvm_data* data, const uint8_t* eeprom, size_t eeprom_size) {
-    int n_channels = iwl_init_channel_map(dev, cfg, data, eeprom, eeprom_size);
-    int n_used = 0;
-    struct ieee80211_supported_band* sband;
+  int n_channels = iwl_init_channel_map(dev, cfg, data, eeprom, eeprom_size);
+  int n_used = 0;
+  struct ieee80211_supported_band* sband;
 
-    sband = &data->bands[NL80211_BAND_2GHZ];
-    sband->band = NL80211_BAND_2GHZ;
-    sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
-    sband->n_bitrates = N_RATES_24;
-    n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_2GHZ);
-    iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, data->valid_tx_ant,
-                         data->valid_rx_ant);
+  sband = &data->bands[NL80211_BAND_2GHZ];
+  sband->band = NL80211_BAND_2GHZ;
+  sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+  sband->n_bitrates = N_RATES_24;
+  n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_2GHZ);
+  iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, data->valid_tx_ant,
+                       data->valid_rx_ant);
 
-    sband = &data->bands[NL80211_BAND_5GHZ];
-    sband->band = NL80211_BAND_5GHZ;
-    sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
-    sband->n_bitrates = N_RATES_52;
-    n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_5GHZ);
-    iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ, data->valid_tx_ant,
-                         data->valid_rx_ant);
+  sband = &data->bands[NL80211_BAND_5GHZ];
+  sband->band = NL80211_BAND_5GHZ;
+  sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
+  sband->n_bitrates = N_RATES_52;
+  n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_5GHZ);
+  iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ, data->valid_tx_ant,
+                       data->valid_rx_ant);
 
-    if (n_channels != n_used) {
-        IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n", n_used, n_channels);
-    }
+  if (n_channels != n_used) {
+    IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n", n_used, n_channels);
+  }
 }
 
 /* EEPROM data functions */
 
 struct iwl_nvm_data* iwl_parse_eeprom_data(struct device* dev, const struct iwl_cfg* cfg,
                                            const uint8_t* eeprom, size_t eeprom_size) {
-    struct iwl_nvm_data* data;
-    const void* tmp;
-    uint16_t radio_cfg, sku;
+  struct iwl_nvm_data* data;
+  const void* tmp;
+  uint16_t radio_cfg, sku;
 
-    if (WARN_ON(!cfg || !cfg->eeprom_params)) { return NULL; }
-
-    data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, GFP_KERNEL);
-    if (!data) { return NULL; }
-
-    /* get MAC address(es) */
-    tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS);
-    if (!tmp) { goto err_free; }
-    memcpy(data->hw_addr, tmp, ETH_ALEN);
-    data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_NUM_MAC_ADDRESS);
-
-    if (iwl_eeprom_read_calib(eeprom, eeprom_size, data)) { goto err_free; }
-
-    tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL);
-    if (!tmp) { goto err_free; }
-    memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib));
-
-    tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_RAW_TEMPERATURE);
-    if (!tmp) { goto err_free; }
-    data->raw_temperature = *(__le16*)tmp;
-
-    tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_KELVIN_TEMPERATURE);
-    if (!tmp) { goto err_free; }
-    data->kelvin_temperature = *(__le16*)tmp;
-    data->kelvin_voltage = *((__le16*)tmp + 1);
-
-    radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_RADIO_CONFIG);
-    data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg);
-    data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg);
-    data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg);
-    data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
-    data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
-    data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
-
-    sku = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_SKU_CAP);
-    data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE;
-    data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE;
-    data->sku_cap_band_24ghz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
-    data->sku_cap_band_52ghz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
-    data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE;
-    if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) { data->sku_cap_11n_enable = false; }
-
-    data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_VERSION);
-
-    /* check overrides (some devices have wrong EEPROM) */
-    if (cfg->valid_tx_ant) { data->valid_tx_ant = cfg->valid_tx_ant; }
-    if (cfg->valid_rx_ant) { data->valid_rx_ant = cfg->valid_rx_ant; }
-
-    if (!data->valid_tx_ant || !data->valid_rx_ant) {
-        IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n", data->valid_tx_ant, data->valid_rx_ant);
-        goto err_free;
-    }
-
-    iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size);
-
-    return data;
-err_free:
-    kfree(data);
+  if (WARN_ON(!cfg || !cfg->eeprom_params)) {
     return NULL;
+  }
+
+  data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, GFP_KERNEL);
+  if (!data) {
+    return NULL;
+  }
+
+  /* get MAC address(es) */
+  tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS);
+  if (!tmp) {
+    goto err_free;
+  }
+  memcpy(data->hw_addr, tmp, ETH_ALEN);
+  data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_NUM_MAC_ADDRESS);
+
+  if (iwl_eeprom_read_calib(eeprom, eeprom_size, data)) {
+    goto err_free;
+  }
+
+  tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL);
+  if (!tmp) {
+    goto err_free;
+  }
+  memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib));
+
+  tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_RAW_TEMPERATURE);
+  if (!tmp) {
+    goto err_free;
+  }
+  data->raw_temperature = *(__le16*)tmp;
+
+  tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_KELVIN_TEMPERATURE);
+  if (!tmp) {
+    goto err_free;
+  }
+  data->kelvin_temperature = *(__le16*)tmp;
+  data->kelvin_voltage = *((__le16*)tmp + 1);
+
+  radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_RADIO_CONFIG);
+  data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg);
+  data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg);
+  data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg);
+  data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
+  data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+  data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+
+  sku = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_SKU_CAP);
+  data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE;
+  data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE;
+  data->sku_cap_band_24ghz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
+  data->sku_cap_band_52ghz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
+  data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE;
+  if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) {
+    data->sku_cap_11n_enable = false;
+  }
+
+  data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_VERSION);
+
+  /* check overrides (some devices have wrong EEPROM) */
+  if (cfg->valid_tx_ant) {
+    data->valid_tx_ant = cfg->valid_tx_ant;
+  }
+  if (cfg->valid_rx_ant) {
+    data->valid_rx_ant = cfg->valid_rx_ant;
+  }
+
+  if (!data->valid_tx_ant || !data->valid_rx_ant) {
+    IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n", data->valid_tx_ant, data->valid_rx_ant);
+    goto err_free;
+  }
+
+  iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size);
+
+  return data;
+err_free:
+  kfree(data);
+  return NULL;
 }
 IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-parse.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-parse.h
index 106b890..1e61682 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-parse.h
@@ -37,39 +37,39 @@
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h"
 
 struct iwl_nvm_data {
-    int n_hw_addrs;
-    uint8_t hw_addr[ETH_ALEN];
+  int n_hw_addrs;
+  uint8_t hw_addr[ETH_ALEN];
 
-    uint8_t calib_version;
-    __le16 calib_voltage;
+  uint8_t calib_version;
+  __le16 calib_voltage;
 
-    __le16 raw_temperature;
-    __le16 kelvin_temperature;
-    __le16 kelvin_voltage;
-    __le16 xtal_calib[2];
+  __le16 raw_temperature;
+  __le16 kelvin_temperature;
+  __le16 kelvin_voltage;
+  __le16 xtal_calib[2];
 
-    bool sku_cap_band_24ghz_enable;
-    bool sku_cap_band_52ghz_enable;
-    bool sku_cap_11n_enable;
-    bool sku_cap_11ac_enable;
-    bool sku_cap_11ax_enable;
-    bool sku_cap_amt_enable;
-    bool sku_cap_ipan_enable;
-    bool sku_cap_mimo_disabled;
+  bool sku_cap_band_24ghz_enable;
+  bool sku_cap_band_52ghz_enable;
+  bool sku_cap_11n_enable;
+  bool sku_cap_11ac_enable;
+  bool sku_cap_11ax_enable;
+  bool sku_cap_amt_enable;
+  bool sku_cap_ipan_enable;
+  bool sku_cap_mimo_disabled;
 
-    uint16_t radio_cfg_type;
-    uint8_t radio_cfg_step;
-    uint8_t radio_cfg_dash;
-    uint8_t radio_cfg_pnum;
-    uint8_t valid_tx_ant, valid_rx_ant;
+  uint16_t radio_cfg_type;
+  uint8_t radio_cfg_step;
+  uint8_t radio_cfg_dash;
+  uint8_t radio_cfg_pnum;
+  uint8_t valid_tx_ant, valid_rx_ant;
 
-    uint32_t nvm_version;
-    int8_t max_tx_pwr_half_dbm;
+  uint32_t nvm_version;
+  int8_t max_tx_pwr_half_dbm;
 
-    bool lar_enabled;
-    bool vht160_supported;
-    struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
-    struct ieee80211_channel channels[];
+  bool lar_enabled;
+  bool vht160_supported;
+  struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
+  struct ieee80211_channel channels[];
 };
 
 /**
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-read.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-read.c
index 518e890..b1c0b15 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-read.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-read.c
@@ -30,6 +30,8 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
+#include "iwl-eeprom-read.h"
+
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -37,7 +39,6 @@
 #include "iwl-csr.h"
 #include "iwl-debug.h"
 #include "iwl-drv.h"
-#include "iwl-eeprom-read.h"
 #include "iwl-io.h"
 #include "iwl-prph.h"
 
@@ -65,54 +66,54 @@
 #define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
 
 static int iwl_eeprom_acquire_semaphore(struct iwl_trans* trans) {
-    uint16_t count;
-    int ret;
+  uint16_t count;
+  int ret;
 
-    for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
-        /* Request semaphore */
-        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+  for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+    /* Request semaphore */
+    iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
 
-        /* See if we got it */
-        ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
-                           CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, EEPROM_SEM_TIMEOUT);
-        if (ret >= 0) {
-            IWL_DEBUG_EEPROM(trans->dev, "Acquired semaphore after %d tries.\n", count + 1);
-            return ret;
-        }
+    /* See if we got it */
+    ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+                       CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, EEPROM_SEM_TIMEOUT);
+    if (ret >= 0) {
+      IWL_DEBUG_EEPROM(trans->dev, "Acquired semaphore after %d tries.\n", count + 1);
+      return ret;
     }
+  }
 
-    return ret;
+  return ret;
 }
 
 static void iwl_eeprom_release_semaphore(struct iwl_trans* trans) {
-    iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+  iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
 }
 
 static int iwl_eeprom_verify_signature(struct iwl_trans* trans, bool nvm_is_otp) {
-    uint32_t gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+  uint32_t gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
 
-    IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
+  IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
 
-    switch (gp) {
+  switch (gp) {
     case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
-        if (!nvm_is_otp) {
-            IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", gp);
-            return -ENOENT;
-        }
-        return 0;
+      if (!nvm_is_otp) {
+        IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", gp);
+        return -ENOENT;
+      }
+      return 0;
     case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
     case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
-        if (nvm_is_otp) {
-            IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
-            return -ENOENT;
-        }
-        return 0;
+      if (nvm_is_otp) {
+        IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
+        return -ENOENT;
+      }
+      return 0;
     case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
     default:
-        IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
-                nvm_is_otp ? "OTP" : "EEPROM", gp);
-        return -ENOENT;
-    }
+      IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
+              nvm_is_otp ? "OTP" : "EEPROM", gp);
+      return -ENOENT;
+  }
 }
 
 /******************************************************************************
@@ -122,111 +123,113 @@
  ******************************************************************************/
 
 static void iwl_set_otp_access_absolute(struct iwl_trans* trans) {
-    iwl_read32(trans, CSR_OTP_GP_REG);
+  iwl_read32(trans, CSR_OTP_GP_REG);
 
-    iwl_clear_bit(trans, CSR_OTP_GP_REG, CSR_OTP_GP_REG_OTP_ACCESS_MODE);
+  iwl_clear_bit(trans, CSR_OTP_GP_REG, CSR_OTP_GP_REG_OTP_ACCESS_MODE);
 }
 
 static int iwl_nvm_is_otp(struct iwl_trans* trans) {
-    uint32_t otpgp;
+  uint32_t otpgp;
 
-    /* OTP only valid for CP/PP and after */
-    switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
+  /* OTP only valid for CP/PP and after */
+  switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
     case CSR_HW_REV_TYPE_NONE:
-        IWL_ERR(trans, "Unknown hardware type\n");
-        return -EIO;
+      IWL_ERR(trans, "Unknown hardware type\n");
+      return -EIO;
     case CSR_HW_REV_TYPE_5300:
     case CSR_HW_REV_TYPE_5350:
     case CSR_HW_REV_TYPE_5100:
     case CSR_HW_REV_TYPE_5150:
-        return 0;
+      return 0;
     default:
-        otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
-        if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) { return 1; }
-        return 0;
-    }
+      otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+      if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) {
+        return 1;
+      }
+      return 0;
+  }
 }
 
 static int iwl_init_otp_access(struct iwl_trans* trans) {
-    int ret;
+  int ret;
 
-    /* Enable 40MHz radio clock */
-    iwl_write32(trans, CSR_GP_CNTRL,
-                iwl_read32(trans, CSR_GP_CNTRL) | BIT(trans->cfg->csr->flag_init_done));
+  /* Enable 40MHz radio clock */
+  iwl_write32(trans, CSR_GP_CNTRL,
+              iwl_read32(trans, CSR_GP_CNTRL) | BIT(trans->cfg->csr->flag_init_done));
 
-    /* wait for clock to be ready */
-    ret = iwl_poll_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_clock_ready),
-                       BIT(trans->cfg->csr->flag_mac_clock_ready), 25000);
-    if (ret < 0) {
-        IWL_ERR(trans, "Time out access OTP\n");
-    } else {
-        iwl_set_bits_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
-        udelay(5);
-        iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+  /* wait for clock to be ready */
+  ret = iwl_poll_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_clock_ready),
+                     BIT(trans->cfg->csr->flag_mac_clock_ready), 25000);
+  if (ret < 0) {
+    IWL_ERR(trans, "Time out access OTP\n");
+  } else {
+    iwl_set_bits_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+    udelay(5);
+    iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
 
-        /*
-         * CSR auto clock gate disable bit -
-         * this is only applicable for HW with OTP shadow RAM
-         */
-        if (trans->cfg->base_params->shadow_ram_support) {
-            iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED);
-        }
+    /*
+     * CSR auto clock gate disable bit -
+     * this is only applicable for HW with OTP shadow RAM
+     */
+    if (trans->cfg->base_params->shadow_ram_support) {
+      iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED);
     }
-    return ret;
+  }
+  return ret;
 }
 
 static int iwl_read_otp_word(struct iwl_trans* trans, uint16_t addr, __le16* eeprom_data) {
-    int ret = 0;
-    uint32_t r;
-    uint32_t otpgp;
+  int ret = 0;
+  uint32_t r;
+  uint32_t otpgp;
 
-    iwl_write32(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-    ret = iwl_poll_bit(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK,
-                       CSR_EEPROM_REG_READ_VALID_MSK, IWL_EEPROM_ACCESS_TIMEOUT);
-    if (ret < 0) {
-        IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
-        return ret;
-    }
-    r = iwl_read32(trans, CSR_EEPROM_REG);
-    /* check for ECC errors: */
-    otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
-    if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
-        /* stop in this case */
-        /* set the uncorrectable OTP ECC bit for acknowledgment */
-        iwl_set_bit(trans, CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
-        IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
-        return -EINVAL;
-    }
-    if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
-        /* continue in this case */
-        /* set the correctable OTP ECC bit for acknowledgment */
-        iwl_set_bit(trans, CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
-        IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
-    }
-    *eeprom_data = cpu_to_le16(r >> 16);
-    return 0;
+  iwl_write32(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+  ret = iwl_poll_bit(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK,
+                     CSR_EEPROM_REG_READ_VALID_MSK, IWL_EEPROM_ACCESS_TIMEOUT);
+  if (ret < 0) {
+    IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
+    return ret;
+  }
+  r = iwl_read32(trans, CSR_EEPROM_REG);
+  /* check for ECC errors: */
+  otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+  if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
+    /* stop in this case */
+    /* set the uncorrectable OTP ECC bit for acknowledgment */
+    iwl_set_bit(trans, CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+    IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
+    return -EINVAL;
+  }
+  if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
+    /* continue in this case */
+    /* set the correctable OTP ECC bit for acknowledgment */
+    iwl_set_bit(trans, CSR_OTP_GP_REG, CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
+    IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
+  }
+  *eeprom_data = cpu_to_le16(r >> 16);
+  return 0;
 }
 
 /*
  * iwl_is_otp_empty: check for empty OTP
  */
 static bool iwl_is_otp_empty(struct iwl_trans* trans) {
-    uint16_t next_link_addr = 0;
-    __le16 link_value;
-    bool is_empty = false;
+  uint16_t next_link_addr = 0;
+  __le16 link_value;
+  bool is_empty = false;
 
-    /* locate the beginning of OTP link list */
-    if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
-        if (!link_value) {
-            IWL_ERR(trans, "OTP is empty\n");
-            is_empty = true;
-        }
-    } else {
-        IWL_ERR(trans, "Unable to read first block of OTP list.\n");
-        is_empty = true;
+  /* locate the beginning of OTP link list */
+  if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
+    if (!link_value) {
+      IWL_ERR(trans, "OTP is empty\n");
+      is_empty = true;
     }
+  } else {
+    IWL_ERR(trans, "Unable to read first block of OTP list.\n");
+    is_empty = true;
+  }
 
-    return is_empty;
+  return is_empty;
 }
 
 /*
@@ -239,47 +242,51 @@
  *   only perform this operation if shadow RAM is disabled
  */
 static int iwl_find_otp_image(struct iwl_trans* trans, uint16_t* validblockaddr) {
-    uint16_t next_link_addr = 0, valid_addr;
-    __le16 link_value = 0;
-    int usedblocks = 0;
+  uint16_t next_link_addr = 0, valid_addr;
+  __le16 link_value = 0;
+  int usedblocks = 0;
 
-    /* set addressing mode to absolute to traverse the link list */
-    iwl_set_otp_access_absolute(trans);
+  /* set addressing mode to absolute to traverse the link list */
+  iwl_set_otp_access_absolute(trans);
 
-    /* checking for empty OTP or error */
-    if (iwl_is_otp_empty(trans)) { return -EINVAL; }
-
-    /*
-     * start traverse link list
-     * until reach the max number of OTP blocks
-     * different devices have different number of OTP blocks
-     */
-    do {
-        /* save current valid block address
-         * check for more block on the link list
-         */
-        valid_addr = next_link_addr;
-        next_link_addr = le16_to_cpu(link_value) * sizeof(uint16_t);
-        IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n", usedblocks, next_link_addr);
-        if (iwl_read_otp_word(trans, next_link_addr, &link_value)) { return -EINVAL; }
-        if (!link_value) {
-            /*
-             * reach the end of link list, return success and
-             * set address point to the starting address
-             * of the image
-             */
-            *validblockaddr = valid_addr;
-            /* skip first 2 bytes (link list pointer) */
-            *validblockaddr += 2;
-            return 0;
-        }
-        /* more in the link list, continue */
-        usedblocks++;
-    } while (usedblocks <= trans->cfg->base_params->max_ll_items);
-
-    /* OTP has no valid blocks */
-    IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
+  /* checking for empty OTP or error */
+  if (iwl_is_otp_empty(trans)) {
     return -EINVAL;
+  }
+
+  /*
+   * start traverse link list
+   * until reach the max number of OTP blocks
+   * different devices have different number of OTP blocks
+   */
+  do {
+    /* save current valid block address
+     * check for more block on the link list
+     */
+    valid_addr = next_link_addr;
+    next_link_addr = le16_to_cpu(link_value) * sizeof(uint16_t);
+    IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n", usedblocks, next_link_addr);
+    if (iwl_read_otp_word(trans, next_link_addr, &link_value)) {
+      return -EINVAL;
+    }
+    if (!link_value) {
+      /*
+       * reach the end of link list, return success and
+       * set address point to the starting address
+       * of the image
+       */
+      *validblockaddr = valid_addr;
+      /* skip first 2 bytes (link list pointer) */
+      *validblockaddr += 2;
+      return 0;
+    }
+    /* more in the link list, continue */
+    usedblocks++;
+  } while (usedblocks <= trans->cfg->base_params->max_ll_items);
+
+  /* OTP has no valid blocks */
+  IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
+  return -EINVAL;
 }
 
 /**
@@ -291,95 +298,105 @@
  * NOTE:  This routine uses the non-debug IO access functions.
  */
 int iwl_read_eeprom(struct iwl_trans* trans, uint8_t** eeprom, size_t* eeprom_size) {
-    __le16* e;
-    uint32_t gp = iwl_read32(trans, CSR_EEPROM_GP);
-    int sz;
-    int ret;
-    uint16_t addr;
-    uint16_t validblockaddr = 0;
-    uint16_t cache_addr = 0;
-    int nvm_is_otp;
+  __le16* e;
+  uint32_t gp = iwl_read32(trans, CSR_EEPROM_GP);
+  int sz;
+  int ret;
+  uint16_t addr;
+  uint16_t validblockaddr = 0;
+  uint16_t cache_addr = 0;
+  int nvm_is_otp;
 
-    if (!eeprom || !eeprom_size) { return -EINVAL; }
+  if (!eeprom || !eeprom_size) {
+    return -EINVAL;
+  }
 
-    nvm_is_otp = iwl_nvm_is_otp(trans);
-    if (nvm_is_otp < 0) { return nvm_is_otp; }
+  nvm_is_otp = iwl_nvm_is_otp(trans);
+  if (nvm_is_otp < 0) {
+    return nvm_is_otp;
+  }
 
-    sz = trans->cfg->base_params->eeprom_size;
-    IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
+  sz = trans->cfg->base_params->eeprom_size;
+  IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
 
-    e = kmalloc(sz, GFP_KERNEL);
-    if (!e) { return -ENOMEM; }
+  e = kmalloc(sz, GFP_KERNEL);
+  if (!e) {
+    return -ENOMEM;
+  }
 
-    ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
-    if (ret < 0) {
-        IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
-        goto err_free;
+  ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
+  if (ret < 0) {
+    IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+    goto err_free;
+  }
+
+  /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+  ret = iwl_eeprom_acquire_semaphore(trans);
+  if (ret < 0) {
+    IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
+    goto err_free;
+  }
+
+  if (nvm_is_otp) {
+    ret = iwl_init_otp_access(trans);
+    if (ret) {
+      IWL_ERR(trans, "Failed to initialize OTP access.\n");
+      goto err_unlock;
     }
 
-    /* Make sure driver (instead of uCode) is allowed to read EEPROM */
-    ret = iwl_eeprom_acquire_semaphore(trans);
-    if (ret < 0) {
-        IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
-        goto err_free;
+    iwl_write32(trans, CSR_EEPROM_GP,
+                iwl_read32(trans, CSR_EEPROM_GP) & ~CSR_EEPROM_GP_IF_OWNER_MSK);
+
+    iwl_set_bit(trans, CSR_OTP_GP_REG,
+                CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+    /* traversing the linked list if no shadow ram supported */
+    if (!trans->cfg->base_params->shadow_ram_support) {
+      ret = iwl_find_otp_image(trans, &validblockaddr);
+      if (ret) {
+        goto err_unlock;
+      }
     }
+    for (addr = validblockaddr; addr < validblockaddr + sz; addr += sizeof(uint16_t)) {
+      __le16 eeprom_data;
 
-    if (nvm_is_otp) {
-        ret = iwl_init_otp_access(trans);
-        if (ret) {
-            IWL_ERR(trans, "Failed to initialize OTP access.\n");
-            goto err_unlock;
-        }
-
-        iwl_write32(trans, CSR_EEPROM_GP,
-                    iwl_read32(trans, CSR_EEPROM_GP) & ~CSR_EEPROM_GP_IF_OWNER_MSK);
-
-        iwl_set_bit(trans, CSR_OTP_GP_REG,
-                    CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
-        /* traversing the linked list if no shadow ram supported */
-        if (!trans->cfg->base_params->shadow_ram_support) {
-            ret = iwl_find_otp_image(trans, &validblockaddr);
-            if (ret) { goto err_unlock; }
-        }
-        for (addr = validblockaddr; addr < validblockaddr + sz; addr += sizeof(uint16_t)) {
-            __le16 eeprom_data;
-
-            ret = iwl_read_otp_word(trans, addr, &eeprom_data);
-            if (ret) { goto err_unlock; }
-            e[cache_addr / 2] = eeprom_data;
-            cache_addr += sizeof(uint16_t);
-        }
-    } else {
-        /* eeprom is an array of 16bit values */
-        for (addr = 0; addr < sz; addr += sizeof(uint16_t)) {
-            uint32_t r;
-
-            iwl_write32(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
-            ret = iwl_poll_bit(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK,
-                               CSR_EEPROM_REG_READ_VALID_MSK, IWL_EEPROM_ACCESS_TIMEOUT);
-            if (ret < 0) {
-                IWL_ERR(trans, "Time out reading EEPROM[%d]\n", addr);
-                goto err_unlock;
-            }
-            r = iwl_read32(trans, CSR_EEPROM_REG);
-            e[addr / 2] = cpu_to_le16(r >> 16);
-        }
+      ret = iwl_read_otp_word(trans, addr, &eeprom_data);
+      if (ret) {
+        goto err_unlock;
+      }
+      e[cache_addr / 2] = eeprom_data;
+      cache_addr += sizeof(uint16_t);
     }
+  } else {
+    /* eeprom is an array of 16bit values */
+    for (addr = 0; addr < sz; addr += sizeof(uint16_t)) {
+      uint32_t r;
 
-    IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n", nvm_is_otp ? "OTP" : "EEPROM");
+      iwl_write32(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
 
-    iwl_eeprom_release_semaphore(trans);
+      ret = iwl_poll_bit(trans, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK,
+                         CSR_EEPROM_REG_READ_VALID_MSK, IWL_EEPROM_ACCESS_TIMEOUT);
+      if (ret < 0) {
+        IWL_ERR(trans, "Time out reading EEPROM[%d]\n", addr);
+        goto err_unlock;
+      }
+      r = iwl_read32(trans, CSR_EEPROM_REG);
+      e[addr / 2] = cpu_to_le16(r >> 16);
+    }
+  }
 
-    *eeprom_size = sz;
-    *eeprom = (uint8_t*)e;
-    return 0;
+  IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n", nvm_is_otp ? "OTP" : "EEPROM");
+
+  iwl_eeprom_release_semaphore(trans);
+
+  *eeprom_size = sz;
+  *eeprom = (uint8_t*)e;
+  return 0;
 
 err_unlock:
-    iwl_eeprom_release_semaphore(trans);
+  iwl_eeprom_release_semaphore(trans);
 err_free:
-    kfree(e);
+  kfree(e);
 
-    return ret;
+  return ret;
 }
 IWL_EXPORT_SYMBOL(iwl_read_eeprom);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-fh.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-fh.h
index 243f69f..3be68c6 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-fh.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-fh.h
@@ -94,14 +94,18 @@
 
 /* Find TFD CB base pointer for given queue */
 static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans* trans, unsigned int chnl) {
-    if (trans->cfg->use_tfh) {
-        WARN_ON_ONCE(chnl >= 64);
-        return TFH_TFDQ_CBB_TABLE + 8 * chnl;
-    }
-    if (chnl < 16) { return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl; }
-    if (chnl < 20) { return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16); }
-    WARN_ON_ONCE(chnl >= 32);
-    return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
+  if (trans->cfg->use_tfh) {
+    WARN_ON_ONCE(chnl >= 64);
+    return TFH_TFDQ_CBB_TABLE + 8 * chnl;
+  }
+  if (chnl < 16) {
+    return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
+  }
+  if (chnl < 20) {
+    return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
+  }
+  WARN_ON_ONCE(chnl >= 32);
+  return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
 }
 
 /* 22000 configuration registers */
@@ -575,7 +579,7 @@
 #define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1)
 #define MQ_RX_NUM_RBDS (MQ_RX_TABLE_SIZE - 1)
 #define RX_POOL_SIZE \
-    (MQ_RX_NUM_RBDS + IWL_MAX_RX_HW_QUEUES * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
+  (MQ_RX_NUM_RBDS + IWL_MAX_RX_HW_QUEUES * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
 /* cb size is the exponent */
 #define RX_QUEUE_CB_SIZE(x) ilog2(x)
 
@@ -594,11 +598,11 @@
  *  which was transferred
  */
 struct iwl_rb_status {
-    __le16 closed_rb_num;
-    __le16 closed_fr_num;
-    __le16 finished_rb_num;
-    __le16 finished_fr_nam;
-    __le32 __unused;
+  __le16 closed_rb_num;
+  __le16 closed_fr_num;
+  __le16 finished_rb_num;
+  __le16 finished_fr_nam;
+  __le32 __unused;
 } __packed;
 
 #define TFD_QUEUE_SIZE_MAX (256)
@@ -613,7 +617,7 @@
 #define IWL_TFH_NUM_TBS 25
 
 static inline uint8_t iwl_get_dma_hi_addr(dma_addr_t addr) {
-    return (sizeof(addr) > sizeof(uint32_t) ? (uint32_t)(addr >> 32) : 0) & 0xF;
+  return (sizeof(addr) > sizeof(uint32_t) ? (uint32_t)(addr >> 32) : 0) & 0xF;
 }
 
 /**
@@ -622,8 +626,8 @@
  * @TB_HI_N_LEN_LEN_MSK: length of the TB
  */
 enum iwl_tfd_tb_hi_n_len {
-    TB_HI_N_LEN_ADDR_HI_MSK = 0xf,
-    TB_HI_N_LEN_LEN_MSK = 0xfff0,
+  TB_HI_N_LEN_ADDR_HI_MSK = 0xf,
+  TB_HI_N_LEN_LEN_MSK = 0xfff0,
 };
 
 /**
@@ -636,8 +640,8 @@
  * @hi_n_len: &enum iwl_tfd_tb_hi_n_len
  */
 struct iwl_tfd_tb {
-    __le32 lo;
-    __le16 hi_n_len;
+  __le32 lo;
+  __le16 hi_n_len;
 } __packed;
 
 /**
@@ -649,8 +653,8 @@
  * @addr 64 bits dma address
  */
 struct iwl_tfh_tb {
-    __le16 tb_len;
-    __le64 addr;
+  __le16 tb_len;
+  __le64 addr;
 } __packed;
 
 /**
@@ -683,10 +687,10 @@
  * @ __pad  padding
  */
 struct iwl_tfd {
-    uint8_t __reserved1[3];
-    uint8_t num_tbs;
-    struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
-    __le32 __pad;
+  uint8_t __reserved1[3];
+  uint8_t num_tbs;
+  struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
+  __le32 __pad;
 } __packed;
 
 /**
@@ -697,9 +701,9 @@
  * @ __pad  padding
  */
 struct iwl_tfh_tfd {
-    __le16 num_tbs;
-    struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS];
-    __le32 __pad;
+  __le16 num_tbs;
+  struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS];
+  __le32 __pad;
 } __packed;
 
 /* Keep Warm Size */
@@ -719,7 +723,7 @@
  *      14-16 - reserved
  */
 struct iwlagn_scd_bc_tbl {
-    __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+  __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
 } __packed;
 
 /**
@@ -730,7 +734,7 @@
  *      14-16 - reserved
  */
 struct iwl_gen3_bc_tbl {
-    __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+  __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
 } __packed;
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_IWL_FH_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.c
index 4188ef1..0493c1b 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.c
@@ -32,321 +32,335 @@
  *
  *****************************************************************************/
 
+#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.h"
+
 #include <zircon/syscalls.h>
 
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-csr.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-debug.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-drv.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-fh.h"
-#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-prph.h"
 
 void iwl_write8(struct iwl_trans* trans, uint32_t ofs, uint8_t val) {
-    iwl_trans_write8(trans, ofs, val);
+  iwl_trans_write8(trans, ofs, val);
 }
 
 void iwl_write32(struct iwl_trans* trans, uint32_t ofs, uint32_t val) {
-    iwl_trans_write32(trans, ofs, val);
+  iwl_trans_write32(trans, ofs, val);
 }
 
 void iwl_write64(struct iwl_trans* trans, uint64_t ofs, uint64_t val) {
-    iwl_trans_write32(trans, ofs, lower_32_bits(val));
-    iwl_trans_write32(trans, ofs + 4, upper_32_bits(val));
+  iwl_trans_write32(trans, ofs, lower_32_bits(val));
+  iwl_trans_write32(trans, ofs + 4, upper_32_bits(val));
 }
 
 uint32_t iwl_read32(struct iwl_trans* trans, uint32_t ofs) {
-    uint32_t val = iwl_trans_read32(trans, ofs);
+  uint32_t val = iwl_trans_read32(trans, ofs);
 
-    return val;
+  return val;
 }
 
 #define IWL_POLL_INTERVAL 10 /* microseconds */
 
 zx_status_t iwl_poll_bit(struct iwl_trans* trans, uint32_t addr, uint32_t bits, uint32_t mask,
                          int timeout) {
-    int t = 0;
+  int t = 0;
 
-    do {
-        if ((iwl_read32(trans, addr) & mask) == (bits & mask)) { return t; }
-        zx_nanosleep(zx_deadline_after(ZX_USEC(IWL_POLL_INTERVAL)));
-        t += IWL_POLL_INTERVAL;
-    } while (t < timeout);
+  do {
+    if ((iwl_read32(trans, addr) & mask) == (bits & mask)) {
+      return t;
+    }
+    zx_nanosleep(zx_deadline_after(ZX_USEC(IWL_POLL_INTERVAL)));
+    t += IWL_POLL_INTERVAL;
+  } while (t < timeout);
 
-    return ZX_ERR_TIMED_OUT;
+  return ZX_ERR_TIMED_OUT;
 }
 
 uint32_t iwl_read_direct32(struct iwl_trans* trans, uint32_t reg) {
-    uint32_t value = 0x5a5a5a5a;
-    unsigned long flags;
-    if (iwl_trans_grab_nic_access(trans, &flags)) {
-        value = iwl_read32(trans, reg);
-        iwl_trans_release_nic_access(trans, &flags);
-    }
+  uint32_t value = 0x5a5a5a5a;
+  unsigned long flags;
+  if (iwl_trans_grab_nic_access(trans, &flags)) {
+    value = iwl_read32(trans, reg);
+    iwl_trans_release_nic_access(trans, &flags);
+  }
 
-    return value;
+  return value;
 }
 
 void iwl_write_direct32(struct iwl_trans* trans, uint32_t reg, uint32_t value) {
-    unsigned long flags;
+  unsigned long flags;
 
-    if (iwl_trans_grab_nic_access(trans, &flags)) {
-        iwl_write32(trans, reg, value);
-        iwl_trans_release_nic_access(trans, &flags);
-    }
+  if (iwl_trans_grab_nic_access(trans, &flags)) {
+    iwl_write32(trans, reg, value);
+    iwl_trans_release_nic_access(trans, &flags);
+  }
 }
 
 void iwl_write_direct64(struct iwl_trans* trans, uint64_t reg, uint64_t value) {
-    unsigned long flags;
+  unsigned long flags;
 
-    if (iwl_trans_grab_nic_access(trans, &flags)) {
-        iwl_write64(trans, reg, value);
-        iwl_trans_release_nic_access(trans, &flags);
-    }
+  if (iwl_trans_grab_nic_access(trans, &flags)) {
+    iwl_write64(trans, reg, value);
+    iwl_trans_release_nic_access(trans, &flags);
+  }
 }
 
-zx_status_t iwl_poll_direct_bit(struct iwl_trans* trans, uint32_t addr, uint32_t mask, int timeout) {
-    int t = 0;
+zx_status_t iwl_poll_direct_bit(struct iwl_trans* trans, uint32_t addr, uint32_t mask,
+                                int timeout) {
+  int t = 0;
 
-    do {
-        if ((iwl_read_direct32(trans, addr) & mask) == mask) { return t; }
-        zx_nanosleep(zx_deadline_after(ZX_USEC(IWL_POLL_INTERVAL)));
-        t += IWL_POLL_INTERVAL;
-    } while (t < timeout);
+  do {
+    if ((iwl_read_direct32(trans, addr) & mask) == mask) {
+      return t;
+    }
+    zx_nanosleep(zx_deadline_after(ZX_USEC(IWL_POLL_INTERVAL)));
+    t += IWL_POLL_INTERVAL;
+  } while (t < timeout);
 
-    return ZX_ERR_TIMED_OUT;
+  return ZX_ERR_TIMED_OUT;
 }
 
 uint32_t iwl_read_prph_no_grab(struct iwl_trans* trans, uint32_t ofs) {
-    uint32_t val = iwl_trans_read_prph(trans, ofs);
-    return val;
+  uint32_t val = iwl_trans_read_prph(trans, ofs);
+  return val;
 }
 
 void iwl_write_prph_no_grab(struct iwl_trans* trans, uint32_t ofs, uint32_t val) {
-    iwl_trans_write_prph(trans, ofs, val);
+  iwl_trans_write_prph(trans, ofs, val);
 }
 
 void iwl_write_prph64_no_grab(struct iwl_trans* trans, uint64_t ofs, uint64_t val) {
-    iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
-    iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
+  iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
+  iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
 }
 
 uint32_t iwl_read_prph(struct iwl_trans* trans, uint32_t ofs) {
-    unsigned long flags;
-    uint32_t val = 0x5a5a5a5a;
+  unsigned long flags;
+  uint32_t val = 0x5a5a5a5a;
 
-    if (iwl_trans_grab_nic_access(trans, &flags)) {
-        val = iwl_read_prph_no_grab(trans, ofs);
-        iwl_trans_release_nic_access(trans, &flags);
-    }
-    return val;
+  if (iwl_trans_grab_nic_access(trans, &flags)) {
+    val = iwl_read_prph_no_grab(trans, ofs);
+    iwl_trans_release_nic_access(trans, &flags);
+  }
+  return val;
 }
 
 void iwl_write_prph(struct iwl_trans* trans, uint32_t ofs, uint32_t val) {
-    unsigned long flags;
+  unsigned long flags;
 
-    if (iwl_trans_grab_nic_access(trans, &flags)) {
-        iwl_write_prph_no_grab(trans, ofs, val);
-        iwl_trans_release_nic_access(trans, &flags);
-    }
+  if (iwl_trans_grab_nic_access(trans, &flags)) {
+    iwl_write_prph_no_grab(trans, ofs, val);
+    iwl_trans_release_nic_access(trans, &flags);
+  }
 }
 
 zx_status_t iwl_poll_prph_bit(struct iwl_trans* trans, uint32_t addr, uint32_t bits, uint32_t mask,
                               int timeout) {
-    int t = 0;
+  int t = 0;
 
-    do {
-        if ((iwl_read_prph(trans, addr) & mask) == (bits & mask)) { return t; }
-        zx_nanosleep(zx_deadline_after(ZX_USEC(IWL_POLL_INTERVAL)));
-        t += IWL_POLL_INTERVAL;
-    } while (t < timeout);
+  do {
+    if ((iwl_read_prph(trans, addr) & mask) == (bits & mask)) {
+      return t;
+    }
+    zx_nanosleep(zx_deadline_after(ZX_USEC(IWL_POLL_INTERVAL)));
+    t += IWL_POLL_INTERVAL;
+  } while (t < timeout);
 
-    return ZX_ERR_TIMED_OUT;
+  return ZX_ERR_TIMED_OUT;
 }
 
 void iwl_set_bits_prph(struct iwl_trans* trans, uint32_t ofs, uint32_t mask) {
-    unsigned long flags;
+  unsigned long flags;
 
-    if (iwl_trans_grab_nic_access(trans, &flags)) {
-        iwl_write_prph_no_grab(trans, ofs, iwl_read_prph_no_grab(trans, ofs) | mask);
-        iwl_trans_release_nic_access(trans, &flags);
-    }
+  if (iwl_trans_grab_nic_access(trans, &flags)) {
+    iwl_write_prph_no_grab(trans, ofs, iwl_read_prph_no_grab(trans, ofs) | mask);
+    iwl_trans_release_nic_access(trans, &flags);
+  }
 }
 
 void iwl_set_bits_mask_prph(struct iwl_trans* trans, uint32_t ofs, uint32_t bits, uint32_t mask) {
-    unsigned long flags;
+  unsigned long flags;
 
-    if (iwl_trans_grab_nic_access(trans, &flags)) {
-        iwl_write_prph_no_grab(trans, ofs, (iwl_read_prph_no_grab(trans, ofs) & mask) | bits);
-        iwl_trans_release_nic_access(trans, &flags);
-    }
+  if (iwl_trans_grab_nic_access(trans, &flags)) {
+    iwl_write_prph_no_grab(trans, ofs, (iwl_read_prph_no_grab(trans, ofs) & mask) | bits);
+    iwl_trans_release_nic_access(trans, &flags);
+  }
 }
 
 void iwl_clear_bits_prph(struct iwl_trans* trans, uint32_t ofs, uint32_t mask) {
-    unsigned long flags;
-    uint32_t val;
+  unsigned long flags;
+  uint32_t val;
 
-    if (iwl_trans_grab_nic_access(trans, &flags)) {
-        val = iwl_read_prph_no_grab(trans, ofs);
-        iwl_write_prph_no_grab(trans, ofs, (val & ~mask));
-        iwl_trans_release_nic_access(trans, &flags);
-    }
+  if (iwl_trans_grab_nic_access(trans, &flags)) {
+    val = iwl_read_prph_no_grab(trans, ofs);
+    iwl_write_prph_no_grab(trans, ofs, (val & ~mask));
+    iwl_trans_release_nic_access(trans, &flags);
+  }
 }
 
 void iwl_force_nmi(struct iwl_trans* trans) {
-    if (trans->cfg->device_family < IWL_DEVICE_FAMILY_9000) {
-        iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL_DRV);
-    } else {
-        iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK);
-    }
+  if (trans->cfg->device_family < IWL_DEVICE_FAMILY_9000) {
+    iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL_DRV);
+  } else {
+    iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK);
+  }
 }
 
 static const char* get_rfh_string(int cmd) {
 #define IWL_CMD(x) \
-    case x:        \
-        return #x
-#define IWL_CMD_MQ(arg, reg, q)         \
-    {                                   \
-        if (arg == reg(q)) return #reg; \
-    }
+  case x:          \
+    return #x
+#define IWL_CMD_MQ(arg, reg, q) \
+  {                             \
+    if (arg == reg(q))          \
+      return #reg;              \
+  }
 
-    int i;
+  int i;
 
-    for (i = 0; i < IWL_MAX_RX_HW_QUEUES; i++) {
-        IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_BA_LSB, i);
-        IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i);
-        IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i);
-        IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i);
-    }
+  for (i = 0; i < IWL_MAX_RX_HW_QUEUES; i++) {
+    IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_BA_LSB, i);
+    IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i);
+    IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i);
+    IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i);
+  }
 
-    switch (cmd) {
-        IWL_CMD(RFH_RXF_DMA_CFG);
-        IWL_CMD(RFH_GEN_CFG);
-        IWL_CMD(RFH_GEN_STATUS);
-        IWL_CMD(FH_TSSR_TX_STATUS_REG);
-        IWL_CMD(FH_TSSR_TX_ERROR_REG);
+  switch (cmd) {
+    IWL_CMD(RFH_RXF_DMA_CFG);
+    IWL_CMD(RFH_GEN_CFG);
+    IWL_CMD(RFH_GEN_STATUS);
+    IWL_CMD(FH_TSSR_TX_STATUS_REG);
+    IWL_CMD(FH_TSSR_TX_ERROR_REG);
     default:
-        return "UNKNOWN";
-    }
+      return "UNKNOWN";
+  }
 #undef IWL_CMD_MQ
 }
 
 struct reg {
-    uint32_t addr;
-    bool is64;
+  uint32_t addr;
+  bool is64;
 };
 
 static int iwl_dump_rfh(struct iwl_trans* trans, char** buf) {
-    size_t i;
-    int q, num_q = trans->num_rx_queues;
-    static const uint32_t rfh_tbl[] = {
-        RFH_RXF_DMA_CFG, RFH_GEN_CFG, RFH_GEN_STATUS, FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_ERROR_REG,
-    };
-    static const struct reg rfh_mq_tbl[] = {
-        {RFH_Q0_FRBDCB_BA_LSB, true},
-        {RFH_Q0_FRBDCB_WIDX, false},
-        {RFH_Q0_FRBDCB_RIDX, false},
-        {RFH_Q0_URBD_STTS_WPTR_LSB, true},
-    };
+  size_t i;
+  int q, num_q = trans->num_rx_queues;
+  static const uint32_t rfh_tbl[] = {
+      RFH_RXF_DMA_CFG, RFH_GEN_CFG, RFH_GEN_STATUS, FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_ERROR_REG,
+  };
+  static const struct reg rfh_mq_tbl[] = {
+      {RFH_Q0_FRBDCB_BA_LSB, true},
+      {RFH_Q0_FRBDCB_WIDX, false},
+      {RFH_Q0_FRBDCB_RIDX, false},
+      {RFH_Q0_URBD_STTS_WPTR_LSB, true},
+  };
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    if (buf) {
-        int pos = 0;
-        /*
-         * Register (up to 34 for name + 8 blank/q for MQ): 40 chars
-         * Colon + space: 2 characters
-         * 0X%08x: 10 characters
-         * New line: 1 character
-         * Total of 53 characters
-         */
-        size_t bufsz = ARRAY_SIZE(rfh_tbl) * 53 + ARRAY_SIZE(rfh_mq_tbl) * 53 * num_q + 40;
+  if (buf) {
+    int pos = 0;
+    /*
+     * Register (up to 34 for name + 8 blank/q for MQ): 40 chars
+     * Colon + space: 2 characters
+     * 0X%08x: 10 characters
+     * New line: 1 character
+     * Total of 53 characters
+     */
+    size_t bufsz = ARRAY_SIZE(rfh_tbl) * 53 + ARRAY_SIZE(rfh_mq_tbl) * 53 * num_q + 40;
 
-        *buf = kmalloc(bufsz, GFP_KERNEL);
-        if (!*buf) { return -ENOMEM; }
-
-        pos += scnprintf(*buf + pos, bufsz - pos, "RFH register values:\n");
-
-        for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++)
-            pos += scnprintf(*buf + pos, bufsz - pos, "%40s: 0X%08x\n", get_rfh_string(rfh_tbl[i]),
-                             iwl_read_prph(trans, rfh_tbl[i]));
-
-        for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++)
-            for (q = 0; q < num_q; q++) {
-                uint32_t addr = rfh_mq_tbl[i].addr;
-
-                addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
-                pos += scnprintf(*buf + pos, bufsz - pos, "%34s(q %2d): 0X%08x\n",
-                                 get_rfh_string(addr), q, iwl_read_prph(trans, addr));
-            }
-
-        return pos;
+    *buf = kmalloc(bufsz, GFP_KERNEL);
+    if (!*buf) {
+      return -ENOMEM;
     }
-#endif
 
-    IWL_ERR(trans, "RFH register values:\n");
+    pos += scnprintf(*buf + pos, bufsz - pos, "RFH register values:\n");
+
     for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++)
-        IWL_ERR(trans, "  %34s: 0X%08x\n", get_rfh_string(rfh_tbl[i]),
-                iwl_read_prph(trans, rfh_tbl[i]));
+      pos += scnprintf(*buf + pos, bufsz - pos, "%40s: 0X%08x\n", get_rfh_string(rfh_tbl[i]),
+                       iwl_read_prph(trans, rfh_tbl[i]));
 
     for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++)
-        for (q = 0; q < num_q; q++) {
-            uint32_t addr = rfh_mq_tbl[i].addr;
+      for (q = 0; q < num_q; q++) {
+        uint32_t addr = rfh_mq_tbl[i].addr;
 
-            addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
-            IWL_ERR(trans, "  %34s(q %d): 0X%08x\n", get_rfh_string(addr), q,
-                    iwl_read_prph(trans, addr));
-        }
+        addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
+        pos += scnprintf(*buf + pos, bufsz - pos, "%34s(q %2d): 0X%08x\n", get_rfh_string(addr), q,
+                         iwl_read_prph(trans, addr));
+      }
 
-    return 0;
+    return pos;
+  }
+#endif
+
+  IWL_ERR(trans, "RFH register values:\n");
+  for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++)
+    IWL_ERR(trans, "  %34s: 0X%08x\n", get_rfh_string(rfh_tbl[i]),
+            iwl_read_prph(trans, rfh_tbl[i]));
+
+  for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++)
+    for (q = 0; q < num_q; q++) {
+      uint32_t addr = rfh_mq_tbl[i].addr;
+
+      addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
+      IWL_ERR(trans, "  %34s(q %d): 0X%08x\n", get_rfh_string(addr), q, iwl_read_prph(trans, addr));
+    }
+
+  return 0;
 }
 
 static const char* get_fh_string(int cmd) {
-    switch (cmd) {
-        IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
-        IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
-        IWL_CMD(FH_RSCSR_CHNL0_WPTR);
-        IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
-        IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
-        IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
-        IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
-        IWL_CMD(FH_TSSR_TX_STATUS_REG);
-        IWL_CMD(FH_TSSR_TX_ERROR_REG);
+  switch (cmd) {
+    IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+    IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+    IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+    IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+    IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+    IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+    IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+    IWL_CMD(FH_TSSR_TX_STATUS_REG);
+    IWL_CMD(FH_TSSR_TX_ERROR_REG);
     default:
-        return "UNKNOWN";
-    }
+      return "UNKNOWN";
+  }
 #undef IWL_CMD
 }
 
 int iwl_dump_fh(struct iwl_trans* trans, char** buf) {
-    size_t i;
-    static const uint32_t fh_tbl[] = {
-        FH_RSCSR_CHNL0_STTS_WPTR_REG,      FH_RSCSR_CHNL0_RBDCB_BASE_REG, FH_RSCSR_CHNL0_WPTR,
-        FH_MEM_RCSR_CHNL0_CONFIG_REG,      FH_MEM_RSSR_SHARED_CTRL_REG,   FH_MEM_RSSR_RX_STATUS_REG,
-        FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, FH_TSSR_TX_STATUS_REG,         FH_TSSR_TX_ERROR_REG};
+  size_t i;
+  static const uint32_t fh_tbl[] = {
+      FH_RSCSR_CHNL0_STTS_WPTR_REG,      FH_RSCSR_CHNL0_RBDCB_BASE_REG, FH_RSCSR_CHNL0_WPTR,
+      FH_MEM_RCSR_CHNL0_CONFIG_REG,      FH_MEM_RSSR_SHARED_CTRL_REG,   FH_MEM_RSSR_RX_STATUS_REG,
+      FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, FH_TSSR_TX_STATUS_REG,         FH_TSSR_TX_ERROR_REG};
 
-    if (trans->cfg->mq_rx_supported) { return iwl_dump_rfh(trans, buf); }
+  if (trans->cfg->mq_rx_supported) {
+    return iwl_dump_rfh(trans, buf);
+  }
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    if (buf) {
-        int pos = 0;
-        size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+  if (buf) {
+    int pos = 0;
+    size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
 
-        *buf = kmalloc(bufsz, GFP_KERNEL);
-        if (!*buf) { return -ENOMEM; }
-
-        pos += scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
-
-        for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
-            pos += scnprintf(*buf + pos, bufsz - pos, "  %34s: 0X%08x\n", get_fh_string(fh_tbl[i]),
-                             iwl_read_direct32(trans, fh_tbl[i]));
-
-        return pos;
+    *buf = kmalloc(bufsz, GFP_KERNEL);
+    if (!*buf) {
+      return -ENOMEM;
     }
+
+    pos += scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
+
+    for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+      pos += scnprintf(*buf + pos, bufsz - pos, "  %34s: 0X%08x\n", get_fh_string(fh_tbl[i]),
+                       iwl_read_direct32(trans, fh_tbl[i]));
+
+    return pos;
+  }
 #endif
 
-    IWL_ERR(trans, "FH register values:\n");
-    for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
-        IWL_ERR(trans, "  %34s: 0X%08x\n", get_fh_string(fh_tbl[i]),
-                iwl_read_direct32(trans, fh_tbl[i]));
+  IWL_ERR(trans, "FH register values:\n");
+  for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+    IWL_ERR(trans, "  %34s: 0X%08x\n", get_fh_string(fh_tbl[i]),
+            iwl_read_direct32(trans, fh_tbl[i]));
 
-    return 0;
+  return 0;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.h
index c603c19..32ea208 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.h
@@ -34,7 +34,7 @@
 
 #include <zircon/types.h>
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-devtrace.h"
 #endif  // NEEDS_PORTING
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h"
@@ -45,17 +45,16 @@
 uint32_t iwl_read32(struct iwl_trans* trans, uint32_t ofs);
 
 static inline void iwl_set_bit(struct iwl_trans* trans, uint32_t reg, uint32_t mask) {
-    iwl_trans_set_bits_mask(trans, reg, mask, mask);
+  iwl_trans_set_bits_mask(trans, reg, mask, mask);
 }
 
 static inline void iwl_clear_bit(struct iwl_trans* trans, uint32_t reg, uint32_t mask) {
-    iwl_trans_set_bits_mask(trans, reg, mask, 0);
+  iwl_trans_set_bits_mask(trans, reg, mask, 0);
 }
 
 zx_status_t iwl_poll_bit(struct iwl_trans* trans, uint32_t addr, uint32_t bits, uint32_t mask,
                          int timeout);
-zx_status_t iwl_poll_direct_bit(struct iwl_trans* trans, uint32_t addr, uint32_t mask,
-                         int timeout);
+zx_status_t iwl_poll_direct_bit(struct iwl_trans* trans, uint32_t addr, uint32_t mask, int timeout);
 
 uint32_t iwl_read_direct32(struct iwl_trans* trans, uint32_t reg);
 void iwl_write_direct32(struct iwl_trans* trans, uint32_t reg, uint32_t value);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-modparams.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-modparams.h
index 356a55a..8a27e82 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-modparams.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-modparams.h
@@ -41,33 +41,33 @@
 extern struct iwl_mod_params iwlwifi_mod_params;
 
 enum iwl_power_level {
-    IWL_POWER_INDEX_1,
-    IWL_POWER_INDEX_2,
-    IWL_POWER_INDEX_3,
-    IWL_POWER_INDEX_4,
-    IWL_POWER_INDEX_5,
-    IWL_POWER_NUM
+  IWL_POWER_INDEX_1,
+  IWL_POWER_INDEX_2,
+  IWL_POWER_INDEX_3,
+  IWL_POWER_INDEX_4,
+  IWL_POWER_INDEX_5,
+  IWL_POWER_NUM
 };
 
 enum iwl_disable_11n {
-    IWL_DISABLE_HT_ALL = BIT(0),
-    IWL_DISABLE_HT_TXAGG = BIT(1),
-    IWL_DISABLE_HT_RXAGG = BIT(2),
-    IWL_ENABLE_HT_TXAGG = BIT(3),
+  IWL_DISABLE_HT_ALL = BIT(0),
+  IWL_DISABLE_HT_TXAGG = BIT(1),
+  IWL_DISABLE_HT_RXAGG = BIT(2),
+  IWL_ENABLE_HT_TXAGG = BIT(3),
 };
 
 enum iwl_amsdu_size {
-    IWL_AMSDU_DEF = 0,
-    IWL_AMSDU_4K = 1,
-    IWL_AMSDU_8K = 2,
-    IWL_AMSDU_12K = 3,
-    /* Add 2K at the end to avoid breaking current API */
-    IWL_AMSDU_2K = 4,
+  IWL_AMSDU_DEF = 0,
+  IWL_AMSDU_4K = 1,
+  IWL_AMSDU_8K = 2,
+  IWL_AMSDU_12K = 3,
+  /* Add 2K at the end to avoid breaking current API */
+  IWL_AMSDU_2K = 4,
 };
 
 enum iwl_uapsd_disable {
-    IWL_DISABLE_UAPSD_BSS = BIT(0),
-    IWL_DISABLE_UAPSD_P2P_CLIENT = BIT(1),
+  IWL_DISABLE_UAPSD_BSS = BIT(0),
+  IWL_DISABLE_UAPSD_P2P_CLIENT = BIT(1),
 };
 
 /**
@@ -101,38 +101,38 @@
  * @enable_ini: enable new FW debug infratructure (INI TLVs)
  */
 struct iwl_mod_params {
-    int swcrypto;
-    unsigned int disable_11n;
-    int amsdu_size;
-    bool fw_restart;
-    bool bt_coex_active;
-    int led_mode;
-    bool power_save;
-    int power_level;
+  int swcrypto;
+  unsigned int disable_11n;
+  int amsdu_size;
+  bool fw_restart;
+  bool bt_coex_active;
+  int led_mode;
+  bool power_save;
+  int power_level;
 #ifdef CPTCFG_IWLWIFI_DEBUG
-    uint32_t debug_level;
+  uint32_t debug_level;
 #endif
-    int antenna_coupling;
+  int antenna_coupling;
 #if IS_ENABLED(CPTCFG_IWLXVT)
-    bool xvt_default_mode;
+  bool xvt_default_mode;
 #endif
 #if IS_ENABLED(CPTCFG_IWLTEST)
-    bool trans_test;
+  bool trans_test;
 #endif
-    char* nvm_file;
-    uint32_t uapsd_disable;
-    bool d0i3_disable;
-    unsigned int d0i3_timeout;
-    bool lar_disable;
-    bool fw_monitor;
-    bool disable_11ac;
-    /**
-     * @disable_11ax: disable HE capabilities, default = false
-     */
-    bool disable_11ax;
-    bool disable_msix;
-    bool remove_when_gone;
-    bool enable_ini;
+  char* nvm_file;
+  uint32_t uapsd_disable;
+  bool d0i3_disable;
+  unsigned int d0i3_timeout;
+  bool lar_disable;
+  bool fw_monitor;
+  bool disable_11ac;
+  /**
+   * @disable_11ax: disable HE capabilities, default = false
+   */
+  bool disable_11ax;
+  bool disable_msix;
+  bool remove_when_gone;
+  bool enable_ini;
 };
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_IWL_MODPARAMS_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-nvm-parse.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-nvm-parse.c
index 57efea4..a87b8bd 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-nvm-parse.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-nvm-parse.c
@@ -32,6 +32,8 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
+#include "iwl-nvm-parse.h"
+
 #include <linux/etherdevice.h>
 #include <linux/export.h>
 #include <linux/firmware.h>
@@ -48,51 +50,50 @@
 #include "iwl-drv.h"
 #include "iwl-io.h"
 #include "iwl-modparams.h"
-#include "iwl-nvm-parse.h"
 #include "iwl-prph.h"
 
 /* NVM offsets (in words) definitions */
 enum nvm_offsets {
-    /* NVM HW-Section offset (in words) definitions */
-    SUBSYSTEM_ID = 0x0A,
-    HW_ADDR = 0x15,
+  /* NVM HW-Section offset (in words) definitions */
+  SUBSYSTEM_ID = 0x0A,
+  HW_ADDR = 0x15,
 
-    /* NVM SW-Section offset (in words) definitions */
-    NVM_SW_SECTION = 0x1C0,
-    NVM_VERSION = 0,
-    RADIO_CFG = 1,
-    SKU = 2,
-    N_HW_ADDRS = 3,
-    NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
+  /* NVM SW-Section offset (in words) definitions */
+  NVM_SW_SECTION = 0x1C0,
+  NVM_VERSION = 0,
+  RADIO_CFG = 1,
+  SKU = 2,
+  N_HW_ADDRS = 3,
+  NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
 
-    /* NVM REGULATORY -Section offset (in words) definitions */
-    NVM_CHANNELS_SDP = 0,
+  /* NVM REGULATORY -Section offset (in words) definitions */
+  NVM_CHANNELS_SDP = 0,
 };
 
 enum ext_nvm_offsets {
-    /* NVM HW-Section offset (in words) definitions */
-    MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,
+  /* NVM HW-Section offset (in words) definitions */
+  MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,
 
-    /* NVM SW-Section offset (in words) definitions */
-    NVM_VERSION_EXT_NVM = 0,
-    RADIO_CFG_FAMILY_EXT_NVM = 0,
-    SKU_FAMILY_8000 = 2,
-    N_HW_ADDRS_FAMILY_8000 = 3,
+  /* NVM SW-Section offset (in words) definitions */
+  NVM_VERSION_EXT_NVM = 0,
+  RADIO_CFG_FAMILY_EXT_NVM = 0,
+  SKU_FAMILY_8000 = 2,
+  N_HW_ADDRS_FAMILY_8000 = 3,
 
-    /* NVM REGULATORY -Section offset (in words) definitions */
-    NVM_CHANNELS_EXTENDED = 0,
-    NVM_LAR_OFFSET_OLD = 0x4C7,
-    NVM_LAR_OFFSET = 0x507,
-    NVM_LAR_ENABLED = 0x7,
+  /* NVM REGULATORY -Section offset (in words) definitions */
+  NVM_CHANNELS_EXTENDED = 0,
+  NVM_LAR_OFFSET_OLD = 0x4C7,
+  NVM_LAR_OFFSET = 0x507,
+  NVM_LAR_ENABLED = 0x7,
 };
 
 /* SKU Capabilities (actual values from NVM definition) */
 enum nvm_sku_bits {
-    NVM_SKU_CAP_BAND_24GHZ = BIT(0),
-    NVM_SKU_CAP_BAND_52GHZ = BIT(1),
-    NVM_SKU_CAP_11N_ENABLE = BIT(2),
-    NVM_SKU_CAP_11AC_ENABLE = BIT(3),
-    NVM_SKU_CAP_MIMO_DISABLE = BIT(5),
+  NVM_SKU_CAP_BAND_24GHZ = BIT(0),
+  NVM_SKU_CAP_BAND_52GHZ = BIT(1),
+  NVM_SKU_CAP_11N_ENABLE = BIT(2),
+  NVM_SKU_CAP_11AC_ENABLE = BIT(3),
+  NVM_SKU_CAP_MIMO_DISABLE = BIT(5),
 };
 
 /*
@@ -210,231 +211,255 @@
  * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
  */
 enum iwl_nvm_channel_flags {
-    NVM_CHANNEL_VALID = BIT(0),
-    NVM_CHANNEL_IBSS = BIT(1),
-    NVM_CHANNEL_ACTIVE = BIT(3),
-    NVM_CHANNEL_RADAR = BIT(4),
-    NVM_CHANNEL_INDOOR_ONLY = BIT(5),
-    NVM_CHANNEL_GO_CONCURRENT = BIT(6),
-    NVM_CHANNEL_UNIFORM = BIT(7),
-    NVM_CHANNEL_20MHZ = BIT(8),
-    NVM_CHANNEL_40MHZ = BIT(9),
-    NVM_CHANNEL_80MHZ = BIT(10),
-    NVM_CHANNEL_160MHZ = BIT(11),
-    NVM_CHANNEL_DC_HIGH = BIT(12),
+  NVM_CHANNEL_VALID = BIT(0),
+  NVM_CHANNEL_IBSS = BIT(1),
+  NVM_CHANNEL_ACTIVE = BIT(3),
+  NVM_CHANNEL_RADAR = BIT(4),
+  NVM_CHANNEL_INDOOR_ONLY = BIT(5),
+  NVM_CHANNEL_GO_CONCURRENT = BIT(6),
+  NVM_CHANNEL_UNIFORM = BIT(7),
+  NVM_CHANNEL_20MHZ = BIT(8),
+  NVM_CHANNEL_40MHZ = BIT(9),
+  NVM_CHANNEL_80MHZ = BIT(10),
+  NVM_CHANNEL_160MHZ = BIT(11),
+  NVM_CHANNEL_DC_HIGH = BIT(12),
 };
 
 static inline void iwl_nvm_print_channel_flags(struct device* dev, uint32_t level, int chan,
                                                uint16_t flags) {
 #define CHECK_AND_PRINT_I(x) ((flags & NVM_CHANNEL_##x) ? " " #x : "")
 
-    if (!(flags & NVM_CHANNEL_VALID)) {
-        IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n", chan, flags);
-        return;
-    }
+  if (!(flags & NVM_CHANNEL_VALID)) {
+    IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n", chan, flags);
+    return;
+  }
 
-    /* Note: already can print up to 101 characters, 110 is the limit! */
-    IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n", chan, flags,
-                  CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(IBSS), CHECK_AND_PRINT_I(ACTIVE),
-                  CHECK_AND_PRINT_I(RADAR), CHECK_AND_PRINT_I(INDOOR_ONLY),
-                  CHECK_AND_PRINT_I(GO_CONCURRENT), CHECK_AND_PRINT_I(UNIFORM),
-                  CHECK_AND_PRINT_I(20MHZ), CHECK_AND_PRINT_I(40MHZ), CHECK_AND_PRINT_I(80MHZ),
-                  CHECK_AND_PRINT_I(160MHZ), CHECK_AND_PRINT_I(DC_HIGH));
+  /* Note: already can print up to 101 characters, 110 is the limit! */
+  IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n", chan, flags,
+                CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(IBSS), CHECK_AND_PRINT_I(ACTIVE),
+                CHECK_AND_PRINT_I(RADAR), CHECK_AND_PRINT_I(INDOOR_ONLY),
+                CHECK_AND_PRINT_I(GO_CONCURRENT), CHECK_AND_PRINT_I(UNIFORM),
+                CHECK_AND_PRINT_I(20MHZ), CHECK_AND_PRINT_I(40MHZ), CHECK_AND_PRINT_I(80MHZ),
+                CHECK_AND_PRINT_I(160MHZ), CHECK_AND_PRINT_I(DC_HIGH));
 #undef CHECK_AND_PRINT_I
 }
 
 static uint32_t iwl_get_channel_flags(uint8_t ch_num, int ch_idx, bool is_5ghz, uint16_t nvm_flags,
                                       const struct iwl_cfg* cfg) {
-    uint32_t flags = IEEE80211_CHAN_NO_HT40;
-    uint32_t last_5ghz_ht = LAST_5GHZ_HT;
+  uint32_t flags = IEEE80211_CHAN_NO_HT40;
+  uint32_t last_5ghz_ht = LAST_5GHZ_HT;
 
-    if (cfg->nvm_type == IWL_NVM_EXT) { last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; }
+  if (cfg->nvm_type == IWL_NVM_EXT) {
+    last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
+  }
 
-    if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
-        if (ch_num <= LAST_2GHZ_HT_PLUS) { flags &= ~IEEE80211_CHAN_NO_HT40PLUS; }
-        if (ch_num >= FIRST_2GHZ_HT_MINUS) { flags &= ~IEEE80211_CHAN_NO_HT40MINUS; }
-    } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
-        if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) {
-            flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
-        } else {
-            flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
-        }
+  if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+    if (ch_num <= LAST_2GHZ_HT_PLUS) {
+      flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
     }
-    if (!(nvm_flags & NVM_CHANNEL_80MHZ)) { flags |= IEEE80211_CHAN_NO_80MHZ; }
-    if (!(nvm_flags & NVM_CHANNEL_160MHZ)) { flags |= IEEE80211_CHAN_NO_160MHZ; }
-
-    if (!(nvm_flags & NVM_CHANNEL_IBSS)) { flags |= IEEE80211_CHAN_NO_IR; }
-
-    if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) { flags |= IEEE80211_CHAN_NO_IR; }
-
-    if (nvm_flags & NVM_CHANNEL_RADAR) { flags |= IEEE80211_CHAN_RADAR; }
-
-    if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) { flags |= IEEE80211_CHAN_INDOOR_ONLY; }
-
-    /* Set the GO concurrent flag only in case that NO_IR is set.
-     * Otherwise it is meaningless
-     */
-    if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && (flags & IEEE80211_CHAN_NO_IR)) {
-        flags |= IEEE80211_CHAN_IR_CONCURRENT;
+    if (ch_num >= FIRST_2GHZ_HT_MINUS) {
+      flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
     }
+  } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+    if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) {
+      flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+    } else {
+      flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+    }
+  }
+  if (!(nvm_flags & NVM_CHANNEL_80MHZ)) {
+    flags |= IEEE80211_CHAN_NO_80MHZ;
+  }
+  if (!(nvm_flags & NVM_CHANNEL_160MHZ)) {
+    flags |= IEEE80211_CHAN_NO_160MHZ;
+  }
 
-    return flags;
+  if (!(nvm_flags & NVM_CHANNEL_IBSS)) {
+    flags |= IEEE80211_CHAN_NO_IR;
+  }
+
+  if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) {
+    flags |= IEEE80211_CHAN_NO_IR;
+  }
+
+  if (nvm_flags & NVM_CHANNEL_RADAR) {
+    flags |= IEEE80211_CHAN_RADAR;
+  }
+
+  if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) {
+    flags |= IEEE80211_CHAN_INDOOR_ONLY;
+  }
+
+  /* Set the GO concurrent flag only in case that NO_IR is set.
+   * Otherwise it is meaningless
+   */
+  if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && (flags & IEEE80211_CHAN_NO_IR)) {
+    flags |= IEEE80211_CHAN_IR_CONCURRENT;
+  }
+
+  return flags;
 }
 
 static int iwl_init_channel_map(struct device* dev, const struct iwl_cfg* cfg,
                                 struct iwl_nvm_data* data, const __le16* const nvm_ch_flags,
                                 uint32_t sbands_flags) {
-    int ch_idx;
-    int n_channels = 0;
-    struct ieee80211_channel* channel;
-    uint16_t ch_flags;
-    int num_of_ch, num_2ghz_channels;
-    const uint8_t* nvm_chan;
+  int ch_idx;
+  int n_channels = 0;
+  struct ieee80211_channel* channel;
+  uint16_t ch_flags;
+  int num_of_ch, num_2ghz_channels;
+  const uint8_t* nvm_chan;
 
-    if (cfg->nvm_type != IWL_NVM_EXT) {
-        num_of_ch = IWL_NVM_NUM_CHANNELS;
-        nvm_chan = &iwl_nvm_channels[0];
-        num_2ghz_channels = NUM_2GHZ_CHANNELS;
-    } else {
-        num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
-        nvm_chan = &iwl_ext_nvm_channels[0];
-        num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
+  if (cfg->nvm_type != IWL_NVM_EXT) {
+    num_of_ch = IWL_NVM_NUM_CHANNELS;
+    nvm_chan = &iwl_nvm_channels[0];
+    num_2ghz_channels = NUM_2GHZ_CHANNELS;
+  } else {
+    num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
+    nvm_chan = &iwl_ext_nvm_channels[0];
+    num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
+  }
+
+  for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+    bool is_5ghz = (ch_idx >= num_2ghz_channels);
+
+    ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+
+    if (is_5ghz && !data->sku_cap_band_52ghz_enable) {
+      continue;
     }
 
-    for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
-        bool is_5ghz = (ch_idx >= num_2ghz_channels);
-
-        ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
-
-        if (is_5ghz && !data->sku_cap_band_52ghz_enable) { continue; }
-
-        /* workaround to disable wide channels in 5GHz */
-        if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) && is_5ghz) {
-            ch_flags &= ~(NVM_CHANNEL_40MHZ | NVM_CHANNEL_80MHZ | NVM_CHANNEL_160MHZ);
-        }
-
-        if (ch_flags & NVM_CHANNEL_160MHZ) { data->vht160_supported = true; }
-
-        if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) && !(ch_flags & NVM_CHANNEL_VALID)) {
-            /*
-             * Channels might become valid later if lar is
-             * supported, hence we still want to add them to
-             * the list of supported channels to cfg80211.
-             */
-            iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, nvm_chan[ch_idx], ch_flags);
-            continue;
-        }
-
-        channel = &data->channels[n_channels];
-        n_channels++;
-
-        channel->hw_value = nvm_chan[ch_idx];
-        channel->band = is_5ghz ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
-        channel->center_freq = ieee80211_channel_to_frequency(channel->hw_value, channel->band);
-
-        /* Initialize regulatory-based run-time data */
-
-        /*
-         * Default value - highest tx power value.  max_power
-         * is not used in mvm, and is used for backwards compatibility
-         */
-        channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
-
-        /* don't put limitations in case we're using LAR */
-        if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
-            channel->flags =
-                iwl_get_channel_flags(nvm_chan[ch_idx], ch_idx, is_5ghz, ch_flags, cfg);
-        else {
-            channel->flags = 0;
-        }
-
-        iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, channel->hw_value, ch_flags);
-        IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n", channel->hw_value, channel->max_power);
+    /* workaround to disable wide channels in 5GHz */
+    if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) && is_5ghz) {
+      ch_flags &= ~(NVM_CHANNEL_40MHZ | NVM_CHANNEL_80MHZ | NVM_CHANNEL_160MHZ);
     }
 
-    return n_channels;
+    if (ch_flags & NVM_CHANNEL_160MHZ) {
+      data->vht160_supported = true;
+    }
+
+    if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) && !(ch_flags & NVM_CHANNEL_VALID)) {
+      /*
+       * Channels might become valid later if lar is
+       * supported, hence we still want to add them to
+       * the list of supported channels to cfg80211.
+       */
+      iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, nvm_chan[ch_idx], ch_flags);
+      continue;
+    }
+
+    channel = &data->channels[n_channels];
+    n_channels++;
+
+    channel->hw_value = nvm_chan[ch_idx];
+    channel->band = is_5ghz ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
+    channel->center_freq = ieee80211_channel_to_frequency(channel->hw_value, channel->band);
+
+    /* Initialize regulatory-based run-time data */
+
+    /*
+     * Default value - highest tx power value.  max_power
+     * is not used in mvm, and is used for backwards compatibility
+     */
+    channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
+
+    /* don't put limitations in case we're using LAR */
+    if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
+      channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx], ch_idx, is_5ghz, ch_flags, cfg);
+    else {
+      channel->flags = 0;
+    }
+
+    iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, channel->hw_value, ch_flags);
+    IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n", channel->hw_value, channel->max_power);
+  }
+
+  return n_channels;
 }
 
 static void iwl_init_vht_hw_capab(struct iwl_trans* trans, struct iwl_nvm_data* data,
                                   struct ieee80211_sta_vht_cap* vht_cap, uint8_t tx_chains,
                                   uint8_t rx_chains) {
-    const struct iwl_cfg* cfg = trans->cfg;
-    int num_rx_ants = num_of_ant(rx_chains);
-    int num_tx_ants = num_of_ant(tx_chains);
-    unsigned int max_ampdu_exponent =
-        (cfg->max_vht_ampdu_exponent ?: IEEE80211_VHT_MAX_AMPDU_1024K);
+  const struct iwl_cfg* cfg = trans->cfg;
+  int num_rx_ants = num_of_ant(rx_chains);
+  int num_tx_ants = num_of_ant(tx_chains);
+  unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?: IEEE80211_VHT_MAX_AMPDU_1024K);
 
-    vht_cap->vht_supported = true;
+  vht_cap->vht_supported = true;
 
-    vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_RXSTBC_1 |
-                   IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
-                   3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
-                   max_ampdu_exponent << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+  vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_RXSTBC_1 |
+                 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+                 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
+                 max_ampdu_exponent << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
 
-    if (data->vht160_supported) {
-        vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | IEEE80211_VHT_CAP_SHORT_GI_160;
-    }
+  if (data->vht160_supported) {
+    vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | IEEE80211_VHT_CAP_SHORT_GI_160;
+  }
 
-    if (cfg->vht_mu_mimo_supported) { vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; }
+  if (cfg->vht_mu_mimo_supported) {
+    vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+  }
 
-    if (cfg->ht_params->ldpc) { vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; }
+  if (cfg->ht_params->ldpc) {
+    vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
+  }
 
-    if (data->sku_cap_mimo_disabled) {
-        num_rx_ants = 1;
-        num_tx_ants = 1;
-    }
+  if (data->sku_cap_mimo_disabled) {
+    num_rx_ants = 1;
+    num_tx_ants = 1;
+  }
 
-    if (num_tx_ants > 1) {
-        vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
-    } else {
-        vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
-    }
+  if (num_tx_ants > 1) {
+    vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
+  } else {
+    vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+  }
 
-    switch (iwlwifi_mod_params.amsdu_size) {
+  switch (iwlwifi_mod_params.amsdu_size) {
     case IWL_AMSDU_DEF:
-        if (cfg->mq_rx_supported) {
-            vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
-        } else {
-            vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
-        }
-        break;
-    case IWL_AMSDU_2K:
-        if (cfg->mq_rx_supported) {
-            vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
-        } else {
-            WARN(1, "RB size of 2K is not supported by this device\n");
-        }
-        break;
-    case IWL_AMSDU_4K:
-        vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
-        break;
-    case IWL_AMSDU_8K:
-        vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
-        break;
-    case IWL_AMSDU_12K:
+      if (cfg->mq_rx_supported) {
         vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
-        break;
+      } else {
+        vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
+      }
+      break;
+    case IWL_AMSDU_2K:
+      if (cfg->mq_rx_supported) {
+        vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+      } else {
+        WARN(1, "RB size of 2K is not supported by this device\n");
+      }
+      break;
+    case IWL_AMSDU_4K:
+      vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
+      break;
+    case IWL_AMSDU_8K:
+      vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
+      break;
+    case IWL_AMSDU_12K:
+      vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+      break;
     default:
-        break;
-    }
+      break;
+  }
 
-    vht_cap->vht_mcs.rx_mcs_map =
-        cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
-                    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
-                    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
-                    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
+  vht_cap->vht_mcs.rx_mcs_map =
+      cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+                  IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+                  IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+                  IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
 
-    if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
-        vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
-        /* this works because NOT_SUPPORTED == 3 */
-        vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
-    }
+  if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
+    vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
+    /* this works because NOT_SUPPORTED == 3 */
+    vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
+  }
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    vht_cap->cap ^= trans->dbg_cfg.vht_cap_flip;
+  vht_cap->cap ^= trans->dbg_cfg.vht_cap_flip;
 #endif
 
-    vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
+  vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
 }
 
 static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
@@ -605,361 +630,363 @@
 
 static void iwl_init_he_hw_capab(struct ieee80211_supported_band* sband, uint8_t tx_chains,
                                  uint8_t rx_chains) {
-    if (sband->band == NL80211_BAND_2GHZ || sband->band == NL80211_BAND_5GHZ) {
-        sband->iftype_data = iwl_he_capa;
-    } else {
-        return;
+  if (sband->band == NL80211_BAND_2GHZ || sband->band == NL80211_BAND_5GHZ) {
+    sband->iftype_data = iwl_he_capa;
+  } else {
+    return;
+  }
+
+  sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
+
+  /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
+  if ((tx_chains & rx_chains) != ANT_AB) {
+    int i;
+
+    for (i = 0; i < sband->n_iftype_data; i++) {
+      iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[1] &=
+          ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
+      iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[2] &=
+          ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
+      iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
     }
-
-    sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
-
-    /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
-    if ((tx_chains & rx_chains) != ANT_AB) {
-        int i;
-
-        for (i = 0; i < sband->n_iftype_data; i++) {
-            iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[1] &=
-                ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
-            iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[2] &=
-                ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
-            iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
-        }
-    }
+  }
 }
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
 /* returns true iff there exists one spatial stream where MCS of a > b */
 static bool iwl_he_mcs_greater(uint16_t a, uint16_t b) {
-    int i;
+  int i;
 
-    for (i = 0; i < 16; i += 2) {
-        if ((((a >> i) + 1) & 3) > (((b >> i) + 1) & 3)) { return true; }
+  for (i = 0; i < 16; i += 2) {
+    if ((((a >> i) + 1) & 3) > (((b >> i) + 1) & 3)) {
+      return true;
     }
-    return false;
+  }
+  return false;
 }
 
 static void iwl_init_he_override(struct iwl_trans* trans, struct ieee80211_supported_band* sband) {
-    struct ieee80211_sband_iftype_data* iftype_data;
-    int i;
+  struct ieee80211_sband_iftype_data* iftype_data;
+  int i;
 
-    if (sband->band != NL80211_BAND_2GHZ && sband->band != NL80211_BAND_5GHZ) { return; }
+  if (sband->band != NL80211_BAND_2GHZ && sband->band != NL80211_BAND_5GHZ) {
+    return;
+  }
 
-    for (i = 0; i < ARRAY_SIZE(iwl_he_capa); i++) {
-        iftype_data = &iwl_he_capa[i];
+  for (i = 0; i < ARRAY_SIZE(iwl_he_capa); i++) {
+    iftype_data = &iwl_he_capa[i];
 
-        if (trans->dbg_cfg.rx_mcs_80) {
-            if (iwl_he_mcs_greater(trans->dbg_cfg.rx_mcs_80,
-                                   le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80)))
-                IWL_ERR(trans, "Cannot set dbg rx_mcs_80 = 0x%x (too big)\n",
-                        trans->dbg_cfg.rx_mcs_80);
-            else
-                iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80 =
-                    cpu_to_le16(trans->dbg_cfg.rx_mcs_80);
-        }
-        if (trans->dbg_cfg.tx_mcs_80) {
-            if (iwl_he_mcs_greater(trans->dbg_cfg.tx_mcs_80,
-                                   le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80)))
-                IWL_ERR(trans, "Cannot set dbg tx_mcs_80 = 0x%x (too big)\n",
-                        trans->dbg_cfg.tx_mcs_80);
-            else
-                iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80 =
-                    cpu_to_le16(trans->dbg_cfg.tx_mcs_80);
-        }
-        if (trans->dbg_cfg.rx_mcs_160) {
-            if (iwl_he_mcs_greater(trans->dbg_cfg.rx_mcs_160,
-                                   le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_160)))
-                IWL_ERR(trans, "Cannot set dbg rx_mcs_160 = 0x%x (too big)\n",
-                        trans->dbg_cfg.rx_mcs_160);
-            else
-                iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_160 =
-                    cpu_to_le16(trans->dbg_cfg.rx_mcs_160);
-        }
-        if (trans->dbg_cfg.tx_mcs_160) {
-            if (iwl_he_mcs_greater(trans->dbg_cfg.tx_mcs_160,
-                                   le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160)))
-                IWL_ERR(trans, "Cannot set dbg tx_mcs_160 = 0x%x (too big)\n",
-                        trans->dbg_cfg.tx_mcs_160);
-            else
-                iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160 =
-                    cpu_to_le16(trans->dbg_cfg.tx_mcs_160);
-        }
-
-        /*
-         * If antennas were forced - make sure not declaring MIMO when
-         * we actually are SISO
-         * Recall that there are 2 bits per stream in the "HE Tx/Rx HE
-         * MCS NSS Support Field", so if some antenna is forced on but
-         * not both A and B - we should work in SISO mode, so mark the
-         * 2nd SS as not supported
-         */
-        if (trans->dbg_cfg.valid_ants && (trans->dbg_cfg.valid_ants & ANT_AB) != ANT_AB) {
-            iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80 |=
-                cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
-            iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80 |=
-                cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
-            iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_160 |=
-                cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
-            iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160 |=
-                cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
-            iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80p80 |=
-                cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
-            iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80p80 |=
-                cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
-        }
-
-        if (trans->dbg_cfg.no_ack_en & 0x1) {
-            iftype_data->he_cap.he_cap_elem.mac_cap_info[2] &= ~IEEE80211_HE_MAC_CAP2_ACK_EN;
-        }
-
-        if (trans->dbg_cfg.no_ldpc)
-            iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &=
-                ~IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
-
-        /* Check if any HE capabilities need to be set for debug */
-        if (trans->dbg_cfg.he_ppe_thres.len) {
-            uint8_t len = trans->dbg_cfg.he_ppe_thres.len;
-
-            if (len > sizeof(iftype_data->he_cap.ppe_thres)) {
-                len = sizeof(iftype_data->he_cap.ppe_thres);
-            }
-            memcpy(iftype_data->he_cap.ppe_thres, trans->dbg_cfg.he_ppe_thres.data, len);
-        }
-
-        if (trans->dbg_cfg.he_chan_width_dis)
-            iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
-                ~(trans->dbg_cfg.he_chan_width_dis << 1);
+    if (trans->dbg_cfg.rx_mcs_80) {
+      if (iwl_he_mcs_greater(trans->dbg_cfg.rx_mcs_80,
+                             le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80)))
+        IWL_ERR(trans, "Cannot set dbg rx_mcs_80 = 0x%x (too big)\n", trans->dbg_cfg.rx_mcs_80);
+      else
+        iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(trans->dbg_cfg.rx_mcs_80);
     }
+    if (trans->dbg_cfg.tx_mcs_80) {
+      if (iwl_he_mcs_greater(trans->dbg_cfg.tx_mcs_80,
+                             le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80)))
+        IWL_ERR(trans, "Cannot set dbg tx_mcs_80 = 0x%x (too big)\n", trans->dbg_cfg.tx_mcs_80);
+      else
+        iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(trans->dbg_cfg.tx_mcs_80);
+    }
+    if (trans->dbg_cfg.rx_mcs_160) {
+      if (iwl_he_mcs_greater(trans->dbg_cfg.rx_mcs_160,
+                             le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_160)))
+        IWL_ERR(trans, "Cannot set dbg rx_mcs_160 = 0x%x (too big)\n", trans->dbg_cfg.rx_mcs_160);
+      else
+        iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(trans->dbg_cfg.rx_mcs_160);
+    }
+    if (trans->dbg_cfg.tx_mcs_160) {
+      if (iwl_he_mcs_greater(trans->dbg_cfg.tx_mcs_160,
+                             le16_to_cpu(iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160)))
+        IWL_ERR(trans, "Cannot set dbg tx_mcs_160 = 0x%x (too big)\n", trans->dbg_cfg.tx_mcs_160);
+      else
+        iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(trans->dbg_cfg.tx_mcs_160);
+    }
+
+    /*
+     * If antennas were forced - make sure not declaring MIMO when
+     * we actually are SISO
+     * Recall that there are 2 bits per stream in the "HE Tx/Rx HE
+     * MCS NSS Support Field", so if some antenna is forced on but
+     * not both A and B - we should work in SISO mode, so mark the
+     * 2nd SS as not supported
+     */
+    if (trans->dbg_cfg.valid_ants && (trans->dbg_cfg.valid_ants & ANT_AB) != ANT_AB) {
+      iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80 |=
+          cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
+      iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80 |=
+          cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
+      iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_160 |=
+          cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
+      iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160 |=
+          cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
+      iftype_data->he_cap.he_mcs_nss_supp.rx_mcs_80p80 |=
+          cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
+      iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80p80 |=
+          cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
+    }
+
+    if (trans->dbg_cfg.no_ack_en & 0x1) {
+      iftype_data->he_cap.he_cap_elem.mac_cap_info[2] &= ~IEEE80211_HE_MAC_CAP2_ACK_EN;
+    }
+
+    if (trans->dbg_cfg.no_ldpc)
+      iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &=
+          ~IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
+
+    /* Check if any HE capabilities need to be set for debug */
+    if (trans->dbg_cfg.he_ppe_thres.len) {
+      uint8_t len = trans->dbg_cfg.he_ppe_thres.len;
+
+      if (len > sizeof(iftype_data->he_cap.ppe_thres)) {
+        len = sizeof(iftype_data->he_cap.ppe_thres);
+      }
+      memcpy(iftype_data->he_cap.ppe_thres, trans->dbg_cfg.he_ppe_thres.data, len);
+    }
+
+    if (trans->dbg_cfg.he_chan_width_dis)
+      iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &= ~(trans->dbg_cfg.he_chan_width_dis << 1);
+  }
 }
 #endif
 
 static void iwl_init_sbands(struct iwl_trans* trans, struct iwl_nvm_data* data,
                             const __le16* nvm_ch_flags, uint8_t tx_chains, uint8_t rx_chains,
                             uint32_t sbands_flags) {
-    struct device* dev = trans->dev;
-    const struct iwl_cfg* cfg = trans->cfg;
-    int n_channels;
-    int n_used = 0;
-    struct ieee80211_supported_band* sband;
+  struct device* dev = trans->dev;
+  const struct iwl_cfg* cfg = trans->cfg;
+  int n_channels;
+  int n_used = 0;
+  struct ieee80211_supported_band* sband;
 
-    n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, sbands_flags);
-    sband = &data->bands[NL80211_BAND_2GHZ];
-    sband->band = NL80211_BAND_2GHZ;
-    sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
-    sband->n_bitrates = N_RATES_24;
-    n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_2GHZ);
-    iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, tx_chains, rx_chains);
+  n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, sbands_flags);
+  sband = &data->bands[NL80211_BAND_2GHZ];
+  sband->band = NL80211_BAND_2GHZ;
+  sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+  sband->n_bitrates = N_RATES_24;
+  n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_2GHZ);
+  iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, tx_chains, rx_chains);
 
-    if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) {
-        iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
-    }
+  if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) {
+    iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+  }
 
-    sband = &data->bands[NL80211_BAND_5GHZ];
-    sband->band = NL80211_BAND_5GHZ;
-    sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
-    sband->n_bitrates = N_RATES_52;
-    n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_5GHZ);
-    iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ, tx_chains, rx_chains);
-    if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) {
-        iwl_init_vht_hw_capab(trans, data, &sband->vht_cap, tx_chains, rx_chains);
-    }
+  sband = &data->bands[NL80211_BAND_5GHZ];
+  sband->band = NL80211_BAND_5GHZ;
+  sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
+  sband->n_bitrates = N_RATES_52;
+  n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_5GHZ);
+  iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ, tx_chains, rx_chains);
+  if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) {
+    iwl_init_vht_hw_capab(trans, data, &sband->vht_cap, tx_chains, rx_chains);
+  }
 
-    if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) {
-        iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
-    }
+  if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) {
+    iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+  }
 
-    if (n_channels != n_used) {
-        IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels);
-    }
+  if (n_channels != n_used) {
+    IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels);
+  }
 }
 
 static int iwl_get_sku(const struct iwl_cfg* cfg, const __le16* nvm_sw, const __le16* phy_sku) {
-    if (cfg->nvm_type != IWL_NVM_EXT) { return le16_to_cpup(nvm_sw + SKU); }
+  if (cfg->nvm_type != IWL_NVM_EXT) {
+    return le16_to_cpup(nvm_sw + SKU);
+  }
 
-    return le32_to_cpup((__le32*)(phy_sku + SKU_FAMILY_8000));
+  return le32_to_cpup((__le32*)(phy_sku + SKU_FAMILY_8000));
 }
 
 static int iwl_get_nvm_version(const struct iwl_cfg* cfg, const __le16* nvm_sw) {
-    if (cfg->nvm_type != IWL_NVM_EXT) {
-        return le16_to_cpup(nvm_sw + NVM_VERSION);
-    } else {
-        return le32_to_cpup((__le32*)(nvm_sw + NVM_VERSION_EXT_NVM));
-    }
+  if (cfg->nvm_type != IWL_NVM_EXT) {
+    return le16_to_cpup(nvm_sw + NVM_VERSION);
+  } else {
+    return le32_to_cpup((__le32*)(nvm_sw + NVM_VERSION_EXT_NVM));
+  }
 }
 
 static int iwl_get_radio_cfg(const struct iwl_cfg* cfg, const __le16* nvm_sw,
                              const __le16* phy_sku) {
-    if (cfg->nvm_type != IWL_NVM_EXT) { return le16_to_cpup(nvm_sw + RADIO_CFG); }
+  if (cfg->nvm_type != IWL_NVM_EXT) {
+    return le16_to_cpup(nvm_sw + RADIO_CFG);
+  }
 
-    return le32_to_cpup((__le32*)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
+  return le32_to_cpup((__le32*)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
 }
 
 static int iwl_get_n_hw_addrs(const struct iwl_cfg* cfg, const __le16* nvm_sw) {
-    int n_hw_addr;
+  int n_hw_addr;
 
-    if (cfg->nvm_type != IWL_NVM_EXT) { return le16_to_cpup(nvm_sw + N_HW_ADDRS); }
+  if (cfg->nvm_type != IWL_NVM_EXT) {
+    return le16_to_cpup(nvm_sw + N_HW_ADDRS);
+  }
 
-    n_hw_addr = le32_to_cpup((__le32*)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
+  n_hw_addr = le32_to_cpup((__le32*)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
 
-    return n_hw_addr & N_HW_ADDR_MASK;
+  return n_hw_addr & N_HW_ADDR_MASK;
 }
 
 static void iwl_set_radio_cfg(const struct iwl_cfg* cfg, struct iwl_nvm_data* data,
                               uint32_t radio_cfg) {
-    if (cfg->nvm_type != IWL_NVM_EXT) {
-        data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
-        data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
-        data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
-        data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
-        return;
-    }
+  if (cfg->nvm_type != IWL_NVM_EXT) {
+    data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
+    data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
+    data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
+    data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
+    return;
+  }
 
-    /* set the radio configuration for family 8000 */
-    data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg);
-    data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg);
-    data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg);
-    data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg);
-    data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
-    data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
+  /* set the radio configuration for family 8000 */
+  data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg);
+  data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg);
+  data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg);
+  data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg);
+  data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
+  data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
 }
 
 static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, uint8_t* dest) {
-    const uint8_t* hw_addr;
+  const uint8_t* hw_addr;
 
-    hw_addr = (const uint8_t*)&mac_addr0;
-    dest[0] = hw_addr[3];
-    dest[1] = hw_addr[2];
-    dest[2] = hw_addr[1];
-    dest[3] = hw_addr[0];
+  hw_addr = (const uint8_t*)&mac_addr0;
+  dest[0] = hw_addr[3];
+  dest[1] = hw_addr[2];
+  dest[2] = hw_addr[1];
+  dest[3] = hw_addr[0];
 
-    hw_addr = (const uint8_t*)&mac_addr1;
-    dest[4] = hw_addr[1];
-    dest[5] = hw_addr[0];
+  hw_addr = (const uint8_t*)&mac_addr1;
+  dest[4] = hw_addr[1];
+  dest[5] = hw_addr[0];
 }
 
 static void iwl_set_hw_address_from_csr(struct iwl_trans* trans, struct iwl_nvm_data* data) {
-    __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr0_strap));
-    __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr1_strap));
+  __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr0_strap));
+  __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr1_strap));
 
-    iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
-    /*
-     * If the OEM fused a valid address, use it instead of the one in the
-     * OTP
-     */
-    if (is_valid_ether_addr(data->hw_addr)) { return; }
+  iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+  /*
+   * If the OEM fused a valid address, use it instead of the one in the
+   * OTP
+   */
+  if (is_valid_ether_addr(data->hw_addr)) {
+    return;
+  }
 
-    mac_addr0 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr0_otp));
-    mac_addr1 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr1_otp));
+  mac_addr0 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr0_otp));
+  mac_addr1 = cpu_to_le32(iwl_read32(trans, trans->cfg->csr->mac_addr1_otp));
 
-    iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+  iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
 }
 
 static void iwl_set_hw_address_family_8000(struct iwl_trans* trans, const struct iwl_cfg* cfg,
                                            struct iwl_nvm_data* data, const __le16* mac_override,
                                            const __be16* nvm_hw) {
-    const uint8_t* hw_addr;
+  const uint8_t* hw_addr;
 
-    if (mac_override) {
-        static const uint8_t reserved_mac[] = {0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00};
+  if (mac_override) {
+    static const uint8_t reserved_mac[] = {0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00};
 
-        hw_addr = (const uint8_t*)(mac_override + MAC_ADDRESS_OVERRIDE_EXT_NVM);
+    hw_addr = (const uint8_t*)(mac_override + MAC_ADDRESS_OVERRIDE_EXT_NVM);
 
-        /*
-         * Store the MAC address from MAO section.
-         * No byte swapping is required in MAO section
-         */
-        memcpy(data->hw_addr, hw_addr, ETH_ALEN);
+    /*
+     * Store the MAC address from MAO section.
+     * No byte swapping is required in MAO section
+     */
+    memcpy(data->hw_addr, hw_addr, ETH_ALEN);
 
-        /*
-         * Force the use of the OTP MAC address in case of reserved MAC
-         * address in the NVM, or if address is given but invalid.
-         */
-        if (is_valid_ether_addr(data->hw_addr) && memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) {
-            return;
-        }
-
-        IWL_ERR(trans, "mac address from nvm override section is not valid\n");
+    /*
+     * Force the use of the OTP MAC address in case of reserved MAC
+     * address in the NVM, or if address is given but invalid.
+     */
+    if (is_valid_ether_addr(data->hw_addr) && memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) {
+      return;
     }
 
-    if (nvm_hw) {
-        /* read the mac address from WFMP registers */
-        __le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans, WFMP_MAC_ADDR_0));
-        __le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans, WFMP_MAC_ADDR_1));
+    IWL_ERR(trans, "mac address from nvm override section is not valid\n");
+  }
 
-        iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+  if (nvm_hw) {
+    /* read the mac address from WFMP registers */
+    __le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans, WFMP_MAC_ADDR_0));
+    __le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans, WFMP_MAC_ADDR_1));
 
-        return;
-    }
+    iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
 
-    IWL_ERR(trans, "mac address is not found\n");
+    return;
+  }
+
+  IWL_ERR(trans, "mac address is not found\n");
 }
 
 static int iwl_set_hw_address(struct iwl_trans* trans, const struct iwl_cfg* cfg,
                               struct iwl_nvm_data* data, const __be16* nvm_hw,
                               const __le16* mac_override) {
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
+  struct iwl_dbg_cfg* dbg_cfg = &trans->dbg_cfg;
 
-    if (dbg_cfg->hw_address.len) {
-        if (dbg_cfg->hw_address.len == ETH_ALEN && is_valid_ether_addr(dbg_cfg->hw_address.data)) {
-            memcpy(data->hw_addr, dbg_cfg->hw_address.data, ETH_ALEN);
-            return 0;
-        }
-        IWL_ERR(trans, "mac address from config file is invalid\n");
+  if (dbg_cfg->hw_address.len) {
+    if (dbg_cfg->hw_address.len == ETH_ALEN && is_valid_ether_addr(dbg_cfg->hw_address.data)) {
+      memcpy(data->hw_addr, dbg_cfg->hw_address.data, ETH_ALEN);
+      return 0;
     }
+    IWL_ERR(trans, "mac address from config file is invalid\n");
+  }
 #endif
-    if (cfg->mac_addr_from_csr) {
-        iwl_set_hw_address_from_csr(trans, data);
-    } else if (cfg->nvm_type != IWL_NVM_EXT) {
-        const uint8_t* hw_addr = (const uint8_t*)(nvm_hw + HW_ADDR);
+  if (cfg->mac_addr_from_csr) {
+    iwl_set_hw_address_from_csr(trans, data);
+  } else if (cfg->nvm_type != IWL_NVM_EXT) {
+    const uint8_t* hw_addr = (const uint8_t*)(nvm_hw + HW_ADDR);
 
-        /* The byte order is little endian 16 bit, meaning 214365 */
-        data->hw_addr[0] = hw_addr[1];
-        data->hw_addr[1] = hw_addr[0];
-        data->hw_addr[2] = hw_addr[3];
-        data->hw_addr[3] = hw_addr[2];
-        data->hw_addr[4] = hw_addr[5];
-        data->hw_addr[5] = hw_addr[4];
-    } else {
-        iwl_set_hw_address_family_8000(trans, cfg, data, mac_override, nvm_hw);
-    }
+    /* The byte order is little endian 16 bit, meaning 214365 */
+    data->hw_addr[0] = hw_addr[1];
+    data->hw_addr[1] = hw_addr[0];
+    data->hw_addr[2] = hw_addr[3];
+    data->hw_addr[3] = hw_addr[2];
+    data->hw_addr[4] = hw_addr[5];
+    data->hw_addr[5] = hw_addr[4];
+  } else {
+    iwl_set_hw_address_family_8000(trans, cfg, data, mac_override, nvm_hw);
+  }
 
-    if (!is_valid_ether_addr(data->hw_addr)) {
-        IWL_ERR(trans, "no valid mac address was found\n");
-        return -EINVAL;
-    }
+  if (!is_valid_ether_addr(data->hw_addr)) {
+    IWL_ERR(trans, "no valid mac address was found\n");
+    return -EINVAL;
+  }
 
-    IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
+  IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
 
-    return 0;
+  return 0;
 }
 
 static bool iwl_nvm_no_wide_in_5ghz(struct device* dev, const struct iwl_cfg* cfg,
                                     const __be16* nvm_hw) {
+  /*
+   * Workaround a bug in Indonesia SKUs where the regulatory in
+   * some 7000-family OTPs erroneously allow wide channels in
+   * 5GHz.  To check for Indonesia, we take the SKU value from
+   * bits 1-4 in the subsystem ID and check if it is either 5 or
+   * 9.  In those cases, we need to force-disable wide channels
+   * in 5GHz otherwise the FW will throw a sysassert when we try
+   * to use them.
+   */
+  if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
     /*
-     * Workaround a bug in Indonesia SKUs where the regulatory in
-     * some 7000-family OTPs erroneously allow wide channels in
-     * 5GHz.  To check for Indonesia, we take the SKU value from
-     * bits 1-4 in the subsystem ID and check if it is either 5 or
-     * 9.  In those cases, we need to force-disable wide channels
-     * in 5GHz otherwise the FW will throw a sysassert when we try
-     * to use them.
+     * Unlike the other sections in the NVM, the hw
+     * section uses big-endian.
      */
-    if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
-        /*
-         * Unlike the other sections in the NVM, the hw
-         * section uses big-endian.
-         */
-        uint16_t subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID);
-        uint8_t sku = (subsystem_id & 0x1e) >> 1;
+    uint16_t subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID);
+    uint8_t sku = (subsystem_id & 0x1e) >> 1;
 
-        if (sku == 5 || sku == 9) {
-            IWL_DEBUG_EEPROM(dev, "disabling wide channels in 5GHz (0x%0x %d)\n", subsystem_id,
-                             sku);
-            return true;
-        }
+    if (sku == 5 || sku == 9) {
+      IWL_DEBUG_EEPROM(dev, "disabling wide channels in 5GHz (0x%0x %d)\n", subsystem_id, sku);
+      return true;
     }
+  }
 
-    return false;
+  return false;
 }
 
 struct iwl_nvm_data* iwl_parse_nvm_data(struct iwl_trans* trans, const struct iwl_cfg* cfg,
@@ -968,241 +995,275 @@
                                         const __le16* mac_override, const __le16* phy_sku,
                                         uint8_t tx_chains, uint8_t rx_chains,
                                         bool lar_fw_supported) {
-    struct device* dev = trans->dev;
-    struct iwl_nvm_data* data;
-    bool lar_enabled;
-    uint32_t sku, radio_cfg;
-    uint32_t sbands_flags = 0;
-    uint16_t lar_config;
-    const __le16* ch_section;
+  struct device* dev = trans->dev;
+  struct iwl_nvm_data* data;
+  bool lar_enabled;
+  uint32_t sku, radio_cfg;
+  uint32_t sbands_flags = 0;
+  uint16_t lar_config;
+  const __le16* ch_section;
 
-    if (cfg->nvm_type != IWL_NVM_EXT)
-        data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * IWL_NVM_NUM_CHANNELS,
-                       GFP_KERNEL);
-    else
-        data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * IWL_NVM_NUM_CHANNELS_EXT,
-                       GFP_KERNEL);
-    if (!data) { return NULL; }
+  if (cfg->nvm_type != IWL_NVM_EXT)
+    data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * IWL_NVM_NUM_CHANNELS,
+                   GFP_KERNEL);
+  else
+    data = kzalloc(sizeof(*data) + sizeof(struct ieee80211_channel) * IWL_NVM_NUM_CHANNELS_EXT,
+                   GFP_KERNEL);
+  if (!data) {
+    return NULL;
+  }
 
-    data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
+  data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
 
-    radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku);
-    iwl_set_radio_cfg(cfg, data, radio_cfg);
-    if (data->valid_tx_ant) { tx_chains &= data->valid_tx_ant; }
-    if (data->valid_rx_ant) { rx_chains &= data->valid_rx_ant; }
+  radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku);
+  iwl_set_radio_cfg(cfg, data, radio_cfg);
+  if (data->valid_tx_ant) {
+    tx_chains &= data->valid_tx_ant;
+  }
+  if (data->valid_rx_ant) {
+    rx_chains &= data->valid_rx_ant;
+  }
 
-    sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
+  sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    if (trans->dbg_cfg.disable_52GHz) { /* remove support for 5.2 */
-        sku &= ~NVM_SKU_CAP_BAND_52GHZ;
-    }
-    if (trans->dbg_cfg.disable_24GHz) { /* remove support for 2.4 */
-        sku &= ~NVM_SKU_CAP_BAND_24GHZ;
-    }
+  if (trans->dbg_cfg.disable_52GHz) { /* remove support for 5.2 */
+    sku &= ~NVM_SKU_CAP_BAND_52GHZ;
+  }
+  if (trans->dbg_cfg.disable_24GHz) { /* remove support for 2.4 */
+    sku &= ~NVM_SKU_CAP_BAND_24GHZ;
+  }
 #endif
 
-    data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
-    data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
-    data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
-    if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) { data->sku_cap_11n_enable = false; }
-    data->sku_cap_11ac_enable = data->sku_cap_11n_enable && (sku & NVM_SKU_CAP_11AC_ENABLE);
-    data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
+  data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
+  data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
+  data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
+  if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) {
+    data->sku_cap_11n_enable = false;
+  }
+  data->sku_cap_11ac_enable = data->sku_cap_11n_enable && (sku & NVM_SKU_CAP_11AC_ENABLE);
+  data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
 
-    data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
+  data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
-    if (cfg->nvm_type != IWL_NVM_EXT) {
-        /* Checking for required sections */
-        if (!nvm_calib) {
-            IWL_ERR(trans, "Can't parse empty Calib NVM sections\n");
-            kfree(data);
-            return NULL;
-        }
-
-        ch_section =
-            cfg->nvm_type == IWL_NVM_SDP ? &regulatory[NVM_CHANNELS_SDP] : &nvm_sw[NVM_CHANNELS];
-
-        lar_enabled = true;
-    } else {
-        uint16_t lar_offset = data->nvm_version < 0xE39 ? NVM_LAR_OFFSET_OLD : NVM_LAR_OFFSET;
-
-        lar_config = le16_to_cpup(regulatory + lar_offset);
-        data->lar_enabled = !!(lar_config & NVM_LAR_ENABLED);
-        lar_enabled = data->lar_enabled;
-        ch_section = &regulatory[NVM_CHANNELS_EXTENDED];
+  if (cfg->nvm_type != IWL_NVM_EXT) {
+    /* Checking for required sections */
+    if (!nvm_calib) {
+      IWL_ERR(trans, "Can't parse empty Calib NVM sections\n");
+      kfree(data);
+      return NULL;
     }
 
-    /* If no valid mac address was found - bail out */
-    if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) {
-        kfree(data);
-        return NULL;
-    }
+    ch_section =
+        cfg->nvm_type == IWL_NVM_SDP ? &regulatory[NVM_CHANNELS_SDP] : &nvm_sw[NVM_CHANNELS];
+
+    lar_enabled = true;
+  } else {
+    uint16_t lar_offset = data->nvm_version < 0xE39 ? NVM_LAR_OFFSET_OLD : NVM_LAR_OFFSET;
+
+    lar_config = le16_to_cpup(regulatory + lar_offset);
+    data->lar_enabled = !!(lar_config & NVM_LAR_ENABLED);
+    lar_enabled = data->lar_enabled;
+    ch_section = &regulatory[NVM_CHANNELS_EXTENDED];
+  }
+
+  /* If no valid mac address was found - bail out */
+  if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) {
+    kfree(data);
+    return NULL;
+  }
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    iwl_init_he_override(trans, &data->bands[NL80211_BAND_2GHZ]);
-    iwl_init_he_override(trans, &data->bands[NL80211_BAND_5GHZ]);
+  iwl_init_he_override(trans, &data->bands[NL80211_BAND_2GHZ]);
+  iwl_init_he_override(trans, &data->bands[NL80211_BAND_5GHZ]);
 #endif
-    if (lar_fw_supported && lar_enabled) { sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; }
+  if (lar_fw_supported && lar_enabled) {
+    sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
+  }
 
-    if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw)) {
-        sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
-    }
+  if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw)) {
+    sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
+  }
 
-    iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains, sbands_flags);
-    data->calib_version = 255;
+  iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains, sbands_flags);
+  data->calib_version = 255;
 
-    return data;
+  return data;
 }
 IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
 
 static uint32_t iwl_nvm_get_regdom_bw_flags(const uint8_t* nvm_chan, int ch_idx, uint16_t nvm_flags,
                                             const struct iwl_cfg* cfg) {
-    uint32_t flags = NL80211_RRF_NO_HT40;
-    uint32_t last_5ghz_ht = LAST_5GHZ_HT;
+  uint32_t flags = NL80211_RRF_NO_HT40;
+  uint32_t last_5ghz_ht = LAST_5GHZ_HT;
 
-    if (cfg->nvm_type == IWL_NVM_EXT) { last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; }
+  if (cfg->nvm_type == IWL_NVM_EXT) {
+    last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
+  }
 
-    if (ch_idx < NUM_2GHZ_CHANNELS && (nvm_flags & NVM_CHANNEL_40MHZ)) {
-        if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS) { flags &= ~NL80211_RRF_NO_HT40PLUS; }
-        if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) { flags &= ~NL80211_RRF_NO_HT40MINUS; }
-    } else if (nvm_chan[ch_idx] <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
-        if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) {
-            flags &= ~NL80211_RRF_NO_HT40PLUS;
-        } else {
-            flags &= ~NL80211_RRF_NO_HT40MINUS;
-        }
+  if (ch_idx < NUM_2GHZ_CHANNELS && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+    if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS) {
+      flags &= ~NL80211_RRF_NO_HT40PLUS;
     }
-
-    if (!(nvm_flags & NVM_CHANNEL_80MHZ)) { flags |= NL80211_RRF_NO_80MHZ; }
-    if (!(nvm_flags & NVM_CHANNEL_160MHZ)) { flags |= NL80211_RRF_NO_160MHZ; }
-
-    if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) { flags |= NL80211_RRF_NO_IR; }
-
-    if (nvm_flags & NVM_CHANNEL_RADAR) { flags |= NL80211_RRF_DFS; }
-
-    if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) { flags |= NL80211_RRF_NO_OUTDOOR; }
-
-    /* Set the GO concurrent flag only in case that NO_IR is set.
-     * Otherwise it is meaningless
-     */
-    if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && (flags & NL80211_RRF_NO_IR)) {
-        flags |= NL80211_RRF_GO_CONCURRENT;
+    if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) {
+      flags &= ~NL80211_RRF_NO_HT40MINUS;
     }
+  } else if (nvm_chan[ch_idx] <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+    if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) {
+      flags &= ~NL80211_RRF_NO_HT40PLUS;
+    } else {
+      flags &= ~NL80211_RRF_NO_HT40MINUS;
+    }
+  }
 
-    return flags;
+  if (!(nvm_flags & NVM_CHANNEL_80MHZ)) {
+    flags |= NL80211_RRF_NO_80MHZ;
+  }
+  if (!(nvm_flags & NVM_CHANNEL_160MHZ)) {
+    flags |= NL80211_RRF_NO_160MHZ;
+  }
+
+  if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) {
+    flags |= NL80211_RRF_NO_IR;
+  }
+
+  if (nvm_flags & NVM_CHANNEL_RADAR) {
+    flags |= NL80211_RRF_DFS;
+  }
+
+  if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) {
+    flags |= NL80211_RRF_NO_OUTDOOR;
+  }
+
+  /* Set the GO concurrent flag only in case that NO_IR is set.
+   * Otherwise it is meaningless
+   */
+  if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && (flags & NL80211_RRF_NO_IR)) {
+    flags |= NL80211_RRF_GO_CONCURRENT;
+  }
+
+  return flags;
 }
 
 struct regdb_ptrs {
-    struct ieee80211_wmm_rule* rule;
-    uint32_t token;
+  struct ieee80211_wmm_rule* rule;
+  uint32_t token;
 };
 
 struct ieee80211_regdomain* iwl_parse_nvm_mcc_info(struct device* dev, const struct iwl_cfg* cfg,
                                                    int num_of_ch, __le32* channels, uint16_t fw_mcc,
                                                    uint16_t geo_info) {
-    int ch_idx;
-    uint16_t ch_flags;
-    uint32_t reg_rule_flags, prev_reg_rule_flags = 0;
-    const uint8_t* nvm_chan =
-        cfg->nvm_type == IWL_NVM_EXT ? iwl_ext_nvm_channels : iwl_nvm_channels;
-    struct ieee80211_regdomain *regd, *copy_rd;
-    int size_of_regd, regd_to_copy;
-    struct ieee80211_reg_rule* rule;
-    struct regdb_ptrs* regdb_ptrs;
-    enum nl80211_band band;
-    int center_freq, prev_center_freq = 0;
-    int valid_rules = 0;
-    bool new_rule;
-    int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
+  int ch_idx;
+  uint16_t ch_flags;
+  uint32_t reg_rule_flags, prev_reg_rule_flags = 0;
+  const uint8_t* nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? iwl_ext_nvm_channels : iwl_nvm_channels;
+  struct ieee80211_regdomain *regd, *copy_rd;
+  int size_of_regd, regd_to_copy;
+  struct ieee80211_reg_rule* rule;
+  struct regdb_ptrs* regdb_ptrs;
+  enum nl80211_band band;
+  int center_freq, prev_center_freq = 0;
+  int valid_rules = 0;
+  bool new_rule;
+  int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
 
-    if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) { return ERR_PTR(-EINVAL); }
+  if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) {
+    return ERR_PTR(-EINVAL);
+  }
 
-    if (WARN_ON(num_of_ch > max_num_ch)) { num_of_ch = max_num_ch; }
+  if (WARN_ON(num_of_ch > max_num_ch)) {
+    num_of_ch = max_num_ch;
+  }
 
-    IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n", num_of_ch);
+  IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n", num_of_ch);
 
-    /* build a regdomain rule for every valid channel */
-    size_of_regd =
-        sizeof(struct ieee80211_regdomain) + num_of_ch * sizeof(struct ieee80211_reg_rule);
+  /* build a regdomain rule for every valid channel */
+  size_of_regd = sizeof(struct ieee80211_regdomain) + num_of_ch * sizeof(struct ieee80211_reg_rule);
 
-    regd = kzalloc(size_of_regd, GFP_KERNEL);
-    if (!regd) { return ERR_PTR(-ENOMEM); }
+  regd = kzalloc(size_of_regd, GFP_KERNEL);
+  if (!regd) {
+    return ERR_PTR(-ENOMEM);
+  }
 
-    regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL);
-    if (!regdb_ptrs) {
-        copy_rd = ERR_PTR(-ENOMEM);
-        goto out;
+  regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL);
+  if (!regdb_ptrs) {
+    copy_rd = ERR_PTR(-ENOMEM);
+    goto out;
+  }
+
+  /* set alpha2 from FW. */
+  regd->alpha2[0] = fw_mcc >> 8;
+  regd->alpha2[1] = fw_mcc & 0xff;
+
+  for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+    ch_flags = (uint16_t)__le32_to_cpup(channels + ch_idx);
+    band = (ch_idx < NUM_2GHZ_CHANNELS) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+    center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], band);
+    new_rule = false;
+
+    if (!(ch_flags & NVM_CHANNEL_VALID)) {
+      iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags);
+      continue;
     }
 
-    /* set alpha2 from FW. */
-    regd->alpha2[0] = fw_mcc >> 8;
-    regd->alpha2[1] = fw_mcc & 0xff;
+    reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, ch_flags, cfg);
 
-    for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
-        ch_flags = (uint16_t)__le32_to_cpup(channels + ch_idx);
-        band = (ch_idx < NUM_2GHZ_CHANNELS) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
-        center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], band);
-        new_rule = false;
-
-        if (!(ch_flags & NVM_CHANNEL_VALID)) {
-            iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags);
-            continue;
-        }
-
-        reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, ch_flags, cfg);
-
-        /* we can't continue the same rule */
-        if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
-            center_freq - prev_center_freq > 20) {
-            valid_rules++;
-            new_rule = true;
-        }
-
-        rule = &regd->reg_rules[valid_rules - 1];
-
-        if (new_rule) { rule->freq_range.start_freq_khz = MHZ_TO_KHZ(center_freq - 10); }
-
-        rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);
-
-        /* this doesn't matter - not used by FW */
-        rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
-        rule->power_rule.max_eirp = DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
-
-        rule->flags = reg_rule_flags;
-
-        /* rely on auto-calculation to merge BW of contiguous chans */
-        rule->flags |= NL80211_RRF_AUTO_BW;
-        rule->freq_range.max_bandwidth_khz = 0;
-
-        prev_center_freq = center_freq;
-        prev_reg_rule_flags = reg_rule_flags;
-
-        iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags);
-
-        if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) || band == NL80211_BAND_2GHZ) { continue; }
-
-        reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
+    /* we can't continue the same rule */
+    if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
+        center_freq - prev_center_freq > 20) {
+      valid_rules++;
+      new_rule = true;
     }
 
-    regd->n_reg_rules = valid_rules;
+    rule = &regd->reg_rules[valid_rules - 1];
 
-    /*
-     * Narrow down regdom for unused regulatory rules to prevent hole
-     * between reg rules to wmm rules.
-     */
-    regd_to_copy =
-        sizeof(struct ieee80211_regdomain) + valid_rules * sizeof(struct ieee80211_reg_rule);
-
-    copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL);
-    if (!copy_rd) {
-        copy_rd = ERR_PTR(-ENOMEM);
-        goto out;
+    if (new_rule) {
+      rule->freq_range.start_freq_khz = MHZ_TO_KHZ(center_freq - 10);
     }
 
+    rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);
+
+    /* this doesn't matter - not used by FW */
+    rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
+    rule->power_rule.max_eirp = DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
+
+    rule->flags = reg_rule_flags;
+
+    /* rely on auto-calculation to merge BW of contiguous chans */
+    rule->flags |= NL80211_RRF_AUTO_BW;
+    rule->freq_range.max_bandwidth_khz = 0;
+
+    prev_center_freq = center_freq;
+    prev_reg_rule_flags = reg_rule_flags;
+
+    iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags);
+
+    if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) || band == NL80211_BAND_2GHZ) {
+      continue;
+    }
+
+    reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
+  }
+
+  regd->n_reg_rules = valid_rules;
+
+  /*
+   * Narrow down regdom for unused regulatory rules to prevent hole
+   * between reg rules to wmm rules.
+   */
+  regd_to_copy =
+      sizeof(struct ieee80211_regdomain) + valid_rules * sizeof(struct ieee80211_reg_rule);
+
+  copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL);
+  if (!copy_rd) {
+    copy_rd = ERR_PTR(-ENOMEM);
+    goto out;
+  }
+
 out:
-    kfree(regdb_ptrs);
-    kfree(regd);
-    return copy_rd;
+  kfree(regdb_ptrs);
+  kfree(regd);
+  return copy_rd;
 }
 IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
 
@@ -1214,12 +1275,12 @@
 #define IWL_4165_DEVICE_ID 0x5501
 #define NVM_SKU_CAP_MIMO_DISABLE BIT(5)
 
-    if (section == NVM_SECTION_TYPE_PHY_SKU && hw_id == IWL_4165_DEVICE_ID && data && len >= 5 &&
-        (data[4] & NVM_SKU_CAP_MIMO_DISABLE))
-    /* OTP 0x52 bug work around: it's a 1x1 device */
-    {
-        data[3] = ANT_B | (ANT_B << 4);
-    }
+  if (section == NVM_SECTION_TYPE_PHY_SKU && hw_id == IWL_4165_DEVICE_ID && data && len >= 5 &&
+      (data[4] & NVM_SKU_CAP_MIMO_DISABLE))
+  /* OTP 0x52 bug work around: it's a 1x1 device */
+  {
+    data[3] = ANT_B | (ANT_B << 4);
+  }
 }
 IWL_EXPORT_SYMBOL(iwl_nvm_fixups);
 
@@ -1246,18 +1307,18 @@
  */
 int iwl_read_external_nvm(struct iwl_trans* trans, const char* nvm_file_name,
                           struct iwl_nvm_section* nvm_sections) {
-    int ret, section_size;
-    uint16_t section_id;
-    const struct firmware* fw_entry;
-    const struct {
-        __le16 word1;
-        __le16 word2;
-        uint8_t data[];
-    } * file_sec;
-    const uint8_t* eof;
-    uint8_t* temp;
-    int max_section_size;
-    const __le32* dword_buff;
+  int ret, section_size;
+  uint16_t section_id;
+  const struct firmware* fw_entry;
+  const struct {
+    __le16 word1;
+    __le16 word2;
+    uint8_t data[];
+  } * file_sec;
+  const uint8_t* eof;
+  uint8_t* temp;
+  int max_section_size;
+  const __le32* dword_buff;
 
 #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
 #define NVM_WORD2_ID(x) (x >> 12)
@@ -1267,226 +1328,227 @@
 #define NVM_HEADER_1 (0x4E564D2A)
 #define NVM_HEADER_SIZE (4 * sizeof(uint32_t))
 
-    IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n");
+  IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n");
 
-    /* Maximal size depends on NVM version */
-    if (trans->cfg->nvm_type != IWL_NVM_EXT) {
-        max_section_size = IWL_MAX_NVM_SECTION_SIZE;
-    } else {
-        max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
-    }
+  /* Maximal size depends on NVM version */
+  if (trans->cfg->nvm_type != IWL_NVM_EXT) {
+    max_section_size = IWL_MAX_NVM_SECTION_SIZE;
+  } else {
+    max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
+  }
 
-    /*
-     * Obtain NVM image via request_firmware. Since we already used
-     * request_firmware_nowait() for the firmware binary load and only
-     * get here after that we assume the NVM request can be satisfied
-     * synchronously.
-     */
-    ret = request_firmware(&fw_entry, nvm_file_name, trans->dev);
-    if (ret) {
-        IWL_ERR(trans, "ERROR: %s isn't available %d\n", nvm_file_name, ret);
-        return ret;
-    }
-
-    IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n", nvm_file_name, fw_entry->size);
-
-    if (fw_entry->size > MAX_NVM_FILE_LEN) {
-        IWL_ERR(trans, "NVM file too large\n");
-        ret = -EINVAL;
-        goto out;
-    }
-
-    eof = fw_entry->data + fw_entry->size;
-    dword_buff = (__le32*)fw_entry->data;
-
-    /* some NVM file will contain a header.
-     * The header is identified by 2 dwords header as follow:
-     * dword[0] = 0x2A504C54
-     * dword[1] = 0x4E564D2A
-     *
-     * This header must be skipped when providing the NVM data to the FW.
-     */
-    if (fw_entry->size > NVM_HEADER_SIZE && dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
-        dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
-        file_sec = (void*)(fw_entry->data + NVM_HEADER_SIZE);
-        IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
-        IWL_INFO(trans, "NVM Manufacturing date %08X\n", le32_to_cpu(dword_buff[3]));
-
-        /* nvm file validation, dword_buff[2] holds the file version */
-        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
-            CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP &&
-            le32_to_cpu(dword_buff[2]) < 0xE4A) {
-            ret = -EFAULT;
-            goto out;
-        }
-    } else {
-        file_sec = (void*)fw_entry->data;
-    }
-
-    while (true) {
-        if (file_sec->data > eof) {
-            IWL_ERR(trans, "ERROR - NVM file too short for section header\n");
-            ret = -EINVAL;
-            break;
-        }
-
-        /* check for EOF marker */
-        if (!file_sec->word1 && !file_sec->word2) {
-            ret = 0;
-            break;
-        }
-
-        if (trans->cfg->nvm_type != IWL_NVM_EXT) {
-            section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
-            section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
-        } else {
-            section_size = 2 * EXT_NVM_WORD2_LEN(le16_to_cpu(file_sec->word2));
-            section_id = EXT_NVM_WORD1_ID(le16_to_cpu(file_sec->word1));
-        }
-
-        if (section_size > max_section_size) {
-            IWL_ERR(trans, "ERROR - section too large (%d)\n", section_size);
-            ret = -EINVAL;
-            break;
-        }
-
-        if (!section_size) {
-            IWL_ERR(trans, "ERROR - section empty\n");
-            ret = -EINVAL;
-            break;
-        }
-
-        if (file_sec->data + section_size > eof) {
-            IWL_ERR(trans, "ERROR - NVM file too short for section (%d bytes)\n", section_size);
-            ret = -EINVAL;
-            break;
-        }
-
-        if (WARN(section_id >= NVM_MAX_NUM_SECTIONS, "Invalid NVM section ID %d\n", section_id)) {
-            ret = -EINVAL;
-            break;
-        }
-
-        temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
-        if (!temp) {
-            ret = -ENOMEM;
-            break;
-        }
-
-        iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size);
-
-        kfree(nvm_sections[section_id].data);
-        nvm_sections[section_id].data = temp;
-        nvm_sections[section_id].length = section_size;
-
-        /* advance to the next section */
-        file_sec = (void*)(file_sec->data + section_size);
-    }
-out:
-    release_firmware(fw_entry);
+  /*
+   * Obtain NVM image via request_firmware. Since we already used
+   * request_firmware_nowait() for the firmware binary load and only
+   * get here after that we assume the NVM request can be satisfied
+   * synchronously.
+   */
+  ret = request_firmware(&fw_entry, nvm_file_name, trans->dev);
+  if (ret) {
+    IWL_ERR(trans, "ERROR: %s isn't available %d\n", nvm_file_name, ret);
     return ret;
+  }
+
+  IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n", nvm_file_name, fw_entry->size);
+
+  if (fw_entry->size > MAX_NVM_FILE_LEN) {
+    IWL_ERR(trans, "NVM file too large\n");
+    ret = -EINVAL;
+    goto out;
+  }
+
+  eof = fw_entry->data + fw_entry->size;
+  dword_buff = (__le32*)fw_entry->data;
+
+  /* some NVM file will contain a header.
+   * The header is identified by 2 dwords header as follow:
+   * dword[0] = 0x2A504C54
+   * dword[1] = 0x4E564D2A
+   *
+   * This header must be skipped when providing the NVM data to the FW.
+   */
+  if (fw_entry->size > NVM_HEADER_SIZE && dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
+      dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
+    file_sec = (void*)(fw_entry->data + NVM_HEADER_SIZE);
+    IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
+    IWL_INFO(trans, "NVM Manufacturing date %08X\n", le32_to_cpu(dword_buff[3]));
+
+    /* nvm file validation, dword_buff[2] holds the file version */
+    if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+        CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP && le32_to_cpu(dword_buff[2]) < 0xE4A) {
+      ret = -EFAULT;
+      goto out;
+    }
+  } else {
+    file_sec = (void*)fw_entry->data;
+  }
+
+  while (true) {
+    if (file_sec->data > eof) {
+      IWL_ERR(trans, "ERROR - NVM file too short for section header\n");
+      ret = -EINVAL;
+      break;
+    }
+
+    /* check for EOF marker */
+    if (!file_sec->word1 && !file_sec->word2) {
+      ret = 0;
+      break;
+    }
+
+    if (trans->cfg->nvm_type != IWL_NVM_EXT) {
+      section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
+      section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
+    } else {
+      section_size = 2 * EXT_NVM_WORD2_LEN(le16_to_cpu(file_sec->word2));
+      section_id = EXT_NVM_WORD1_ID(le16_to_cpu(file_sec->word1));
+    }
+
+    if (section_size > max_section_size) {
+      IWL_ERR(trans, "ERROR - section too large (%d)\n", section_size);
+      ret = -EINVAL;
+      break;
+    }
+
+    if (!section_size) {
+      IWL_ERR(trans, "ERROR - section empty\n");
+      ret = -EINVAL;
+      break;
+    }
+
+    if (file_sec->data + section_size > eof) {
+      IWL_ERR(trans, "ERROR - NVM file too short for section (%d bytes)\n", section_size);
+      ret = -EINVAL;
+      break;
+    }
+
+    if (WARN(section_id >= NVM_MAX_NUM_SECTIONS, "Invalid NVM section ID %d\n", section_id)) {
+      ret = -EINVAL;
+      break;
+    }
+
+    temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
+    if (!temp) {
+      ret = -ENOMEM;
+      break;
+    }
+
+    iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size);
+
+    kfree(nvm_sections[section_id].data);
+    nvm_sections[section_id].data = temp;
+    nvm_sections[section_id].length = section_size;
+
+    /* advance to the next section */
+    file_sec = (void*)(file_sec->data + section_size);
+  }
+out:
+  release_firmware(fw_entry);
+  return ret;
 }
 IWL_EXPORT_SYMBOL(iwl_read_external_nvm);
 
 struct iwl_nvm_data* iwl_get_nvm(struct iwl_trans* trans, const struct iwl_fw* fw) {
-    struct iwl_nvm_get_info cmd = {};
-    struct iwl_nvm_get_info_rsp* rsp;
-    struct iwl_nvm_data* nvm;
-    struct iwl_host_cmd hcmd = {.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
-                                .data =
-                                    {
-                                        &cmd,
-                                    },
-                                .len = {sizeof(cmd)},
-                                .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)};
-    int ret;
-    bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
-                            fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-    bool empty_otp;
-    uint32_t mac_flags;
-    uint32_t sbands_flags = 0;
+  struct iwl_nvm_get_info cmd = {};
+  struct iwl_nvm_get_info_rsp* rsp;
+  struct iwl_nvm_data* nvm;
+  struct iwl_host_cmd hcmd = {.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+                              .data =
+                                  {
+                                      &cmd,
+                                  },
+                              .len = {sizeof(cmd)},
+                              .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)};
+  int ret;
+  bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
+                          fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+  bool empty_otp;
+  uint32_t mac_flags;
+  uint32_t sbands_flags = 0;
 
-    ret = iwl_trans_send_cmd(trans, &hcmd);
-    if (ret) { return ERR_PTR(ret); }
+  ret = iwl_trans_send_cmd(trans, &hcmd);
+  if (ret) {
+    return ERR_PTR(ret);
+  }
 
-    if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
-             "Invalid payload len in NVM response from FW %d",
-             iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
-        ret = -EINVAL;
-        goto out;
-    }
+  if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
+           "Invalid payload len in NVM response from FW %d",
+           iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
+    ret = -EINVAL;
+    goto out;
+  }
 
-    rsp = (void*)hcmd.resp_pkt->data;
-    empty_otp = !!(le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP);
-    if (empty_otp) { IWL_INFO(trans, "OTP is empty\n"); }
+  rsp = (void*)hcmd.resp_pkt->data;
+  empty_otp = !!(le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP);
+  if (empty_otp) {
+    IWL_INFO(trans, "OTP is empty\n");
+  }
 
-    nvm = kzalloc(sizeof(*nvm) + sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, GFP_KERNEL);
-    if (!nvm) {
-        ret = -ENOMEM;
-        goto out;
-    }
+  nvm = kzalloc(sizeof(*nvm) + sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, GFP_KERNEL);
+  if (!nvm) {
+    ret = -ENOMEM;
+    goto out;
+  }
 
-    iwl_set_hw_address_from_csr(trans, nvm);
-    /* TODO: if platform NVM has MAC address - override it here */
+  iwl_set_hw_address_from_csr(trans, nvm);
+  /* TODO: if platform NVM has MAC address - override it here */
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    if (trans->dbg_cfg.hw_address.len) {
-        if (trans->dbg_cfg.hw_address.len == ETH_ALEN &&
-            is_valid_ether_addr(trans->dbg_cfg.hw_address.data)) {
-            memcpy(nvm->hw_addr, trans->dbg_cfg.hw_address.data, ETH_ALEN);
-        } else {
-            IWL_ERR(trans, "mac address from config file is invalid\n");
-        }
+  if (trans->dbg_cfg.hw_address.len) {
+    if (trans->dbg_cfg.hw_address.len == ETH_ALEN &&
+        is_valid_ether_addr(trans->dbg_cfg.hw_address.data)) {
+      memcpy(nvm->hw_addr, trans->dbg_cfg.hw_address.data, ETH_ALEN);
+    } else {
+      IWL_ERR(trans, "mac address from config file is invalid\n");
     }
+  }
 #endif
-    if (!is_valid_ether_addr(nvm->hw_addr)) {
-        IWL_ERR(trans, "no valid mac address was found\n");
-        ret = -EINVAL;
-        goto err_free;
-    }
+  if (!is_valid_ether_addr(nvm->hw_addr)) {
+    IWL_ERR(trans, "no valid mac address was found\n");
+    ret = -EINVAL;
+    goto err_free;
+  }
 
-    IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr);
+  IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr);
 
-    /* Initialize general data */
-    nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
-    nvm->n_hw_addrs = rsp->general.n_hw_addrs;
-    if (nvm->n_hw_addrs == 0)
-        IWL_WARN(trans, "Firmware declares no reserved mac addresses. OTP is empty: %d\n",
-                 empty_otp);
+  /* Initialize general data */
+  nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
+  nvm->n_hw_addrs = rsp->general.n_hw_addrs;
+  if (nvm->n_hw_addrs == 0)
+    IWL_WARN(trans, "Firmware declares no reserved mac addresses. OTP is empty: %d\n", empty_otp);
 
-    /* Initialize MAC sku data */
-    mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
-    nvm->sku_cap_11ac_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
-    nvm->sku_cap_11n_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
-    nvm->sku_cap_11ax_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
-    nvm->sku_cap_band_24ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
-    nvm->sku_cap_band_52ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
-    nvm->sku_cap_mimo_disabled = !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
+  /* Initialize MAC sku data */
+  mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
+  nvm->sku_cap_11ac_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
+  nvm->sku_cap_11n_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
+  nvm->sku_cap_11ax_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
+  nvm->sku_cap_band_24ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
+  nvm->sku_cap_band_52ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
+  nvm->sku_cap_mimo_disabled = !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
 
-    /* Initialize PHY sku data */
-    nvm->valid_tx_ant = (uint8_t)le32_to_cpu(rsp->phy_sku.tx_chains);
-    nvm->valid_rx_ant = (uint8_t)le32_to_cpu(rsp->phy_sku.rx_chains);
+  /* Initialize PHY sku data */
+  nvm->valid_tx_ant = (uint8_t)le32_to_cpu(rsp->phy_sku.tx_chains);
+  nvm->valid_rx_ant = (uint8_t)le32_to_cpu(rsp->phy_sku.rx_chains);
 
-    if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
-        nvm->lar_enabled = true;
-        sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
-    }
+  if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+    nvm->lar_enabled = true;
+    sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
+  }
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    iwl_init_he_override(trans, &nvm->bands[NL80211_BAND_2GHZ]);
-    iwl_init_he_override(trans, &nvm->bands[NL80211_BAND_5GHZ]);
+  iwl_init_he_override(trans, &nvm->bands[NL80211_BAND_2GHZ]);
+  iwl_init_he_override(trans, &nvm->bands[NL80211_BAND_5GHZ]);
 #endif
-    iwl_init_sbands(trans, nvm, rsp->regulatory.channel_profile,
-                    nvm->valid_tx_ant & fw->valid_tx_ant, nvm->valid_rx_ant & fw->valid_rx_ant,
-                    sbands_flags);
+  iwl_init_sbands(trans, nvm, rsp->regulatory.channel_profile, nvm->valid_tx_ant & fw->valid_tx_ant,
+                  nvm->valid_rx_ant & fw->valid_rx_ant, sbands_flags);
 
-    iwl_free_resp(&hcmd);
-    return nvm;
+  iwl_free_resp(&hcmd);
+  return nvm;
 
 err_free:
-    kfree(nvm);
+  kfree(nvm);
 out:
-    iwl_free_resp(&hcmd);
-    return ERR_PTR(ret);
+  iwl_free_resp(&hcmd);
+  return ERR_PTR(ret);
 }
 IWL_EXPORT_SYMBOL(iwl_get_nvm);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-nvm-parse.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-nvm-parse.h
index 395399d..22c2fc8 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-nvm-parse.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-nvm-parse.h
@@ -43,8 +43,8 @@
  * @IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ: disallow 40, 80 and 160MHz on 5GHz
  */
 enum iwl_nvm_sbands_flags {
-    IWL_NVM_SBANDS_FLAGS_LAR = BIT(0),
-    IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1),
+  IWL_NVM_SBANDS_FLAGS_LAR = BIT(0),
+  IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1),
 };
 
 /**
@@ -84,8 +84,8 @@
  * this way, only the needed ones.
  */
 struct iwl_nvm_section {
-    uint16_t length;
-    const uint8_t* data;
+  uint16_t length;
+  const uint8_t* data;
 };
 
 /**
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-op-mode.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-op-mode.h
index d852301..ee20257 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-op-mode.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-op-mode.h
@@ -92,11 +92,11 @@
  * inorder to handle logic that is out of the scope of iwl_test.
  */
 struct iwl_test_ops {
-    int (*send_hcmd)(void* op_mode, struct iwl_host_cmd* host_cmd);
-    int (*cmd_exec_start)(struct iwl_testmode* testmode);
-    int (*cmd_exec)(struct iwl_testmode* testmode, uint32_t cmd, struct iwl_tm_data* data_in,
-                    struct iwl_tm_data* data_out, bool* cmd_supported);
-    void (*cmd_exec_end)(struct iwl_testmode* testmode);
+  int (*send_hcmd)(void* op_mode, struct iwl_host_cmd* host_cmd);
+  int (*cmd_exec_start)(struct iwl_testmode* testmode);
+  int (*cmd_exec)(struct iwl_testmode* testmode, uint32_t cmd, struct iwl_tm_data* data_in,
+                  struct iwl_tm_data* data_out, bool* cmd_supported);
+  void (*cmd_exec_end)(struct iwl_testmode* testmode);
 };
 #endif
 
@@ -145,27 +145,26 @@
  * @exit_d0i3: configure the fw to exit d0i3. May sleep.
  */
 struct iwl_op_mode_ops {
-    struct iwl_op_mode* (*start)(struct iwl_trans* trans, const struct iwl_cfg* cfg,
-                                 const struct iwl_fw* fw, struct dentry* dbgfs_dir);
-    void (*stop)(struct iwl_op_mode* op_mode);
-    void (*rx)(struct iwl_op_mode* op_mode, struct napi_struct* napi,
-               struct iwl_rx_cmd_buffer* rxb);
-    void (*rx_rss)(struct iwl_op_mode* op_mode, struct napi_struct* napi,
-                   struct iwl_rx_cmd_buffer* rxb, unsigned int queue);
-    void (*async_cb)(struct iwl_op_mode* op_mode, const struct iwl_device_cmd* cmd);
-    void (*queue_full)(struct iwl_op_mode* op_mode, int queue);
-    void (*queue_not_full)(struct iwl_op_mode* op_mode, int queue);
-    bool (*hw_rf_kill)(struct iwl_op_mode* op_mode, bool state);
-    void (*free_skb)(struct iwl_op_mode* op_mode, struct sk_buff* skb);
-    void (*nic_error)(struct iwl_op_mode* op_mode);
-    void (*cmd_queue_full)(struct iwl_op_mode* op_mode);
-    void (*nic_config)(struct iwl_op_mode* op_mode);
-    void (*wimax_active)(struct iwl_op_mode* op_mode);
+  struct iwl_op_mode* (*start)(struct iwl_trans* trans, const struct iwl_cfg* cfg,
+                               const struct iwl_fw* fw, struct dentry* dbgfs_dir);
+  void (*stop)(struct iwl_op_mode* op_mode);
+  void (*rx)(struct iwl_op_mode* op_mode, struct napi_struct* napi, struct iwl_rx_cmd_buffer* rxb);
+  void (*rx_rss)(struct iwl_op_mode* op_mode, struct napi_struct* napi,
+                 struct iwl_rx_cmd_buffer* rxb, unsigned int queue);
+  void (*async_cb)(struct iwl_op_mode* op_mode, const struct iwl_device_cmd* cmd);
+  void (*queue_full)(struct iwl_op_mode* op_mode, int queue);
+  void (*queue_not_full)(struct iwl_op_mode* op_mode, int queue);
+  bool (*hw_rf_kill)(struct iwl_op_mode* op_mode, bool state);
+  void (*free_skb)(struct iwl_op_mode* op_mode, struct sk_buff* skb);
+  void (*nic_error)(struct iwl_op_mode* op_mode);
+  void (*cmd_queue_full)(struct iwl_op_mode* op_mode);
+  void (*nic_config)(struct iwl_op_mode* op_mode);
+  void (*wimax_active)(struct iwl_op_mode* op_mode);
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
-    struct iwl_test_ops test_ops;
+  struct iwl_test_ops test_ops;
 #endif
-    int (*enter_d0i3)(struct iwl_op_mode* op_mode);
-    int (*exit_d0i3)(struct iwl_op_mode* op_mode);
+  int (*enter_d0i3)(struct iwl_op_mode* op_mode);
+  int (*exit_d0i3)(struct iwl_op_mode* op_mode);
 };
 
 int iwl_opmode_register(const char* name, const struct iwl_op_mode_ops* ops);
@@ -178,78 +177,84 @@
  * This holds an implementation of the mac80211 / fw API.
  */
 struct iwl_op_mode {
-    const struct iwl_op_mode_ops* ops;
+  const struct iwl_op_mode_ops* ops;
 
-    void* op_mode_specific;
+  void* op_mode_specific;
 };
 
 static inline void iwl_op_mode_stop(struct iwl_op_mode* op_mode) {
-    might_sleep();
-    op_mode->ops->stop(op_mode);
+  might_sleep();
+  op_mode->ops->stop(op_mode);
 }
 
 static inline void iwl_op_mode_rx(struct iwl_op_mode* op_mode, struct napi_struct* napi,
                                   struct iwl_rx_cmd_buffer* rxb) {
-    return op_mode->ops->rx(op_mode, napi, rxb);
+  return op_mode->ops->rx(op_mode, napi, rxb);
 }
 
 static inline void iwl_op_mode_rx_rss(struct iwl_op_mode* op_mode, struct napi_struct* napi,
                                       struct iwl_rx_cmd_buffer* rxb, unsigned int queue) {
-    op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
+  op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
 }
 
 static inline void iwl_op_mode_async_cb(struct iwl_op_mode* op_mode,
                                         const struct iwl_device_cmd* cmd) {
-    if (op_mode->ops->async_cb) { op_mode->ops->async_cb(op_mode, cmd); }
+  if (op_mode->ops->async_cb) {
+    op_mode->ops->async_cb(op_mode, cmd);
+  }
 }
 
 static inline void iwl_op_mode_queue_full(struct iwl_op_mode* op_mode, int queue) {
-    op_mode->ops->queue_full(op_mode, queue);
+  op_mode->ops->queue_full(op_mode, queue);
 }
 
 static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode* op_mode, int queue) {
-    op_mode->ops->queue_not_full(op_mode, queue);
+  op_mode->ops->queue_not_full(op_mode, queue);
 }
 
 static inline bool __must_check iwl_op_mode_hw_rf_kill(struct iwl_op_mode* op_mode, bool state) {
-    might_sleep();
-    return op_mode->ops->hw_rf_kill(op_mode, state);
+  might_sleep();
+  return op_mode->ops->hw_rf_kill(op_mode, state);
 }
 
 static inline void iwl_op_mode_free_skb(struct iwl_op_mode* op_mode, struct sk_buff* skb) {
-    op_mode->ops->free_skb(op_mode, skb);
+  op_mode->ops->free_skb(op_mode, skb);
 }
 
 static inline void iwl_op_mode_nic_error(struct iwl_op_mode* op_mode) {
-    op_mode->ops->nic_error(op_mode);
+  op_mode->ops->nic_error(op_mode);
 }
 
 static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode* op_mode) {
-    op_mode->ops->cmd_queue_full(op_mode);
+  op_mode->ops->cmd_queue_full(op_mode);
 }
 
 static inline void iwl_op_mode_nic_config(struct iwl_op_mode* op_mode) {
-    might_sleep();
-    op_mode->ops->nic_config(op_mode);
+  might_sleep();
+  op_mode->ops->nic_config(op_mode);
 }
 
 static inline void iwl_op_mode_wimax_active(struct iwl_op_mode* op_mode) {
-    might_sleep();
-    op_mode->ops->wimax_active(op_mode);
+  might_sleep();
+  op_mode->ops->wimax_active(op_mode);
 }
 
 static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode* op_mode) {
-    might_sleep();
+  might_sleep();
 
-    if (!op_mode->ops->enter_d0i3) { return 0; }
-    return op_mode->ops->enter_d0i3(op_mode);
+  if (!op_mode->ops->enter_d0i3) {
+    return 0;
+  }
+  return op_mode->ops->enter_d0i3(op_mode);
 }
 
 static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode* op_mode) {
-    might_sleep();
+  might_sleep();
 
-    if (!op_mode->ops->exit_d0i3) { return 0; }
-    return op_mode->ops->exit_d0i3(op_mode);
+  if (!op_mode->ops->exit_d0i3) {
+    return 0;
+  }
+  return op_mode->ops->exit_d0i3(op_mode);
 }
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_IWL_OP_MODE_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-phy-db.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-phy-db.c
index 9597a19..fb99f01 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-phy-db.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-phy-db.c
@@ -31,17 +31,18 @@
  *
  *****************************************************************************/
 
+#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-phy-db.h"
+
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-debug.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-drv.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-op-mode.h"
-#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-phy-db.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h"
 
 #define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
 
 struct iwl_phy_db_entry {
-    uint16_t size;
-    uint8_t* data;
+  uint16_t size;
+  uint8_t* data;
 };
 
 /**
@@ -56,45 +57,47 @@
  * @calib_ch_group_txp: calibration data related to tx power chanel group.
  */
 struct iwl_phy_db {
-    struct iwl_phy_db_entry cfg;
-    struct iwl_phy_db_entry calib_nch;
-    int n_group_papd;
-    struct iwl_phy_db_entry* calib_ch_group_papd;
-    int n_group_txp;
-    struct iwl_phy_db_entry* calib_ch_group_txp;
+  struct iwl_phy_db_entry cfg;
+  struct iwl_phy_db_entry calib_nch;
+  int n_group_papd;
+  struct iwl_phy_db_entry* calib_ch_group_papd;
+  int n_group_txp;
+  struct iwl_phy_db_entry* calib_ch_group_txp;
 
-    struct iwl_trans* trans;
+  struct iwl_trans* trans;
 };
 
 enum iwl_phy_db_section_type {
-    IWL_PHY_DB_CFG = 1,
-    IWL_PHY_DB_CALIB_NCH,
-    IWL_PHY_DB_UNUSED,
-    IWL_PHY_DB_CALIB_CHG_PAPD,
-    IWL_PHY_DB_CALIB_CHG_TXP,
-    IWL_PHY_DB_MAX
+  IWL_PHY_DB_CFG = 1,
+  IWL_PHY_DB_CALIB_NCH,
+  IWL_PHY_DB_UNUSED,
+  IWL_PHY_DB_CALIB_CHG_PAPD,
+  IWL_PHY_DB_CALIB_CHG_TXP,
+  IWL_PHY_DB_MAX
 };
 
 #define PHY_DB_CMD 0x6c
 
 /* for parsing of tx power channel group data that comes from the firmware*/
 struct iwl_phy_db_chg_txp {
-    __le32 space;
-    __le16 max_channel_idx;
+  __le32 space;
+  __le16 max_channel_idx;
 } __packed;
 
 struct iwl_phy_db* iwl_phy_db_init(struct iwl_trans* trans) {
-    struct iwl_phy_db* phy_db = calloc(1, sizeof(struct iwl_phy_db));
+  struct iwl_phy_db* phy_db = calloc(1, sizeof(struct iwl_phy_db));
 
-    if (!phy_db) { return phy_db; }
-
-    phy_db->trans = trans;
-
-    phy_db->n_group_txp = -1;
-    phy_db->n_group_papd = -1;
-
-    /* TODO: add default values of the phy db. */
+  if (!phy_db) {
     return phy_db;
+  }
+
+  phy_db->trans = trans;
+
+  phy_db->n_group_txp = -1;
+  phy_db->n_group_papd = -1;
+
+  /* TODO: add default values of the phy db. */
+  return phy_db;
 }
 
 /*
@@ -104,57 +107,67 @@
 static struct iwl_phy_db_entry* iwl_phy_db_get_section(struct iwl_phy_db* phy_db,
                                                        enum iwl_phy_db_section_type type,
                                                        uint16_t chg_id) {
-    if (!phy_db || type >= IWL_PHY_DB_MAX) { return NULL; }
-
-    switch (type) {
-    case IWL_PHY_DB_CFG:
-        return &phy_db->cfg;
-    case IWL_PHY_DB_CALIB_NCH:
-        return &phy_db->calib_nch;
-    case IWL_PHY_DB_CALIB_CHG_PAPD:
-        if (chg_id >= phy_db->n_group_papd) { return NULL; }
-        return &phy_db->calib_ch_group_papd[chg_id];
-    case IWL_PHY_DB_CALIB_CHG_TXP:
-        if (chg_id >= phy_db->n_group_txp) { return NULL; }
-        return &phy_db->calib_ch_group_txp[chg_id];
-    default:
-        return NULL;
-    }
+  if (!phy_db || type >= IWL_PHY_DB_MAX) {
     return NULL;
+  }
+
+  switch (type) {
+    case IWL_PHY_DB_CFG:
+      return &phy_db->cfg;
+    case IWL_PHY_DB_CALIB_NCH:
+      return &phy_db->calib_nch;
+    case IWL_PHY_DB_CALIB_CHG_PAPD:
+      if (chg_id >= phy_db->n_group_papd) {
+        return NULL;
+      }
+      return &phy_db->calib_ch_group_papd[chg_id];
+    case IWL_PHY_DB_CALIB_CHG_TXP:
+      if (chg_id >= phy_db->n_group_txp) {
+        return NULL;
+      }
+      return &phy_db->calib_ch_group_txp[chg_id];
+    default:
+      return NULL;
+  }
+  return NULL;
 }
 
 static void iwl_phy_db_free_section(struct iwl_phy_db* phy_db, enum iwl_phy_db_section_type type,
                                     uint16_t chg_id) {
-    struct iwl_phy_db_entry* entry = iwl_phy_db_get_section(phy_db, type, chg_id);
-    if (!entry) { return; }
+  struct iwl_phy_db_entry* entry = iwl_phy_db_get_section(phy_db, type, chg_id);
+  if (!entry) {
+    return;
+  }
 
-    kfree(entry->data);
-    entry->data = NULL;
-    entry->size = 0;
+  kfree(entry->data);
+  entry->data = NULL;
+  entry->size = 0;
 }
 
 void iwl_phy_db_free(struct iwl_phy_db* phy_db) {
-    int i;
+  int i;
 
-    if (!phy_db) { return; }
+  if (!phy_db) {
+    return;
+  }
 
-    iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
-    iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
+  iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
+  iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
 
-    for (i = 0; i < phy_db->n_group_papd; i++) {
-        iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
-    }
-    kfree(phy_db->calib_ch_group_papd);
+  for (i = 0; i < phy_db->n_group_papd; i++) {
+    iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
+  }
+  kfree(phy_db->calib_ch_group_papd);
 
-    for (i = 0; i < phy_db->n_group_txp; i++) {
-        iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
-    }
-    kfree(phy_db->calib_ch_group_txp);
+  for (i = 0; i < phy_db->n_group_txp; i++) {
+    iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
+  }
+  kfree(phy_db->calib_ch_group_txp);
 
-    kfree(phy_db);
+  kfree(phy_db);
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 int iwl_phy_db_set_section(struct iwl_phy_db* phy_db, struct iwl_rx_packet* pkt) {
     struct iwl_calib_res_notif_phy_db* phy_db_notif = (struct iwl_calib_res_notif_phy_db*)pkt->data;
     enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-phy-db.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-phy-db.h
index 096513f..fccae46 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-phy-db.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-phy-db.h
@@ -34,6 +34,11 @@
 #ifndef SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_IWL_PHY_DB_H_
 #define SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_IWL_PHY_DB_H_
 
+// This file must be included before all header files.
+// clang-format off
+#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h"
+// clang-format on
+
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-op-mode.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h"
 
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-prph.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-prph.h
index 8f3ea40..7df91fd 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-prph.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-prph.h
@@ -354,18 +354,18 @@
 #define WFPM_CTRL_REG 0xA03030
 #define WFPM_GP2 0xA030B4
 enum {
-    ENABLE_WFPM = BIT(31),
-    WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
+  ENABLE_WFPM = BIT(31),
+  WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
 };
 
 #define AUX_MISC_REG 0xA200B0
 enum {
-    HW_STEP_LOCATION_BITS = 24,
+  HW_STEP_LOCATION_BITS = 24,
 };
 
 #define AUX_MISC_MASTER1_EN 0xA20818
 enum aux_misc_master1_en {
-    AUX_MISC_MASTER1_EN_SBE_MSK = 0x1,
+  AUX_MISC_MASTER1_EN_SBE_MSK = 0x1,
 };
 
 #define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
@@ -384,13 +384,13 @@
 
 /* For UMAG_GEN_HW_STATUS reg check */
 enum {
-    UMAG_GEN_HW_IS_FPGA = BIT(1),
+  UMAG_GEN_HW_IS_FPGA = BIT(1),
 };
 
 /* FW chicken bits */
 #define LMPM_CHICK 0xA01FF8
 enum {
-    LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
+  LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
 };
 
 #define UREG_CHICK (0xA05C00)
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-scd.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-scd.h
index 517aec3..e04f8df 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-scd.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-scd.h
@@ -39,54 +39,60 @@
 #include "iwl-trans.h"
 
 static inline void iwl_scd_txq_set_chain(struct iwl_trans* trans, uint16_t txq_id) {
-    iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
+  iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
 }
 
 static inline void iwl_scd_txq_enable_agg(struct iwl_trans* trans, uint16_t txq_id) {
-    iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+  iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
 }
 
 static inline void iwl_scd_txq_disable_agg(struct iwl_trans* trans, uint16_t txq_id) {
-    iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+  iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
 }
 
 static inline void iwl_scd_disable_agg(struct iwl_trans* trans) {
-    iwl_set_bits_prph(trans, SCD_AGGR_SEL, 0);
+  iwl_set_bits_prph(trans, SCD_AGGR_SEL, 0);
 }
 
 static inline void iwl_scd_activate_fifos(struct iwl_trans* trans) {
-    iwl_write_prph(trans, SCD_TXFACT, IWL_MASK(0, 7));
+  iwl_write_prph(trans, SCD_TXFACT, IWL_MASK(0, 7));
 }
 
 static inline void iwl_scd_deactivate_fifos(struct iwl_trans* trans) {
-    iwl_write_prph(trans, SCD_TXFACT, 0);
+  iwl_write_prph(trans, SCD_TXFACT, 0);
 }
 
 static inline void iwl_scd_enable_set_active(struct iwl_trans* trans, uint32_t value) {
-    iwl_write_prph(trans, SCD_EN_CTRL, value);
+  iwl_write_prph(trans, SCD_EN_CTRL, value);
 }
 
 static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl) {
-    if (chnl < 20) { return SCD_BASE + 0x18 + chnl * 4; }
-    WARN_ON_ONCE(chnl >= 32);
-    return SCD_BASE + 0x284 + (chnl - 20) * 4;
+  if (chnl < 20) {
+    return SCD_BASE + 0x18 + chnl * 4;
+  }
+  WARN_ON_ONCE(chnl >= 32);
+  return SCD_BASE + 0x284 + (chnl - 20) * 4;
 }
 
 static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl) {
-    if (chnl < 20) { return SCD_BASE + 0x68 + chnl * 4; }
-    WARN_ON_ONCE(chnl >= 32);
-    return SCD_BASE + 0x2B4 + chnl * 4;
+  if (chnl < 20) {
+    return SCD_BASE + 0x68 + chnl * 4;
+  }
+  WARN_ON_ONCE(chnl >= 32);
+  return SCD_BASE + 0x2B4 + chnl * 4;
 }
 
 static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl) {
-    if (chnl < 20) { return SCD_BASE + 0x10c + chnl * 4; }
-    WARN_ON_ONCE(chnl >= 32);
-    return SCD_BASE + 0x334 + chnl * 4;
+  if (chnl < 20) {
+    return SCD_BASE + 0x10c + chnl * 4;
+  }
+  WARN_ON_ONCE(chnl >= 32);
+  return SCD_BASE + 0x334 + chnl * 4;
 }
 
 static inline void iwl_scd_txq_set_inactive(struct iwl_trans* trans, uint16_t txq_id) {
-    iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
-                   (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+  iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+                 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
 }
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_IWL_SCD_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-gnl.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-gnl.c
index 354e819..9e0a8d4 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-gnl.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-gnl.c
@@ -35,8 +35,10 @@
  *****************************************************************************/
 
 #include "iwl-tm-gnl.h"
+
 #include <linux/export.h>
 #include <net/genetlink.h>
+
 #include "iwl-csr.h"
 #include "iwl-dnt-cfg.h"
 #include "iwl-dnt-dispatch.h"
@@ -54,17 +56,21 @@
  *
  */
 static int iwl_tm_validate_fw_cmd(struct iwl_tm_data* data_in) {
-    struct iwl_tm_cmd_request* cmd_req;
-    uint32_t data_buf_size;
+  struct iwl_tm_cmd_request* cmd_req;
+  uint32_t data_buf_size;
 
-    if (!data_in->data || (data_in->len < sizeof(struct iwl_tm_cmd_request))) { return -EINVAL; }
+  if (!data_in->data || (data_in->len < sizeof(struct iwl_tm_cmd_request))) {
+    return -EINVAL;
+  }
 
-    cmd_req = (struct iwl_tm_cmd_request*)data_in->data;
+  cmd_req = (struct iwl_tm_cmd_request*)data_in->data;
 
-    data_buf_size = data_in->len - sizeof(struct iwl_tm_cmd_request);
-    if (data_buf_size < cmd_req->len) { return -EINVAL; }
+  data_buf_size = data_in->len - sizeof(struct iwl_tm_cmd_request);
+  if (data_buf_size < cmd_req->len) {
+    return -EINVAL;
+  }
 
-    return 0;
+  return 0;
 }
 
 /**
@@ -73,35 +79,43 @@
  *      the size of the request struct in bytes.
  */
 static int iwl_tm_validate_reg_ops(struct iwl_tm_data* data_in) {
-    struct iwl_tm_regs_request* request;
-    uint32_t request_size;
-    uint32_t idx;
+  struct iwl_tm_regs_request* request;
+  uint32_t request_size;
+  uint32_t idx;
 
-    if (!data_in->data || (data_in->len < sizeof(struct iwl_tm_regs_request))) { return -EINVAL; }
+  if (!data_in->data || (data_in->len < sizeof(struct iwl_tm_regs_request))) {
+    return -EINVAL;
+  }
 
-    request = (struct iwl_tm_regs_request*)(data_in->data);
-    request_size = sizeof(struct iwl_tm_regs_request) + request->num * sizeof(struct iwl_tm_reg_op);
-    if (data_in->len < request_size) { return -EINVAL; }
+  request = (struct iwl_tm_regs_request*)(data_in->data);
+  request_size = sizeof(struct iwl_tm_regs_request) + request->num * sizeof(struct iwl_tm_reg_op);
+  if (data_in->len < request_size) {
+    return -EINVAL;
+  }
 
-    /*
-     * Calculate result size - result is returned only for read ops
-     * Also, verifying inputs
-     */
-    for (idx = 0; idx < request->num; idx++) {
-        if (request->reg_ops[idx].op_type >= IWL_TM_REG_OP_MAX) { return -EINVAL; }
-
-        /*
-         * Allow access only to FH/CSR/HBUS in direct mode.
-         * Since we don't have the upper bounds for the CSR
-         * and HBUS segments, we will use only the upper
-         * bound of FH for sanity check.
-         */
-        if (!IS_AL_ADDR(request->reg_ops[idx].address)) {
-            if (request->reg_ops[idx].address >= FH_MEM_UPPER_BOUND) { return -EINVAL; }
-        }
+  /*
+   * Calculate result size - result is returned only for read ops
+   * Also, verifying inputs
+   */
+  for (idx = 0; idx < request->num; idx++) {
+    if (request->reg_ops[idx].op_type >= IWL_TM_REG_OP_MAX) {
+      return -EINVAL;
     }
 
-    return 0;
+    /*
+     * Allow access only to FH/CSR/HBUS in direct mode.
+     * Since we don't have the upper bounds for the CSR
+     * and HBUS segments, we will use only the upper
+     * bound of FH for sanity check.
+     */
+    if (!IS_AL_ADDR(request->reg_ops[idx].address)) {
+      if (request->reg_ops[idx].address >= FH_MEM_UPPER_BOUND) {
+        return -EINVAL;
+      }
+    }
+  }
+
+  return 0;
 }
 
 /**
@@ -109,17 +123,19 @@
  * @dev: testmode device struct
  */
 static int iwl_tm_trace_end(struct iwl_tm_gnl_dev* dev) {
-    struct iwl_trans* trans = dev->trans;
-    struct iwl_test_trace* trace = &dev->tst.trace;
+  struct iwl_trans* trans = dev->trans;
+  struct iwl_test_trace* trace = &dev->tst.trace;
 
-    if (!trace->enabled) { return -EILSEQ; }
+  if (!trace->enabled) {
+    return -EILSEQ;
+  }
 
-    if (trace->cpu_addr && trace->dma_addr) {
-        dma_free_coherent(trans->dev, trace->size, trace->cpu_addr, trace->dma_addr);
-    }
-    memset(trace, 0, sizeof(struct iwl_test_trace));
+  if (trace->cpu_addr && trace->dma_addr) {
+    dma_free_coherent(trans->dev, trace->size, trace->cpu_addr, trace->dma_addr);
+  }
+  memset(trace, 0, sizeof(struct iwl_test_trace));
 
-    return 0;
+  return 0;
 }
 
 /**
@@ -130,37 +146,41 @@
  */
 static int iwl_tm_trace_begin(struct iwl_tm_gnl_dev* dev, struct iwl_tm_data* data_in,
                               struct iwl_tm_data* data_out) {
-    struct iwl_tm_trace_request* req = data_in->data;
-    struct iwl_tm_trace_request* resp;
+  struct iwl_tm_trace_request* req = data_in->data;
+  struct iwl_tm_trace_request* resp;
 
-    if (!data_in->data || data_in->len < sizeof(struct iwl_tm_trace_request)) { return -EINVAL; }
+  if (!data_in->data || data_in->len < sizeof(struct iwl_tm_trace_request)) {
+    return -EINVAL;
+  }
 
-    req = data_in->data;
+  req = data_in->data;
 
-    /* size zero means use the default */
-    if (!req->size) {
-        req->size = TRACE_BUFF_SIZE_DEF;
-    } else if (req->size < TRACE_BUFF_SIZE_MIN || req->size > TRACE_BUFF_SIZE_MAX) {
-        return -EINVAL;
-    } else if (!dev->dnt->mon_buf_cpu_addr) {
-        return -ENOMEM;
-    }
+  /* size zero means use the default */
+  if (!req->size) {
+    req->size = TRACE_BUFF_SIZE_DEF;
+  } else if (req->size < TRACE_BUFF_SIZE_MIN || req->size > TRACE_BUFF_SIZE_MAX) {
+    return -EINVAL;
+  } else if (!dev->dnt->mon_buf_cpu_addr) {
+    return -ENOMEM;
+  }
 
-    resp = kmalloc(sizeof(*resp), GFP_KERNEL);
-    if (!resp) { return -ENOMEM; }
-    resp->size = dev->dnt->mon_buf_size;
-    /* Casting to avoid compilation warnings when DMA address is 32bit */
-    resp->addr = (uint64_t)dev->dnt->mon_base_addr;
+  resp = kmalloc(sizeof(*resp), GFP_KERNEL);
+  if (!resp) {
+    return -ENOMEM;
+  }
+  resp->size = dev->dnt->mon_buf_size;
+  /* Casting to avoid compilation warnings when DMA address is 32bit */
+  resp->addr = (uint64_t)dev->dnt->mon_base_addr;
 
-    data_out->data = resp;
-    data_out->len = sizeof(*resp);
+  data_out->data = resp;
+  data_out->len = sizeof(*resp);
 
-    return 0;
+  return 0;
 }
 
 static bool iwl_tm_gnl_valid_hw_addr(uint32_t addr) {
-    /* TODO need to implement */
-    return true;
+  /* TODO need to implement */
+  return true;
 }
 
 /**
@@ -169,31 +189,34 @@
  * @data_in:    SRAM access request
  */
 static int iwl_tm_validate_sram_write_req(struct iwl_tm_gnl_dev* dev, struct iwl_tm_data* data_in) {
-    struct iwl_tm_sram_write_request* cmd_in;
-    uint32_t data_buf_size;
+  struct iwl_tm_sram_write_request* cmd_in;
+  uint32_t data_buf_size;
 
-    if (!dev->trans->op_mode) {
-        IWL_ERR(dev->trans, "No op_mode!\n");
-        return -ENODEV;
-    }
+  if (!dev->trans->op_mode) {
+    IWL_ERR(dev->trans, "No op_mode!\n");
+    return -ENODEV;
+  }
 
-    if (!data_in->data || data_in->len < sizeof(struct iwl_tm_sram_write_request)) {
-        return -EINVAL;
-    }
-
-    cmd_in = data_in->data;
-
-    data_buf_size = data_in->len - sizeof(struct iwl_tm_sram_write_request);
-    if (data_buf_size < cmd_in->len) { return -EINVAL; }
-
-    if (iwl_tm_gnl_valid_hw_addr(cmd_in->offset)) { return 0; }
-
-    if ((cmd_in->offset < IWL_ABS_PRPH_START) &&
-        (cmd_in->offset >= IWL_ABS_PRPH_START + PRPH_END)) {
-        return 0;
-    }
-
+  if (!data_in->data || data_in->len < sizeof(struct iwl_tm_sram_write_request)) {
     return -EINVAL;
+  }
+
+  cmd_in = data_in->data;
+
+  data_buf_size = data_in->len - sizeof(struct iwl_tm_sram_write_request);
+  if (data_buf_size < cmd_in->len) {
+    return -EINVAL;
+  }
+
+  if (iwl_tm_gnl_valid_hw_addr(cmd_in->offset)) {
+    return 0;
+  }
+
+  if ((cmd_in->offset < IWL_ABS_PRPH_START) && (cmd_in->offset >= IWL_ABS_PRPH_START + PRPH_END)) {
+    return 0;
+  }
+
+  return -EINVAL;
 }
 
 /**
@@ -202,27 +225,28 @@
  * @data_in:    SRAM access request
  */
 static int iwl_tm_validate_sram_read_req(struct iwl_tm_gnl_dev* dev, struct iwl_tm_data* data_in) {
-    struct iwl_tm_sram_read_request* cmd_in;
+  struct iwl_tm_sram_read_request* cmd_in;
 
-    if (!dev->trans->op_mode) {
-        IWL_ERR(dev->trans, "No op_mode!\n");
-        return -ENODEV;
-    }
+  if (!dev->trans->op_mode) {
+    IWL_ERR(dev->trans, "No op_mode!\n");
+    return -ENODEV;
+  }
 
-    if (!data_in->data || data_in->len < sizeof(struct iwl_tm_sram_read_request)) {
-        return -EINVAL;
-    }
-
-    cmd_in = data_in->data;
-
-    if (iwl_tm_gnl_valid_hw_addr(cmd_in->offset)) { return 0; }
-
-    if ((cmd_in->offset < IWL_ABS_PRPH_START) &&
-        (cmd_in->offset >= IWL_ABS_PRPH_START + PRPH_END)) {
-        return 0;
-    }
-
+  if (!data_in->data || data_in->len < sizeof(struct iwl_tm_sram_read_request)) {
     return -EINVAL;
+  }
+
+  cmd_in = data_in->data;
+
+  if (iwl_tm_gnl_valid_hw_addr(cmd_in->offset)) {
+    return 0;
+  }
+
+  if ((cmd_in->offset < IWL_ABS_PRPH_START) && (cmd_in->offset >= IWL_ABS_PRPH_START + PRPH_END)) {
+    return 0;
+  }
+
+  return -EINVAL;
 }
 
 /**
@@ -231,18 +255,20 @@
  * @data_in:    uint32_t notification (flag)
  */
 static int iwl_tm_notifications_en(struct iwl_test* tst, struct iwl_tm_data* data_in) {
-    uint32_t notification_en;
+  uint32_t notification_en;
 
-    if (!data_in->data || (data_in->len != sizeof(uint32_t))) { return -EINVAL; }
+  if (!data_in->data || (data_in->len != sizeof(uint32_t))) {
+    return -EINVAL;
+  }
 
-    notification_en = *(uint32_t*)data_in->data;
-    if ((notification_en != NOTIFICATIONS_ENABLE) && (notification_en != NOTIFICATIONS_DISABLE)) {
-        return -EINVAL;
-    }
+  notification_en = *(uint32_t*)data_in->data;
+  if ((notification_en != NOTIFICATIONS_ENABLE) && (notification_en != NOTIFICATIONS_DISABLE)) {
+    return -EINVAL;
+  }
 
-    tst->notify = notification_en == NOTIFICATIONS_ENABLE;
+  tst->notify = notification_en == NOTIFICATIONS_ENABLE;
 
-    return 0;
+  return 0;
 }
 
 /**
@@ -251,19 +277,25 @@
  *
  */
 static int iwl_tm_validate_tx_cmd(struct iwl_tm_data* data_in) {
-    struct iwl_tm_mod_tx_request* cmd_req;
-    uint32_t data_buf_size;
+  struct iwl_tm_mod_tx_request* cmd_req;
+  uint32_t data_buf_size;
 
-    if (!data_in->data || (data_in->len < sizeof(struct iwl_tm_mod_tx_request))) { return -EINVAL; }
+  if (!data_in->data || (data_in->len < sizeof(struct iwl_tm_mod_tx_request))) {
+    return -EINVAL;
+  }
 
-    cmd_req = (struct iwl_tm_mod_tx_request*)data_in->data;
+  cmd_req = (struct iwl_tm_mod_tx_request*)data_in->data;
 
-    data_buf_size = data_in->len - sizeof(struct iwl_tm_mod_tx_request);
-    if (data_buf_size < cmd_req->len) { return -EINVAL; }
+  data_buf_size = data_in->len - sizeof(struct iwl_tm_mod_tx_request);
+  if (data_buf_size < cmd_req->len) {
+    return -EINVAL;
+  }
 
-    if (cmd_req->sta_id >= IWL_TM_STATION_COUNT) { return -EINVAL; }
+  if (cmd_req->sta_id >= IWL_TM_STATION_COUNT) {
+    return -EINVAL;
+  }
 
-    return 0;
+  return 0;
 }
 
 /**
@@ -272,16 +304,14 @@
  *
  */
 static int iwl_tm_validate_rx_hdrs_mode_req(struct iwl_tm_data* data_in) {
-    if (!data_in->data || (data_in->len < sizeof(struct iwl_xvt_rx_hdrs_mode_request))) {
-        return -EINVAL;
-    }
+  if (!data_in->data || (data_in->len < sizeof(struct iwl_xvt_rx_hdrs_mode_request))) {
+    return -EINVAL;
+  }
 
-    return 0;
+  return 0;
 }
 
-static int iwl_tm_validate_get_chip_id(struct iwl_trans* trans) {
-    return 0;
-}
+static int iwl_tm_validate_get_chip_id(struct iwl_trans* trans) { return 0; }
 
 /**
  * iwl_tm_validate_apmg_pd_mode_req() - Validates apmg rx mode request
@@ -289,112 +319,123 @@
  *
  */
 static int iwl_tm_validate_apmg_pd_mode_req(struct iwl_tm_data* data_in) {
-    if (!data_in->data || (data_in->len != sizeof(struct iwl_xvt_apmg_pd_mode_request))) {
-        return -EINVAL;
-    }
+  if (!data_in->data || (data_in->len != sizeof(struct iwl_xvt_apmg_pd_mode_request))) {
+    return -EINVAL;
+  }
 
-    return 0;
+  return 0;
 }
 
 static int iwl_tm_get_device_status(struct iwl_tm_gnl_dev* dev, struct iwl_tm_data* data_in,
                                     struct iwl_tm_data* data_out) {
-    __u32* status;
+  __u32* status;
 
-    status = kmalloc(sizeof(__u32), GFP_KERNEL);
-    if (!status) { return -ENOMEM; }
+  status = kmalloc(sizeof(__u32), GFP_KERNEL);
+  if (!status) {
+    return -ENOMEM;
+  }
 
-    *status = dev->dnt->iwl_dnt_status;
+  *status = dev->dnt->iwl_dnt_status;
 
-    data_out->data = status;
-    data_out->len = sizeof(__u32);
+  data_out->data = status;
+  data_out->len = sizeof(__u32);
 
-    return 0;
+  return 0;
 }
 
 #if IS_ENABLED(CPTCFG_IWLXVT)
 static int iwl_tm_switch_op_mode(struct iwl_tm_gnl_dev* dev, struct iwl_tm_data* data_in) {
-    struct iwl_switch_op_mode* switch_cmd = data_in->data;
-    struct iwl_drv* drv;
-    int ret = 0;
+  struct iwl_switch_op_mode* switch_cmd = data_in->data;
+  struct iwl_drv* drv;
+  int ret = 0;
 
-    if (data_in->len < sizeof(*switch_cmd)) { return -EINVAL; }
+  if (data_in->len < sizeof(*switch_cmd)) {
+    return -EINVAL;
+  }
 
-    drv = iwl_drv_get_dev_container(dev->trans->dev);
-    if (!drv) {
-        IWL_ERR(dev->trans, "Couldn't retrieve device information\n");
-        return -ENODEV;
-    }
+  drv = iwl_drv_get_dev_container(dev->trans->dev);
+  if (!drv) {
+    IWL_ERR(dev->trans, "Couldn't retrieve device information\n");
+    return -ENODEV;
+  }
 
-    /* Executing switch command */
-    ret = iwl_drv_switch_op_mode(drv, switch_cmd->new_op_mode);
+  /* Executing switch command */
+  ret = iwl_drv_switch_op_mode(drv, switch_cmd->new_op_mode);
 
-    if (ret < 0)
-        IWL_ERR(dev->trans, "Failed to switch op mode to %s (err:%d)\n", switch_cmd->new_op_mode,
-                ret);
+  if (ret < 0)
+    IWL_ERR(dev->trans, "Failed to switch op mode to %s (err:%d)\n", switch_cmd->new_op_mode, ret);
 
-    return ret;
+  return ret;
 }
 #endif
 
 static int iwl_tm_gnl_get_sil_step(struct iwl_trans* trans, struct iwl_tm_data* data_out) {
-    struct iwl_sil_step* resp;
-    data_out->data = kmalloc(sizeof(struct iwl_sil_step), GFP_KERNEL);
-    if (!data_out->data) { return -ENOMEM; }
-    data_out->len = sizeof(struct iwl_sil_step);
-    resp = (struct iwl_sil_step*)data_out->data;
-    resp->silicon_step = CSR_HW_REV_STEP(trans->hw_rev);
-    return 0;
+  struct iwl_sil_step* resp;
+  data_out->data = kmalloc(sizeof(struct iwl_sil_step), GFP_KERNEL);
+  if (!data_out->data) {
+    return -ENOMEM;
+  }
+  data_out->len = sizeof(struct iwl_sil_step);
+  resp = (struct iwl_sil_step*)data_out->data;
+  resp->silicon_step = CSR_HW_REV_STEP(trans->hw_rev);
+  return 0;
 }
 
 static int iwl_tm_gnl_get_build_info(struct iwl_trans* trans, struct iwl_tm_data* data_out) {
-    struct iwl_tm_build_info* resp;
+  struct iwl_tm_build_info* resp;
 
-    data_out->data = kmalloc(sizeof(*resp), GFP_KERNEL);
-    if (!data_out->data) { return -ENOMEM; }
-    data_out->len = sizeof(struct iwl_tm_build_info);
-    resp = (struct iwl_tm_build_info*)data_out->data;
+  data_out->data = kmalloc(sizeof(*resp), GFP_KERNEL);
+  if (!data_out->data) {
+    return -ENOMEM;
+  }
+  data_out->len = sizeof(struct iwl_tm_build_info);
+  resp = (struct iwl_tm_build_info*)data_out->data;
 
-    memset(resp, 0, sizeof(*resp));
-    strncpy(resp->driver_version, BACKPORTS_GIT_TRACKED, sizeof(resp->driver_version));
+  memset(resp, 0, sizeof(*resp));
+  strncpy(resp->driver_version, BACKPORTS_GIT_TRACKED, sizeof(resp->driver_version));
 #ifdef BACKPORTS_BRANCH_TSTAMP
-    strncpy(resp->branch_time, BACKPORTS_BRANCH_TSTAMP, sizeof(resp->branch_time));
+  strncpy(resp->branch_time, BACKPORTS_BRANCH_TSTAMP, sizeof(resp->branch_time));
 #endif
-    strncpy(resp->build_time, BACKPORTS_BUILD_TSTAMP, sizeof(resp->build_time));
+  strncpy(resp->build_time, BACKPORTS_BUILD_TSTAMP, sizeof(resp->build_time));
 
-    return 0;
+  return 0;
 }
 
 static int iwl_tm_gnl_get_sil_type(struct iwl_trans* trans, struct iwl_tm_data* data_out) {
-    struct iwl_tm_sil_type* resp;
+  struct iwl_tm_sil_type* resp;
 
-    resp = kzalloc(sizeof(*resp), GFP_KERNEL);
-    if (!resp) { return -ENOMEM; }
+  resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+  if (!resp) {
+    return -ENOMEM;
+  }
 
-    resp->silicon_type = CSR_HW_REV_TYPE(trans->hw_rev);
+  resp->silicon_type = CSR_HW_REV_TYPE(trans->hw_rev);
 
-    data_out->data = resp;
-    data_out->len = sizeof(*resp);
+  data_out->data = resp;
+  data_out->len = sizeof(*resp);
 
-    return 0;
+  return 0;
 }
 
 static int iwl_tm_gnl_get_rfid(struct iwl_trans* trans, struct iwl_tm_data* data_out) {
-    struct iwl_tm_rfid* resp;
+  struct iwl_tm_rfid* resp;
 
-    resp = kzalloc(sizeof(*resp), GFP_KERNEL);
-    if (!resp) { return -ENOMEM; }
+  resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+  if (!resp) {
+    return -ENOMEM;
+  }
 
-    IWL_DEBUG_INFO(trans, "HW RFID=0x08%X\n", trans->hw_rf_id);
+  IWL_DEBUG_INFO(trans, "HW RFID=0x08%X\n", trans->hw_rf_id);
 
-    resp->flavor = CSR_HW_RFID_FLAVOR(trans->hw_rf_id);
-    resp->dash = CSR_HW_RFID_DASH(trans->hw_rf_id);
-    resp->step = CSR_HW_RFID_STEP(trans->hw_rf_id);
-    resp->type = CSR_HW_RFID_TYPE(trans->hw_rf_id);
+  resp->flavor = CSR_HW_RFID_FLAVOR(trans->hw_rf_id);
+  resp->dash = CSR_HW_RFID_DASH(trans->hw_rf_id);
+  resp->step = CSR_HW_RFID_STEP(trans->hw_rf_id);
+  resp->type = CSR_HW_RFID_TYPE(trans->hw_rf_id);
 
-    data_out->data = resp;
-    data_out->len = sizeof(*resp);
+  data_out->data = resp;
+  data_out->len = sizeof(*resp);
 
-    return 0;
+  return 0;
 }
 
 /*
@@ -418,10 +459,10 @@
  *      command is done.
  */
 struct iwl_tm_gnl_cmd {
-    const char* dev_name;
-    uint32_t cmd;
-    struct iwl_tm_data data_in;
-    struct iwl_tm_data data_out;
+  const char* dev_name;
+  uint32_t cmd;
+  struct iwl_tm_data data_in;
+  struct iwl_tm_data data_out;
 };
 
 static struct list_head dev_list; /* protected by mutex or RCU */
@@ -429,11 +470,11 @@
 
 /* Testmode GNL family command attributes  */
 enum iwl_tm_gnl_cmd_attr_t {
-    IWL_TM_GNL_MSG_ATTR_INVALID = 0,
-    IWL_TM_GNL_MSG_ATTR_DEVNAME,
-    IWL_TM_GNL_MSG_ATTR_CMD,
-    IWL_TM_GNL_MSG_ATTR_DATA,
-    IWL_TM_GNL_MSG_ATTR_MAX
+  IWL_TM_GNL_MSG_ATTR_INVALID = 0,
+  IWL_TM_GNL_MSG_ATTR_DEVNAME,
+  IWL_TM_GNL_MSG_ATTR_CMD,
+  IWL_TM_GNL_MSG_ATTR_DATA,
+  IWL_TM_GNL_MSG_ATTR_MAX
 };
 
 /* TM GNL family definition */
@@ -466,18 +507,18 @@
  * locked inside the function to allow code flexibility)
  */
 static struct iwl_tm_gnl_dev* iwl_tm_gnl_get_dev(const char* dev_name) {
-    struct iwl_tm_gnl_dev *dev_itr, *dev = NULL;
+  struct iwl_tm_gnl_dev *dev_itr, *dev = NULL;
 
-    lockdep_assert_held(&dev_list_mtx);
+  lockdep_assert_held(&dev_list_mtx);
 
-    list_for_each_entry(dev_itr, &dev_list, list) {
-        if (!strcmp(dev_itr->dev_name, dev_name)) {
-            dev = dev_itr;
-            break;
-        }
+  list_for_each_entry(dev_itr, &dev_list, list) {
+    if (!strcmp(dev_itr->dev_name, dev_name)) {
+      dev = dev_itr;
+      break;
     }
+  }
 
-    return dev;
+  return dev;
 }
 
 /**
@@ -489,35 +530,47 @@
  */
 static struct sk_buff* iwl_tm_gnl_create_msg(uint32_t pid, uint32_t seq,
                                              struct iwl_tm_gnl_cmd cmd_data, gfp_t flags) {
-    void* nlmsg_head;
-    struct sk_buff* skb;
-    int ret;
+  void* nlmsg_head;
+  struct sk_buff* skb;
+  int ret;
 
-    skb = genlmsg_new(NLMSG_GOODSIZE, flags);
-    if (!skb) { goto send_msg_err; }
+  skb = genlmsg_new(NLMSG_GOODSIZE, flags);
+  if (!skb) {
+    goto send_msg_err;
+  }
 
-    nlmsg_head = genlmsg_put(skb, pid, seq, &iwl_tm_gnl_family, 0, IWL_TM_GNL_CMD_EXECUTE);
-    if (!nlmsg_head) { goto send_msg_err; }
+  nlmsg_head = genlmsg_put(skb, pid, seq, &iwl_tm_gnl_family, 0, IWL_TM_GNL_CMD_EXECUTE);
+  if (!nlmsg_head) {
+    goto send_msg_err;
+  }
 
-    ret = nla_put_string(skb, IWL_TM_GNL_MSG_ATTR_DEVNAME, cmd_data.dev_name);
-    if (ret) { goto send_msg_err; }
+  ret = nla_put_string(skb, IWL_TM_GNL_MSG_ATTR_DEVNAME, cmd_data.dev_name);
+  if (ret) {
+    goto send_msg_err;
+  }
 
-    ret = nla_put_u32(skb, IWL_TM_GNL_MSG_ATTR_CMD, cmd_data.cmd);
-    if (ret) { goto send_msg_err; }
+  ret = nla_put_u32(skb, IWL_TM_GNL_MSG_ATTR_CMD, cmd_data.cmd);
+  if (ret) {
+    goto send_msg_err;
+  }
 
-    if (cmd_data.data_out.len && cmd_data.data_out.data) {
-        ret = nla_put(skb, IWL_TM_GNL_MSG_ATTR_DATA, cmd_data.data_out.len, cmd_data.data_out.data);
-        if (ret) { goto send_msg_err; }
+  if (cmd_data.data_out.len && cmd_data.data_out.data) {
+    ret = nla_put(skb, IWL_TM_GNL_MSG_ATTR_DATA, cmd_data.data_out.len, cmd_data.data_out.data);
+    if (ret) {
+      goto send_msg_err;
     }
+  }
 
-    genlmsg_end(skb, nlmsg_head);
+  genlmsg_end(skb, nlmsg_head);
 
-    return skb;
+  return skb;
 
 send_msg_err:
-    if (skb) { kfree_skb(skb); }
+  if (skb) {
+    kfree_skb(skb);
+  }
 
-    return NULL;
+  return NULL;
 }
 
 /**
@@ -533,31 +586,41 @@
  */
 int iwl_tm_gnl_send_msg(struct iwl_trans* trans, uint32_t cmd, bool check_notify, void* data_out,
                         uint32_t data_len, gfp_t flags) {
-    struct iwl_tm_gnl_dev* dev;
-    struct iwl_tm_gnl_cmd cmd_data;
-    struct sk_buff* skb;
-    uint32_t nlportid;
+  struct iwl_tm_gnl_dev* dev;
+  struct iwl_tm_gnl_cmd cmd_data;
+  struct sk_buff* skb;
+  uint32_t nlportid;
 
-    if (WARN_ON_ONCE(!trans)) { return -EINVAL; }
+  if (WARN_ON_ONCE(!trans)) {
+    return -EINVAL;
+  }
 
-    if (!trans->tmdev) { return 0; }
-    dev = trans->tmdev;
+  if (!trans->tmdev) {
+    return 0;
+  }
+  dev = trans->tmdev;
 
-    nlportid = READ_ONCE(dev->nl_events_portid);
+  nlportid = READ_ONCE(dev->nl_events_portid);
 
-    if (check_notify && !dev->tst.notify) { return 0; }
+  if (check_notify && !dev->tst.notify) {
+    return 0;
+  }
 
-    memset(&cmd_data, 0, sizeof(struct iwl_tm_gnl_cmd));
-    cmd_data.dev_name = dev_name(trans->dev);
-    cmd_data.cmd = cmd;
-    cmd_data.data_out.data = data_out;
-    cmd_data.data_out.len = data_len;
+  memset(&cmd_data, 0, sizeof(struct iwl_tm_gnl_cmd));
+  cmd_data.dev_name = dev_name(trans->dev);
+  cmd_data.cmd = cmd;
+  cmd_data.data_out.data = data_out;
+  cmd_data.data_out.len = data_len;
 
-    skb = iwl_tm_gnl_create_msg(nlportid, 0, cmd_data, flags);
-    if (!skb) { return -EINVAL; }
+  skb = iwl_tm_gnl_create_msg(nlportid, 0, cmd_data, flags);
+  if (!skb) {
+    return -EINVAL;
+  }
 
-    if (nlportid) { return genlmsg_unicast(&init_net, skb, nlportid); }
-    return genlmsg_multicast(&iwl_tm_gnl_family, skb, 0, 0, flags);
+  if (nlportid) {
+    return genlmsg_unicast(&init_net, skb, nlportid);
+  }
+  return genlmsg_multicast(&iwl_tm_gnl_family, skb, 0, 0, flags);
 }
 IWL_EXPORT_SYMBOL(iwl_tm_gnl_send_msg);
 
@@ -567,12 +630,14 @@
  * @cmd_data:   Data of command to be responded
  */
 static int iwl_tm_gnl_reply(struct genl_info* info, struct iwl_tm_gnl_cmd cmd_data) {
-    struct sk_buff* skb;
+  struct sk_buff* skb;
 
-    skb = iwl_tm_gnl_create_msg(genl_info_snd_portid(info), info->snd_seq, cmd_data, GFP_KERNEL);
-    if (!skb) { return -EINVAL; }
+  skb = iwl_tm_gnl_create_msg(genl_info_snd_portid(info), info->snd_seq, cmd_data, GFP_KERNEL);
+  if (!skb) {
+    return -EINVAL;
+  }
 
-    return genlmsg_reply(skb, info);
+  return genlmsg_reply(skb, info);
 }
 
 /**
@@ -580,104 +645,106 @@
  * @cmd_data:   Pointer to the data of command to be executed
  */
 static int iwl_tm_gnl_cmd_execute(struct iwl_tm_gnl_cmd* cmd_data) {
-    struct iwl_tm_gnl_dev* dev;
-    bool common_op = false;
-    int ret = 0;
-    mutex_lock(&dev_list_mtx);
-    dev = iwl_tm_gnl_get_dev(cmd_data->dev_name);
-    mutex_unlock(&dev_list_mtx);
-    if (!dev) { return -ENODEV; }
+  struct iwl_tm_gnl_dev* dev;
+  bool common_op = false;
+  int ret = 0;
+  mutex_lock(&dev_list_mtx);
+  dev = iwl_tm_gnl_get_dev(cmd_data->dev_name);
+  mutex_unlock(&dev_list_mtx);
+  if (!dev) {
+    return -ENODEV;
+  }
 
-    IWL_DEBUG_INFO(dev->trans, "%s cmd=0x%X\n", __func__, cmd_data->cmd);
-    switch (cmd_data->cmd) {
+  IWL_DEBUG_INFO(dev->trans, "%s cmd=0x%X\n", __func__, cmd_data->cmd);
+  switch (cmd_data->cmd) {
     case IWL_TM_USER_CMD_HCMD:
-        ret = iwl_tm_validate_fw_cmd(&cmd_data->data_in);
-        break;
+      ret = iwl_tm_validate_fw_cmd(&cmd_data->data_in);
+      break;
 
     case IWL_TM_USER_CMD_REG_ACCESS:
-        ret = iwl_tm_validate_reg_ops(&cmd_data->data_in);
-        break;
+      ret = iwl_tm_validate_reg_ops(&cmd_data->data_in);
+      break;
 
     case IWL_TM_USER_CMD_SRAM_WRITE:
-        ret = iwl_tm_validate_sram_write_req(dev, &cmd_data->data_in);
-        break;
+      ret = iwl_tm_validate_sram_write_req(dev, &cmd_data->data_in);
+      break;
 
     case IWL_TM_USER_CMD_BEGIN_TRACE:
-        ret = iwl_tm_trace_begin(dev, &cmd_data->data_in, &cmd_data->data_out);
-        common_op = true;
-        break;
+      ret = iwl_tm_trace_begin(dev, &cmd_data->data_in, &cmd_data->data_out);
+      common_op = true;
+      break;
 
     case IWL_TM_USER_CMD_END_TRACE:
-        ret = iwl_tm_trace_end(dev);
-        common_op = true;
-        break;
+      ret = iwl_tm_trace_end(dev);
+      common_op = true;
+      break;
 
     case IWL_XVT_CMD_MOD_TX:
-        ret = iwl_tm_validate_tx_cmd(&cmd_data->data_in);
-        break;
+      ret = iwl_tm_validate_tx_cmd(&cmd_data->data_in);
+      break;
 
     case IWL_XVT_CMD_RX_HDRS_MODE:
-        ret = iwl_tm_validate_rx_hdrs_mode_req(&cmd_data->data_in);
-        break;
+      ret = iwl_tm_validate_rx_hdrs_mode_req(&cmd_data->data_in);
+      break;
 
     case IWL_XVT_CMD_APMG_PD_MODE:
-        ret = iwl_tm_validate_apmg_pd_mode_req(&cmd_data->data_in);
-        break;
+      ret = iwl_tm_validate_apmg_pd_mode_req(&cmd_data->data_in);
+      break;
 
     case IWL_TM_USER_CMD_NOTIFICATIONS:
-        ret = iwl_tm_notifications_en(&dev->tst, &cmd_data->data_in);
-        common_op = true;
-        break;
+      ret = iwl_tm_notifications_en(&dev->tst, &cmd_data->data_in);
+      common_op = true;
+      break;
 
     case IWL_TM_USER_CMD_GET_DEVICE_STATUS:
-        ret = iwl_tm_get_device_status(dev, &cmd_data->data_in, &cmd_data->data_out);
-        common_op = true;
-        break;
+      ret = iwl_tm_get_device_status(dev, &cmd_data->data_in, &cmd_data->data_out);
+      common_op = true;
+      break;
 #if IS_ENABLED(CPTCFG_IWLXVT)
     case IWL_TM_USER_CMD_SWITCH_OP_MODE:
-        ret = iwl_tm_switch_op_mode(dev, &cmd_data->data_in);
-        common_op = true;
-        break;
+      ret = iwl_tm_switch_op_mode(dev, &cmd_data->data_in);
+      common_op = true;
+      break;
 #endif
     case IWL_XVT_CMD_GET_CHIP_ID:
-        ret = iwl_tm_validate_get_chip_id(dev->trans);
-        break;
+      ret = iwl_tm_validate_get_chip_id(dev->trans);
+      break;
 
     case IWL_TM_USER_CMD_GET_SIL_STEP:
-        ret = iwl_tm_gnl_get_sil_step(dev->trans, &cmd_data->data_out);
-        common_op = true;
-        break;
+      ret = iwl_tm_gnl_get_sil_step(dev->trans, &cmd_data->data_out);
+      common_op = true;
+      break;
 
     case IWL_TM_USER_CMD_GET_DRIVER_BUILD_INFO:
-        ret = iwl_tm_gnl_get_build_info(dev->trans, &cmd_data->data_out);
-        common_op = true;
-        break;
+      ret = iwl_tm_gnl_get_build_info(dev->trans, &cmd_data->data_out);
+      common_op = true;
+      break;
 
     case IWL_TM_USER_CMD_GET_SIL_TYPE:
-        ret = iwl_tm_gnl_get_sil_type(dev->trans, &cmd_data->data_out);
-        common_op = true;
-        break;
+      ret = iwl_tm_gnl_get_sil_type(dev->trans, &cmd_data->data_out);
+      common_op = true;
+      break;
 
     case IWL_TM_USER_CMD_GET_RFID:
-        ret = iwl_tm_gnl_get_rfid(dev->trans, &cmd_data->data_out);
-        common_op = true;
-        break;
-    }
-    if (ret) {
-        IWL_ERR(dev->trans, "%s Error=%d\n", __func__, ret);
-        return ret;
-    }
-
-    if (!common_op)
-        ret = iwl_tm_execute_cmd(&dev->trans->testmode, cmd_data->cmd, &cmd_data->data_in,
-                                 &cmd_data->data_out);
-
-    if (ret) {
-        IWL_ERR(dev->trans, "%s ret=%d\n", __func__, ret);
-    } else {
-        IWL_DEBUG_INFO(dev->trans, "%s ended Ok\n", __func__);
-    }
+      ret = iwl_tm_gnl_get_rfid(dev->trans, &cmd_data->data_out);
+      common_op = true;
+      break;
+  }
+  if (ret) {
+    IWL_ERR(dev->trans, "%s Error=%d\n", __func__, ret);
     return ret;
+  }
+
+  if (!common_op)
+    ret = iwl_tm_execute_cmd(&dev->trans->testmode, cmd_data->cmd, &cmd_data->data_in,
+                             &cmd_data->data_out);
+
+  if (ret) {
+    IWL_ERR(dev->trans, "%s ret=%d\n", __func__, ret);
+  } else {
+    IWL_DEBUG_INFO(dev->trans, "%s ended Ok\n", __func__);
+  }
+  return ret;
 }
 
 /**
@@ -688,12 +755,14 @@
  */
 static int iwl_tm_mem_dump(struct iwl_tm_gnl_dev* dev, struct iwl_tm_data* data_in,
                            struct iwl_tm_data* data_out) {
-    int ret;
+  int ret;
 
-    ret = iwl_tm_validate_sram_read_req(dev, data_in);
-    if (ret) { return ret; }
+  ret = iwl_tm_validate_sram_read_req(dev, data_in);
+  if (ret) {
+    return ret;
+  }
 
-    return iwl_tm_execute_cmd(&dev->trans->testmode, IWL_TM_USER_CMD_SRAM_READ, data_in, data_out);
+  return iwl_tm_execute_cmd(&dev->trans->testmode, IWL_TM_USER_CMD_SRAM_READ, data_in, data_out);
 }
 
 /**
@@ -702,31 +771,33 @@
  * @data_out:   Dump data
  */
 static int iwl_tm_trace_dump(struct iwl_tm_gnl_dev* dev, struct iwl_tm_data* data_out) {
-    int ret;
-    uint32_t buf_size;
+  int ret;
+  uint32_t buf_size;
 
-    if (!(dev->dnt->iwl_dnt_status & IWL_DNT_STATUS_MON_CONFIGURED)) {
-        IWL_ERR(dev->trans, "Invalid monitor status\n");
-        return -EINVAL;
-    }
+  if (!(dev->dnt->iwl_dnt_status & IWL_DNT_STATUS_MON_CONFIGURED)) {
+    IWL_ERR(dev->trans, "Invalid monitor status\n");
+    return -EINVAL;
+  }
 
-    if (dev->dnt->mon_buf_size == 0) {
-        IWL_ERR(dev->trans, "No available monitor buffer\n");
-        return -ENOMEM;
-    }
+  if (dev->dnt->mon_buf_size == 0) {
+    IWL_ERR(dev->trans, "No available monitor buffer\n");
+    return -ENOMEM;
+  }
 
-    buf_size = dev->dnt->mon_buf_size;
-    data_out->data = kmalloc(buf_size, GFP_KERNEL);
-    if (!data_out->data) { return -ENOMEM; }
+  buf_size = dev->dnt->mon_buf_size;
+  data_out->data = kmalloc(buf_size, GFP_KERNEL);
+  if (!data_out->data) {
+    return -ENOMEM;
+  }
 
-    ret = iwl_dnt_dispatch_pull(dev->trans, data_out->data, buf_size, MONITOR);
-    if (ret < 0) {
-        kfree(data_out->data);
-        return ret;
-    }
-    data_out->len = ret;
+  ret = iwl_dnt_dispatch_pull(dev->trans, data_out->data, buf_size, MONITOR);
+  if (ret < 0) {
+    kfree(data_out->data);
+    return ret;
+  }
+  data_out->len = ret;
 
-    return 0;
+  return 0;
 }
 
 /**
@@ -736,28 +807,30 @@
  *      Data out is the start address of the buffer, and it's size.
  */
 static int iwl_tm_gnl_command_dump(struct iwl_tm_gnl_cmd* cmd_data) {
-    struct iwl_tm_gnl_dev* dev;
-    int ret = 0;
+  struct iwl_tm_gnl_dev* dev;
+  int ret = 0;
 
-    mutex_lock(&dev_list_mtx);
-    dev = iwl_tm_gnl_get_dev(cmd_data->dev_name);
-    mutex_unlock(&dev_list_mtx);
-    if (!dev) { return -ENODEV; }
+  mutex_lock(&dev_list_mtx);
+  dev = iwl_tm_gnl_get_dev(cmd_data->dev_name);
+  mutex_unlock(&dev_list_mtx);
+  if (!dev) {
+    return -ENODEV;
+  }
 
-    switch (cmd_data->cmd) {
+  switch (cmd_data->cmd) {
     case IWL_TM_USER_CMD_TRACE_DUMP:
-        ret = iwl_tm_trace_dump(dev, &cmd_data->data_out);
-        break;
+      ret = iwl_tm_trace_dump(dev, &cmd_data->data_out);
+      break;
 
     case IWL_TM_USER_CMD_SRAM_READ:
-        ret = iwl_tm_mem_dump(dev, &cmd_data->data_in, &cmd_data->data_out);
-        break;
+      ret = iwl_tm_mem_dump(dev, &cmd_data->data_in, &cmd_data->data_out);
+      break;
 
     default:
-        return -EOPNOTSUPP;
-    }
+      return -EOPNOTSUPP;
+  }
 
-    return ret;
+  return ret;
 }
 
 /**
@@ -766,43 +839,47 @@
  * @cmd_data:   Command
  */
 static int iwl_tm_gnl_parse_msg(struct nlattr** attrs, struct iwl_tm_gnl_cmd* cmd_data) {
-    memset(cmd_data, 0, sizeof(struct iwl_tm_gnl_cmd));
+  memset(cmd_data, 0, sizeof(struct iwl_tm_gnl_cmd));
 
-    if (!attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME] || !attrs[IWL_TM_GNL_MSG_ATTR_CMD]) { return -EINVAL; }
+  if (!attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME] || !attrs[IWL_TM_GNL_MSG_ATTR_CMD]) {
+    return -EINVAL;
+  }
 
-    cmd_data->dev_name = nla_data(attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME]);
-    cmd_data->cmd = nla_get_u32(attrs[IWL_TM_GNL_MSG_ATTR_CMD]);
+  cmd_data->dev_name = nla_data(attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME]);
+  cmd_data->cmd = nla_get_u32(attrs[IWL_TM_GNL_MSG_ATTR_CMD]);
 
-    if (attrs[IWL_TM_GNL_MSG_ATTR_DATA]) {
-        cmd_data->data_in.data = nla_data(attrs[IWL_TM_GNL_MSG_ATTR_DATA]);
-        cmd_data->data_in.len = nla_len(attrs[IWL_TM_GNL_MSG_ATTR_DATA]);
-    }
+  if (attrs[IWL_TM_GNL_MSG_ATTR_DATA]) {
+    cmd_data->data_in.data = nla_data(attrs[IWL_TM_GNL_MSG_ATTR_DATA]);
+    cmd_data->data_in.len = nla_len(attrs[IWL_TM_GNL_MSG_ATTR_DATA]);
+  }
 
-    return 0;
+  return 0;
 }
 
 /**
  * iwl_tm_gnl_cmd_do() - Executes IWL testmode GNL command
  */
 static int iwl_tm_gnl_cmd_do(struct sk_buff* skb, struct genl_info* info) {
-    struct iwl_tm_gnl_cmd cmd_data;
-    int ret;
+  struct iwl_tm_gnl_cmd cmd_data;
+  int ret;
 
-    ret = iwl_tm_gnl_parse_msg(info->attrs, &cmd_data);
-    if (ret) { return ret; }
-
-    ret = iwl_tm_gnl_cmd_execute(&cmd_data);
-    if (!ret && cmd_data.data_out.len) {
-        ret = iwl_tm_gnl_reply(info, cmd_data);
-        /*
-         * In this case, data out should be allocated in
-         * iwl_tm_gnl_cmd_execute so it should be freed
-         * here
-         */
-        kfree(cmd_data.data_out.data);
-    }
-
+  ret = iwl_tm_gnl_parse_msg(info->attrs, &cmd_data);
+  if (ret) {
     return ret;
+  }
+
+  ret = iwl_tm_gnl_cmd_execute(&cmd_data);
+  if (!ret && cmd_data.data_out.len) {
+    ret = iwl_tm_gnl_reply(info, cmd_data);
+    /*
+     * In this case, data out should be allocated in
+     * iwl_tm_gnl_cmd_execute so it should be freed
+     * here
+     */
+    kfree(cmd_data.data_out.data);
+  }
+
+  return ret;
 }
 
 /**
@@ -815,112 +892,126 @@
  * cb->args[3]: Buffer offset from where to dump in the next round
  */
 static int iwl_tm_gnl_dump(struct sk_buff* skb, struct netlink_callback* cb) {
-    struct iwl_tm_gnl_cmd cmd_data;
-    void* nlmsg_head = NULL;
-    struct nlattr* attrs[IWL_TM_GNL_MSG_ATTR_MAX];
-    void* dump_addr;
-    unsigned long dump_offset;
-    int dump_size, chunk_size, ret;
+  struct iwl_tm_gnl_cmd cmd_data;
+  void* nlmsg_head = NULL;
+  struct nlattr* attrs[IWL_TM_GNL_MSG_ATTR_MAX];
+  void* dump_addr;
+  unsigned long dump_offset;
+  int dump_size, chunk_size, ret;
 
-    if (!cb->args[0]) {
-        /*
-         * This is the first part of the dump - Parse dump data
-         * out of the data in the netlink header and set up the
-         * dump in cb->args[].
-         */
-        ret = nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, IWL_TM_GNL_MSG_ATTR_MAX - 1,
-                          iwl_tm_gnl_msg_policy, NULL);
-        if (ret) { return ret; }
-
-        ret = iwl_tm_gnl_parse_msg(attrs, &cmd_data);
-        if (ret) { return ret; }
-
-        ret = iwl_tm_gnl_command_dump(&cmd_data);
-        if (ret) { return ret; }
-
-        /* Incrementing command since command number may be zero */
-        cb->args[0] = cmd_data.cmd + 1;
-        cb->args[1] = (unsigned long)cmd_data.data_out.data;
-        cb->args[2] = cmd_data.data_out.len;
-        cb->args[3] = 0;
-
-        if (!cb->args[2]) { return -ENODATA; }
-    }
-
-    dump_addr = (uint8_t*)cb->args[1];
-    dump_size = cb->args[2];
-    dump_offset = cb->args[3];
-
-    nlmsg_head = genlmsg_put(skb, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq,
-                             &iwl_tm_gnl_family, NLM_F_MULTI, IWL_TM_GNL_CMD_EXECUTE);
-
+  if (!cb->args[0]) {
     /*
-     * Reserve some room for NL attribute header,
-     * 16 bytes should be enough.
+     * This is the first part of the dump - Parse dump data
+     * out of the data in the netlink header and set up the
+     * dump in cb->args[].
      */
-    chunk_size = skb_tailroom(skb) - 16;
-    if (chunk_size <= 0) {
-        ret = -ENOMEM;
-        goto dump_err;
+    ret = nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, IWL_TM_GNL_MSG_ATTR_MAX - 1,
+                      iwl_tm_gnl_msg_policy, NULL);
+    if (ret) {
+      return ret;
     }
 
-    if (chunk_size > dump_size - dump_offset) { chunk_size = dump_size - dump_offset; }
-
-    if (chunk_size) {
-        ret = nla_put(skb, IWL_TM_GNL_MSG_ATTR_DATA, chunk_size, dump_addr + dump_offset);
-        if (ret) { goto dump_err; }
+    ret = iwl_tm_gnl_parse_msg(attrs, &cmd_data);
+    if (ret) {
+      return ret;
     }
 
-    genlmsg_end(skb, nlmsg_head);
+    ret = iwl_tm_gnl_command_dump(&cmd_data);
+    if (ret) {
+      return ret;
+    }
 
-    /* move offset */
-    cb->args[3] += chunk_size;
+    /* Incrementing command since command number may be zero */
+    cb->args[0] = cmd_data.cmd + 1;
+    cb->args[1] = (unsigned long)cmd_data.data_out.data;
+    cb->args[2] = cmd_data.data_out.len;
+    cb->args[3] = 0;
 
-    return cb->args[2] - cb->args[3];
+    if (!cb->args[2]) {
+      return -ENODATA;
+    }
+  }
+
+  dump_addr = (uint8_t*)cb->args[1];
+  dump_size = cb->args[2];
+  dump_offset = cb->args[3];
+
+  nlmsg_head = genlmsg_put(skb, NETLINK_CB_PORTID(cb->skb), cb->nlh->nlmsg_seq, &iwl_tm_gnl_family,
+                           NLM_F_MULTI, IWL_TM_GNL_CMD_EXECUTE);
+
+  /*
+   * Reserve some room for NL attribute header,
+   * 16 bytes should be enough.
+   */
+  chunk_size = skb_tailroom(skb) - 16;
+  if (chunk_size <= 0) {
+    ret = -ENOMEM;
+    goto dump_err;
+  }
+
+  if (chunk_size > dump_size - dump_offset) {
+    chunk_size = dump_size - dump_offset;
+  }
+
+  if (chunk_size) {
+    ret = nla_put(skb, IWL_TM_GNL_MSG_ATTR_DATA, chunk_size, dump_addr + dump_offset);
+    if (ret) {
+      goto dump_err;
+    }
+  }
+
+  genlmsg_end(skb, nlmsg_head);
+
+  /* move offset */
+  cb->args[3] += chunk_size;
+
+  return cb->args[2] - cb->args[3];
 
 dump_err:
-    genlmsg_cancel(skb, nlmsg_head);
-    return ret;
+  genlmsg_cancel(skb, nlmsg_head);
+  return ret;
 }
 
 static int iwl_tm_gnl_done(struct netlink_callback* cb) {
-    switch (cb->args[0] - 1) {
+  switch (cb->args[0] - 1) {
     case IWL_TM_USER_CMD_SRAM_READ:
     case IWL_TM_USER_CMD_TRACE_DUMP:
-        kfree((void*)cb->args[1]);
-        return 0;
-    }
+      kfree((void*)cb->args[1]);
+      return 0;
+  }
 
-    return -EOPNOTSUPP;
+  return -EOPNOTSUPP;
 }
 
 static int iwl_tm_gnl_cmd_subscribe(struct sk_buff* skb, struct genl_info* info) {
-    struct iwl_tm_gnl_dev* dev;
-    const char* dev_name;
-    int ret;
+  struct iwl_tm_gnl_dev* dev;
+  const char* dev_name;
+  int ret;
 
-    if (!info->attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME]) { return -EINVAL; }
+  if (!info->attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME]) {
+    return -EINVAL;
+  }
 
-    dev_name = nla_data(info->attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME]);
+  dev_name = nla_data(info->attrs[IWL_TM_GNL_MSG_ATTR_DEVNAME]);
 
-    mutex_lock(&dev_list_mtx);
-    dev = iwl_tm_gnl_get_dev(dev_name);
-    if (!dev) {
-        ret = -ENODEV;
-        goto unlock;
-    }
+  mutex_lock(&dev_list_mtx);
+  dev = iwl_tm_gnl_get_dev(dev_name);
+  if (!dev) {
+    ret = -ENODEV;
+    goto unlock;
+  }
 
-    if (dev->nl_events_portid) {
-        ret = -EBUSY;
-        goto unlock;
-    }
+  if (dev->nl_events_portid) {
+    ret = -EBUSY;
+    goto unlock;
+  }
 
-    dev->nl_events_portid = genl_info_snd_portid(info);
-    ret = 0;
+  dev->nl_events_portid = genl_info_snd_portid(info);
+  ret = 0;
 
 unlock:
-    mutex_unlock(&dev_list_mtx);
-    return ret;
+  mutex_unlock(&dev_list_mtx);
+  return ret;
 }
 
 /*
@@ -960,26 +1051,34 @@
  * @trans:  transport struct for the device to register for
  */
 void iwl_tm_gnl_add(struct iwl_trans* trans) {
-    struct iwl_tm_gnl_dev* dev;
+  struct iwl_tm_gnl_dev* dev;
 
-    if (!trans) { return; }
+  if (!trans) {
+    return;
+  }
 
-    if (trans->tmdev) { return; }
+  if (trans->tmdev) {
+    return;
+  }
 
-    mutex_lock(&dev_list_mtx);
+  mutex_lock(&dev_list_mtx);
 
-    if (iwl_tm_gnl_get_dev(dev_name(trans->dev))) { goto unlock; }
+  if (iwl_tm_gnl_get_dev(dev_name(trans->dev))) {
+    goto unlock;
+  }
 
-    dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-    if (!dev) { goto unlock; }
+  dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+  if (!dev) {
+    goto unlock;
+  }
 
-    dev->dev_name = dev_name(trans->dev);
-    trans->tmdev = dev;
-    dev->trans = trans;
-    list_add_tail_rcu(&dev->list, &dev_list);
+  dev->dev_name = dev_name(trans->dev);
+  trans->tmdev = dev;
+  dev->trans = trans;
+  list_add_tail_rcu(&dev->list, &dev_list);
 
 unlock:
-    mutex_unlock(&dev_list_mtx);
+  mutex_unlock(&dev_list_mtx);
 }
 
 /**
@@ -987,41 +1086,45 @@
  * @trans:  transport struct for the device
  */
 void iwl_tm_gnl_remove(struct iwl_trans* trans) {
-    struct iwl_tm_gnl_dev *dev_itr, *tmp;
+  struct iwl_tm_gnl_dev *dev_itr, *tmp;
 
-    if (WARN_ON_ONCE(!trans)) { return; }
+  if (WARN_ON_ONCE(!trans)) {
+    return;
+  }
 
-    /* Searching for operation mode in list */
-    mutex_lock(&dev_list_mtx);
-    list_for_each_entry_safe(dev_itr, tmp, &dev_list, list) {
-        if (dev_itr->trans == trans) {
-            /*
-             * Device found. Removing it from list
-             * and releasing it's resources
-             */
-            list_del_rcu(&dev_itr->list);
-            synchronize_rcu();
-            kfree(dev_itr);
-            break;
-        }
+  /* Searching for operation mode in list */
+  mutex_lock(&dev_list_mtx);
+  list_for_each_entry_safe(dev_itr, tmp, &dev_list, list) {
+    if (dev_itr->trans == trans) {
+      /*
+       * Device found. Removing it from list
+       * and releasing it's resources
+       */
+      list_del_rcu(&dev_itr->list);
+      synchronize_rcu();
+      kfree(dev_itr);
+      break;
     }
+  }
 
-    trans->tmdev = NULL;
-    mutex_unlock(&dev_list_mtx);
+  trans->tmdev = NULL;
+  mutex_unlock(&dev_list_mtx);
 }
 
 static int iwl_tm_gnl_netlink_notify(struct notifier_block* nb, unsigned long state,
                                      void* _notify) {
-    struct netlink_notify* notify = _notify;
-    struct iwl_tm_gnl_dev* dev;
+  struct netlink_notify* notify = _notify;
+  struct iwl_tm_gnl_dev* dev;
 
-    rcu_read_lock();
-    list_for_each_entry_rcu(dev, &dev_list, list) {
-        if (dev->nl_events_portid == netlink_notify_portid(notify)) { dev->nl_events_portid = 0; }
+  rcu_read_lock();
+  list_for_each_entry_rcu(dev, &dev_list, list) {
+    if (dev->nl_events_portid == netlink_notify_portid(notify)) {
+      dev->nl_events_portid = 0;
     }
-    rcu_read_unlock();
+  }
+  rcu_read_unlock();
 
-    return NOTIFY_OK;
+  return NOTIFY_OK;
 }
 
 static struct notifier_block iwl_tm_gnl_netlink_notifier = {
@@ -1035,24 +1138,28 @@
  * TM GNL global variables
  */
 int iwl_tm_gnl_init(void) {
-    int ret;
+  int ret;
 
-    INIT_LIST_HEAD(&dev_list);
-    mutex_init(&dev_list_mtx);
+  INIT_LIST_HEAD(&dev_list);
+  mutex_init(&dev_list_mtx);
 
-    ret = genl_register_family(&iwl_tm_gnl_family);
-    if (ret) { return ret; }
-    ret = netlink_register_notifier(&iwl_tm_gnl_netlink_notifier);
-    if (ret) { genl_unregister_family(&iwl_tm_gnl_family); }
+  ret = genl_register_family(&iwl_tm_gnl_family);
+  if (ret) {
     return ret;
+  }
+  ret = netlink_register_notifier(&iwl_tm_gnl_netlink_notifier);
+  if (ret) {
+    genl_unregister_family(&iwl_tm_gnl_family);
+  }
+  return ret;
 }
 
 /**
  * iwl_tm_gnl_exit() - Unregisters Testmode GNL family
  */
 int iwl_tm_gnl_exit(void) {
-    netlink_unregister_notifier(&iwl_tm_gnl_netlink_notifier);
-    return genl_unregister_family(&iwl_tm_gnl_family);
+  netlink_unregister_notifier(&iwl_tm_gnl_netlink_notifier);
+  return genl_unregister_family(&iwl_tm_gnl_family);
 }
 
 /**
@@ -1061,13 +1168,13 @@
  * @rxb:    Contains rx packet to be sent
  */
 void iwl_tm_gnl_send_rx(struct iwl_trans* trans, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    int length = iwl_rx_packet_len(pkt);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  int length = iwl_rx_packet_len(pkt);
 
-    /* the length doesn't include len_n_flags field, so add it manually */
-    length += sizeof(__le32);
+  /* the length doesn't include len_n_flags field, so add it manually */
+  length += sizeof(__le32);
 
-    iwl_tm_gnl_send_msg(trans, IWL_TM_USER_CMD_NOTIF_UCODE_RX_PKT, true, (void*)pkt, length,
-                        GFP_ATOMIC);
+  iwl_tm_gnl_send_msg(trans, IWL_TM_USER_CMD_NOTIF_UCODE_RX_PKT, true, (void*)pkt, length,
+                      GFP_ATOMIC);
 }
 IWL_EXPORT_SYMBOL(iwl_tm_gnl_send_rx);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-gnl.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-gnl.h
index 0c4ab3b..8dcf918 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-gnl.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-gnl.h
@@ -40,15 +40,15 @@
 #include "fw/testmode.h"
 
 struct iwl_test_trace {
-    uint32_t size;
-    uint8_t* cpu_addr;
-    dma_addr_t dma_addr;
-    bool enabled;
+  uint32_t size;
+  uint8_t* cpu_addr;
+  dma_addr_t dma_addr;
+  bool enabled;
 };
 
 struct iwl_test {
-    struct iwl_test_trace trace;
-    bool notify;
+  struct iwl_test_trace trace;
+  bool notify;
 };
 
 /**
@@ -62,12 +62,12 @@
  * Device identifier it's name.
  */
 struct iwl_tm_gnl_dev {
-    struct list_head list;
-    struct iwl_test tst;
-    struct iwl_dnt* dnt;
-    struct iwl_trans* trans;
-    const char* dev_name;
-    uint32_t nl_events_portid;
+  struct list_head list;
+  struct iwl_test tst;
+  struct iwl_dnt* dnt;
+  struct iwl_trans* trans;
+  const char* dev_name;
+  uint32_t nl_events_portid;
 };
 
 int iwl_tm_gnl_send_msg(struct iwl_trans* trans, uint32_t cmd, bool check_notify, void* data_out,
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-infc.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-infc.h
index f29f2dd..3068a30 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-infc.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-tm-infc.h
@@ -45,8 +45,8 @@
  * confused with testmode commands
  */
 enum iwl_tm_gnl_cmd_t {
-    IWL_TM_GNL_CMD_EXECUTE = 0,
-    IWL_TM_GNL_CMD_SUBSCRIBE_EVENTS,
+  IWL_TM_GNL_CMD_EXECUTE = 0,
+  IWL_TM_GNL_CMD_SUBSCRIBE_EVENTS,
 };
 
 /* uCode trace buffer */
@@ -78,40 +78,40 @@
 
 /* User-Driver interface commands */
 enum {
-    IWL_TM_USER_CMD_HCMD = TM_CMD_BASE,
-    IWL_TM_USER_CMD_REG_ACCESS,
-    IWL_TM_USER_CMD_SRAM_WRITE,
-    IWL_TM_USER_CMD_SRAM_READ,
-    IWL_TM_USER_CMD_GET_DEVICE_INFO,
-    IWL_TM_USER_CMD_GET_DEVICE_STATUS,
-    IWL_TM_USER_CMD_BEGIN_TRACE,
-    IWL_TM_USER_CMD_END_TRACE,
-    IWL_TM_USER_CMD_TRACE_DUMP,
-    IWL_TM_USER_CMD_NOTIFICATIONS,
-    IWL_TM_USER_CMD_SWITCH_OP_MODE,
-    IWL_TM_USER_CMD_GET_SIL_STEP,
-    IWL_TM_USER_CMD_GET_DRIVER_BUILD_INFO,
-    IWL_TM_USER_CMD_GET_FW_INFO,
-    IWL_TM_USER_CMD_BUS_DATA_ACCESS,
-    IWL_TM_USER_CMD_GET_SIL_TYPE,
-    IWL_TM_USER_CMD_GET_RFID,
+  IWL_TM_USER_CMD_HCMD = TM_CMD_BASE,
+  IWL_TM_USER_CMD_REG_ACCESS,
+  IWL_TM_USER_CMD_SRAM_WRITE,
+  IWL_TM_USER_CMD_SRAM_READ,
+  IWL_TM_USER_CMD_GET_DEVICE_INFO,
+  IWL_TM_USER_CMD_GET_DEVICE_STATUS,
+  IWL_TM_USER_CMD_BEGIN_TRACE,
+  IWL_TM_USER_CMD_END_TRACE,
+  IWL_TM_USER_CMD_TRACE_DUMP,
+  IWL_TM_USER_CMD_NOTIFICATIONS,
+  IWL_TM_USER_CMD_SWITCH_OP_MODE,
+  IWL_TM_USER_CMD_GET_SIL_STEP,
+  IWL_TM_USER_CMD_GET_DRIVER_BUILD_INFO,
+  IWL_TM_USER_CMD_GET_FW_INFO,
+  IWL_TM_USER_CMD_BUS_DATA_ACCESS,
+  IWL_TM_USER_CMD_GET_SIL_TYPE,
+  IWL_TM_USER_CMD_GET_RFID,
 
-    IWL_TM_USER_CMD_NOTIF_UCODE_RX_PKT = TM_CMD_NOTIF_BASE,
-    IWL_TM_USER_CMD_NOTIF_DRIVER,
-    IWL_TM_USER_CMD_NOTIF_RX_HDR,
-    IWL_TM_USER_CMD_NOTIF_COMMIT_STATISTICS,
-    IWL_TM_USER_CMD_NOTIF_PHY_DB,
-    IWL_TM_USER_CMD_NOTIF_DTS_MEASUREMENTS,
-    IWL_TM_USER_CMD_NOTIF_MONITOR_DATA,
-    IWL_TM_USER_CMD_NOTIF_UCODE_MSGS_DATA,
-    IWL_TM_USER_CMD_NOTIF_APMG_PD,
-    IWL_TM_USER_CMD_NOTIF_RETRIEVE_MONITOR,
-    IWL_TM_USER_CMD_NOTIF_CRASH_DATA,
-    IWL_TM_USER_CMD_NOTIF_BFE,
-    IWL_TM_USER_CMD_NOTIF_LOC_MCSI,
-    IWL_TM_USER_CMD_NOTIF_LOC_RANGE,
-    IWL_TM_USER_CMD_NOTIF_IQ_CALIB,
-    IWL_TM_USER_CMD_NOTIF_CT_KILL,
+  IWL_TM_USER_CMD_NOTIF_UCODE_RX_PKT = TM_CMD_NOTIF_BASE,
+  IWL_TM_USER_CMD_NOTIF_DRIVER,
+  IWL_TM_USER_CMD_NOTIF_RX_HDR,
+  IWL_TM_USER_CMD_NOTIF_COMMIT_STATISTICS,
+  IWL_TM_USER_CMD_NOTIF_PHY_DB,
+  IWL_TM_USER_CMD_NOTIF_DTS_MEASUREMENTS,
+  IWL_TM_USER_CMD_NOTIF_MONITOR_DATA,
+  IWL_TM_USER_CMD_NOTIF_UCODE_MSGS_DATA,
+  IWL_TM_USER_CMD_NOTIF_APMG_PD,
+  IWL_TM_USER_CMD_NOTIF_RETRIEVE_MONITOR,
+  IWL_TM_USER_CMD_NOTIF_CRASH_DATA,
+  IWL_TM_USER_CMD_NOTIF_BFE,
+  IWL_TM_USER_CMD_NOTIF_LOC_MCSI,
+  IWL_TM_USER_CMD_NOTIF_LOC_RANGE,
+  IWL_TM_USER_CMD_NOTIF_IQ_CALIB,
+  IWL_TM_USER_CMD_NOTIF_CT_KILL,
 };
 
 /*
@@ -119,42 +119,42 @@
  * testmode commands indeces end
  */
 enum {
-    IWL_XVT_CMD_START = XVT_CMD_BASE,
-    IWL_XVT_CMD_STOP,
-    IWL_XVT_CMD_CONTINUE_INIT,
-    IWL_XVT_CMD_GET_PHY_DB_ENTRY,
-    IWL_XVT_CMD_SET_CONFIG,
-    IWL_XVT_CMD_GET_CONFIG,
-    IWL_XVT_CMD_MOD_TX,
-    IWL_XVT_CMD_RX_HDRS_MODE,
-    IWL_XVT_CMD_ALLOC_DMA,
-    IWL_XVT_CMD_GET_DMA,
-    IWL_XVT_CMD_FREE_DMA,
-    IWL_XVT_CMD_GET_CHIP_ID,
-    IWL_XVT_CMD_APMG_PD_MODE,
-    IWL_XVT_CMD_GET_MAC_ADDR_INFO,
-    IWL_XVT_CMD_MOD_TX_STOP,
-    IWL_XVT_CMD_TX_QUEUE_CFG,
-    IWL_XVT_CMD_DRIVER_CMD,
+  IWL_XVT_CMD_START = XVT_CMD_BASE,
+  IWL_XVT_CMD_STOP,
+  IWL_XVT_CMD_CONTINUE_INIT,
+  IWL_XVT_CMD_GET_PHY_DB_ENTRY,
+  IWL_XVT_CMD_SET_CONFIG,
+  IWL_XVT_CMD_GET_CONFIG,
+  IWL_XVT_CMD_MOD_TX,
+  IWL_XVT_CMD_RX_HDRS_MODE,
+  IWL_XVT_CMD_ALLOC_DMA,
+  IWL_XVT_CMD_GET_DMA,
+  IWL_XVT_CMD_FREE_DMA,
+  IWL_XVT_CMD_GET_CHIP_ID,
+  IWL_XVT_CMD_APMG_PD_MODE,
+  IWL_XVT_CMD_GET_MAC_ADDR_INFO,
+  IWL_XVT_CMD_MOD_TX_STOP,
+  IWL_XVT_CMD_TX_QUEUE_CFG,
+  IWL_XVT_CMD_DRIVER_CMD,
 
-    /* Driver notifications */
-    IWL_XVT_CMD_SEND_REPLY_ALIVE = XVT_CMD_NOTIF_BASE,
-    IWL_XVT_CMD_SEND_RFKILL,
-    IWL_XVT_CMD_SEND_NIC_ERROR,
-    IWL_XVT_CMD_SEND_NIC_UMAC_ERROR,
-    IWL_XVT_CMD_SEND_MOD_TX_DONE,
-    IWL_XVT_CMD_ENHANCED_TX_DONE,
-    IWL_XVT_CMD_TX_CMD_RESP,
-    IWL_XVT_CMD_ECHO_NOTIF,
+  /* Driver notifications */
+  IWL_XVT_CMD_SEND_REPLY_ALIVE = XVT_CMD_NOTIF_BASE,
+  IWL_XVT_CMD_SEND_RFKILL,
+  IWL_XVT_CMD_SEND_NIC_ERROR,
+  IWL_XVT_CMD_SEND_NIC_UMAC_ERROR,
+  IWL_XVT_CMD_SEND_MOD_TX_DONE,
+  IWL_XVT_CMD_ENHANCED_TX_DONE,
+  IWL_XVT_CMD_TX_CMD_RESP,
+  IWL_XVT_CMD_ECHO_NOTIF,
 
-    /* Bus Tester Commands*/
-    IWL_TM_USER_CMD_SV_BUS_CONFIG = XVT_BUS_TESTER_BASE,
-    IWL_TM_USER_CMD_SV_BUS_RESET,
-    IWL_TM_USER_CMD_SV_IO_TOGGLE,
-    IWL_TM_USER_CMD_SV_GET_STATUS,
-    IWL_TM_USER_CMD_SV_RD_WR_UINT8,
-    IWL_TM_USER_CMD_SV_RD_WR_UINT32,
-    IWL_TM_USER_CMD_SV_RD_WR_BUFFER,
+  /* Bus Tester Commands */
+  IWL_TM_USER_CMD_SV_BUS_CONFIG = XVT_BUS_TESTER_BASE,
+  IWL_TM_USER_CMD_SV_BUS_RESET,
+  IWL_TM_USER_CMD_SV_IO_TOGGLE,
+  IWL_TM_USER_CMD_SV_GET_STATUS,
+  IWL_TM_USER_CMD_SV_RD_WR_UINT8,
+  IWL_TM_USER_CMD_SV_RD_WR_UINT32,
+  IWL_TM_USER_CMD_SV_RD_WR_BUFFER,
 };
 
 /**
@@ -162,18 +162,18 @@
  * sub-commands through IWL_XVT_CMD_DRIVER_CMD.
  */
 enum {
-    IWL_DRV_CMD_CONFIG_TX_QUEUE = 0,
-    IWL_DRV_CMD_SET_TX_PAYLOAD,
-    IWL_DRV_CMD_TX_START,
-    IWL_DRV_CMD_TX_STOP,
-    IWL_DRV_CMD_GET_RX_AGG_STATS,
-    IWL_DRV_CMD_CONFIG_RX_MPDU,
-    IWL_DRV_CMD_ECHO_NOTIF,
+  IWL_DRV_CMD_CONFIG_TX_QUEUE = 0,
+  IWL_DRV_CMD_SET_TX_PAYLOAD,
+  IWL_DRV_CMD_TX_START,
+  IWL_DRV_CMD_TX_STOP,
+  IWL_DRV_CMD_GET_RX_AGG_STATS,
+  IWL_DRV_CMD_CONFIG_RX_MPDU,
+  IWL_DRV_CMD_ECHO_NOTIF,
 };
 
 enum {
-    NOTIFICATIONS_DISABLE = 0,
-    NOTIFICATIONS_ENABLE = 1,
+  NOTIFICATIONS_DISABLE = 0,
+  NOTIFICATIONS_ENABLE = 1,
 };
 
 /**
@@ -185,10 +185,10 @@
  *      rx packet when structure is used for command response.
  */
 struct iwl_tm_cmd_request {
-    __u32 id;
-    __u32 want_resp;
-    __u32 len;
-    __u8 data[];
+  __u32 id;
+  __u32 want_resp;
+  __u32 len;
+  __u8 data[];
 } __packed __aligned(4);
 
 /**
@@ -196,7 +196,7 @@
  * @enable: Function enable/disable 1/0
  */
 struct iwl_tm_sdio_io_toggle {
-    __u32 enable;
+  __u32 enable;
 } __packed __aligned(4);
 
 /* Register operations - Operation type */
@@ -209,9 +209,9 @@
  * @value:  Write value, or read result
  */
 struct iwl_tm_reg_op {
-    __u32 op_type;
-    __u32 address;
-    __u32 value;
+  __u32 op_type;
+  __u32 address;
+  __u32 value;
 } __packed __aligned(4);
 
 /**
@@ -220,8 +220,8 @@
  * @reg_ops:    Array of register operations
  */
 struct iwl_tm_regs_request {
-    __u32 num;
-    struct iwl_tm_reg_op reg_ops[];
+  __u32 num;
+  struct iwl_tm_reg_op reg_ops[];
 } __packed __aligned(4);
 
 /**
@@ -230,8 +230,8 @@
  * @addr:   Resulting DMA address of trace buffer LSB
  */
 struct iwl_tm_trace_request {
-    __u64 addr;
-    __u32 size;
+  __u64 addr;
+  __u32 size;
 } __packed __aligned(4);
 
 /**
@@ -241,9 +241,9 @@
  * @buffer: input data
  */
 struct iwl_tm_sram_write_request {
-    __u32 offset;
-    __u32 len;
-    __u8 buffer[];
+  __u32 offset;
+  __u32 len;
+  __u8 buffer[];
 } __packed __aligned(4);
 
 /**
@@ -252,8 +252,8 @@
  * @length: data length
  */
 struct iwl_tm_sram_read_request {
-    __u32 offset;
-    __u32 length;
+  __u32 offset;
+  __u32 length;
 } __packed __aligned(4);
 
 /**
@@ -261,7 +261,7 @@
  * @read_sv: rather or not read sv_srop
  */
 struct iwl_tm_dev_info_req {
-    __u32 read_sv;
+  __u32 read_sv;
 } __packed __aligned(4);
 
 /**
@@ -274,12 +274,12 @@
  * @build_ver:
  */
 struct iwl_tm_dev_info {
-    __u32 dev_id;
-    __u32 vendor_id;
-    __u32 silicon_step;
-    __u32 fw_ver;
-    __u32 build_ver;
-    __u8 driver_ver[];
+  __u32 dev_id;
+  __u32 vendor_id;
+  __u32 silicon_step;
+  __u32 fw_ver;
+  __u32 build_ver;
+  __u8 driver_ver[];
 } __packed __aligned(4);
 
 /*
@@ -294,13 +294,13 @@
  * @mode: recording mode (internal buffer or continuous recording).
  */
 struct iwl_tm_thrshld_md {
-    __u16 monitor_collec_wind;
-    __u16 seq;
-    __u32 pkt_start;
-    __u32 pkt_end;
-    __u32 msrmnt;
-    __u16 tid;
-    __u8 mode;
+  __u16 monitor_collec_wind;
+  __u16 seq;
+  __u32 pkt_start;
+  __u32 pkt_end;
+  __u32 msrmnt;
+  __u16 tid;
+  __u8 mode;
 } __packed __aligned(4);
 
 #define MAX_OP_MODE_LENGTH 16
@@ -309,7 +309,7 @@
  * @new_op_mode:    size of data
  */
 struct iwl_switch_op_mode {
-    __u8 new_op_mode[MAX_OP_MODE_LENGTH];
+  __u8 new_op_mode[MAX_OP_MODE_LENGTH];
 } __packed __aligned(4);
 
 /**
@@ -317,7 +317,7 @@
  * @silicon_step: the device silicon step
  */
 struct iwl_sil_step {
-    __u32 silicon_step;
+  __u32 silicon_step;
 } __packed __aligned(4);
 
 /**
@@ -325,7 +325,7 @@
  * @silicon_type: the device silicon type
  */
 struct iwl_tm_sil_type {
-    __u32 silicon_type;
+  __u32 silicon_type;
 } __packed __aligned(4);
 
 /**
@@ -336,10 +336,10 @@
  * @type:   - RFID type
  */
 struct iwl_tm_rfid {
-    __u32 flavor;
-    __u32 dash;
-    __u32 step;
-    __u32 type;
+  __u32 flavor;
+  __u32 dash;
+  __u32 step;
+  __u32 type;
 } __packed __aligned(4);
 
 #define MAX_DRIVER_VERSION_LEN 256
@@ -351,9 +351,9 @@
  * @build_time: build time
  */
 struct iwl_tm_build_info {
-    __u8 driver_version[MAX_DRIVER_VERSION_LEN];
-    __u8 branch_time[MAX_BUILD_DATE_LEN];
-    __u8 build_time[MAX_BUILD_DATE_LEN];
+  __u8 driver_version[MAX_DRIVER_VERSION_LEN];
+  __u8 branch_time[MAX_BUILD_DATE_LEN];
+  __u8 build_time[MAX_BUILD_DATE_LEN];
 } __packed __aligned(4);
 
 /**
@@ -367,12 +367,12 @@
  *  + fw_capa_len)
  */
 struct iwl_tm_get_fw_info {
-    __u32 fw_major_ver;
-    __u32 fw_minor_ver;
-    __u32 fw_capa_flags;
-    __u32 fw_capa_api_len;
-    __u32 fw_capa_len;
-    __u8 data[];
+  __u32 fw_major_ver;
+  __u32 fw_minor_ver;
+  __u32 fw_capa_flags;
+  __u32 fw_capa_api_len;
+  __u32 fw_capa_len;
+  __u8 data[];
 } __packed __aligned(4);
 
 /* xVT definitions */
@@ -381,8 +381,8 @@
 #define IWL_XVT_RFKILL_ON 1
 
 struct iwl_xvt_user_calib_ctrl {
-    __u32 flow_trigger;
-    __u32 event_trigger;
+  __u32 flow_trigger;
+  __u32 event_trigger;
 } __packed __aligned(4);
 
 #define IWL_USER_FW_IMAGE_IDX_INIT 0
@@ -405,12 +405,12 @@
 enum { IWL_XVT_GET_CALIB_TYPE_DEF = 0, IWL_XVT_GET_CALIB_TYPE_RUNTIME };
 
 struct iwl_xvt_sw_cfg_request {
-    __u32 load_mask;
-    __u32 cfg_mask;
-    __u32 phy_config;
-    __u32 get_calib_type;
-    __u32 dbg_flags;
-    struct iwl_xvt_user_calib_ctrl calib_ctrl[IWL_UCODE_TYPE_MAX];
+  __u32 load_mask;
+  __u32 cfg_mask;
+  __u32 phy_config;
+  __u32 get_calib_type;
+  __u32 dbg_flags;
+  struct iwl_xvt_user_calib_ctrl calib_ctrl[IWL_UCODE_TYPE_MAX];
 } __packed __aligned(4);
 
 /**
@@ -422,10 +422,10 @@
  * @data:   Result entry data
  */
 struct iwl_xvt_phy_db_request {
-    __u32 type;
-    __u32 chg_id;
-    __u32 size;
-    __u8 data[];
+  __u32 type;
+  __u32 chg_id;
+  __u32 size;
+  __u8 data[];
 } __packed __aligned(4);
 
 #define IWL_TM_STATION_COUNT 16
@@ -443,15 +443,15 @@
  * @data:     Data to transmit
  */
 struct iwl_tm_mod_tx_request {
-    __u32 times;
-    __u32 delay_us;
-    __u32 pa_detect_en;
-    __u32 trigger_led;
-    __u32 len;
-    __u32 rate_flags;
-    __u32 no_ack;
-    __u8 sta_id;
-    __u8 data[];
+  __u32 times;
+  __u32 delay_us;
+  __u32 pa_detect_en;
+  __u32 trigger_led;
+  __u32 len;
+  __u32 rate_flags;
+  __u32 no_ack;
+  __u8 sta_id;
+  __u8 data[];
 } __packed __aligned(4);
 
 /**
@@ -461,19 +461,19 @@
  * @tx_req: pointer to data of transmission request
  */
 struct iwl_xvt_tx_mod_task_data {
-    __u32 lmac_id;
-    struct iwl_xvt* xvt;
-    struct iwl_tm_mod_tx_request tx_req;
+  __u32 lmac_id;
+  struct iwl_xvt* xvt;
+  struct iwl_tm_mod_tx_request tx_req;
 } __packed __aligned(4);
 
 /**
  * error status for status parameter in struct iwl_xvt_tx_mod_done
  */
 enum {
-    XVT_TX_DRIVER_SUCCESSFUL = 0,
-    XVT_TX_DRIVER_QUEUE_FULL,
-    XVT_TX_DRIVER_TIMEOUT,
-    XVT_TX_DRIVER_ABORTED
+  XVT_TX_DRIVER_SUCCESSFUL = 0,
+  XVT_TX_DRIVER_QUEUE_FULL,
+  XVT_TX_DRIVER_TIMEOUT,
+  XVT_TX_DRIVER_ABORTED
 };
 
 /**
@@ -483,9 +483,9 @@
  * @lmac_id: lmac index
  */
 struct iwl_xvt_tx_mod_done {
-    __u64 num_of_packets;
-    __u32 status;
-    __u32 lmac_id;
+  __u64 num_of_packets;
+  __u32 status;
+  __u32 lmac_id;
 } __packed __aligned(4);
 
 /**
@@ -493,7 +493,7 @@
  * @lmac_id: which lmac id to stop
  */
 struct iwl_xvt_tx_mod_stop {
-    __u32 lmac_id;
+  __u32 lmac_id;
 } __packed __aligned(4);
 
 /**
@@ -502,7 +502,7 @@
  *        1 - start
  */
 struct iwl_xvt_rx_hdrs_mode_request {
-    __u32 mode;
+  __u32 mode;
 } __packed __aligned(4);
 
 /**
@@ -511,7 +511,7 @@
  *        1 - start
  */
 struct iwl_xvt_apmg_pd_mode_request {
-    __u32 mode;
+  __u32 mode;
 } __packed __aligned(4);
 
 /**
@@ -520,8 +520,8 @@
  * @size:   Requested size of dma buffer
  */
 struct iwl_xvt_alloc_dma {
-    __u64 addr;
-    __u32 size;
+  __u64 addr;
+  __u32 size;
 } __packed __aligned(4);
 
 /**
@@ -530,8 +530,8 @@
  * @data:   Data to transmit
  */
 struct iwl_xvt_get_dma {
-    __u32 size;
-    __u8 data[];
+  __u32 size;
+  __u8 data[];
 } __packed __aligned(4);
 
 /**
@@ -539,7 +539,7 @@
  * @registers:  an array of registers to hold the chip id data
  */
 struct iwl_xvt_chip_id {
-    __u32 registers[3];
+  __u32 registers[3];
 } __packed __aligned(4);
 
 /**
@@ -549,8 +549,8 @@
  * @data:   data
  */
 struct iwl_tm_crash_data {
-    __u32 size;
-    __u8 data[];
+  __u32 size;
+  __u8 data[];
 } __packed __aligned(4);
 
 /**
@@ -558,12 +558,12 @@
  * @curr_mac_addr:  the current mac address
  */
 struct iwl_xvt_mac_addr_info {
-    __u8 mac_addr[ETH_ALEN];
+  __u8 mac_addr[ETH_ALEN];
 } __packed __aligned(4);
 
 enum {
-    TX_QUEUE_CFG_REMOVE,
-    TX_QUEUE_CFG_ADD,
+  TX_QUEUE_CFG_REMOVE,
+  TX_QUEUE_CFG_ADD,
 };
 
 /**
@@ -572,8 +572,8 @@
  * @ flags: 0 - remove queue, 1 - add queue
  */
 struct iwl_xvt_tx_queue_cfg {
-    __u8 sta_id;
-    __u8 operation;
+  __u8 sta_id;
+  __u8 operation;
 } __packed __aligned(4);
 
 /**
@@ -584,9 +584,9 @@
  * @ input_data: place holder for the sub command's input structure
  */
 struct iwl_xvt_driver_command_req {
-    __u32 command_id;
-    __u32 max_out_length;
-    __u8 input_data[0];
+  __u32 command_id;
+  __u32 max_out_length;
+  __u8 input_data[0];
 } __packed __aligned(4);
 
 /**
@@ -596,9 +596,9 @@
  * @ resp_data: place holder for the sub command's rseponse data
  */
 struct iwl_xvt_driver_command_resp {
-    __u32 command_id;
-    __u32 length;
-    __u8 resp_data[0];
+  __u32 command_id;
+  __u32 length;
+  __u8 resp_data[0];
 } __packed __aligned(4);
 
 /**
@@ -618,18 +618,18 @@
  * @queue_size: size of configured queue
  */
 struct iwl_xvt_txq_config {
-    uint8_t sta_id;
-    uint8_t tid;
-    uint8_t scd_queue;
-    uint8_t action;
-    uint8_t aggregate;
-    uint8_t tx_fifo;
-    uint8_t window;
-    uint8_t reserved;
-    uint16_t ssn;
-    uint16_t flags;
-    uint16_t reserved2;
-    int queue_size;
+  uint8_t sta_id;
+  uint8_t tid;
+  uint8_t scd_queue;
+  uint8_t action;
+  uint8_t aggregate;
+  uint8_t tx_fifo;
+  uint8_t window;
+  uint8_t reserved;
+  uint16_t ssn;
+  uint16_t flags;
+  uint16_t reserved2;
+  int queue_size;
 } __packed __aligned(4);
 
 /**
@@ -640,10 +640,10 @@
  * @reserved: for alignment
  */
 struct iwl_xvt_txq_config_resp {
-    uint8_t sta_id;
-    uint8_t tid;
-    uint8_t scd_queue;
-    uint8_t reserved;
+  uint8_t sta_id;
+  uint8_t tid;
+  uint8_t scd_queue;
+  uint8_t reserved;
 } __packed __aligned(4);
 
 /**
@@ -653,9 +653,9 @@
  * @payload: buffer containing payload
  */
 struct iwl_xvt_set_tx_payload {
-    uint16_t index;
-    uint16_t length;
-    uint8_t payload[];
+  uint16_t index;
+  uint16_t length;
+  uint8_t payload[];
 } __packed __aligned(4);
 
 /**
@@ -670,13 +670,13 @@
  * @frag_num: Array of fragments numbers
  */
 struct tx_cmd_commom_data {
-    uint32_t rate_flags;
-    uint32_t tx_flags;
-    uint8_t initial_rate_index;
-    uint8_t rts_retry_limit;
-    uint8_t data_retry_limit;
-    uint8_t fragment_size;
-    uint8_t frag_num[32];
+  uint32_t rate_flags;
+  uint32_t tx_flags;
+  uint8_t initial_rate_index;
+  uint8_t rts_retry_limit;
+  uint8_t data_retry_limit;
+  uint8_t fragment_size;
+  uint8_t frag_num[32];
 } __packed __aligned(4);
 
 /**
@@ -691,15 +691,15 @@
  * @header: MAC header
  */
 struct tx_cmd_frame_data {
-    uint16_t times;
-    uint8_t sta_id;
-    uint8_t queue;
-    uint8_t tid_tspec;
-    uint8_t sec_ctl;
-    uint8_t payload_index;
-    uint8_t reserved;
-    uint8_t key[16];
-    uint8_t header[IWL_XVT_MAX_MAC_HEADER_LENGTH];
+  uint16_t times;
+  uint8_t sta_id;
+  uint8_t queue;
+  uint8_t tid_tspec;
+  uint8_t sec_ctl;
+  uint8_t payload_index;
+  uint8_t reserved;
+  uint8_t key[16];
+  uint8_t header[IWL_XVT_MAX_MAC_HEADER_LENGTH];
 } __packed __aligned(4);
 
 /**
@@ -715,13 +715,13 @@
  * @frames_data: array of specific frame data for each queue
  */
 struct iwl_xvt_tx_start {
-    uint16_t num_of_cycles;
-    uint16_t num_of_different_frames;
-    uint8_t send_tx_resp;
-    uint8_t reserved1;
-    uint16_t reserved2;
-    struct tx_cmd_commom_data tx_data;
-    struct tx_cmd_frame_data frames_data[IWL_XVT_MAX_NUM_OF_FRAMES];
+  uint16_t num_of_cycles;
+  uint16_t num_of_different_frames;
+  uint8_t send_tx_resp;
+  uint8_t reserved1;
+  uint16_t reserved2;
+  struct tx_cmd_commom_data tx_data;
+  struct tx_cmd_frame_data frames_data[IWL_XVT_MAX_NUM_OF_FRAMES];
 } __packed __aligned(4);
 
 /**
@@ -730,8 +730,8 @@
  * @tx_start_data: IWL_DRV_CMD_TX_START command's input
  */
 struct iwl_xvt_enhanced_tx_data {
-    struct iwl_xvt* xvt;
-    struct iwl_xvt_tx_start tx_start_data;
+  struct iwl_xvt* xvt;
+  struct iwl_xvt_tx_start tx_start_data;
 } __packed __aligned(4);
 
 /**
@@ -740,9 +740,9 @@
  * @queue: queue packets were sent on
  */
 struct iwl_xvt_post_tx_data {
-    uint64_t num_of_packets;
-    uint16_t queue;
-    uint16_t reserved;
+  uint64_t num_of_packets;
+  uint16_t queue;
+  uint16_t reserved;
 } __packed __aligned(4);
 
 /**
@@ -753,9 +753,9 @@
  * @tx_data: data of sent frames for each queue
  */
 struct iwl_xvt_tx_done {
-    uint32_t status;
-    uint32_t num_of_queues;
-    struct iwl_xvt_post_tx_data tx_data[];
+  uint32_t status;
+  uint32_t num_of_queues;
+  struct iwl_xvt_post_tx_data tx_data[];
 } __packed __aligned(4);
 
 /*
@@ -765,9 +765,9 @@
  * @reserved: reserved
  */
 struct iwl_xvt_get_rx_agg_stats {
-    uint8_t sta_id;
-    uint8_t tid;
-    uint16_t reserved;
+  uint8_t sta_id;
+  uint8_t tid;
+  uint16_t reserved;
 } __packed __aligned(4);
 
 /*
@@ -779,10 +779,10 @@
  * @reordered: number of frames gone through the reorder buffer (unordered)
  */
 struct iwl_xvt_get_rx_agg_stats_resp {
-    uint32_t dropped;
-    uint32_t released;
-    uint32_t skipped;
-    uint32_t reordered;
+  uint32_t dropped;
+  uint32_t released;
+  uint32_t skipped;
+  uint32_t reordered;
 } __packed __aligned(4);
 
 /* struct iwl_xvt_config_rx_mpdu - Whether to send RX MPDU notifications to user
@@ -790,8 +790,8 @@
  * @reserved: reserved
  */
 struct iwl_xvt_config_rx_mpdu_req {
-    uint8_t enable;
-    uint8_t reserved[3];
+  uint8_t enable;
+  uint8_t reserved[3];
 } __packed __aligned(4);
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_IWL_TM_INFC_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.c
index 0bcb5c9..8a8760d 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.c
@@ -32,67 +32,72 @@
  *
  *****************************************************************************/
 
+#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h"
+
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-constants.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-drv.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-fh.h"
-#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h"
 
 struct iwl_trans* iwl_trans_alloc(unsigned int priv_size, const struct iwl_cfg* cfg,
                                   const struct iwl_trans_ops* ops) {
-    struct iwl_trans* trans;
+  struct iwl_trans* trans;
 #ifdef CONFIG_LOCKDEP
-    static struct lock_class_key __key;
+  static struct lock_class_key __key;
 #endif
 
-    trans = calloc(1, sizeof(*trans) + priv_size);
-    if (!trans) {
-        IWL_ERR(trans, "Failed to allocate transport\n");
-        return NULL;
-    }
+  trans = calloc(1, sizeof(*trans) + priv_size);
+  if (!trans) {
+    IWL_ERR(trans, "Failed to allocate transport\n");
+    return NULL;
+  }
 
 #ifdef CONFIG_LOCKDEP
-    lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", &__key, 0);
+  lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", &__key, 0);
 #endif
 
-    trans->cfg = cfg;
-    trans->ops = ops;
-    trans->num_rx_queues = 1;
+  trans->cfg = cfg;
+  trans->ops = ops;
+  trans->num_rx_queues = 1;
 
-    WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
+  WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
 
-    return trans;
+  return trans;
 }
 
-void iwl_trans_free(struct iwl_trans* trans) {
-    free(trans);
-}
+void iwl_trans_free(struct iwl_trans* trans) { free(trans); }
 
 zx_status_t iwl_trans_send_cmd(struct iwl_trans* trans, struct iwl_host_cmd* cmd) {
-    zx_status_t ret;
+  zx_status_t ret;
 
-    if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-                 test_bit(STATUS_RFKILL_OPMODE, &trans->status))) {
-        return ZX_ERR_BAD_STATE;
-    }
+  if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+               test_bit(STATUS_RFKILL_OPMODE, &trans->status))) {
+    return ZX_ERR_BAD_STATE;
+  }
 
-    if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) { return ZX_ERR_IO; }
+  if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) {
+    return ZX_ERR_IO;
+  }
 
-    if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
-        IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
-        return ZX_ERR_IO;
-    }
+  if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
+    IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+    return ZX_ERR_IO;
+  }
 
-    if (WARN_ON((cmd->flags & CMD_WANT_ASYNC_CALLBACK) && !(cmd->flags & CMD_ASYNC))) {
-        return ZX_ERR_IO_INVALID;
-    }
+  if (WARN_ON((cmd->flags & CMD_WANT_ASYNC_CALLBACK) && !(cmd->flags & CMD_ASYNC))) {
+    return ZX_ERR_IO_INVALID;
+  }
 
-    if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) { cmd->id = DEF_ID(cmd->id); }
+  if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
+    cmd->id = DEF_ID(cmd->id);
+  }
 
-    ret = trans->ops->send_cmd(trans, cmd);
+  ret = trans->ops->send_cmd(trans, cmd);
 
-    if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt)) { return ZX_ERR_IO; }
+  if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt)) {
+    return ZX_ERR_IO;
+  }
 
-    return ret;
+  return ret;
 }
 
 #if 0   // NEEDS_PORTING
@@ -149,9 +154,13 @@
 #endif  // NEEDS_PORTING
 
 void iwl_trans_ref(struct iwl_trans* trans) {
-    if (trans->ops->ref) { trans->ops->ref(trans); }
+  if (trans->ops->ref) {
+    trans->ops->ref(trans);
+  }
 }
 
 void iwl_trans_unref(struct iwl_trans* trans) {
-    if (trans->ops->unref) { trans->ops->unref(trans); }
+  if (trans->ops->unref) {
+    trans->ops->unref(trans);
+  }
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h
index 97d9373..dad5d0a 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h
@@ -94,34 +94,34 @@
 #define FH_RSCSR_RXQ_MASK 0x3F0000
 
 struct iwl_rx_packet {
-    /*
-     * The first 4 bytes of the RX frame header contain both the RX frame
-     * size and some flags.
-     * Bit fields:
-     * 31:    flag flush RB request
-     * 30:    flag ignore TC (terminal counter) request
-     * 29:    flag fast IRQ request
-     * 28-27: Reserved
-     * 26:    RADA enabled
-     * 25:    Offload enabled
-     * 24:    RPF enabled
-     * 23:    RSS enabled
-     * 22:    Checksum enabled
-     * 21-16: RX queue
-     * 15-14: Reserved
-     * 13-00: RX frame size
-     */
-    __le32 len_n_flags;
-    struct iwl_cmd_header hdr;
-    uint8_t data[];
+  /*
+   * The first 4 bytes of the RX frame header contain both the RX frame
+   * size and some flags.
+   * Bit fields:
+   * 31:    flag flush RB request
+   * 30:    flag ignore TC (terminal counter) request
+   * 29:    flag fast IRQ request
+   * 28-27: Reserved
+   * 26:    RADA enabled
+   * 25:    Offload enabled
+   * 24:    RPF enabled
+   * 23:    RSS enabled
+   * 22:    Checksum enabled
+   * 21-16: RX queue
+   * 15-14: Reserved
+   * 13-00: RX frame size
+   */
+  __le32 len_n_flags;
+  struct iwl_cmd_header hdr;
+  uint8_t data[];
 } __packed;
 
 static inline uint32_t iwl_rx_packet_len(const struct iwl_rx_packet* pkt) {
-    return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+  return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
 }
 
 static inline uint32_t iwl_rx_packet_payload_len(const struct iwl_rx_packet* pkt) {
-    return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
+  return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
 }
 
 /**
@@ -141,14 +141,14 @@
  *  called after this command completes. Valid only with CMD_ASYNC.
  */
 enum CMD_MODE {
-    CMD_ASYNC = BIT(0),
-    CMD_WANT_SKB = BIT(1),
-    CMD_SEND_IN_RFKILL = BIT(2),
-    CMD_HIGH_PRIO = BIT(3),
-    CMD_SEND_IN_IDLE = BIT(4),
-    CMD_MAKE_TRANS_IDLE = BIT(5),
-    CMD_WAKE_UP_TRANS = BIT(6),
-    CMD_WANT_ASYNC_CALLBACK = BIT(7),
+  CMD_ASYNC = BIT(0),
+  CMD_WANT_SKB = BIT(1),
+  CMD_SEND_IN_RFKILL = BIT(2),
+  CMD_HIGH_PRIO = BIT(3),
+  CMD_SEND_IN_IDLE = BIT(4),
+  CMD_MAKE_TRANS_IDLE = BIT(5),
+  CMD_WAKE_UP_TRANS = BIT(6),
+  CMD_WANT_ASYNC_CALLBACK = BIT(7),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -161,17 +161,17 @@
  * aren't fully copied and use other TFD space.
  */
 struct iwl_device_cmd {
-    union {
-        struct {
-            struct iwl_cmd_header hdr; /* uCode API */
-            uint8_t payload[DEF_CMD_PAYLOAD_SIZE];
-        };
-        struct {
-            struct iwl_cmd_header_wide hdr_wide;
-            uint8_t payload_wide[DEF_CMD_PAYLOAD_SIZE - sizeof(struct iwl_cmd_header_wide) +
-                                 sizeof(struct iwl_cmd_header)];
-        };
+  union {
+    struct {
+      struct iwl_cmd_header hdr; /* uCode API */
+      uint8_t payload[DEF_CMD_PAYLOAD_SIZE];
     };
+    struct {
+      struct iwl_cmd_header_wide hdr_wide;
+      uint8_t payload_wide[DEF_CMD_PAYLOAD_SIZE - sizeof(struct iwl_cmd_header_wide) +
+                           sizeof(struct iwl_cmd_header)];
+    };
+  };
 } __packed;
 
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
@@ -198,8 +198,8 @@
  *  Note that a TFD entry after a DUP one cannot be a normal copied one.
  */
 enum iwl_hcmd_dataflag {
-    IWL_HCMD_DFL_NOCOPY = BIT(0),
-    IWL_HCMD_DFL_DUP = BIT(1),
+  IWL_HCMD_DFL_NOCOPY = BIT(0),
+  IWL_HCMD_DFL_DUP = BIT(1),
 };
 
 /**
@@ -216,47 +216,45 @@
  *  version and group as well
  */
 struct iwl_host_cmd {
-    const void* data[IWL_MAX_CMD_TBS_PER_TFD];
-    struct iwl_rx_packet* resp_pkt;
-    unsigned long _rx_page_addr;
-    uint32_t _rx_page_order;
+  const void* data[IWL_MAX_CMD_TBS_PER_TFD];
+  struct iwl_rx_packet* resp_pkt;
+  unsigned long _rx_page_addr;
+  uint32_t _rx_page_order;
 
-    uint32_t flags;
-    uint32_t id;
-    uint16_t len[IWL_MAX_CMD_TBS_PER_TFD];
-    uint8_t dataflags[IWL_MAX_CMD_TBS_PER_TFD];
+  uint32_t flags;
+  uint32_t id;
+  uint16_t len[IWL_MAX_CMD_TBS_PER_TFD];
+  uint8_t dataflags[IWL_MAX_CMD_TBS_PER_TFD];
 };
 
 static inline void iwl_free_resp(struct iwl_host_cmd* cmd) {
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 #if 0   // NEEDS_PORTING
     free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
 #endif  // NEEDS_PORTING
 }
 
 struct iwl_rx_cmd_buffer {
-    struct page* _page;
-    int _offset;
-    bool _page_stolen;
-    uint32_t _rx_page_order;
-    unsigned int truesize;
-    uint8_t status;
+  struct page* _page;
+  int _offset;
+  bool _page_stolen;
+  uint32_t _rx_page_order;
+  unsigned int truesize;
+  uint8_t status;
 };
 
 static inline void* rxb_addr(struct iwl_rx_cmd_buffer* r) {
-    return (void*)((unsigned long)page_address(r->_page) + r->_offset);
+  return (void*)((unsigned long)page_address(r->_page) + r->_offset);
 }
 
-static inline int rxb_offset(struct iwl_rx_cmd_buffer* r) {
-    return r->_offset;
-}
+static inline int rxb_offset(struct iwl_rx_cmd_buffer* r) { return r->_offset; }
 
 static inline struct page* rxb_steal_page(struct iwl_rx_cmd_buffer* r) {
-    r->_page_stolen = true;
+  r->_page_stolen = true;
 #if 0   // NEEDS_PORTING
     get_page(r->_page);
 #endif  // NEEDS_PORTING
-    return r->_page;
+  return r->_page;
 }
 
 #if 0   // NEEDS_PORTING
@@ -287,8 +285,8 @@
  * @IWL_D3_STATUS_RESET: device was reset while suspended
  */
 enum iwl_d3_status {
-    IWL_D3_STATUS_ALIVE,
-    IWL_D3_STATUS_RESET,
+  IWL_D3_STATUS_ALIVE,
+  IWL_D3_STATUS_RESET,
 };
 
 /**
@@ -307,17 +305,17 @@
  * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
  */
 enum iwl_trans_status {
-    STATUS_SYNC_HCMD_ACTIVE,
-    STATUS_DEVICE_ENABLED,
-    STATUS_TPOWER_PMI,
-    STATUS_INT_ENABLED,
-    STATUS_RFKILL_HW,
-    STATUS_RFKILL_OPMODE,
-    STATUS_FW_ERROR,
-    STATUS_TRANS_GOING_IDLE,
-    STATUS_TRANS_IDLE,
-    STATUS_TA_ACTIVE,
-    STATUS_TRANS_DEAD,
+  STATUS_SYNC_HCMD_ACTIVE,
+  STATUS_DEVICE_ENABLED,
+  STATUS_TPOWER_PMI,
+  STATUS_INT_ENABLED,
+  STATUS_RFKILL_HW,
+  STATUS_RFKILL_OPMODE,
+  STATUS_FW_ERROR,
+  STATUS_TRANS_GOING_IDLE,
+  STATUS_TRANS_IDLE,
+  STATUS_TA_ACTIVE,
+  STATUS_TRANS_DEAD,
 };
 
 #if 0   // NEEDS_PORTING
@@ -340,20 +338,20 @@
 #endif  // NEEDS_PORTING
 
 struct iwl_hcmd_names {
-    uint8_t cmd_id;
-    const char* const cmd_name;
+  uint8_t cmd_id;
+  const char* const cmd_name;
 };
 
 #define HCMD_NAME(x) \
-    { .cmd_id = x, .cmd_name = #x }
+  { .cmd_id = x, .cmd_name = #x }
 
 struct iwl_hcmd_arr {
-    const struct iwl_hcmd_names* arr;
-    int size;
+  const struct iwl_hcmd_names* arr;
+  int size;
 };
 
 #define HCMD_ARR(x) \
-    { .arr = x, .size = ARRAY_SIZE(x) }
+  { .arr = x, .size = ARRAY_SIZE(x) }
 
 /**
  * struct iwl_trans_config - transport configuration
@@ -381,37 +379,37 @@
  *  space for at least two pointers
  */
 struct iwl_trans_config {
-    struct iwl_op_mode* op_mode;
+  struct iwl_op_mode* op_mode;
 
-    uint8_t cmd_queue;
-    uint8_t cmd_fifo;
-    unsigned int cmd_q_wdg_timeout;
-    const uint8_t* no_reclaim_cmds;
-    unsigned int n_no_reclaim_cmds;
+  uint8_t cmd_queue;
+  uint8_t cmd_fifo;
+  unsigned int cmd_q_wdg_timeout;
+  const uint8_t* no_reclaim_cmds;
+  unsigned int n_no_reclaim_cmds;
 
-    enum iwl_amsdu_size rx_buf_size;
-    bool bc_table_dword;
-    bool scd_set_active;
-    bool sw_csum_tx;
-    const struct iwl_hcmd_arr* command_groups;
-    int command_groups_size;
+  enum iwl_amsdu_size rx_buf_size;
+  bool bc_table_dword;
+  bool scd_set_active;
+  bool sw_csum_tx;
+  const struct iwl_hcmd_arr* command_groups;
+  int command_groups_size;
 
-    uint8_t cb_data_offs;
+  uint8_t cb_data_offs;
 };
 
 struct iwl_trans_dump_data {
-    uint32_t len;
-    uint8_t data[];
+  uint32_t len;
+  uint8_t data[];
 };
 
 struct iwl_trans;
 
 struct iwl_trans_txq_scd_cfg {
-    uint8_t fifo;
-    uint8_t sta_id;
-    uint8_t tid;
-    bool aggregate;
-    int frame_limit;
+  uint8_t fifo;
+  uint8_t sta_id;
+  uint8_t tid;
+  bool aggregate;
+  int frame_limit;
 };
 
 /**
@@ -422,10 +420,10 @@
  * @ur_bd_cb: DMA address of used BD cyclic buffer
  */
 struct iwl_trans_rxq_dma_data {
-    uint64_t fr_bd_cb;
-    uint32_t fr_bd_wid;
-    uint64_t urbd_stts_wrptr;
-    uint64_t ur_bd_cb;
+  uint64_t fr_bd_cb;
+  uint32_t fr_bd_wid;
+  uint64_t urbd_stts_wrptr;
+  uint64_t ur_bd_cb;
 };
 
 /**
@@ -517,62 +515,62 @@
  *  of the trans debugfs
  */
 struct iwl_trans_ops {
-    int (*start_hw)(struct iwl_trans* iwl_trans, bool low_power);
-    void (*op_mode_leave)(struct iwl_trans* iwl_trans);
+  int (*start_hw)(struct iwl_trans* iwl_trans, bool low_power);
+  void (*op_mode_leave)(struct iwl_trans* iwl_trans);
 #if IS_ENABLED(CPTCFG_IWLXVT)
-    int (*start_fw_dbg)(struct iwl_trans* trans, const struct fw_img* fw, bool run_in_rfkill,
-                        uint32_t fw_dbg_flags);
-    int (*test_mode_cmd)(struct iwl_trans* trans, bool enable);
+  int (*start_fw_dbg)(struct iwl_trans* trans, const struct fw_img* fw, bool run_in_rfkill,
+                      uint32_t fw_dbg_flags);
+  int (*test_mode_cmd)(struct iwl_trans* trans, bool enable);
 #endif
-    int (*start_fw)(struct iwl_trans* trans, const struct fw_img* fw, bool run_in_rfkill);
-    void (*fw_alive)(struct iwl_trans* trans, uint32_t scd_addr);
-    void (*stop_device)(struct iwl_trans* trans, bool low_power);
+  int (*start_fw)(struct iwl_trans* trans, const struct fw_img* fw, bool run_in_rfkill);
+  void (*fw_alive)(struct iwl_trans* trans, uint32_t scd_addr);
+  void (*stop_device)(struct iwl_trans* trans, bool low_power);
 
-    void (*d3_suspend)(struct iwl_trans* trans, bool test, bool reset);
-    int (*d3_resume)(struct iwl_trans* trans, enum iwl_d3_status* status, bool test, bool reset);
+  void (*d3_suspend)(struct iwl_trans* trans, bool test, bool reset);
+  int (*d3_resume)(struct iwl_trans* trans, enum iwl_d3_status* status, bool test, bool reset);
 
-    int (*send_cmd)(struct iwl_trans* trans, struct iwl_host_cmd* cmd);
+  int (*send_cmd)(struct iwl_trans* trans, struct iwl_host_cmd* cmd);
 
-    int (*tx)(struct iwl_trans* trans, struct sk_buff* skb, struct iwl_device_cmd* dev_cmd,
-              int queue);
-    void (*reclaim)(struct iwl_trans* trans, int queue, int ssn, struct sk_buff_head* skbs);
+  int (*tx)(struct iwl_trans* trans, struct sk_buff* skb, struct iwl_device_cmd* dev_cmd,
+            int queue);
+  void (*reclaim)(struct iwl_trans* trans, int queue, int ssn, struct sk_buff_head* skbs);
 
-    bool (*txq_enable)(struct iwl_trans* trans, int queue, uint16_t ssn,
-                       const struct iwl_trans_txq_scd_cfg* cfg, unsigned int queue_wdg_timeout);
-    void (*txq_disable)(struct iwl_trans* trans, int queue, bool configure_scd);
-    /* 22000 functions */
-    int (*txq_alloc)(struct iwl_trans* trans, __le16 flags, uint8_t sta_id, uint8_t tid, int cmd_id,
-                     int size, unsigned int queue_wdg_timeout);
-    void (*txq_free)(struct iwl_trans* trans, int queue);
-    int (*rxq_dma_data)(struct iwl_trans* trans, int queue, struct iwl_trans_rxq_dma_data* data);
+  bool (*txq_enable)(struct iwl_trans* trans, int queue, uint16_t ssn,
+                     const struct iwl_trans_txq_scd_cfg* cfg, unsigned int queue_wdg_timeout);
+  void (*txq_disable)(struct iwl_trans* trans, int queue, bool configure_scd);
+  /* 22000 functions */
+  int (*txq_alloc)(struct iwl_trans* trans, __le16 flags, uint8_t sta_id, uint8_t tid, int cmd_id,
+                   int size, unsigned int queue_wdg_timeout);
+  void (*txq_free)(struct iwl_trans* trans, int queue);
+  int (*rxq_dma_data)(struct iwl_trans* trans, int queue, struct iwl_trans_rxq_dma_data* data);
 
-    void (*txq_set_shared_mode)(struct iwl_trans* trans, uint32_t txq_id, bool shared);
+  void (*txq_set_shared_mode)(struct iwl_trans* trans, uint32_t txq_id, bool shared);
 
-    int (*wait_tx_queues_empty)(struct iwl_trans* trans, uint32_t txq_bm);
-    int (*wait_txq_empty)(struct iwl_trans* trans, int queue);
-    void (*freeze_txq_timer)(struct iwl_trans* trans, unsigned long txqs, bool freeze);
-    void (*block_txq_ptrs)(struct iwl_trans* trans, bool block);
+  int (*wait_tx_queues_empty)(struct iwl_trans* trans, uint32_t txq_bm);
+  int (*wait_txq_empty)(struct iwl_trans* trans, int queue);
+  void (*freeze_txq_timer)(struct iwl_trans* trans, unsigned long txqs, bool freeze);
+  void (*block_txq_ptrs)(struct iwl_trans* trans, bool block);
 
-    void (*write8)(struct iwl_trans* trans, uint32_t ofs, uint8_t val);
-    void (*write32)(struct iwl_trans* trans, uint32_t ofs, uint32_t val);
-    uint32_t (*read32)(struct iwl_trans* trans, uint32_t ofs);
-    uint32_t (*read_prph)(struct iwl_trans* trans, uint32_t ofs);
-    void (*write_prph)(struct iwl_trans* trans, uint32_t ofs, uint32_t val);
-    int (*read_mem)(struct iwl_trans* trans, uint32_t addr, void* buf, int dwords);
-    int (*write_mem)(struct iwl_trans* trans, uint32_t addr, const void* buf, int dwords);
-    void (*configure)(struct iwl_trans* trans, const struct iwl_trans_config* trans_cfg);
-    void (*set_pmi)(struct iwl_trans* trans, bool state);
-    void (*sw_reset)(struct iwl_trans* trans);
-    bool (*grab_nic_access)(struct iwl_trans* trans, unsigned long* flags);
-    void (*release_nic_access)(struct iwl_trans* trans, unsigned long* flags);
-    void (*set_bits_mask)(struct iwl_trans* trans, uint32_t reg, uint32_t mask, uint32_t value);
-    void (*ref)(struct iwl_trans* trans);
-    void (*unref)(struct iwl_trans* trans);
-    int (*suspend)(struct iwl_trans* trans);
-    void (*resume)(struct iwl_trans* trans);
+  void (*write8)(struct iwl_trans* trans, uint32_t ofs, uint8_t val);
+  void (*write32)(struct iwl_trans* trans, uint32_t ofs, uint32_t val);
+  uint32_t (*read32)(struct iwl_trans* trans, uint32_t ofs);
+  uint32_t (*read_prph)(struct iwl_trans* trans, uint32_t ofs);
+  void (*write_prph)(struct iwl_trans* trans, uint32_t ofs, uint32_t val);
+  int (*read_mem)(struct iwl_trans* trans, uint32_t addr, void* buf, int dwords);
+  int (*write_mem)(struct iwl_trans* trans, uint32_t addr, const void* buf, int dwords);
+  void (*configure)(struct iwl_trans* trans, const struct iwl_trans_config* trans_cfg);
+  void (*set_pmi)(struct iwl_trans* trans, bool state);
+  void (*sw_reset)(struct iwl_trans* trans);
+  bool (*grab_nic_access)(struct iwl_trans* trans, unsigned long* flags);
+  void (*release_nic_access)(struct iwl_trans* trans, unsigned long* flags);
+  void (*set_bits_mask)(struct iwl_trans* trans, uint32_t reg, uint32_t mask, uint32_t value);
+  void (*ref)(struct iwl_trans* trans);
+  void (*unref)(struct iwl_trans* trans);
+  int (*suspend)(struct iwl_trans* trans);
+  void (*resume)(struct iwl_trans* trans);
 
-    struct iwl_trans_dump_data* (*dump_data)(struct iwl_trans* trans, uint32_t dump_mask);
-    void (*debugfs_cleanup)(struct iwl_trans* trans);
+  struct iwl_trans_dump_data* (*dump_data)(struct iwl_trans* trans, uint32_t dump_mask);
+  void (*debugfs_cleanup)(struct iwl_trans* trans);
 };
 
 /**
@@ -582,8 +580,8 @@
  * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
  */
 enum iwl_trans_state {
-    IWL_TRANS_NO_FW = 0,
-    IWL_TRANS_FW_ALIVE = 1,
+  IWL_TRANS_NO_FW = 0,
+  IWL_TRANS_FW_ALIVE = 1,
 };
 
 /**
@@ -639,9 +637,9 @@
  * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
  */
 enum iwl_plat_pm_mode {
-    IWL_PLAT_PM_MODE_DISABLED,
-    IWL_PLAT_PM_MODE_D3,
-    IWL_PLAT_PM_MODE_D0I3,
+  IWL_PLAT_PM_MODE_DISABLED,
+  IWL_PLAT_PM_MODE_D3,
+  IWL_PLAT_PM_MODE_D0I3,
 };
 
 /* Max time to wait for trans to become idle/non-idle on d0i3
@@ -656,9 +654,9 @@
  * @size: size of the block/page
  */
 struct iwl_dram_data {
-    dma_addr_t physical;
-    void* block;
-    int size;
+  dma_addr_t physical;
+  void* block;
+  int size;
 };
 
 /**
@@ -705,71 +703,71 @@
  * @dbg_rec_on: true iff there is a fw debug recording currently active
  */
 struct iwl_trans {
-    const struct iwl_trans_ops* ops;
-    struct iwl_op_mode* op_mode;
-    const struct iwl_cfg* cfg;
-    struct iwl_drv* drv;
-    struct iwl_tm_gnl_dev* tmdev;
-    enum iwl_trans_state state;
-    unsigned long status;
+  const struct iwl_trans_ops* ops;
+  struct iwl_op_mode* op_mode;
+  const struct iwl_cfg* cfg;
+  struct iwl_drv* drv;
+  struct iwl_tm_gnl_dev* tmdev;
+  enum iwl_trans_state state;
+  unsigned long status;
 
-    zx_device_t* zxdev;
-    struct device* dev;
-    uint32_t max_skb_frags;
-    uint32_t hw_rev;
-    uint32_t hw_rf_id;
-    uint32_t hw_id;
-    char hw_id_str[52];
+  zx_device_t* zxdev;
+  struct device* dev;
+  uint32_t max_skb_frags;
+  uint32_t hw_rev;
+  uint32_t hw_rf_id;
+  uint32_t hw_id;
+  char hw_id_str[52];
 
-    uint8_t rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
+  uint8_t rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
 
-    bool pm_support;
-    bool ltr_enabled;
+  bool pm_support;
+  bool ltr_enabled;
 
-    const struct iwl_hcmd_arr* command_groups;
-    int command_groups_size;
-    bool wide_cmd_header;
+  const struct iwl_hcmd_arr* command_groups;
+  int command_groups_size;
+  bool wide_cmd_header;
 
-    uint8_t num_rx_queues;
+  uint8_t num_rx_queues;
 
-    size_t iml_len;
-    uint8_t* iml;
+  size_t iml_len;
+  uint8_t* iml;
 
-    /* The following fields are internal only */
-    struct dentry* dbgfs_dir;
+  /* The following fields are internal only */
+  struct dentry* dbgfs_dir;
 
 #ifdef CONFIG_LOCKDEP
-    struct lockdep_map sync_cmd_lockdep_map;
+  struct lockdep_map sync_cmd_lockdep_map;
 #endif
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    struct iwl_dbg_cfg dbg_cfg;
+  struct iwl_dbg_cfg dbg_cfg;
 #endif
-    struct iwl_apply_point_data apply_points[IWL_FW_INI_APPLY_NUM];
-    struct iwl_apply_point_data apply_points_ext[IWL_FW_INI_APPLY_NUM];
+  struct iwl_apply_point_data apply_points[IWL_FW_INI_APPLY_NUM];
+  struct iwl_apply_point_data apply_points_ext[IWL_FW_INI_APPLY_NUM];
 
-    bool external_ini_loaded;
-    bool ini_valid;
+  bool external_ini_loaded;
+  bool ini_valid;
 
-    const struct iwl_fw_dbg_dest_tlv_v1* dbg_dest_tlv;
-    const struct iwl_fw_dbg_conf_tlv* dbg_conf_tlv[FW_DBG_CONF_MAX];
-    struct iwl_fw_dbg_trigger_tlv* const* dbg_trigger_tlv;
-    uint8_t dbg_n_dest_reg;
-    int num_blocks;
-    struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM];
+  const struct iwl_fw_dbg_dest_tlv_v1* dbg_dest_tlv;
+  const struct iwl_fw_dbg_conf_tlv* dbg_conf_tlv[FW_DBG_CONF_MAX];
+  struct iwl_fw_dbg_trigger_tlv* const* dbg_trigger_tlv;
+  uint8_t dbg_n_dest_reg;
+  int num_blocks;
+  struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM];
 
-    enum iwl_plat_pm_mode system_pm_mode;
-    enum iwl_plat_pm_mode runtime_pm_mode;
-    bool suspending;
-    bool dbg_rec_on;
+  enum iwl_plat_pm_mode system_pm_mode;
+  enum iwl_plat_pm_mode runtime_pm_mode;
+  bool suspending;
+  bool dbg_rec_on;
 
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
-    struct iwl_testmode testmode;
+  struct iwl_testmode testmode;
 #endif
 
-    /* pointer to trans specific struct */
-    /*Ensure that this pointer will always be aligned to sizeof pointer */
-    char trans_specific[] __aligned(sizeof(void*));
+  /* pointer to trans specific struct */
+  /*Ensure that this pointer will always be aligned to sizeof pointer */
+  char trans_specific[] __aligned(sizeof(void*));
 };
 
 const char* iwl_get_cmd_string(struct iwl_trans* trans, uint32_t id);
@@ -777,110 +775,124 @@
 
 static inline void iwl_trans_configure(struct iwl_trans* trans,
                                        const struct iwl_trans_config* trans_cfg) {
-    trans->op_mode = trans_cfg->op_mode;
+  trans->op_mode = trans_cfg->op_mode;
 
-    trans->ops->configure(trans, trans_cfg);
-    WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
+  trans->ops->configure(trans, trans_cfg);
+  WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
 }
 
 static inline int _iwl_trans_start_hw(struct iwl_trans* trans, bool low_power) {
-    might_sleep();
+  might_sleep();
 
-    return trans->ops->start_hw(trans, low_power);
+  return trans->ops->start_hw(trans, low_power);
 }
 
 static inline int iwl_trans_start_hw(struct iwl_trans* trans) {
-    return trans->ops->start_hw(trans, true);
+  return trans->ops->start_hw(trans, true);
 }
 
 static inline void iwl_trans_op_mode_leave(struct iwl_trans* trans) {
-    might_sleep();
+  might_sleep();
 
-    if (trans->ops->op_mode_leave) { trans->ops->op_mode_leave(trans); }
+  if (trans->ops->op_mode_leave) {
+    trans->ops->op_mode_leave(trans);
+  }
 
-    trans->op_mode = NULL;
+  trans->op_mode = NULL;
 
-    trans->state = IWL_TRANS_NO_FW;
+  trans->state = IWL_TRANS_NO_FW;
 }
 
 static inline void iwl_trans_fw_alive(struct iwl_trans* trans, uint32_t scd_addr) {
-    might_sleep();
+  might_sleep();
 
-    trans->state = IWL_TRANS_FW_ALIVE;
+  trans->state = IWL_TRANS_FW_ALIVE;
 
-    trans->ops->fw_alive(trans, scd_addr);
+  trans->ops->fw_alive(trans, scd_addr);
 }
 
 static inline int iwl_trans_start_fw(struct iwl_trans* trans, const struct fw_img* fw,
                                      bool run_in_rfkill) {
-    might_sleep();
+  might_sleep();
 
-    WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+  WARN_ON_ONCE(!trans->rx_mpdu_cmd);
 
-    clear_bit(STATUS_FW_ERROR, &trans->status);
-    return trans->ops->start_fw(trans, fw, run_in_rfkill);
+  clear_bit(STATUS_FW_ERROR, &trans->status);
+  return trans->ops->start_fw(trans, fw, run_in_rfkill);
 }
 
 #if IS_ENABLED(CPTCFG_IWLXVT)
 enum iwl_xvt_dbg_flags {
-    IWL_XVT_DBG_ADC_SAMP_TEST = BIT(0),
-    IWL_XVT_DBG_ADC_SAMP_SYNC_RX = BIT(1),
+  IWL_XVT_DBG_ADC_SAMP_TEST = BIT(0),
+  IWL_XVT_DBG_ADC_SAMP_SYNC_RX = BIT(1),
 };
 
 static inline int iwl_trans_start_fw_dbg(struct iwl_trans* trans, const struct fw_img* fw,
                                          bool run_in_rfkill, uint32_t dbg_flags) {
-    might_sleep();
+  might_sleep();
 
-    if (WARN_ON_ONCE(!trans->ops->start_fw_dbg && dbg_flags)) { return -ENOTSUPP; }
+  if (WARN_ON_ONCE(!trans->ops->start_fw_dbg && dbg_flags)) {
+    return -ENOTSUPP;
+  }
 
-    clear_bit(STATUS_FW_ERROR, &trans->status);
-    if (trans->ops->start_fw_dbg) {
-        return trans->ops->start_fw_dbg(trans, fw, run_in_rfkill, dbg_flags);
-    }
+  clear_bit(STATUS_FW_ERROR, &trans->status);
+  if (trans->ops->start_fw_dbg) {
+    return trans->ops->start_fw_dbg(trans, fw, run_in_rfkill, dbg_flags);
+  }
 
-    return trans->ops->start_fw(trans, fw, run_in_rfkill);
+  return trans->ops->start_fw(trans, fw, run_in_rfkill);
 }
 #endif
 
 static inline void _iwl_trans_stop_device(struct iwl_trans* trans, bool low_power) {
-    might_sleep();
+  might_sleep();
 
-    trans->ops->stop_device(trans, low_power);
+  trans->ops->stop_device(trans, low_power);
 
-    trans->state = IWL_TRANS_NO_FW;
+  trans->state = IWL_TRANS_NO_FW;
 }
 
 static inline void iwl_trans_stop_device(struct iwl_trans* trans) {
-    _iwl_trans_stop_device(trans, true);
+  _iwl_trans_stop_device(trans, true);
 }
 
 static inline void iwl_trans_d3_suspend(struct iwl_trans* trans, bool test, bool reset) {
-    might_sleep();
-    if (trans->ops->d3_suspend) { trans->ops->d3_suspend(trans, test, reset); }
+  might_sleep();
+  if (trans->ops->d3_suspend) {
+    trans->ops->d3_suspend(trans, test, reset);
+  }
 }
 
 static inline int iwl_trans_d3_resume(struct iwl_trans* trans, enum iwl_d3_status* status,
                                       bool test, bool reset) {
-    might_sleep();
-    if (!trans->ops->d3_resume) { return 0; }
+  might_sleep();
+  if (!trans->ops->d3_resume) {
+    return 0;
+  }
 
-    return trans->ops->d3_resume(trans, status, test, reset);
+  return trans->ops->d3_resume(trans, status, test, reset);
 }
 
 static inline int iwl_trans_suspend(struct iwl_trans* trans) {
-    if (!trans->ops->suspend) { return 0; }
+  if (!trans->ops->suspend) {
+    return 0;
+  }
 
-    return trans->ops->suspend(trans);
+  return trans->ops->suspend(trans);
 }
 
 static inline void iwl_trans_resume(struct iwl_trans* trans) {
-    if (trans->ops->resume) { trans->ops->resume(trans); }
+  if (trans->ops->resume) {
+    trans->ops->resume(trans);
+  }
 }
 
 static inline struct iwl_trans_dump_data* iwl_trans_dump_data(struct iwl_trans* trans,
                                                               uint32_t dump_mask) {
-    if (!trans->ops->dump_data) { return NULL; }
-    return trans->ops->dump_data(trans, dump_mask);
+  if (!trans->ops->dump_data) {
+    return NULL;
+  }
+  return trans->ops->dump_data(trans, dump_mask);
 }
 
 #if 0   // NEEDS_PORTING
@@ -1035,12 +1047,14 @@
 #endif  // NEEDS_PORTING
 
 static inline void iwl_trans_block_txq_ptrs(struct iwl_trans* trans, bool block) {
-    if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
-        IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
-        return;
-    }
+  if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+    IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+    return;
+  }
 
-    if (trans->ops->block_txq_ptrs) { trans->ops->block_txq_ptrs(trans, block); }
+  if (trans->ops->block_txq_ptrs) {
+    trans->ops->block_txq_ptrs(trans, block);
+  }
 }
 
 #if 0  // NEEDS_PORTING
@@ -1079,40 +1093,40 @@
     return -ENOTSUPP;
 }
 #endif
-#endif   // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 static inline void iwl_trans_write8(struct iwl_trans* trans, uint32_t ofs, uint8_t val) {
-    trans->ops->write8(trans, ofs, val);
+  trans->ops->write8(trans, ofs, val);
 }
 
 static inline void iwl_trans_write32(struct iwl_trans* trans, uint32_t ofs, uint32_t val) {
-    trans->ops->write32(trans, ofs, val);
+  trans->ops->write32(trans, ofs, val);
 }
 
 static inline uint32_t iwl_trans_read32(struct iwl_trans* trans, uint32_t ofs) {
-    return trans->ops->read32(trans, ofs);
+  return trans->ops->read32(trans, ofs);
 }
 
 static inline uint32_t iwl_trans_read_prph(struct iwl_trans* trans, uint32_t ofs) {
-    return trans->ops->read_prph(trans, ofs);
+  return trans->ops->read_prph(trans, ofs);
 }
 
-static inline void iwl_trans_write_prph(struct iwl_trans* trans, uint32_t ofs,
-                                        uint32_t val) {
-    return trans->ops->write_prph(trans, ofs, val);
+static inline void iwl_trans_write_prph(struct iwl_trans* trans, uint32_t ofs, uint32_t val) {
+  return trans->ops->write_prph(trans, ofs, val);
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static inline int iwl_trans_read_mem(struct iwl_trans* trans, uint32_t addr,
                                      void* buf, int dwords) {
     return trans->ops->read_mem(trans, addr, buf, dwords);
 }
 
-#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)                            \
-    do {                                                                               \
-        if (__builtin_constant_p(bufsize)) BUILD_BUG_ON((bufsize) % sizeof(uint32_t)); \
-        iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(uint32_t));            \
-    } while (0)
+#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)             \
+  do {                                                                  \
+    if (__builtin_constant_p(bufsize))                                  \
+      BUILD_BUG_ON((bufsize) % sizeof(uint32_t));                       \
+    iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(uint32_t)); \
+  } while (0)
 
 static inline uint32_t iwl_trans_read_mem32(struct iwl_trans* trans, uint32_t addr) {
     uint32_t value;
@@ -1149,14 +1163,13 @@
 
 static inline void iwl_trans_set_bits_mask(struct iwl_trans* trans, uint32_t reg, uint32_t mask,
                                            uint32_t value) {
-    trans->ops->set_bits_mask(trans, reg, mask, value);
+  trans->ops->set_bits_mask(trans, reg, mask, value);
 }
 
-#define iwl_trans_grab_nic_access(trans, flags) \
-    ((trans)->ops->grab_nic_access(trans, flags))
+#define iwl_trans_grab_nic_access(trans, flags) ((trans)->ops->grab_nic_access(trans, flags))
 
 static inline void iwl_trans_release_nic_access(struct iwl_trans* trans, unsigned long* flags) {
-    trans->ops->release_nic_access(trans, flags);
+  trans->ops->release_nic_access(trans, flags);
 }
 
 #if 0   // NEEDS_PORTING
@@ -1173,7 +1186,7 @@
 #endif  // NEEDS_PORTING
 
 static inline bool iwl_trans_fw_running(struct iwl_trans* trans) {
-    return trans->state == IWL_TRANS_FW_ALIVE;
+  return trans->state == IWL_TRANS_FW_ALIVE;
 }
 
 /*****************************************************
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-vendor-cmd.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-vendor-cmd.h
index dcc160c..5d49e99 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-vendor-cmd.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-vendor-cmd.h
@@ -124,42 +124,42 @@
  */
 
 enum iwl_mvm_vendor_cmd {
-    IWL_MVM_VENDOR_CMD_SET_LOW_LATENCY = 0x00,
-    IWL_MVM_VENDOR_CMD_GET_LOW_LATENCY = 0x01,
-    IWL_MVM_VENDOR_CMD_TCM_EVENT = 0x02,
-    IWL_MVM_VENDOR_CMD_LTE_STATE = 0x03,
-    IWL_MVM_VENDOR_CMD_LTE_COEX_CONFIG_INFO = 0x04,
-    IWL_MVM_VENDOR_CMD_LTE_COEX_DYNAMIC_INFO = 0x05,
-    IWL_MVM_VENDOR_CMD_LTE_COEX_SPS_INFO = 0x06,
-    IWL_MVM_VENDOR_CMD_LTE_COEX_WIFI_RPRTD_CHAN = 0x07,
-    IWL_MVM_VENDOR_CMD_SET_COUNTRY = 0x08,
-    IWL_MVM_VENDOR_CMD_PROXY_FRAME_FILTERING = 0x09,
-    IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_ADD = 0x0a,
-    IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_DEL = 0x0b,
-    IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_QUERY = 0x0c,
-    IWL_MVM_VENDOR_CMD_SET_NIC_TXPOWER_LIMIT = 0x0d,
-    IWL_MVM_VENDOR_CMD_OPPPS_WA = 0x0e,
-    IWL_MVM_VENDOR_CMD_GSCAN_GET_CAPABILITIES = 0x0f,
-    IWL_MVM_VENDOR_CMD_GSCAN_START = 0x10,
-    IWL_MVM_VENDOR_CMD_GSCAN_STOP = 0x11,
-    IWL_MVM_VENDOR_CMD_GSCAN_RESULTS_EVENT = 0x12,
-    IWL_MVM_VENDOR_CMD_GSCAN_SET_BSSID_HOTLIST = 0x13,
-    IWL_MVM_VENDOR_CMD_GSCAN_SET_SIGNIFICANT_CHANGE = 0x14,
-    IWL_MVM_VENDOR_CMD_GSCAN_HOTLIST_CHANGE_EVENT = 0x15,
-    IWL_MVM_VENDOR_CMD_GSCAN_SIGNIFICANT_CHANGE_EVENT = 0x16,
-    IWL_MVM_VENDOR_CMD_RXFILTER = 0x17,
-    IWL_MVM_VENDOR_CMD_GSCAN_BEACON_EVENT = 0x18,
-    IWL_MVM_VENDOR_CMD_DBG_COLLECT = 0x19,
-    IWL_MVM_VENDOR_CMD_NAN_FAW_CONF = 0x1a,
-    /* 0x1b is deprecated */
-    IWL_MVM_VENDOR_CMD_SET_SAR_PROFILE = 0x1c,
-    IWL_MVM_VENDOR_CMD_GET_SAR_PROFILE_INFO = 0x1d,
-    IWL_MVM_VENDOR_CMD_NEIGHBOR_REPORT_REQUEST = 0x1e,
-    IWL_MVM_VENDOR_CMD_NEIGHBOR_REPORT_RESPONSE = 0x1f,
-    IWL_MVM_VENDOR_CMD_GET_SAR_GEO_PROFILE = 0x20,
-    IWL_MVM_VENDOR_CMD_TEST_FIPS = 0x21,
-    IWL_MVM_VENDOR_CMD_FMAC_CONNECT_PARAMS = 0x22,
-    IWL_MVM_VENDOR_CMD_FMAC_CONFIG = 0x23,
+  IWL_MVM_VENDOR_CMD_SET_LOW_LATENCY = 0x00,
+  IWL_MVM_VENDOR_CMD_GET_LOW_LATENCY = 0x01,
+  IWL_MVM_VENDOR_CMD_TCM_EVENT = 0x02,
+  IWL_MVM_VENDOR_CMD_LTE_STATE = 0x03,
+  IWL_MVM_VENDOR_CMD_LTE_COEX_CONFIG_INFO = 0x04,
+  IWL_MVM_VENDOR_CMD_LTE_COEX_DYNAMIC_INFO = 0x05,
+  IWL_MVM_VENDOR_CMD_LTE_COEX_SPS_INFO = 0x06,
+  IWL_MVM_VENDOR_CMD_LTE_COEX_WIFI_RPRTD_CHAN = 0x07,
+  IWL_MVM_VENDOR_CMD_SET_COUNTRY = 0x08,
+  IWL_MVM_VENDOR_CMD_PROXY_FRAME_FILTERING = 0x09,
+  IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_ADD = 0x0a,
+  IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_DEL = 0x0b,
+  IWL_MVM_VENDOR_CMD_TDLS_PEER_CACHE_QUERY = 0x0c,
+  IWL_MVM_VENDOR_CMD_SET_NIC_TXPOWER_LIMIT = 0x0d,
+  IWL_MVM_VENDOR_CMD_OPPPS_WA = 0x0e,
+  IWL_MVM_VENDOR_CMD_GSCAN_GET_CAPABILITIES = 0x0f,
+  IWL_MVM_VENDOR_CMD_GSCAN_START = 0x10,
+  IWL_MVM_VENDOR_CMD_GSCAN_STOP = 0x11,
+  IWL_MVM_VENDOR_CMD_GSCAN_RESULTS_EVENT = 0x12,
+  IWL_MVM_VENDOR_CMD_GSCAN_SET_BSSID_HOTLIST = 0x13,
+  IWL_MVM_VENDOR_CMD_GSCAN_SET_SIGNIFICANT_CHANGE = 0x14,
+  IWL_MVM_VENDOR_CMD_GSCAN_HOTLIST_CHANGE_EVENT = 0x15,
+  IWL_MVM_VENDOR_CMD_GSCAN_SIGNIFICANT_CHANGE_EVENT = 0x16,
+  IWL_MVM_VENDOR_CMD_RXFILTER = 0x17,
+  IWL_MVM_VENDOR_CMD_GSCAN_BEACON_EVENT = 0x18,
+  IWL_MVM_VENDOR_CMD_DBG_COLLECT = 0x19,
+  IWL_MVM_VENDOR_CMD_NAN_FAW_CONF = 0x1a,
+  /* 0x1b is deprecated */
+  IWL_MVM_VENDOR_CMD_SET_SAR_PROFILE = 0x1c,
+  IWL_MVM_VENDOR_CMD_GET_SAR_PROFILE_INFO = 0x1d,
+  IWL_MVM_VENDOR_CMD_NEIGHBOR_REPORT_REQUEST = 0x1e,
+  IWL_MVM_VENDOR_CMD_NEIGHBOR_REPORT_RESPONSE = 0x1f,
+  IWL_MVM_VENDOR_CMD_GET_SAR_GEO_PROFILE = 0x20,
+  IWL_MVM_VENDOR_CMD_TEST_FIPS = 0x21,
+  IWL_MVM_VENDOR_CMD_FMAC_CONNECT_PARAMS = 0x22,
+  IWL_MVM_VENDOR_CMD_FMAC_CONFIG = 0x23,
 };
 
 /**
@@ -173,9 +173,9 @@
  * is just the PPDU's time)
  */
 enum iwl_mvm_vendor_load {
-    IWL_MVM_VENDOR_LOAD_LOW,
-    IWL_MVM_VENDOR_LOAD_MEDIUM,
-    IWL_MVM_VENDOR_LOAD_HIGH,
+  IWL_MVM_VENDOR_LOAD_LOW,
+  IWL_MVM_VENDOR_LOAD_MEDIUM,
+  IWL_MVM_VENDOR_LOAD_HIGH,
 };
 
 /**
@@ -194,12 +194,12 @@
  * Note that these must match the firmware API.
  */
 enum iwl_mvm_vendor_gscan_report_mode {
-    IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_FULL,
-    IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_EACH_SCAN,
-    IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_FULL_RESULTS,
-    IWL_MVM_VENDOR_GSCAN_REPORT_HISTORY_RESERVED,
-    IWL_MVM_VENDOR_GSCAN_REPORT_NO_BATCH,
-    NUM_IWL_MVM_VENDOR_GSCAN_REPORT,
+  IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_FULL,
+  IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_EACH_SCAN,
+  IWL_MVM_VENDOR_GSCAN_REPORT_BUFFER_FULL_RESULTS,
+  IWL_MVM_VENDOR_GSCAN_REPORT_HISTORY_RESERVED,
+  IWL_MVM_VENDOR_GSCAN_REPORT_NO_BATCH,
+  NUM_IWL_MVM_VENDOR_GSCAN_REPORT,
 };
 
 /**
@@ -214,12 +214,12 @@
  * @MAX_IWL_MVM_VENDOR_CHANNEL_SPEC: highest channel spec attribute number.
  */
 enum iwl_mvm_vendor_gscan_channel_spec {
-    IWL_MVM_VENDOR_CHANNEL_SPEC_INVALID,
-    IWL_MVM_VENDOR_CHANNEL_SPEC_CHANNEL,
-    IWL_MVM_VENDOR_CHANNEL_SPEC_DWELL_TIME,
-    IWL_MVM_VENDOR_CHANNEL_SPEC_PASSIVE,
-    NUM_IWL_MVM_VENDOR_CHANNEL_SPEC,
-    MAX_IWL_MVM_VENDOR_CHANNEL_SPEC = NUM_IWL_MVM_VENDOR_CHANNEL_SPEC - 1,
+  IWL_MVM_VENDOR_CHANNEL_SPEC_INVALID,
+  IWL_MVM_VENDOR_CHANNEL_SPEC_CHANNEL,
+  IWL_MVM_VENDOR_CHANNEL_SPEC_DWELL_TIME,
+  IWL_MVM_VENDOR_CHANNEL_SPEC_PASSIVE,
+  NUM_IWL_MVM_VENDOR_CHANNEL_SPEC,
+  MAX_IWL_MVM_VENDOR_CHANNEL_SPEC = NUM_IWL_MVM_VENDOR_CHANNEL_SPEC - 1,
 };
 
 /**
@@ -250,17 +250,17 @@
  * @MAX_IWL_MVM_VENDOR_BUCKET_SPEC: highest bucket spec attribute number.
  */
 enum iwl_mvm_vendor_gscan_bucket_spec {
-    IWL_MVM_VENDOR_BUCKET_SPEC_INVALID,
-    IWL_MVM_VENDOR_BUCKET_SPEC_INDEX,
-    IWL_MVM_VENDOR_BUCKET_SPEC_BAND,
-    IWL_MVM_VENDOR_BUCKET_SPEC_PERIOD,
-    IWL_MVM_VENDOR_BUCKET_SPEC_REPORT_MODE,
-    IWL_MVM_VENDOR_BUCKET_SPEC_CHANNELS,
-    IWL_MVM_VENDOR_BUCKET_SPEC_MAX_PERIOD,
-    IWL_MVM_VENDOR_BUCKET_SPEC_EXPONENT,
-    IWL_MVM_VENDOR_BUCKET_SPEC_STEP_CNT,
-    NUM_IWL_MVM_VENDOR_BUCKET_SPEC,
-    MAX_IWL_MVM_VENDOR_BUCKET_SPEC = NUM_IWL_MVM_VENDOR_BUCKET_SPEC - 1,
+  IWL_MVM_VENDOR_BUCKET_SPEC_INVALID,
+  IWL_MVM_VENDOR_BUCKET_SPEC_INDEX,
+  IWL_MVM_VENDOR_BUCKET_SPEC_BAND,
+  IWL_MVM_VENDOR_BUCKET_SPEC_PERIOD,
+  IWL_MVM_VENDOR_BUCKET_SPEC_REPORT_MODE,
+  IWL_MVM_VENDOR_BUCKET_SPEC_CHANNELS,
+  IWL_MVM_VENDOR_BUCKET_SPEC_MAX_PERIOD,
+  IWL_MVM_VENDOR_BUCKET_SPEC_EXPONENT,
+  IWL_MVM_VENDOR_BUCKET_SPEC_STEP_CNT,
+  NUM_IWL_MVM_VENDOR_BUCKET_SPEC,
+  MAX_IWL_MVM_VENDOR_BUCKET_SPEC = NUM_IWL_MVM_VENDOR_BUCKET_SPEC - 1,
 };
 
 /**
@@ -275,9 +275,9 @@
  * Note that these must match the firmware API.
  */
 enum iwl_mvm_vendor_results_event_type {
-    IWL_MVM_VENDOR_RESULTS_NOTIF_BUFFER_FULL,
-    IWL_MVM_VENDOR_RESULTS_NOTIF_BUCKET_END,
-    NUM_IWL_VENDOR_RESULTS_NOTIF_EVENT_TYPE,
+  IWL_MVM_VENDOR_RESULTS_NOTIF_BUFFER_FULL,
+  IWL_MVM_VENDOR_RESULTS_NOTIF_BUCKET_END,
+  NUM_IWL_VENDOR_RESULTS_NOTIF_EVENT_TYPE,
 };
 
 /**
@@ -299,18 +299,18 @@
  * @MAX_IWL_MVM_VENDOR_GSCAN_RESULT: highest scan result attribute number.
  */
 enum iwl_mvm_vendor_gscan_result {
-    IWL_MVM_VENDOR_GSCAN_RESULT_INVALID,
-    IWL_MVM_VENDOR_GSCAN_RESULT_TIMESTAMP,
-    IWL_MVM_VENDOR_GSCAN_RESULT_SSID,
-    IWL_MVM_VENDOR_GSCAN_RESULT_BSSID,
-    IWL_MVM_VENDOR_GSCAN_RESULT_CHANNEL,
-    IWL_MVM_VENDOR_GSCAN_RESULT_RSSI,
-    IWL_MVM_VENDOR_GSCAN_RESULT_FRAME,
-    IWL_MVM_VENDOR_GSCAN_RESULT_BEACON_PERIOD,
-    IWL_MVM_VENDOR_GSCAN_RESULT_CAPABILITY,
-    IWL_MVM_VENDOR_GSCAN_RESULT_PAD,
-    NUM_IWL_MVM_VENDOR_GSCAN_RESULT,
-    MAX_IWL_MVM_VENDOR_GSCAN_RESULT = NUM_IWL_MVM_VENDOR_GSCAN_RESULT - 1,
+  IWL_MVM_VENDOR_GSCAN_RESULT_INVALID,
+  IWL_MVM_VENDOR_GSCAN_RESULT_TIMESTAMP,
+  IWL_MVM_VENDOR_GSCAN_RESULT_SSID,
+  IWL_MVM_VENDOR_GSCAN_RESULT_BSSID,
+  IWL_MVM_VENDOR_GSCAN_RESULT_CHANNEL,
+  IWL_MVM_VENDOR_GSCAN_RESULT_RSSI,
+  IWL_MVM_VENDOR_GSCAN_RESULT_FRAME,
+  IWL_MVM_VENDOR_GSCAN_RESULT_BEACON_PERIOD,
+  IWL_MVM_VENDOR_GSCAN_RESULT_CAPABILITY,
+  IWL_MVM_VENDOR_GSCAN_RESULT_PAD,
+  NUM_IWL_MVM_VENDOR_GSCAN_RESULT,
+  MAX_IWL_MVM_VENDOR_GSCAN_RESULT = NUM_IWL_MVM_VENDOR_GSCAN_RESULT - 1,
 };
 
 /**
@@ -324,12 +324,12 @@
  * @MAX_IWL_MVM_VENDOR_GSCAN_CACHED_RES: highest scan result attribute number.
  */
 enum iwl_mvm_vendor_gscan_cached_scan_res {
-    IWL_MVM_VENDOR_GSCAN_CACHED_RES_INVALID,
-    IWL_MVM_VENDOR_GSCAN_CACHED_RES_SCAN_ID,
-    IWL_MVM_VENDOR_GSCAN_CACHED_RES_FLAGS,
-    IWL_MVM_VENDOR_GSCAN_CACHED_RES_APS,
-    NUM_IWL_MVM_VENDOR_GSCAN_CACHED_RES,
-    MAX_IWL_MVM_VENDOR_GSCAN_CACHED_RES = NUM_IWL_MVM_VENDOR_GSCAN_CACHED_RES - 1,
+  IWL_MVM_VENDOR_GSCAN_CACHED_RES_INVALID,
+  IWL_MVM_VENDOR_GSCAN_CACHED_RES_SCAN_ID,
+  IWL_MVM_VENDOR_GSCAN_CACHED_RES_FLAGS,
+  IWL_MVM_VENDOR_GSCAN_CACHED_RES_APS,
+  NUM_IWL_MVM_VENDOR_GSCAN_CACHED_RES,
+  MAX_IWL_MVM_VENDOR_GSCAN_CACHED_RES = NUM_IWL_MVM_VENDOR_GSCAN_CACHED_RES - 1,
 };
 
 /**
@@ -344,12 +344,12 @@
  *  attribute number.
  */
 enum iwl_mvm_vendor_ap_threshold_param {
-    IWL_MVM_VENDOR_AP_THRESHOLD_PARAM_INVALID,
-    IWL_MVM_VENDOR_AP_BSSID,
-    IWL_MVM_VENDOR_AP_LOW_RSSI_THRESHOLD,
-    IWL_MVM_VENDOR_AP_HIGH_RSSI_THRESHOLD,
-    NUM_IWL_MVM_VENDOR_GSCAN_AP_THRESHOLD_PARAM,
-    MAX_IWL_MVM_VENDOR_GSCAN_AP_THRESHOLD_PARAM = NUM_IWL_MVM_VENDOR_GSCAN_AP_THRESHOLD_PARAM - 1,
+  IWL_MVM_VENDOR_AP_THRESHOLD_PARAM_INVALID,
+  IWL_MVM_VENDOR_AP_BSSID,
+  IWL_MVM_VENDOR_AP_LOW_RSSI_THRESHOLD,
+  IWL_MVM_VENDOR_AP_HIGH_RSSI_THRESHOLD,
+  NUM_IWL_MVM_VENDOR_GSCAN_AP_THRESHOLD_PARAM,
+  MAX_IWL_MVM_VENDOR_GSCAN_AP_THRESHOLD_PARAM = NUM_IWL_MVM_VENDOR_GSCAN_AP_THRESHOLD_PARAM - 1,
 };
 
 /**
@@ -363,9 +363,9 @@
  * Note that these must match the firmware API.
  */
 enum iwl_mvm_vendor_hotlist_ap_status {
-    IWL_MVM_VENDOR_HOTLIST_AP_FOUND,
-    IWL_MVM_VENDOR_HOTLIST_AP_LOST,
-    NUM_IWL_MVM_VENDOR_HOTLIST_AP_STATUS,
+  IWL_MVM_VENDOR_HOTLIST_AP_FOUND,
+  IWL_MVM_VENDOR_HOTLIST_AP_LOST,
+  NUM_IWL_MVM_VENDOR_HOTLIST_AP_STATUS,
 };
 
 /**
@@ -382,12 +382,12 @@
  *  result attribute number.
  */
 enum iwl_mvm_vendor_significant_change_result {
-    IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_INVALID,
-    IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_CHANNEL,
-    IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_BSSID,
-    IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RSSI_HISTORY,
-    NUM_IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RESULT,
-    MAX_IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RESULT = NUM_IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RESULT - 1,
+  IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_INVALID,
+  IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_CHANNEL,
+  IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_BSSID,
+  IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RSSI_HISTORY,
+  NUM_IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RESULT,
+  MAX_IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RESULT = NUM_IWL_MVM_VENDOR_SIGNIFICANT_CHANGE_RESULT - 1,
 };
 
 /**
@@ -401,11 +401,11 @@
  *
  */
 enum iwl_mvm_vendor_rxfilter_flags {
-    IWL_MVM_VENDOR_RXFILTER_UNICAST = 1 << 0,
-    IWL_MVM_VENDOR_RXFILTER_BCAST = 1 << 1,
-    IWL_MVM_VENDOR_RXFILTER_MCAST4 = 1 << 2,
-    IWL_MVM_VENDOR_RXFILTER_MCAST6 = 1 << 3,
-    IWL_MVM_VENDOR_RXFILTER_EINVAL = 1 << 7,
+  IWL_MVM_VENDOR_RXFILTER_UNICAST = 1 << 0,
+  IWL_MVM_VENDOR_RXFILTER_BCAST = 1 << 1,
+  IWL_MVM_VENDOR_RXFILTER_MCAST4 = 1 << 2,
+  IWL_MVM_VENDOR_RXFILTER_MCAST6 = 1 << 3,
+  IWL_MVM_VENDOR_RXFILTER_EINVAL = 1 << 7,
 };
 
 /**
@@ -415,8 +415,8 @@
  * @IWL_MVM_VENDOR_RXFILTER_OP_DROP: drop frames matching the filter
  */
 enum iwl_mvm_vendor_rxfilter_op {
-    IWL_MVM_VENDOR_RXFILTER_OP_PASS,
-    IWL_MVM_VENDOR_RXFILTER_OP_DROP,
+  IWL_MVM_VENDOR_RXFILTER_OP_PASS,
+  IWL_MVM_VENDOR_RXFILTER_OP_DROP,
 };
 
 /*
@@ -426,11 +426,11 @@
  * IEEE802.11-2016, table 9-153.
  */
 enum iwl_mvm_vendor_nr_chan_width {
-    IWL_MVM_VENDOR_CHAN_WIDTH_20,
-    IWL_MVM_VENDOR_CHAN_WIDTH_40,
-    IWL_MVM_VENDOR_CHAN_WIDTH_80,
-    IWL_MVM_VENDOR_CHAN_WIDTH_160,
-    IWL_MVM_VENDOR_CHAN_WIDTH_80P80,
+  IWL_MVM_VENDOR_CHAN_WIDTH_20,
+  IWL_MVM_VENDOR_CHAN_WIDTH_40,
+  IWL_MVM_VENDOR_CHAN_WIDTH_80,
+  IWL_MVM_VENDOR_CHAN_WIDTH_160,
+  IWL_MVM_VENDOR_CHAN_WIDTH_80P80,
 };
 
 /*
@@ -440,15 +440,15 @@
  * IEEE802.11-2016, Annex C.
  */
 enum iwl_mvm_vendor_phy_type {
-    IWL_MVM_VENDOR_PHY_TYPE_UNSPECIFIED,
-    IWL_MVM_VENDOR_PHY_TYPE_DSSS = 2,
-    IWL_MVM_VENDOR_PHY_TYPE_OFDM = 4,
-    IWL_MVM_VENDOR_PHY_TYPE_HRDSSS = 5,
-    IWL_MVM_VENDOR_PHY_TYPE_ERP = 6,
-    IWL_MVM_VENDOR_PHY_TYPE_HT = 7,
-    IWL_MVM_VENDOR_PHY_TYPE_DMG = 8,
-    IWL_MVM_VENDOR_PHY_TYPE_VHT = 9,
-    IWL_MVM_VENDOR_PHY_TYPE_TVHT = 10,
+  IWL_MVM_VENDOR_PHY_TYPE_UNSPECIFIED,
+  IWL_MVM_VENDOR_PHY_TYPE_DSSS = 2,
+  IWL_MVM_VENDOR_PHY_TYPE_OFDM = 4,
+  IWL_MVM_VENDOR_PHY_TYPE_HRDSSS = 5,
+  IWL_MVM_VENDOR_PHY_TYPE_ERP = 6,
+  IWL_MVM_VENDOR_PHY_TYPE_HT = 7,
+  IWL_MVM_VENDOR_PHY_TYPE_DMG = 8,
+  IWL_MVM_VENDOR_PHY_TYPE_VHT = 9,
+  IWL_MVM_VENDOR_PHY_TYPE_TVHT = 10,
 };
 
 /**
@@ -481,20 +481,20 @@
 
  */
 enum iwl_mvm_vendor_neighbor_report {
-    __IWL_MVM_VENDOR_NEIGHBOR_INVALID,
-    IWL_MVM_VENDOR_NEIGHBOR_BSSID,
-    IWL_MVM_VENDOR_NEIGHBOR_BSSID_INFO,
-    IWL_MVM_VENDOR_NEIGHBOR_OPERATING_CLASS,
-    IWL_MVM_VENDOR_NEIGHBOR_CHANNEL,
-    IWL_MVM_VENDOR_NEIGHBOR_PHY_TYPE,
-    IWL_MVM_VENDOR_NEIGHBOR_CHANNEL_WIDTH,
-    IWL_MVM_VENDOR_NEIGHBOR_CENTER_FREQ_IDX_0,
-    IWL_MVM_VENDOR_NEIGHBOR_CENTER_FREQ_IDX_1,
-    IWL_MVM_VENDOR_NEIGHBOR_LCI,
-    IWL_MVM_VENDOR_NEIGHBOR_CIVIC,
+  __IWL_MVM_VENDOR_NEIGHBOR_INVALID,
+  IWL_MVM_VENDOR_NEIGHBOR_BSSID,
+  IWL_MVM_VENDOR_NEIGHBOR_BSSID_INFO,
+  IWL_MVM_VENDOR_NEIGHBOR_OPERATING_CLASS,
+  IWL_MVM_VENDOR_NEIGHBOR_CHANNEL,
+  IWL_MVM_VENDOR_NEIGHBOR_PHY_TYPE,
+  IWL_MVM_VENDOR_NEIGHBOR_CHANNEL_WIDTH,
+  IWL_MVM_VENDOR_NEIGHBOR_CENTER_FREQ_IDX_0,
+  IWL_MVM_VENDOR_NEIGHBOR_CENTER_FREQ_IDX_1,
+  IWL_MVM_VENDOR_NEIGHBOR_LCI,
+  IWL_MVM_VENDOR_NEIGHBOR_CIVIC,
 
-    NUM_IWL_MVM_VENDOR_NEIGHBOR_REPORT,
-    MAX_IWL_MVM_VENDOR_NEIGHBOR_REPORT = NUM_IWL_MVM_VENDOR_NEIGHBOR_REPORT - 1,
+  NUM_IWL_MVM_VENDOR_NEIGHBOR_REPORT,
+  MAX_IWL_MVM_VENDOR_NEIGHBOR_REPORT = NUM_IWL_MVM_VENDOR_NEIGHBOR_REPORT - 1,
 };
 
 /**
@@ -506,10 +506,10 @@
  * @IWL_VENDOR_SAR_GEO_MAX_TXP: maximum allowed tx power (uint8_t).
  */
 enum iwl_vendor_sar_per_chain_geo_table {
-    IWL_VENDOR_SAR_GEO_INVALID,
-    IWL_VENDOR_SAR_GEO_CHAIN_A_OFFSET,
-    IWL_VENDOR_SAR_GEO_CHAIN_B_OFFSET,
-    IWL_VENDOR_SAR_GEO_MAX_TXP,
+  IWL_VENDOR_SAR_GEO_INVALID,
+  IWL_VENDOR_SAR_GEO_CHAIN_A_OFFSET,
+  IWL_VENDOR_SAR_GEO_CHAIN_B_OFFSET,
+  IWL_VENDOR_SAR_GEO_MAX_TXP,
 };
 
 /**
@@ -520,9 +520,9 @@
  * @IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA384: SHA384
  */
 enum iwl_vendor_fips_test_vector_sha_type {
-    IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA1,
-    IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA256,
-    IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA384,
+  IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA1,
+  IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA256,
+  IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE_SHA384,
 };
 
 /**
@@ -536,12 +536,12 @@
  * @MAX_IWL_VENDOR_FIPS_TEST_VECTOR_SHA: highest SHA test vector attribute.
  */
 enum iwl_vendor_fips_test_vector_sha {
-    IWL_VENDOR_FIPS_TEST_VECTOR_SHA_INVALID,
-    IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE,
-    IWL_VENDOR_FIPS_TEST_VECTOR_SHA_MSG,
+  IWL_VENDOR_FIPS_TEST_VECTOR_SHA_INVALID,
+  IWL_VENDOR_FIPS_TEST_VECTOR_SHA_TYPE,
+  IWL_VENDOR_FIPS_TEST_VECTOR_SHA_MSG,
 
-    NUM_IWL_VENDOR_FIPS_TEST_VECTOR_SHA,
-    MAX_IWL_VENDOR_FIPS_TEST_VECTOR_SHA = NUM_IWL_VENDOR_FIPS_TEST_VECTOR_SHA - 1,
+  NUM_IWL_VENDOR_FIPS_TEST_VECTOR_SHA,
+  MAX_IWL_VENDOR_FIPS_TEST_VECTOR_SHA = NUM_IWL_VENDOR_FIPS_TEST_VECTOR_SHA - 1,
 };
 
 /**
@@ -563,14 +563,14 @@
  *  attribute.
  */
 enum iwl_vendor_fips_test_vector_hmac_kdf {
-    IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_INVALID,
-    IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_TYPE,
-    IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_KEY,
-    IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_MSG,
-    IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_RES_LEN,
+  IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_INVALID,
+  IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_TYPE,
+  IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_KEY,
+  IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_MSG,
+  IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF_RES_LEN,
 
-    NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF,
-    MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF = NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF - 1,
+  NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF,
+  MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF = NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HMAC_KDF - 1,
 };
 
 /**
@@ -580,7 +580,7 @@
  *  decryption.
  */
 enum iwl_vendor_fips_test_vector_flags {
-    IWL_VENDOR_FIPS_TEST_VECTOR_FLAGS_ENCRYPT = BIT(0),
+  IWL_VENDOR_FIPS_TEST_VECTOR_FLAGS_ENCRYPT = BIT(0),
 };
 
 /**
@@ -601,15 +601,15 @@
  * @MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HW: highest hw test vector attribute.
  */
 enum iwl_vendor_fips_test_vector_hw {
-    IWL_VENDOR_FIPS_TEST_VECTOR_HW_INVALID,
-    IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY,
-    IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE,
-    IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD,
-    IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD,
-    IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS,
+  IWL_VENDOR_FIPS_TEST_VECTOR_HW_INVALID,
+  IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY,
+  IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE,
+  IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD,
+  IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD,
+  IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS,
 
-    NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HW,
-    MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HW = NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HW - 1,
+  NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HW,
+  MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HW = NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HW - 1,
 };
 
 /**
@@ -762,86 +762,86 @@
  *  fmac configuration option.
  */
 enum iwl_mvm_vendor_attr {
-    __IWL_MVM_VENDOR_ATTR_INVALID = 0x00,
-    IWL_MVM_VENDOR_ATTR_LOW_LATENCY = 0x01,
-    IWL_MVM_VENDOR_ATTR_VIF_ADDR = 0x02,
-    IWL_MVM_VENDOR_ATTR_VIF_LL = 0x03,
-    IWL_MVM_VENDOR_ATTR_LL = 0x04,
-    IWL_MVM_VENDOR_ATTR_VIF_LOAD = 0x05,
-    IWL_MVM_VENDOR_ATTR_LOAD = 0x06,
-    IWL_MVM_VENDOR_ATTR_COUNTRY = 0x07,
-    IWL_MVM_VENDOR_ATTR_FILTER_ARP_NA = 0x08,
-    IWL_MVM_VENDOR_ATTR_FILTER_GTK = 0x09,
-    IWL_MVM_VENDOR_ATTR_ADDR = 0x0a,
-    IWL_MVM_VENDOR_ATTR_TX_BYTES = 0x0b,
-    IWL_MVM_VENDOR_ATTR_RX_BYTES = 0x0c,
-    IWL_MVM_VENDOR_ATTR_TXP_LIMIT_24 = 0x0d,
-    IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52L = 0x0e,
-    IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52H = 0x0f,
-    IWL_MVM_VENDOR_ATTR_OPPPS_WA = 0x10,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_CACHE_SIZE = 0x11,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_BUCKETS = 0x12,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_AP_CACHE_PER_SCAN = 0x13,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_RSSI_SAMPLE_SIZE = 0x14,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_REPORTING_THRESHOLD = 0x15,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_HOTLIST_APS = 0x16,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SIGNIFICANT_CHANGE_APS = 0x17,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_BSSID_HISTORY_ENTRIES = 0x18,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAC_ADDR = 0x19,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAC_ADDR_MASK = 0x1a,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_AP_PER_SCAN = 0x1b,
-    IWL_MVM_VENDOR_ATTR_GSCAN_REPORT_THRESHOLD = 0x1c,
-    IWL_MVM_VENDOR_ATTR_GSCAN_BUCKET_SPECS = 0x1d,
-    IWL_MVM_VENDOR_ATTR_GSCAN_RESULTS_EVENT_TYPE = 0x1e,
-    IWL_MVM_VENDOR_ATTR_GSCAN_RESULTS = 0x1f,
-    IWL_MVM_VENDOR_ATTR_GSCAN_LOST_AP_SAMPLE_SIZE = 0x20,
-    IWL_MVM_VENDOR_ATTR_GSCAN_AP_LIST = 0x21,
-    IWL_MVM_VENDOR_ATTR_GSCAN_RSSI_SAMPLE_SIZE = 0x22,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MIN_BREACHING = 0x23,
-    IWL_MVM_VENDOR_ATTR_GSCAN_HOTLIST_AP_STATUS = 0x24,
-    IWL_MVM_VENDOR_ATTR_GSCAN_SIG_CHANGE_RESULTS = 0x25,
-    IWL_MVM_VENDOR_ATTR_RXFILTER = 0x26,
-    IWL_MVM_VENDOR_ATTR_RXFILTER_OP = 0x27,
-    IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER = 0x28,
-    IWL_MVM_VENDOR_ATTR_NAN_FAW_FREQ = 0x29,
-    IWL_MVM_VENDOR_ATTR_NAN_FAW_SLOTS = 0x2a,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_HOTLIST_SSIDS = 0x2b,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_EPNO_NETWORKS = 0x2c,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_EPNO_NETWORKS_BY_SSID = 0x2d,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_WHITE_LISTED_SSID = 0x2e,
-    IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_BLACK_LISTED_SSID = 0x2f,
-    IWL_MVM_VENDOR_ATTR_WIPHY_FREQ = 0x30,
-    IWL_MVM_VENDOR_ATTR_CHANNEL_WIDTH = 0x31,
-    IWL_MVM_VENDOR_ATTR_CENTER_FREQ1 = 0x32,
-    IWL_MVM_VENDOR_ATTR_CENTER_FREQ2 = 0x33,
-    /* 0x34 is deprecated */
-    /* 0x35 is deprecated */
-    /* 0x36 is deprecated */
-    IWL_MVM_VENDOR_ATTR_GSCAN_REPORT_THRESHOLD_NUM = 0x37,
-    IWL_MVM_VENDOR_ATTR_GSCAN_CACHED_RESULTS = 0x38,
-    IWL_MVM_VENDOR_ATTR_LAST_MSG = 0x39,
-    IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE = 0x3a,
-    IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE = 0x3b,
-    IWL_MVM_VENDOR_ATTR_SAR_ENABLED_PROFILE_NUM = 0x3c,
-    IWL_MVM_VENDOR_ATTR_SSID = 0x3d,
-    IWL_MVM_VENDOR_ATTR_NEIGHBOR_LCI = 0x3e,
-    IWL_MVM_VENDOR_ATTR_NEIGHBOR_CIVIC = 0x3f,
-    IWL_MVM_VENDOR_ATTR_NEIGHBOR_REPORT = 0x40,
-    IWL_MVM_VENDOR_ATTR_SAR_GEO_PROFILE = 0x41,
-    IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_SHA = 0x42,
-    IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HMAC = 0x43,
-    IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_KDF = 0x44,
-    IWL_MVM_VENDOR_ATTR_FIPS_TEST_RESULT = 0x45,
-    IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_AES = 0x46,
-    IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_CCM = 0x47,
-    IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_GCM = 0x48,
-    IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_BLACKLIST = 0x49,
-    IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_WHITELIST = 0x4a,
-    IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_MAX_RETRIES = 0x4b,
-    IWL_MVM_VENDOR_ATTR_FMAC_CONFIG_STR = 0x4c,
+  __IWL_MVM_VENDOR_ATTR_INVALID = 0x00,
+  IWL_MVM_VENDOR_ATTR_LOW_LATENCY = 0x01,
+  IWL_MVM_VENDOR_ATTR_VIF_ADDR = 0x02,
+  IWL_MVM_VENDOR_ATTR_VIF_LL = 0x03,
+  IWL_MVM_VENDOR_ATTR_LL = 0x04,
+  IWL_MVM_VENDOR_ATTR_VIF_LOAD = 0x05,
+  IWL_MVM_VENDOR_ATTR_LOAD = 0x06,
+  IWL_MVM_VENDOR_ATTR_COUNTRY = 0x07,
+  IWL_MVM_VENDOR_ATTR_FILTER_ARP_NA = 0x08,
+  IWL_MVM_VENDOR_ATTR_FILTER_GTK = 0x09,
+  IWL_MVM_VENDOR_ATTR_ADDR = 0x0a,
+  IWL_MVM_VENDOR_ATTR_TX_BYTES = 0x0b,
+  IWL_MVM_VENDOR_ATTR_RX_BYTES = 0x0c,
+  IWL_MVM_VENDOR_ATTR_TXP_LIMIT_24 = 0x0d,
+  IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52L = 0x0e,
+  IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52H = 0x0f,
+  IWL_MVM_VENDOR_ATTR_OPPPS_WA = 0x10,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_CACHE_SIZE = 0x11,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_BUCKETS = 0x12,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_AP_CACHE_PER_SCAN = 0x13,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_RSSI_SAMPLE_SIZE = 0x14,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SCAN_REPORTING_THRESHOLD = 0x15,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_HOTLIST_APS = 0x16,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_SIGNIFICANT_CHANGE_APS = 0x17,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_BSSID_HISTORY_ENTRIES = 0x18,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAC_ADDR = 0x19,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAC_ADDR_MASK = 0x1a,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_AP_PER_SCAN = 0x1b,
+  IWL_MVM_VENDOR_ATTR_GSCAN_REPORT_THRESHOLD = 0x1c,
+  IWL_MVM_VENDOR_ATTR_GSCAN_BUCKET_SPECS = 0x1d,
+  IWL_MVM_VENDOR_ATTR_GSCAN_RESULTS_EVENT_TYPE = 0x1e,
+  IWL_MVM_VENDOR_ATTR_GSCAN_RESULTS = 0x1f,
+  IWL_MVM_VENDOR_ATTR_GSCAN_LOST_AP_SAMPLE_SIZE = 0x20,
+  IWL_MVM_VENDOR_ATTR_GSCAN_AP_LIST = 0x21,
+  IWL_MVM_VENDOR_ATTR_GSCAN_RSSI_SAMPLE_SIZE = 0x22,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MIN_BREACHING = 0x23,
+  IWL_MVM_VENDOR_ATTR_GSCAN_HOTLIST_AP_STATUS = 0x24,
+  IWL_MVM_VENDOR_ATTR_GSCAN_SIG_CHANGE_RESULTS = 0x25,
+  IWL_MVM_VENDOR_ATTR_RXFILTER = 0x26,
+  IWL_MVM_VENDOR_ATTR_RXFILTER_OP = 0x27,
+  IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER = 0x28,
+  IWL_MVM_VENDOR_ATTR_NAN_FAW_FREQ = 0x29,
+  IWL_MVM_VENDOR_ATTR_NAN_FAW_SLOTS = 0x2a,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_HOTLIST_SSIDS = 0x2b,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_EPNO_NETWORKS = 0x2c,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_EPNO_NETWORKS_BY_SSID = 0x2d,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_WHITE_LISTED_SSID = 0x2e,
+  IWL_MVM_VENDOR_ATTR_GSCAN_MAX_NUM_BLACK_LISTED_SSID = 0x2f,
+  IWL_MVM_VENDOR_ATTR_WIPHY_FREQ = 0x30,
+  IWL_MVM_VENDOR_ATTR_CHANNEL_WIDTH = 0x31,
+  IWL_MVM_VENDOR_ATTR_CENTER_FREQ1 = 0x32,
+  IWL_MVM_VENDOR_ATTR_CENTER_FREQ2 = 0x33,
+  /* 0x34 is deprecated */
+  /* 0x35 is deprecated */
+  /* 0x36 is deprecated */
+  IWL_MVM_VENDOR_ATTR_GSCAN_REPORT_THRESHOLD_NUM = 0x37,
+  IWL_MVM_VENDOR_ATTR_GSCAN_CACHED_RESULTS = 0x38,
+  IWL_MVM_VENDOR_ATTR_LAST_MSG = 0x39,
+  IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE = 0x3a,
+  IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE = 0x3b,
+  IWL_MVM_VENDOR_ATTR_SAR_ENABLED_PROFILE_NUM = 0x3c,
+  IWL_MVM_VENDOR_ATTR_SSID = 0x3d,
+  IWL_MVM_VENDOR_ATTR_NEIGHBOR_LCI = 0x3e,
+  IWL_MVM_VENDOR_ATTR_NEIGHBOR_CIVIC = 0x3f,
+  IWL_MVM_VENDOR_ATTR_NEIGHBOR_REPORT = 0x40,
+  IWL_MVM_VENDOR_ATTR_SAR_GEO_PROFILE = 0x41,
+  IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_SHA = 0x42,
+  IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HMAC = 0x43,
+  IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_KDF = 0x44,
+  IWL_MVM_VENDOR_ATTR_FIPS_TEST_RESULT = 0x45,
+  IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_AES = 0x46,
+  IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_CCM = 0x47,
+  IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_GCM = 0x48,
+  IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_BLACKLIST = 0x49,
+  IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_WHITELIST = 0x4a,
+  IWL_MVM_VENDOR_ATTR_FMAC_CONNECT_PARAMS_MAX_RETRIES = 0x4b,
+  IWL_MVM_VENDOR_ATTR_FMAC_CONFIG_STR = 0x4c,
 
-    NUM_IWL_MVM_VENDOR_ATTR,
-    MAX_IWL_MVM_VENDOR_ATTR = NUM_IWL_MVM_VENDOR_ATTR - 1,
+  NUM_IWL_MVM_VENDOR_ATTR,
+  MAX_IWL_MVM_VENDOR_ATTR = NUM_IWL_MVM_VENDOR_ATTR - 1,
 };
 #define IWL_MVM_VENDOR_FILTER_ARP_NA IWL_MVM_VENDOR_ATTR_FILTER_ARP_NA
 #define IWL_MVM_VENDOR_FILTER_GTK IWL_MVM_VENDOR_ATTR_FILTER_GTK
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/API_rates.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/API_rates.h
index e42c9b0..8847bda 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/API_rates.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/API_rates.h
@@ -323,7 +323,7 @@
 #define BEACON_TEMPLATE_FLAGS_ANT_C_POS (11)
 #define BEACON_TEMPLATE_FLAGS_ANT_ABC_NORM_MSK (BIT_MASK_3BIT)
 #define BEACON_TEMPLATE_FLAGS_ANT_ABC_MSK \
-    (BEACON_TEMPLATE_FLAGS_ANT_ABC_NORM_MSK << BEACON_TEMPLATE_FLAGS_ANT_A_POS)
+  (BEACON_TEMPLATE_FLAGS_ANT_ABC_NORM_MSK << BEACON_TEMPLATE_FLAGS_ANT_A_POS)
 
 // Kedron, added rate & MCS struct
 // bit 7:0 Rate or MCS
@@ -345,11 +345,11 @@
 #define RATE_MCS_CODE_MSK 0x7f
 
 typedef enum _MIMO_INDX_E {
-    SISO_INDX = 0,
-    MIMO2_INDX = 1,
-    MIMO3_INDX = 2,
-    MIMO4_INDX = 3,
-    MAX_MIMO_INDX
+  SISO_INDX = 0,
+  MIMO2_INDX = 1,
+  MIMO3_INDX = 2,
+  MIMO4_INDX = 3,
+  MAX_MIMO_INDX
 } MIMO_INDX_E;
 
 // this mask will apply to all MCS with the exception of MCS 32 which is not mimo but bit 5 is set
@@ -424,15 +424,15 @@
 
 #define RATE_MCS_3ANT_MSK(rate) ((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_ABC_MSK)
 
-#define RATE_MCS_2ANT_MSK(rate)                                             \
-    (((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_AB_MSK) || \
-     ((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_AC_MSK) || \
-     ((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_BC_MSK))
+#define RATE_MCS_2ANT_MSK(rate)                                           \
+  (((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_AB_MSK) || \
+   ((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_AC_MSK) || \
+   ((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_BC_MSK))
 
-#define RATE_MCS_1ANT_MSK(rate)                                            \
-    (((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_A_MSK) || \
-     ((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_B_MSK) || \
-     ((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_C_MSK))
+#define RATE_MCS_1ANT_MSK(rate)                                          \
+  (((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_A_MSK) || \
+   ((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_B_MSK) || \
+   ((rate.rate_n_flags & RATE_MCS_ANT_ABC_MSK) == RATE_MCS_ANT_C_MSK))
 
 // rate&flags cleanup - use single bit for STBC
 // for HT the number of space-time-streams is SS+STBC, STBC is 0/1/2
@@ -540,15 +540,15 @@
 #define POWER_TABLE_2_STREAM_HT_OFDM_ENTRIES_API_D_VER_1 (16)  // MCSs 0-15
 #define POWER_TABLE_1_STREAM_HT_OFDM_ENTRIES_API_D_VER_1 (8)   // MCSs 0-7
 
-#define POWER_TABLE_TOTAL_ENTRIES_API_D_VER_1  \
-    (POWER_TABLE_NUM_CCK_ENTRIES_API_D_VER_1 + \
-     POWER_TABLE_3_STREAM_HT_OFDM_ENTRIES_API_D_VER_1)  // 3 tx chain
-#define POWER_TABLE_TOTAL_ENTRIES_API_D_VER_2  \
-    (POWER_TABLE_NUM_CCK_ENTRIES_API_D_VER_1 + \
-     POWER_TABLE_2_STREAM_HT_OFDM_ENTRIES_API_D_VER_1)  // 2 tx chain
-#define POWER_TABLE_TOTAL_ENTRIES_API_D_VER_3  \
-    (POWER_TABLE_NUM_CCK_ENTRIES_API_D_VER_1 + \
-     POWER_TABLE_1_STREAM_HT_OFDM_ENTRIES_API_D_VER_1)  // 1 tx chain
+#define POWER_TABLE_TOTAL_ENTRIES_API_D_VER_1 \
+  (POWER_TABLE_NUM_CCK_ENTRIES_API_D_VER_1 +  \
+   POWER_TABLE_3_STREAM_HT_OFDM_ENTRIES_API_D_VER_1)  // 3 tx chain
+#define POWER_TABLE_TOTAL_ENTRIES_API_D_VER_2 \
+  (POWER_TABLE_NUM_CCK_ENTRIES_API_D_VER_1 +  \
+   POWER_TABLE_2_STREAM_HT_OFDM_ENTRIES_API_D_VER_1)  // 2 tx chain
+#define POWER_TABLE_TOTAL_ENTRIES_API_D_VER_3 \
+  (POWER_TABLE_NUM_CCK_ENTRIES_API_D_VER_1 +  \
+   POWER_TABLE_1_STREAM_HT_OFDM_ENTRIES_API_D_VER_1)  // 1 tx chain
 
 /**
  * @ingroup apiCmdAllTypes
@@ -556,56 +556,56 @@
  *
  */
 typedef enum _MCS_API_E_VER_1 {
-    mcs_6m = MCS_6M,
-    mcs_12m = MCS_12M,
-    mcs_18m = MCS_18M,
-    mcs_24m = MCS_24M,
-    mcs_36m = MCS_36M,
-    mcs_48m = MCS_48M,
-    mcs_54m = MCS_54M,
-    mcs_60m = MCS_60M,
+  mcs_6m = MCS_6M,
+  mcs_12m = MCS_12M,
+  mcs_18m = MCS_18M,
+  mcs_24m = MCS_24M,
+  mcs_36m = MCS_36M,
+  mcs_48m = MCS_48M,
+  mcs_54m = MCS_54M,
+  mcs_60m = MCS_60M,
 
-    mcs_MIMO2_6m = MCS_MIMO2_6M,
-    mcs_MIMO2_12m = MCS_MIMO2_12M,
-    mcs_MIMO2_18m = MCS_MIMO2_18M,
-    mcs_MIMO2_24m = MCS_MIMO2_24M,
-    mcs_MIMO2_36m = MCS_MIMO2_36M,
-    mcs_MIMO2_48m = MCS_MIMO2_48M,
-    mcs_MIMO2_54m = MCS_MIMO2_54M,
-    mcs_MIMO2_60m = MCS_MIMO2_60M,
+  mcs_MIMO2_6m = MCS_MIMO2_6M,
+  mcs_MIMO2_12m = MCS_MIMO2_12M,
+  mcs_MIMO2_18m = MCS_MIMO2_18M,
+  mcs_MIMO2_24m = MCS_MIMO2_24M,
+  mcs_MIMO2_36m = MCS_MIMO2_36M,
+  mcs_MIMO2_48m = MCS_MIMO2_48M,
+  mcs_MIMO2_54m = MCS_MIMO2_54M,
+  mcs_MIMO2_60m = MCS_MIMO2_60M,
 
-    mcs_MIMO3_6m = MCS_MIMO3_6M,
-    mcs_MIMO3_12m = MCS_MIMO3_12M,
-    mcs_MIMO3_18m = MCS_MIMO3_18M,
-    mcs_MIMO3_24m = MCS_MIMO3_24M,
-    mcs_MIMO3_36m = MCS_MIMO3_36M,
-    mcs_MIMO3_48m = MCS_MIMO3_48M,
-    mcs_MIMO3_54m = MCS_MIMO3_54M,
-    mcs_MIMO3_60m = MCS_MIMO3_60M,
+  mcs_MIMO3_6m = MCS_MIMO3_6M,
+  mcs_MIMO3_12m = MCS_MIMO3_12M,
+  mcs_MIMO3_18m = MCS_MIMO3_18M,
+  mcs_MIMO3_24m = MCS_MIMO3_24M,
+  mcs_MIMO3_36m = MCS_MIMO3_36M,
+  mcs_MIMO3_48m = MCS_MIMO3_48M,
+  mcs_MIMO3_54m = MCS_MIMO3_54M,
+  mcs_MIMO3_60m = MCS_MIMO3_60M,
 
-    mcs_dup_6m = MCS_DUP_6M,
+  mcs_dup_6m = MCS_DUP_6M,
 
-    mcs_MIMO2_mixed_16q_04q_39m = MCS_MIMO2_MIXED_16Q_04Q_39M,
-    mcs_MIMO2_mixed_64q_04q_52m = MCS_MIMO2_MIXED_64Q_04Q_52M,
-    mcs_MIMO2_mixed_64q_16q_65m = MCS_MIMO2_MIXED_64Q_16Q_65M,
-    mcs_MIMO2_mixed_16q_04q_58m = MCS_MIMO2_MIXED_16Q_04Q_58M,
-    mcs_MIMO2_mixed_64q_04q_78m = MCS_MIMO2_MIXED_64Q_04Q_78M,
-    mcs_MIMO2_mixed_64q_16q_97m = MCS_MIMO2_MIXED_64Q_16Q_97M,
+  mcs_MIMO2_mixed_16q_04q_39m = MCS_MIMO2_MIXED_16Q_04Q_39M,
+  mcs_MIMO2_mixed_64q_04q_52m = MCS_MIMO2_MIXED_64Q_04Q_52M,
+  mcs_MIMO2_mixed_64q_16q_65m = MCS_MIMO2_MIXED_64Q_16Q_65M,
+  mcs_MIMO2_mixed_16q_04q_58m = MCS_MIMO2_MIXED_16Q_04Q_58M,
+  mcs_MIMO2_mixed_64q_04q_78m = MCS_MIMO2_MIXED_64Q_04Q_78M,
+  mcs_MIMO2_mixed_64q_16q_97m = MCS_MIMO2_MIXED_64Q_16Q_97M,
 
-    mcs_MIMO3_mixed_16q_04q_04q_52m = MCS_MIMO3_MIXED_16Q_04Q_04Q_52M,
-    mcs_MIMO3_mixed_16q_16q_04q_65m = MCS_MIMO3_MIXED_16Q_16Q_04Q_65M,
-    mcs_MIMO3_mixed_64q_04q_04q_65m = MCS_MIMO3_MIXED_64Q_04Q_04Q_65M,
-    mcs_MIMO3_mixed_64q_16q_04q_78m = MCS_MIMO3_MIXED_64Q_16Q_04Q_78M,
-    mcs_MIMO3_mixed_64q_16q_16q_91m = MCS_MIMO3_MIXED_64Q_16Q_16Q_91M,
-    mcs_MIMO3_mixed_64q_64q_04q_91m = MCS_MIMO3_MIXED_64Q_64Q_04Q_91M,
-    mcs_MIMO3_mixed_64q_64q_16q_104m = MCS_MIMO3_MIXED_64Q_64Q_16Q_104M,
-    mcs_MIMO3_mixed_16q_04q_04q_78m = MCS_MIMO3_MIXED_16Q_04Q_04Q_78M,
-    mcs_MIMO3_mixed_16q_16q_04q_97m = MCS_MIMO3_MIXED_16Q_16Q_04Q_97M,
-    mcs_MIMO3_mixed_64q_04q_04q_97m = MCS_MIMO3_MIXED_64Q_04Q_04Q_97M,
-    mcs_MIMO3_mixed_64q_16q_04q_117m = MCS_MIMO3_MIXED_64Q_16Q_04Q_117M,
-    mcs_MIMO3_mixed_64q_16q_16q_136m = MCS_MIMO3_MIXED_64Q_16Q_16Q_136M,
-    mcs_MIMO3_mixed_64q_64q_04q_136m = MCS_MIMO3_MIXED_64Q_64Q_04Q_136M,
-    mcs_MIMO3_mixed_64q_64q_16q_156m = MCS_MIMO3_MIXED_64Q_64Q_16Q_156M,
+  mcs_MIMO3_mixed_16q_04q_04q_52m = MCS_MIMO3_MIXED_16Q_04Q_04Q_52M,
+  mcs_MIMO3_mixed_16q_16q_04q_65m = MCS_MIMO3_MIXED_16Q_16Q_04Q_65M,
+  mcs_MIMO3_mixed_64q_04q_04q_65m = MCS_MIMO3_MIXED_64Q_04Q_04Q_65M,
+  mcs_MIMO3_mixed_64q_16q_04q_78m = MCS_MIMO3_MIXED_64Q_16Q_04Q_78M,
+  mcs_MIMO3_mixed_64q_16q_16q_91m = MCS_MIMO3_MIXED_64Q_16Q_16Q_91M,
+  mcs_MIMO3_mixed_64q_64q_04q_91m = MCS_MIMO3_MIXED_64Q_64Q_04Q_91M,
+  mcs_MIMO3_mixed_64q_64q_16q_104m = MCS_MIMO3_MIXED_64Q_64Q_16Q_104M,
+  mcs_MIMO3_mixed_16q_04q_04q_78m = MCS_MIMO3_MIXED_16Q_04Q_04Q_78M,
+  mcs_MIMO3_mixed_16q_16q_04q_97m = MCS_MIMO3_MIXED_16Q_16Q_04Q_97M,
+  mcs_MIMO3_mixed_64q_04q_04q_97m = MCS_MIMO3_MIXED_64Q_04Q_04Q_97M,
+  mcs_MIMO3_mixed_64q_16q_04q_117m = MCS_MIMO3_MIXED_64Q_16Q_04Q_117M,
+  mcs_MIMO3_mixed_64q_16q_16q_136m = MCS_MIMO3_MIXED_64Q_16Q_16Q_136M,
+  mcs_MIMO3_mixed_64q_64q_04q_136m = MCS_MIMO3_MIXED_64Q_64Q_04Q_136M,
+  mcs_MIMO3_mixed_64q_64q_16q_156m = MCS_MIMO3_MIXED_64Q_64Q_16Q_156M,
 
 } MCS_API_E_VER_1;
 
@@ -616,9 +616,9 @@
  *
  */
 typedef struct _RATE_MCS_API_S_VER_1 {
-    U08 rate;
-    U08 flags;
-    U16 ext_flags;
+  U08 rate;
+  U08 flags;
+  U16 ext_flags;
 } __attribute__((packed)) RATE_MCS_API_S_VER_1;
 
 /**
@@ -631,44 +631,44 @@
  *
  */
 typedef struct RATE_MCS_BITS_API_S_VER_3 {
-    unsigned int rate : 7;        /**< bit 6:0 Rate or MCS */
-    unsigned int reserved1 : 1;   /**< bit 7 reserved */
-    unsigned int ofdm_ht : 1;     /**< bit 8 OFDM-HT */
-    unsigned int cck : 1;         /**< bit 9 CCK */
-    unsigned int ofdm_he : 1;     /**< bit 10 OFDM-HE */
-    unsigned int fat_channel : 2; /**< bit 12:11 FAT channel 20Mhz...160Mhz, for OFDMA this gives
-                                     the full channel width vs. RU */
-    unsigned int short_gi : 1; /**< bit 13 short GI, for HT/VHT 0 - 0.8us, 1 - 0.4us, for HE-SU use
-                                  for 5th LTF_GI for HE-SU use for 5th LTF_GI=3 -> 4xLTF+0.8 */
-    unsigned int ant_a : 1;    /**< bit 14 chain A active */
-    unsigned int ant_b : 1;    /**< bit 15 chain B active */
-    unsigned int ant_c : 1;    /**< bit 16 chain C active */
-    unsigned int stbc : 1;     /**< bit 17 STBC */
-    unsigned int he_dcm : 1;   /**< bit 18 OFDM-HE dual carrier mode, this reduce the number of data
-                                  tones by half (for all RUs) */
-    unsigned int bf : 1;       /**< bit 19 beamforming*/
-    unsigned int he_gi_ltf : 2;    /**< bit 21:20 HE guard-interval and LTF
-                                                  HE SU  : 0 - 1xLTF+0.8, 1 - 2xLTF+0.8, 2 - 2xLTF+1.6,
-                                      3 -    4xLTF+3.2, 3+short_gi - 4xLTF+0.8             HE MU  : 0 -
-                                      4xLTF+0.8, 1 - 2xLTF+0.8, 2
-                                      - 2xLTF+1.6, 3 - 4xLTF+3.2             HE TRIG: 0 - 1xLTF+1.6, 1
-                                      - 2xLTF+1.6, 2 -    4xLTF+3.2*/
-    unsigned int vht_he_type : 2;  /**< bit 23:22 VHT: 0 - SU, 2 - MU, HE: 0 - SU, 1 - SU_EXT_RANGE,
-                                      2 - MU (RU gives data channel width), 3 - trig-base */
-    unsigned int dup_channel : 2;  /**< bit 25:24 duplicate channel x1, x2, x4, x8*/
-    unsigned int ofdm_vht : 1;     /**< bit 26 VHT */
-    unsigned int ldpc : 1;         /**< bit 27 LDPC code */
-    unsigned int he_er_106 : 1;    /**< bit 28 HE extended range use 102 data-tones (or 106 tones)*/
-    unsigned int reserved3 : 1;    /**< bit 29 */
-    unsigned int rts_required : 1; /**< bit 30 RTS reuired for this rate (uCode decision) */
-    unsigned int cts_required : 1; /**< bit 31 CTS reuired for this rate (uCode decision) */
+  unsigned int rate : 7;        /**< bit 6:0 Rate or MCS */
+  unsigned int reserved1 : 1;   /**< bit 7 reserved */
+  unsigned int ofdm_ht : 1;     /**< bit 8 OFDM-HT */
+  unsigned int cck : 1;         /**< bit 9 CCK */
+  unsigned int ofdm_he : 1;     /**< bit 10 OFDM-HE */
+  unsigned int fat_channel : 2; /**< bit 12:11 FAT channel 20Mhz...160Mhz, for OFDMA this gives
+                                   the full channel width vs. RU */
+  unsigned int short_gi : 1;    /**< bit 13 short GI, for HT/VHT 0 - 0.8us, 1 - 0.4us, for HE-SU use
+                                   for 5th LTF_GI for HE-SU use for 5th LTF_GI=3 -> 4xLTF+0.8 */
+  unsigned int ant_a : 1;       /**< bit 14 chain A active */
+  unsigned int ant_b : 1;       /**< bit 15 chain B active */
+  unsigned int ant_c : 1;       /**< bit 16 chain C active */
+  unsigned int stbc : 1;        /**< bit 17 STBC */
+  unsigned int he_dcm : 1;    /**< bit 18 OFDM-HE dual carrier mode, this reduce the number of data
+                                 tones by half (for all RUs) */
+  unsigned int bf : 1;        /**< bit 19 beamforming*/
+  unsigned int he_gi_ltf : 2; /**< bit 21:20 HE guard-interval and LTF
+                                             HE SU  : 0 - 1xLTF+0.8, 1 - 2xLTF+0.8, 2 - 2xLTF+1.6,
+                                 3 -    4xLTF+3.2, 3+short_gi - 4xLTF+0.8             HE MU  : 0 -
+                                 4xLTF+0.8, 1 - 2xLTF+0.8, 2
+                                 - 2xLTF+1.6, 3 - 4xLTF+3.2             HE TRIG: 0 - 1xLTF+1.6, 1
+                                 - 2xLTF+1.6, 2 -    4xLTF+3.2*/
+  unsigned int vht_he_type : 2;  /**< bit 23:22 VHT: 0 - SU, 2 - MU, HE: 0 - SU, 1 - SU_EXT_RANGE,
+                                    2 - MU (RU gives data channel width), 3 - trig-base */
+  unsigned int dup_channel : 2;  /**< bit 25:24 duplicate channel x1, x2, x4, x8*/
+  unsigned int ofdm_vht : 1;     /**< bit 26 VHT */
+  unsigned int ldpc : 1;         /**< bit 27 LDPC code */
+  unsigned int he_er_106 : 1;    /**< bit 28 HE extended range use 102 data-tones (or 106 tones)*/
+  unsigned int reserved3 : 1;    /**< bit 29 */
+  unsigned int rts_required : 1; /**< bit 30 RTS reuired for this rate (uCode decision) */
+  unsigned int cts_required : 1; /**< bit 31 CTS reuired for this rate (uCode decision) */
 
-    // unsigned int gf:1;             /**< bit 10 green-field */
-    // unsigned int stbc:2;           /**< bit 18:17 STBC */
-    // unsigned int zlf:1;            /**< bit 20 ZLF (NDP) */
-    // unsigned int sounding:1;       /**< bit 21 sounding packet*/
-    // unsigned int num_of_ext_ss:2;  /**< bit 23:22 number of extended spatial streams for
-    // sounding*/ unsigned int vht_mu:1;         /**< bit 28 VHT/HE Multi-user */
+  // unsigned int gf:1;             /**< bit 10 green-field */
+  // unsigned int stbc:2;           /**< bit 18:17 STBC */
+  // unsigned int zlf:1;            /**< bit 20 ZLF (NDP) */
+  // unsigned int sounding:1;       /**< bit 21 sounding packet*/
+  // unsigned int num_of_ext_ss:2;  /**< bit 23:22 number of extended spatial streams for
+  // sounding*/ unsigned int vht_mu:1;         /**< bit 28 VHT/HE Multi-user */
 } __attribute__((packed)) RATE_MCS_BITS_API_S_VER_3;
 
 /**
@@ -678,11 +678,11 @@
  *
  */
 typedef union _RATE_MCS_API_U_VER_1 {
-    RATE_MCS_API_S_VER_1 s;
+  RATE_MCS_API_S_VER_1 s;
 #if !defined(SV_TOOL_PRECOMPILE_HEADERS)
-    RATE_MCS_BITS_API_S_VER_3 bits;
+  RATE_MCS_BITS_API_S_VER_3 bits;
 #endif
-    U32 rate_n_flags;
+  U32 rate_n_flags;
 } __attribute__((packed)) RATE_MCS_API_U_VER_1;
 
 /**
@@ -692,21 +692,21 @@
  *
  */
 typedef struct _RATE_MCS_API_S_VER_0 {
-    U08 rate;
-    U08 flags;
+  U08 rate;
+  U08 flags;
 } __attribute__((packed)) RATE_MCS_API_S_VER_0;
 
 #define IS_RATE_OFDM_API_M_VER_2(c_rate) ((((c_rate).rate_n_flags) & RATE_MCS_CCK_MSK) == 0)
 
 #define IS_RATE_OFDM_LEGACY_API_M_VER_2(c_rate) \
-    ((((c_rate).rate_n_flags) & (RATE_MCS_CCK_MSK | RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK)) == 0)
+  ((((c_rate).rate_n_flags) & (RATE_MCS_CCK_MSK | RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK)) == 0)
 
 #define IS_RATE_OFDM_HT_API_M_VER_2(c_rate) (((c_rate).rate_n_flags) & RATE_MCS_HT_MSK)
 
 #define IS_RATE_OFDM_VHT_API_M_VER_3(c_rate) (((c_rate).rate_n_flags) & RATE_MCS_VHT_MSK)
 
 #define IS_RATE_OFDM_HT_VHT_API_M_VER_3(c_rate) \
-    (((c_rate).rate_n_flags) & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK))
+  (((c_rate).rate_n_flags) & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK))
 
 #define IS_RATE_OFDM_VHT_API_M_VER_2(c_rate) (FALSE)
 
@@ -715,74 +715,74 @@
 #define IS_RATE_CCK_API_M_VER_3(c_rate) (((c_rate).rate_n_flags) & RATE_MCS_CCK_MSK)
 
 #define GET_ANT_CHAIN_API_M_VER_1(c_rate) \
-    SHIFT_AND_MASK(c_rate.rate_n_flags, RATE_MCS_ANT_ABC_MSK, RATE_MCS_ANT_A_POS)
+  SHIFT_AND_MASK(c_rate.rate_n_flags, RATE_MCS_ANT_ABC_MSK, RATE_MCS_ANT_A_POS)
 
 #define GET_ANT_CHAIN_NUM_API_M_VER_1(c_rate) \
-    (g_ChainCfg2ChainNum[GET_ANT_CHAIN_API_M_VER_1(c_rate)])
+  (g_ChainCfg2ChainNum[GET_ANT_CHAIN_API_M_VER_1(c_rate)])
 
 // don't use for VHT
 #define IS_RATE_STBC_PRESENT_API_M_VER_1(c_rate) (((c_rate.rate_n_flags) & RATE_MCS_STBC_MSK))
 
 // don't use for VHT
 #define GET_NUM_OF_STBC_SS_API_M_VER_1(c_rate) \
-    SHIFT_AND_MASK((c_rate.rate_n_flags), RATE_MCS_STBC_MSK, RATE_MCS_STBC_POS)
+  SHIFT_AND_MASK((c_rate.rate_n_flags), RATE_MCS_STBC_MSK, RATE_MCS_STBC_POS)
 
 // 0==>20MHz, 1==>40MHz
 #define GET_BW_INDEX_API_M_VER_1(c_rate) \
-    (((c_rate.rate_n_flags) & RATE_MCS_FAT_MSK_API_D_VER_1) >> RATE_MCS_FAT_POS)
+  (((c_rate.rate_n_flags) & RATE_MCS_FAT_MSK_API_D_VER_1) >> RATE_MCS_FAT_POS)
 // 0==>20MHz, 1==>40MHz, 2==>80MHz, 3==>160MHz
 #define GET_BW_INDEX_API_M_VER_2(c_rate) \
-    (((c_rate.rate_n_flags) & RATE_MCS_FAT_MSK_API_D_VER_2) >> RATE_MCS_FAT_POS)
+  (((c_rate.rate_n_flags) & RATE_MCS_FAT_MSK_API_D_VER_2) >> RATE_MCS_FAT_POS)
 
 // 0==>x1, 1==>x2
 #define GET_DUP_INDEX_API_M_VER_1(c_rate) \
-    (((c_rate.rate_n_flags) & RATE_MCS_DUP_MSK_API_D_VER_1) >> RATE_MCS_DUP_POS_API_D_VER_1)
+  (((c_rate.rate_n_flags) & RATE_MCS_DUP_MSK_API_D_VER_1) >> RATE_MCS_DUP_POS_API_D_VER_1)
 // 0==>x1, 1==>x2, 2==>x4, 3==>x8
 #define GET_DUP_INDEX_API_M_VER_2(c_rate) \
-    (((c_rate.rate_n_flags) & RATE_MCS_DUP_MSK_API_D_VER_2) >> RATE_MCS_DUP_POS_API_D_VER_2)
+  (((c_rate.rate_n_flags) & RATE_MCS_DUP_MSK_API_D_VER_2) >> RATE_MCS_DUP_POS_API_D_VER_2)
 
 // get channel width, either by using true wide channel or by duplicate
 #define GET_CHANNEL_WIDTH_INDEX_API_M_VER_1(c_rate) \
-    (GET_BW_INDEX_API_M_VER_1(c_rate) | GET_DUP_INDEX_API_M_VER_1(c_rate))
+  (GET_BW_INDEX_API_M_VER_1(c_rate) | GET_DUP_INDEX_API_M_VER_1(c_rate))
 #define GET_CHANNEL_WIDTH_INDEX_API_M_VER_2(c_rate) \
-    (GET_BW_INDEX_API_M_VER_2(c_rate) | GET_DUP_INDEX_API_M_VER_2(c_rate))
+  (GET_BW_INDEX_API_M_VER_2(c_rate) | GET_DUP_INDEX_API_M_VER_2(c_rate))
 
 // 0==>normal GI, 1==>short GI
 #define GET_GI_INDEX_API_M_VER_1(c_rate) \
-    SHIFT_AND_MASK((c_rate.rate_n_flags), RATE_MCS_SGI_MSK, RATE_MCS_SGI_POS)
+  SHIFT_AND_MASK((c_rate.rate_n_flags), RATE_MCS_SGI_MSK, RATE_MCS_SGI_POS)
 
-#define IS_RATE_OFDM_HT_FAT_API_M_VER_2(c_rate)                                    \
-    (((c_rate.rate_n_flags) & (RATE_MCS_HT_MSK | RATE_MCS_FAT_MSK_API_D_VER_1)) == \
-     (RATE_MCS_HT_MSK | RATE_MCS_FAT40))
+#define IS_RATE_OFDM_HT_FAT_API_M_VER_2(c_rate)                                  \
+  (((c_rate.rate_n_flags) & (RATE_MCS_HT_MSK | RATE_MCS_FAT_MSK_API_D_VER_1)) == \
+   (RATE_MCS_HT_MSK | RATE_MCS_FAT40))
 
 #define GET_HT_MIMO_INDEX_API_M_VER_1(c_rate) \
-    SHIFT_AND_MASK((c_rate.rate_n_flags), RATE_MCS_HT_MIMO_MSK, RATE_MCS_HT_MIMO_POS)
+  SHIFT_AND_MASK((c_rate.rate_n_flags), RATE_MCS_HT_MIMO_MSK, RATE_MCS_HT_MIMO_POS)
 
 #define GET_NUM_OF_HT_SS_API_M_VER_1(c_rate) (GET_HT_MIMO_INDEX_API_M_VER_1(c_rate) + 1)
 
 #define IS_RATE_OFDM_HT_MIMO_API_M_VER_2(c_rate) \
-    (IS_RATE_OFDM_HT_API_M_VER_2(c_rate) && (GET_NUM_OF_HT_SS_API_M_VER_1(c_rate) > 1))
+  (IS_RATE_OFDM_HT_API_M_VER_2(c_rate) && (GET_NUM_OF_HT_SS_API_M_VER_1(c_rate) > 1))
 
 #define IS_RATE_OFDM_HT2x2MIMO_API_M_VER_1(c_rate) \
-    (IS_RATE_OFDM_HT_API_M_VER_2(c_rate) && (GET_NUM_OF_HT_SS_API_M_VER_1(c_rate) == 2))
+  (IS_RATE_OFDM_HT_API_M_VER_2(c_rate) && (GET_NUM_OF_HT_SS_API_M_VER_1(c_rate) == 2))
 
 #define IS_RATE_OFDM_HT3x3MIMO_API_M_VER_1(c_rate) \
-    (IS_RATE_OFDM_HT_API_M_VER_2(c_rate) && (GET_NUM_OF_HT_SS_API_M_VER_1(c_rate) == 3))
+  (IS_RATE_OFDM_HT_API_M_VER_2(c_rate) && (GET_NUM_OF_HT_SS_API_M_VER_1(c_rate) == 3))
 
 #define IS_RATE_OFDM_HT4x4MIMO_API_M_VER_1(c_rate) \
-    (IS_RATE_OFDM_HT_API_M_VER_2(c_rate) && (GET_NUM_OF_HT_SS_API_M_VER_1(c_rate) == 4))
+  (IS_RATE_OFDM_HT_API_M_VER_2(c_rate) && (GET_NUM_OF_HT_SS_API_M_VER_1(c_rate) == 4))
 
 #define GET_HT_RATE_CODE_API_M_VER_1(c_rate) ((c_rate).rate_n_flags & RATE_MCS_HT_RATE_CODE_MSK)
 
-#define IS_RATE_HT_STBC_SINGLE_SS_API_M_VER_1(c_rate)                   \
-    (((c_rate.rate_n_flags) & (RATE_MCS_HT_MSK | RATE_MCS_STBC_MSK)) == \
-     (RATE_MCS_HT_MSK | RATE_MCS_STBC_MSK))
+#define IS_RATE_HT_STBC_SINGLE_SS_API_M_VER_1(c_rate)                 \
+  (((c_rate.rate_n_flags) & (RATE_MCS_HT_MSK | RATE_MCS_STBC_MSK)) == \
+   (RATE_MCS_HT_MSK | RATE_MCS_STBC_MSK))
 
 // rate&flags cleanup, note extended HT-LTF not supported by DSP
 #define GET_NUM_OF_HT_EXT_LTF_API_M_VER_1(c_rate) 0
 
 #define GET_NUM_OF_HT_SPACE_TIME_STREAMS_API_M_VER_1(c_rate) \
-    (GET_NUM_OF_HT_SS_API_M_VER_1(c_rate) + GET_NUM_OF_STBC_SS_API_M_VER_1(c_rate))
+  (GET_NUM_OF_HT_SS_API_M_VER_1(c_rate) + GET_NUM_OF_STBC_SS_API_M_VER_1(c_rate))
 
 // check if supported rate the bad rate conditions are:
 // 1. MCS is 32 but FAT is not set
@@ -790,33 +790,33 @@
 // 3. Number of STBC SS is greater than 2
 // 4. Number of STBC is 2 and number of SS isn't equals to 2
 // 5. Legal MCS
-#define IS_BAD_OFDM_HT_RATE_API_M_VER_2(rx_rate)                     \
-    ((((rx_rate.s.rate) == MCS_DUP_6M) &&                            \
-      (!((rx_rate.rate_n_flags) & RATE_MCS_FAT_MSK_API_D_VER_1))) || \
-     (GET_NUM_OF_STBC_SS_API_M_VER_1(rx_rate) > 2) ||                \
-     ((GET_NUM_OF_STBC_SS_API_M_VER_1(rx_rate) == 2) &&              \
-      (!(IS_RATE_OFDM_HT2x2MIMO_API_M_VER_1(rx_rate)))) ||           \
-     (IS_RATE_OFDM_HT_API_M_VER_2(rx_rate) && (rx_rate.s.rate > MAX_LEGAL_MCS_API_D_VER_1)))
+#define IS_BAD_OFDM_HT_RATE_API_M_VER_2(rx_rate)                   \
+  ((((rx_rate.s.rate) == MCS_DUP_6M) &&                            \
+    (!((rx_rate.rate_n_flags) & RATE_MCS_FAT_MSK_API_D_VER_1))) || \
+   (GET_NUM_OF_STBC_SS_API_M_VER_1(rx_rate) > 2) ||                \
+   ((GET_NUM_OF_STBC_SS_API_M_VER_1(rx_rate) == 2) &&              \
+    (!(IS_RATE_OFDM_HT2x2MIMO_API_M_VER_1(rx_rate)))) ||           \
+   (IS_RATE_OFDM_HT_API_M_VER_2(rx_rate) && (rx_rate.s.rate > MAX_LEGAL_MCS_API_D_VER_1)))
 
 // removed: GF support:
 //  ((!(IS_RATE_OFDM_HT_MIMO_API_M_VER_2(rx_rate))) && (((rx_rate.rate_n_flags) & (RATE_MCS_GF_MSK |
 //  RATE_MCS_SGI_MSK)) == (RATE_MCS_GF_MSK | RATE_MCS_SGI_MSK))) ||
 
-#define IS_BAD_OFDM_HT_RATE_API_M_VER_3(rx_rate)                     \
-    ((((rx_rate.s.rate) == MCS_DUP_6M) &&                            \
-      (!((rx_rate.rate_n_flags) & RATE_MCS_FAT_MSK_API_D_VER_2))) || \
-     (GET_NUM_OF_STBC_SS_API_M_VER_1(rx_rate) > 2) ||                \
-     ((GET_NUM_OF_STBC_SS_API_M_VER_1(rx_rate) == 2) &&              \
-      (!(IS_RATE_OFDM_HT2x2MIMO_API_M_VER_1(rx_rate)))) ||           \
-     (IS_RATE_OFDM_HT_API_M_VER_2(rx_rate) && (rx_rate.s.rate > MAX_LEGAL_MCS_API_D_VER_1)))
+#define IS_BAD_OFDM_HT_RATE_API_M_VER_3(rx_rate)                   \
+  ((((rx_rate.s.rate) == MCS_DUP_6M) &&                            \
+    (!((rx_rate.rate_n_flags) & RATE_MCS_FAT_MSK_API_D_VER_2))) || \
+   (GET_NUM_OF_STBC_SS_API_M_VER_1(rx_rate) > 2) ||                \
+   ((GET_NUM_OF_STBC_SS_API_M_VER_1(rx_rate) == 2) &&              \
+    (!(IS_RATE_OFDM_HT2x2MIMO_API_M_VER_1(rx_rate)))) ||           \
+   (IS_RATE_OFDM_HT_API_M_VER_2(rx_rate) && (rx_rate.s.rate > MAX_LEGAL_MCS_API_D_VER_1)))
 
-#define IS_RATE_HT_HIGH_RATE_API_M_VER_1(c_rate)                       \
-    (((c_rate.rate_n_flags) & (RATE_MCS_HT_MSK | RATE_MCS_CODE_MSK)) > \
-     (RATE_MCS_HT_MSK | RATE_MCS_HT_MIMO2_MSK | MCS_24M_MSK))
+#define IS_RATE_HT_HIGH_RATE_API_M_VER_1(c_rate)                     \
+  (((c_rate.rate_n_flags) & (RATE_MCS_HT_MSK | RATE_MCS_CODE_MSK)) > \
+   (RATE_MCS_HT_MSK | RATE_MCS_HT_MIMO2_MSK | MCS_24M_MSK))
 
 // 0==>SISO, 1==>MIMO2, 2==>MIMO3, 3==>MIMO4
 #define GET_VHT_MIMO_INDX_API_M_VER_1(c_rate) \
-    SHIFT_AND_MASK(((c_rate).s.rate), RATE_MCS_VHT_MIMO_MSK, RATE_MCS_VHT_MIMO_POS)
+  SHIFT_AND_MASK(((c_rate).s.rate), RATE_MCS_VHT_MIMO_MSK, RATE_MCS_VHT_MIMO_POS)
 
 // for Single-User number of Spatial-steams in SIG is actual number -1
 // for Multi-User number of Spatial-steams in SIG is actual number
@@ -824,34 +824,34 @@
 
 #define GET_VHT_RATE_CODE_API_M_VER_1(c_rate) ((c_rate).rate_n_flags & RATE_MCS_VHT_RATE_CODE_MSK)
 
-#define GET_HT_VHT_RATE_CODE_API_M_VER_1(c_rate)                               \
-    IS_RATE_OFDM_HT_API_M_VER_2(c_rate) ? GET_HT_RATE_CODE_API_M_VER_1(c_rate) \
-                                        : GET_VHT_RATE_CODE_API_M_VER_1(c_rate)
+#define GET_HT_VHT_RATE_CODE_API_M_VER_1(c_rate)                             \
+  IS_RATE_OFDM_HT_API_M_VER_2(c_rate) ? GET_HT_RATE_CODE_API_M_VER_1(c_rate) \
+                                      : GET_VHT_RATE_CODE_API_M_VER_1(c_rate)
 
 // for VHT (unlike HT) STBC may be turned off/on, thus with STBC on, the number of
 // space-time-streams is doubled.
 #define IS_VHT_STBC_PRESENT_API_M_VER_1(c_rate) (((c_rate.rate_n_flags) & RATE_MCS_STBC_MSK))
 
 #define GET_NUM_OF_VHT_SPACE_TIME_STREAMS_API_M_VER_1(c_rate) \
-    (GET_NUM_OF_VHT_SS_API_M_VER_1(c_rate)                    \
-     << (IS_VHT_STBC_PRESENT_API_M_VER_1(c_rate) >> RATE_MCS_STBC_POS))
+  (GET_NUM_OF_VHT_SS_API_M_VER_1(c_rate)                      \
+   << (IS_VHT_STBC_PRESENT_API_M_VER_1(c_rate) >> RATE_MCS_STBC_POS))
 
 // get Mimo level for any rate, i.e. note legacy rate is SISO
 // vht not supported
 #define GET_MIMO_INDEX_API_M_VER_2(c_rate) \
-    (IS_RATE_OFDM_HT_API_M_VER_2(c_rate) ? GET_HT_MIMO_INDEX_API_M_VER_1(c_rate) : SISO_INDX)
+  (IS_RATE_OFDM_HT_API_M_VER_2(c_rate) ? GET_HT_MIMO_INDEX_API_M_VER_1(c_rate) : SISO_INDX)
 
 // LDPC rate
 #define IS_RATE_OFDM_LDPC_API_M_VER_2(c_rate) ((c_rate).rate_n_flags & RATE_MCS_LDPC_MSK)
 
 // beamformed frame indication
 #define GET_BF_INDEX_API_M_VER_1(c_rate) \
-    (((c_rate.rate_n_flags) & RATE_MCS_BF_MSK) >> RATE_MCS_BF_POS)
+  (((c_rate.rate_n_flags) & RATE_MCS_BF_MSK) >> RATE_MCS_BF_POS)
 
 // VHT/HE MU
-#define IS_RATE_VHT_MU_API_M_VER_1(c_rate)                                      \
-    (((c_rate.rate_n_flags) & (RATE_MCS_VHT_MSK | RATE_MCS_VHT_HE_TYPE_MSK)) == \
-     (RATE_MCS_VHT_MSK | (RATE_MCS_VHT_HE_MU << RATE_MCS_VHT_HE_TYPE_POS)))
+#define IS_RATE_VHT_MU_API_M_VER_1(c_rate)                                    \
+  (((c_rate.rate_n_flags) & (RATE_MCS_VHT_MSK | RATE_MCS_VHT_HE_TYPE_MSK)) == \
+   (RATE_MCS_VHT_MSK | (RATE_MCS_VHT_HE_MU << RATE_MCS_VHT_HE_TYPE_POS)))
 
 // *************************************************************************
 // *            HE definitions (currently w/o version)
@@ -860,52 +860,52 @@
 #define IS_RATE_OFDM_HE_API_M(c_rate) ((c_rate.rate_n_flags) & RATE_MCS_HE_MSK)
 // rate is OFDM VHT/HE
 #define IS_RATE_OFDM_VHT_HE_API_M(c_rate) \
-    (((c_rate).rate_n_flags) & (RATE_MCS_VHT_MSK | RATE_MCS_HE_MSK))
+  (((c_rate).rate_n_flags) & (RATE_MCS_VHT_MSK | RATE_MCS_HE_MSK))
 // rate is OFDM HT/VHT/HE
 #define IS_RATE_OFDM_HT_VHT_HE_API_M(c_rate) \
-    ((c_rate) & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK | RATE_MCS_HE_MSK))
+  ((c_rate) & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK | RATE_MCS_HE_MSK))
 // rate is OFDM HE and STBC (note should check no DCM, as STBC & DCM mark special GI/HE-LTF)
 // basically don't support this combo for now.
-#define IS_RATE_OFDM_HE_STBC_API_M(c_rate)                                                      \
-    ((((c_rate.rate_n_flags) & (RATE_MCS_STBC_MSK | RATE_MCS_HE_MSK | RATE_MCS_HE_DCM_MSK))) == \
-     (RATE_MCS_STBC_MSK | RATE_MCS_HE_MSK))
+#define IS_RATE_OFDM_HE_STBC_API_M(c_rate)                                                    \
+  ((((c_rate.rate_n_flags) & (RATE_MCS_STBC_MSK | RATE_MCS_HE_MSK | RATE_MCS_HE_DCM_MSK))) == \
+   (RATE_MCS_STBC_MSK | RATE_MCS_HE_MSK))
 // number of space time streams
 #define GET_NUM_OF_HE_SPACE_TIME_STREAMS_API_M(c_rate) \
-    ((c_rate.rate_n_flags) & (RATE_MCS_STBC_MSK | RATE_MCS_VHT_MIMO_MSK)) ? 2 : 1
+  ((c_rate.rate_n_flags) & (RATE_MCS_STBC_MSK | RATE_MCS_VHT_MIMO_MSK)) ? 2 : 1
 // rate is OFDM-HE w/ DCM (dual carrier mode)
 #define IS_RATE_OFDM_HE_DCM_API_M(c_rate) (c_rate.rate_n_flags & RATE_MCS_HE_DCM_MSK)
 #define GET_OFDM_HE_DCM_API_M(c_rate) \
-    SHIFT_AND_MASK(c_rate.rate_n_flags, RATE_MCS_HE_DCM_MSK, RATE_MCS_HE_DCM_POS)
+  SHIFT_AND_MASK(c_rate.rate_n_flags, RATE_MCS_HE_DCM_MSK, RATE_MCS_HE_DCM_POS)
 // rate is OFDM-HE ext-range using 106-tones (i.e. same as RU 8MHz 102 data-tones)
 #define IS_RATE_OFDM_HE_ER_106_API_M(c_rate) (c_rate.rate_n_flags & RATE_MCS_HE_ER_106_MSK)
 #define GET_OFDM_HE_ER_106_API_M(c_rate) \
-    SHIFT_AND_MASK(c_rate.rate_n_flags, RATE_MCS_HE_ER_106_MSK, RATE_MCS_HE_ER_106_POS)
+  SHIFT_AND_MASK(c_rate.rate_n_flags, RATE_MCS_HE_ER_106_MSK, RATE_MCS_HE_ER_106_POS)
 // get OFDM-HE HE-LTF size / GI size
 #define GET_OFDM_HE_GI_LTF_INDX_API_M(c_rate) \
-    SHIFT_AND_MASK(c_rate.rate_n_flags, RATE_MCS_HE_GI_LTF_MSK, RATE_MCS_HE_GI_LTF_POS)
+  SHIFT_AND_MASK(c_rate.rate_n_flags, RATE_MCS_HE_GI_LTF_MSK, RATE_MCS_HE_GI_LTF_POS)
 // get OFDM-HE type: SU, extended-range, MU, TRIG
 #define GET_OFDM_VHT_HE_TYPE_API_M(c_rate) \
-    SHIFT_AND_MASK(c_rate.rate_n_flags, RATE_MCS_VHT_HE_TYPE_MSK, RATE_MCS_VHT_HE_TYPE_POS)
+  SHIFT_AND_MASK(c_rate.rate_n_flags, RATE_MCS_VHT_HE_TYPE_MSK, RATE_MCS_VHT_HE_TYPE_POS)
 // check if OFDM-VHT/HE single-user
 #define IS_RATE_OFDM_VHT_HE_SU_API_M(c_rate) \
-    (GET_OFDM_VHT_HE_TYPE_API_M(c_rate) == RATE_MCS_VHT_HE_SU)
+  (GET_OFDM_VHT_HE_TYPE_API_M(c_rate) == RATE_MCS_VHT_HE_SU)
 // check if OFDM-HE single-user extended range
 #define IS_RATE_OFDM_HE_EXT_RANGE_API_M(c_rate) \
-    (GET_OFDM_VHT_HE_TYPE_API_M(c_rate) == RATE_MCS_HE_EXT_RANGE)
+  (GET_OFDM_VHT_HE_TYPE_API_M(c_rate) == RATE_MCS_HE_EXT_RANGE)
 // check if OFDM-HE trigger frame based
 #define IS_RATE_OFDM_HE_TRIG_BASE_API_M(c_rate) \
-    (GET_OFDM_VHT_HE_TYPE_API_M(c_rate) == RATE_MCS_HE_TRIG_BASE)
+  (GET_OFDM_VHT_HE_TYPE_API_M(c_rate) == RATE_MCS_HE_TRIG_BASE)
 // check if rate is VHT/HE multi-user
 #define IS_RATE_OFDM_VHT_HE_MU_API_M(c_rate) \
-    (GET_OFDM_VHT_HE_TYPE_API_M(c_rate) == RATE_MCS_VHT_HE_MU)
+  (GET_OFDM_VHT_HE_TYPE_API_M(c_rate) == RATE_MCS_VHT_HE_MU)
 // get the MCS index for HT/VHT/HE
 #define GET_HT_VHT_HE_RATE_CODE_API_M(c_rate) GET_HT_VHT_RATE_CODE_API_M_VER_1(c_rate)
 
 // vht supported
-#define GET_MIMO_INDEX_API_M_VER_3(c_rate)       \
-    (IS_RATE_OFDM_HT_API_M_VER_2(c_rate)         \
-         ? GET_HT_MIMO_INDEX_API_M_VER_1(c_rate) \
-         : IS_RATE_OFDM_VHT_HE_API_M(c_rate) ? GET_VHT_MIMO_INDX_API_M_VER_1(c_rate) : SISO_INDX)
+#define GET_MIMO_INDEX_API_M_VER_3(c_rate)     \
+  (IS_RATE_OFDM_HT_API_M_VER_2(c_rate)         \
+       ? GET_HT_MIMO_INDEX_API_M_VER_1(c_rate) \
+       : IS_RATE_OFDM_VHT_HE_API_M(c_rate) ? GET_VHT_MIMO_INDX_API_M_VER_1(c_rate) : SISO_INDX)
 
 /**@} GroupRates */
 
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/_rateScaleMng.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/_rateScaleMng.h
index 055981d..618edf4 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/_rateScaleMng.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/_rateScaleMng.h
@@ -36,8 +36,8 @@
 #ifndef SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_MVM__RATESCALEMNG_H_
 #define SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_MVM__RATESCALEMNG_H_
 
-#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/apiVersion.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/apiGroupDatapath.h"
+#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/apiVersion.h"
 
 #define RS_MNG_INVALID_VAL ((U32)-1)
 #define RS_MNG_RATE_MIN_FAILURE_TH 3
@@ -75,7 +75,7 @@
 #define RS_STAT_THOLD 20
 
 #define GET_OFDM_LEGACY_RATE_IDX(rate) \
-    OFDM_LEGACY_RATE_IDX_TO_UNIFIED[(((rate.rate_n_flags) & RATE_MCS_CODE_MSK) >> 2)]
+  OFDM_LEGACY_RATE_IDX_TO_UNIFIED[(((rate.rate_n_flags) & RATE_MCS_CODE_MSK) >> 2)]
 #define RS_MNG_PERCENT(x) (((x)*128) / 100)
 #define IS_RS_MNG_COL_ID_VALID(colId) (colId < RS_MNG_COL_INVALID)
 
@@ -121,137 +121,137 @@
 /***** Data Types *****/
 
 typedef enum _RS_MNG_ACTION_E {
-    RS_MNG_ACTION_STAY = 0,
-    RS_MNG_ACTION_UPSCALE = 1,
-    RS_MNG_ACTION_DOWNSCALE = 2,
+  RS_MNG_ACTION_STAY = 0,
+  RS_MNG_ACTION_UPSCALE = 1,
+  RS_MNG_ACTION_DOWNSCALE = 2,
 } RS_MNG_ACTION_E;
 
 enum {
-    PARAMS_TBL_IDX_INIT_NUM_RATES,
-    PARAMS_TBL_IDX_INIT_NUM_RETRIES,
-    PARAMS_TBL_IDX_SEC_NUM_RATES,
-    PARAMS_TBL_IDX_SEC_NUM_RETRIES,
-    PARAMS_TBL_NUM_COLS,
+  PARAMS_TBL_IDX_INIT_NUM_RATES,
+  PARAMS_TBL_IDX_INIT_NUM_RETRIES,
+  PARAMS_TBL_IDX_SEC_NUM_RATES,
+  PARAMS_TBL_IDX_SEC_NUM_RETRIES,
+  PARAMS_TBL_NUM_COLS,
 };
 
 typedef struct _RS_MNG_STA_LIMITS_S {
-    U32 successFramesLimit;    // successfull frames threshold for starting a search cycle.
-    U32 failedFramesLimit;     // failed frames threshold for starting a search cycle.
-    U32 statsFlushTimeLimit;   // time thrshold for starting a search cycle, in usec.
-    U32 clearTblWindowsLimit;  // txed frames threshold for clearing table windows during
-    // stay-in-col.
+  U32 successFramesLimit;    // successful frames threshold for starting a search cycle.
+  U32 failedFramesLimit;     // failed frames threshold for starting a search cycle.
+  U32 statsFlushTimeLimit;   // time threshold for starting a search cycle, in usec.
+  U32 clearTblWindowsLimit;  // txed frames threshold for clearing table windows during
+                             // stay-in-col.
 } RS_MNG_STA_LIMITS_S;
 
 // TX AMSDU size
 typedef enum _RS_MNG_TX_AMSDU_SIZE_E {
-    RS_MNG_AMSDU_INVALID,
-    RS_MNG_AMSDU_3500B,
-    RS_MNG_AMSDU_5000B,
-    RS_MNG_AMSDU_6500B,
-    RS_MNG_AMSDU_8000B,
+  RS_MNG_AMSDU_INVALID,
+  RS_MNG_AMSDU_3500B,
+  RS_MNG_AMSDU_5000B,
+  RS_MNG_AMSDU_6500B,
+  RS_MNG_AMSDU_8000B,
 
-    RS_MNG_AMSDU_SIZE_NUM,  // keep last
+  RS_MNG_AMSDU_SIZE_NUM,  // keep last
 } RS_MNG_TX_AMSDU_SIZE_E;
 
 #define RS_MNG_AMSDU_SIZE_ALL (BIT(RS_MNG_AMSDU_3500B) | BIT(RS_MNG_AMSDU_5000B))
 
 // rs_column_mode
 typedef enum _RS_MNG_MODULATION_E {
-    RS_MNG_MODUL_LEGACY,
-    RS_MNG_MODUL_SISO,
-    RS_MNG_MODUL_MIMO2,
-    RS_MNG_NUM_MODULATIONS,  // keep last
-    RS_MNG_MODUL_INVALID = RS_MNG_NUM_MODULATIONS,
+  RS_MNG_MODUL_LEGACY,
+  RS_MNG_MODUL_SISO,
+  RS_MNG_MODUL_MIMO2,
+  RS_MNG_NUM_MODULATIONS,  // keep last
+  RS_MNG_MODUL_INVALID = RS_MNG_NUM_MODULATIONS,
 } RS_MNG_MODULATION_E;
 
 typedef enum _RS_MNG_GI_E {
-    HT_VHT_NGI,
-    HT_VHT_SGI,
-    HT_VHT_LAST_GI = HT_VHT_SGI,
-    HE_3_2_GI,
-    HE_FIRST_GI = HE_3_2_GI,
-    HE_1_6_GI,
-    HE_0_8_GI,
+  HT_VHT_NGI,
+  HT_VHT_SGI,
+  HT_VHT_LAST_GI = HT_VHT_SGI,
+  HE_3_2_GI,
+  HE_FIRST_GI = HE_3_2_GI,
+  HE_1_6_GI,
+  HE_0_8_GI,
 } RS_MNG_GI_E;
 
 typedef enum _RS_NON_HT_RATES_E {
-    RS_NON_HT_RATE_CCK_1M,
-    RS_NON_HT_RATE_CCK_2M,
-    RS_NON_HT_RATE_CCK_5_5M,
-    RS_NON_HT_RATE_CCK_11M,
-    RS_NON_HT_RATE_CCK_LAST = RS_NON_HT_RATE_CCK_11M,
-    RS_NON_HT_RATE_OFDM_6M,
-    RS_NON_HT_RATE_OFDM_9M,
-    RS_NON_HT_RATE_OFDM_12M,
-    RS_NON_HT_RATE_OFDM_18M,
-    RS_NON_HT_RATE_OFDM_24M,
-    RS_NON_HT_RATE_OFDM_36M,
-    RS_NON_HT_RATE_OFDM_48M,
-    RS_NON_HT_RATE_OFDM_54M,
+  RS_NON_HT_RATE_CCK_1M,
+  RS_NON_HT_RATE_CCK_2M,
+  RS_NON_HT_RATE_CCK_5_5M,
+  RS_NON_HT_RATE_CCK_11M,
+  RS_NON_HT_RATE_CCK_LAST = RS_NON_HT_RATE_CCK_11M,
+  RS_NON_HT_RATE_OFDM_6M,
+  RS_NON_HT_RATE_OFDM_9M,
+  RS_NON_HT_RATE_OFDM_12M,
+  RS_NON_HT_RATE_OFDM_18M,
+  RS_NON_HT_RATE_OFDM_24M,
+  RS_NON_HT_RATE_OFDM_36M,
+  RS_NON_HT_RATE_OFDM_48M,
+  RS_NON_HT_RATE_OFDM_54M,
 
-    RS_NON_HT_RATE_OFDM_LAST = RS_NON_HT_RATE_OFDM_54M,
-    RS_NON_HT_RATE_LAST = RS_NON_HT_RATE_OFDM_LAST,
-    RS_NON_HT_RATE_NUM,
+  RS_NON_HT_RATE_OFDM_LAST = RS_NON_HT_RATE_OFDM_54M,
+  RS_NON_HT_RATE_LAST = RS_NON_HT_RATE_OFDM_LAST,
+  RS_NON_HT_RATE_NUM,
 } RS_NON_HT_RATES_E;
 
 typedef enum _RS_MCS_E {
-    RS_MCS_0,
-    RS_MCS_1,
-    RS_MCS_2,
-    RS_MCS_3,
-    RS_MCS_4,
-    RS_MCS_5,
-    RS_MCS_6,
-    RS_MCS_7,
-    RS_MCS_HT_LAST = RS_MCS_7,
-    RS_MCS_8,
-    RS_MCS_20MHZ_LAST = RS_MCS_8,
-    RS_MCS_9,
-    RS_MCS_VHT_LAST = RS_MCS_9,
-    RS_MCS_10,
-    RS_MCS_11,
-    RS_MCS_HE_LAST = RS_MCS_11,
+  RS_MCS_0,
+  RS_MCS_1,
+  RS_MCS_2,
+  RS_MCS_3,
+  RS_MCS_4,
+  RS_MCS_5,
+  RS_MCS_6,
+  RS_MCS_7,
+  RS_MCS_HT_LAST = RS_MCS_7,
+  RS_MCS_8,
+  RS_MCS_20MHZ_LAST = RS_MCS_8,
+  RS_MCS_9,
+  RS_MCS_VHT_LAST = RS_MCS_9,
+  RS_MCS_10,
+  RS_MCS_11,
+  RS_MCS_HE_LAST = RS_MCS_11,
 
-    RS_MCS_0_HE_ER_AND_DCM,
+  RS_MCS_0_HE_ER_AND_DCM,
 
-    RS_MCS_NUM,
+  RS_MCS_NUM,
 } RS_MCS_E;
 
 #define RS_MNG_MAX_RATES_NUM MAX((U08)RS_NON_HT_RATE_NUM, (U08)RS_MCS_NUM)
 
 typedef enum _RS_MNG_STATE_E {
-    RS_MNG_STATE_SEARCH_CYCLE_STARTED,
-    RS_MNG_STATE_TPC_SEARCH,
-    RS_MNG_STATE_STAY_IN_COLUMN,
+  RS_MNG_STATE_SEARCH_CYCLE_STARTED,
+  RS_MNG_STATE_TPC_SEARCH,
+  RS_MNG_STATE_STAY_IN_COLUMN,
 } RS_MNG_STATE_E;
 
 typedef enum _RS_MNG_COLUMN_DESC_E {
-    RS_MNG_COL_NON_HT_ANT_A = 0,
-    RS_MNG_COL_NON_HT_ANT_B,
-    RS_MNG_COL_SISO_ANT_A,
-    RS_MNG_COL_FIRST_HT_VHT = RS_MNG_COL_SISO_ANT_A,
-    RS_MNG_COL_SISO_ANT_B,
-    RS_MNG_COL_SISO_ANT_A_SGI,
-    RS_MNG_COL_SISO_ANT_B_SGI,
-    RS_MNG_COL_MIMO2,
-    RS_MNG_COL_MIMO2_SGI,                           // 7
-    RS_MNG_COL_LAST_HT_VHT = RS_MNG_COL_MIMO2_SGI,  // 7
+  RS_MNG_COL_NON_HT_ANT_A = 0,
+  RS_MNG_COL_NON_HT_ANT_B,
+  RS_MNG_COL_SISO_ANT_A,
+  RS_MNG_COL_FIRST_HT_VHT = RS_MNG_COL_SISO_ANT_A,
+  RS_MNG_COL_SISO_ANT_B,
+  RS_MNG_COL_SISO_ANT_A_SGI,
+  RS_MNG_COL_SISO_ANT_B_SGI,
+  RS_MNG_COL_MIMO2,
+  RS_MNG_COL_MIMO2_SGI,                           // 7
+  RS_MNG_COL_LAST_HT_VHT = RS_MNG_COL_MIMO2_SGI,  // 7
 
-    RS_MNG_COL_HE_3_2_SISO_ANT_A,  // 8
-    RS_MNG_COL_FIRST_HE = RS_MNG_COL_HE_3_2_SISO_ANT_A,
-    RS_MNG_COL_HE_3_2_SISO_ANT_B,
-    RS_MNG_COL_HE_1_6_SISO_ANT_A,
-    RS_MNG_COL_HE_1_6_SISO_ANT_B,
-    RS_MNG_COL_HE_0_8_SISO_ANT_A,
-    RS_MNG_COL_HE_0_8_SISO_ANT_B,
-    RS_MNG_COL_HE_3_2_MIMO,
-    RS_MNG_COL_HE_1_6_MIMO,
-    RS_MNG_COL_HE_0_8_MIMO,                       // 16
-    RS_MNG_COL_LAST_HE = RS_MNG_COL_HE_0_8_MIMO,  // 16
-    RS_MNG_COL_LAST = RS_MNG_COL_LAST_HE,         // 16
+  RS_MNG_COL_HE_3_2_SISO_ANT_A,  // 8
+  RS_MNG_COL_FIRST_HE = RS_MNG_COL_HE_3_2_SISO_ANT_A,
+  RS_MNG_COL_HE_3_2_SISO_ANT_B,
+  RS_MNG_COL_HE_1_6_SISO_ANT_A,
+  RS_MNG_COL_HE_1_6_SISO_ANT_B,
+  RS_MNG_COL_HE_0_8_SISO_ANT_A,
+  RS_MNG_COL_HE_0_8_SISO_ANT_B,
+  RS_MNG_COL_HE_3_2_MIMO,
+  RS_MNG_COL_HE_1_6_MIMO,
+  RS_MNG_COL_HE_0_8_MIMO,                       // 16
+  RS_MNG_COL_LAST_HE = RS_MNG_COL_HE_0_8_MIMO,  // 16
+  RS_MNG_COL_LAST = RS_MNG_COL_LAST_HE,         // 16
 
-    RS_MNG_COL_COUNT = RS_MNG_COL_LAST + 1,  // 17
-    RS_MNG_COL_INVALID = RS_MNG_COL_COUNT,   // 17
+  RS_MNG_COL_COUNT = RS_MNG_COL_LAST + 1,  // 17
+  RS_MNG_COL_INVALID = RS_MNG_COL_COUNT,   // 17
 } RS_MNG_COLUMN_DESC_E;
 
 /***********************************/
@@ -262,49 +262,49 @@
 /**************************************************/
 
 typedef enum _RS_MNG_RATE_SETTING_BITMAP_E {
-    RS_MNG_RATE_MODE = BIT(0),
-    RS_MNG_RATE_MODULATION = BIT(1),
-    RS_MNG_RATE_U_IDX = BIT(2),
-    RS_MNG_RATE_GI = BIT(3),
-    RS_MNG_RATE_BW = BIT(4),
-    RS_MNG_RATE_ANT = BIT(5),
-    RS_MNG_RATE_STBC = BIT(6),
-    RS_MNG_RATE_LDPC = BIT(7),
-    RS_MNG_RATE_BFER = BIT(8),
+  RS_MNG_RATE_MODE = BIT(0),
+  RS_MNG_RATE_MODULATION = BIT(1),
+  RS_MNG_RATE_U_IDX = BIT(2),
+  RS_MNG_RATE_GI = BIT(3),
+  RS_MNG_RATE_BW = BIT(4),
+  RS_MNG_RATE_ANT = BIT(5),
+  RS_MNG_RATE_STBC = BIT(6),
+  RS_MNG_RATE_LDPC = BIT(7),
+  RS_MNG_RATE_BFER = BIT(8),
 
-    _RS_MNG_RATE_LAST = BIT(9),
-    RS_MNG_RATE_SET_ALL = _RS_MNG_RATE_LAST - 1,
+  _RS_MNG_RATE_LAST = BIT(9),
+  RS_MNG_RATE_SET_ALL = _RS_MNG_RATE_LAST - 1,
 
 } RS_MNG_RATE_SETTING_BITMAP_E;
 
 typedef struct _RS_MNG_RATE_S {
-    union {
-        RS_NON_HT_RATES_E nonHt;
-        RS_MCS_E mcs;
-        U08 idx;
-    } idx;
-    U16 unset;
-    RATE_MCS_API_U rate;
+  union {
+    RS_NON_HT_RATES_E nonHt;
+    RS_MCS_E mcs;
+    U08 idx;
+  } idx;
+  U16 unset;
+  RATE_MCS_API_U rate;
 } RS_MNG_RATE_S;
 
 typedef struct _RS_MNG_WIN_STAT_S {
-    U32 successRatio;    // per-cent * 128. RS_MNG_INVALID_VAL when invalid
-    U32 successCounter;  // number of frames successful
-    U32 framesCounter;   // number of frames attempted  //counter
-    U32 averageTpt;      // success ratio * expected throughput. RS_MNG_INVALID_VAL when invalid
+  U32 successRatio;    // per-cent * 128. RS_MNG_INVALID_VAL when invalid
+  U32 successCounter;  // number of frames successful
+  U32 framesCounter;   // number of frames attempted  //counter
+  U32 averageTpt;      // success ratio * expected throughput. RS_MNG_INVALID_VAL when invalid
 } RS_MNG_WIN_STAT_S;
 
 typedef struct _RS_MNG_TBL_INFO_S {
-    RS_MNG_RATE_S rsMngRate;
-    RS_MNG_COLUMN_DESC_E column;
-    RS_MNG_WIN_STAT_S win[RS_MNG_MAX_RATES_NUM];  // rates history
+  RS_MNG_RATE_S rsMngRate;
+  RS_MNG_COLUMN_DESC_E column;
+  RS_MNG_WIN_STAT_S win[RS_MNG_MAX_RATES_NUM];  // rates history
 } RS_MNG_TBL_INFO_S;
 
 typedef struct _RS_MNG_SEARCH_COL_DATA {
-    RS_MNG_RATE_S rsMngRate;
-    RS_MNG_COLUMN_DESC_E column;
-    RS_MNG_WIN_STAT_S win;
-    U32 expectedTpt;
+  RS_MNG_RATE_S rsMngRate;
+  RS_MNG_COLUMN_DESC_E column;
+  RS_MNG_WIN_STAT_S win;
+  U32 expectedTpt;
 } RS_MNG_SEARCH_COL_DATA;
 
 #define RS_MNG_TPC_NUM_STEPS 5
@@ -315,62 +315,62 @@
 // debug, amsdus not active long enough etc)
 #define RS_MNG_TPC_STEP_SIZE 3  // dB
 typedef struct _RS_MNG_TPC_TBL_S {
-    RS_MNG_WIN_STAT_S windows[RS_MNG_TPC_NUM_STEPS];
-    bool testing;
-    U08 currStep;  // index into the window array, or RS_MNG_TPC_<INACTIVE|DISABLED>
+  RS_MNG_WIN_STAT_S windows[RS_MNG_TPC_NUM_STEPS];
+  bool testing;
+  U08 currStep;  // index into the window array, or RS_MNG_TPC_<INACTIVE|DISABLED>
 } RS_MNG_TPC_TBL_S;
 // struct umac_lq_sta
 typedef struct _RS_MNG_STA_INFO_S {
-    TLC_MNG_CONFIG_PARAMS_CMD_API_S config;
-    bool enabled;
-    RS_MNG_TBL_INFO_S rateTblInfo;
-    RS_MNG_SEARCH_COL_DATA searchColData;  // When trying a new column, holds info on that column
-    U08 searchBetterTbl;                   // 1: currently trying alternate mode
-    U08 ignoreNextTlcNotif;  // The next notification recieved from lmac is irrelevant.
-    // Could happen if aggregations are opened in the middle of a
-    // search cycle.
-    U08 tryingRateUpscale;           // TRUE if now trying to upscale the rate.
-    U32 lastRateUpscaleTimeJiffies;  // system time of last rate upscale attempt.
-    U32 totalFramesFailed;           // total failed frames, any/all rates //total_failed
-    U32 totalFramesSuccess;
-    U16 framesSinceLastRun;  // number of frames sent since the last time rateScalePerform
-    // ran.
-    U32 lastSearchCycleEndTimeJiffies;  // time since end of last search cycle
-    U32 txedFrames;  // number of txed frames while stay in column, before clearing
-    // the all the stat windows in the current table.
-    U32 visitedColumns;  // bitmask of TX columns that were tested during this search cycle
-    U32 searchBw;        // holds a new bandwidth to try before ending a search cycle,
-    // or an invalid value if no bandwidth change should be tested.
-    RS_MNG_STATE_E rsMngState;
-    RS_MNG_COLUMN_DESC_E
-    stableColumn;  // id of the column used during the last STAY_IN_COLUMN state
+  TLC_MNG_CONFIG_PARAMS_CMD_API_S config;
+  bool enabled;
+  RS_MNG_TBL_INFO_S rateTblInfo;
+  RS_MNG_SEARCH_COL_DATA searchColData;  // When trying a new column, holds info on that column
+  U08 searchBetterTbl;                   // 1: currently trying alternate mode
+  U08 ignoreNextTlcNotif;                // The next notification received from lmac is irrelevant.
+  // Could happen if aggregations are opened in the middle of a
+  // search cycle.
+  U08 tryingRateUpscale;           // TRUE if now trying to upscale the rate.
+  U32 lastRateUpscaleTimeJiffies;  // system time of last rate upscale attempt.
+  U32 totalFramesFailed;           // total failed frames, any/all rates //total_failed
+  U32 totalFramesSuccess;
+  U16 framesSinceLastRun;  // number of frames sent since the last time rateScalePerform
+  // ran.
+  U32 lastSearchCycleEndTimeJiffies;  // time since end of last search cycle
+  U32 txedFrames;                     // number of txed frames while stay in column, before clearing
+  // all the stat windows in the current table.
+  U32 visitedColumns;  // bitmask of TX columns that were tested during this search cycle
+  U32 searchBw;        // holds a new bandwidth to try before ending a search cycle,
+  // or an invalid value if no bandwidth change should be tested.
+  RS_MNG_STATE_E rsMngState;
+  RS_MNG_COLUMN_DESC_E
+  stableColumn;  // id of the column used during the last STAY_IN_COLUMN state
 
-    U16 staBuffSize;  // receipient's reordering buffer size, as received in ADDBA response - min
-    // for all TIDs
+  U16 staBuffSize;  // recipient's reordering buffer size, as received in ADDBA response - min
+  // for all TIDs
 
-    bool amsduSupport;
-    RS_MNG_TX_AMSDU_SIZE_E amsduEnabledSize;
-    U32 trafficLoad;
-    U08 amsduBlacklist;
-    U32 lastTrafficLoadStatJiffies;
-    U32 failSafeCounter;
-    bool isUpscaleSearchCycle;  // TRUE if last search cycle started because of passing success
-    // frame limit.
-    U32 lastEnableJiffies;  // timestamp of the last TX AMSDU enablement
+  bool amsduSupport;
+  RS_MNG_TX_AMSDU_SIZE_E amsduEnabledSize;
+  U32 trafficLoad;
+  U08 amsduBlacklist;
+  U32 lastTrafficLoadStatJiffies;
+  U32 failSafeCounter;
+  bool isUpscaleSearchCycle;  // TRUE if last search cycle started because of passing success
+  // frame limit.
+  U32 lastEnableJiffies;  // timestamp of the last TX AMSDU enablement
 
-    RATE_MCS_API_U lastNotifiedRate;
+  RATE_MCS_API_U lastNotifiedRate;
 
-    RS_MNG_TPC_TBL_S tpcTable;
+  RS_MNG_TPC_TBL_S tpcTable;
 
-    // The params below this line should not be reset when reconfiguring an enabled station
-    U08 amsduInAmpdu;  // bitmask of tids with AMSDU in AMPDU supported
-    U16 aggDurationLimit;
-    U32 fixedRate;  // 0 if not using fixed rate
-    bool longAggEnabled;
+  // The params below this line should not be reset when reconfiguring an enabled station
+  U08 amsduInAmpdu;  // bitmask of tids with AMSDU in AMPDU supported
+  U16 aggDurationLimit;
+  U32 fixedRate;  // 0 if not using fixed rate
+  bool longAggEnabled;
 
-    struct iwl_mvm* mvm;
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvmsta;
+  struct iwl_mvm* mvm;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvmsta;
 } RS_MNG_STA_INFO_S;
 
 typedef struct _RS_MNG_COL_ELEM_S RS_MNG_COL_ELEM_S;
@@ -378,30 +378,28 @@
                                  const RS_MNG_COL_ELEM_S* nextCol);
 
 struct _RS_MNG_COL_ELEM_S {
-    RS_MNG_MODULATION_E mode;
-    U08 ant;
-    RS_MNG_GI_E gi;
-    RS_MNG_COLUMN_DESC_E nextCols[MAX_NEXT_COLUMNS];
-    ALLOW_COL_FUNC_F checks[MAX_COLUMN_CHECKS];
+  RS_MNG_MODULATION_E mode;
+  U08 ant;
+  RS_MNG_GI_E gi;
+  RS_MNG_COLUMN_DESC_E nextCols[MAX_NEXT_COLUMNS];
+  ALLOW_COL_FUNC_F checks[MAX_COLUMN_CHECKS];
 };
 
-static INLINE U08 rsMngGetDualAntMsk(void) {
-    return TLC_MNG_CHAIN_A_MSK | TLC_MNG_CHAIN_B_MSK;
-}
+static INLINE U08 rsMngGetDualAntMsk(void) { return TLC_MNG_CHAIN_A_MSK | TLC_MNG_CHAIN_B_MSK; }
 
 static INLINE U08 _rsMngGetSingleAntMsk(U08 chainsEnabled, uint8_t non_shared_ant,
                                         uint8_t valid_tx_ant) {
-    BUILD_BUG_ON(TLC_MNG_CHAIN_A_MSK != ANT_A);
-    BUILD_BUG_ON(TLC_MNG_CHAIN_B_MSK != ANT_B);
-    // Since TLC offload only supports 2 chains, if the non-shared antenna isn't enabled,
-    // chainsEnabled must have exactly one chain enabled.
-    return (U08)(valid_tx_ant != rsMngGetDualAntMsk()
-                     ? valid_tx_ant
-                     : (non_shared_ant & chainsEnabled ? non_shared_ant : chainsEnabled));
+  BUILD_BUG_ON(TLC_MNG_CHAIN_A_MSK != ANT_A);
+  BUILD_BUG_ON(TLC_MNG_CHAIN_B_MSK != ANT_B);
+  // Since TLC offload only supports 2 chains, if the non-shared antenna isn't enabled,
+  // chainsEnabled must have exactly one chain enabled.
+  return (U08)(valid_tx_ant != rsMngGetDualAntMsk()
+                   ? valid_tx_ant
+                   : (non_shared_ant & chainsEnabled ? non_shared_ant : chainsEnabled));
 }
 
-#define rsMngGetSingleAntMsk(chainsEnabled)                                    \
-    (_rsMngGetSingleAntMsk((chainsEnabled), staInfo->mvm->cfg->non_shared_ant, \
-                           iwl_mvm_get_valid_tx_ant(staInfo->mvm)))
+#define rsMngGetSingleAntMsk(chainsEnabled)                                  \
+  (_rsMngGetSingleAntMsk((chainsEnabled), staInfo->mvm->cfg->non_shared_ant, \
+                         iwl_mvm_get_valid_tx_ant(staInfo->mvm)))
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_MVM__RATESCALEMNG_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/apiGroupDatapath.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/apiGroupDatapath.h
index 9c1f6ca..e62478b 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/apiGroupDatapath.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/apiGroupDatapath.h
@@ -65,21 +65,21 @@
  *****************************************************************************/
 
 typedef enum _TLC_MNG_NSS_E {
-    TLC_MNG_NSS_1,
-    TLC_MNG_NSS_2,
+  TLC_MNG_NSS_1,
+  TLC_MNG_NSS_2,
 
-    TLC_MNG_NSS_MAX,
+  TLC_MNG_NSS_MAX,
 } TLC_MNG_NSS_E;
 
 typedef enum _TLC_MNG_MODE_E {
-    TLC_MNG_MODE_CCK = 0,
-    TLC_MNG_MODE_OFDM_LEGACY = TLC_MNG_MODE_CCK,
-    TLC_MNG_MODE_LEGACY = TLC_MNG_MODE_CCK,
-    TLC_MNG_MODE_HT,
-    TLC_MNG_MODE_VHT,
-    TLC_MNG_MODE_HE,
-    TLC_MNG_MODE_INVALID,  // keep last
-    TLC_MNG_MODE_NUM = TLC_MNG_MODE_INVALID,
+  TLC_MNG_MODE_CCK = 0,
+  TLC_MNG_MODE_OFDM_LEGACY = TLC_MNG_MODE_CCK,
+  TLC_MNG_MODE_LEGACY = TLC_MNG_MODE_CCK,
+  TLC_MNG_MODE_HT,
+  TLC_MNG_MODE_VHT,
+  TLC_MNG_MODE_HE,
+  TLC_MNG_MODE_INVALID,  // keep last
+  TLC_MNG_MODE_NUM = TLC_MNG_MODE_INVALID,
 } TLC_MNG_MODE_E;
 
 // TLC_MNG_CONFIG_FLAGS - set the bit when the feature is enabled
@@ -124,11 +124,11 @@
 #define TLC_MNG_CONFIG_FLAGS_HE_BLOCK_2X_LTF_MSK BIT(15)
 
 typedef enum _TLC_MNG_CH_WIDTH_E {
-    TLC_MNG_CH_WIDTH_20MHZ,
-    TLC_MNG_CH_WIDTH_40MHZ,
-    TLC_MNG_CH_WIDTH_80MHZ,
-    TLC_MNG_CH_WIDTH_160MHZ,
-    TLC_MNG_CH_WIDTH_MAX,
+  TLC_MNG_CH_WIDTH_20MHZ,
+  TLC_MNG_CH_WIDTH_40MHZ,
+  TLC_MNG_CH_WIDTH_80MHZ,
+  TLC_MNG_CH_WIDTH_160MHZ,
+  TLC_MNG_CH_WIDTH_MAX,
 } TLC_MNG_CH_WIDTH_E;
 
 #define TLC_MNG_CHAIN_A_MSK BIT(0)
@@ -138,40 +138,40 @@
 #define TLC_AMSDU_SUPPORTED 1
 
 typedef struct _TLC_MNG_CONFIG_PARAMS_CMD_API_S_VER_2 {
-    U08 maxChWidth;      // one of TLC_MNG_CH_WIDTH_E
-    U08 bestSuppMode;    // best mode supported - as defined above in TLC_MNG_MODE_E
-    U08 chainsEnabled;   // bitmask of TLC_MNG_CHAIN_[A/B]_MSK
-    U08 amsduSupported;  // TX AMSDU transmission is supported
-    // Use TLC_AMSDU_[NOT_]SUPPORTED
-    U16 configFlags;  // bitmask of TLC_MNG_CONFIG_FLAGS_*
-    U16 nonHt;        // bitmap of supported non-HT CCK and OFDM rates
-    /* bit   | rate
-       -------|--------
-       0    | R_1M   CCK
-       1    | R_2M   CCK
-       2    | R_5_5M CCK
-       3    | R_11M  CCK
-       4    | R_6M   OFDM
-       5    | R_9M   OFDM
-       6    | R_12M  OFDM
-       7    | R_18M  OFDM
-       8    | R_24M  OFDM
-       9    | R_36M  OFDM
-       10   | R_48M  OFDM
-       11   | R_54M  OFDM
-       */
-    U16 mcs[TLC_MNG_NSS_MAX][2];  // supported HT/VHT/HE rates per nss. [0] for 80mhz width
-    // and lower, [1] for 160mhz.
-    // This is done in order to conform with HE capabilites.
-    U16 maxMpduLen;  // Max length of MPDU, in bytes.
-    // Used to calculate allowed A-MSDU sizes.
-    U08 sgiChWidthSupport;  // bitmap of SGI support per channel width.
-    // use 1 << BIT(TLC_MNG_CH_WIDTH_*) to indicate sgi support
-    // for that channel width.
-    // unused for HE.
-    U08 reserved1[1];
+  U08 maxChWidth;      // one of TLC_MNG_CH_WIDTH_E
+  U08 bestSuppMode;    // best mode supported - as defined above in TLC_MNG_MODE_E
+  U08 chainsEnabled;   // bitmask of TLC_MNG_CHAIN_[A/B]_MSK
+  U08 amsduSupported;  // TX AMSDU transmission is supported
+  // Use TLC_AMSDU_[NOT_]SUPPORTED
+  U16 configFlags;  // bitmask of TLC_MNG_CONFIG_FLAGS_*
+  U16 nonHt;        // bitmap of supported non-HT CCK and OFDM rates
+  /* bit   | rate
+     -------|--------
+     0    | R_1M   CCK
+     1    | R_2M   CCK
+     2    | R_5_5M CCK
+     3    | R_11M  CCK
+     4    | R_6M   OFDM
+     5    | R_9M   OFDM
+     6    | R_12M  OFDM
+     7    | R_18M  OFDM
+     8    | R_24M  OFDM
+     9    | R_36M  OFDM
+     10   | R_48M  OFDM
+     11   | R_54M  OFDM
+     */
+  U16 mcs[TLC_MNG_NSS_MAX][2];  // supported HT/VHT/HE rates per nss. [0] for 80mhz width
+  // and lower, [1] for 160mhz.
+  // This is done in order to conform with HE capabilities.
+  U16 maxMpduLen;  // Max length of MPDU, in bytes.
+  // Used to calculate allowed A-MSDU sizes.
+  U08 sgiChWidthSupport;  // bitmap of SGI support per channel width.
+  // use 1 << BIT(TLC_MNG_CH_WIDTH_*) to indicate sgi support
+  // for that channel width.
+  // unused for HE.
+  U08 reserved1[1];
 
-    enum nl80211_band band;
+  enum nl80211_band band;
 } TLC_MNG_CONFIG_PARAMS_CMD_API_S_VER_2;
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_MVM_APIGROUPDATAPATH_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/apiVersion.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/apiVersion.h
index 08ecdc3e..62a9611 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/apiVersion.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/apiVersion.h
@@ -130,7 +130,7 @@
 #define GET_NUM_OF_HT_EXT_LTF_API_M API_VERSION(GET_NUM_OF_HT_EXT_LTF_API_M, 1)
 
 #define GET_NUM_OF_HT_SPACE_TIME_STREAMS_API_M \
-    API_VERSION(GET_NUM_OF_HT_SPACE_TIME_STREAMS_API_M, 1)
+  API_VERSION(GET_NUM_OF_HT_SPACE_TIME_STREAMS_API_M, 1)
 
 #define IS_RATE_HT_HIGH_RATE_API_M API_VERSION(IS_RATE_HT_HIGH_RATE_API_M, 1)
 
@@ -145,7 +145,7 @@
 #define IS_VHT_STBC_PRESENT_API_M API_VERSION(IS_VHT_STBC_PRESENT_API_M, 1)
 
 #define GET_NUM_OF_VHT_SPACE_TIME_STREAMS_API_M \
-    API_VERSION(GET_NUM_OF_VHT_SPACE_TIME_STREAMS_API_M, 1)
+  API_VERSION(GET_NUM_OF_VHT_SPACE_TIME_STREAMS_API_M, 1)
 
 #define IS_RATE_OFDM_LDPC_API_M API_VERSION(IS_RATE_OFDM_LDPC_API_M, 2)
 
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/ax-softap-testmode.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/ax-softap-testmode.c
index e90e21e..ac4e0f3 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/ax-softap-testmode.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/ax-softap-testmode.c
@@ -32,111 +32,112 @@
  *****************************************************************************/
 
 #include "fw/api/ax-softap-testmode.h"
+
 #include <net/mac80211.h>
+
 #include "debugfs.h"
 #include "mvm.h"
 
 static ssize_t iwl_dbgfs_ax_softap_testmode_dl_basic_write(struct iwl_mvm* mvm, char* buf,
                                                            size_t count, loff_t* ppos) {
-    struct ax_softap_testmode_dl_basic_cmd* cmd = (struct ax_softap_testmode_dl_basic_cmd*)buf;
+  struct ax_softap_testmode_dl_basic_cmd* cmd = (struct ax_softap_testmode_dl_basic_cmd*)buf;
 
-    int ret;
-    uint32_t status;
+  int ret;
+  uint32_t status;
 
-    if (sizeof(*cmd) != count) {
-        IWL_ERR(mvm, "Bad size for softap dl basic cmd (%zd) should be (%zd)\n", count,
-                sizeof(*cmd));
-        return -EINVAL;
-    }
+  if (sizeof(*cmd) != count) {
+    IWL_ERR(mvm, "Bad size for softap dl basic cmd (%zd) should be (%zd)\n", count, sizeof(*cmd));
+    return -EINVAL;
+  }
 
-    status = 0;
+  status = 0;
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd_pdu_status(
-        mvm, iwl_cmd_id(AX_SOFTAP_TESTMODE_DL_BASIC, DATA_PATH_GROUP, 0), count, cmd, &status);
-    mutex_unlock(&mvm->mutex);
-    if (ret) {
-        IWL_ERR(mvm, "Failed to send softap dl basic cmd (%d)\n", ret);
-        return ret;
-    }
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd_pdu_status(
+      mvm, iwl_cmd_id(AX_SOFTAP_TESTMODE_DL_BASIC, DATA_PATH_GROUP, 0), count, cmd, &status);
+  mutex_unlock(&mvm->mutex);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send softap dl basic cmd (%d)\n", ret);
+    return ret;
+  }
 
-    if (status) {
-        IWL_ERR(mvm, "softap dl basic cmd failed (%d)\n", status);
-        return -EIO;
-    }
+  if (status) {
+    IWL_ERR(mvm, "softap dl basic cmd failed (%d)\n", status);
+    return -EIO;
+  }
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_ax_softap_testmode_dl_mu_bar_write(struct iwl_mvm* mvm, char* buf,
                                                             size_t count, loff_t* ppos) {
-    struct ax_softap_testmode_dl_mu_bar_cmd* cmd = (struct ax_softap_testmode_dl_mu_bar_cmd*)buf;
+  struct ax_softap_testmode_dl_mu_bar_cmd* cmd = (struct ax_softap_testmode_dl_mu_bar_cmd*)buf;
 
-    int ret;
-    uint32_t status;
+  int ret;
+  uint32_t status;
 
-    if (sizeof(*cmd) != count) {
-        IWL_ERR(mvm, "Bad size for softap dl mu bar cmd (%zd) should be (%zd)\n", count,
-                sizeof(*cmd));
-        return -EINVAL;
-    }
+  if (sizeof(*cmd) != count) {
+    IWL_ERR(mvm, "Bad size for softap dl mu bar cmd (%zd) should be (%zd)\n", count, sizeof(*cmd));
+    return -EINVAL;
+  }
 
-    status = 0;
+  status = 0;
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd_pdu_status(
-        mvm, iwl_cmd_id(AX_SOFTAP_TESTMODE_DL_MU_BAR, DATA_PATH_GROUP, 0), count, cmd, &status);
-    mutex_unlock(&mvm->mutex);
-    if (ret) {
-        IWL_ERR(mvm, "Failed to send softap dl mu bar cmd (%d)\n", ret);
-        return ret;
-    }
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd_pdu_status(
+      mvm, iwl_cmd_id(AX_SOFTAP_TESTMODE_DL_MU_BAR, DATA_PATH_GROUP, 0), count, cmd, &status);
+  mutex_unlock(&mvm->mutex);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send softap dl mu bar cmd (%d)\n", ret);
+    return ret;
+  }
 
-    if (status) {
-        IWL_ERR(mvm, "softap dl mu bar cmd failed (%d)\n", status);
-        return -EIO;
-    }
+  if (status) {
+    IWL_ERR(mvm, "softap dl mu bar cmd failed (%d)\n", status);
+    return -EIO;
+  }
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_ax_softap_testmode_ul_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                                      loff_t* ppos) {
-    struct ax_softap_testmode_ul_cmd* cmd = (struct ax_softap_testmode_ul_cmd*)buf;
+  struct ax_softap_testmode_ul_cmd* cmd = (struct ax_softap_testmode_ul_cmd*)buf;
 
-    int ret;
-    uint32_t status;
+  int ret;
+  uint32_t status;
 
-    if (sizeof(*cmd) != count) {
-        IWL_ERR(mvm, "Bad size for softap ul cmd (%zd) should be (%zd)\n", count, sizeof(*cmd));
-        return -EINVAL;
-    }
+  if (sizeof(*cmd) != count) {
+    IWL_ERR(mvm, "Bad size for softap ul cmd (%zd) should be (%zd)\n", count, sizeof(*cmd));
+    return -EINVAL;
+  }
 
-    status = 0;
+  status = 0;
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd_pdu_status(mvm, iwl_cmd_id(AX_SOFTAP_TESTMODE_UL, DATA_PATH_GROUP, 0),
-                                      count, cmd, &status);
-    mutex_unlock(&mvm->mutex);
-    if (ret) {
-        IWL_ERR(mvm, "Failed to send softap ul cmd (%d)\n", ret);
-        return ret;
-    }
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd_pdu_status(mvm, iwl_cmd_id(AX_SOFTAP_TESTMODE_UL, DATA_PATH_GROUP, 0),
+                                    count, cmd, &status);
+  mutex_unlock(&mvm->mutex);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send softap ul cmd (%d)\n", ret);
+    return ret;
+  }
 
-    if (status) {
-        IWL_ERR(mvm, "softap ul cmd failed (%d)\n", status);
-        return -EIO;
-    }
+  if (status) {
+    IWL_ERR(mvm, "softap ul cmd failed (%d)\n", status);
+    return -EIO;
+  }
 
-    return count;
+  return count;
 }
 
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
-    _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
-#define MVM_DEBUGFS_ADD_FILE_AX_SOFTAP_TM(name, parent, mode)                                  \
-    do {                                                                                       \
-        if (!debugfs_create_file(#name, mode, parent, mvm, &iwl_dbgfs_##name##_ops)) goto err; \
-    } while (0)
+  _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+#define MVM_DEBUGFS_ADD_FILE_AX_SOFTAP_TM(name, parent, mode)                    \
+  do {                                                                           \
+    if (!debugfs_create_file(#name, mode, parent, mvm, &iwl_dbgfs_##name##_ops)) \
+      goto err;                                                                  \
+  } while (0)
 
 #define DL_BASIC_CMD_SIZE (sizeof(struct ax_softap_testmode_dl_basic_cmd) + 1)
 #define DL_MU_BAR_CMD_SIZE (sizeof(struct ax_softap_testmode_dl_mu_bar_cmd) + 1)
@@ -148,19 +149,19 @@
 
 static void ax_softap_testmode_add_debugfs(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                                            struct ieee80211_sta* sta, struct dentry* dir) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
 
-    MVM_DEBUGFS_ADD_FILE_AX_SOFTAP_TM(ax_softap_testmode_dl_basic, dir, S_IWUSR);
-    MVM_DEBUGFS_ADD_FILE_AX_SOFTAP_TM(ax_softap_testmode_dl_mu_bar, dir, S_IWUSR);
-    MVM_DEBUGFS_ADD_FILE_AX_SOFTAP_TM(ax_softap_testmode_ul, dir, S_IWUSR);
-    return;
+  MVM_DEBUGFS_ADD_FILE_AX_SOFTAP_TM(ax_softap_testmode_dl_basic, dir, S_IWUSR);
+  MVM_DEBUGFS_ADD_FILE_AX_SOFTAP_TM(ax_softap_testmode_dl_mu_bar, dir, S_IWUSR);
+  MVM_DEBUGFS_ADD_FILE_AX_SOFTAP_TM(ax_softap_testmode_ul, dir, S_IWUSR);
+  return;
 err:
-    IWL_ERR(mvm, "Can't create debugfs entity\n");
+  IWL_ERR(mvm, "Can't create debugfs entity\n");
 }
 
 void iwl_mvm_ax_softap_testmode_sta_add_debugfs(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                                                 struct ieee80211_sta* sta, struct dentry* dir) {
-    if (fw_has_capa(&IWL_MAC80211_GET_MVM(hw)->fw->ucode_capa, IWL_UCODE_TLV_CAPA_AX_SAP_TM_V2)) {
-        ax_softap_testmode_add_debugfs(hw, vif, sta, dir);
-    }
+  if (fw_has_capa(&IWL_MAC80211_GET_MVM(hw)->fw->ucode_capa, IWL_UCODE_TLV_CAPA_AX_SAP_TM_V2)) {
+    ax_softap_testmode_add_debugfs(hw, vif, sta, dir);
+  }
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/binding.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/binding.c
index 81ad2a4..85744de 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/binding.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/binding.c
@@ -33,142 +33,159 @@
  *****************************************************************************/
 
 #include <net/mac80211.h>
+
 #include "fw-api.h"
 #include "mvm.h"
 
 struct iwl_mvm_iface_iterator_data {
-    struct ieee80211_vif* ignore_vif;
-    int idx;
+  struct ieee80211_vif* ignore_vif;
+  int idx;
 
-    struct iwl_mvm_phy_ctxt* phyctxt;
+  struct iwl_mvm_phy_ctxt* phyctxt;
 
-    uint16_t ids[MAX_MACS_IN_BINDING];
-    uint16_t colors[MAX_MACS_IN_BINDING];
+  uint16_t ids[MAX_MACS_IN_BINDING];
+  uint16_t colors[MAX_MACS_IN_BINDING];
 };
 
 static int iwl_mvm_binding_cmd(struct iwl_mvm* mvm, uint32_t action,
                                struct iwl_mvm_iface_iterator_data* data) {
-    struct iwl_binding_cmd cmd;
-    struct iwl_mvm_phy_ctxt* phyctxt = data->phyctxt;
-    int i, ret;
-    uint32_t status;
-    int size;
+  struct iwl_binding_cmd cmd;
+  struct iwl_mvm_phy_ctxt* phyctxt = data->phyctxt;
+  int i, ret;
+  uint32_t status;
+  int size;
 
-    memset(&cmd, 0, sizeof(cmd));
+  memset(&cmd, 0, sizeof(cmd));
 
-    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
-        size = sizeof(cmd);
-        if (phyctxt->channel->band == NL80211_BAND_2GHZ || !iwl_mvm_is_cdb_supported(mvm)) {
-            cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
-        } else {
-            cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
-        }
+  if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+    size = sizeof(cmd);
+    if (phyctxt->channel->band == NL80211_BAND_2GHZ || !iwl_mvm_is_cdb_supported(mvm)) {
+      cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
     } else {
-        size = IWL_BINDING_CMD_SIZE_V1;
+      cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
     }
+  } else {
+    size = IWL_BINDING_CMD_SIZE_V1;
+  }
 
-    cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
-    cmd.action = cpu_to_le32(action);
-    cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
+  cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
+  cmd.action = cpu_to_le32(action);
+  cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
 
-    for (i = 0; i < MAX_MACS_IN_BINDING; i++) {
-        cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
-    }
-    for (i = 0; i < data->idx; i++) {
-        cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i], data->colors[i]));
-    }
+  for (i = 0; i < MAX_MACS_IN_BINDING; i++) {
+    cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+  }
+  for (i = 0; i < data->idx; i++) {
+    cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i], data->colors[i]));
+  }
 
-    status = 0;
-    ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, size, &cmd, &status);
-    if (ret) {
-        IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n", action, ret);
-        return ret;
-    }
-
-    if (status) {
-        IWL_ERR(mvm, "Binding command failed: %u\n", status);
-        ret = -EIO;
-    }
-
+  status = 0;
+  ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, size, &cmd, &status);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n", action, ret);
     return ret;
+  }
+
+  if (status) {
+    IWL_ERR(mvm, "Binding command failed: %u\n", status);
+    ret = -EIO;
+  }
+
+  return ret;
 }
 
 static void iwl_mvm_iface_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_iface_iterator_data* data = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_iface_iterator_data* data = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (vif == data->ignore_vif) { return; }
+  if (vif == data->ignore_vif) {
+    return;
+  }
 
-    if (mvmvif->phy_ctxt != data->phyctxt) { return; }
+  if (mvmvif->phy_ctxt != data->phyctxt) {
+    return;
+  }
 
-    if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING)) { return; }
+  if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING)) {
+    return;
+  }
 
-    data->ids[data->idx] = mvmvif->id;
-    data->colors[data->idx] = mvmvif->color;
-    data->idx++;
+  data->ids[data->idx] = mvmvif->id;
+  data->colors[data->idx] = mvmvif->color;
+  data->idx++;
 }
 
 static int iwl_mvm_binding_update(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                   struct iwl_mvm_phy_ctxt* phyctxt, bool add) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_iface_iterator_data data = {
-        .ignore_vif = vif,
-        .phyctxt = phyctxt,
-    };
-    uint32_t action = FW_CTXT_ACTION_MODIFY;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_iface_iterator_data data = {
+      .ignore_vif = vif,
+      .phyctxt = phyctxt,
+  };
+  uint32_t action = FW_CTXT_ACTION_MODIFY;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_iface_iterator, &data);
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_iface_iterator, &data);
 
-    /*
-     * If there are no other interfaces yet we
-     * need to create a new binding.
-     */
-    if (data.idx == 0) {
-        if (add) {
-            action = FW_CTXT_ACTION_ADD;
-        } else {
-            action = FW_CTXT_ACTION_REMOVE;
-        }
-    }
-
+  /*
+   * If there are no other interfaces yet we
+   * need to create a new binding.
+   */
+  if (data.idx == 0) {
     if (add) {
-        if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING)) { return -EINVAL; }
+      action = FW_CTXT_ACTION_ADD;
+    } else {
+      action = FW_CTXT_ACTION_REMOVE;
+    }
+  }
 
-        data.ids[data.idx] = mvmvif->id;
-        data.colors[data.idx] = mvmvif->color;
-        data.idx++;
+  if (add) {
+    if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING)) {
+      return -EINVAL;
     }
 
-    return iwl_mvm_binding_cmd(mvm, action, &data);
+    data.ids[data.idx] = mvmvif->id;
+    data.colors[data.idx] = mvmvif->color;
+    data.idx++;
+  }
+
+  return iwl_mvm_binding_cmd(mvm, action, &data);
 }
 
 int iwl_mvm_binding_add_vif(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) { return -EINVAL; }
+  if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) {
+    return -EINVAL;
+  }
 
-    /*
-     * Update SF - Disable if needed. if this fails, SF might still be on
-     * while many macs are bound, which is forbidden - so fail the binding.
-     */
-    if (iwl_mvm_sf_update(mvm, vif, false)) { return -EINVAL; }
+  /*
+   * Update SF - Disable if needed. if this fails, SF might still be on
+   * while many macs are bound, which is forbidden - so fail the binding.
+   */
+  if (iwl_mvm_sf_update(mvm, vif, false)) {
+    return -EINVAL;
+  }
 
-    return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
+  return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
 }
 
 int iwl_mvm_binding_remove_vif(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int ret;
 
-    if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) { return -EINVAL; }
+  if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) {
+    return -EINVAL;
+  }
 
-    ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+  ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
 
-    if (!ret)
-        if (iwl_mvm_sf_update(mvm, vif, true)) { IWL_ERR(mvm, "Failed to update SF state\n"); }
+  if (!ret)
+    if (iwl_mvm_sf_update(mvm, vif, true)) {
+      IWL_ERR(mvm, "Failed to update SF state\n");
+    }
 
-    return ret;
+  return ret;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/coex.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/coex.c
index 3b3346f..72f0056 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/coex.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/coex.c
@@ -33,11 +33,12 @@
  *
  *****************************************************************************/
 
+#include "fw/api/coex.h"
+
 #include <linux/etherdevice.h>
 #include <linux/ieee80211.h>
 #include <net/mac80211.h>
 
-#include "fw/api/coex.h"
 #include "iwl-debug.h"
 #include "iwl-modparams.h"
 #include "mvm.h"
@@ -108,678 +109,738 @@
 
 static enum iwl_bt_coex_lut_type iwl_get_coex_type(struct iwl_mvm* mvm,
                                                    const struct ieee80211_vif* vif) {
-    struct ieee80211_chanctx_conf* chanctx_conf;
-    enum iwl_bt_coex_lut_type ret;
-    uint16_t phy_ctx_id;
-    uint32_t primary_ch_phy_id, secondary_ch_phy_id;
+  struct ieee80211_chanctx_conf* chanctx_conf;
+  enum iwl_bt_coex_lut_type ret;
+  uint16_t phy_ctx_id;
+  uint32_t primary_ch_phy_id, secondary_ch_phy_id;
 
-    /*
-     * Checking that we hold mvm->mutex is a good idea, but the rate
-     * control can't acquire the mutex since it runs in Tx path.
-     * So this is racy in that case, but in the worst case, the AMPDU
-     * size limit will be wrong for a short time which is not a big
-     * issue.
-     */
+  /*
+   * Checking that we hold mvm->mutex is a good idea, but the rate
+   * control can't acquire the mutex since it runs in Tx path.
+   * So this is racy in that case, but in the worst case, the AMPDU
+   * size limit will be wrong for a short time which is not a big
+   * issue.
+   */
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    chanctx_conf = rcu_dereference(vif->chanctx_conf);
+  chanctx_conf = rcu_dereference(vif->chanctx_conf);
 
-    if (!chanctx_conf || chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
-        rcu_read_unlock();
-        return BT_COEX_INVALID_LUT;
-    }
-
-    ret = BT_COEX_TX_DIS_LUT;
-
-    if (mvm->cfg->bt_shared_single_ant) {
-        rcu_read_unlock();
-        return ret;
-    }
-
-    phy_ctx_id = *((uint16_t*)chanctx_conf->drv_priv);
-    primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id);
-    secondary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id);
-
-    if (primary_ch_phy_id == phy_ctx_id) {
-        ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
-    } else if (secondary_ch_phy_id == phy_ctx_id) {
-        ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
-    }
-    /* else - default = TX TX disallowed */
-
+  if (!chanctx_conf || chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
     rcu_read_unlock();
+    return BT_COEX_INVALID_LUT;
+  }
 
+  ret = BT_COEX_TX_DIS_LUT;
+
+  if (mvm->cfg->bt_shared_single_ant) {
+    rcu_read_unlock();
     return ret;
+  }
+
+  phy_ctx_id = *((uint16_t*)chanctx_conf->drv_priv);
+  primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id);
+  secondary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id);
+
+  if (primary_ch_phy_id == phy_ctx_id) {
+    ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
+  } else if (secondary_ch_phy_id == phy_ctx_id) {
+    ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
+  }
+  /* else - default = TX TX disallowed */
+
+  rcu_read_unlock();
+
+  return ret;
 }
 
 int iwl_mvm_send_bt_init_conf(struct iwl_mvm* mvm) {
-    struct iwl_bt_coex_cmd bt_cmd = {};
-    uint32_t mode;
+  struct iwl_bt_coex_cmd bt_cmd = {};
+  uint32_t mode;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
-        switch (mvm->bt_force_ant_mode) {
-        case BT_FORCE_ANT_BT:
-            mode = BT_COEX_BT;
-            break;
-        case BT_FORCE_ANT_WIFI:
-            mode = BT_COEX_WIFI;
-            break;
-        default:
-            WARN_ON(1);
-            mode = 0;
-        }
-
-        bt_cmd.mode = cpu_to_le32(mode);
-        goto send_cmd;
+  if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
+    switch (mvm->bt_force_ant_mode) {
+      case BT_FORCE_ANT_BT:
+        mode = BT_COEX_BT;
+        break;
+      case BT_FORCE_ANT_WIFI:
+        mode = BT_COEX_WIFI;
+        break;
+      default:
+        WARN_ON(1);
+        mode = 0;
     }
 
-    mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
     bt_cmd.mode = cpu_to_le32(mode);
+    goto send_cmd;
+  }
 
-    if (IWL_MVM_BT_COEX_SYNC2SCO) {
-        bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
-    }
+  mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
+  bt_cmd.mode = cpu_to_le32(mode);
 
-    if (iwl_mvm_is_mplut_supported(mvm)) {
-        bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
-    }
+  if (IWL_MVM_BT_COEX_SYNC2SCO) {
+    bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
+  }
 
-    bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
+  if (iwl_mvm_is_mplut_supported(mvm)) {
+    bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
+  }
+
+  bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
 
 send_cmd:
-    memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
-    memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
+  memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+  memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
 
-    return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
+  return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
 }
 
 static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm* mvm, uint8_t sta_id, bool enable) {
-    struct iwl_bt_coex_reduced_txp_update_cmd cmd = {};
-    struct iwl_mvm_sta* mvmsta;
-    uint32_t value;
-    int ret;
+  struct iwl_bt_coex_reduced_txp_update_cmd cmd = {};
+  struct iwl_mvm_sta* mvmsta;
+  uint32_t value;
+  int ret;
 
-    mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
-    if (!mvmsta) { return 0; }
+  mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+  if (!mvmsta) {
+    return 0;
+  }
 
-    /* nothing to do */
-    if (mvmsta->bt_reduced_txpower == enable) { return 0; }
+  /* nothing to do */
+  if (mvmsta->bt_reduced_txpower == enable) {
+    return 0;
+  }
 
-    value = mvmsta->sta_id;
+  value = mvmsta->sta_id;
 
-    if (enable) { value |= BT_REDUCED_TX_POWER_BIT; }
+  if (enable) {
+    value |= BT_REDUCED_TX_POWER_BIT;
+  }
 
-    IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", enable ? "en" : "dis", sta_id);
+  IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", enable ? "en" : "dis", sta_id);
 
-    cmd.reduced_txp = cpu_to_le32(value);
-    mvmsta->bt_reduced_txpower = enable;
+  cmd.reduced_txp = cpu_to_le32(value);
+  mvmsta->bt_reduced_txpower = enable;
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC, sizeof(cmd), &cmd);
+  ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC, sizeof(cmd), &cmd);
 
-    return ret;
+  return ret;
 }
 
 struct iwl_bt_iterator_data {
-    struct iwl_bt_coex_profile_notif* notif;
-    struct iwl_mvm* mvm;
-    struct ieee80211_chanctx_conf* primary;
-    struct ieee80211_chanctx_conf* secondary;
-    bool primary_ll;
-    uint8_t primary_load;
-    uint8_t secondary_load;
+  struct iwl_bt_coex_profile_notif* notif;
+  struct iwl_mvm* mvm;
+  struct ieee80211_chanctx_conf* primary;
+  struct ieee80211_chanctx_conf* secondary;
+  bool primary_ll;
+  uint8_t primary_load;
+  uint8_t secondary_load;
 };
 
 static inline void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                                      bool enable, int rssi) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    mvmvif->bf_data.last_bt_coex_event = rssi;
-    mvmvif->bf_data.bt_coex_max_thold = enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
-    mvmvif->bf_data.bt_coex_min_thold = enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
+  mvmvif->bf_data.last_bt_coex_event = rssi;
+  mvmvif->bf_data.bt_coex_max_thold = enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
+  mvmvif->bf_data.bt_coex_min_thold = enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
 }
 
 #define MVM_COEX_TCM_PERIOD (HZ * 10)
 
 static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm* mvm, struct iwl_bt_iterator_data* data) {
-    unsigned long now = jiffies;
+  unsigned long now = jiffies;
 
-    if (!time_after(now, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD)) { return; }
+  if (!time_after(now, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD)) {
+    return;
+  }
 
-    mvm->bt_coex_last_tcm_ts = now;
+  mvm->bt_coex_last_tcm_ts = now;
 
-    /* We assume here that we don't have more than 2 vifs on 2.4GHz */
+  /* We assume here that we don't have more than 2 vifs on 2.4GHz */
 
-    /* if the primary is low latency, it will stay primary */
-    if (data->primary_ll) { return; }
+  /* if the primary is low latency, it will stay primary */
+  if (data->primary_ll) {
+    return;
+  }
 
-    if (data->primary_load >= data->secondary_load) { return; }
+  if (data->primary_load >= data->secondary_load) {
+    return;
+  }
 
-    swap(data->primary, data->secondary);
+  swap(data->primary, data->secondary);
 }
 
 /* must be called under rcu_read_lock */
 static void iwl_mvm_bt_notif_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_bt_iterator_data* data = _data;
-    struct iwl_mvm* mvm = data->mvm;
-    struct ieee80211_chanctx_conf* chanctx_conf;
-    /* default smps_mode is AUTOMATIC - only used for client modes */
-    enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
-    uint32_t bt_activity_grading, min_ag_for_static_smps;
-    int ave_rssi;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_bt_iterator_data* data = _data;
+  struct iwl_mvm* mvm = data->mvm;
+  struct ieee80211_chanctx_conf* chanctx_conf;
+  /* default smps_mode is AUTOMATIC - only used for client modes */
+  enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+  uint32_t bt_activity_grading, min_ag_for_static_smps;
+  int ave_rssi;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    switch (vif->type) {
+  switch (vif->type) {
     case NL80211_IFTYPE_STATION:
-        break;
+      break;
     case NL80211_IFTYPE_AP:
-        if (!mvmvif->ap_ibss_active) { return; }
-        break;
+      if (!mvmvif->ap_ibss_active) {
+        return;
+      }
+      break;
     default:
-        return;
-    }
+      return;
+  }
 
-    chanctx_conf = rcu_dereference(vif->chanctx_conf);
+  chanctx_conf = rcu_dereference(vif->chanctx_conf);
 
-    /* If channel context is invalid or not on 2.4GHz .. */
-    if ((!chanctx_conf || chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) {
-        if (vif->type == NL80211_IFTYPE_STATION) {
-            /* ... relax constraints and disable rssi events */
-            iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
-            iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
-            iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-        }
-        return;
-    }
-
-    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2)) {
-        min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
-    } else {
-        min_ag_for_static_smps = BT_HIGH_TRAFFIC;
-    }
-
-    bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
-    if (bt_activity_grading >= min_ag_for_static_smps) {
-        smps_mode = IEEE80211_SMPS_STATIC;
-    } else if (bt_activity_grading >= BT_LOW_TRAFFIC) {
-        smps_mode = IEEE80211_SMPS_DYNAMIC;
-    }
-
-    /* relax SMPS constraints for next association */
-    if (!vif->bss_conf.assoc) { smps_mode = IEEE80211_SMPS_AUTOMATIC; }
-
-    if (mvmvif->phy_ctxt && (mvm->last_bt_notif.rrc_status & BIT(mvmvif->phy_ctxt->id))) {
-        smps_mode = IEEE80211_SMPS_AUTOMATIC;
-    }
-
-    IWL_DEBUG_COEX(data->mvm, "mac %d: bt_activity_grading %d smps_req %d\n", mvmvif->id,
-                   bt_activity_grading, smps_mode);
-
+  /* If channel context is invalid or not on 2.4GHz .. */
+  if ((!chanctx_conf || chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) {
     if (vif->type == NL80211_IFTYPE_STATION) {
-        iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
+      /* ... relax constraints and disable rssi events */
+      iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
+      iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
+      iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+    }
+    return;
+  }
+
+  if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2)) {
+    min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
+  } else {
+    min_ag_for_static_smps = BT_HIGH_TRAFFIC;
+  }
+
+  bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
+  if (bt_activity_grading >= min_ag_for_static_smps) {
+    smps_mode = IEEE80211_SMPS_STATIC;
+  } else if (bt_activity_grading >= BT_LOW_TRAFFIC) {
+    smps_mode = IEEE80211_SMPS_DYNAMIC;
+  }
+
+  /* relax SMPS constraints for next association */
+  if (!vif->bss_conf.assoc) {
+    smps_mode = IEEE80211_SMPS_AUTOMATIC;
+  }
+
+  if (mvmvif->phy_ctxt && (mvm->last_bt_notif.rrc_status & BIT(mvmvif->phy_ctxt->id))) {
+    smps_mode = IEEE80211_SMPS_AUTOMATIC;
+  }
+
+  IWL_DEBUG_COEX(data->mvm, "mac %d: bt_activity_grading %d smps_req %d\n", mvmvif->id,
+                 bt_activity_grading, smps_mode);
+
+  if (vif->type == NL80211_IFTYPE_STATION) {
+    iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
+  }
+
+  /* low latency is always primary */
+  if (iwl_mvm_vif_low_latency(mvmvif)) {
+    data->primary_ll = true;
+
+    data->secondary = data->primary;
+    data->primary = chanctx_conf;
+  }
+
+  if (vif->type == NL80211_IFTYPE_AP) {
+    if (!mvmvif->ap_ibss_active) {
+      return;
     }
 
-    /* low latency is always primary */
-    if (iwl_mvm_vif_low_latency(mvmvif)) {
-        data->primary_ll = true;
-
-        data->secondary = data->primary;
-        data->primary = chanctx_conf;
+    if (chanctx_conf == data->primary) {
+      return;
     }
 
-    if (vif->type == NL80211_IFTYPE_AP) {
-        if (!mvmvif->ap_ibss_active) { return; }
-
-        if (chanctx_conf == data->primary) { return; }
-
-        if (!data->primary_ll) {
-            /*
-             * downgrade the current primary no matter what its
-             * type is.
-             */
-            data->secondary = data->primary;
-            data->primary = chanctx_conf;
-        } else {
-            /* there is low latency vif - we will be secondary */
-            data->secondary = chanctx_conf;
-        }
-
-        if (data->primary == chanctx_conf) {
-            data->primary_load = mvm->tcm.result.load[mvmvif->id];
-        } else if (data->secondary == chanctx_conf) {
-            data->secondary_load = mvm->tcm.result.load[mvmvif->id];
-        }
-        return;
-    }
-
-    /*
-     * STA / P2P Client, try to be primary if first vif. If we are in low
-     * latency mode, we are already in primary and just don't do much
-     */
-    if (!data->primary || data->primary == chanctx_conf) {
-        data->primary = chanctx_conf;
-    } else if (!data->secondary)
-    /* if secondary is not NULL, it might be a GO */
-    {
-        data->secondary = chanctx_conf;
+    if (!data->primary_ll) {
+      /*
+       * downgrade the current primary no matter what its
+       * type is.
+       */
+      data->secondary = data->primary;
+      data->primary = chanctx_conf;
+    } else {
+      /* there is low latency vif - we will be secondary */
+      data->secondary = chanctx_conf;
     }
 
     if (data->primary == chanctx_conf) {
-        data->primary_load = mvm->tcm.result.load[mvmvif->id];
+      data->primary_load = mvm->tcm.result.load[mvmvif->id];
     } else if (data->secondary == chanctx_conf) {
-        data->secondary_load = mvm->tcm.result.load[mvmvif->id];
+      data->secondary_load = mvm->tcm.result.load[mvmvif->id];
     }
-    /*
-     * don't reduce the Tx power if one of these is true:
-     *  we are in LOOSE
-     *  single share antenna product
-     *  BT is inactive
-     *  we are not associated
-     */
-    if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || mvm->cfg->bt_shared_single_ant ||
-        !vif->bss_conf.assoc || le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
-        iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
-        iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-        return;
+    return;
+  }
+
+  /*
+   * STA / P2P Client, try to be primary if first vif. If we are in low
+   * latency mode, we are already in primary and just don't do much
+   */
+  if (!data->primary || data->primary == chanctx_conf) {
+    data->primary = chanctx_conf;
+  } else if (!data->secondary)
+  /* if secondary is not NULL, it might be a GO */
+  {
+    data->secondary = chanctx_conf;
+  }
+
+  if (data->primary == chanctx_conf) {
+    data->primary_load = mvm->tcm.result.load[mvmvif->id];
+  } else if (data->secondary == chanctx_conf) {
+    data->secondary_load = mvm->tcm.result.load[mvmvif->id];
+  }
+  /*
+   * don't reduce the Tx power if one of these is true:
+   *  we are in LOOSE
+   *  single share antenna product
+   *  BT is inactive
+   *  we are not associated
+   */
+  if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || mvm->cfg->bt_shared_single_ant ||
+      !vif->bss_conf.assoc || le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
+    iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
+    iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+    return;
+  }
+
+  /* try to get the avg rssi from fw */
+  ave_rssi = mvmvif->bf_data.ave_beacon_signal;
+
+  /* if the RSSI isn't valid, fake it is very low */
+  if (!ave_rssi) {
+    ave_rssi = -100;
+  }
+  if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
+    if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) {
+      IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
     }
-
-    /* try to get the avg rssi from fw */
-    ave_rssi = mvmvif->bf_data.ave_beacon_signal;
-
-    /* if the RSSI isn't valid, fake it is very low */
-    if (!ave_rssi) { ave_rssi = -100; }
-    if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
-        if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) {
-            IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-        }
-    } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
-        if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) {
-            IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-        }
+  } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
+    if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) {
+      IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
     }
+  }
 
-    /* Begin to monitor the RSSI: it may influence the reduced Tx power */
-    iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
+  /* Begin to monitor the RSSI: it may influence the reduced Tx power */
+  iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
 }
 
 static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm* mvm) {
-    struct iwl_bt_iterator_data data = {
-        .mvm = mvm,
-        .notif = &mvm->last_bt_notif,
-    };
-    struct iwl_bt_coex_ci_cmd cmd = {};
-    uint8_t ci_bw_idx;
+  struct iwl_bt_iterator_data data = {
+      .mvm = mvm,
+      .notif = &mvm->last_bt_notif,
+  };
+  struct iwl_bt_coex_ci_cmd cmd = {};
+  uint8_t ci_bw_idx;
 
-    /* Ignore updates if we are in force mode */
-    if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { return; }
+  /* Ignore updates if we are in force mode */
+  if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
+    return;
+  }
 
-    rcu_read_lock();
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_bt_notif_iterator, &data);
+  rcu_read_lock();
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_bt_notif_iterator, &data);
 
-    iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
+  iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
 
-    if (data.primary) {
-        struct ieee80211_chanctx_conf* chan = data.primary;
-        if (WARN_ON(!chan->def.chan)) {
-            rcu_read_unlock();
-            return;
-        }
-
-        if (chan->def.width < NL80211_CHAN_WIDTH_40) {
-            ci_bw_idx = 0;
-        } else {
-            if (chan->def.center_freq1 > chan->def.chan->center_freq) {
-                ci_bw_idx = 2;
-            } else {
-                ci_bw_idx = 1;
-            }
-        }
-
-        cmd.bt_primary_ci = iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-        cmd.primary_ch_phy_id = cpu_to_le32(*((uint16_t*)data.primary->drv_priv));
+  if (data.primary) {
+    struct ieee80211_chanctx_conf* chan = data.primary;
+    if (WARN_ON(!chan->def.chan)) {
+      rcu_read_unlock();
+      return;
     }
 
-    if (data.secondary) {
-        struct ieee80211_chanctx_conf* chan = data.secondary;
-        if (WARN_ON(!data.secondary->def.chan)) {
-            rcu_read_unlock();
-            return;
-        }
-
-        if (chan->def.width < NL80211_CHAN_WIDTH_40) {
-            ci_bw_idx = 0;
-        } else {
-            if (chan->def.center_freq1 > chan->def.chan->center_freq) {
-                ci_bw_idx = 2;
-            } else {
-                ci_bw_idx = 1;
-            }
-        }
-
-        cmd.bt_secondary_ci = iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-        cmd.secondary_ch_phy_id = cpu_to_le32(*((uint16_t*)data.secondary->drv_priv));
+    if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+      ci_bw_idx = 0;
+    } else {
+      if (chan->def.center_freq1 > chan->def.chan->center_freq) {
+        ci_bw_idx = 2;
+      } else {
+        ci_bw_idx = 1;
+      }
     }
 
-    rcu_read_unlock();
+    cmd.bt_primary_ci = iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+    cmd.primary_ch_phy_id = cpu_to_le32(*((uint16_t*)data.primary->drv_priv));
+  }
 
-    /* Don't spam the fw with the same command over and over */
-    if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
-        if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0, sizeof(cmd), &cmd)) {
-            IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
-        }
-        memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
+  if (data.secondary) {
+    struct ieee80211_chanctx_conf* chan = data.secondary;
+    if (WARN_ON(!data.secondary->def.chan)) {
+      rcu_read_unlock();
+      return;
     }
+
+    if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+      ci_bw_idx = 0;
+    } else {
+      if (chan->def.center_freq1 > chan->def.chan->center_freq) {
+        ci_bw_idx = 2;
+      } else {
+        ci_bw_idx = 1;
+      }
+    }
+
+    cmd.bt_secondary_ci = iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+    cmd.secondary_ch_phy_id = cpu_to_le32(*((uint16_t*)data.secondary->drv_priv));
+  }
+
+  rcu_read_unlock();
+
+  /* Don't spam the fw with the same command over and over */
+  if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
+    if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0, sizeof(cmd), &cmd)) {
+      IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
+    }
+    memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
+  }
 }
 
 void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_bt_coex_profile_notif* notif = (void*)pkt->data;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_bt_coex_profile_notif* notif = (void*)pkt->data;
 
-    IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
-    IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
-    IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", le32_to_cpu(notif->primary_ch_lut));
-    IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", le32_to_cpu(notif->secondary_ch_lut));
-    IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", le32_to_cpu(notif->bt_activity_grading));
+  IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
+  IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+  IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", le32_to_cpu(notif->primary_ch_lut));
+  IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", le32_to_cpu(notif->secondary_ch_lut));
+  IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", le32_to_cpu(notif->bt_activity_grading));
 
-    /* remember this notification for future use: rssi fluctuations */
-    memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
+  /* remember this notification for future use: rssi fluctuations */
+  memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
 
-    iwl_mvm_bt_coex_notif_handle(mvm);
+  iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
 void iwl_mvm_bt_rssi_event(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                            enum ieee80211_rssi_event_data rssi_event) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* Ignore updates if we are in force mode */
-    if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { return; }
+  /* Ignore updates if we are in force mode */
+  if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
+    return;
+  }
 
-    /*
-     * Rssi update while not associated - can happen since the statistics
-     * are handled asynchronously
-     */
-    if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) { return; }
+  /*
+   * Rssi update while not associated - can happen since the statistics
+   * are handled asynchronously
+   */
+  if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
+    return;
+  }
 
-    /* No BT - reports should be disabled */
-    if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) { return; }
+  /* No BT - reports should be disabled */
+  if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
+    return;
+  }
 
-    IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
-                   rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
+  IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
+                 rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
 
-    /*
-     * Check if rssi is good enough for reduced Tx power, but not in loose
-     * scheme.
-     */
-    if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
-        iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT) {
-        ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
-    } else {
-        ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
-    }
+  /*
+   * Check if rssi is good enough for reduced Tx power, but not in loose
+   * scheme.
+   */
+  if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+      iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT) {
+    ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
+  } else {
+    ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
+  }
 
-    if (ret) { IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); }
+  if (ret) {
+    IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
+  }
 }
 
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
 #define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
 
 uint16_t iwl_mvm_coex_agg_time_limit(struct iwl_mvm* mvm, struct ieee80211_sta* sta) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-    struct iwl_mvm_phy_ctxt* phy_ctxt = mvmvif->phy_ctxt;
-    enum iwl_bt_coex_lut_type lut_type;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+  struct iwl_mvm_phy_ctxt* phy_ctxt = mvmvif->phy_ctxt;
+  enum iwl_bt_coex_lut_type lut_type;
 
-    if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) { return LINK_QUAL_AGG_TIME_LIMIT_DEF; }
+  if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) {
+    return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+  }
 
 #ifdef CPTCFG_IWLWIFI_FRQ_MGR
-    /* 2G coex */
-    if (mvm->coex_2g_enabled) { return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT; }
-#endif
-    if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC) {
-        return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-    }
-
-    lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
-
-    if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT) {
-        return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-    }
-
-    /* tight coex, high bt traffic, reduce AGG time limit */
+  /* 2G coex */
+  if (mvm->coex_2g_enabled) {
     return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
+  }
+#endif
+  if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC) {
+    return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+  }
+
+  lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+
+  if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT) {
+    return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+  }
+
+  /* tight coex, high bt traffic, reduce AGG time limit */
+  return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
 }
 
 bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm* mvm, struct ieee80211_sta* sta) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-    struct iwl_mvm_phy_ctxt* phy_ctxt = mvmvif->phy_ctxt;
-    enum iwl_bt_coex_lut_type lut_type;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+  struct iwl_mvm_phy_ctxt* phy_ctxt = mvmvif->phy_ctxt;
+  enum iwl_bt_coex_lut_type lut_type;
 
-    if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) { return true; }
+  if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) {
+    return true;
+  }
 
-    if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC) { return true; }
+  if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC) {
+    return true;
+  }
 
-    /*
-     * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
-     * since BT is already killed.
-     * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
-     * we Tx.
-     * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
-     */
-    lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
-    return lut_type != BT_COEX_LOOSE_LUT;
+  /*
+   * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
+   * since BT is already killed.
+   * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
+   * we Tx.
+   * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
+   */
+  lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+  return lut_type != BT_COEX_LOOSE_LUT;
 }
 
 bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm* mvm, uint8_t ant) {
-    /* there is no other antenna, shared antenna is always available */
-    if (mvm->cfg->bt_shared_single_ant) { return true; }
+  /* there is no other antenna, shared antenna is always available */
+  if (mvm->cfg->bt_shared_single_ant) {
+    return true;
+  }
 
-    if (ant & mvm->cfg->non_shared_ant) { return true; }
+  if (ant & mvm->cfg->non_shared_ant) {
+    return true;
+  }
 
-    return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
+  return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
 }
 
 bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm* mvm) {
-    /* there is no other antenna, shared antenna is always available */
-    if (mvm->cfg->bt_shared_single_ant) { return true; }
+  /* there is no other antenna, shared antenna is always available */
+  if (mvm->cfg->bt_shared_single_ant) {
+    return true;
+  }
 
-    return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
+  return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
 }
 
 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm* mvm, enum nl80211_band band) {
-    uint32_t bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+  uint32_t bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 
-    if (band != NL80211_BAND_2GHZ) { return false; }
+  if (band != NL80211_BAND_2GHZ) {
+    return false;
+  }
 
-    return bt_activity >= BT_LOW_TRAFFIC;
+  return bt_activity >= BT_LOW_TRAFFIC;
 }
 
 uint8_t iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm* mvm, uint8_t enabled_ants) {
-    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) &&
-        (mvm->cfg->non_shared_ant & enabled_ants)) {
-        return mvm->cfg->non_shared_ant;
-    }
+  if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) &&
+      (mvm->cfg->non_shared_ant & enabled_ants)) {
+    return mvm->cfg->non_shared_ant;
+  }
 
-    return first_antenna(enabled_ants);
+  return first_antenna(enabled_ants);
 }
 
 uint8_t iwl_mvm_bt_coex_tx_prio(struct iwl_mvm* mvm, struct ieee80211_hdr* hdr,
                                 struct ieee80211_tx_info* info, uint8_t ac) {
-    __le16 fc = hdr->frame_control;
-    bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);
+  __le16 fc = hdr->frame_control;
+  bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);
 
-    if (info->band != NL80211_BAND_2GHZ) { return 0; }
-
-    if (unlikely(mvm->bt_tx_prio)) { return mvm->bt_tx_prio - 1; }
-
-    if (likely(ieee80211_is_data(fc))) {
-        if (likely(ieee80211_is_data_qos(fc))) {
-            switch (ac) {
-            case IEEE80211_AC_BE:
-                return mplut_enabled ? 1 : 0;
-            case IEEE80211_AC_VI:
-                return mplut_enabled ? 2 : 3;
-            case IEEE80211_AC_VO:
-                return 3;
-            default:
-                return 0;
-            }
-        } else if (is_multicast_ether_addr(hdr->addr1)) {
-            return 3;
-        } else {
-            return 0;
-        }
-    } else if (ieee80211_is_mgmt(fc)) {
-        return ieee80211_is_disassoc(fc) ? 0 : 3;
-    } else if (ieee80211_is_ctl(fc)) {
-        /* ignore cfend and cfendack frames as we never send those */
-        return 3;
-    }
-
+  if (info->band != NL80211_BAND_2GHZ) {
     return 0;
+  }
+
+  if (unlikely(mvm->bt_tx_prio)) {
+    return mvm->bt_tx_prio - 1;
+  }
+
+  if (likely(ieee80211_is_data(fc))) {
+    if (likely(ieee80211_is_data_qos(fc))) {
+      switch (ac) {
+        case IEEE80211_AC_BE:
+          return mplut_enabled ? 1 : 0;
+        case IEEE80211_AC_VI:
+          return mplut_enabled ? 2 : 3;
+        case IEEE80211_AC_VO:
+          return 3;
+        default:
+          return 0;
+      }
+    } else if (is_multicast_ether_addr(hdr->addr1)) {
+      return 3;
+    } else {
+      return 0;
+    }
+  } else if (ieee80211_is_mgmt(fc)) {
+    return ieee80211_is_disassoc(fc) ? 0 : 3;
+  } else if (ieee80211_is_ctl(fc)) {
+    /* ignore cfend and cfendack frames as we never send those */
+    return 3;
+  }
+
+  return 0;
 }
 
-void iwl_mvm_bt_coex_vif_change(struct iwl_mvm* mvm) {
-    iwl_mvm_bt_coex_notif_handle(mvm);
-}
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm* mvm) { iwl_mvm_bt_coex_notif_handle(mvm); }
 
 #ifdef CPTCFG_IWLWIFI_LTE_COEX
 int iwl_mvm_send_lte_coex_config_cmd(struct iwl_mvm* mvm) {
-    const struct iwl_lte_coex_config_cmd* cmd = &mvm->lte_state.config;
+  const struct iwl_lte_coex_config_cmd* cmd = &mvm->lte_state.config;
 
-    if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_LTE_COEX)) {
-        IWL_DEBUG_COEX(mvm, "LTE-Coex not supported!\n");
-        return -EOPNOTSUPP;
-    }
+  if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_LTE_COEX)) {
+    IWL_DEBUG_COEX(mvm, "LTE-Coex not supported!\n");
+    return -EOPNOTSUPP;
+  }
 
-    IWL_DEBUG_COEX(mvm,
-                   "LTE-Coex: lte_coex_config_cmd:\n"
-                   "\tstate: %d\n\tband: %d\n\tchan: %d\n",
-                   le32_to_cpu(cmd->lte_state), le32_to_cpu(cmd->lte_band),
-                   le32_to_cpu(cmd->lte_chan));
+  IWL_DEBUG_COEX(mvm,
+                 "LTE-Coex: lte_coex_config_cmd:\n"
+                 "\tstate: %d\n\tband: %d\n\tchan: %d\n",
+                 le32_to_cpu(cmd->lte_state), le32_to_cpu(cmd->lte_band),
+                 le32_to_cpu(cmd->lte_chan));
 
-    IWL_DEBUG_COEX(mvm,
-                   "\ttx safe freq min: %d\n\ttx safe freq max: %d\n"
-                   "\trx safe freq min: %d\n\trx safe freq max: %d\n",
-                   le32_to_cpu(cmd->tx_safe_freq_min), le32_to_cpu(cmd->tx_safe_freq_max),
-                   le32_to_cpu(cmd->rx_safe_freq_min), le32_to_cpu(cmd->rx_safe_freq_max));
+  IWL_DEBUG_COEX(mvm,
+                 "\ttx safe freq min: %d\n\ttx safe freq max: %d\n"
+                 "\trx safe freq min: %d\n\trx safe freq max: %d\n",
+                 le32_to_cpu(cmd->tx_safe_freq_min), le32_to_cpu(cmd->tx_safe_freq_max),
+                 le32_to_cpu(cmd->rx_safe_freq_min), le32_to_cpu(cmd->rx_safe_freq_max));
 
-    return iwl_mvm_send_cmd_pdu(mvm, LTE_COEX_CONFIG_CMD, 0, sizeof(*cmd), cmd);
+  return iwl_mvm_send_cmd_pdu(mvm, LTE_COEX_CONFIG_CMD, 0, sizeof(*cmd), cmd);
 }
 
 int iwl_mvm_send_lte_coex_wifi_reported_channel_cmd(struct iwl_mvm* mvm) {
-    const struct iwl_lte_coex_wifi_reported_channel_cmd* cmd = &mvm->lte_state.rprtd_chan;
+  const struct iwl_lte_coex_wifi_reported_channel_cmd* cmd = &mvm->lte_state.rprtd_chan;
 
-    if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_LTE_COEX)) {
-        IWL_DEBUG_COEX(mvm, "LTE-Coex not supported!\n");
-        return -EOPNOTSUPP;
-    }
+  if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_LTE_COEX)) {
+    IWL_DEBUG_COEX(mvm, "LTE-Coex not supported!\n");
+    return -EOPNOTSUPP;
+  }
 
-    IWL_DEBUG_COEX(mvm,
-                   "LTE-COEX: lte_coex_wifi_reported_channel_cmd:\n"
-                   "\tchannel: %d\n\tbandwidth: %d\n",
-                   le32_to_cpu(cmd->channel), le32_to_cpu(cmd->bandwidth));
+  IWL_DEBUG_COEX(mvm,
+                 "LTE-COEX: lte_coex_wifi_reported_channel_cmd:\n"
+                 "\tchannel: %d\n\tbandwidth: %d\n",
+                 le32_to_cpu(cmd->channel), le32_to_cpu(cmd->bandwidth));
 
-    return iwl_mvm_send_cmd_pdu(mvm, LTE_COEX_WIFI_REPORTED_CHANNEL_CMD, 0, sizeof(*cmd), cmd);
+  return iwl_mvm_send_cmd_pdu(mvm, LTE_COEX_WIFI_REPORTED_CHANNEL_CMD, 0, sizeof(*cmd), cmd);
 }
 
 int iwl_mvm_send_lte_coex_static_params_cmd(struct iwl_mvm* mvm) {
-    const struct iwl_lte_coex_static_params_cmd* cmd = &mvm->lte_state.stat;
+  const struct iwl_lte_coex_static_params_cmd* cmd = &mvm->lte_state.stat;
 
-    if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_LTE_COEX)) {
-        IWL_DEBUG_COEX(mvm, "LTE-Coex not supported!\n");
-        return -EOPNOTSUPP;
-    }
+  if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_LTE_COEX)) {
+    IWL_DEBUG_COEX(mvm, "LTE-Coex not supported!\n");
+    return -EOPNOTSUPP;
+  }
 
-    IWL_DEBUG_COEX(mvm,
-                   "LTE-COEX: lte_coex_static_params_cmd:\n"
-                   "\tmfu config[0]: %d\n\ttx power[0]: %d\n",
-                   le32_to_cpu(cmd->mfu_config[0]), cmd->tx_power_in_dbm[0]);
+  IWL_DEBUG_COEX(mvm,
+                 "LTE-COEX: lte_coex_static_params_cmd:\n"
+                 "\tmfu config[0]: %d\n\ttx power[0]: %d\n",
+                 le32_to_cpu(cmd->mfu_config[0]), cmd->tx_power_in_dbm[0]);
 
-    return iwl_mvm_send_cmd_pdu(mvm, LTE_COEX_STATIC_PARAMS_CMD, 0, sizeof(*cmd), cmd);
+  return iwl_mvm_send_cmd_pdu(mvm, LTE_COEX_STATIC_PARAMS_CMD, 0, sizeof(*cmd), cmd);
 }
 
 int iwl_mvm_send_lte_fine_tuning_params_cmd(struct iwl_mvm* mvm) {
-    const struct iwl_lte_coex_fine_tuning_params_cmd* cmd = &mvm->lte_state.ft;
+  const struct iwl_lte_coex_fine_tuning_params_cmd* cmd = &mvm->lte_state.ft;
 
-    if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_LTE_COEX)) {
-        IWL_DEBUG_COEX(mvm, "LTE-Coex not supported!\n");
-        return -EOPNOTSUPP;
-    }
+  if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_LTE_COEX)) {
+    IWL_DEBUG_COEX(mvm, "LTE-Coex not supported!\n");
+    return -EOPNOTSUPP;
+  }
 
-    IWL_DEBUG_COEX(mvm,
-                   "LTE-COEX: lte_fine_tuning_params_cmd:\n"
-                   "\trx protection assert timing: %d\n",
-                   le32_to_cpu(cmd->rx_protection_assert_timing));
+  IWL_DEBUG_COEX(mvm,
+                 "LTE-COEX: lte_fine_tuning_params_cmd:\n"
+                 "\trx protection assert timing: %d\n",
+                 le32_to_cpu(cmd->rx_protection_assert_timing));
 
-    IWL_DEBUG_COEX(mvm,
-                   "\ttx protection assert timing: %d\n"
-                   "\trx protection timeout: %d\n\tmin tx power: %d\n",
-                   le32_to_cpu(cmd->tx_protection_assert_timing),
-                   le32_to_cpu(cmd->rx_protection_timeout), le32_to_cpu(cmd->min_tx_power));
+  IWL_DEBUG_COEX(mvm,
+                 "\ttx protection assert timing: %d\n"
+                 "\trx protection timeout: %d\n\tmin tx power: %d\n",
+                 le32_to_cpu(cmd->tx_protection_assert_timing),
+                 le32_to_cpu(cmd->rx_protection_timeout), le32_to_cpu(cmd->min_tx_power));
 
-    IWL_DEBUG_COEX(mvm,
-                   "\tul load uapsd threshold: %d\n"
-                   "\trx failure during ul uapsd threshold: %d\n",
-                   le32_to_cpu(cmd->lte_ul_load_uapsd_threshold),
-                   le32_to_cpu(cmd->rx_failure_during_ul_uapsd_threshold));
+  IWL_DEBUG_COEX(mvm,
+                 "\tul load uapsd threshold: %d\n"
+                 "\trx failure during ul uapsd threshold: %d\n",
+                 le32_to_cpu(cmd->lte_ul_load_uapsd_threshold),
+                 le32_to_cpu(cmd->rx_failure_during_ul_uapsd_threshold));
 
-    IWL_DEBUG_COEX(mvm,
-                   "\trx failure during ul scan compensation threshold: %d\n"
-                   "\trx duration for ack protection: %d\n",
-                   le32_to_cpu(cmd->rx_failure_during_ul_sc_threshold),
-                   le32_to_cpu(cmd->rx_duration_for_ack_protection_us));
+  IWL_DEBUG_COEX(mvm,
+                 "\trx failure during ul scan compensation threshold: %d\n"
+                 "\trx duration for ack protection: %d\n",
+                 le32_to_cpu(cmd->rx_failure_during_ul_sc_threshold),
+                 le32_to_cpu(cmd->rx_duration_for_ack_protection_us));
 
-    IWL_DEBUG_COEX(mvm,
-                   "\tbeacon failure during ul counter: %d\n"
-                   "\tdtim failure during ul counter: %d\n",
-                   le32_to_cpu(cmd->beacon_failure_during_ul_counter),
-                   le32_to_cpu(cmd->dtim_failure_during_ul_counter));
+  IWL_DEBUG_COEX(mvm,
+                 "\tbeacon failure during ul counter: %d\n"
+                 "\tdtim failure during ul counter: %d\n",
+                 le32_to_cpu(cmd->beacon_failure_during_ul_counter),
+                 le32_to_cpu(cmd->dtim_failure_during_ul_counter));
 
-    return iwl_mvm_send_cmd_pdu(mvm, LTE_COEX_FINE_TUNING_PARAMS_CMD, 0, sizeof(*cmd), cmd);
+  return iwl_mvm_send_cmd_pdu(mvm, LTE_COEX_FINE_TUNING_PARAMS_CMD, 0, sizeof(*cmd), cmd);
 }
 
 int iwl_mvm_send_lte_sps_cmd(struct iwl_mvm* mvm) {
-    const struct iwl_lte_coex_sps_cmd* cmd = &mvm->lte_state.sps;
+  const struct iwl_lte_coex_sps_cmd* cmd = &mvm->lte_state.sps;
 
-    if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_LTE_COEX)) {
-        IWL_DEBUG_COEX(mvm, "LTE-Coex not supported!\n");
-        return -EOPNOTSUPP;
-    }
+  if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_LTE_COEX)) {
+    IWL_DEBUG_COEX(mvm, "LTE-Coex not supported!\n");
+    return -EOPNOTSUPP;
+  }
 
-    IWL_DEBUG_COEX(mvm, "LTE-COEX: lte_sps_cmd:\n\tsps info: %d\n",
-                   le32_to_cpu(cmd->lte_semi_persistent_info));
+  IWL_DEBUG_COEX(mvm, "LTE-COEX: lte_sps_cmd:\n\tsps info: %d\n",
+                 le32_to_cpu(cmd->lte_semi_persistent_info));
 
-    return iwl_mvm_send_cmd_pdu(mvm, LTE_COEX_SPS_CMD, 0, sizeof(*cmd), cmd);
+  return iwl_mvm_send_cmd_pdu(mvm, LTE_COEX_SPS_CMD, 0, sizeof(*cmd), cmd);
 }
 
 void iwl_mvm_reset_lte_state(struct iwl_mvm* mvm) {
-    struct lte_coex_state* lte_state = &mvm->lte_state;
+  struct lte_coex_state* lte_state = &mvm->lte_state;
 
-    lte_state->state = LTE_OFF;
-    lte_state->has_config = 0;
-    lte_state->has_rprtd_chan = 0;
-    lte_state->has_sps = 0;
-    lte_state->has_ft = 0;
+  lte_state->state = LTE_OFF;
+  lte_state->has_config = 0;
+  lte_state->has_rprtd_chan = 0;
+  lte_state->has_sps = 0;
+  lte_state->has_ft = 0;
 }
 
 void iwl_mvm_send_lte_commands(struct iwl_mvm* mvm) {
-    struct lte_coex_state* lte_state = &mvm->lte_state;
+  struct lte_coex_state* lte_state = &mvm->lte_state;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (lte_state->has_static) { iwl_mvm_send_lte_coex_static_params_cmd(mvm); }
-    if (lte_state->has_rprtd_chan) { iwl_mvm_send_lte_coex_wifi_reported_channel_cmd(mvm); }
-    if (lte_state->state != LTE_OFF) { iwl_mvm_send_lte_coex_config_cmd(mvm); }
-    if (lte_state->has_sps) { iwl_mvm_send_lte_sps_cmd(mvm); }
-    if (lte_state->has_ft) { iwl_mvm_send_lte_fine_tuning_params_cmd(mvm); }
+  if (lte_state->has_static) {
+    iwl_mvm_send_lte_coex_static_params_cmd(mvm);
+  }
+  if (lte_state->has_rprtd_chan) {
+    iwl_mvm_send_lte_coex_wifi_reported_channel_cmd(mvm);
+  }
+  if (lte_state->state != LTE_OFF) {
+    iwl_mvm_send_lte_coex_config_cmd(mvm);
+  }
+  if (lte_state->has_sps) {
+    iwl_mvm_send_lte_sps_cmd(mvm);
+  }
+  if (lte_state->has_ft) {
+    iwl_mvm_send_lte_fine_tuning_params_cmd(mvm);
+  }
 }
 #endif /* CPTCFG_IWLWIFI_LTE_COEX */
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/constants.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/constants.h
index 67e7f7b..6ef90bfb 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/constants.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/constants.h
@@ -48,9 +48,9 @@
 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 1
 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
-#define IWL_MVM_UAPSD_QUEUES                                                   \
-    (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO | IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
-     IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+#define IWL_MVM_UAPSD_QUEUES                                                 \
+  (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO | IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
+   IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
 #define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
 #define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8
 #define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30
@@ -142,9 +142,9 @@
 #define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS (mvm->trans->dbg_cfg.MVM_PS_HEAVY_TX_THLD_PACKETS)
 #define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS (mvm->trans->dbg_cfg.MVM_PS_HEAVY_RX_THLD_PACKETS)
 #define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS \
-    (mvm->trans->dbg_cfg.MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS)
+  (mvm->trans->dbg_cfg.MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS)
 #define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS \
-    (mvm->trans->dbg_cfg.MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS)
+  (mvm->trans->dbg_cfg.MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS)
 #define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT (mvm->trans->dbg_cfg.MVM_PS_HEAVY_TX_THLD_PERCENT)
 #define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT (mvm->trans->dbg_cfg.MVM_PS_HEAVY_RX_THLD_PERCENT)
 #define IWL_MVM_PS_SNOOZE_INTERVAL (mvm->trans->dbg_cfg.MVM_PS_SNOOZE_INTERVAL)
@@ -160,7 +160,7 @@
 #define IWL_MVM_BT_COEX_MPLUT_REG0 (mvm->trans->dbg_cfg.MVM_BT_COEX_MPLUT_REG0)
 #define IWL_MVM_BT_COEX_MPLUT_REG1 (mvm->trans->dbg_cfg.MVM_BT_COEX_MPLUT_REG1)
 #define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS \
-    (mvm->trans->dbg_cfg.MVM_BT_COEX_ANTENNA_COUPLING_THRS)
+  (mvm->trans->dbg_cfg.MVM_BT_COEX_ANTENNA_COUPLING_THRS)
 #define IWL_MVM_FW_MCAST_FILTER_PASS_ALL (mvm->trans->dbg_cfg.MVM_FW_MCAST_FILTER_PASS_ALL)
 #define IWL_MVM_FW_BCAST_FILTER_PASS_ALL (mvm->trans->dbg_cfg.MVM_FW_BCAST_FILTER_PASS_ALL)
 #define IWL_MVM_TOF_IS_RESPONDER (mvm->trans->dbg_cfg.MVM_TOF_IS_RESPONDER)
@@ -183,7 +183,7 @@
 #define IWL_MVM_DYNQUOTA_START_PERCENT (mvm->trans->dbg_cfg.MVM_DYNQUOTA_START_PERCENT)
 #define IWL_MVM_DYNQUOTA_INC_HIGH_PERCENT (mvm->trans->dbg_cfg.MVM_DYNQUOTA_INC_HIGH_PERCENT)
 #define IWL_MVM_LOWLAT_QUOTA_MIN_PCT_P2PCLIENT \
-    (mvm->trans->dbg_cfg.MVM_LOWLAT_QUOTA_MIN_PCT_P2PCLIENT)
+  (mvm->trans->dbg_cfg.MVM_LOWLAT_QUOTA_MIN_PCT_P2PCLIENT)
 #define IWL_MVM_LOWLAT_QUOTA_MIN_PCT_P2PGO (mvm->trans->dbg_cfg.MVM_LOWLAT_QUOTA_MIN_PCT_P2PGO)
 #endif /* CPTCFG_IWLMVM_ADVANCED_QUOTA_MGMT */
 #define IWL_MVM_QUOTA_THRESHOLD (mvm->trans->dbg_cfg.MVM_QUOTA_THRESHOLD)
@@ -192,14 +192,14 @@
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE (mvm->trans->dbg_cfg.MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE)
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE (mvm->trans->dbg_cfg.MVM_RS_HT_VHT_RETRIES_PER_RATE)
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW \
-    (mvm->trans->dbg_cfg.MVM_RS_HT_VHT_RETRIES_PER_RATE_TW)
+  (mvm->trans->dbg_cfg.MVM_RS_HT_VHT_RETRIES_PER_RATE_TW)
 #define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES (mvm->trans->dbg_cfg.MVM_RS_INITIAL_MIMO_NUM_RATES)
 #define IWL_MVM_RS_INITIAL_SISO_NUM_RATES (mvm->trans->dbg_cfg.MVM_RS_INITIAL_SISO_NUM_RATES)
 #define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES (mvm->trans->dbg_cfg.MVM_RS_INITIAL_LEGACY_NUM_RATES)
 #define IWL_MVM_RS_INITIAL_LEGACY_RETRIES (mvm->trans->dbg_cfg.MVM_RS_INITIAL_LEGACY_RETRIES)
 #define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES (mvm->trans->dbg_cfg.MVM_RS_SECONDARY_LEGACY_RETRIES)
 #define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES \
-    (mvm->trans->dbg_cfg.MVM_RS_SECONDARY_LEGACY_NUM_RATES)
+  (mvm->trans->dbg_cfg.MVM_RS_SECONDARY_LEGACY_NUM_RATES)
 #define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES (mvm->trans->dbg_cfg.MVM_RS_SECONDARY_SISO_NUM_RATES)
 #define IWL_MVM_RS_SECONDARY_SISO_RETRIES (mvm->trans->dbg_cfg.MVM_RS_SECONDARY_SISO_RETRIES)
 #define IWL_MVM_RS_RATE_MIN_FAILURE_TH (mvm->trans->dbg_cfg.MVM_RS_RATE_MIN_FAILURE_TH)
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/d3.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/d3.c
index c9f1396..b982dc8 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/d3.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/d3.c
@@ -41,548 +41,587 @@
 #include <net/cfg80211.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
+
 #include "fw-api.h"
 #include "iwl-modparams.h"
 #include "mvm.h"
 
 void iwl_mvm_set_rekey_data(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                             struct cfg80211_gtk_rekey_data* data) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (iwlwifi_mod_params.swcrypto) { return; }
+  if (iwlwifi_mod_params.swcrypto) {
+    return;
+  }
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
-    memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
-    mvmvif->rekey_data.replay_ctr = cpu_to_le64(be64_to_cpup((__be64*)data->replay_ctr));
-    mvmvif->rekey_data.valid = true;
+  memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
+  memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
+  mvmvif->rekey_data.replay_ctr = cpu_to_le64(be64_to_cpup((__be64*)data->replay_ctr));
+  mvmvif->rekey_data.valid = true;
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
 void iwl_mvm_ipv6_addr_change(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                               struct inet6_dev* idev) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct inet6_ifaddr* ifa;
-    int idx = 0;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct inet6_ifaddr* ifa;
+  int idx = 0;
 
-    memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));
+  memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));
 
-    read_lock_bh(&idev->lock);
-    list_for_each_entry(ifa, &idev->addr_list, if_list) {
-        mvmvif->target_ipv6_addrs[idx] = ifa->addr;
-        if (ifa->flags & IFA_F_TENTATIVE) { __set_bit(idx, mvmvif->tentative_addrs); }
-        idx++;
-        if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) { break; }
+  read_lock_bh(&idev->lock);
+  list_for_each_entry(ifa, &idev->addr_list, if_list) {
+    mvmvif->target_ipv6_addrs[idx] = ifa->addr;
+    if (ifa->flags & IFA_F_TENTATIVE) {
+      __set_bit(idx, mvmvif->tentative_addrs);
     }
-    read_unlock_bh(&idev->lock);
+    idx++;
+    if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) {
+      break;
+    }
+  }
+  read_unlock_bh(&idev->lock);
 
-    mvmvif->num_target_ipv6_addrs = idx;
+  mvmvif->num_target_ipv6_addrs = idx;
 }
 #endif
 
 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw* hw, struct ieee80211_vif* vif, int idx) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    mvmvif->tx_key_idx = idx;
+  mvmvif->tx_key_idx = idx;
 }
 
 static void iwl_mvm_convert_p1k(uint16_t* p1k, __le16* out) {
-    int i;
+  int i;
 
-    for (i = 0; i < IWL_P1K_SIZE; i++) {
-        out[i] = cpu_to_le16(p1k[i]);
-    }
+  for (i = 0; i < IWL_P1K_SIZE; i++) {
+    out[i] = cpu_to_le16(p1k[i]);
+  }
 }
 
 static const uint8_t* iwl_mvm_find_max_pn(struct ieee80211_key_conf* key,
                                           struct iwl_mvm_key_pn* ptk_pn,
                                           struct ieee80211_key_seq* seq, int tid, int queues) {
-    const uint8_t* ret = seq->ccmp.pn;
-    int i;
+  const uint8_t* ret = seq->ccmp.pn;
+  int i;
 
-    /* get the PN from mac80211, used on the default queue */
-    ieee80211_get_key_rx_seq(key, tid, seq);
+  /* get the PN from mac80211, used on the default queue */
+  ieee80211_get_key_rx_seq(key, tid, seq);
 
-    /* and use the internal data for the other queues */
-    for (i = 1; i < queues; i++) {
-        const uint8_t* tmp = ptk_pn->q[i].pn[tid];
+  /* and use the internal data for the other queues */
+  for (i = 1; i < queues; i++) {
+    const uint8_t* tmp = ptk_pn->q[i].pn[tid];
 
-        if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0) { ret = tmp; }
+    if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0) {
+      ret = tmp;
     }
+  }
 
-    return ret;
+  return ret;
 }
 
 struct wowlan_key_data {
-    struct iwl_wowlan_rsc_tsc_params_cmd* rsc_tsc;
-    struct iwl_wowlan_tkip_params_cmd* tkip;
-    bool error, use_rsc_tsc, use_tkip, configure_keys;
-    int wep_key_idx;
+  struct iwl_wowlan_rsc_tsc_params_cmd* rsc_tsc;
+  struct iwl_wowlan_tkip_params_cmd* tkip;
+  bool error, use_rsc_tsc, use_tkip, configure_keys;
+  int wep_key_idx;
 };
 
 static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                                         struct ieee80211_sta* sta, struct ieee80211_key_conf* key,
                                         void* _data) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct wowlan_key_data* data = _data;
-    struct aes_sc *aes_sc, *aes_tx_sc = NULL;
-    struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
-    struct iwl_p1k_cache* rx_p1ks;
-    uint8_t* rx_mic_key;
-    struct ieee80211_key_seq seq;
-    uint32_t cur_rx_iv32 = 0;
-    uint16_t p1k[IWL_P1K_SIZE];
-    int ret, i;
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct wowlan_key_data* data = _data;
+  struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+  struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+  struct iwl_p1k_cache* rx_p1ks;
+  uint8_t* rx_mic_key;
+  struct ieee80211_key_seq seq;
+  uint32_t cur_rx_iv32 = 0;
+  uint16_t p1k[IWL_P1K_SIZE];
+  int ret, i;
 
-    switch (key->cipher) {
+  switch (key->cipher) {
     case WLAN_CIPHER_SUITE_WEP40:
     case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
-        struct {
-            struct iwl_mvm_wep_key_cmd wep_key_cmd;
-            struct iwl_mvm_wep_key wep_key;
-        } __packed wkc = {
-            .wep_key_cmd.mac_id_n_color =
-                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)),
-            .wep_key_cmd.num_keys = 1,
-            /* firmware sets STA_KEY_FLG_WEP_13BYTES */
-            .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
-            .wep_key.key_index = key->keyidx,
-            .wep_key.key_size = key->keylen,
-        };
+      struct {
+        struct iwl_mvm_wep_key_cmd wep_key_cmd;
+        struct iwl_mvm_wep_key wep_key;
+      } __packed wkc = {
+          .wep_key_cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)),
+          .wep_key_cmd.num_keys = 1,
+          /* firmware sets STA_KEY_FLG_WEP_13BYTES */
+          .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
+          .wep_key.key_index = key->keyidx,
+          .wep_key.key_size = key->keylen,
+      };
 
-        /*
-         * This will fail -- the key functions don't set support
-         * pairwise WEP keys. However, that's better than silently
-         * failing WoWLAN. Or maybe not?
-         */
-        if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { break; }
+      /*
+       * This will fail -- the key functions don't set support
+       * pairwise WEP keys. However, that's better than silently
+       * failing WoWLAN. Or maybe not?
+       */
+      if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+        break;
+      }
 
-        memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
-        if (key->keyidx == mvmvif->tx_key_idx) {
-            /* TX key must be at offset 0 */
-            wkc.wep_key.key_offset = 0;
-        } else {
-            /* others start at 1 */
-            data->wep_key_idx++;
-            wkc.wep_key.key_offset = data->wep_key_idx;
-        }
+      memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
+      if (key->keyidx == mvmvif->tx_key_idx) {
+        /* TX key must be at offset 0 */
+        wkc.wep_key.key_offset = 0;
+      } else {
+        /* others start at 1 */
+        data->wep_key_idx++;
+        wkc.wep_key.key_offset = data->wep_key_idx;
+      }
 
-        if (data->configure_keys) {
-            mutex_lock(&mvm->mutex);
-            ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc);
-            data->error = ret != 0;
+      if (data->configure_keys) {
+        mutex_lock(&mvm->mutex);
+        ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc);
+        data->error = ret != 0;
 
-            mvm->ptk_ivlen = key->iv_len;
-            mvm->ptk_icvlen = key->icv_len;
-            mvm->gtk_ivlen = key->iv_len;
-            mvm->gtk_icvlen = key->icv_len;
-            mutex_unlock(&mvm->mutex);
-        }
+        mvm->ptk_ivlen = key->iv_len;
+        mvm->ptk_icvlen = key->icv_len;
+        mvm->gtk_ivlen = key->iv_len;
+        mvm->gtk_icvlen = key->icv_len;
+        mutex_unlock(&mvm->mutex);
+      }
 
-        /* don't upload key again */
-        return;
+      /* don't upload key again */
+      return;
     }
     default:
-        data->error = true;
-        return;
+      data->error = true;
+      return;
     case WLAN_CIPHER_SUITE_AES_CMAC:
-        /*
-         * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
-         * but we also shouldn't abort suspend due to that. It does have
-         * support for the IGTK key renewal, but doesn't really use the
-         * IGTK for anything. This means we could spuriously wake up or
-         * be deauthenticated, but that was considered acceptable.
-         */
-        return;
+      /*
+       * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
+       * but we also shouldn't abort suspend due to that. It does have
+       * support for the IGTK key renewal, but doesn't really use the
+       * IGTK for anything. This means we could spuriously wake up or
+       * be deauthenticated, but that was considered acceptable.
+       */
+      return;
     case WLAN_CIPHER_SUITE_TKIP:
-        if (sta) {
-            uint64_t pn64;
+      if (sta) {
+        uint64_t pn64;
 
-            tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
-            tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+        tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+        tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
 
-            rx_p1ks = data->tkip->rx_uni;
+        rx_p1ks = data->tkip->rx_uni;
 
-            pn64 = atomic64_read(&key->tx_pn);
-            tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
-            tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
+        pn64 = atomic64_read(&key->tx_pn);
+        tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
+        tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
 
-            ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), p1k);
-            iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
+        ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), p1k);
+        iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
 
-            memcpy(data->tkip->mic_keys.tx, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
-                   IWL_MIC_KEY_SIZE);
+        memcpy(data->tkip->mic_keys.tx, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+               IWL_MIC_KEY_SIZE);
 
-            rx_mic_key = data->tkip->mic_keys.rx_unicast;
-        } else {
-            tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
-            rx_p1ks = data->tkip->rx_multi;
-            rx_mic_key = data->tkip->mic_keys.rx_mcast;
+        rx_mic_key = data->tkip->mic_keys.rx_unicast;
+      } else {
+        tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+        rx_p1ks = data->tkip->rx_multi;
+        rx_mic_key = data->tkip->mic_keys.rx_mcast;
+      }
+
+      /*
+       * For non-QoS this relies on the fact that both the uCode and
+       * mac80211 use TID 0 (as they need to to avoid replay attacks)
+       * for checking the IV in the frames.
+       */
+      for (i = 0; i < IWL_NUM_RSC; i++) {
+        ieee80211_get_key_rx_seq(key, i, &seq);
+        tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+        tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+        /* wrapping isn't allowed, AP must rekey */
+        if (seq.tkip.iv32 > cur_rx_iv32) {
+          cur_rx_iv32 = seq.tkip.iv32;
         }
+      }
 
-        /*
-         * For non-QoS this relies on the fact that both the uCode and
-         * mac80211 use TID 0 (as they need to to avoid replay attacks)
-         * for checking the IV in the frames.
-         */
-        for (i = 0; i < IWL_NUM_RSC; i++) {
-            ieee80211_get_key_rx_seq(key, i, &seq);
-            tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
-            tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
-            /* wrapping isn't allowed, AP must rekey */
-            if (seq.tkip.iv32 > cur_rx_iv32) { cur_rx_iv32 = seq.tkip.iv32; }
-        }
+      ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, cur_rx_iv32, p1k);
+      iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
+      ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, cur_rx_iv32 + 1, p1k);
+      iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
 
-        ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, cur_rx_iv32, p1k);
-        iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
-        ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, cur_rx_iv32 + 1, p1k);
-        iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
+      memcpy(rx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], IWL_MIC_KEY_SIZE);
 
-        memcpy(rx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], IWL_MIC_KEY_SIZE);
-
-        data->use_tkip = true;
-        data->use_rsc_tsc = true;
-        break;
+      data->use_tkip = true;
+      data->use_rsc_tsc = true;
+      break;
     case WLAN_CIPHER_SUITE_CCMP:
-        if (sta) {
-            uint64_t pn64;
+      if (sta) {
+        uint64_t pn64;
 
-            aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
-            aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+        aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+        aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
 
-            pn64 = atomic64_read(&key->tx_pn);
-            aes_tx_sc->pn = cpu_to_le64(pn64);
-        } else {
-            aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+        pn64 = atomic64_read(&key->tx_pn);
+        aes_tx_sc->pn = cpu_to_le64(pn64);
+      } else {
+        aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+      }
+
+      /*
+       * For non-QoS this relies on the fact that both the uCode and
+       * mac80211/our RX code use TID 0 for checking the PN.
+       */
+      if (sta && iwl_mvm_has_new_rx_api(mvm)) {
+        struct iwl_mvm_sta* mvmsta;
+        struct iwl_mvm_key_pn* ptk_pn;
+        const uint8_t* pn;
+
+        mvmsta = iwl_mvm_sta_from_mac80211(sta);
+        ptk_pn =
+            rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx], lockdep_is_held(&mvm->mutex));
+        if (WARN_ON(!ptk_pn)) {
+          break;
         }
 
-        /*
-         * For non-QoS this relies on the fact that both the uCode and
-         * mac80211/our RX code use TID 0 for checking the PN.
-         */
-        if (sta && iwl_mvm_has_new_rx_api(mvm)) {
-            struct iwl_mvm_sta* mvmsta;
-            struct iwl_mvm_key_pn* ptk_pn;
-            const uint8_t* pn;
-
-            mvmsta = iwl_mvm_sta_from_mac80211(sta);
-            ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx],
-                                               lockdep_is_held(&mvm->mutex));
-            if (WARN_ON(!ptk_pn)) { break; }
-
-            for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-                pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, mvm->trans->num_rx_queues);
-                aes_sc[i].pn = cpu_to_le64((uint64_t)pn[5] | ((uint64_t)pn[4] << 8) |
-                                           ((uint64_t)pn[3] << 16) | ((uint64_t)pn[2] << 24) |
-                                           ((uint64_t)pn[1] << 32) | ((uint64_t)pn[0] << 40));
-            }
-        } else {
-            for (i = 0; i < IWL_NUM_RSC; i++) {
-                uint8_t* pn = seq.ccmp.pn;
-
-                ieee80211_get_key_rx_seq(key, i, &seq);
-                aes_sc[i].pn = cpu_to_le64((uint64_t)pn[5] | ((uint64_t)pn[4] << 8) |
-                                           ((uint64_t)pn[3] << 16) | ((uint64_t)pn[2] << 24) |
-                                           ((uint64_t)pn[1] << 32) | ((uint64_t)pn[0] << 40));
-            }
+        for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+          pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, mvm->trans->num_rx_queues);
+          aes_sc[i].pn = cpu_to_le64((uint64_t)pn[5] | ((uint64_t)pn[4] << 8) |
+                                     ((uint64_t)pn[3] << 16) | ((uint64_t)pn[2] << 24) |
+                                     ((uint64_t)pn[1] << 32) | ((uint64_t)pn[0] << 40));
         }
-        data->use_rsc_tsc = true;
-        break;
+      } else {
+        for (i = 0; i < IWL_NUM_RSC; i++) {
+          uint8_t* pn = seq.ccmp.pn;
+
+          ieee80211_get_key_rx_seq(key, i, &seq);
+          aes_sc[i].pn = cpu_to_le64((uint64_t)pn[5] | ((uint64_t)pn[4] << 8) |
+                                     ((uint64_t)pn[3] << 16) | ((uint64_t)pn[2] << 24) |
+                                     ((uint64_t)pn[1] << 32) | ((uint64_t)pn[0] << 40));
+        }
+      }
+      data->use_rsc_tsc = true;
+      break;
+  }
+
+  if (data->configure_keys) {
+    mutex_lock(&mvm->mutex);
+    /*
+     * The D3 firmware hardcodes the key offset 0 as the key it
+     * uses to transmit packets to the AP, i.e. the PTK.
+     */
+    if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+      mvm->ptk_ivlen = key->iv_len;
+      mvm->ptk_icvlen = key->icv_len;
+      ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
+    } else {
+      /*
+       * firmware only supports TSC/RSC for a single key,
+       * so if there are multiple keep overwriting them
+       * with new ones -- this relies on mac80211 doing
+       * list_add_tail().
+       */
+      mvm->gtk_ivlen = key->iv_len;
+      mvm->gtk_icvlen = key->icv_len;
+      ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
     }
-
-    if (data->configure_keys) {
-        mutex_lock(&mvm->mutex);
-        /*
-         * The D3 firmware hardcodes the key offset 0 as the key it
-         * uses to transmit packets to the AP, i.e. the PTK.
-         */
-        if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
-            mvm->ptk_ivlen = key->iv_len;
-            mvm->ptk_icvlen = key->icv_len;
-            ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
-        } else {
-            /*
-             * firmware only supports TSC/RSC for a single key,
-             * so if there are multiple keep overwriting them
-             * with new ones -- this relies on mac80211 doing
-             * list_add_tail().
-             */
-            mvm->gtk_ivlen = key->iv_len;
-            mvm->gtk_icvlen = key->icv_len;
-            ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
-        }
-        mutex_unlock(&mvm->mutex);
-        data->error = ret != 0;
-    }
+    mutex_unlock(&mvm->mutex);
+    data->error = ret != 0;
+  }
 }
 
 static int iwl_mvm_send_patterns(struct iwl_mvm* mvm, struct cfg80211_wowlan* wowlan) {
-    struct iwl_wowlan_patterns_cmd* pattern_cmd;
-    struct iwl_host_cmd cmd = {
-        .id = WOWLAN_PATTERNS,
-        .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-    };
-    int i, err;
+  struct iwl_wowlan_patterns_cmd* pattern_cmd;
+  struct iwl_host_cmd cmd = {
+      .id = WOWLAN_PATTERNS,
+      .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+  };
+  int i, err;
 
-    if (!wowlan->n_patterns) { return 0; }
+  if (!wowlan->n_patterns) {
+    return 0;
+  }
 
-    cmd.len[0] = sizeof(*pattern_cmd) + wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
+  cmd.len[0] = sizeof(*pattern_cmd) + wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
 
-    pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
-    if (!pattern_cmd) { return -ENOMEM; }
+  pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+  if (!pattern_cmd) {
+    return -ENOMEM;
+  }
 
-    pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+  pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
 
-    for (i = 0; i < wowlan->n_patterns; i++) {
-        int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+  for (i = 0; i < wowlan->n_patterns; i++) {
+    int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
 
-        memcpy(&pattern_cmd->patterns[i].mask, wowlan->patterns[i].mask, mask_len);
-        memcpy(&pattern_cmd->patterns[i].pattern, wowlan->patterns[i].pattern,
-               wowlan->patterns[i].pattern_len);
-        pattern_cmd->patterns[i].mask_size = mask_len;
-        pattern_cmd->patterns[i].pattern_size = wowlan->patterns[i].pattern_len;
-    }
+    memcpy(&pattern_cmd->patterns[i].mask, wowlan->patterns[i].mask, mask_len);
+    memcpy(&pattern_cmd->patterns[i].pattern, wowlan->patterns[i].pattern,
+           wowlan->patterns[i].pattern_len);
+    pattern_cmd->patterns[i].mask_size = mask_len;
+    pattern_cmd->patterns[i].pattern_size = wowlan->patterns[i].pattern_len;
+  }
 
-    cmd.data[0] = pattern_cmd;
-    err = iwl_mvm_send_cmd(mvm, &cmd);
-    kfree(pattern_cmd);
-    return err;
+  cmd.data[0] = pattern_cmd;
+  err = iwl_mvm_send_cmd(mvm, &cmd);
+  kfree(pattern_cmd);
+  return err;
 }
 
 static int iwl_mvm_d3_reprogram(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                 struct ieee80211_sta* ap_sta) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct ieee80211_chanctx_conf* ctx;
-    uint8_t chains_static, chains_dynamic;
-    struct cfg80211_chan_def chandef;
-    int ret, i;
-    struct iwl_binding_cmd_v1 binding_cmd = {};
-    struct iwl_time_quota_cmd quota_cmd = {};
-    struct iwl_time_quota_data* quota;
-    uint32_t status;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct ieee80211_chanctx_conf* ctx;
+  uint8_t chains_static, chains_dynamic;
+  struct cfg80211_chan_def chandef;
+  int ret, i;
+  struct iwl_binding_cmd_v1 binding_cmd = {};
+  struct iwl_time_quota_cmd quota_cmd = {};
+  struct iwl_time_quota_data* quota;
+  uint32_t status;
 
-    if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm))) { return -EINVAL; }
+  if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm))) {
+    return -EINVAL;
+  }
 
-    /* add back the PHY */
-    if (WARN_ON(!mvmvif->phy_ctxt)) { return -EINVAL; }
+  /* add back the PHY */
+  if (WARN_ON(!mvmvif->phy_ctxt)) {
+    return -EINVAL;
+  }
 
-    rcu_read_lock();
-    ctx = rcu_dereference(vif->chanctx_conf);
-    if (WARN_ON(!ctx)) {
-        rcu_read_unlock();
-        return -EINVAL;
-    }
-    chandef = ctx->def;
-    chains_static = ctx->rx_chains_static;
-    chains_dynamic = ctx->rx_chains_dynamic;
+  rcu_read_lock();
+  ctx = rcu_dereference(vif->chanctx_conf);
+  if (WARN_ON(!ctx)) {
     rcu_read_unlock();
+    return -EINVAL;
+  }
+  chandef = ctx->def;
+  chains_static = ctx->rx_chains_static;
+  chains_dynamic = ctx->rx_chains_dynamic;
+  rcu_read_unlock();
 
-    ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, chains_static, chains_dynamic);
-    if (ret) { return ret; }
+  ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, chains_static, chains_dynamic);
+  if (ret) {
+    return ret;
+  }
 
-    /* add back the MAC */
-    mvmvif->uploaded = false;
+  /* add back the MAC */
+  mvmvif->uploaded = false;
 
-    if (WARN_ON(!vif->bss_conf.assoc)) { return -EINVAL; }
+  if (WARN_ON(!vif->bss_conf.assoc)) {
+    return -EINVAL;
+  }
 
-    ret = iwl_mvm_mac_ctxt_add(mvm, vif);
-    if (ret) { return ret; }
+  ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+  if (ret) {
+    return ret;
+  }
 
-    /* add back binding - XXX refactor? */
-    binding_cmd.id_and_color =
-        cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, mvmvif->phy_ctxt->color));
-    binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
-    binding_cmd.phy =
-        cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, mvmvif->phy_ctxt->color));
-    binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
-    for (i = 1; i < MAX_MACS_IN_BINDING; i++) {
-        binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
-    }
+  /* add back binding - XXX refactor? */
+  binding_cmd.id_and_color =
+      cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, mvmvif->phy_ctxt->color));
+  binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+  binding_cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, mvmvif->phy_ctxt->color));
+  binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+  for (i = 1; i < MAX_MACS_IN_BINDING; i++) {
+    binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+  }
 
-    status = 0;
-    ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, IWL_BINDING_CMD_SIZE_V1,
-                                      &binding_cmd, &status);
-    if (ret) {
-        IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
-        return ret;
-    }
+  status = 0;
+  ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, IWL_BINDING_CMD_SIZE_V1, &binding_cmd,
+                                    &status);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
+    return ret;
+  }
 
-    if (status) {
-        IWL_ERR(mvm, "Binding command failed: %u\n", status);
-        return -EIO;
-    }
+  if (status) {
+    IWL_ERR(mvm, "Binding command failed: %u\n", status);
+    return -EIO;
+  }
 
-    ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
-    if (ret) { return ret; }
-    rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
+  ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
+  if (ret) {
+    return ret;
+  }
+  rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
 
-    ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
-    if (ret) { return ret; }
+  ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+  if (ret) {
+    return ret;
+  }
 
-    /* and some quota */
-    quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
-    quota->id_and_color =
-        cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, mvmvif->phy_ctxt->color));
-    quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
-    quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+  /* and some quota */
+  quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
+  quota->id_and_color =
+      cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, mvmvif->phy_ctxt->color));
+  quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+  quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
 
-    for (i = 1; i < MAX_BINDINGS; i++) {
-        quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
-        quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
-    }
+  for (i = 1; i < MAX_BINDINGS; i++) {
+    quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
+    quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+  }
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
-    if (ret) { IWL_ERR(mvm, "Failed to send quota: %d\n", ret); }
+  ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+  }
 
-    if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) {
-        IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
-    }
+  if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) {
+    IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
+  }
 
-    return 0;
+  return 0;
 }
 
 static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_nonqos_seq_query_cmd query_cmd = {
-        .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
-        .mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)),
-    };
-    struct iwl_host_cmd cmd = {
-        .id = NON_QOS_TX_COUNTER_CMD,
-        .flags = CMD_WANT_SKB,
-    };
-    int err;
-    uint32_t size;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_nonqos_seq_query_cmd query_cmd = {
+      .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
+      .mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)),
+  };
+  struct iwl_host_cmd cmd = {
+      .id = NON_QOS_TX_COUNTER_CMD,
+      .flags = CMD_WANT_SKB,
+  };
+  int err;
+  uint32_t size;
 
-    cmd.data[0] = &query_cmd;
-    cmd.len[0] = sizeof(query_cmd);
+  cmd.data[0] = &query_cmd;
+  cmd.len[0] = sizeof(query_cmd);
 
-    err = iwl_mvm_send_cmd(mvm, &cmd);
-    if (err) { return err; }
-
-    size = iwl_rx_packet_payload_len(cmd.resp_pkt);
-    if (size < sizeof(__le16)) {
-        err = -EINVAL;
-    } else {
-        err = le16_to_cpup((__le16*)cmd.resp_pkt->data);
-        /* firmware returns next, not last-used seqno */
-        err = (uint16_t)(err - 0x10);
-    }
-
-    iwl_free_resp(&cmd);
+  err = iwl_mvm_send_cmd(mvm, &cmd);
+  if (err) {
     return err;
+  }
+
+  size = iwl_rx_packet_payload_len(cmd.resp_pkt);
+  if (size < sizeof(__le16)) {
+    err = -EINVAL;
+  } else {
+    err = le16_to_cpup((__le16*)cmd.resp_pkt->data);
+    /* firmware returns next, not last-used seqno */
+    err = (uint16_t)(err - 0x10);
+  }
+
+  iwl_free_resp(&cmd);
+  return err;
 }
 
 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_nonqos_seq_query_cmd query_cmd = {
-        .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
-        .mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)),
-        .value = cpu_to_le16(mvmvif->seqno),
-    };
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_nonqos_seq_query_cmd query_cmd = {
+      .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
+      .mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)),
+      .value = cpu_to_le16(mvmvif->seqno),
+  };
 
-    /* return if called during restart, not resume from D3 */
-    if (!mvmvif->seqno_valid) { return; }
+  /* return if called during restart, not resume from D3 */
+  if (!mvmvif->seqno_valid) {
+    return;
+  }
 
-    mvmvif->seqno_valid = false;
+  mvmvif->seqno_valid = false;
 
-    if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0, sizeof(query_cmd), &query_cmd)) {
-        IWL_ERR(mvm, "failed to set non-QoS seqno\n");
-    }
+  if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0, sizeof(query_cmd), &query_cmd)) {
+    IWL_ERR(mvm, "failed to set non-QoS seqno\n");
+  }
 }
 
 static int iwl_mvm_switch_to_d3(struct iwl_mvm* mvm) {
-    iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+  iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
 
-    iwl_mvm_stop_device(mvm);
-    /*
-     * Set the HW restart bit -- this is mostly true as we're
-     * going to load new firmware and reprogram that, though
-     * the reprogramming is going to be manual to avoid adding
-     * all the MACs that aren't support.
-     * We don't have to clear up everything though because the
-     * reprogramming is manual. When we resume, we'll actually
-     * go through a proper restart sequence again to switch
-     * back to the runtime firmware image.
-     */
-    set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+  iwl_mvm_stop_device(mvm);
+  /*
+   * Set the HW restart bit -- this is mostly true as we're
+   * going to load new firmware and reprogram that, though
+   * the reprogramming is going to be manual to avoid adding
+   * all the MACs that aren't support.
+   * We don't have to clear up everything though because the
+   * reprogramming is manual. When we resume, we'll actually
+   * go through a proper restart sequence again to switch
+   * back to the runtime firmware image.
+   */
+  set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
 
-    /* the fw is reset, so all the keys are cleared */
-    memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+  /* the fw is reset, so all the keys are cleared */
+  memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
 
-    mvm->ptk_ivlen = 0;
-    mvm->ptk_icvlen = 0;
-    mvm->ptk_ivlen = 0;
-    mvm->ptk_icvlen = 0;
+  mvm->ptk_ivlen = 0;
+  mvm->ptk_icvlen = 0;
+  mvm->ptk_ivlen = 0;
+  mvm->ptk_icvlen = 0;
 
-    return iwl_mvm_load_d3_fw(mvm);
+  return iwl_mvm_load_d3_fw(mvm);
 }
 
 static int iwl_mvm_get_wowlan_config(struct iwl_mvm* mvm, struct cfg80211_wowlan* wowlan,
                                      struct iwl_wowlan_config_cmd* wowlan_config_cmd,
                                      struct ieee80211_vif* vif, struct iwl_mvm_vif* mvmvif,
                                      struct ieee80211_sta* ap_sta) {
-    int ret;
-    struct iwl_mvm_sta* mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
+  int ret;
+  struct iwl_mvm_sta* mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
 
-    /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
+  /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
 
-    wowlan_config_cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
-    wowlan_config_cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+  wowlan_config_cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
+  wowlan_config_cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
 
-    /* Query the last used seqno and set it */
-    ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
-    if (ret < 0) { return ret; }
+  /* Query the last used seqno and set it */
+  ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
+  if (ret < 0) {
+    return ret;
+  }
 
-    wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
+  wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
 
-    iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
+  iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
 
-    if (wowlan->disconnect)
-        wowlan_config_cmd->wakeup_filter |=
-            cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | IWL_WOWLAN_WAKEUP_LINK_CHANGE);
-    if (wowlan->magic_pkt) {
-        wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
-    }
-    if (wowlan->gtk_rekey_failure) {
-        wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
-    }
-    if (wowlan->eap_identity_req) {
-        wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
-    }
-    if (wowlan->four_way_handshake) {
-        wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
-    }
-    if (wowlan->n_patterns) {
-        wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
-    }
+  if (wowlan->disconnect)
+    wowlan_config_cmd->wakeup_filter |=
+        cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+  if (wowlan->magic_pkt) {
+    wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
+  }
+  if (wowlan->gtk_rekey_failure) {
+    wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+  }
+  if (wowlan->eap_identity_req) {
+    wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+  }
+  if (wowlan->four_way_handshake) {
+    wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+  }
+  if (wowlan->n_patterns) {
+    wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
+  }
 
-    if (wowlan->rfkill_release) {
-        wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
-    }
+  if (wowlan->rfkill_release) {
+    wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+  }
 
-    if (wowlan->tcp) {
-        /*
-         * Set the "link change" (really "link lost") flag as well
-         * since that implies losing the TCP connection.
-         */
-        wowlan_config_cmd->wakeup_filter |= cpu_to_le32(
-            IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS | IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
-            IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET | IWL_WOWLAN_WAKEUP_LINK_CHANGE);
-    }
+  if (wowlan->tcp) {
+    /*
+     * Set the "link change" (really "link lost") flag as well
+     * since that implies losing the TCP connection.
+     */
+    wowlan_config_cmd->wakeup_filter |=
+        cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS | IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
+                    IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET | IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+  }
 
-    if (wowlan->any) {
-        wowlan_config_cmd->wakeup_filter |=
-            cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | IWL_WOWLAN_WAKEUP_LINK_CHANGE |
-                        IWL_WOWLAN_WAKEUP_RX_FRAME | IWL_WOWLAN_WAKEUP_BCN_FILTERING);
-    }
+  if (wowlan->any) {
+    wowlan_config_cmd->wakeup_filter |=
+        cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+                    IWL_WOWLAN_WAKEUP_RX_FRAME | IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+  }
 
-    return 0;
+  return 0;
 }
 
 static void iwl_mvm_iter_d0i3_ap_keys(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
@@ -591,1327 +630,1477 @@
                                                    struct ieee80211_sta* sta,
                                                    struct ieee80211_key_conf* key, void* data),
                                       void* data) {
-    struct ieee80211_sta* ap_sta;
+  struct ieee80211_sta* ap_sta;
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id]);
-    if (IS_ERR_OR_NULL(ap_sta)) { goto out; }
+  ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id]);
+  if (IS_ERR_OR_NULL(ap_sta)) {
+    goto out;
+  }
 
-    ieee80211_iter_keys_rcu(mvm->hw, vif, iter, data);
+  ieee80211_iter_keys_rcu(mvm->hw, vif, iter, data);
 out:
-    rcu_read_unlock();
+  rcu_read_unlock();
 }
 
 int iwl_mvm_wowlan_config_key_params(struct iwl_mvm* mvm, struct ieee80211_vif* vif, bool d0i3,
                                      uint32_t cmd_flags) {
-    struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
-    struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
-    bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
-    struct wowlan_key_data key_data = {
-        .configure_keys = !d0i3 && !unified,
-        .use_rsc_tsc = false,
-        .tkip = &tkip_cmd,
-        .use_tkip = false,
-    };
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int ret;
+  struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
+  struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+  bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+  struct wowlan_key_data key_data = {
+      .configure_keys = !d0i3 && !unified,
+      .use_rsc_tsc = false,
+      .tkip = &tkip_cmd,
+      .use_tkip = false,
+  };
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int ret;
 
-    key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
-    if (!key_data.rsc_tsc) { return -ENOMEM; }
+  key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+  if (!key_data.rsc_tsc) {
+    return -ENOMEM;
+  }
 
+  /*
+   * if we have to configure keys, call ieee80211_iter_keys(),
+   * as we need non-atomic context in order to take the
+   * required locks.
+   * for the d0i3 we can't use ieee80211_iter_keys(), as
+   * taking (almost) any mutex might result in deadlock.
+   */
+  if (!d0i3) {
     /*
-     * if we have to configure keys, call ieee80211_iter_keys(),
-     * as we need non-atomic context in order to take the
-     * required locks.
-     * for the d0i3 we can't use ieee80211_iter_keys(), as
-     * taking (almost) any mutex might result in deadlock.
+     * Note that currently we don't propagate cmd_flags
+     * to the iterator. In case of key_data.configure_keys,
+     * all the configured commands are SYNC, and
+     * iwl_mvm_wowlan_program_keys() will take care of
+     * locking/unlocking mvm->mutex.
      */
-    if (!d0i3) {
-        /*
-         * Note that currently we don't propagate cmd_flags
-         * to the iterator. In case of key_data.configure_keys,
-         * all the configured commands are SYNC, and
-         * iwl_mvm_wowlan_program_keys() will take care of
-         * locking/unlocking mvm->mutex.
-         */
-        ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys, &key_data);
-    } else {
-        iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_wowlan_program_keys, &key_data);
-    }
+    ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys, &key_data);
+  } else {
+    iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_wowlan_program_keys, &key_data);
+  }
 
-    if (key_data.error) {
-        ret = -EIO;
-        goto out;
-    }
+  if (key_data.error) {
+    ret = -EIO;
+    goto out;
+  }
 
-    if (key_data.use_rsc_tsc) {
-        ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, cmd_flags, sizeof(*key_data.rsc_tsc),
-                                   key_data.rsc_tsc);
-        if (ret) { goto out; }
+  if (key_data.use_rsc_tsc) {
+    ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, cmd_flags, sizeof(*key_data.rsc_tsc),
+                               key_data.rsc_tsc);
+    if (ret) {
+      goto out;
     }
+  }
 
-    if (key_data.use_tkip && !fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
-        ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TKIP_PARAM, cmd_flags, sizeof(tkip_cmd), &tkip_cmd);
-        if (ret) { goto out; }
+  if (key_data.use_tkip && !fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
+    ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TKIP_PARAM, cmd_flags, sizeof(tkip_cmd), &tkip_cmd);
+    if (ret) {
+      goto out;
     }
+  }
 
-    /* configure rekey data only if offloaded rekey is supported (d3) */
-    if (mvmvif->rekey_data.valid && !d0i3) {
-        memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
-        memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, NL80211_KCK_LEN);
-        kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
-        memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, NL80211_KEK_LEN);
-        kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
-        kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
+  /* configure rekey data only if offloaded rekey is supported (d3) */
+  if (mvmvif->rekey_data.valid && !d0i3) {
+    memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+    memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, NL80211_KCK_LEN);
+    kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+    memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, NL80211_KEK_LEN);
+    kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+    kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
 
-        ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_KEK_KCK_MATERIAL, cmd_flags, sizeof(kek_kck_cmd),
-                                   &kek_kck_cmd);
-        if (ret) { goto out; }
+    ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_KEK_KCK_MATERIAL, cmd_flags, sizeof(kek_kck_cmd),
+                               &kek_kck_cmd);
+    if (ret) {
+      goto out;
     }
-    ret = 0;
+  }
+  ret = 0;
 out:
-    kfree(key_data.rsc_tsc);
-    return ret;
+  kfree(key_data.rsc_tsc);
+  return ret;
 }
 
 static int iwl_mvm_wowlan_config(struct iwl_mvm* mvm, struct cfg80211_wowlan* wowlan,
                                  struct iwl_wowlan_config_cmd* wowlan_config_cmd,
                                  struct ieee80211_vif* vif, struct iwl_mvm_vif* mvmvif,
                                  struct ieee80211_sta* ap_sta) {
-    int ret;
-    bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+  int ret;
+  bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
-    if (!unified_image) {
-        ret = iwl_mvm_switch_to_d3(mvm);
-        if (ret) { return ret; }
-
-        ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
-        if (ret) { return ret; }
+  if (!unified_image) {
+    ret = iwl_mvm_switch_to_d3(mvm);
+    if (ret) {
+      return ret;
     }
 
-    if (!iwlwifi_mod_params.swcrypto) {
-        /*
-         * This needs to be unlocked due to lock ordering
-         * constraints. Since we're in the suspend path
-         * that isn't really a problem though.
-         */
-        mutex_unlock(&mvm->mutex);
-        ret = iwl_mvm_wowlan_config_key_params(mvm, vif, false, CMD_ASYNC);
-        mutex_lock(&mvm->mutex);
-        if (ret) { return ret; }
+    ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
+    if (ret) {
+      return ret;
     }
+  }
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, sizeof(*wowlan_config_cmd),
-                               wowlan_config_cmd);
-    if (ret) { return ret; }
+  if (!iwlwifi_mod_params.swcrypto) {
+    /*
+     * This needs to be unlocked due to lock ordering
+     * constraints. Since we're in the suspend path
+     * that isn't really a problem though.
+     */
+    mutex_unlock(&mvm->mutex);
+    ret = iwl_mvm_wowlan_config_key_params(mvm, vif, false, CMD_ASYNC);
+    mutex_lock(&mvm->mutex);
+    if (ret) {
+      return ret;
+    }
+  }
 
-    ret = iwl_mvm_send_patterns(mvm, wowlan);
-    if (ret) { return ret; }
+  ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, sizeof(*wowlan_config_cmd),
+                             wowlan_config_cmd);
+  if (ret) {
+    return ret;
+  }
 
-    return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
+  ret = iwl_mvm_send_patterns(mvm, wowlan);
+  if (ret) {
+    return ret;
+  }
+
+  return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
 }
 
 static int iwl_mvm_netdetect_config(struct iwl_mvm* mvm, struct cfg80211_wowlan* wowlan,
                                     struct cfg80211_sched_scan_request* nd_config,
                                     struct ieee80211_vif* vif) {
-    struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
-    int ret;
-    bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+  struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+  int ret;
+  bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
-    if (!unified_image) {
-        ret = iwl_mvm_switch_to_d3(mvm);
-        if (ret) { return ret; }
-    } else {
-        /* In theory, we wouldn't have to stop a running sched
-         * scan in order to start another one (for
-         * net-detect).  But in practice this doesn't seem to
-         * work properly, so stop any running sched_scan now.
-         */
-        ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
-        if (ret) { return ret; }
+  if (!unified_image) {
+    ret = iwl_mvm_switch_to_d3(mvm);
+    if (ret) {
+      return ret;
     }
-
-    /* rfkill release can be either for wowlan or netdetect */
-    if (wowlan->rfkill_release) {
-        wowlan_config_cmd.wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+  } else {
+    /* In theory, we wouldn't have to stop a running sched
+     * scan in order to start another one (for
+     * net-detect).  But in practice this doesn't seem to
+     * work properly, so stop any running sched_scan now.
+     */
+    ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+    if (ret) {
+      return ret;
     }
+  }
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, sizeof(wowlan_config_cmd),
-                               &wowlan_config_cmd);
-    if (ret) { return ret; }
+  /* rfkill release can be either for wowlan or netdetect */
+  if (wowlan->rfkill_release) {
+    wowlan_config_cmd.wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+  }
 
-    ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, IWL_MVM_SCAN_NETDETECT);
-    if (ret) { return ret; }
+  ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, sizeof(wowlan_config_cmd),
+                             &wowlan_config_cmd);
+  if (ret) {
+    return ret;
+  }
 
-    if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) { return -EBUSY; }
+  ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, IWL_MVM_SCAN_NETDETECT);
+  if (ret) {
+    return ret;
+  }
 
-    /* save the sched scan matchsets... */
-    if (nd_config->n_match_sets) {
-        mvm->nd_match_sets =
-            kmemdup(nd_config->match_sets, sizeof(*nd_config->match_sets) * nd_config->n_match_sets,
-                    GFP_KERNEL);
-        if (mvm->nd_match_sets) { mvm->n_nd_match_sets = nd_config->n_match_sets; }
+  if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) {
+    return -EBUSY;
+  }
+
+  /* save the sched scan matchsets... */
+  if (nd_config->n_match_sets) {
+    mvm->nd_match_sets =
+        kmemdup(nd_config->match_sets, sizeof(*nd_config->match_sets) * nd_config->n_match_sets,
+                GFP_KERNEL);
+    if (mvm->nd_match_sets) {
+      mvm->n_nd_match_sets = nd_config->n_match_sets;
     }
+  }
 
-    /* ...and the sched scan channels for later reporting */
-    mvm->nd_channels = kmemdup(nd_config->channels,
-                               sizeof(*nd_config->channels) * nd_config->n_channels, GFP_KERNEL);
-    if (mvm->nd_channels) { mvm->n_nd_channels = nd_config->n_channels; }
+  /* ...and the sched scan channels for later reporting */
+  mvm->nd_channels = kmemdup(nd_config->channels,
+                             sizeof(*nd_config->channels) * nd_config->n_channels, GFP_KERNEL);
+  if (mvm->nd_channels) {
+    mvm->n_nd_channels = nd_config->n_channels;
+  }
 
-    return 0;
+  return 0;
 }
 
 static void iwl_mvm_free_nd(struct iwl_mvm* mvm) {
-    kfree(mvm->nd_match_sets);
-    mvm->nd_match_sets = NULL;
-    mvm->n_nd_match_sets = 0;
-    kfree(mvm->nd_channels);
-    mvm->nd_channels = NULL;
-    mvm->n_nd_channels = 0;
+  kfree(mvm->nd_match_sets);
+  mvm->nd_match_sets = NULL;
+  mvm->n_nd_match_sets = 0;
+  kfree(mvm->nd_channels);
+  mvm->nd_channels = NULL;
+  mvm->n_nd_channels = 0;
 }
 
 static int __iwl_mvm_suspend(struct ieee80211_hw* hw, struct cfg80211_wowlan* wowlan, bool test) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct ieee80211_vif* vif = NULL;
-    struct iwl_mvm_vif* mvmvif = NULL;
-    struct ieee80211_sta* ap_sta = NULL;
-    struct iwl_d3_manager_config d3_cfg_cmd_data = {
-        /*
-         * Program the minimum sleep time to 10 seconds, as many
-         * platforms have issues processing a wakeup signal while
-         * still being in the process of suspending.
-         */
-        .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
-    };
-    struct iwl_host_cmd d3_cfg_cmd = {
-        .id = D3_CONFIG_CMD,
-        .flags = CMD_WANT_SKB,
-        .data[0] = &d3_cfg_cmd_data,
-        .len[0] = sizeof(d3_cfg_cmd_data),
-    };
-    int ret;
-    int len __maybe_unused;
-    bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct ieee80211_vif* vif = NULL;
+  struct iwl_mvm_vif* mvmvif = NULL;
+  struct ieee80211_sta* ap_sta = NULL;
+  struct iwl_d3_manager_config d3_cfg_cmd_data = {
+      /*
+       * Program the minimum sleep time to 10 seconds, as many
+       * platforms have issues processing a wakeup signal while
+       * still being in the process of suspending.
+       */
+      .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
+  };
+  struct iwl_host_cmd d3_cfg_cmd = {
+      .id = D3_CONFIG_CMD,
+      .flags = CMD_WANT_SKB,
+      .data[0] = &d3_cfg_cmd_data,
+      .len[0] = sizeof(d3_cfg_cmd_data),
+  };
+  int ret;
+  int len __maybe_unused;
+  bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
-    if (!wowlan) {
-        /*
-         * mac80211 shouldn't get here, but for D3 test
-         * it doesn't warrant a warning
-         */
-        WARN_ON(!test);
-        return -EINVAL;
-    }
-
-    mutex_lock(&mvm->mutex);
-
-    vif = iwl_mvm_get_bss_vif(mvm);
-    if (IS_ERR_OR_NULL(vif)) {
-        ret = 1;
-        goto out_noreset;
-    }
-
-    mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
-    if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
-        /* if we're not associated, this must be netdetect */
-        if (!wowlan->nd_config) {
-            ret = 1;
-            goto out_noreset;
-        }
-
-        ret = iwl_mvm_netdetect_config(mvm, wowlan, wowlan->nd_config, vif);
-        if (ret) { goto out; }
-
-        mvm->net_detect = true;
-    } else {
-        struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
-
-        ap_sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
-                                           lockdep_is_held(&mvm->mutex));
-        if (IS_ERR_OR_NULL(ap_sta)) {
-            ret = -EINVAL;
-            goto out_noreset;
-        }
-
-        ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta);
-        if (ret) { goto out_noreset; }
-        ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta);
-        if (ret) { goto out; }
-
-        mvm->net_detect = false;
-    }
-
-    ret = iwl_mvm_power_update_device(mvm);
-    if (ret) { goto out; }
-
-    ret = iwl_mvm_power_update_mac(mvm);
-    if (ret) { goto out; }
-
-#ifdef CPTCFG_IWLWIFI_DEBUGFS
-    if (mvm->d3_wake_sysassert) {
-        d3_cfg_cmd_data.wakeup_flags |= cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
-    }
-#endif
-
+  if (!wowlan) {
     /*
-     * TODO: this is needed because the firmware is not stopping
-     * the recording automatically before entering D3.  This can
-     * be removed once the FW starts doing that.
+     * mac80211 shouldn't get here, but for D3 test
+     * it doesn't warrant a warning
      */
-    _iwl_fw_dbg_stop_recording(mvm->fwrt.trans, NULL);
+    WARN_ON(!test);
+    return -EINVAL;
+  }
 
-    /* must be last -- this switches firmware state */
-    ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
-    if (ret) { goto out; }
+  mutex_lock(&mvm->mutex);
+
+  vif = iwl_mvm_get_bss_vif(mvm);
+  if (IS_ERR_OR_NULL(vif)) {
+    ret = 1;
+    goto out_noreset;
+  }
+
+  mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+  if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
+    /* if we're not associated, this must be netdetect */
+    if (!wowlan->nd_config) {
+      ret = 1;
+      goto out_noreset;
+    }
+
+    ret = iwl_mvm_netdetect_config(mvm, wowlan, wowlan->nd_config, vif);
+    if (ret) {
+      goto out;
+    }
+
+    mvm->net_detect = true;
+  } else {
+    struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+
+    ap_sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
+                                       lockdep_is_held(&mvm->mutex));
+    if (IS_ERR_OR_NULL(ap_sta)) {
+      ret = -EINVAL;
+      goto out_noreset;
+    }
+
+    ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta);
+    if (ret) {
+      goto out_noreset;
+    }
+    ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta);
+    if (ret) {
+      goto out;
+    }
+
+    mvm->net_detect = false;
+  }
+
+  ret = iwl_mvm_power_update_device(mvm);
+  if (ret) {
+    goto out;
+  }
+
+  ret = iwl_mvm_power_update_mac(mvm);
+  if (ret) {
+    goto out;
+  }
+
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
-    if (len >= sizeof(uint32_t)) {
-        mvm->d3_test_pme_ptr = le32_to_cpup((__le32*)d3_cfg_cmd.resp_pkt->data);
-    }
+  if (mvm->d3_wake_sysassert) {
+    d3_cfg_cmd_data.wakeup_flags |= cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
+  }
 #endif
-    iwl_free_resp(&d3_cfg_cmd);
 
-    clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+  /*
+   * TODO: this is needed because the firmware is not stopping
+   * the recording automatically before entering D3.  This can
+   * be removed once the FW starts doing that.
+   */
+  _iwl_fw_dbg_stop_recording(mvm->fwrt.trans, NULL);
 
-    iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
+  /* must be last -- this switches firmware state */
+  ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
+  if (ret) {
+    goto out;
+  }
+#ifdef CPTCFG_IWLWIFI_DEBUGFS
+  len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
+  if (len >= sizeof(uint32_t)) {
+    mvm->d3_test_pme_ptr = le32_to_cpup((__le32*)d3_cfg_cmd.resp_pkt->data);
+  }
+#endif
+  iwl_free_resp(&d3_cfg_cmd);
+
+  clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+  iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
 out:
-    if (ret < 0) {
-        iwl_mvm_free_nd(mvm);
+  if (ret < 0) {
+    iwl_mvm_free_nd(mvm);
 
-        if (!unified_image) {
-            iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
-            if (mvm->fw_restart > 0) {
-                mvm->fw_restart--;
-                ieee80211_restart_hw(mvm->hw);
-            }
-        }
+    if (!unified_image) {
+      iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+      if (mvm->fw_restart > 0) {
+        mvm->fw_restart--;
+        ieee80211_restart_hw(mvm->hw);
+      }
     }
+  }
 out_noreset:
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return ret;
+  return ret;
 }
 
 static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm* mvm) {
-    struct iwl_notification_wait wait_d3;
-    static const uint16_t d3_notif[] = {D3_CONFIG_CMD};
-    int ret;
+  struct iwl_notification_wait wait_d3;
+  static const uint16_t d3_notif[] = {D3_CONFIG_CMD};
+  int ret;
 
-    iwl_init_notification_wait(&mvm->notif_wait, &wait_d3, d3_notif, ARRAY_SIZE(d3_notif), NULL,
-                               NULL);
+  iwl_init_notification_wait(&mvm->notif_wait, &wait_d3, d3_notif, ARRAY_SIZE(d3_notif), NULL,
+                             NULL);
 
-    ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
-    if (ret) { goto remove_notif; }
+  ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
+  if (ret) {
+    goto remove_notif;
+  }
 
-    ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
-    WARN_ON_ONCE(ret);
-    return ret;
+  ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
+  WARN_ON_ONCE(ret);
+  return ret;
 
 remove_notif:
-    iwl_remove_notification(&mvm->notif_wait, &wait_d3);
-    return ret;
+  iwl_remove_notification(&mvm->notif_wait, &wait_d3);
+  return ret;
 }
 
 int iwl_mvm_suspend(struct ieee80211_hw* hw, struct cfg80211_wowlan* wowlan) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct iwl_trans* trans = mvm->trans;
-    int ret;
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_trans* trans = mvm->trans;
+  int ret;
 
-    /* make sure the d0i3 exit work is not pending */
-    flush_work(&mvm->d0i3_exit_work);
-    iwl_mvm_pause_tcm(mvm, true);
+  /* make sure the d0i3 exit work is not pending */
+  flush_work(&mvm->d0i3_exit_work);
+  iwl_mvm_pause_tcm(mvm, true);
 
-    iwl_fw_runtime_suspend(&mvm->fwrt);
+  iwl_fw_runtime_suspend(&mvm->fwrt);
 
-    ret = iwl_trans_suspend(trans);
-    if (ret) { return ret; }
+  ret = iwl_trans_suspend(trans);
+  if (ret) {
+    return ret;
+  }
 
-    if (wowlan->any) {
-        trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+  if (wowlan->any) {
+    trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
 
-        if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
-            ret = iwl_mvm_enter_d0i3_sync(mvm);
+    if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
+      ret = iwl_mvm_enter_d0i3_sync(mvm);
 
-            if (ret) { return ret; }
-        }
-
-        mutex_lock(&mvm->d0i3_suspend_mutex);
-        __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
-        mutex_unlock(&mvm->d0i3_suspend_mutex);
-
-        iwl_trans_d3_suspend(trans, false, false);
-
-        return 0;
+      if (ret) {
+        return ret;
+      }
     }
 
-    trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+    mutex_lock(&mvm->d0i3_suspend_mutex);
+    __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+    mutex_unlock(&mvm->d0i3_suspend_mutex);
 
-    return __iwl_mvm_suspend(hw, wowlan, false);
+    iwl_trans_d3_suspend(trans, false, false);
+
+    return 0;
+  }
+
+  trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+
+  return __iwl_mvm_suspend(hw, wowlan, false);
 }
 
 /* converted data from the different status responses */
 struct iwl_wowlan_status_data {
-    uint16_t pattern_number;
-    uint16_t qos_seq_ctr[8];
-    uint32_t wakeup_reasons;
-    uint32_t wake_packet_length;
-    uint32_t wake_packet_bufsize;
-    const uint8_t* wake_packet;
+  uint16_t pattern_number;
+  uint16_t qos_seq_ctr[8];
+  uint32_t wakeup_reasons;
+  uint32_t wake_packet_length;
+  uint32_t wake_packet_bufsize;
+  const uint8_t* wake_packet;
 };
 
 static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                           struct iwl_wowlan_status_data* status) {
-    struct sk_buff* pkt = NULL;
-    struct cfg80211_wowlan_wakeup wakeup = {
-        .pattern_idx = -1,
-    };
-    struct cfg80211_wowlan_wakeup* wakeup_report = &wakeup;
-    uint32_t reasons = status->wakeup_reasons;
+  struct sk_buff* pkt = NULL;
+  struct cfg80211_wowlan_wakeup wakeup = {
+      .pattern_idx = -1,
+  };
+  struct cfg80211_wowlan_wakeup* wakeup_report = &wakeup;
+  uint32_t reasons = status->wakeup_reasons;
 
-    if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
-        wakeup_report = NULL;
+  if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+    wakeup_report = NULL;
+    goto report;
+  }
+
+  pm_wakeup_event(mvm->dev, 0);
+
+  if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) {
+    wakeup.magic_pkt = true;
+  }
+
+  if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) {
+    wakeup.pattern_idx = status->pattern_number;
+  }
+
+  if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+                 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) {
+    wakeup.disconnect = true;
+  }
+
+  if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) {
+    wakeup.gtk_rekey_failure = true;
+  }
+
+  if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) {
+    wakeup.rfkill_release = true;
+  }
+
+  if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) {
+    wakeup.eap_identity_req = true;
+  }
+
+  if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) {
+    wakeup.four_way_handshake = true;
+  }
+
+  if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) {
+    wakeup.tcp_connlost = true;
+  }
+
+  if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE) {
+    wakeup.tcp_nomoretokens = true;
+  }
+
+  if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) {
+    wakeup.tcp_match = true;
+  }
+
+  if (status->wake_packet_bufsize) {
+    int pktsize = status->wake_packet_bufsize;
+    int pktlen = status->wake_packet_length;
+    const uint8_t* pktdata = status->wake_packet;
+    struct ieee80211_hdr* hdr = (void*)pktdata;
+    int truncated = pktlen - pktsize;
+
+    /* this would be a firmware bug */
+    if (WARN_ON_ONCE(truncated < 0)) {
+      truncated = 0;
+    }
+
+    if (ieee80211_is_data(hdr->frame_control)) {
+      int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+      int ivlen = 0, icvlen = 4; /* also FCS */
+
+      pkt = alloc_skb(pktsize, GFP_KERNEL);
+      if (!pkt) {
         goto report;
-    }
+      }
 
-    pm_wakeup_event(mvm->dev, 0);
+      skb_put_data(pkt, pktdata, hdrlen);
+      pktdata += hdrlen;
+      pktsize -= hdrlen;
 
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) { wakeup.magic_pkt = true; }
-
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) { wakeup.pattern_idx = status->pattern_number; }
-
-    if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
-                   IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) {
-        wakeup.disconnect = true;
-    }
-
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) { wakeup.gtk_rekey_failure = true; }
-
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) { wakeup.rfkill_release = true; }
-
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) { wakeup.eap_identity_req = true; }
-
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) { wakeup.four_way_handshake = true; }
-
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) { wakeup.tcp_connlost = true; }
-
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE) { wakeup.tcp_nomoretokens = true; }
-
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) { wakeup.tcp_match = true; }
-
-    if (status->wake_packet_bufsize) {
-        int pktsize = status->wake_packet_bufsize;
-        int pktlen = status->wake_packet_length;
-        const uint8_t* pktdata = status->wake_packet;
-        struct ieee80211_hdr* hdr = (void*)pktdata;
-        int truncated = pktlen - pktsize;
-
-        /* this would be a firmware bug */
-        if (WARN_ON_ONCE(truncated < 0)) { truncated = 0; }
-
-        if (ieee80211_is_data(hdr->frame_control)) {
-            int hdrlen = ieee80211_hdrlen(hdr->frame_control);
-            int ivlen = 0, icvlen = 4; /* also FCS */
-
-            pkt = alloc_skb(pktsize, GFP_KERNEL);
-            if (!pkt) { goto report; }
-
-            skb_put_data(pkt, pktdata, hdrlen);
-            pktdata += hdrlen;
-            pktsize -= hdrlen;
-
-            if (ieee80211_has_protected(hdr->frame_control)) {
-                /*
-                 * This is unlocked and using gtk_i(c)vlen,
-                 * but since everything is under RTNL still
-                 * that's not really a problem - changing
-                 * it would be difficult.
-                 */
-                if (is_multicast_ether_addr(hdr->addr1)) {
-                    ivlen = mvm->gtk_ivlen;
-                    icvlen += mvm->gtk_icvlen;
-                } else {
-                    ivlen = mvm->ptk_ivlen;
-                    icvlen += mvm->ptk_icvlen;
-                }
-            }
-
-            /* if truncated, FCS/ICV is (partially) gone */
-            if (truncated >= icvlen) {
-                icvlen = 0;
-                truncated -= icvlen;
-            } else {
-                icvlen -= truncated;
-                truncated = 0;
-            }
-
-            pktsize -= ivlen + icvlen;
-            pktdata += ivlen;
-
-            skb_put_data(pkt, pktdata, pktsize);
-
-            if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) { goto report; }
-            wakeup.packet = pkt->data;
-            wakeup.packet_present_len = pkt->len;
-            wakeup.packet_len = pkt->len - truncated;
-            wakeup.packet_80211 = false;
+      if (ieee80211_has_protected(hdr->frame_control)) {
+        /*
+         * This is unlocked and using gtk_i(c)vlen,
+         * but since everything is under RTNL still
+         * that's not really a problem - changing
+         * it would be difficult.
+         */
+        if (is_multicast_ether_addr(hdr->addr1)) {
+          ivlen = mvm->gtk_ivlen;
+          icvlen += mvm->gtk_icvlen;
         } else {
-            int fcslen = 4;
-
-            if (truncated >= 4) {
-                truncated -= 4;
-                fcslen = 0;
-            } else {
-                fcslen -= truncated;
-                truncated = 0;
-            }
-            pktsize -= fcslen;
-            wakeup.packet = status->wake_packet;
-            wakeup.packet_present_len = pktsize;
-            wakeup.packet_len = pktlen - truncated;
-            wakeup.packet_80211 = true;
+          ivlen = mvm->ptk_ivlen;
+          icvlen += mvm->ptk_icvlen;
         }
+      }
+
+      /* if truncated, FCS/ICV is (partially) gone */
+      if (truncated >= icvlen) {
+        icvlen = 0;
+        truncated -= icvlen;
+      } else {
+        icvlen -= truncated;
+        truncated = 0;
+      }
+
+      pktsize -= ivlen + icvlen;
+      pktdata += ivlen;
+
+      skb_put_data(pkt, pktdata, pktsize);
+
+      if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) {
+        goto report;
+      }
+      wakeup.packet = pkt->data;
+      wakeup.packet_present_len = pkt->len;
+      wakeup.packet_len = pkt->len - truncated;
+      wakeup.packet_80211 = false;
+    } else {
+      int fcslen = 4;
+
+      if (truncated >= 4) {
+        truncated -= 4;
+        fcslen = 0;
+      } else {
+        fcslen -= truncated;
+        truncated = 0;
+      }
+      pktsize -= fcslen;
+      wakeup.packet = status->wake_packet;
+      wakeup.packet_present_len = pktsize;
+      wakeup.packet_len = pktlen - truncated;
+      wakeup.packet_80211 = true;
     }
+  }
 
 report:
-    ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
-    kfree_skb(pkt);
+  ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+  kfree_skb(pkt);
 }
 
 static void iwl_mvm_aes_sc_to_seq(struct aes_sc* sc, struct ieee80211_key_seq* seq) {
-    uint64_t pn;
+  uint64_t pn;
 
-    pn = le64_to_cpu(sc->pn);
-    seq->ccmp.pn[0] = pn >> 40;
-    seq->ccmp.pn[1] = pn >> 32;
-    seq->ccmp.pn[2] = pn >> 24;
-    seq->ccmp.pn[3] = pn >> 16;
-    seq->ccmp.pn[4] = pn >> 8;
-    seq->ccmp.pn[5] = pn;
+  pn = le64_to_cpu(sc->pn);
+  seq->ccmp.pn[0] = pn >> 40;
+  seq->ccmp.pn[1] = pn >> 32;
+  seq->ccmp.pn[2] = pn >> 24;
+  seq->ccmp.pn[3] = pn >> 16;
+  seq->ccmp.pn[4] = pn >> 8;
+  seq->ccmp.pn[5] = pn;
 }
 
 static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc* sc, struct ieee80211_key_seq* seq) {
-    seq->tkip.iv32 = le32_to_cpu(sc->iv32);
-    seq->tkip.iv16 = le16_to_cpu(sc->iv16);
+  seq->tkip.iv32 = le32_to_cpu(sc->iv32);
+  seq->tkip.iv16 = le16_to_cpu(sc->iv16);
 }
 
 static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm* mvm, struct aes_sc* scs,
                                    struct ieee80211_sta* sta, struct ieee80211_key_conf* key) {
-    int tid;
+  int tid;
 
-    BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+  BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
 
-    if (sta && iwl_mvm_has_new_rx_api(mvm)) {
-        struct iwl_mvm_sta* mvmsta;
-        struct iwl_mvm_key_pn* ptk_pn;
+  if (sta && iwl_mvm_has_new_rx_api(mvm)) {
+    struct iwl_mvm_sta* mvmsta;
+    struct iwl_mvm_key_pn* ptk_pn;
 
-        mvmsta = iwl_mvm_sta_from_mac80211(sta);
+    mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-        ptk_pn =
-            rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx], lockdep_is_held(&mvm->mutex));
-        if (WARN_ON(!ptk_pn)) { return; }
-
-        for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
-            struct ieee80211_key_seq seq = {};
-            int i;
-
-            iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
-            ieee80211_set_key_rx_seq(key, tid, &seq);
-            for (i = 1; i < mvm->trans->num_rx_queues; i++) {
-                memcpy(ptk_pn->q[i].pn[tid], seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
-            }
-        }
-    } else {
-        for (tid = 0; tid < IWL_NUM_RSC; tid++) {
-            struct ieee80211_key_seq seq = {};
-
-            iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
-            ieee80211_set_key_rx_seq(key, tid, &seq);
-        }
+    ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx], lockdep_is_held(&mvm->mutex));
+    if (WARN_ON(!ptk_pn)) {
+      return;
     }
+
+    for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+      struct ieee80211_key_seq seq = {};
+      int i;
+
+      iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
+      ieee80211_set_key_rx_seq(key, tid, &seq);
+      for (i = 1; i < mvm->trans->num_rx_queues; i++) {
+        memcpy(ptk_pn->q[i].pn[tid], seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
+      }
+    }
+  } else {
+    for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+      struct ieee80211_key_seq seq = {};
+
+      iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
+      ieee80211_set_key_rx_seq(key, tid, &seq);
+    }
+  }
 }
 
 static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc* scs, struct ieee80211_key_conf* key) {
-    int tid;
+  int tid;
 
-    BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+  BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
 
-    for (tid = 0; tid < IWL_NUM_RSC; tid++) {
-        struct ieee80211_key_seq seq = {};
+  for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+    struct ieee80211_key_seq seq = {};
 
-        iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
-        ieee80211_set_key_rx_seq(key, tid, &seq);
-    }
+    iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
+    ieee80211_set_key_rx_seq(key, tid, &seq);
+  }
 }
 
 static void iwl_mvm_set_key_rx_seq(struct iwl_mvm* mvm, struct ieee80211_key_conf* key,
                                    struct iwl_wowlan_status* status) {
-    union iwl_all_tsc_rsc* rsc = &status->gtk[0].rsc.all_tsc_rsc;
+  union iwl_all_tsc_rsc* rsc = &status->gtk[0].rsc.all_tsc_rsc;
 
-    switch (key->cipher) {
+  switch (key->cipher) {
     case WLAN_CIPHER_SUITE_CCMP:
-        iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
-        break;
+      iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
+      break;
     case WLAN_CIPHER_SUITE_TKIP:
-        iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
-        break;
+      iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
+      break;
     default:
-        WARN_ON(1);
-    }
+      WARN_ON(1);
+  }
 }
 
 struct iwl_mvm_d3_gtk_iter_data {
-    struct iwl_mvm* mvm;
-    struct iwl_wowlan_status* status;
-    void* last_gtk;
-    uint32_t cipher;
-    bool find_phase, unhandled_cipher;
-    int num_keys;
+  struct iwl_mvm* mvm;
+  struct iwl_wowlan_status* status;
+  void* last_gtk;
+  uint32_t cipher;
+  bool find_phase, unhandled_cipher;
+  int num_keys;
 };
 
 static void iwl_mvm_d3_update_keys(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                                    struct ieee80211_sta* sta, struct ieee80211_key_conf* key,
                                    void* _data) {
-    struct iwl_mvm_d3_gtk_iter_data* data = _data;
+  struct iwl_mvm_d3_gtk_iter_data* data = _data;
 
-    if (data->unhandled_cipher) { return; }
+  if (data->unhandled_cipher) {
+    return;
+  }
 
-    switch (key->cipher) {
+  switch (key->cipher) {
     case WLAN_CIPHER_SUITE_WEP40:
     case WLAN_CIPHER_SUITE_WEP104:
-        /* ignore WEP completely, nothing to do */
-        return;
+      /* ignore WEP completely, nothing to do */
+      return;
     case WLAN_CIPHER_SUITE_CCMP:
     case WLAN_CIPHER_SUITE_TKIP:
-        /* we support these */
-        break;
+      /* we support these */
+      break;
     default:
-        /* everything else (even CMAC for MFP) - disconnect from AP */
-        data->unhandled_cipher = true;
-        return;
-    }
+      /* everything else (even CMAC for MFP) - disconnect from AP */
+      data->unhandled_cipher = true;
+      return;
+  }
 
-    data->num_keys++;
+  data->num_keys++;
 
-    /*
-     * pairwise key - update sequence counters only;
-     * note that this assumes no TDLS sessions are active
-     */
-    if (sta) {
-        struct ieee80211_key_seq seq = {};
-        union iwl_all_tsc_rsc* sc = &data->status->gtk[0].rsc.all_tsc_rsc;
-
-        if (data->find_phase) { return; }
-
-        switch (key->cipher) {
-        case WLAN_CIPHER_SUITE_CCMP:
-            iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc, sta, key);
-            atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
-            break;
-        case WLAN_CIPHER_SUITE_TKIP:
-            iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
-            iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
-            atomic64_set(&key->tx_pn, (uint64_t)seq.tkip.iv16 | ((uint64_t)seq.tkip.iv32 << 16));
-            break;
-        }
-
-        /* that's it for this key */
-        return;
-    }
+  /*
+   * pairwise key - update sequence counters only;
+   * note that this assumes no TDLS sessions are active
+   */
+  if (sta) {
+    struct ieee80211_key_seq seq = {};
+    union iwl_all_tsc_rsc* sc = &data->status->gtk[0].rsc.all_tsc_rsc;
 
     if (data->find_phase) {
-        data->last_gtk = key;
-        data->cipher = key->cipher;
-        return;
+      return;
     }
 
-    if (data->status->num_of_gtk_rekeys) {
-        ieee80211_remove_key(key);
-    } else if (data->last_gtk == key) {
-        iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
+    switch (key->cipher) {
+      case WLAN_CIPHER_SUITE_CCMP:
+        iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc, sta, key);
+        atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
+        break;
+      case WLAN_CIPHER_SUITE_TKIP:
+        iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
+        iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+        atomic64_set(&key->tx_pn, (uint64_t)seq.tkip.iv16 | ((uint64_t)seq.tkip.iv32 << 16));
+        break;
     }
+
+    /* that's it for this key */
+    return;
+  }
+
+  if (data->find_phase) {
+    data->last_gtk = key;
+    data->cipher = key->cipher;
+    return;
+  }
+
+  if (data->status->num_of_gtk_rekeys) {
+    ieee80211_remove_key(key);
+  } else if (data->last_gtk == key) {
+    iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
+  }
 }
 
 static bool iwl_mvm_setup_connection_keep(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                           struct iwl_wowlan_status* status) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_d3_gtk_iter_data gtkdata = {
-        .mvm = mvm,
-        .status = status,
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+      .mvm = mvm,
+      .status = status,
+  };
+  uint32_t disconnection_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+                                   IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+
+  if (!status || !vif->bss_conf.bssid) {
+    return false;
+  }
+
+  if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons) {
+    return false;
+  }
+
+  /* find last GTK that we used initially, if any */
+  gtkdata.find_phase = true;
+  ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_d3_update_keys, &gtkdata);
+  /* not trying to keep connections with MFP/unhandled ciphers */
+  if (gtkdata.unhandled_cipher) {
+    return false;
+  }
+  if (!gtkdata.num_keys) {
+    goto out;
+  }
+  if (!gtkdata.last_gtk) {
+    return false;
+  }
+
+  /*
+   * invalidate all other GTKs that might still exist and update
+   * the one that we used
+   */
+  gtkdata.find_phase = false;
+  ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_d3_update_keys, &gtkdata);
+
+  if (status->num_of_gtk_rekeys) {
+    struct ieee80211_key_conf* key;
+    struct {
+      struct ieee80211_key_conf conf;
+      uint8_t key[32];
+    } conf = {
+        .conf.cipher = gtkdata.cipher,
+        .conf.keyidx = iwlmvm_wowlan_gtk_idx(&status->gtk[0]),
     };
-    uint32_t disconnection_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
-                                     IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+    __be64 replay_ctr;
 
-    if (!status || !vif->bss_conf.bssid) { return false; }
-
-    if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons) { return false; }
-
-    /* find last GTK that we used initially, if any */
-    gtkdata.find_phase = true;
-    ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_d3_update_keys, &gtkdata);
-    /* not trying to keep connections with MFP/unhandled ciphers */
-    if (gtkdata.unhandled_cipher) { return false; }
-    if (!gtkdata.num_keys) { goto out; }
-    if (!gtkdata.last_gtk) { return false; }
-
-    /*
-     * invalidate all other GTKs that might still exist and update
-     * the one that we used
-     */
-    gtkdata.find_phase = false;
-    ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_d3_update_keys, &gtkdata);
-
-    if (status->num_of_gtk_rekeys) {
-        struct ieee80211_key_conf* key;
-        struct {
-            struct ieee80211_key_conf conf;
-            uint8_t key[32];
-        } conf = {
-            .conf.cipher = gtkdata.cipher,
-            .conf.keyidx = iwlmvm_wowlan_gtk_idx(&status->gtk[0]),
-        };
-        __be64 replay_ctr;
-
-        switch (gtkdata.cipher) {
-        case WLAN_CIPHER_SUITE_CCMP:
-            conf.conf.keylen = WLAN_KEY_LEN_CCMP;
-            memcpy(conf.conf.key, status->gtk[0].key, WLAN_KEY_LEN_CCMP);
-            break;
-        case WLAN_CIPHER_SUITE_TKIP:
-            conf.conf.keylen = WLAN_KEY_LEN_TKIP;
-            memcpy(conf.conf.key, status->gtk[0].key, 16);
-            /* leave TX MIC key zeroed, we don't use it anyway */
-            memcpy(conf.conf.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, status->gtk[0].tkip_mic_key,
-                   8);
-            break;
-        }
-
-        key = ieee80211_gtk_rekey_add(vif, &conf.conf);
-        if (IS_ERR(key)) { return false; }
-        iwl_mvm_set_key_rx_seq(mvm, key, status);
-
-        replay_ctr = cpu_to_be64(le64_to_cpu(status->replay_ctr));
-
-        ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void*)&replay_ctr, GFP_KERNEL);
+    switch (gtkdata.cipher) {
+      case WLAN_CIPHER_SUITE_CCMP:
+        conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+        memcpy(conf.conf.key, status->gtk[0].key, WLAN_KEY_LEN_CCMP);
+        break;
+      case WLAN_CIPHER_SUITE_TKIP:
+        conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+        memcpy(conf.conf.key, status->gtk[0].key, 16);
+        /* leave TX MIC key zeroed, we don't use it anyway */
+        memcpy(conf.conf.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, status->gtk[0].tkip_mic_key, 8);
+        break;
     }
 
-out:
-    mvmvif->seqno_valid = true;
-    /* +0x10 because the set API expects next-to-use, not last-used */
-    mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+    key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+    if (IS_ERR(key)) {
+      return false;
+    }
+    iwl_mvm_set_key_rx_seq(mvm, key, status);
 
-    return true;
+    replay_ctr = cpu_to_be64(le64_to_cpu(status->replay_ctr));
+
+    ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void*)&replay_ctr, GFP_KERNEL);
+  }
+
+out:
+  mvmvif->seqno_valid = true;
+  /* +0x10 because the set API expects next-to-use, not last-used */
+  mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+
+  return true;
 }
 
 struct iwl_wowlan_status* iwl_mvm_send_wowlan_get_status(struct iwl_mvm* mvm) {
-    struct iwl_wowlan_status *v7, *status;
-    struct iwl_host_cmd cmd = {
-        .id = WOWLAN_GET_STATUSES,
-        .flags = CMD_WANT_SKB,
-    };
-    int ret, len, status_size;
+  struct iwl_wowlan_status *v7, *status;
+  struct iwl_host_cmd cmd = {
+      .id = WOWLAN_GET_STATUSES,
+      .flags = CMD_WANT_SKB,
+  };
+  int ret, len, status_size;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    ret = iwl_mvm_send_cmd(mvm, &cmd);
-    if (ret) {
-        IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret);
-        return ERR_PTR(ret);
-    }
+  ret = iwl_mvm_send_cmd(mvm, &cmd);
+  if (ret) {
+    IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret);
+    return ERR_PTR(ret);
+  }
 
-    if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
-        struct iwl_wowlan_status_v6* v6 = (void*)cmd.resp_pkt->data;
-        int data_size;
+  if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
+    struct iwl_wowlan_status_v6* v6 = (void*)cmd.resp_pkt->data;
+    int data_size;
 
-        status_size = sizeof(*v6);
-        len = iwl_rx_packet_payload_len(cmd.resp_pkt);
-
-        if (len < status_size) {
-            IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-            status = ERR_PTR(-EIO);
-            goto out_free_resp;
-        }
-
-        data_size = ALIGN(le32_to_cpu(v6->wake_packet_bufsize), 4);
-
-        if (len != (status_size + data_size)) {
-            IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-            status = ERR_PTR(-EIO);
-            goto out_free_resp;
-        }
-
-        status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
-        if (!status) { goto out_free_resp; }
-
-        BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > sizeof(status->gtk[0].key));
-        BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) > sizeof(status->gtk[0].tkip_mic_key));
-
-        /* copy GTK info to the right place */
-        memcpy(status->gtk[0].key, v6->gtk.decrypt_key, sizeof(v6->gtk.decrypt_key));
-        memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key, sizeof(v6->gtk.tkip_mic_key));
-        memcpy(&status->gtk[0].rsc, &v6->gtk.rsc, sizeof(status->gtk[0].rsc));
-
-        /* hardcode the key length to 16 since v6 only supports 16 */
-        status->gtk[0].key_len = 16;
-
-        /*
-         * The key index only uses 2 bits (values 0 to 3) and
-         * we always set bit 7 which means this is the
-         * currently used key.
-         */
-        status->gtk[0].key_flags = v6->gtk.key_index | BIT(7);
-
-        status->replay_ctr = v6->replay_ctr;
-
-        /* everything starting from pattern_number is identical */
-        memcpy(&status->pattern_number, &v6->pattern_number,
-               offsetof(struct iwl_wowlan_status, wake_packet) -
-                   offsetof(struct iwl_wowlan_status, pattern_number) + data_size);
-
-        goto out_free_resp;
-    }
-
-    v7 = (void*)cmd.resp_pkt->data;
-    status_size = sizeof(*v7);
+    status_size = sizeof(*v6);
     len = iwl_rx_packet_payload_len(cmd.resp_pkt);
 
     if (len < status_size) {
-        IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-        status = ERR_PTR(-EIO);
-        goto out_free_resp;
+      IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+      status = ERR_PTR(-EIO);
+      goto out_free_resp;
     }
 
-    if (len != (status_size + ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4))) {
-        IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-        status = ERR_PTR(-EIO);
-        goto out_free_resp;
+    data_size = ALIGN(le32_to_cpu(v6->wake_packet_bufsize), 4);
+
+    if (len != (status_size + data_size)) {
+      IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+      status = ERR_PTR(-EIO);
+      goto out_free_resp;
     }
 
-    status = kmemdup(v7, len, GFP_KERNEL);
+    status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
+    if (!status) {
+      goto out_free_resp;
+    }
+
+    BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > sizeof(status->gtk[0].key));
+    BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) > sizeof(status->gtk[0].tkip_mic_key));
+
+    /* copy GTK info to the right place */
+    memcpy(status->gtk[0].key, v6->gtk.decrypt_key, sizeof(v6->gtk.decrypt_key));
+    memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key, sizeof(v6->gtk.tkip_mic_key));
+    memcpy(&status->gtk[0].rsc, &v6->gtk.rsc, sizeof(status->gtk[0].rsc));
+
+    /* hardcode the key length to 16 since v6 only supports 16 */
+    status->gtk[0].key_len = 16;
+
+    /*
+     * The key index only uses 2 bits (values 0 to 3) and
+     * we always set bit 7 which means this is the
+     * currently used key.
+     */
+    status->gtk[0].key_flags = v6->gtk.key_index | BIT(7);
+
+    status->replay_ctr = v6->replay_ctr;
+
+    /* everything starting from pattern_number is identical */
+    memcpy(&status->pattern_number, &v6->pattern_number,
+           offsetof(struct iwl_wowlan_status, wake_packet) -
+               offsetof(struct iwl_wowlan_status, pattern_number) + data_size);
+
+    goto out_free_resp;
+  }
+
+  v7 = (void*)cmd.resp_pkt->data;
+  status_size = sizeof(*v7);
+  len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+
+  if (len < status_size) {
+    IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+    status = ERR_PTR(-EIO);
+    goto out_free_resp;
+  }
+
+  if (len != (status_size + ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4))) {
+    IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+    status = ERR_PTR(-EIO);
+    goto out_free_resp;
+  }
+
+  status = kmemdup(v7, len, GFP_KERNEL);
 
 out_free_resp:
-    iwl_free_resp(&cmd);
-    return status;
+  iwl_free_resp(&cmd);
+  return status;
 }
 
 static struct iwl_wowlan_status* iwl_mvm_get_wakeup_status(struct iwl_mvm* mvm) {
-    int ret;
+  int ret;
 
-    /* only for tracing for now */
-    ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
-    if (ret) { IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); }
+  /* only for tracing for now */
+  ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
+  if (ret) {
+    IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+  }
 
-    return iwl_mvm_send_wowlan_get_status(mvm);
+  return iwl_mvm_send_wowlan_get_status(mvm);
 }
 
 /* releases the MVM mutex */
 static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_wowlan_status_data status;
-    struct iwl_wowlan_status* fw_status;
-    int i;
-    bool keep;
-    struct iwl_mvm_sta* mvm_ap_sta;
+  struct iwl_wowlan_status_data status;
+  struct iwl_wowlan_status* fw_status;
+  int i;
+  bool keep;
+  struct iwl_mvm_sta* mvm_ap_sta;
 
-    fw_status = iwl_mvm_get_wakeup_status(mvm);
-    if (IS_ERR_OR_NULL(fw_status)) { goto out_unlock; }
+  fw_status = iwl_mvm_get_wakeup_status(mvm);
+  if (IS_ERR_OR_NULL(fw_status)) {
+    goto out_unlock;
+  }
 
-    status.pattern_number = le16_to_cpu(fw_status->pattern_number);
-    for (i = 0; i < 8; i++) {
-        status.qos_seq_ctr[i] = le16_to_cpu(fw_status->qos_seq_ctr[i]);
-    }
-    status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
-    status.wake_packet_length = le32_to_cpu(fw_status->wake_packet_length);
-    status.wake_packet_bufsize = le32_to_cpu(fw_status->wake_packet_bufsize);
-    status.wake_packet = fw_status->wake_packet;
+  status.pattern_number = le16_to_cpu(fw_status->pattern_number);
+  for (i = 0; i < 8; i++) {
+    status.qos_seq_ctr[i] = le16_to_cpu(fw_status->qos_seq_ctr[i]);
+  }
+  status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
+  status.wake_packet_length = le32_to_cpu(fw_status->wake_packet_length);
+  status.wake_packet_bufsize = le32_to_cpu(fw_status->wake_packet_bufsize);
+  status.wake_packet = fw_status->wake_packet;
 
-    /* still at hard-coded place 0 for D3 image */
-    mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
-    if (!mvm_ap_sta) { goto out_free; }
+  /* still at hard-coded place 0 for D3 image */
+  mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
+  if (!mvm_ap_sta) {
+    goto out_free;
+  }
 
-    for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-        uint16_t seq = status.qos_seq_ctr[i];
-        /* firmware stores last-used value, we store next value */
-        seq += 0x10;
-        mvm_ap_sta->tid_data[i].seq_number = seq;
-    }
+  for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+    uint16_t seq = status.qos_seq_ctr[i];
+    /* firmware stores last-used value, we store next value */
+    seq += 0x10;
+    mvm_ap_sta->tid_data[i].seq_number = seq;
+  }
 
-    /* now we have all the data we need, unlock to avoid mac80211 issues */
-    mutex_unlock(&mvm->mutex);
+  /* now we have all the data we need, unlock to avoid mac80211 issues */
+  mutex_unlock(&mvm->mutex);
 
-    iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
+  iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
 
-    keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
+  keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
 
-    kfree(fw_status);
-    return keep;
+  kfree(fw_status);
+  return keep;
 
 out_free:
-    kfree(fw_status);
+  kfree(fw_status);
 out_unlock:
-    mutex_unlock(&mvm->mutex);
-    return false;
+  mutex_unlock(&mvm->mutex);
+  return false;
 }
 
 void iwl_mvm_d0i3_update_keys(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                               struct iwl_wowlan_status* status) {
-    struct iwl_mvm_d3_gtk_iter_data gtkdata = {
-        .mvm = mvm,
-        .status = status,
-    };
+  struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+      .mvm = mvm,
+      .status = status,
+  };
 
-    /*
-     * rekey handling requires taking locks that can't be taken now.
-     * however, d0i3 doesn't offload rekey, so we're fine.
-     */
-    if (WARN_ON_ONCE(status->num_of_gtk_rekeys)) { return; }
+  /*
+   * rekey handling requires taking locks that can't be taken now.
+   * however, d0i3 doesn't offload rekey, so we're fine.
+   */
+  if (WARN_ON_ONCE(status->num_of_gtk_rekeys)) {
+    return;
+  }
 
-    /* find last GTK that we used initially, if any */
-    gtkdata.find_phase = true;
-    iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
+  /* find last GTK that we used initially, if any */
+  gtkdata.find_phase = true;
+  iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
 
-    gtkdata.find_phase = false;
-    iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
+  gtkdata.find_phase = false;
+  iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
 }
 
 struct iwl_mvm_nd_query_results {
-    uint32_t matched_profiles;
-    struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+  uint32_t matched_profiles;
+  struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
 };
 
 static int iwl_mvm_netdetect_query_results(struct iwl_mvm* mvm,
                                            struct iwl_mvm_nd_query_results* results) {
-    struct iwl_scan_offload_profiles_query* query;
-    struct iwl_host_cmd cmd = {
-        .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
-        .flags = CMD_WANT_SKB,
-    };
-    int ret, len;
+  struct iwl_scan_offload_profiles_query* query;
+  struct iwl_host_cmd cmd = {
+      .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
+      .flags = CMD_WANT_SKB,
+  };
+  int ret, len;
 
-    ret = iwl_mvm_send_cmd(mvm, &cmd);
-    if (ret) {
-        IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
-        return ret;
-    }
+  ret = iwl_mvm_send_cmd(mvm, &cmd);
+  if (ret) {
+    IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
+    return ret;
+  }
 
-    len = iwl_rx_packet_payload_len(cmd.resp_pkt);
-    if (len < sizeof(*query)) {
-        IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
-        ret = -EIO;
-        goto out_free_resp;
-    }
+  len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+  if (len < sizeof(*query)) {
+    IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
+    ret = -EIO;
+    goto out_free_resp;
+  }
 
-    query = (void*)cmd.resp_pkt->data;
+  query = (void*)cmd.resp_pkt->data;
 
-    results->matched_profiles = le32_to_cpu(query->matched_profiles);
-    memcpy(results->matches, query->matches, sizeof(results->matches));
+  results->matched_profiles = le32_to_cpu(query->matched_profiles);
+  memcpy(results->matches, query->matches, sizeof(results->matches));
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
+  mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
 #endif
 
 out_free_resp:
-    iwl_free_resp(&cmd);
-    return ret;
+  iwl_free_resp(&cmd);
+  return ret;
 }
 
 static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct cfg80211_wowlan_nd_info* net_detect = NULL;
-    struct cfg80211_wowlan_wakeup wakeup = {
-        .pattern_idx = -1,
-    };
-    struct cfg80211_wowlan_wakeup* wakeup_report = &wakeup;
-    struct iwl_mvm_nd_query_results query;
-    struct iwl_wowlan_status* fw_status;
-    unsigned long matched_profiles;
-    uint32_t reasons = 0;
-    int i, j, n_matches, ret;
+  struct cfg80211_wowlan_nd_info* net_detect = NULL;
+  struct cfg80211_wowlan_wakeup wakeup = {
+      .pattern_idx = -1,
+  };
+  struct cfg80211_wowlan_wakeup* wakeup_report = &wakeup;
+  struct iwl_mvm_nd_query_results query;
+  struct iwl_wowlan_status* fw_status;
+  unsigned long matched_profiles;
+  uint32_t reasons = 0;
+  int i, j, n_matches, ret;
 
-    fw_status = iwl_mvm_get_wakeup_status(mvm);
-    if (!IS_ERR_OR_NULL(fw_status)) {
-        reasons = le32_to_cpu(fw_status->wakeup_reasons);
-        kfree(fw_status);
+  fw_status = iwl_mvm_get_wakeup_status(mvm);
+  if (!IS_ERR_OR_NULL(fw_status)) {
+    reasons = le32_to_cpu(fw_status->wakeup_reasons);
+    kfree(fw_status);
+  }
+
+  if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) {
+    wakeup.rfkill_release = true;
+  }
+
+  if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+    goto out;
+  }
+
+  ret = iwl_mvm_netdetect_query_results(mvm, &query);
+  if (ret || !query.matched_profiles) {
+    wakeup_report = NULL;
+    goto out;
+  }
+
+  matched_profiles = query.matched_profiles;
+  if (mvm->n_nd_match_sets) {
+    n_matches = hweight_long(matched_profiles);
+  } else {
+    IWL_ERR(mvm, "no net detect match information available\n");
+    n_matches = 0;
+  }
+
+  net_detect = kzalloc(struct_size(net_detect, matches, n_matches), GFP_KERNEL);
+  if (!net_detect || !n_matches) {
+    goto out_report_nd;
+  }
+
+  for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
+    struct iwl_scan_offload_profile_match* fw_match;
+    struct cfg80211_wowlan_nd_match* match;
+    int idx, n_channels = 0;
+
+    fw_match = &query.matches[i];
+
+    for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++) {
+      n_channels += hweight8(fw_match->matching_channels[j]);
     }
 
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) { wakeup.rfkill_release = true; }
-
-    if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { goto out; }
-
-    ret = iwl_mvm_netdetect_query_results(mvm, &query);
-    if (ret || !query.matched_profiles) {
-        wakeup_report = NULL;
-        goto out;
+    match = kzalloc(struct_size(match, channels, n_channels), GFP_KERNEL);
+    if (!match) {
+      goto out_report_nd;
     }
 
-    matched_profiles = query.matched_profiles;
-    if (mvm->n_nd_match_sets) {
-        n_matches = hweight_long(matched_profiles);
-    } else {
-        IWL_ERR(mvm, "no net detect match information available\n");
-        n_matches = 0;
+    net_detect->matches[net_detect->n_matches++] = match;
+
+    /* We inverted the order of the SSIDs in the scan
+     * request, so invert the index here.
+     */
+    idx = mvm->n_nd_match_sets - i - 1;
+    match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
+    memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, match->ssid.ssid_len);
+
+    if (mvm->n_nd_channels < n_channels) {
+      continue;
     }
 
-    net_detect = kzalloc(struct_size(net_detect, matches, n_matches), GFP_KERNEL);
-    if (!net_detect || !n_matches) { goto out_report_nd; }
-
-    for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
-        struct iwl_scan_offload_profile_match* fw_match;
-        struct cfg80211_wowlan_nd_match* match;
-        int idx, n_channels = 0;
-
-        fw_match = &query.matches[i];
-
-        for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++) {
-            n_channels += hweight8(fw_match->matching_channels[j]);
-        }
-
-        match = kzalloc(struct_size(match, channels, n_channels), GFP_KERNEL);
-        if (!match) { goto out_report_nd; }
-
-        net_detect->matches[net_detect->n_matches++] = match;
-
-        /* We inverted the order of the SSIDs in the scan
-         * request, so invert the index here.
-         */
-        idx = mvm->n_nd_match_sets - i - 1;
-        match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
-        memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, match->ssid.ssid_len);
-
-        if (mvm->n_nd_channels < n_channels) { continue; }
-
-        for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
-            if (fw_match->matching_channels[j / 8] & (BIT(j % 8))) {
-                match->channels[match->n_channels++] = mvm->nd_channels[j]->center_freq;
-            }
-    }
+    for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
+      if (fw_match->matching_channels[j / 8] & (BIT(j % 8))) {
+        match->channels[match->n_channels++] = mvm->nd_channels[j]->center_freq;
+      }
+  }
 
 out_report_nd:
-    wakeup.net_detect = net_detect;
+  wakeup.net_detect = net_detect;
 out:
-    iwl_mvm_free_nd(mvm);
+  iwl_mvm_free_nd(mvm);
 
-    mutex_unlock(&mvm->mutex);
-    ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+  mutex_unlock(&mvm->mutex);
+  ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
 
-    if (net_detect) {
-        for (i = 0; i < net_detect->n_matches; i++) {
-            kfree(net_detect->matches[i]);
-        }
-        kfree(net_detect);
+  if (net_detect) {
+    for (i = 0; i < net_detect->n_matches; i++) {
+      kfree(net_detect->matches[i]);
     }
+    kfree(net_detect);
+  }
 }
 
 static void iwl_mvm_read_d3_sram(struct iwl_mvm* mvm) {
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    const struct fw_img* img = &mvm->fw->img[IWL_UCODE_WOWLAN];
-    uint32_t len = img->sec[IWL_UCODE_SECTION_DATA].len;
-    uint32_t offs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+  const struct fw_img* img = &mvm->fw->img[IWL_UCODE_WOWLAN];
+  uint32_t len = img->sec[IWL_UCODE_SECTION_DATA].len;
+  uint32_t offs = img->sec[IWL_UCODE_SECTION_DATA].offset;
 
-    if (!mvm->store_d3_resume_sram) { return; }
+  if (!mvm->store_d3_resume_sram) {
+    return;
+  }
 
+  if (!mvm->d3_resume_sram) {
+    mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
     if (!mvm->d3_resume_sram) {
-        mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
-        if (!mvm->d3_resume_sram) { return; }
+      return;
     }
+  }
 
-    iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
+  iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
 #endif
 }
 
 static void iwl_mvm_d3_disconnect_iter(void* data, uint8_t* mac, struct ieee80211_vif* vif) {
-    /* skip the one we keep connection on */
-    if (data == vif) { return; }
+  /* skip the one we keep connection on */
+  if (data == vif) {
+    return;
+  }
 
-    if (vif->type == NL80211_IFTYPE_STATION) { ieee80211_resume_disconnect(vif); }
+  if (vif->type == NL80211_IFTYPE_STATION) {
+    ieee80211_resume_disconnect(vif);
+  }
 }
 
 static int iwl_mvm_check_rt_status(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    uint32_t base = mvm->error_event_table[0];
-    struct error_table_start {
-        /* cf. struct iwl_error_event_table */
-        uint32_t valid;
-        uint32_t error_id;
-    } err_info;
+  uint32_t base = mvm->error_event_table[0];
+  struct error_table_start {
+    /* cf. struct iwl_error_event_table */
+    uint32_t valid;
+    uint32_t error_id;
+  } err_info;
 
-    iwl_trans_read_mem_bytes(mvm->trans, base, &err_info, sizeof(err_info));
+  iwl_trans_read_mem_bytes(mvm->trans, base, &err_info, sizeof(err_info));
 
-    if (err_info.valid && err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
-        struct cfg80211_wowlan_wakeup wakeup = {
-            .rfkill_release = true,
-        };
-        ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
-    }
-    return err_info.valid;
+  if (err_info.valid && err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+    struct cfg80211_wowlan_wakeup wakeup = {
+        .rfkill_release = true,
+    };
+    ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+  }
+  return err_info.valid;
 }
 
 static int __iwl_mvm_resume(struct iwl_mvm* mvm, bool test) {
-    struct ieee80211_vif* vif = NULL;
-    int ret = 1;
-    enum iwl_d3_status d3_status;
-    bool keep = false;
-    bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
-    bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
+  struct ieee80211_vif* vif = NULL;
+  int ret = 1;
+  enum iwl_d3_status d3_status;
+  bool keep = false;
+  bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+  bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    /* get the BSS vif pointer again */
-    vif = iwl_mvm_get_bss_vif(mvm);
-    if (IS_ERR_OR_NULL(vif)) { goto err; }
+  /* get the BSS vif pointer again */
+  vif = iwl_mvm_get_bss_vif(mvm);
+  if (IS_ERR_OR_NULL(vif)) {
+    goto err;
+  }
 
-    ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
-    if (ret) { goto err; }
+  ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
+  if (ret) {
+    goto err;
+  }
 
-    if (d3_status != IWL_D3_STATUS_ALIVE) {
-        IWL_INFO(mvm, "Device was reset during suspend\n");
-        goto err;
+  if (d3_status != IWL_D3_STATUS_ALIVE) {
+    IWL_INFO(mvm, "Device was reset during suspend\n");
+    goto err;
+  }
+
+  iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
+  /* query SRAM first in case we want event logging */
+  iwl_mvm_read_d3_sram(mvm);
+
+  if (iwl_mvm_check_rt_status(mvm, vif)) {
+    set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+    iwl_mvm_dump_nic_error_log(mvm);
+    iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, false, 0);
+    ret = 1;
+    goto err;
+  }
+
+  if (d0i3_first) {
+    ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+    if (ret < 0) {
+      IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n", ret);
+      goto err;
     }
+  }
 
-    iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
-    /* query SRAM first in case we want event logging */
-    iwl_mvm_read_d3_sram(mvm);
+  /*
+   * Query the current location and source from the D3 firmware so we
+   * can play it back when we re-intiailize the D0 firmware
+   */
+  iwl_mvm_update_changed_regdom(mvm);
 
-    if (iwl_mvm_check_rt_status(mvm, vif)) {
-        set_bit(STATUS_FW_ERROR, &mvm->trans->status);
-        iwl_mvm_dump_nic_error_log(mvm);
-        iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, false, 0);
-        ret = 1;
-        goto err;
-    }
+  if (!unified_image) { /*  Re-configure default SAR profile */
+    iwl_mvm_sar_select_profile(mvm, 1, 1);
+  }
 
-    if (d0i3_first) {
-        ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
-        if (ret < 0) {
-            IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n", ret);
-            goto err;
-        }
-    }
-
-    /*
-     * Query the current location and source from the D3 firmware so we
-     * can play it back when we re-intiailize the D0 firmware
+  if (mvm->net_detect) {
+    /* If this is a non-unified image, we restart the FW,
+     * so no need to stop the netdetect scan.  If that
+     * fails, continue and try to get the wake-up reasons,
+     * but trigger a HW restart by keeping a failure code
+     * in ret.
      */
-    iwl_mvm_update_changed_regdom(mvm);
-
-    if (!unified_image) { /*  Re-configure default SAR profile */
-        iwl_mvm_sar_select_profile(mvm, 1, 1);
+    if (unified_image) {
+      ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, false);
     }
 
-    if (mvm->net_detect) {
-        /* If this is a non-unified image, we restart the FW,
-         * so no need to stop the netdetect scan.  If that
-         * fails, continue and try to get the wake-up reasons,
-         * but trigger a HW restart by keeping a failure code
-         * in ret.
-         */
-        if (unified_image) { ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, false); }
-
-        iwl_mvm_query_netdetect_reasons(mvm, vif);
-        /* has unlocked the mutex, so skip that */
-        goto out;
-    } else {
-        keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
+    iwl_mvm_query_netdetect_reasons(mvm, vif);
+    /* has unlocked the mutex, so skip that */
+    goto out;
+  } else {
+    keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-        if (keep) { mvm->keep_vif = vif; }
-#endif
-        /* has unlocked the mutex, so skip that */
-        goto out_iterate;
+    if (keep) {
+      mvm->keep_vif = vif;
     }
+#endif
+    /* has unlocked the mutex, so skip that */
+    goto out_iterate;
+  }
 
 err:
-    iwl_mvm_free_nd(mvm);
-    mutex_unlock(&mvm->mutex);
+  iwl_mvm_free_nd(mvm);
+  mutex_unlock(&mvm->mutex);
 
 out_iterate:
-    if (!test)
-        ieee80211_iterate_active_interfaces_rtnl(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                                 iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
+  if (!test)
+    ieee80211_iterate_active_interfaces_rtnl(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
 
 out:
-    /* no need to reset the device in unified images, if successful */
-    if (unified_image && !ret) {
-        /* nothing else to do if we already sent D0I3_END_CMD */
-        if (d0i3_first) { return 0; }
-
-        ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
-        if (!ret) { return 0; }
+  /* no need to reset the device in unified images, if successful */
+  if (unified_image && !ret) {
+    /* nothing else to do if we already sent D0I3_END_CMD */
+    if (d0i3_first) {
+      return 0;
     }
 
-    /*
-     * Reconfigure the device in one of the following cases:
-     * 1. We are not using a unified image
-     * 2. We are using a unified image but had an error while exiting D3
-     */
-    set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
-    set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
-    /*
-     * When switching images we return 1, which causes mac80211
-     * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
-     * This type of reconfig calls iwl_mvm_restart_complete(),
-     * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
-     * to take the reference here.
-     */
-    iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+    ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+    if (!ret) {
+      return 0;
+    }
+  }
 
-    return 1;
+  /*
+   * Reconfigure the device in one of the following cases:
+   * 1. We are not using a unified image
+   * 2. We are using a unified image but had an error while exiting D3
+   */
+  set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+  set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
+  /*
+   * When switching images we return 1, which causes mac80211
+   * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
+   * This type of reconfig calls iwl_mvm_restart_complete(),
+   * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
+   * to take the reference here.
+   */
+  iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+  return 1;
 }
 
 static int iwl_mvm_resume_d3(struct iwl_mvm* mvm) {
-    iwl_trans_resume(mvm->trans);
+  iwl_trans_resume(mvm->trans);
 
-    return __iwl_mvm_resume(mvm, false);
+  return __iwl_mvm_resume(mvm, false);
 }
 
 static int iwl_mvm_resume_d0i3(struct iwl_mvm* mvm) {
-    bool exit_now;
-    enum iwl_d3_status d3_status;
-    struct iwl_trans* trans = mvm->trans;
+  bool exit_now;
+  enum iwl_d3_status d3_status;
+  struct iwl_trans* trans = mvm->trans;
 
-    iwl_trans_d3_resume(trans, &d3_status, false, false);
+  iwl_trans_d3_resume(trans, &d3_status, false, false);
 
+  /*
+   * make sure to clear D0I3_DEFER_WAKEUP before
+   * calling iwl_trans_resume(), which might wait
+   * for d0i3 exit completion.
+   */
+  mutex_lock(&mvm->d0i3_suspend_mutex);
+  __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+  exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
+  mutex_unlock(&mvm->d0i3_suspend_mutex);
+  if (exit_now) {
+    IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+    _iwl_mvm_exit_d0i3(mvm);
+  }
+
+  iwl_trans_resume(trans);
+
+  if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
+    int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
+
+    if (ret) {
+      return ret;
+    }
     /*
-     * make sure to clear D0I3_DEFER_WAKEUP before
-     * calling iwl_trans_resume(), which might wait
-     * for d0i3 exit completion.
+     * d0i3 exit will be deferred until reconfig_complete.
+     * make sure there we are out of d0i3.
      */
-    mutex_lock(&mvm->d0i3_suspend_mutex);
-    __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
-    exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
-    mutex_unlock(&mvm->d0i3_suspend_mutex);
-    if (exit_now) {
-        IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
-        _iwl_mvm_exit_d0i3(mvm);
-    }
-
-    iwl_trans_resume(trans);
-
-    if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
-        int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
-
-        if (ret) { return ret; }
-        /*
-         * d0i3 exit will be deferred until reconfig_complete.
-         * make sure there we are out of d0i3.
-         */
-    }
-    return 0;
+  }
+  return 0;
 }
 
 int iwl_mvm_resume(struct ieee80211_hw* hw) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    int ret;
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  int ret;
 
-    if (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
-        ret = iwl_mvm_resume_d0i3(mvm);
-    } else {
-        ret = iwl_mvm_resume_d3(mvm);
-    }
+  if (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
+    ret = iwl_mvm_resume_d0i3(mvm);
+  } else {
+    ret = iwl_mvm_resume_d3(mvm);
+  }
 
-    mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+  mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
 
-    iwl_mvm_resume_tcm(mvm);
+  iwl_mvm_resume_tcm(mvm);
 
-    iwl_fw_runtime_resume(&mvm->fwrt);
+  iwl_fw_runtime_resume(&mvm->fwrt);
 
-    return ret;
+  return ret;
 }
 
 void iwl_mvm_set_wakeup(struct ieee80211_hw* hw, bool enabled) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
 
-    device_set_wakeup_enable(mvm->trans->dev, enabled);
+  device_set_wakeup_enable(mvm->trans->dev, enabled);
 }
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
 static int iwl_mvm_d3_test_open(struct inode* inode, struct file* file) {
-    struct iwl_mvm* mvm = inode->i_private;
-    int err;
+  struct iwl_mvm* mvm = inode->i_private;
+  int err;
 
-    if (mvm->d3_test_active) { return -EBUSY; }
+  if (mvm->d3_test_active) {
+    return -EBUSY;
+  }
 
-    file->private_data = inode->i_private;
+  file->private_data = inode->i_private;
 
-    synchronize_net();
+  synchronize_net();
 
-    mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+  mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
 
-    iwl_mvm_pause_tcm(mvm, true);
+  iwl_mvm_pause_tcm(mvm, true);
 
-    iwl_fw_runtime_suspend(&mvm->fwrt);
+  iwl_fw_runtime_suspend(&mvm->fwrt);
 
-    /* start pseudo D3 */
-    rtnl_lock();
-    err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
-    rtnl_unlock();
-    if (err > 0) { err = -EINVAL; }
-    if (err) { return err; }
+  /* start pseudo D3 */
+  rtnl_lock();
+  err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
+  rtnl_unlock();
+  if (err > 0) {
+    err = -EINVAL;
+  }
+  if (err) {
+    return err;
+  }
 
-    mvm->d3_test_active = true;
-    mvm->keep_vif = NULL;
-    return 0;
+  mvm->d3_test_active = true;
+  mvm->keep_vif = NULL;
+  return 0;
 }
 
 static ssize_t iwl_mvm_d3_test_read(struct file* file, char __user* user_buf, size_t count,
                                     loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    uint32_t pme_asserted;
+  struct iwl_mvm* mvm = file->private_data;
+  uint32_t pme_asserted;
 
-    while (true) {
-        /* read pme_ptr if available */
-        if (mvm->d3_test_pme_ptr) {
-            pme_asserted = iwl_trans_read_mem32(mvm->trans, mvm->d3_test_pme_ptr);
-            if (pme_asserted) { break; }
-        }
-
-        if (msleep_interruptible(100)) { break; }
+  while (true) {
+    /* read pme_ptr if available */
+    if (mvm->d3_test_pme_ptr) {
+      pme_asserted = iwl_trans_read_mem32(mvm->trans, mvm->d3_test_pme_ptr);
+      if (pme_asserted) {
+        break;
+      }
     }
 
-    return 0;
+    if (msleep_interruptible(100)) {
+      break;
+    }
+  }
+
+  return 0;
 }
 
 static void iwl_mvm_d3_test_disconn_work_iter(void* _data, uint8_t* mac,
                                               struct ieee80211_vif* vif) {
-    /* skip the one we keep connection on */
-    if (_data == vif) { return; }
+  /* skip the one we keep connection on */
+  if (_data == vif) {
+    return;
+  }
 
-    if (vif->type == NL80211_IFTYPE_STATION) { ieee80211_connection_loss(vif); }
+  if (vif->type == NL80211_IFTYPE_STATION) {
+    ieee80211_connection_loss(vif);
+  }
 }
 
 static int iwl_mvm_d3_test_release(struct inode* inode, struct file* file) {
-    struct iwl_mvm* mvm = inode->i_private;
-    bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+  struct iwl_mvm* mvm = inode->i_private;
+  bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
-    mvm->d3_test_active = false;
+  mvm->d3_test_active = false;
 
-    iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
+  iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
 
-    rtnl_lock();
-    __iwl_mvm_resume(mvm, true);
-    rtnl_unlock();
+  rtnl_lock();
+  __iwl_mvm_resume(mvm, true);
+  rtnl_unlock();
 
-    iwl_mvm_resume_tcm(mvm);
+  iwl_mvm_resume_tcm(mvm);
 
-    iwl_fw_runtime_resume(&mvm->fwrt);
+  iwl_fw_runtime_resume(&mvm->fwrt);
 
-    mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+  mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
 
-    iwl_abort_notification_waits(&mvm->notif_wait);
-    if (!unified_image) {
-        int remaining_time = 10;
+  iwl_abort_notification_waits(&mvm->notif_wait);
+  if (!unified_image) {
+    int remaining_time = 10;
 
-        ieee80211_restart_hw(mvm->hw);
+    ieee80211_restart_hw(mvm->hw);
 
-        /* wait for restart and disconnect all interfaces */
-        while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && remaining_time > 0) {
-            remaining_time--;
-            msleep(1000);
-        }
-
-        if (remaining_time == 0) { IWL_ERR(mvm, "Timed out waiting for HW restart!\n"); }
+    /* wait for restart and disconnect all interfaces */
+    while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && remaining_time > 0) {
+      remaining_time--;
+      msleep(1000);
     }
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
+    if (remaining_time == 0) {
+      IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
+    }
+  }
 
-    return 0;
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
+
+  return 0;
 }
 
 const struct file_operations iwl_dbgfs_d3_test_ops = {
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs-vif.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs-vif.c
index e469367..5ad11ce 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs-vif.c
@@ -39,1269 +39,1406 @@
 
 static void iwl_dbgfs_update_pm(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                 enum iwl_dbgfs_pm_mask param, int val) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_dbgfs_pm* dbgfs_pm = &mvmvif->dbgfs_pm;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_dbgfs_pm* dbgfs_pm = &mvmvif->dbgfs_pm;
 
-    dbgfs_pm->mask |= param;
+  dbgfs_pm->mask |= param;
 
-    switch (param) {
+  switch (param) {
     case MVM_DEBUGFS_PM_KEEP_ALIVE: {
-        int dtimper = vif->bss_conf.dtim_period ?: 1;
-        int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+      int dtimper = vif->bss_conf.dtim_period ?: 1;
+      int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
 
-        IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
-        if (val * MSEC_PER_SEC < 3 * dtimper_msec)
-            IWL_WARN(
-                mvm,
-                "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
-                val * MSEC_PER_SEC, 3 * dtimper_msec);
-        dbgfs_pm->keep_alive_seconds = val;
-        break;
+      IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
+      if (val * MSEC_PER_SEC < 3 * dtimper_msec)
+        IWL_WARN(mvm,
+                 "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
+                 val * MSEC_PER_SEC, 3 * dtimper_msec);
+      dbgfs_pm->keep_alive_seconds = val;
+      break;
     }
     case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
-        IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n", val ? "enabled" : "disabled");
-        dbgfs_pm->skip_over_dtim = val;
-        break;
+      IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n", val ? "enabled" : "disabled");
+      dbgfs_pm->skip_over_dtim = val;
+      break;
     case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
-        IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
-        dbgfs_pm->skip_dtim_periods = val;
-        break;
+      IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
+      dbgfs_pm->skip_dtim_periods = val;
+      break;
     case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
-        IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
-        dbgfs_pm->rx_data_timeout = val;
-        break;
+      IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
+      dbgfs_pm->rx_data_timeout = val;
+      break;
     case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
-        IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
-        dbgfs_pm->tx_data_timeout = val;
-        break;
+      IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
+      dbgfs_pm->tx_data_timeout = val;
+      break;
     case MVM_DEBUGFS_PM_LPRX_ENA:
-        IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
-        dbgfs_pm->lprx_ena = val;
-        break;
+      IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
+      dbgfs_pm->lprx_ena = val;
+      break;
     case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
-        IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
-        dbgfs_pm->lprx_rssi_threshold = val;
-        break;
+      IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
+      dbgfs_pm->lprx_rssi_threshold = val;
+      break;
     case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
-        IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
-        dbgfs_pm->snooze_ena = val;
-        break;
+      IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+      dbgfs_pm->snooze_ena = val;
+      break;
     case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING:
-        IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val);
-        dbgfs_pm->uapsd_misbehaving = val;
-        break;
+      IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val);
+      dbgfs_pm->uapsd_misbehaving = val;
+      break;
     case MVM_DEBUGFS_PM_USE_PS_POLL:
-        IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val);
-        dbgfs_pm->use_ps_poll = val;
-        break;
-    }
+      IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val);
+      dbgfs_pm->use_ps_poll = val;
+      break;
+  }
 }
 
 static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif* vif, char* buf, size_t count,
                                          loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    enum iwl_dbgfs_pm_mask param;
-    int val, ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  enum iwl_dbgfs_pm_mask param;
+  int val, ret;
 
-    if (!strncmp("keep_alive=", buf, 11)) {
-        if (sscanf(buf + 11, "%d", &val) != 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_PM_KEEP_ALIVE;
-    } else if (!strncmp("skip_over_dtim=", buf, 15)) {
-        if (sscanf(buf + 15, "%d", &val) != 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
-    } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
-        if (sscanf(buf + 18, "%d", &val) != 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
-    } else if (!strncmp("rx_data_timeout=", buf, 16)) {
-        if (sscanf(buf + 16, "%d", &val) != 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
-    } else if (!strncmp("tx_data_timeout=", buf, 16)) {
-        if (sscanf(buf + 16, "%d", &val) != 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
-    } else if (!strncmp("lprx=", buf, 5)) {
-        if (sscanf(buf + 5, "%d", &val) != 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_PM_LPRX_ENA;
-    } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
-        if (sscanf(buf + 20, "%d", &val) != 1) { return -EINVAL; }
-        if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val < POWER_LPRX_RSSI_THRESHOLD_MIN) {
-            return -EINVAL;
-        }
-        param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
-    } else if (!strncmp("snooze_enable=", buf, 14)) {
-        if (sscanf(buf + 14, "%d", &val) != 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
-    } else if (!strncmp("uapsd_misbehaving=", buf, 18)) {
-        if (sscanf(buf + 18, "%d", &val) != 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING;
-    } else if (!strncmp("use_ps_poll=", buf, 12)) {
-        if (sscanf(buf + 12, "%d", &val) != 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_PM_USE_PS_POLL;
-    } else {
-        return -EINVAL;
+  if (!strncmp("keep_alive=", buf, 11)) {
+    if (sscanf(buf + 11, "%d", &val) != 1) {
+      return -EINVAL;
     }
+    param = MVM_DEBUGFS_PM_KEEP_ALIVE;
+  } else if (!strncmp("skip_over_dtim=", buf, 15)) {
+    if (sscanf(buf + 15, "%d", &val) != 1) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
+  } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
+    if (sscanf(buf + 18, "%d", &val) != 1) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
+  } else if (!strncmp("rx_data_timeout=", buf, 16)) {
+    if (sscanf(buf + 16, "%d", &val) != 1) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
+  } else if (!strncmp("tx_data_timeout=", buf, 16)) {
+    if (sscanf(buf + 16, "%d", &val) != 1) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
+  } else if (!strncmp("lprx=", buf, 5)) {
+    if (sscanf(buf + 5, "%d", &val) != 1) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_PM_LPRX_ENA;
+  } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
+    if (sscanf(buf + 20, "%d", &val) != 1) {
+      return -EINVAL;
+    }
+    if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val < POWER_LPRX_RSSI_THRESHOLD_MIN) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+  } else if (!strncmp("snooze_enable=", buf, 14)) {
+    if (sscanf(buf + 14, "%d", &val) != 1) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
+  } else if (!strncmp("uapsd_misbehaving=", buf, 18)) {
+    if (sscanf(buf + 18, "%d", &val) != 1) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING;
+  } else if (!strncmp("use_ps_poll=", buf, 12)) {
+    if (sscanf(buf + 12, "%d", &val) != 1) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_PM_USE_PS_POLL;
+  } else {
+    return -EINVAL;
+  }
 
-    mutex_lock(&mvm->mutex);
-    iwl_dbgfs_update_pm(mvm, vif, param, val);
-    ret = iwl_mvm_power_update_mac(mvm);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  iwl_dbgfs_update_pm(mvm, vif, param, val);
+  ret = iwl_mvm_power_update_mac(mvm);
+  mutex_unlock(&mvm->mutex);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file* file, char __user* user_buf, size_t count,
                                          loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    char buf[64];
-    int bufsz = sizeof(buf);
-    int pos;
+  struct ieee80211_vif* vif = file->private_data;
+  char buf[64];
+  int bufsz = sizeof(buf);
+  int pos;
 
-    pos = scnprintf(buf, bufsz, "bss limit = %d\n", vif->bss_conf.txpower);
+  pos = scnprintf(buf, bufsz, "bss limit = %d\n", vif->bss_conf.txpower);
 
 #ifdef CPTCFG_IWLWIFI_FRQ_MGR
-    {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-        struct iwl_mvm* mvm = mvmvif->mvm;
+  {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+    struct iwl_mvm* mvm = mvmvif->mvm;
 
-        mutex_lock(&mvm->mutex);
-        if (mvmvif->phy_ctxt)
-            pos += scnprintf(buf + pos, bufsz - pos, "fm limit = %d\n",
-                             mvmvif->phy_ctxt->fm_tx_power_limit);
-        mutex_unlock(&mvm->mutex);
-    }
+    mutex_lock(&mvm->mutex);
+    if (mvmvif->phy_ctxt)
+      pos +=
+          scnprintf(buf + pos, bufsz - pos, "fm limit = %d\n", mvmvif->phy_ctxt->fm_tx_power_limit);
+    mutex_unlock(&mvm->mutex);
+  }
 #endif
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_pm_params_read(struct file* file, char __user* user_buf, size_t count,
                                         loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    char buf[512];
-    int bufsz = sizeof(buf);
-    int pos;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  char buf[512];
+  int bufsz = sizeof(buf);
+  int pos;
 
-    pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
+  pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_mac_params_read(struct file* file, char __user* user_buf, size_t count,
                                          loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    uint8_t ap_sta_id;
-    struct ieee80211_chanctx_conf* chanctx_conf;
-    char buf[512];
-    int bufsz = sizeof(buf);
-    int pos = 0;
-    int i;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  uint8_t ap_sta_id;
+  struct ieee80211_chanctx_conf* chanctx_conf;
+  char buf[512];
+  int bufsz = sizeof(buf);
+  int pos = 0;
+  int i;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    ap_sta_id = mvmvif->ap_sta_id;
+  ap_sta_id = mvmvif->ap_sta_id;
 
-    switch (ieee80211_vif_type_p2p(vif)) {
+  switch (ieee80211_vif_type_p2p(vif)) {
     case NL80211_IFTYPE_ADHOC:
-        pos += scnprintf(buf + pos, bufsz - pos, "type: ibss\n");
-        break;
+      pos += scnprintf(buf + pos, bufsz - pos, "type: ibss\n");
+      break;
     case NL80211_IFTYPE_STATION:
-        pos += scnprintf(buf + pos, bufsz - pos, "type: bss\n");
-        break;
+      pos += scnprintf(buf + pos, bufsz - pos, "type: bss\n");
+      break;
     case NL80211_IFTYPE_AP:
-        pos += scnprintf(buf + pos, bufsz - pos, "type: ap\n");
-        break;
+      pos += scnprintf(buf + pos, bufsz - pos, "type: ap\n");
+      break;
     case NL80211_IFTYPE_P2P_CLIENT:
-        pos += scnprintf(buf + pos, bufsz - pos, "type: p2p client\n");
-        break;
+      pos += scnprintf(buf + pos, bufsz - pos, "type: p2p client\n");
+      break;
     case NL80211_IFTYPE_P2P_GO:
-        pos += scnprintf(buf + pos, bufsz - pos, "type: p2p go\n");
-        break;
+      pos += scnprintf(buf + pos, bufsz - pos, "type: p2p go\n");
+      break;
     case NL80211_IFTYPE_P2P_DEVICE:
-        pos += scnprintf(buf + pos, bufsz - pos, "type: p2p dev\n");
-        break;
+      pos += scnprintf(buf + pos, bufsz - pos, "type: p2p dev\n");
+      break;
     case NL80211_IFTYPE_NAN:
-        pos += scnprintf(buf + pos, bufsz - pos, "type: NAN\n");
-        break;
+      pos += scnprintf(buf + pos, bufsz - pos, "type: NAN\n");
+      break;
     default:
-        break;
+      break;
+  }
+
+  pos += scnprintf(buf + pos, bufsz - pos, "mac id/color: %d / %d\n", mvmvif->id, mvmvif->color);
+  pos += scnprintf(buf + pos, bufsz - pos, "bssid: %pM\n", vif->bss_conf.bssid);
+  pos += scnprintf(buf + pos, bufsz - pos, "Load: %d\n", mvm->tcm.result.load[mvmvif->id]);
+  pos += scnprintf(buf + pos, bufsz - pos, "QoS:\n");
+  for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
+    pos += scnprintf(buf + pos, bufsz - pos,
+                     "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n", i,
+                     mvmvif->queue_params[i].txop, mvmvif->queue_params[i].cw_min,
+                     mvmvif->queue_params[i].cw_max, mvmvif->queue_params[i].aifs,
+                     mvmvif->queue_params[i].uapsd);
+
+  if (vif->type == NL80211_IFTYPE_STATION && ap_sta_id != IWL_MVM_INVALID_STA) {
+    struct iwl_mvm_sta* mvm_sta;
+
+    mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
+    if (mvm_sta) {
+      pos += scnprintf(buf + pos, bufsz - pos, "ap_sta_id %d - reduced Tx power %d\n", ap_sta_id,
+                       mvm_sta->bt_reduced_txpower);
     }
+  }
 
-    pos += scnprintf(buf + pos, bufsz - pos, "mac id/color: %d / %d\n", mvmvif->id, mvmvif->color);
-    pos += scnprintf(buf + pos, bufsz - pos, "bssid: %pM\n", vif->bss_conf.bssid);
-    pos += scnprintf(buf + pos, bufsz - pos, "Load: %d\n", mvm->tcm.result.load[mvmvif->id]);
-    pos += scnprintf(buf + pos, bufsz - pos, "QoS:\n");
-    for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
-        pos += scnprintf(buf + pos, bufsz - pos,
-                         "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n", i,
-                         mvmvif->queue_params[i].txop, mvmvif->queue_params[i].cw_min,
-                         mvmvif->queue_params[i].cw_max, mvmvif->queue_params[i].aifs,
-                         mvmvif->queue_params[i].uapsd);
+  rcu_read_lock();
+  chanctx_conf = rcu_dereference(vif->chanctx_conf);
+  if (chanctx_conf)
+    pos += scnprintf(buf + pos, bufsz - pos, "idle rx chains %d, active rx chains: %d\n",
+                     chanctx_conf->rx_chains_static, chanctx_conf->rx_chains_dynamic);
+  rcu_read_unlock();
 
-    if (vif->type == NL80211_IFTYPE_STATION && ap_sta_id != IWL_MVM_INVALID_STA) {
-        struct iwl_mvm_sta* mvm_sta;
+  mutex_unlock(&mvm->mutex);
 
-        mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
-        if (mvm_sta) {
-            pos += scnprintf(buf + pos, bufsz - pos, "ap_sta_id %d - reduced Tx power %d\n",
-                             ap_sta_id, mvm_sta->bt_reduced_txpower);
-        }
-    }
-
-    rcu_read_lock();
-    chanctx_conf = rcu_dereference(vif->chanctx_conf);
-    if (chanctx_conf)
-        pos += scnprintf(buf + pos, bufsz - pos, "idle rx chains %d, active rx chains: %d\n",
-                         chanctx_conf->rx_chains_static, chanctx_conf->rx_chains_dynamic);
-    rcu_read_unlock();
-
-    mutex_unlock(&mvm->mutex);
-
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static void iwl_dbgfs_update_bf(struct ieee80211_vif* vif, enum iwl_dbgfs_bf_mask param,
                                 int value) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_dbgfs_bf* dbgfs_bf = &mvmvif->dbgfs_bf;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_dbgfs_bf* dbgfs_bf = &mvmvif->dbgfs_bf;
 
-    dbgfs_bf->mask |= param;
+  dbgfs_bf->mask |= param;
 
-    switch (param) {
+  switch (param) {
     case MVM_DEBUGFS_BF_ENERGY_DELTA:
-        dbgfs_bf->bf_energy_delta = value;
-        break;
+      dbgfs_bf->bf_energy_delta = value;
+      break;
     case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
-        dbgfs_bf->bf_roaming_energy_delta = value;
-        break;
+      dbgfs_bf->bf_roaming_energy_delta = value;
+      break;
     case MVM_DEBUGFS_BF_ROAMING_STATE:
-        dbgfs_bf->bf_roaming_state = value;
-        break;
+      dbgfs_bf->bf_roaming_state = value;
+      break;
     case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
-        dbgfs_bf->bf_temp_threshold = value;
-        break;
+      dbgfs_bf->bf_temp_threshold = value;
+      break;
     case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
-        dbgfs_bf->bf_temp_fast_filter = value;
-        break;
+      dbgfs_bf->bf_temp_fast_filter = value;
+      break;
     case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
-        dbgfs_bf->bf_temp_slow_filter = value;
-        break;
+      dbgfs_bf->bf_temp_slow_filter = value;
+      break;
     case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
-        dbgfs_bf->bf_enable_beacon_filter = value;
-        break;
+      dbgfs_bf->bf_enable_beacon_filter = value;
+      break;
     case MVM_DEBUGFS_BF_DEBUG_FLAG:
-        dbgfs_bf->bf_debug_flag = value;
-        break;
+      dbgfs_bf->bf_debug_flag = value;
+      break;
     case MVM_DEBUGFS_BF_ESCAPE_TIMER:
-        dbgfs_bf->bf_escape_timer = value;
-        break;
+      dbgfs_bf->bf_escape_timer = value;
+      break;
     case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
-        dbgfs_bf->ba_enable_beacon_abort = value;
-        break;
+      dbgfs_bf->ba_enable_beacon_abort = value;
+      break;
     case MVM_DEBUGFS_BA_ESCAPE_TIMER:
-        dbgfs_bf->ba_escape_timer = value;
-        break;
-    }
+      dbgfs_bf->ba_escape_timer = value;
+      break;
+  }
 }
 
 static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif* vif, char* buf, size_t count,
                                          loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    enum iwl_dbgfs_bf_mask param;
-    int value, ret = 0;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  enum iwl_dbgfs_bf_mask param;
+  int value, ret = 0;
 
-    if (!strncmp("bf_energy_delta=", buf, 16)) {
-        if (sscanf(buf + 16, "%d", &value) != 1) { return -EINVAL; }
-        if (value < IWL_BF_ENERGY_DELTA_MIN || value > IWL_BF_ENERGY_DELTA_MAX) { return -EINVAL; }
-        param = MVM_DEBUGFS_BF_ENERGY_DELTA;
-    } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
-        if (sscanf(buf + 24, "%d", &value) != 1) { return -EINVAL; }
-        if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN || value > IWL_BF_ROAMING_ENERGY_DELTA_MAX) {
-            return -EINVAL;
-        }
-        param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
-    } else if (!strncmp("bf_roaming_state=", buf, 17)) {
-        if (sscanf(buf + 17, "%d", &value) != 1) { return -EINVAL; }
-        if (value < IWL_BF_ROAMING_STATE_MIN || value > IWL_BF_ROAMING_STATE_MAX) {
-            return -EINVAL;
-        }
-        param = MVM_DEBUGFS_BF_ROAMING_STATE;
-    } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
-        if (sscanf(buf + 18, "%d", &value) != 1) { return -EINVAL; }
-        if (value < IWL_BF_TEMP_THRESHOLD_MIN || value > IWL_BF_TEMP_THRESHOLD_MAX) {
-            return -EINVAL;
-        }
-        param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
-    } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
-        if (sscanf(buf + 20, "%d", &value) != 1) { return -EINVAL; }
-        if (value < IWL_BF_TEMP_FAST_FILTER_MIN || value > IWL_BF_TEMP_FAST_FILTER_MAX) {
-            return -EINVAL;
-        }
-        param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
-    } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
-        if (sscanf(buf + 20, "%d", &value) != 1) { return -EINVAL; }
-        if (value < IWL_BF_TEMP_SLOW_FILTER_MIN || value > IWL_BF_TEMP_SLOW_FILTER_MAX) {
-            return -EINVAL;
-        }
-        param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
-    } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
-        if (sscanf(buf + 24, "%d", &value) != 1) { return -EINVAL; }
-        if (value < 0 || value > 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
-    } else if (!strncmp("bf_debug_flag=", buf, 14)) {
-        if (sscanf(buf + 14, "%d", &value) != 1) { return -EINVAL; }
-        if (value < 0 || value > 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_BF_DEBUG_FLAG;
-    } else if (!strncmp("bf_escape_timer=", buf, 16)) {
-        if (sscanf(buf + 16, "%d", &value) != 1) { return -EINVAL; }
-        if (value < IWL_BF_ESCAPE_TIMER_MIN || value > IWL_BF_ESCAPE_TIMER_MAX) { return -EINVAL; }
-        param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
-    } else if (!strncmp("ba_escape_timer=", buf, 16)) {
-        if (sscanf(buf + 16, "%d", &value) != 1) { return -EINVAL; }
-        if (value < IWL_BA_ESCAPE_TIMER_MIN || value > IWL_BA_ESCAPE_TIMER_MAX) { return -EINVAL; }
-        param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
-    } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
-        if (sscanf(buf + 23, "%d", &value) != 1) { return -EINVAL; }
-        if (value < 0 || value > 1) { return -EINVAL; }
-        param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
-    } else {
-        return -EINVAL;
+  if (!strncmp("bf_energy_delta=", buf, 16)) {
+    if (sscanf(buf + 16, "%d", &value) != 1) {
+      return -EINVAL;
     }
-
-    mutex_lock(&mvm->mutex);
-    iwl_dbgfs_update_bf(vif, param, value);
-    if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
-        ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
-    } else {
-        ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+    if (value < IWL_BF_ENERGY_DELTA_MIN || value > IWL_BF_ENERGY_DELTA_MAX) {
+      return -EINVAL;
     }
-    mutex_unlock(&mvm->mutex);
+    param = MVM_DEBUGFS_BF_ENERGY_DELTA;
+  } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
+    if (sscanf(buf + 24, "%d", &value) != 1) {
+      return -EINVAL;
+    }
+    if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN || value > IWL_BF_ROAMING_ENERGY_DELTA_MAX) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
+  } else if (!strncmp("bf_roaming_state=", buf, 17)) {
+    if (sscanf(buf + 17, "%d", &value) != 1) {
+      return -EINVAL;
+    }
+    if (value < IWL_BF_ROAMING_STATE_MIN || value > IWL_BF_ROAMING_STATE_MAX) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_BF_ROAMING_STATE;
+  } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
+    if (sscanf(buf + 18, "%d", &value) != 1) {
+      return -EINVAL;
+    }
+    if (value < IWL_BF_TEMP_THRESHOLD_MIN || value > IWL_BF_TEMP_THRESHOLD_MAX) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
+  } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
+    if (sscanf(buf + 20, "%d", &value) != 1) {
+      return -EINVAL;
+    }
+    if (value < IWL_BF_TEMP_FAST_FILTER_MIN || value > IWL_BF_TEMP_FAST_FILTER_MAX) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
+  } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
+    if (sscanf(buf + 20, "%d", &value) != 1) {
+      return -EINVAL;
+    }
+    if (value < IWL_BF_TEMP_SLOW_FILTER_MIN || value > IWL_BF_TEMP_SLOW_FILTER_MAX) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
+  } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+    if (sscanf(buf + 24, "%d", &value) != 1) {
+      return -EINVAL;
+    }
+    if (value < 0 || value > 1) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
+  } else if (!strncmp("bf_debug_flag=", buf, 14)) {
+    if (sscanf(buf + 14, "%d", &value) != 1) {
+      return -EINVAL;
+    }
+    if (value < 0 || value > 1) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_BF_DEBUG_FLAG;
+  } else if (!strncmp("bf_escape_timer=", buf, 16)) {
+    if (sscanf(buf + 16, "%d", &value) != 1) {
+      return -EINVAL;
+    }
+    if (value < IWL_BF_ESCAPE_TIMER_MIN || value > IWL_BF_ESCAPE_TIMER_MAX) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
+  } else if (!strncmp("ba_escape_timer=", buf, 16)) {
+    if (sscanf(buf + 16, "%d", &value) != 1) {
+      return -EINVAL;
+    }
+    if (value < IWL_BA_ESCAPE_TIMER_MIN || value > IWL_BA_ESCAPE_TIMER_MAX) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
+  } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
+    if (sscanf(buf + 23, "%d", &value) != 1) {
+      return -EINVAL;
+    }
+    if (value < 0 || value > 1) {
+      return -EINVAL;
+    }
+    param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
+  } else {
+    return -EINVAL;
+  }
 
-    return ret ?: count;
+  mutex_lock(&mvm->mutex);
+  iwl_dbgfs_update_bf(vif, param, value);
+  if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
+    ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+  } else {
+    ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+  }
+  mutex_unlock(&mvm->mutex);
+
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_bf_params_read(struct file* file, char __user* user_buf, size_t count,
                                         loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    char buf[256];
-    int pos = 0;
-    const size_t bufsz = sizeof(buf);
-    struct iwl_beacon_filter_cmd cmd = {
-        IWL_BF_CMD_CONFIG_DEFAULTS,
-        .bf_enable_beacon_filter = cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
-        .ba_enable_beacon_abort = cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
-    };
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  char buf[256];
+  int pos = 0;
+  const size_t bufsz = sizeof(buf);
+  struct iwl_beacon_filter_cmd cmd = {
+      IWL_BF_CMD_CONFIG_DEFAULTS,
+      .bf_enable_beacon_filter = cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+      .ba_enable_beacon_abort = cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
+  };
 
-    iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
-    if (mvmvif->bf_data.bf_enabled) {
-        cmd.bf_enable_beacon_filter = cpu_to_le32(1);
-    } else {
-        cmd.bf_enable_beacon_filter = 0;
-    }
+  iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+  if (mvmvif->bf_data.bf_enabled) {
+    cmd.bf_enable_beacon_filter = cpu_to_le32(1);
+  } else {
+    cmd.bf_enable_beacon_filter = 0;
+  }
 
-    pos += scnprintf(buf + pos, bufsz - pos, "bf_energy_delta = %d\n",
-                     le32_to_cpu(cmd.bf_energy_delta));
-    pos += scnprintf(buf + pos, bufsz - pos, "bf_roaming_energy_delta = %d\n",
-                     le32_to_cpu(cmd.bf_roaming_energy_delta));
-    pos += scnprintf(buf + pos, bufsz - pos, "bf_roaming_state = %d\n",
-                     le32_to_cpu(cmd.bf_roaming_state));
-    pos += scnprintf(buf + pos, bufsz - pos, "bf_temp_threshold = %d\n",
-                     le32_to_cpu(cmd.bf_temp_threshold));
-    pos += scnprintf(buf + pos, bufsz - pos, "bf_temp_fast_filter = %d\n",
-                     le32_to_cpu(cmd.bf_temp_fast_filter));
-    pos += scnprintf(buf + pos, bufsz - pos, "bf_temp_slow_filter = %d\n",
-                     le32_to_cpu(cmd.bf_temp_slow_filter));
-    pos += scnprintf(buf + pos, bufsz - pos, "bf_enable_beacon_filter = %d\n",
-                     le32_to_cpu(cmd.bf_enable_beacon_filter));
-    pos +=
-        scnprintf(buf + pos, bufsz - pos, "bf_debug_flag = %d\n", le32_to_cpu(cmd.bf_debug_flag));
-    pos += scnprintf(buf + pos, bufsz - pos, "bf_escape_timer = %d\n",
-                     le32_to_cpu(cmd.bf_escape_timer));
-    pos += scnprintf(buf + pos, bufsz - pos, "ba_escape_timer = %d\n",
-                     le32_to_cpu(cmd.ba_escape_timer));
-    pos += scnprintf(buf + pos, bufsz - pos, "ba_enable_beacon_abort = %d\n",
-                     le32_to_cpu(cmd.ba_enable_beacon_abort));
+  pos +=
+      scnprintf(buf + pos, bufsz - pos, "bf_energy_delta = %d\n", le32_to_cpu(cmd.bf_energy_delta));
+  pos += scnprintf(buf + pos, bufsz - pos, "bf_roaming_energy_delta = %d\n",
+                   le32_to_cpu(cmd.bf_roaming_energy_delta));
+  pos += scnprintf(buf + pos, bufsz - pos, "bf_roaming_state = %d\n",
+                   le32_to_cpu(cmd.bf_roaming_state));
+  pos += scnprintf(buf + pos, bufsz - pos, "bf_temp_threshold = %d\n",
+                   le32_to_cpu(cmd.bf_temp_threshold));
+  pos += scnprintf(buf + pos, bufsz - pos, "bf_temp_fast_filter = %d\n",
+                   le32_to_cpu(cmd.bf_temp_fast_filter));
+  pos += scnprintf(buf + pos, bufsz - pos, "bf_temp_slow_filter = %d\n",
+                   le32_to_cpu(cmd.bf_temp_slow_filter));
+  pos += scnprintf(buf + pos, bufsz - pos, "bf_enable_beacon_filter = %d\n",
+                   le32_to_cpu(cmd.bf_enable_beacon_filter));
+  pos += scnprintf(buf + pos, bufsz - pos, "bf_debug_flag = %d\n", le32_to_cpu(cmd.bf_debug_flag));
+  pos +=
+      scnprintf(buf + pos, bufsz - pos, "bf_escape_timer = %d\n", le32_to_cpu(cmd.bf_escape_timer));
+  pos +=
+      scnprintf(buf + pos, bufsz - pos, "ba_escape_timer = %d\n", le32_to_cpu(cmd.ba_escape_timer));
+  pos += scnprintf(buf + pos, bufsz - pos, "ba_enable_beacon_abort = %d\n",
+                   le32_to_cpu(cmd.ba_enable_beacon_abort));
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static inline char* iwl_dbgfs_is_match(char* name, char* buf) {
-    int len = strlen(name);
+  int len = strlen(name);
 
-    return !strncmp(name, buf, len) ? buf + len : NULL;
+  return !strncmp(name, buf, len) ? buf + len : NULL;
 }
 
 static ssize_t iwl_dbgfs_os_device_timediff_read(struct file* file, char __user* user_buf,
                                                  size_t count, loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    uint32_t curr_gp2;
-    uint64_t curr_os;
-    s64 diff;
-    char buf[64];
-    const size_t bufsz = sizeof(buf);
-    int pos = 0;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  uint32_t curr_gp2;
+  uint64_t curr_os;
+  s64 diff;
+  char buf[64];
+  const size_t bufsz = sizeof(buf);
+  int pos = 0;
 
-    iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
-    do_div(curr_os, NSEC_PER_USEC);
-    diff = curr_os - curr_gp2;
-    pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
+  iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+  do_div(curr_os, NSEC_PER_USEC);
+  diff = curr_os - curr_gp2;
+  pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif* vif, char* buf, size_t count,
                                           loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    uint32_t value;
-    int ret = -EINVAL;
-    char* data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  uint32_t value;
+  int ret = -EINVAL;
+  char* data;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    data = iwl_dbgfs_is_match("tof_disabled=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.tof_cfg.tof_disabled = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("tof_disabled=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.tof_cfg.tof_disabled = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.tof_cfg.one_sided_disabled = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.tof_cfg.one_sided_disabled = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("is_debug_mode=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.tof_cfg.is_debug_mode = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("is_debug_mode=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.tof_cfg.is_debug_mode = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("is_buf=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.tof_cfg.is_buf_required = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("is_buf=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.tof_cfg.is_buf_required = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0 && value) {
-            ret = iwl_mvm_tof_config_cmd(mvm);
-            goto out;
-        }
+  data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0 && value) {
+      ret = iwl_mvm_tof_config_cmd(mvm);
+      goto out;
     }
+  }
 
 out:
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_tof_enable_read(struct file* file, char __user* user_buf, size_t count,
                                          loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    char buf[256];
-    int pos = 0;
-    const size_t bufsz = sizeof(buf);
-    struct iwl_tof_config_cmd* cmd;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  char buf[256];
+  int pos = 0;
+  const size_t bufsz = sizeof(buf);
+  struct iwl_tof_config_cmd* cmd;
 
-    cmd = &mvm->tof_data.tof_cfg;
+  cmd = &mvm->tof_data.tof_cfg;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n", cmd->tof_disabled);
-    pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n", cmd->one_sided_disabled);
-    pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n", cmd->is_debug_mode);
-    pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n", cmd->is_buf_required);
+  pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n", cmd->tof_disabled);
+  pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n", cmd->one_sided_disabled);
+  pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n", cmd->is_debug_mode);
+  pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n", cmd->is_buf_required);
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif* vif, char* buf,
                                                     size_t count, loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    uint32_t value;
-    int ret = 0;
-    char* data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  uint32_t value;
+  int ret = 0;
+  char* data;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    data = iwl_dbgfs_is_match("burst_period=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (!ret) { mvm->tof_data.responder_cfg.burst_period = cpu_to_le16(value); }
-        goto out;
+  data = iwl_dbgfs_is_match("burst_period=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (!ret) {
+      mvm->tof_data.responder_cfg.burst_period = cpu_to_le16(value);
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.min_delta_ftm = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.min_delta_ftm = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("burst_duration=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.burst_duration = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("burst_duration=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.burst_duration = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.num_of_burst_exp = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.num_of_burst_exp = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("abort_responder=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.abort_responder = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("abort_responder=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.abort_responder = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("get_ch_est=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.get_ch_est = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("get_ch_est=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.get_ch_est = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.recv_sta_req_params = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.recv_sta_req_params = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("channel_num=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.channel_num = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("channel_num=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.channel_num = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("bandwidth=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.bandwidth = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("bandwidth=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.bandwidth = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("rate=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.rate = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("rate=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.rate = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("bssid=", buf);
-    if (data) {
-        uint8_t* mac = mvm->tof_data.responder_cfg.bssid;
+  data = iwl_dbgfs_is_match("bssid=", buf);
+  if (data) {
+    uint8_t* mac = mvm->tof_data.responder_cfg.bssid;
 
-        if (!mac_pton(data, mac)) {
-            ret = -EINVAL;
-            goto out;
-        }
+    if (!mac_pton(data, mac)) {
+      ret = -EINVAL;
+      goto out;
     }
+  }
 
-    data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.tsf_timer_offset_msecs = cpu_to_le16(value); }
-        goto out;
+  data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.tsf_timer_offset_msecs = cpu_to_le16(value);
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("toa_offset=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.toa_offset = cpu_to_le16(value); }
-        goto out;
+  data = iwl_dbgfs_is_match("toa_offset=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.toa_offset = cpu_to_le16(value);
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("center_freq=", buf);
-    if (data) {
-        struct iwl_tof_responder_config_cmd* cmd = &mvm->tof_data.responder_cfg;
+  data = iwl_dbgfs_is_match("center_freq=", buf);
+  if (data) {
+    struct iwl_tof_responder_config_cmd* cmd = &mvm->tof_data.responder_cfg;
 
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0 && value) {
-            enum nl80211_band band =
-                (cmd->channel_num <= 14) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
-            struct ieee80211_channel chn = {
-                .band = band,
-                .center_freq = ieee80211_channel_to_frequency(cmd->channel_num, band),
-            };
-            struct cfg80211_chan_def chandef = {
-                .chan = &chn,
-                .center_freq1 = ieee80211_channel_to_frequency(value, band),
-            };
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0 && value) {
+      enum nl80211_band band = (cmd->channel_num <= 14) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+      struct ieee80211_channel chn = {
+          .band = band,
+          .center_freq = ieee80211_channel_to_frequency(cmd->channel_num, band),
+      };
+      struct cfg80211_chan_def chandef = {
+          .chan = &chn,
+          .center_freq1 = ieee80211_channel_to_frequency(value, band),
+      };
 
-            cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef);
-        }
-        goto out;
+      cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef);
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.ftm_per_burst = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.ftm_per_burst = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("asap_mode=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.responder_cfg.asap_mode = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("asap_mode=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.responder_cfg.asap_mode = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0 && value) {
-            ret = iwl_mvm_tof_responder_cmd(mvm, vif);
-            goto out;
-        }
+  data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0 && value) {
+      ret = iwl_mvm_tof_responder_cmd(mvm, vif);
+      goto out;
     }
+  }
 
 out:
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_tof_responder_params_read(struct file* file, char __user* user_buf,
                                                    size_t count, loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    char buf[256];
-    int pos = 0;
-    const size_t bufsz = sizeof(buf);
-    struct iwl_tof_responder_config_cmd* cmd;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  char buf[256];
+  int pos = 0;
+  const size_t bufsz = sizeof(buf);
+  struct iwl_tof_responder_config_cmd* cmd;
 
-    cmd = &mvm->tof_data.responder_cfg;
+  cmd = &mvm->tof_data.responder_cfg;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n", le16_to_cpu(cmd->burst_period));
-    pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n", cmd->burst_duration);
-    pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n", cmd->bandwidth);
-    pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n", cmd->channel_num);
-    pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n", cmd->ctrl_ch_position);
-    pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n", cmd->bssid);
-    pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n", cmd->min_delta_ftm);
-    pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n", cmd->num_of_burst_exp);
-    pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
-    pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n", cmd->abort_responder);
-    pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n", cmd->get_ch_est);
-    pos +=
-        scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n", cmd->recv_sta_req_params);
-    pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n", cmd->ftm_per_burst);
-    pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n", cmd->ftm_resp_ts_avail);
-    pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n", cmd->asap_mode);
-    pos += scnprintf(buf + pos, bufsz - pos, "tsf_timer_offset_msecs = %d\n",
-                     le16_to_cpu(cmd->tsf_timer_offset_msecs));
-    pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n", le16_to_cpu(cmd->toa_offset));
+  pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n", le16_to_cpu(cmd->burst_period));
+  pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n", cmd->burst_duration);
+  pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n", cmd->bandwidth);
+  pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n", cmd->channel_num);
+  pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n", cmd->ctrl_ch_position);
+  pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n", cmd->bssid);
+  pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n", cmd->min_delta_ftm);
+  pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n", cmd->num_of_burst_exp);
+  pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
+  pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n", cmd->abort_responder);
+  pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n", cmd->get_ch_est);
+  pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n", cmd->recv_sta_req_params);
+  pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n", cmd->ftm_per_burst);
+  pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n", cmd->ftm_resp_ts_avail);
+  pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n", cmd->asap_mode);
+  pos += scnprintf(buf + pos, bufsz - pos, "tsf_timer_offset_msecs = %d\n",
+                   le16_to_cpu(cmd->tsf_timer_offset_msecs));
+  pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n", le16_to_cpu(cmd->toa_offset));
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif* vif, char* buf, size_t count,
                                                  loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    uint32_t value;
-    int ret = 0;
-    char* data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  uint32_t value;
+  int ret = 0;
+  char* data;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    data = iwl_dbgfs_is_match("request_id=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req.request_id = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("request_id=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req.request_id = value;
+    }
+    goto out;
+  }
+
+  data = iwl_dbgfs_is_match("initiator=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req.initiator = value;
+    }
+    goto out;
+  }
+
+  data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req.one_sided_los_disable = value;
+    }
+    goto out;
+  }
+
+  data = iwl_dbgfs_is_match("req_timeout=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req.req_timeout = value;
+    }
+    goto out;
+  }
+
+  data = iwl_dbgfs_is_match("report_policy=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req.report_policy = value;
+    }
+    goto out;
+  }
+
+  data = iwl_dbgfs_is_match("macaddr_random=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req.macaddr_random = value;
+    }
+    goto out;
+  }
+
+  data = iwl_dbgfs_is_match("num_of_ap=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req.num_of_ap = value;
+    }
+    goto out;
+  }
+
+  data = iwl_dbgfs_is_match("macaddr_template=", buf);
+  if (data) {
+    uint8_t mac[ETH_ALEN];
+
+    if (!mac_pton(data, mac)) {
+      ret = -EINVAL;
+      goto out;
+    }
+    memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+    goto out;
+  }
+
+  data = iwl_dbgfs_is_match("macaddr_mask=", buf);
+  if (data) {
+    uint8_t mac[ETH_ALEN];
+
+    if (!mac_pton(data, mac)) {
+      ret = -EINVAL;
+      goto out;
+    }
+    memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+    goto out;
+  }
+
+  data = iwl_dbgfs_is_match("ap=", buf);
+  if (data) {
+    struct iwl_tof_range_req_ap_entry ap = {};
+    int size = sizeof(struct iwl_tof_range_req_ap_entry);
+    uint16_t burst_period;
+    uint8_t* mac = ap.bssid;
+    unsigned int i;
+
+    if (sscanf(data,
+               "%u %hhd %hhd %hhd"
+               "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
+               "%hhd %hhd %hd"
+               "%hhd %hhd %d"
+               "%hhx %hhd %hhd %hhd",
+               &i, &ap.channel_num, &ap.bandwidth, &ap.ctrl_ch_position, mac, mac + 1, mac + 2,
+               mac + 3, mac + 4, mac + 5, &ap.measure_type, &ap.num_of_bursts, &burst_period,
+               &ap.samples_per_burst, &ap.retries_per_sample, &ap.tsf_delta, &ap.location_req,
+               &ap.asap_mode, &ap.enable_dyn_ack, &ap.rssi) != 20) {
+      ret = -EINVAL;
+      goto out;
+    }
+    if (i >= IWL_MVM_TOF_MAX_APS) {
+      IWL_ERR(mvm, "Invalid AP index %d\n", i);
+      ret = -EINVAL;
+      goto out;
     }
 
-    data = iwl_dbgfs_is_match("initiator=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req.initiator = value; }
-        goto out;
+    ap.burst_period = cpu_to_le16(burst_period);
+
+    memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
+    goto out;
+  }
+
+  data = iwl_dbgfs_is_match("send_range_request=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0 && value) {
+      ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req.one_sided_los_disable = value; }
-        goto out;
-    }
-
-    data = iwl_dbgfs_is_match("req_timeout=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req.req_timeout = value; }
-        goto out;
-    }
-
-    data = iwl_dbgfs_is_match("report_policy=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req.report_policy = value; }
-        goto out;
-    }
-
-    data = iwl_dbgfs_is_match("macaddr_random=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req.macaddr_random = value; }
-        goto out;
-    }
-
-    data = iwl_dbgfs_is_match("num_of_ap=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req.num_of_ap = value; }
-        goto out;
-    }
-
-    data = iwl_dbgfs_is_match("macaddr_template=", buf);
-    if (data) {
-        uint8_t mac[ETH_ALEN];
-
-        if (!mac_pton(data, mac)) {
-            ret = -EINVAL;
-            goto out;
-        }
-        memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
-        goto out;
-    }
-
-    data = iwl_dbgfs_is_match("macaddr_mask=", buf);
-    if (data) {
-        uint8_t mac[ETH_ALEN];
-
-        if (!mac_pton(data, mac)) {
-            ret = -EINVAL;
-            goto out;
-        }
-        memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
-        goto out;
-    }
-
-    data = iwl_dbgfs_is_match("ap=", buf);
-    if (data) {
-        struct iwl_tof_range_req_ap_entry ap = {};
-        int size = sizeof(struct iwl_tof_range_req_ap_entry);
-        uint16_t burst_period;
-        uint8_t* mac = ap.bssid;
-        unsigned int i;
-
-        if (sscanf(data,
-                   "%u %hhd %hhd %hhd"
-                   "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
-                   "%hhd %hhd %hd"
-                   "%hhd %hhd %d"
-                   "%hhx %hhd %hhd %hhd",
-                   &i, &ap.channel_num, &ap.bandwidth, &ap.ctrl_ch_position, mac, mac + 1, mac + 2,
-                   mac + 3, mac + 4, mac + 5, &ap.measure_type, &ap.num_of_bursts, &burst_period,
-                   &ap.samples_per_burst, &ap.retries_per_sample, &ap.tsf_delta, &ap.location_req,
-                   &ap.asap_mode, &ap.enable_dyn_ack, &ap.rssi) != 20) {
-            ret = -EINVAL;
-            goto out;
-        }
-        if (i >= IWL_MVM_TOF_MAX_APS) {
-            IWL_ERR(mvm, "Invalid AP index %d\n", i);
-            ret = -EINVAL;
-            goto out;
-        }
-
-        ap.burst_period = cpu_to_le16(burst_period);
-
-        memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
-        goto out;
-    }
-
-    data = iwl_dbgfs_is_match("send_range_request=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0 && value) { ret = iwl_mvm_tof_range_request_cmd(mvm, vif); }
-        goto out;
-    }
-
-    ret = -EINVAL;
+  ret = -EINVAL;
 out:
-    mutex_unlock(&mvm->mutex);
-    return ret ?: count;
+  mutex_unlock(&mvm->mutex);
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_tof_range_request_read(struct file* file, char __user* user_buf,
                                                 size_t count, loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    char buf[512];
-    int pos = 0;
-    const size_t bufsz = sizeof(buf);
-    struct iwl_tof_range_req_cmd* cmd;
-    int i;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  char buf[512];
+  int pos = 0;
+  const size_t bufsz = sizeof(buf);
+  struct iwl_tof_range_req_cmd* cmd;
+  int i;
 
-    cmd = &mvm->tof_data.range_req;
+  cmd = &mvm->tof_data.range_req;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n", cmd->request_id);
-    pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n", cmd->initiator);
-    pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
-                     cmd->one_sided_los_disable);
-    pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n", cmd->req_timeout);
-    pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n", cmd->report_policy);
-    pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n", cmd->macaddr_random);
-    pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n", cmd->macaddr_template);
-    pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n", cmd->macaddr_mask);
-    pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n", cmd->num_of_ap);
-    for (i = 0; i < cmd->num_of_ap; i++) {
-        struct iwl_tof_range_req_ap_entry* ap = &cmd->ap[i];
+  pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n", cmd->request_id);
+  pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n", cmd->initiator);
+  pos +=
+      scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n", cmd->one_sided_los_disable);
+  pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n", cmd->req_timeout);
+  pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n", cmd->report_policy);
+  pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n", cmd->macaddr_random);
+  pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n", cmd->macaddr_template);
+  pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n", cmd->macaddr_mask);
+  pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n", cmd->num_of_ap);
+  for (i = 0; i < cmd->num_of_ap; i++) {
+    struct iwl_tof_range_req_ap_entry* ap = &cmd->ap[i];
 
-        pos +=
-            scnprintf(buf + pos, bufsz - pos,
-                      "ap %.2d: channel_num=%hhd bw=%hhd"
-                      " control=%hhd bssid=%pM type=%hhd"
-                      " num_of_bursts=%hhd burst_period=%hd ftm=%hhd"
-                      " retries=%hhd tsf_delta=%d"
-                      " tsf_delta_direction=%hhd location_req=0x%hhx "
-                      " asap=%hhd enable=%hhd rssi=%hhd\n",
-                      i, ap->channel_num, ap->bandwidth, ap->ctrl_ch_position, ap->bssid,
-                      ap->measure_type, ap->num_of_bursts, ap->burst_period, ap->samples_per_burst,
-                      ap->retries_per_sample, ap->tsf_delta, ap->tsf_delta_direction,
-                      ap->location_req, ap->asap_mode, ap->enable_dyn_ack, ap->rssi);
-    }
+    pos += scnprintf(buf + pos, bufsz - pos,
+                     "ap %.2d: channel_num=%hhd bw=%hhd"
+                     " control=%hhd bssid=%pM type=%hhd"
+                     " num_of_bursts=%hhd burst_period=%hd ftm=%hhd"
+                     " retries=%hhd tsf_delta=%d"
+                     " tsf_delta_direction=%hhd location_req=0x%hhx "
+                     " asap=%hhd enable=%hhd rssi=%hhd\n",
+                     i, ap->channel_num, ap->bandwidth, ap->ctrl_ch_position, ap->bssid,
+                     ap->measure_type, ap->num_of_bursts, ap->burst_period, ap->samples_per_burst,
+                     ap->retries_per_sample, ap->tsf_delta, ap->tsf_delta_direction,
+                     ap->location_req, ap->asap_mode, ap->enable_dyn_ack, ap->rssi);
+  }
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif* vif, char* buf, size_t count,
                                                  loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    uint32_t value;
-    int ret = 0;
-    char* data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  uint32_t value;
+  int ret = 0;
+  char* data;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req_ext.tsf_timer_offset_msec = cpu_to_le16(value); }
-        goto out;
+  data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req_ext.tsf_timer_offset_msec = cpu_to_le16(value);
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req_ext.min_delta_ftm = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req_ext.min_delta_ftm = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req_ext.ftm_format_and_bw20M = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req_ext.ftm_format_and_bw20M = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req_ext.ftm_format_and_bw40M = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req_ext.ftm_format_and_bw40M = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.range_req_ext.ftm_format_and_bw80M = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.range_req_ext.ftm_format_and_bw80M = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0 && value) { ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif); }
-        goto out;
+  data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0 && value) {
+      ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
     }
+    goto out;
+  }
 
-    ret = -EINVAL;
+  ret = -EINVAL;
 out:
-    mutex_unlock(&mvm->mutex);
-    return ret ?: count;
+  mutex_unlock(&mvm->mutex);
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file* file, char __user* user_buf,
                                                 size_t count, loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    char buf[256];
-    int pos = 0;
-    const size_t bufsz = sizeof(buf);
-    struct iwl_tof_range_req_ext_cmd* cmd;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  char buf[256];
+  int pos = 0;
+  const size_t bufsz = sizeof(buf);
+  struct iwl_tof_range_req_ext_cmd* cmd;
 
-    cmd = &mvm->tof_data.range_req_ext;
+  cmd = &mvm->tof_data.range_req_ext;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "tsf_timer_offset_msec = %hd\n",
-                     cmd->tsf_timer_offset_msec);
-    pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n", cmd->min_delta_ftm);
-    pos += scnprintf(buf + pos, bufsz - pos, "ftm_format_and_bw20M = %hhd\n",
-                     cmd->ftm_format_and_bw20M);
-    pos += scnprintf(buf + pos, bufsz - pos, "ftm_format_and_bw40M = %hhd\n",
-                     cmd->ftm_format_and_bw40M);
-    pos += scnprintf(buf + pos, bufsz - pos, "ftm_format_and_bw80M = %hhd\n",
-                     cmd->ftm_format_and_bw80M);
+  pos += scnprintf(buf + pos, bufsz - pos, "tsf_timer_offset_msec = %hd\n",
+                   cmd->tsf_timer_offset_msec);
+  pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n", cmd->min_delta_ftm);
+  pos +=
+      scnprintf(buf + pos, bufsz - pos, "ftm_format_and_bw20M = %hhd\n", cmd->ftm_format_and_bw20M);
+  pos +=
+      scnprintf(buf + pos, bufsz - pos, "ftm_format_and_bw40M = %hhd\n", cmd->ftm_format_and_bw40M);
+  pos +=
+      scnprintf(buf + pos, bufsz - pos, "ftm_format_and_bw80M = %hhd\n", cmd->ftm_format_and_bw80M);
 
-    mutex_unlock(&mvm->mutex);
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  mutex_unlock(&mvm->mutex);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif* vif, char* buf, size_t count,
                                                loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    uint32_t value;
-    int abort_id, ret = 0;
-    char* data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  uint32_t value;
+  int abort_id, ret = 0;
+  char* data;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    data = iwl_dbgfs_is_match("abort_id=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0) { mvm->tof_data.last_abort_id = value; }
-        goto out;
+  data = iwl_dbgfs_is_match("abort_id=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0) {
+      mvm->tof_data.last_abort_id = value;
     }
+    goto out;
+  }
 
-    data = iwl_dbgfs_is_match("send_range_abort=", buf);
-    if (data) {
-        ret = kstrtou32(data, 10, &value);
-        if (ret == 0 && value) {
-            abort_id = mvm->tof_data.last_abort_id;
-            ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
-            goto out;
-        }
+  data = iwl_dbgfs_is_match("send_range_abort=", buf);
+  if (data) {
+    ret = kstrtou32(data, 10, &value);
+    if (ret == 0 && value) {
+      abort_id = mvm->tof_data.last_abort_id;
+      ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
+      goto out;
     }
+  }
 
 out:
-    mutex_unlock(&mvm->mutex);
-    return ret ?: count;
+  mutex_unlock(&mvm->mutex);
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_tof_range_abort_read(struct file* file, char __user* user_buf,
                                               size_t count, loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    char buf[32];
-    int pos = 0;
-    const size_t bufsz = sizeof(buf);
-    int last_abort_id;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  char buf[32];
+  int pos = 0;
+  const size_t bufsz = sizeof(buf);
+  int last_abort_id;
 
-    mutex_lock(&mvm->mutex);
-    last_abort_id = mvm->tof_data.last_abort_id;
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  last_abort_id = mvm->tof_data.last_abort_id;
+  mutex_unlock(&mvm->mutex);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n", last_abort_id);
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n", last_abort_id);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_tof_range_response_read(struct file* file, char __user* user_buf,
                                                  size_t count, loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    char* buf;
-    int pos = 0;
-    const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
-    struct iwl_tof_range_rsp_ntfy* cmd;
-    int i, ret;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  char* buf;
+  int pos = 0;
+  const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
+  struct iwl_tof_range_rsp_ntfy* cmd;
+  int i, ret;
 
-    buf = kzalloc(bufsz, GFP_KERNEL);
-    if (!buf) { return -ENOMEM; }
+  buf = kzalloc(bufsz, GFP_KERNEL);
+  if (!buf) {
+    return -ENOMEM;
+  }
 
-    mutex_lock(&mvm->mutex);
-    cmd = &mvm->tof_data.range_resp;
+  mutex_lock(&mvm->mutex);
+  cmd = &mvm->tof_data.range_resp;
 
-    pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n", cmd->request_id);
-    pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n", cmd->request_status);
-    pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n", cmd->last_in_batch);
-    pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n", cmd->num_of_aps);
-    for (i = 0; i < cmd->num_of_aps; i++) {
-        struct iwl_tof_range_rsp_ap_entry_ntfy* ap = &cmd->ap[i];
+  pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n", cmd->request_id);
+  pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n", cmd->request_status);
+  pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n", cmd->last_in_batch);
+  pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n", cmd->num_of_aps);
+  for (i = 0; i < cmd->num_of_aps; i++) {
+    struct iwl_tof_range_rsp_ap_entry_ntfy* ap = &cmd->ap[i];
 
-        pos += scnprintf(buf + pos, bufsz - pos,
-                         "ap %.2d: bssid=%pM status=%hhd bw=%hhd"
-                         " rtt=%d rtt_var=%d rtt_spread=%d"
-                         " rssi=%hhd  rssi_spread=%hhd"
-                         " range=%d range_var=%d"
-                         " time_stamp=%d\n",
-                         i, ap->bssid, ap->measure_status, ap->measure_bw, ap->rtt,
-                         ap->rtt_variance, ap->rtt_spread, ap->rssi, ap->rssi_spread, ap->range,
-                         ap->range_variance, ap->timestamp);
-    }
-    mutex_unlock(&mvm->mutex);
+    pos += scnprintf(buf + pos, bufsz - pos,
+                     "ap %.2d: bssid=%pM status=%hhd bw=%hhd"
+                     " rtt=%d rtt_var=%d rtt_spread=%d"
+                     " rssi=%hhd  rssi_spread=%hhd"
+                     " range=%d range_var=%d"
+                     " time_stamp=%d\n",
+                     i, ap->bssid, ap->measure_status, ap->measure_bw, ap->rtt, ap->rtt_variance,
+                     ap->rtt_spread, ap->rssi, ap->rssi_spread, ap->range, ap->range_variance,
+                     ap->timestamp);
+  }
+  mutex_unlock(&mvm->mutex);
 
-    ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-    kfree(buf);
-    return ret;
+  ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  kfree(buf);
+  return ret;
 }
 
 static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif* vif, char* buf, size_t count,
                                            loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    uint8_t value;
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  uint8_t value;
+  int ret;
 
-    ret = kstrtou8(buf, 0, &value);
-    if (ret) { return ret; }
-    if (value > 1) { return -EINVAL; }
+  ret = kstrtou8(buf, 0, &value);
+  if (ret) {
+    return ret;
+  }
+  if (value > 1) {
+    return -EINVAL;
+  }
 
-    mutex_lock(&mvm->mutex);
-    iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS);
+  mutex_unlock(&mvm->mutex);
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_low_latency_read(struct file* file, char __user* user_buf, size_t count,
                                           loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    char buf[30] = {};
-    int len;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  char buf[30] = {};
+  int len;
 
-    len = scnprintf(buf, sizeof(buf) - 1, "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n",
-                    !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC),
-                    !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
-                    !!(mvmvif->low_latency & LOW_LATENCY_VCMD),
-                    !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE));
-    return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+  len = scnprintf(
+      buf, sizeof(buf) - 1, "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n",
+      !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC), !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
+      !!(mvmvif->low_latency & LOW_LATENCY_VCMD), !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE));
+  return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
 static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file* file, char __user* user_buf,
                                                 size_t count, loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    char buf[20];
-    int len;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  char buf[20];
+  int len;
 
-    len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid);
-    return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+  len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
 static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif* vif, char* buf, size_t count,
                                                  loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    bool ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  bool ret;
 
-    mutex_lock(&mvm->mutex);
-    ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid);
+  mutex_unlock(&mvm->mutex);
 
-    return ret ? count : -EINVAL;
+  return ret ? count : -EINVAL;
 }
 
 static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif* vif, char* buf, size_t count,
                                           loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    struct ieee80211_chanctx_conf* chanctx_conf;
-    struct iwl_mvm_phy_ctxt* phy_ctxt;
-    uint16_t value;
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  struct ieee80211_chanctx_conf* chanctx_conf;
+  struct iwl_mvm_phy_ctxt* phy_ctxt;
+  uint16_t value;
+  int ret;
 
-    ret = kstrtou16(buf, 0, &value);
-    if (ret) { return ret; }
+  ret = kstrtou16(buf, 0, &value);
+  if (ret) {
+    return ret;
+  }
 
-    mutex_lock(&mvm->mutex);
-    rcu_read_lock();
+  mutex_lock(&mvm->mutex);
+  rcu_read_lock();
 
-    chanctx_conf = rcu_dereference(vif->chanctx_conf);
-    /* make sure the channel context is assigned */
-    if (!chanctx_conf) {
-        rcu_read_unlock();
-        mutex_unlock(&mvm->mutex);
-        return -EINVAL;
-    }
-
-    phy_ctxt = &mvm->phy_ctxts[*(uint16_t*)chanctx_conf->drv_priv];
+  chanctx_conf = rcu_dereference(vif->chanctx_conf);
+  /* make sure the channel context is assigned */
+  if (!chanctx_conf) {
     rcu_read_unlock();
-
-    mvm->dbgfs_rx_phyinfo = value;
-
-    ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
-                                   chanctx_conf->rx_chains_static, chanctx_conf->rx_chains_dynamic);
     mutex_unlock(&mvm->mutex);
+    return -EINVAL;
+  }
 
-    return ret ?: count;
+  phy_ctxt = &mvm->phy_ctxts[*(uint16_t*)chanctx_conf->drv_priv];
+  rcu_read_unlock();
+
+  mvm->dbgfs_rx_phyinfo = value;
+
+  ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
+                                 chanctx_conf->rx_chains_static, chanctx_conf->rx_chains_dynamic);
+  mutex_unlock(&mvm->mutex);
+
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file* file, char __user* user_buf, size_t count,
                                          loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    char buf[8];
-    int len;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  char buf[8];
+  int len;
 
-    len = scnprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo);
+  len = scnprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
 static void iwl_dbgfs_quota_check(void* data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int* ret = data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int* ret = data;
 
-    if (mvmvif->dbgfs_quota_min) { *ret = -EINVAL; }
+  if (mvmvif->dbgfs_quota_min) {
+    *ret = -EINVAL;
+  }
 }
 
 static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif* vif, char* buf, size_t count,
                                          loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    uint16_t value;
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  uint16_t value;
+  int ret;
 
 #ifdef CPTCFG_IWLMVM_ADVANCED_QUOTA_MGMT
-    /* not yet supported in that code */
-    if (!IWL_MVM_DYNQUOTA_DISABLED) { return -EINVAL; }
+  /* not yet supported in that code */
+  if (!IWL_MVM_DYNQUOTA_DISABLED) {
+    return -EINVAL;
+  }
 #endif
 
-    ret = kstrtou16(buf, 0, &value);
-    if (ret) { return ret; }
+  ret = kstrtou16(buf, 0, &value);
+  if (ret) {
+    return ret;
+  }
 
-    if (value > 95) { return -EINVAL; }
+  if (value > 95) {
+    return -EINVAL;
+  }
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    mvmvif->dbgfs_quota_min = 0;
-    ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_dbgfs_quota_check, &ret);
-    if (ret == 0) {
-        mvmvif->dbgfs_quota_min = value;
-        iwl_mvm_update_quotas(mvm, false, NULL);
-    }
-    mutex_unlock(&mvm->mutex);
+  mvmvif->dbgfs_quota_min = 0;
+  ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_dbgfs_quota_check, &ret);
+  if (ret == 0) {
+    mvmvif->dbgfs_quota_min = value;
+    iwl_mvm_update_quotas(mvm, false, NULL);
+  }
+  mutex_unlock(&mvm->mutex);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_quota_min_read(struct file* file, char __user* user_buf, size_t count,
                                         loff_t* ppos) {
-    struct ieee80211_vif* vif = file->private_data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    char buf[10];
-    int len;
+  struct ieee80211_vif* vif = file->private_data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  char buf[10];
+  int len;
 
-    len = scnprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
+  len = scnprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
 static ssize_t iwl_dbgfs_twt_setup_write(struct ieee80211_vif* vif, char* buf, size_t count,
                                          loff_t* ppos) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    struct iwl_dhc_twt_operation* dhc_twt_cmd;
-    struct iwl_dhc_cmd* cmd;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  struct iwl_dhc_twt_operation* dhc_twt_cmd;
+  struct iwl_dhc_cmd* cmd;
 
-    uint32_t twt_operation;
-    uint64_t target_wake_time;
-    uint32_t interval_exp;
-    uint32_t interval_mantissa;
-    uint32_t min_wake_duration;
-    uint8_t trigger;
-    uint8_t flow_type;
-    uint8_t flow_id;
-    uint8_t protection;
-    int ret;
+  uint32_t twt_operation;
+  uint64_t target_wake_time;
+  uint32_t interval_exp;
+  uint32_t interval_mantissa;
+  uint32_t min_wake_duration;
+  uint8_t trigger;
+  uint8_t flow_type;
+  uint8_t flow_id;
+  uint8_t protection;
+  int ret;
 
-    ret = sscanf(buf, "%u %llu %u %u %u %hhu %hhu %hhu %hhu", &twt_operation, &target_wake_time,
-                 &interval_exp, &interval_mantissa, &min_wake_duration, &trigger, &flow_type,
-                 &flow_id, &protection);
+  ret = sscanf(buf, "%u %llu %u %u %u %hhu %hhu %hhu %hhu", &twt_operation, &target_wake_time,
+               &interval_exp, &interval_mantissa, &min_wake_duration, &trigger, &flow_type,
+               &flow_id, &protection);
 
-    if (ret != 9) { return -EINVAL; }
+  if (ret != 9) {
+    return -EINVAL;
+  }
 
-    cmd = kzalloc(sizeof(*cmd) + sizeof(*dhc_twt_cmd), GFP_KERNEL);
-    if (!cmd) { return -ENOMEM; }
+  cmd = kzalloc(sizeof(*cmd) + sizeof(*dhc_twt_cmd), GFP_KERNEL);
+  if (!cmd) {
+    return -ENOMEM;
+  }
 
-    dhc_twt_cmd = (void*)cmd->data;
-    dhc_twt_cmd->mac_id = cpu_to_le32(mvmvif->id);
-    dhc_twt_cmd->twt_operation = cpu_to_le32(twt_operation);
-    dhc_twt_cmd->target_wake_time = cpu_to_le64(target_wake_time);
-    dhc_twt_cmd->interval_exp = cpu_to_le32(interval_exp);
-    dhc_twt_cmd->interval_mantissa = cpu_to_le32(interval_mantissa);
-    dhc_twt_cmd->min_wake_duration = cpu_to_le32(min_wake_duration);
-    dhc_twt_cmd->trigger = trigger;
-    dhc_twt_cmd->flow_type = flow_type;
-    dhc_twt_cmd->flow_id = flow_id;
-    dhc_twt_cmd->protection = protection;
+  dhc_twt_cmd = (void*)cmd->data;
+  dhc_twt_cmd->mac_id = cpu_to_le32(mvmvif->id);
+  dhc_twt_cmd->twt_operation = cpu_to_le32(twt_operation);
+  dhc_twt_cmd->target_wake_time = cpu_to_le64(target_wake_time);
+  dhc_twt_cmd->interval_exp = cpu_to_le32(interval_exp);
+  dhc_twt_cmd->interval_mantissa = cpu_to_le32(interval_mantissa);
+  dhc_twt_cmd->min_wake_duration = cpu_to_le32(min_wake_duration);
+  dhc_twt_cmd->trigger = trigger;
+  dhc_twt_cmd->flow_type = flow_type;
+  dhc_twt_cmd->flow_id = flow_id;
+  dhc_twt_cmd->protection = protection;
 
-    cmd->length = cpu_to_le32(sizeof(*dhc_twt_cmd) >> 2);
-    cmd->index_and_mask =
-        cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC | DHC_INT_UMAC_TWT_OPERATION);
+  cmd->length = cpu_to_le32(sizeof(*dhc_twt_cmd) >> 2);
+  cmd->index_and_mask =
+      cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC | DHC_INT_UMAC_TWT_OPERATION);
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0), 0,
-                               sizeof(*cmd) + sizeof(*dhc_twt_cmd), cmd);
-    mutex_unlock(&mvm->mutex);
-    kfree(cmd);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0), 0,
+                             sizeof(*cmd) + sizeof(*dhc_twt_cmd), cmd);
+  mutex_unlock(&mvm->mutex);
+  kfree(cmd);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 #endif /* CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED */
 
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
-    _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+  _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
-    _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
-#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode)                                           \
-    do {                                                                                       \
-        if (!debugfs_create_file(#name, mode, parent, vif, &iwl_dbgfs_##name##_ops)) goto err; \
-    } while (0)
+  _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode)                             \
+  do {                                                                           \
+    if (!debugfs_create_file(#name, mode, parent, vif, &iwl_dbgfs_##name##_ops)) \
+      goto err;                                                                  \
+  } while (0)
 
 MVM_DEBUGFS_READ_FILE_OPS(mac_params);
 MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
@@ -1323,97 +1460,99 @@
 #endif
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct dentry* dbgfs_dir = vif->debugfs_dir;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    char buf[100];
+  struct dentry* dbgfs_dir = vif->debugfs_dir;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  char buf[100];
 
-    /*
-     * Check if debugfs directory already exist before creating it.
-     * This may happen when, for example, resetting hw or suspend-resume
-     */
-    if (!dbgfs_dir || mvmvif->dbgfs_dir) { return; }
+  /*
+   * Check if debugfs directory already exist before creating it.
+   * This may happen when, for example, resetting hw or suspend-resume
+   */
+  if (!dbgfs_dir || mvmvif->dbgfs_dir) {
+    return;
+  }
 
-    mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
+  mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
 
-    if (!mvmvif->dbgfs_dir) {
+  if (!mvmvif->dbgfs_dir) {
 #if LINUX_VERSION_IS_GEQ(3, 12, 0)
-        IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n", dbgfs_dir);
+    IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n", dbgfs_dir);
 #else
-        IWL_ERR(mvm, "Failed to create debugfs directory under %s\n", dbgfs_dir->d_name.name);
-#endif
-        return;
-    }
-
-    if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
-        ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
-         (vif->type == NL80211_IFTYPE_STATION && vif->p2p))) {
-        MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, 0600);
-    }
-
-    MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400);
-    MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400);
-    MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400);
-
-    if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) {
-        MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600);
-    }
-
-#ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
-    if (vif->type == NL80211_IFTYPE_STATION) {
-        MVM_DEBUGFS_ADD_FILE_VIF(twt_setup, mvmvif->dbgfs_dir, S_IWUSR);
-    }
-#endif
-
-    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) && !vif->p2p &&
-        (vif->type != NL80211_IFTYPE_P2P_DEVICE) && (vif->type != NL80211_IFTYPE_NAN)) {
-        if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP) {
-            MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params, mvmvif->dbgfs_dir, 0600);
-        }
-
-        MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir, 0600);
-        MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir, 0600);
-        MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir, 0600);
-        MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir, 0600);
-        MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir, 0400);
-    }
-
-    /*
-     * Create symlink for convenience pointing to interface specific
-     * debugfs entries for the driver. For example, under
-     * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
-     * find
-     * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
-     */
-#if LINUX_VERSION_IS_GEQ(3, 12, 0)
-    snprintf(buf, 100, "../../../%pd3/%pd", dbgfs_dir, mvmvif->dbgfs_dir);
-#else
-    snprintf(buf, 100, "../../../%s/%s/%s/%s", dbgfs_dir->d_parent->d_parent->d_name.name,
-             dbgfs_dir->d_parent->d_name.name, dbgfs_dir->d_name.name,
-             mvmvif->dbgfs_dir->d_name.name);
-#endif
-
-    mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name, mvm->debugfs_dir, buf);
-    if (!mvmvif->dbgfs_slink)
-#if LINUX_VERSION_IS_GEQ(3, 12, 0)
-        IWL_ERR(mvm, "Can't create debugfs symbolic link under %pd\n", dbgfs_dir);
-#else
-        IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n", dbgfs_dir->d_name.name);
+    IWL_ERR(mvm, "Failed to create debugfs directory under %s\n", dbgfs_dir->d_name.name);
 #endif
     return;
+  }
+
+  if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+      ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
+       (vif->type == NL80211_IFTYPE_STATION && vif->p2p))) {
+    MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, 0600);
+  }
+
+  MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400);
+
+  if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) {
+    MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600);
+  }
+
+#ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
+  if (vif->type == NL80211_IFTYPE_STATION) {
+    MVM_DEBUGFS_ADD_FILE_VIF(twt_setup, mvmvif->dbgfs_dir, S_IWUSR);
+  }
+#endif
+
+  if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) && !vif->p2p &&
+      (vif->type != NL80211_IFTYPE_P2P_DEVICE) && (vif->type != NL80211_IFTYPE_NAN)) {
+    if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP) {
+      MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params, mvmvif->dbgfs_dir, 0600);
+    }
+
+    MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir, 0600);
+    MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir, 0600);
+    MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir, 0600);
+    MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir, 0600);
+    MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir, 0400);
+  }
+
+  /*
+   * Create symlink for convenience pointing to interface specific
+   * debugfs entries for the driver. For example, under
+   * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
+   * find
+   * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
+   */
+#if LINUX_VERSION_IS_GEQ(3, 12, 0)
+  snprintf(buf, 100, "../../../%pd3/%pd", dbgfs_dir, mvmvif->dbgfs_dir);
+#else
+  snprintf(buf, 100, "../../../%s/%s/%s/%s", dbgfs_dir->d_parent->d_parent->d_name.name,
+           dbgfs_dir->d_parent->d_name.name, dbgfs_dir->d_name.name,
+           mvmvif->dbgfs_dir->d_name.name);
+#endif
+
+  mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name, mvm->debugfs_dir, buf);
+  if (!mvmvif->dbgfs_slink)
+#if LINUX_VERSION_IS_GEQ(3, 12, 0)
+    IWL_ERR(mvm, "Can't create debugfs symbolic link under %pd\n", dbgfs_dir);
+#else
+    IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n", dbgfs_dir->d_name.name);
+#endif
+  return;
 err:
-    IWL_ERR(mvm, "Can't create debugfs entity\n");
+  IWL_ERR(mvm, "Can't create debugfs entity\n");
 }
 
 void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    debugfs_remove(mvmvif->dbgfs_slink);
-    mvmvif->dbgfs_slink = NULL;
+  debugfs_remove(mvmvif->dbgfs_slink);
+  mvmvif->dbgfs_slink = NULL;
 
-    debugfs_remove_recursive(mvmvif->dbgfs_dir);
-    mvmvif->dbgfs_dir = NULL;
+  debugfs_remove_recursive(mvmvif->dbgfs_dir);
+  mvmvif->dbgfs_dir = NULL;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs.c
index 5cc8e68..fea7506 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs.c
@@ -33,11 +33,12 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include "debugfs.h"
+
 #include <linux/ieee80211.h>
 #include <linux/netdevice.h>
 #include <linux/vmalloc.h>
 
-#include "debugfs.h"
 #include "fw/error-dump.h"
 #include "iwl-io.h"
 #include "mvm.h"
@@ -49,248 +50,274 @@
 #ifdef CPTCFG_IWLWIFI_THERMAL_DEBUGFS
 static ssize_t iwl_dbgfs_tt_tx_backoff_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                              loff_t* ppos) {
-    int i = 0;
-    int ret;
-    uint32_t temperature, backoff;
-    char* value_str;
-    char* seps = "\n ";
-    char* buf_ptr = buf;
-    struct iwl_tt_tx_backoff new_backoff_values[TT_TX_BACKOFF_SIZE];
+  int i = 0;
+  int ret;
+  uint32_t temperature, backoff;
+  char* value_str;
+  char* seps = "\n ";
+  char* buf_ptr = buf;
+  struct iwl_tt_tx_backoff new_backoff_values[TT_TX_BACKOFF_SIZE];
 
-    mutex_lock(&mvm->mutex);
-    while ((value_str = strsep(&buf_ptr, seps))) {
-        if (sscanf(value_str, "%u=%u", &temperature, &backoff) != 2) { break; }
-
-        if (temperature >= mvm->thermal_throttle.params.ct_kill_entry ||
-            backoff < mvm->thermal_throttle.min_backoff) {
-            ret = -EINVAL;
-            goto out;
-        }
-
-        if (i == TT_TX_BACKOFF_SIZE) {
-            ret = -EINVAL;
-            goto out;
-        }
-
-        new_backoff_values[i].backoff = backoff;
-        new_backoff_values[i].temperature = temperature;
-        i++;
+  mutex_lock(&mvm->mutex);
+  while ((value_str = strsep(&buf_ptr, seps))) {
+    if (sscanf(value_str, "%u=%u", &temperature, &backoff) != 2) {
+      break;
     }
 
-    if (i != TT_TX_BACKOFF_SIZE) {
-        ret = -EINVAL;
-        goto out;
+    if (temperature >= mvm->thermal_throttle.params.ct_kill_entry ||
+        backoff < mvm->thermal_throttle.min_backoff) {
+      ret = -EINVAL;
+      goto out;
     }
 
-    memcpy(mvm->thermal_throttle.params.tx_backoff, new_backoff_values,
-           sizeof(mvm->thermal_throttle.params.tx_backoff));
+    if (i == TT_TX_BACKOFF_SIZE) {
+      ret = -EINVAL;
+      goto out;
+    }
 
-    ret = count;
+    new_backoff_values[i].backoff = backoff;
+    new_backoff_values[i].temperature = temperature;
+    i++;
+  }
+
+  if (i != TT_TX_BACKOFF_SIZE) {
+    ret = -EINVAL;
+    goto out;
+  }
+
+  memcpy(mvm->thermal_throttle.params.tx_backoff, new_backoff_values,
+         sizeof(mvm->thermal_throttle.params.tx_backoff));
+
+  ret = count;
 
 out:
-    mutex_unlock(&mvm->mutex);
-    return ret;
+  mutex_unlock(&mvm->mutex);
+  return ret;
 }
 
 static ssize_t iwl_dbgfs_tt_tx_backoff_read(struct file* file, char __user* user_buf, size_t count,
                                             loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    struct iwl_tt_tx_backoff* tx_backoff = mvm->thermal_throttle.params.tx_backoff;
-    /* we need 10 chars per line: 3 chars for the temperature + 1
-     * for the equal sign + 5 for the backoff value + end of line.
-     */
-    char buf[TT_TX_BACKOFF_SIZE * 10 + 1];
-    int i, pos = 0, bufsz = sizeof(buf);
+  struct iwl_mvm* mvm = file->private_data;
+  struct iwl_tt_tx_backoff* tx_backoff = mvm->thermal_throttle.params.tx_backoff;
+  /* we need 10 chars per line: 3 chars for the temperature + 1
+   * for the equal sign + 5 for the backoff value + end of line.
+   */
+  char buf[TT_TX_BACKOFF_SIZE * 10 + 1];
+  int i, pos = 0, bufsz = sizeof(buf);
 
-    mutex_lock(&mvm->mutex);
-    for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
-        pos += scnprintf(buf + pos, bufsz - pos, "%d=%d\n", tx_backoff[i].temperature,
-                         tx_backoff[i].backoff);
-    }
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
+    pos += scnprintf(buf + pos, bufsz - pos, "%d=%d\n", tx_backoff[i].temperature,
+                     tx_backoff[i].backoff);
+  }
+  mutex_unlock(&mvm->mutex);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 #endif
 
 static ssize_t iwl_dbgfs_ctdp_budget_read(struct file* file, char __user* user_buf, size_t count,
                                           loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    char buf[16];
-    int pos, budget;
+  struct iwl_mvm* mvm = file->private_data;
+  char buf[16];
+  int pos, budget;
 
-    if (!iwl_mvm_is_ctdp_supported(mvm)) { return -EOPNOTSUPP; }
+  if (!iwl_mvm_is_ctdp_supported(mvm)) {
+    return -EOPNOTSUPP;
+  }
 
-    if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
-        return -EIO;
-    }
+  if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+    return -EIO;
+  }
 
-    mutex_lock(&mvm->mutex);
-    budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0);
+  mutex_unlock(&mvm->mutex);
 
-    if (budget < 0) { return budget; }
+  if (budget < 0) {
+    return budget;
+  }
 
-    pos = scnprintf(buf, sizeof(buf), "%d\n", budget);
+  pos = scnprintf(buf, sizeof(buf), "%d\n", budget);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                          loff_t* ppos) {
-    int ret;
+  int ret;
 
-    if (!iwl_mvm_is_ctdp_supported(mvm)) { return -EOPNOTSUPP; }
+  if (!iwl_mvm_is_ctdp_supported(mvm)) {
+    return -EOPNOTSUPP;
+  }
 
-    if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
-        return -EIO;
-    }
+  if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+    return -EIO;
+  }
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0);
+  mutex_unlock(&mvm->mutex);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                             loff_t* ppos) {
-    if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
-        return -EIO;
-    }
+  if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+    return -EIO;
+  }
 
-    iwl_mvm_enter_ctkill(mvm);
+  iwl_mvm_enter_ctkill(mvm);
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                         loff_t* ppos) {
-    int ret;
-    uint32_t flush_arg;
+  int ret;
+  uint32_t flush_arg;
 
-    if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
-        return -EIO;
-    }
+  if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+    return -EIO;
+  }
 
-    if (kstrtou32(buf, 0, &flush_arg)) { return -EINVAL; }
+  if (kstrtou32(buf, 0, &flush_arg)) {
+    return -EINVAL;
+  }
 
-    if (iwl_mvm_has_new_tx_api(mvm)) {
-        IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING all tids queues on sta_id = %d\n", flush_arg);
-        mutex_lock(&mvm->mutex);
-        ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFF, 0) ?: count;
-        mutex_unlock(&mvm->mutex);
-        return ret;
-    }
-
-    IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n", flush_arg);
-
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING all tids queues on sta_id = %d\n", flush_arg);
     mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_flush_tx_path(mvm, flush_arg, 0) ?: count;
+    ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFF, 0) ?: count;
     mutex_unlock(&mvm->mutex);
-
     return ret;
+  }
+
+  IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n", flush_arg);
+
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_flush_tx_path(mvm, flush_arg, 0) ?: count;
+  mutex_unlock(&mvm->mutex);
+
+  return ret;
 }
 
 static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                          loff_t* ppos) {
-    struct iwl_mvm_sta* mvmsta;
-    int sta_id, drain, ret;
+  struct iwl_mvm_sta* mvmsta;
+  int sta_id, drain, ret;
 
-    if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
-        return -EIO;
-    }
+  if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+    return -EIO;
+  }
 
-    if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) { return -EINVAL; }
-    if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT) { return -EINVAL; }
-    if (drain < 0 || drain > 1) { return -EINVAL; }
+  if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) {
+    return -EINVAL;
+  }
+  if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT) {
+    return -EINVAL;
+  }
+  if (drain < 0 || drain > 1) {
+    return -EINVAL;
+  }
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+  mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
 
-    if (!mvmsta) {
-        ret = -ENOENT;
-    } else {
-        ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ?: count;
-    }
+  if (!mvmsta) {
+    ret = -ENOENT;
+  } else {
+    ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ?: count;
+  }
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return ret;
+  return ret;
 }
 
 static ssize_t iwl_dbgfs_sram_read(struct file* file, char __user* user_buf, size_t count,
                                    loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    const struct fw_img* img;
-    unsigned int ofs, len;
-    size_t ret;
-    uint8_t* ptr;
+  struct iwl_mvm* mvm = file->private_data;
+  const struct fw_img* img;
+  unsigned int ofs, len;
+  size_t ret;
+  uint8_t* ptr;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EINVAL; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EINVAL;
+  }
 
-    /* default is to dump the entire data segment */
-    img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
-    ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
-    len = img->sec[IWL_UCODE_SECTION_DATA].len;
+  /* default is to dump the entire data segment */
+  img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
+  ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+  len = img->sec[IWL_UCODE_SECTION_DATA].len;
 
-    if (mvm->dbgfs_sram_len) {
-        ofs = mvm->dbgfs_sram_offset;
-        len = mvm->dbgfs_sram_len;
-    }
+  if (mvm->dbgfs_sram_len) {
+    ofs = mvm->dbgfs_sram_offset;
+    len = mvm->dbgfs_sram_len;
+  }
 
-    ptr = kzalloc(len, GFP_KERNEL);
-    if (!ptr) { return -ENOMEM; }
+  ptr = kzalloc(len, GFP_KERNEL);
+  if (!ptr) {
+    return -ENOMEM;
+  }
 
-    iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
+  iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
 
-    ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len);
+  ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len);
 
-    kfree(ptr);
+  kfree(ptr);
 
-    return ret;
+  return ret;
 }
 
 static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm* mvm, char* buf, size_t count, loff_t* ppos) {
-    const struct fw_img* img;
-    uint32_t offset, len;
-    uint32_t img_offset, img_len;
+  const struct fw_img* img;
+  uint32_t offset, len;
+  uint32_t img_offset, img_len;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EINVAL; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EINVAL;
+  }
 
-    img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
-    img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset;
-    img_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+  img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
+  img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset;
+  img_len = img->sec[IWL_UCODE_SECTION_DATA].len;
 
-    if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
-        if ((offset & 0x3) || (len & 0x3)) { return -EINVAL; }
-
-        if (offset + len > img_offset + img_len) { return -EINVAL; }
-
-        mvm->dbgfs_sram_offset = offset;
-        mvm->dbgfs_sram_len = len;
-    } else {
-        mvm->dbgfs_sram_offset = 0;
-        mvm->dbgfs_sram_len = 0;
+  if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+    if ((offset & 0x3) || (len & 0x3)) {
+      return -EINVAL;
     }
 
-    return count;
+    if (offset + len > img_offset + img_len) {
+      return -EINVAL;
+    }
+
+    mvm->dbgfs_sram_offset = offset;
+    mvm->dbgfs_sram_len = len;
+  } else {
+    mvm->dbgfs_sram_offset = 0;
+    mvm->dbgfs_sram_len = 0;
+  }
+
+  return count;
 }
 
 static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file* file, char __user* user_buf,
                                                   size_t count, loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    char buf[16];
-    int pos;
+  struct iwl_mvm* mvm = file->private_data;
+  char buf[16];
+  int pos;
 
-    if (!mvm->temperature_test) {
-        pos = scnprintf(buf, sizeof(buf), "disabled\n");
-    } else {
-        pos = scnprintf(buf, sizeof(buf), "%d\n", mvm->temperature);
-    }
+  if (!mvm->temperature_test) {
+    pos = scnprintf(buf, sizeof(buf), "disabled\n");
+  } else {
+    pos = scnprintf(buf, sizeof(buf), "%d\n", mvm->temperature);
+  }
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 /*
@@ -302,938 +329,1015 @@
  */
 static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                                    loff_t* ppos) {
-    int temperature;
+  int temperature;
 
-    if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test) {
+    return -EIO;
+  }
 
-    if (kstrtoint(buf, 10, &temperature)) { return -EINVAL; }
-    /* not a legal temperature */
-    if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX &&
-         temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) ||
-        temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN) {
-        return -EINVAL;
+  if (kstrtoint(buf, 10, &temperature)) {
+    return -EINVAL;
+  }
+  /* not a legal temperature */
+  if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX &&
+       temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) ||
+      temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN) {
+    return -EINVAL;
+  }
+
+  mutex_lock(&mvm->mutex);
+  if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) {
+    if (!mvm->temperature_test) {
+      goto out;
     }
 
-    mutex_lock(&mvm->mutex);
-    if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) {
-        if (!mvm->temperature_test) { goto out; }
-
-        mvm->temperature_test = false;
-        /* Since we can't read the temp while awake, just set
-         * it to zero until we get the next RX stats from the
-         * firmware.
-         */
-        mvm->temperature = 0;
-    } else {
-        mvm->temperature_test = true;
-        mvm->temperature = temperature;
-    }
-    IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n",
-                   mvm->temperature_test ? "En" : "Dis", mvm->temperature);
-    /* handle the temperature change */
-    iwl_mvm_tt_handler(mvm);
+    mvm->temperature_test = false;
+    /* Since we can't read the temp while awake, just set
+     * it to zero until we get the next RX stats from the
+     * firmware.
+     */
+    mvm->temperature = 0;
+  } else {
+    mvm->temperature_test = true;
+    mvm->temperature = temperature;
+  }
+  IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n",
+                 mvm->temperature_test ? "En" : "Dis", mvm->temperature);
+  /* handle the temperature change */
+  iwl_mvm_tt_handler(mvm);
 
 out:
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_nic_temp_read(struct file* file, char __user* user_buf, size_t count,
                                        loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    char buf[16];
-    int pos, ret;
-    int32_t temp;
+  struct iwl_mvm* mvm = file->private_data;
+  char buf[16];
+  int pos, ret;
+  int32_t temp;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_get_temp(mvm, &temp);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_get_temp(mvm, &temp);
+  mutex_unlock(&mvm->mutex);
 
-    if (ret) { return -EIO; }
+  if (ret) {
+    return -EIO;
+  }
 
-    pos = scnprintf(buf, sizeof(buf), "%d\n", temp);
+  pos = scnprintf(buf, sizeof(buf), "%d\n", temp);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 #ifdef CONFIG_ACPI
 static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file* file, char __user* user_buf,
                                               size_t count, loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    char buf[256];
-    int pos = 0;
-    int bufsz = sizeof(buf);
-    int tbl_idx;
-    uint8_t* value;
+  struct iwl_mvm* mvm = file->private_data;
+  char buf[256];
+  int pos = 0;
+  int bufsz = sizeof(buf);
+  int tbl_idx;
+  uint8_t* value;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    mutex_lock(&mvm->mutex);
-    tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
-    if (tbl_idx < 0) {
-        mutex_unlock(&mvm->mutex);
-        return tbl_idx;
-    }
-
-    if (!tbl_idx) {
-        pos = scnprintf(buf, bufsz, "SAR geographic profile disabled\n");
-    } else {
-        value = &mvm->geo_profiles[tbl_idx - 1].values[0];
-
-        pos += scnprintf(buf + pos, bufsz - pos, "Use geographic profile %d\n", tbl_idx);
-        pos += scnprintf(buf + pos, bufsz - pos,
-                         "2.4GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax "
-                         "tx power: %hhd dBm\n",
-                         value[1], value[2], value[0]);
-        pos += scnprintf(buf + pos, bufsz - pos,
-                         "5.2GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax "
-                         "tx power: %hhd dBm\n",
-                         value[4], value[5], value[3]);
-    }
+  mutex_lock(&mvm->mutex);
+  tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
+  if (tbl_idx < 0) {
     mutex_unlock(&mvm->mutex);
+    return tbl_idx;
+  }
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  if (!tbl_idx) {
+    pos = scnprintf(buf, bufsz, "SAR geographic profile disabled\n");
+  } else {
+    value = &mvm->geo_profiles[tbl_idx - 1].values[0];
+
+    pos += scnprintf(buf + pos, bufsz - pos, "Use geographic profile %d\n", tbl_idx);
+    pos += scnprintf(buf + pos, bufsz - pos,
+                     "2.4GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax "
+                     "tx power: %hhd dBm\n",
+                     value[1], value[2], value[0]);
+    pos += scnprintf(buf + pos, bufsz - pos,
+                     "5.2GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax "
+                     "tx power: %hhd dBm\n",
+                     value[4], value[5], value[3]);
+  }
+  mutex_unlock(&mvm->mutex);
+
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 #endif
 
 static ssize_t iwl_dbgfs_stations_read(struct file* file, char __user* user_buf, size_t count,
                                        loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    struct ieee80211_sta* sta;
-    char buf[400];
-    int i, pos = 0, bufsz = sizeof(buf);
+  struct iwl_mvm* mvm = file->private_data;
+  struct ieee80211_sta* sta;
+  char buf[400];
+  int i, pos = 0, bufsz = sizeof(buf);
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
-        pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
-        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex));
-        if (!sta) {
-            pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
-        } else if (IS_ERR(sta)) {
-            pos += scnprintf(buf + pos, bufsz - pos, "%ld\n", PTR_ERR(sta));
-        } else {
-            pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", sta->addr);
-        }
+  for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+    pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
+    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex));
+    if (!sta) {
+      pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
+    } else if (IS_ERR(sta)) {
+      pos += scnprintf(buf + pos, bufsz - pos, "%ld\n", PTR_ERR(sta));
+    } else {
+      pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", sta->addr);
     }
+  }
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_rs_data_read(struct file* file, char __user* user_buf, size_t count,
                                       loff_t* ppos) {
-    struct ieee80211_sta* sta = file->private_data;
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_lq_sta_rs_fw* lq_sta = &mvmsta->lq_sta.rs_fw;
-    struct iwl_mvm* mvm = lq_sta->pers.drv;
-    static const size_t bufsz = 2048;
-    char* buff;
-    int desc = 0;
-    ssize_t ret;
+  struct ieee80211_sta* sta = file->private_data;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_lq_sta_rs_fw* lq_sta = &mvmsta->lq_sta.rs_fw;
+  struct iwl_mvm* mvm = lq_sta->pers.drv;
+  static const size_t bufsz = 2048;
+  char* buff;
+  int desc = 0;
+  ssize_t ret;
 
-    buff = kmalloc(bufsz, GFP_KERNEL);
-    if (!buff) { return -ENOMEM; }
+  buff = kmalloc(bufsz, GFP_KERNEL);
+  if (!buff) {
+    return -ENOMEM;
+  }
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n", lq_sta->pers.sta_id);
-    desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n", lq_sta->pers.dbg_fixed_rate);
-    desc += scnprintf(buff + desc, bufsz - desc, "A-MPDU size limit %d\n",
-                      lq_sta->pers.dbg_agg_frame_count_lim);
-    desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n",
-                      (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
-                      (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
-                      (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
-    desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X ", lq_sta->last_rate_n_flags);
+  desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n", lq_sta->pers.sta_id);
+  desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n", lq_sta->pers.dbg_fixed_rate);
+  desc += scnprintf(buff + desc, bufsz - desc, "A-MPDU size limit %d\n",
+                    lq_sta->pers.dbg_agg_frame_count_lim);
+  desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n",
+                    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+                    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
+                    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+  desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X ", lq_sta->last_rate_n_flags);
 
-    desc += rs_pretty_print_rate(buff + desc, bufsz - desc, lq_sta->last_rate_n_flags);
-    mutex_unlock(&mvm->mutex);
+  desc += rs_pretty_print_rate(buff + desc, bufsz - desc, lq_sta->last_rate_n_flags);
+  mutex_unlock(&mvm->mutex);
 
-    ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-    kfree(buff);
-    return ret;
+  ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+  kfree(buff);
+  return ret;
 }
 
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
 static void iwl_rs_set_fixed_rate(struct iwl_mvm* mvm, struct iwl_lq_sta_rs_fw* lq_sta) {
-    struct iwl_dhc_cmd* dhc_cmd;
-    struct iwl_dhc_tlc_cmd* dhc_tlc_cmd;
-    uint32_t cmd_id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0);
-    int ret;
+  struct iwl_dhc_cmd* dhc_cmd;
+  struct iwl_dhc_tlc_cmd* dhc_tlc_cmd;
+  uint32_t cmd_id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0);
+  int ret;
 
-    dhc_cmd = kzalloc(sizeof(*dhc_cmd) + sizeof(*dhc_tlc_cmd), GFP_KERNEL);
-    if (!dhc_cmd) {
-        lq_sta->pers.dbg_fixed_rate = 0;
-        return;
-    }
+  dhc_cmd = kzalloc(sizeof(*dhc_cmd) + sizeof(*dhc_tlc_cmd), GFP_KERNEL);
+  if (!dhc_cmd) {
+    lq_sta->pers.dbg_fixed_rate = 0;
+    return;
+  }
 
-    IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n", lq_sta->pers.sta_id, lq_sta->pers.dbg_fixed_rate);
+  IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n", lq_sta->pers.sta_id, lq_sta->pers.dbg_fixed_rate);
 
-    dhc_tlc_cmd = (void*)dhc_cmd->data;
-    dhc_tlc_cmd->sta_id = lq_sta->pers.sta_id;
-    dhc_tlc_cmd->data[IWL_TLC_DEBUG_FIXED_RATE] = cpu_to_le32(lq_sta->pers.dbg_fixed_rate);
-    dhc_tlc_cmd->flags = cpu_to_le32(BIT(IWL_TLC_DEBUG_FIXED_RATE));
-    dhc_cmd->length = cpu_to_le32(sizeof(*dhc_tlc_cmd) >> 2);
-    dhc_cmd->index_and_mask =
-        cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC | DHC_INTEGRATION_TLC_DEBUG_CONFIG);
+  dhc_tlc_cmd = (void*)dhc_cmd->data;
+  dhc_tlc_cmd->sta_id = lq_sta->pers.sta_id;
+  dhc_tlc_cmd->data[IWL_TLC_DEBUG_FIXED_RATE] = cpu_to_le32(lq_sta->pers.dbg_fixed_rate);
+  dhc_tlc_cmd->flags = cpu_to_le32(BIT(IWL_TLC_DEBUG_FIXED_RATE));
+  dhc_cmd->length = cpu_to_le32(sizeof(*dhc_tlc_cmd) >> 2);
+  dhc_cmd->index_and_mask =
+      cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC | DHC_INTEGRATION_TLC_DEBUG_CONFIG);
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(*dhc_cmd) + sizeof(*dhc_tlc_cmd), dhc_cmd);
-    mutex_unlock(&mvm->mutex);
-    if (ret) {
-        lq_sta->pers.dbg_fixed_rate = 0;
-        IWL_ERR(mvm, "Failed to send TLC Debug command: %d\n", ret);
-    }
-    kfree(dhc_cmd);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(*dhc_cmd) + sizeof(*dhc_tlc_cmd), dhc_cmd);
+  mutex_unlock(&mvm->mutex);
+  if (ret) {
+    lq_sta->pers.dbg_fixed_rate = 0;
+    IWL_ERR(mvm, "Failed to send TLC Debug command: %d\n", ret);
+  }
+  kfree(dhc_cmd);
 }
 
 static ssize_t iwl_dbgfs_fixed_rate_write(struct ieee80211_sta* sta, char* buf, size_t count,
                                           loff_t* ppos) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_lq_sta_rs_fw* lq_sta = &mvmsta->lq_sta.rs_fw;
-    struct iwl_mvm* mvm = lq_sta->pers.drv;
-    uint32_t parsed_rate;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_lq_sta_rs_fw* lq_sta = &mvmsta->lq_sta.rs_fw;
+  struct iwl_mvm* mvm = lq_sta->pers.drv;
+  uint32_t parsed_rate;
 
-    if (kstrtou32(buf, 0, &parsed_rate)) {
-        lq_sta->pers.dbg_fixed_rate = 0;
-    } else {
-        lq_sta->pers.dbg_fixed_rate = parsed_rate;
-    }
+  if (kstrtou32(buf, 0, &parsed_rate)) {
+    lq_sta->pers.dbg_fixed_rate = 0;
+  } else {
+    lq_sta->pers.dbg_fixed_rate = parsed_rate;
+  }
 
-    iwl_rs_set_fixed_rate(mvm, lq_sta);
-    return count;
+  iwl_rs_set_fixed_rate(mvm, lq_sta);
+  return count;
 }
 
 static void iwl_rs_set_ampdu_size(struct iwl_mvm* mvm, struct iwl_lq_sta_rs_fw* lq_sta) {
-    struct iwl_dhc_cmd* dhc_cmd;
-    struct iwl_dhc_tlc_cmd* dhc_tlc_cmd;
-    uint32_t cmd_id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0);
-    int ret;
+  struct iwl_dhc_cmd* dhc_cmd;
+  struct iwl_dhc_tlc_cmd* dhc_tlc_cmd;
+  uint32_t cmd_id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0);
+  int ret;
 
-    dhc_cmd = kzalloc(sizeof(*dhc_cmd) + sizeof(*dhc_tlc_cmd), GFP_KERNEL);
-    if (!dhc_cmd) {
-        lq_sta->pers.dbg_agg_frame_count_lim = 0;
-        return;
-    }
+  dhc_cmd = kzalloc(sizeof(*dhc_cmd) + sizeof(*dhc_tlc_cmd), GFP_KERNEL);
+  if (!dhc_cmd) {
+    lq_sta->pers.dbg_agg_frame_count_lim = 0;
+    return;
+  }
 
-    IWL_DEBUG_RATE(mvm, "sta_id %d agg_frame_cmdt_lim %d\n", lq_sta->pers.sta_id,
-                   lq_sta->pers.dbg_agg_frame_count_lim);
+  IWL_DEBUG_RATE(mvm, "sta_id %d agg_frame_cmdt_lim %d\n", lq_sta->pers.sta_id,
+                 lq_sta->pers.dbg_agg_frame_count_lim);
 
-    dhc_tlc_cmd = (void*)dhc_cmd->data;
-    dhc_tlc_cmd->sta_id = lq_sta->pers.sta_id;
-    dhc_tlc_cmd->data[IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM] =
-        cpu_to_le32(lq_sta->pers.dbg_agg_frame_count_lim);
-    dhc_tlc_cmd->flags = cpu_to_le32(BIT(IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM));
-    dhc_cmd->length = cpu_to_le32(sizeof(*dhc_tlc_cmd) >> 2);
-    dhc_cmd->index_and_mask =
-        cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC | DHC_INTEGRATION_TLC_DEBUG_CONFIG);
+  dhc_tlc_cmd = (void*)dhc_cmd->data;
+  dhc_tlc_cmd->sta_id = lq_sta->pers.sta_id;
+  dhc_tlc_cmd->data[IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM] =
+      cpu_to_le32(lq_sta->pers.dbg_agg_frame_count_lim);
+  dhc_tlc_cmd->flags = cpu_to_le32(BIT(IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM));
+  dhc_cmd->length = cpu_to_le32(sizeof(*dhc_tlc_cmd) >> 2);
+  dhc_cmd->index_and_mask =
+      cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC | DHC_INTEGRATION_TLC_DEBUG_CONFIG);
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(*dhc_cmd) + sizeof(*dhc_tlc_cmd), dhc_cmd);
-    mutex_unlock(&mvm->mutex);
-    if (ret) {
-        lq_sta->pers.dbg_agg_frame_count_lim = 0;
-        IWL_ERR(mvm, "Failed to send TLC Debug command: %d\n", ret);
-    }
-    kfree(dhc_cmd);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(*dhc_cmd) + sizeof(*dhc_tlc_cmd), dhc_cmd);
+  mutex_unlock(&mvm->mutex);
+  if (ret) {
+    lq_sta->pers.dbg_agg_frame_count_lim = 0;
+    IWL_ERR(mvm, "Failed to send TLC Debug command: %d\n", ret);
+  }
+  kfree(dhc_cmd);
 }
 
 static ssize_t iwl_dbgfs_ampdu_size_write(struct ieee80211_sta* sta, char* buf, size_t count,
                                           loff_t* ppos) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_lq_sta_rs_fw* lq_sta = &mvmsta->lq_sta.rs_fw;
-    struct iwl_mvm* mvm = lq_sta->pers.drv;
-    uint32_t ampdu_size;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_lq_sta_rs_fw* lq_sta = &mvmsta->lq_sta.rs_fw;
+  struct iwl_mvm* mvm = lq_sta->pers.drv;
+  uint32_t ampdu_size;
 
-    if (kstrtou32(buf, 0, &ampdu_size)) {
-        lq_sta->pers.dbg_agg_frame_count_lim = 0;
-    } else {
-        lq_sta->pers.dbg_agg_frame_count_lim = ampdu_size;
-    }
+  if (kstrtou32(buf, 0, &ampdu_size)) {
+    lq_sta->pers.dbg_agg_frame_count_lim = 0;
+  } else {
+    lq_sta->pers.dbg_agg_frame_count_lim = ampdu_size;
+  }
 
-    iwl_rs_set_ampdu_size(mvm, lq_sta);
-    return count;
+  iwl_rs_set_ampdu_size(mvm, lq_sta);
+  return count;
 }
 #endif
 
 static ssize_t iwl_dbgfs_disable_power_off_read(struct file* file, char __user* user_buf,
                                                 size_t count, loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    char buf[64];
-    int bufsz = sizeof(buf);
-    int pos = 0;
+  struct iwl_mvm* mvm = file->private_data;
+  char buf[64];
+  int bufsz = sizeof(buf);
+  int pos = 0;
 
-    pos += scnprintf(buf + pos, bufsz - pos, "disable_power_off_d0=%d\n", mvm->disable_power_off);
-    pos +=
-        scnprintf(buf + pos, bufsz - pos, "disable_power_off_d3=%d\n", mvm->disable_power_off_d3);
+  pos += scnprintf(buf + pos, bufsz - pos, "disable_power_off_d0=%d\n", mvm->disable_power_off);
+  pos += scnprintf(buf + pos, bufsz - pos, "disable_power_off_d3=%d\n", mvm->disable_power_off_d3);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                                  loff_t* ppos) {
-    int ret, val;
+  int ret, val;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    if (!strncmp("disable_power_off_d0=", buf, 21)) {
-        if (sscanf(buf + 21, "%d", &val) != 1) { return -EINVAL; }
-        mvm->disable_power_off = val;
-    } else if (!strncmp("disable_power_off_d3=", buf, 21)) {
-        if (sscanf(buf + 21, "%d", &val) != 1) { return -EINVAL; }
-        mvm->disable_power_off_d3 = val;
-    } else {
-        return -EINVAL;
+  if (!strncmp("disable_power_off_d0=", buf, 21)) {
+    if (sscanf(buf + 21, "%d", &val) != 1) {
+      return -EINVAL;
     }
+    mvm->disable_power_off = val;
+  } else if (!strncmp("disable_power_off_d3=", buf, 21)) {
+    if (sscanf(buf + 21, "%d", &val) != 1) {
+      return -EINVAL;
+    }
+    mvm->disable_power_off_d3 = val;
+  } else {
+    return -EINVAL;
+  }
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_power_update_device(mvm);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_power_update_device(mvm);
+  mutex_unlock(&mvm->mutex);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 #ifdef CPTCFG_IWLMVM_AX_SOFTAP_TESTMODE
 static ssize_t iwl_dbgfs_ax_softap_client_testmode_write(struct iwl_mvm* mvm, char* buf,
                                                          size_t count, loff_t* ppos) {
-    uint32_t status;
-    int ret;
-    bool is_enabled;
-    struct ax_softap_client_testmode_cmd cmd;
+  uint32_t status;
+  int ret;
+  bool is_enabled;
+  struct ax_softap_client_testmode_cmd cmd;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    ret = kstrtobool(buf, &is_enabled);
-    if (ret) {
-        IWL_ERR(mvm, "Invalid softap client debugfs value (%d)\n", ret);
-        return ret;
-    }
+  ret = kstrtobool(buf, &is_enabled);
+  if (ret) {
+    IWL_ERR(mvm, "Invalid softap client debugfs value (%d)\n", ret);
+    return ret;
+  }
 
-    cmd.enable = is_enabled ? 1 : 0;
+  cmd.enable = is_enabled ? 1 : 0;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    ret = iwl_mvm_send_cmd_pdu_status(
-        mvm, iwl_cmd_id(AX_SOFTAP_CLIENT_TESTMODE, DATA_PATH_GROUP, 0), sizeof(cmd), &cmd, &status);
+  ret = iwl_mvm_send_cmd_pdu_status(mvm, iwl_cmd_id(AX_SOFTAP_CLIENT_TESTMODE, DATA_PATH_GROUP, 0),
+                                    sizeof(cmd), &cmd, &status);
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    if (ret) {
-        IWL_ERR(mvm, "Failed to send softap client cmd (%d)\n", ret);
-        return ret;
-    }
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send softap client cmd (%d)\n", ret);
+    return ret;
+  }
 
-    if (status) {
-        IWL_ERR(mvm, "softap client cmd failed (%d)\n", status);
-        return -EIO;
-    }
+  if (status) {
+    IWL_ERR(mvm, "softap client cmd failed (%d)\n", status);
+    return -EIO;
+  }
 
-    mvm->is_bar_enabled = cmd.enable ? false : true;
+  mvm->is_bar_enabled = cmd.enable ? false : true;
 
-    return count;
+  return count;
 }
 #endif
 
 static int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif* notif, char* buf, int pos,
                                   int bufsz) {
-    pos += scnprintf(buf + pos, bufsz - pos, "MBOX dw0:\n");
+  pos += scnprintf(buf + pos, bufsz - pos, "MBOX dw0:\n");
 
-    BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
-    BT_MBOX_PRINT(0, LE_PROF1, false);
-    BT_MBOX_PRINT(0, LE_PROF2, false);
-    BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
-    BT_MBOX_PRINT(0, CHL_SEQ_N, false);
-    BT_MBOX_PRINT(0, INBAND_S, false);
-    BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
-    BT_MBOX_PRINT(0, LE_SCAN, false);
-    BT_MBOX_PRINT(0, LE_ADV, false);
-    BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
-    BT_MBOX_PRINT(0, OPEN_CON_1, true);
+  BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
+  BT_MBOX_PRINT(0, LE_PROF1, false);
+  BT_MBOX_PRINT(0, LE_PROF2, false);
+  BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
+  BT_MBOX_PRINT(0, CHL_SEQ_N, false);
+  BT_MBOX_PRINT(0, INBAND_S, false);
+  BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
+  BT_MBOX_PRINT(0, LE_SCAN, false);
+  BT_MBOX_PRINT(0, LE_ADV, false);
+  BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
+  BT_MBOX_PRINT(0, OPEN_CON_1, true);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "MBOX dw1:\n");
+  pos += scnprintf(buf + pos, bufsz - pos, "MBOX dw1:\n");
 
-    BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
-    BT_MBOX_PRINT(1, IP_SR, false);
-    BT_MBOX_PRINT(1, LE_MSTR, false);
-    BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
-    BT_MBOX_PRINT(1, MSG_TYPE, false);
-    BT_MBOX_PRINT(1, SSN, true);
+  BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
+  BT_MBOX_PRINT(1, IP_SR, false);
+  BT_MBOX_PRINT(1, LE_MSTR, false);
+  BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
+  BT_MBOX_PRINT(1, MSG_TYPE, false);
+  BT_MBOX_PRINT(1, SSN, true);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "MBOX dw2:\n");
+  pos += scnprintf(buf + pos, bufsz - pos, "MBOX dw2:\n");
 
-    BT_MBOX_PRINT(2, SNIFF_ACT, false);
-    BT_MBOX_PRINT(2, PAG, false);
-    BT_MBOX_PRINT(2, INQUIRY, false);
-    BT_MBOX_PRINT(2, CONN, false);
-    BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
-    BT_MBOX_PRINT(2, DISC, false);
-    BT_MBOX_PRINT(2, SCO_TX_ACT, false);
-    BT_MBOX_PRINT(2, SCO_RX_ACT, false);
-    BT_MBOX_PRINT(2, ESCO_RE_TX, false);
-    BT_MBOX_PRINT(2, SCO_DURATION, true);
+  BT_MBOX_PRINT(2, SNIFF_ACT, false);
+  BT_MBOX_PRINT(2, PAG, false);
+  BT_MBOX_PRINT(2, INQUIRY, false);
+  BT_MBOX_PRINT(2, CONN, false);
+  BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
+  BT_MBOX_PRINT(2, DISC, false);
+  BT_MBOX_PRINT(2, SCO_TX_ACT, false);
+  BT_MBOX_PRINT(2, SCO_RX_ACT, false);
+  BT_MBOX_PRINT(2, ESCO_RE_TX, false);
+  BT_MBOX_PRINT(2, SCO_DURATION, true);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "MBOX dw3:\n");
+  pos += scnprintf(buf + pos, bufsz - pos, "MBOX dw3:\n");
 
-    BT_MBOX_PRINT(3, SCO_STATE, false);
-    BT_MBOX_PRINT(3, SNIFF_STATE, false);
-    BT_MBOX_PRINT(3, A2DP_STATE, false);
-    BT_MBOX_PRINT(3, A2DP_SRC, false);
-    BT_MBOX_PRINT(3, ACL_STATE, false);
-    BT_MBOX_PRINT(3, MSTR_STATE, false);
-    BT_MBOX_PRINT(3, OBX_STATE, false);
-    BT_MBOX_PRINT(3, OPEN_CON_2, false);
-    BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
-    BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
-    BT_MBOX_PRINT(3, INBAND_P, false);
-    BT_MBOX_PRINT(3, MSG_TYPE_2, false);
-    BT_MBOX_PRINT(3, SSN_2, false);
-    BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
+  BT_MBOX_PRINT(3, SCO_STATE, false);
+  BT_MBOX_PRINT(3, SNIFF_STATE, false);
+  BT_MBOX_PRINT(3, A2DP_STATE, false);
+  BT_MBOX_PRINT(3, A2DP_SRC, false);
+  BT_MBOX_PRINT(3, ACL_STATE, false);
+  BT_MBOX_PRINT(3, MSTR_STATE, false);
+  BT_MBOX_PRINT(3, OBX_STATE, false);
+  BT_MBOX_PRINT(3, OPEN_CON_2, false);
+  BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
+  BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
+  BT_MBOX_PRINT(3, INBAND_P, false);
+  BT_MBOX_PRINT(3, MSG_TYPE_2, false);
+  BT_MBOX_PRINT(3, SSN_2, false);
+  BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
 
-    return pos;
+  return pos;
 }
 
 static ssize_t iwl_dbgfs_bt_notif_read(struct file* file, char __user* user_buf, size_t count,
                                        loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    struct iwl_bt_coex_profile_notif* notif = &mvm->last_bt_notif;
-    char* buf;
-    int ret, pos = 0, bufsz = sizeof(char) * 1024;
+  struct iwl_mvm* mvm = file->private_data;
+  struct iwl_bt_coex_profile_notif* notif = &mvm->last_bt_notif;
+  char* buf;
+  int ret, pos = 0, bufsz = sizeof(char) * 1024;
 
-    buf = kmalloc(bufsz, GFP_KERNEL);
-    if (!buf) { return -ENOMEM; }
+  buf = kmalloc(bufsz, GFP_KERNEL);
+  if (!buf) {
+    return -ENOMEM;
+  }
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
+  pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", notif->bt_ci_compliance);
-    pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
-                     le32_to_cpu(notif->primary_ch_lut));
-    pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
-                     le32_to_cpu(notif->secondary_ch_lut));
-    pos += scnprintf(buf + pos, bufsz - pos, "bt_activity_grading = %d\n",
-                     le32_to_cpu(notif->bt_activity_grading));
-    pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", notif->rrc_status & 0xF);
-    pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", notif->ttc_status & 0xF);
+  pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", notif->bt_ci_compliance);
+  pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
+                   le32_to_cpu(notif->primary_ch_lut));
+  pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
+                   le32_to_cpu(notif->secondary_ch_lut));
+  pos += scnprintf(buf + pos, bufsz - pos, "bt_activity_grading = %d\n",
+                   le32_to_cpu(notif->bt_activity_grading));
+  pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", notif->rrc_status & 0xF);
+  pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", notif->ttc_status & 0xF);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", IWL_MVM_BT_COEX_SYNC2SCO);
-    pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", IWL_MVM_BT_COEX_MPLUT);
+  pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", IWL_MVM_BT_COEX_SYNC2SCO);
+  pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", IWL_MVM_BT_COEX_MPLUT);
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-    kfree(buf);
+  ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  kfree(buf);
 
-    return ret;
+  return ret;
 }
 #undef BT_MBOX_PRINT
 
 static ssize_t iwl_dbgfs_bt_cmd_read(struct file* file, char __user* user_buf, size_t count,
                                      loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    struct iwl_bt_coex_ci_cmd* cmd = &mvm->last_bt_ci_cmd;
-    char buf[256];
-    int bufsz = sizeof(buf);
-    int pos = 0;
+  struct iwl_mvm* mvm = file->private_data;
+  struct iwl_bt_coex_ci_cmd* cmd = &mvm->last_bt_ci_cmd;
+  char buf[256];
+  int bufsz = sizeof(buf);
+  int pos = 0;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
-    pos += scnprintf(buf + pos, bufsz - pos, "\tPrimary Channel Bitmap 0x%016llx\n",
-                     le64_to_cpu(cmd->bt_primary_ci));
-    pos += scnprintf(buf + pos, bufsz - pos, "\tSecondary Channel Bitmap 0x%016llx\n",
-                     le64_to_cpu(cmd->bt_secondary_ci));
+  pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
+  pos += scnprintf(buf + pos, bufsz - pos, "\tPrimary Channel Bitmap 0x%016llx\n",
+                   le64_to_cpu(cmd->bt_primary_ci));
+  pos += scnprintf(buf + pos, bufsz - pos, "\tSecondary Channel Bitmap 0x%016llx\n",
+                   le64_to_cpu(cmd->bt_secondary_ci));
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                           loff_t* ppos) {
-    uint32_t bt_tx_prio;
+  uint32_t bt_tx_prio;
 
-    if (sscanf(buf, "%u", &bt_tx_prio) != 1) { return -EINVAL; }
-    if (bt_tx_prio > 4) { return -EINVAL; }
+  if (sscanf(buf, "%u", &bt_tx_prio) != 1) {
+    return -EINVAL;
+  }
+  if (bt_tx_prio > 4) {
+    return -EINVAL;
+  }
 
-    mvm->bt_tx_prio = bt_tx_prio;
+  mvm->bt_tx_prio = bt_tx_prio;
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_bt_force_ant_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                             loff_t* ppos) {
-    static const char* const modes_str[BT_FORCE_ANT_MAX] = {
-        [BT_FORCE_ANT_DIS] = "dis",
-        [BT_FORCE_ANT_AUTO] = "auto",
-        [BT_FORCE_ANT_BT] = "bt",
-        [BT_FORCE_ANT_WIFI] = "wifi",
-    };
-    int ret, bt_force_ant_mode;
+  static const char* const modes_str[BT_FORCE_ANT_MAX] = {
+      [BT_FORCE_ANT_DIS] = "dis",
+      [BT_FORCE_ANT_AUTO] = "auto",
+      [BT_FORCE_ANT_BT] = "bt",
+      [BT_FORCE_ANT_WIFI] = "wifi",
+  };
+  int ret, bt_force_ant_mode;
 
-    ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
-    if (ret < 0) { return ret; }
+  ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
+  if (ret < 0) {
+    return ret;
+  }
 
-    bt_force_ant_mode = ret;
+  bt_force_ant_mode = ret;
+  ret = 0;
+  mutex_lock(&mvm->mutex);
+  if (mvm->bt_force_ant_mode == bt_force_ant_mode) {
+    goto out;
+  }
+
+  mvm->bt_force_ant_mode = bt_force_ant_mode;
+  IWL_DEBUG_COEX(mvm, "Force mode: %s\n", modes_str[mvm->bt_force_ant_mode]);
+
+  if (iwl_mvm_firmware_running(mvm)) {
+    ret = iwl_mvm_send_bt_init_conf(mvm);
+  } else {
     ret = 0;
-    mutex_lock(&mvm->mutex);
-    if (mvm->bt_force_ant_mode == bt_force_ant_mode) { goto out; }
-
-    mvm->bt_force_ant_mode = bt_force_ant_mode;
-    IWL_DEBUG_COEX(mvm, "Force mode: %s\n", modes_str[mvm->bt_force_ant_mode]);
-
-    if (iwl_mvm_firmware_running(mvm)) {
-        ret = iwl_mvm_send_bt_init_conf(mvm);
-    } else {
-        ret = 0;
-    }
+  }
 
 out:
-    mutex_unlock(&mvm->mutex);
-    return ret ?: count;
+  mutex_unlock(&mvm->mutex);
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_fw_ver_read(struct file* file, char __user* user_buf, size_t count,
                                      loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    char *buff, *pos, *endpos;
-    static const size_t bufsz = 1024;
-    int ret;
+  struct iwl_mvm* mvm = file->private_data;
+  char *buff, *pos, *endpos;
+  static const size_t bufsz = 1024;
+  int ret;
 
-    buff = kmalloc(bufsz, GFP_KERNEL);
-    if (!buff) { return -ENOMEM; }
+  buff = kmalloc(bufsz, GFP_KERNEL);
+  if (!buff) {
+    return -ENOMEM;
+  }
 
-    pos = buff;
-    endpos = pos + bufsz;
+  pos = buff;
+  endpos = pos + bufsz;
 
-    pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n", mvm->trans->cfg->fw_name_pre);
-    pos += scnprintf(pos, endpos - pos, "FW: %s\n", mvm->fwrt.fw->human_readable);
-    pos += scnprintf(pos, endpos - pos, "Device: %s\n", mvm->fwrt.trans->cfg->name);
-    pos += scnprintf(pos, endpos - pos, "Bus: %s\n", mvm->fwrt.dev->bus->name);
+  pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n", mvm->trans->cfg->fw_name_pre);
+  pos += scnprintf(pos, endpos - pos, "FW: %s\n", mvm->fwrt.fw->human_readable);
+  pos += scnprintf(pos, endpos - pos, "Device: %s\n", mvm->fwrt.trans->cfg->name);
+  pos += scnprintf(pos, endpos - pos, "Bus: %s\n", mvm->fwrt.dev->bus->name);
 
-    ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
-    kfree(buff);
+  ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+  kfree(buff);
 
-    return ret;
+  return ret;
 }
 
 #define PRINT_STATS_LE32(_struct, _memb) \
-    pos += scnprintf(buf + pos, bufsz - pos, fmt_table, #_memb, le32_to_cpu(_struct->_memb))
+  pos += scnprintf(buf + pos, bufsz - pos, fmt_table, #_memb, le32_to_cpu(_struct->_memb))
 
 static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file* file, char __user* user_buf, size_t count,
                                           loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    static const char* fmt_table = "\t%-30s %10u\n";
-    static const char* fmt_header = "%-32s\n";
-    int pos = 0;
-    char* buf;
-    int ret;
-    size_t bufsz;
+  struct iwl_mvm* mvm = file->private_data;
+  static const char* fmt_table = "\t%-30s %10u\n";
+  static const char* fmt_header = "%-32s\n";
+  int pos = 0;
+  char* buf;
+  int ret;
+  size_t bufsz;
 
-    if (iwl_mvm_has_new_rx_stats_api(mvm)) {
-        bufsz = ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) + (4 * 33) + 1;
-    } else
-    /* 43 = size of each data line; 33 = size of each header */
-    {
-        bufsz = ((sizeof(struct mvm_statistics_rx_v3) / sizeof(__le32)) * 43) + (4 * 33) + 1;
-    }
+  if (iwl_mvm_has_new_rx_stats_api(mvm)) {
+    bufsz = ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) + (4 * 33) + 1;
+  } else
+  /* 43 = size of each data line; 33 = size of each header */
+  {
+    bufsz = ((sizeof(struct mvm_statistics_rx_v3) / sizeof(__le32)) * 43) + (4 * 33) + 1;
+  }
 
-    buf = kzalloc(bufsz, GFP_KERNEL);
-    if (!buf) { return -ENOMEM; }
+  buf = kzalloc(bufsz, GFP_KERNEL);
+  if (!buf) {
+    return -ENOMEM;
+  }
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    if (iwl_mvm_firmware_running(mvm)) { iwl_mvm_request_statistics(mvm, false); }
+  if (iwl_mvm_firmware_running(mvm)) {
+    iwl_mvm_request_statistics(mvm, false);
+  }
 
-    pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - OFDM");
-    if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
-        struct mvm_statistics_rx_phy_v2* ofdm = &mvm->rx_stats_v3.ofdm;
+  pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - OFDM");
+  if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+    struct mvm_statistics_rx_phy_v2* ofdm = &mvm->rx_stats_v3.ofdm;
 
-        PRINT_STATS_LE32(ofdm, ina_cnt);
-        PRINT_STATS_LE32(ofdm, fina_cnt);
-        PRINT_STATS_LE32(ofdm, plcp_err);
-        PRINT_STATS_LE32(ofdm, crc32_err);
-        PRINT_STATS_LE32(ofdm, overrun_err);
-        PRINT_STATS_LE32(ofdm, early_overrun_err);
-        PRINT_STATS_LE32(ofdm, crc32_good);
-        PRINT_STATS_LE32(ofdm, false_alarm_cnt);
-        PRINT_STATS_LE32(ofdm, fina_sync_err_cnt);
-        PRINT_STATS_LE32(ofdm, sfd_timeout);
-        PRINT_STATS_LE32(ofdm, fina_timeout);
-        PRINT_STATS_LE32(ofdm, unresponded_rts);
-        PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
-        PRINT_STATS_LE32(ofdm, sent_ack_cnt);
-        PRINT_STATS_LE32(ofdm, sent_cts_cnt);
-        PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
-        PRINT_STATS_LE32(ofdm, dsp_self_kill);
-        PRINT_STATS_LE32(ofdm, mh_format_err);
-        PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum);
-        PRINT_STATS_LE32(ofdm, reserved);
-    } else {
-        struct mvm_statistics_rx_phy* ofdm = &mvm->rx_stats.ofdm;
+    PRINT_STATS_LE32(ofdm, ina_cnt);
+    PRINT_STATS_LE32(ofdm, fina_cnt);
+    PRINT_STATS_LE32(ofdm, plcp_err);
+    PRINT_STATS_LE32(ofdm, crc32_err);
+    PRINT_STATS_LE32(ofdm, overrun_err);
+    PRINT_STATS_LE32(ofdm, early_overrun_err);
+    PRINT_STATS_LE32(ofdm, crc32_good);
+    PRINT_STATS_LE32(ofdm, false_alarm_cnt);
+    PRINT_STATS_LE32(ofdm, fina_sync_err_cnt);
+    PRINT_STATS_LE32(ofdm, sfd_timeout);
+    PRINT_STATS_LE32(ofdm, fina_timeout);
+    PRINT_STATS_LE32(ofdm, unresponded_rts);
+    PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
+    PRINT_STATS_LE32(ofdm, sent_ack_cnt);
+    PRINT_STATS_LE32(ofdm, sent_cts_cnt);
+    PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
+    PRINT_STATS_LE32(ofdm, dsp_self_kill);
+    PRINT_STATS_LE32(ofdm, mh_format_err);
+    PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum);
+    PRINT_STATS_LE32(ofdm, reserved);
+  } else {
+    struct mvm_statistics_rx_phy* ofdm = &mvm->rx_stats.ofdm;
 
-        PRINT_STATS_LE32(ofdm, unresponded_rts);
-        PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
-        PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
-        PRINT_STATS_LE32(ofdm, dsp_self_kill);
-        PRINT_STATS_LE32(ofdm, reserved);
-    }
+    PRINT_STATS_LE32(ofdm, unresponded_rts);
+    PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
+    PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
+    PRINT_STATS_LE32(ofdm, dsp_self_kill);
+    PRINT_STATS_LE32(ofdm, reserved);
+  }
 
-    pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - CCK");
-    if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
-        struct mvm_statistics_rx_phy_v2* cck = &mvm->rx_stats_v3.cck;
+  pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - CCK");
+  if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+    struct mvm_statistics_rx_phy_v2* cck = &mvm->rx_stats_v3.cck;
 
-        PRINT_STATS_LE32(cck, ina_cnt);
-        PRINT_STATS_LE32(cck, fina_cnt);
-        PRINT_STATS_LE32(cck, plcp_err);
-        PRINT_STATS_LE32(cck, crc32_err);
-        PRINT_STATS_LE32(cck, overrun_err);
-        PRINT_STATS_LE32(cck, early_overrun_err);
-        PRINT_STATS_LE32(cck, crc32_good);
-        PRINT_STATS_LE32(cck, false_alarm_cnt);
-        PRINT_STATS_LE32(cck, fina_sync_err_cnt);
-        PRINT_STATS_LE32(cck, sfd_timeout);
-        PRINT_STATS_LE32(cck, fina_timeout);
-        PRINT_STATS_LE32(cck, unresponded_rts);
-        PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
-        PRINT_STATS_LE32(cck, sent_ack_cnt);
-        PRINT_STATS_LE32(cck, sent_cts_cnt);
-        PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
-        PRINT_STATS_LE32(cck, dsp_self_kill);
-        PRINT_STATS_LE32(cck, mh_format_err);
-        PRINT_STATS_LE32(cck, re_acq_main_rssi_sum);
-        PRINT_STATS_LE32(cck, reserved);
-    } else {
-        struct mvm_statistics_rx_phy* cck = &mvm->rx_stats.cck;
+    PRINT_STATS_LE32(cck, ina_cnt);
+    PRINT_STATS_LE32(cck, fina_cnt);
+    PRINT_STATS_LE32(cck, plcp_err);
+    PRINT_STATS_LE32(cck, crc32_err);
+    PRINT_STATS_LE32(cck, overrun_err);
+    PRINT_STATS_LE32(cck, early_overrun_err);
+    PRINT_STATS_LE32(cck, crc32_good);
+    PRINT_STATS_LE32(cck, false_alarm_cnt);
+    PRINT_STATS_LE32(cck, fina_sync_err_cnt);
+    PRINT_STATS_LE32(cck, sfd_timeout);
+    PRINT_STATS_LE32(cck, fina_timeout);
+    PRINT_STATS_LE32(cck, unresponded_rts);
+    PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
+    PRINT_STATS_LE32(cck, sent_ack_cnt);
+    PRINT_STATS_LE32(cck, sent_cts_cnt);
+    PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
+    PRINT_STATS_LE32(cck, dsp_self_kill);
+    PRINT_STATS_LE32(cck, mh_format_err);
+    PRINT_STATS_LE32(cck, re_acq_main_rssi_sum);
+    PRINT_STATS_LE32(cck, reserved);
+  } else {
+    struct mvm_statistics_rx_phy* cck = &mvm->rx_stats.cck;
 
-        PRINT_STATS_LE32(cck, unresponded_rts);
-        PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
-        PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
-        PRINT_STATS_LE32(cck, dsp_self_kill);
-        PRINT_STATS_LE32(cck, reserved);
-    }
+    PRINT_STATS_LE32(cck, unresponded_rts);
+    PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
+    PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
+    PRINT_STATS_LE32(cck, dsp_self_kill);
+    PRINT_STATS_LE32(cck, reserved);
+  }
 
-    pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - GENERAL");
-    if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
-        struct mvm_statistics_rx_non_phy_v3* general = &mvm->rx_stats_v3.general;
+  pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - GENERAL");
+  if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+    struct mvm_statistics_rx_non_phy_v3* general = &mvm->rx_stats_v3.general;
 
-        PRINT_STATS_LE32(general, bogus_cts);
-        PRINT_STATS_LE32(general, bogus_ack);
-        PRINT_STATS_LE32(general, non_bssid_frames);
-        PRINT_STATS_LE32(general, filtered_frames);
-        PRINT_STATS_LE32(general, non_channel_beacons);
-        PRINT_STATS_LE32(general, channel_beacons);
-        PRINT_STATS_LE32(general, num_missed_bcon);
-        PRINT_STATS_LE32(general, adc_rx_saturation_time);
-        PRINT_STATS_LE32(general, ina_detection_search_time);
-        PRINT_STATS_LE32(general, beacon_silence_rssi_a);
-        PRINT_STATS_LE32(general, beacon_silence_rssi_b);
-        PRINT_STATS_LE32(general, beacon_silence_rssi_c);
-        PRINT_STATS_LE32(general, interference_data_flag);
-        PRINT_STATS_LE32(general, channel_load);
-        PRINT_STATS_LE32(general, dsp_false_alarms);
-        PRINT_STATS_LE32(general, beacon_rssi_a);
-        PRINT_STATS_LE32(general, beacon_rssi_b);
-        PRINT_STATS_LE32(general, beacon_rssi_c);
-        PRINT_STATS_LE32(general, beacon_energy_a);
-        PRINT_STATS_LE32(general, beacon_energy_b);
-        PRINT_STATS_LE32(general, beacon_energy_c);
-        PRINT_STATS_LE32(general, num_bt_kills);
-        PRINT_STATS_LE32(general, mac_id);
-        PRINT_STATS_LE32(general, directed_data_mpdu);
-    } else {
-        struct mvm_statistics_rx_non_phy* general = &mvm->rx_stats.general;
+    PRINT_STATS_LE32(general, bogus_cts);
+    PRINT_STATS_LE32(general, bogus_ack);
+    PRINT_STATS_LE32(general, non_bssid_frames);
+    PRINT_STATS_LE32(general, filtered_frames);
+    PRINT_STATS_LE32(general, non_channel_beacons);
+    PRINT_STATS_LE32(general, channel_beacons);
+    PRINT_STATS_LE32(general, num_missed_bcon);
+    PRINT_STATS_LE32(general, adc_rx_saturation_time);
+    PRINT_STATS_LE32(general, ina_detection_search_time);
+    PRINT_STATS_LE32(general, beacon_silence_rssi_a);
+    PRINT_STATS_LE32(general, beacon_silence_rssi_b);
+    PRINT_STATS_LE32(general, beacon_silence_rssi_c);
+    PRINT_STATS_LE32(general, interference_data_flag);
+    PRINT_STATS_LE32(general, channel_load);
+    PRINT_STATS_LE32(general, dsp_false_alarms);
+    PRINT_STATS_LE32(general, beacon_rssi_a);
+    PRINT_STATS_LE32(general, beacon_rssi_b);
+    PRINT_STATS_LE32(general, beacon_rssi_c);
+    PRINT_STATS_LE32(general, beacon_energy_a);
+    PRINT_STATS_LE32(general, beacon_energy_b);
+    PRINT_STATS_LE32(general, beacon_energy_c);
+    PRINT_STATS_LE32(general, num_bt_kills);
+    PRINT_STATS_LE32(general, mac_id);
+    PRINT_STATS_LE32(general, directed_data_mpdu);
+  } else {
+    struct mvm_statistics_rx_non_phy* general = &mvm->rx_stats.general;
 
-        PRINT_STATS_LE32(general, bogus_cts);
-        PRINT_STATS_LE32(general, bogus_ack);
-        PRINT_STATS_LE32(general, non_channel_beacons);
-        PRINT_STATS_LE32(general, channel_beacons);
-        PRINT_STATS_LE32(general, num_missed_bcon);
-        PRINT_STATS_LE32(general, adc_rx_saturation_time);
-        PRINT_STATS_LE32(general, ina_detection_search_time);
-        PRINT_STATS_LE32(general, beacon_silence_rssi_a);
-        PRINT_STATS_LE32(general, beacon_silence_rssi_b);
-        PRINT_STATS_LE32(general, beacon_silence_rssi_c);
-        PRINT_STATS_LE32(general, interference_data_flag);
-        PRINT_STATS_LE32(general, channel_load);
-        PRINT_STATS_LE32(general, beacon_rssi_a);
-        PRINT_STATS_LE32(general, beacon_rssi_b);
-        PRINT_STATS_LE32(general, beacon_rssi_c);
-        PRINT_STATS_LE32(general, beacon_energy_a);
-        PRINT_STATS_LE32(general, beacon_energy_b);
-        PRINT_STATS_LE32(general, beacon_energy_c);
-        PRINT_STATS_LE32(general, num_bt_kills);
-        PRINT_STATS_LE32(general, mac_id);
-    }
+    PRINT_STATS_LE32(general, bogus_cts);
+    PRINT_STATS_LE32(general, bogus_ack);
+    PRINT_STATS_LE32(general, non_channel_beacons);
+    PRINT_STATS_LE32(general, channel_beacons);
+    PRINT_STATS_LE32(general, num_missed_bcon);
+    PRINT_STATS_LE32(general, adc_rx_saturation_time);
+    PRINT_STATS_LE32(general, ina_detection_search_time);
+    PRINT_STATS_LE32(general, beacon_silence_rssi_a);
+    PRINT_STATS_LE32(general, beacon_silence_rssi_b);
+    PRINT_STATS_LE32(general, beacon_silence_rssi_c);
+    PRINT_STATS_LE32(general, interference_data_flag);
+    PRINT_STATS_LE32(general, channel_load);
+    PRINT_STATS_LE32(general, beacon_rssi_a);
+    PRINT_STATS_LE32(general, beacon_rssi_b);
+    PRINT_STATS_LE32(general, beacon_rssi_c);
+    PRINT_STATS_LE32(general, beacon_energy_a);
+    PRINT_STATS_LE32(general, beacon_energy_b);
+    PRINT_STATS_LE32(general, beacon_energy_c);
+    PRINT_STATS_LE32(general, num_bt_kills);
+    PRINT_STATS_LE32(general, mac_id);
+  }
 
-    pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - HT");
-    if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
-        struct mvm_statistics_rx_ht_phy_v1* ht = &mvm->rx_stats_v3.ofdm_ht;
+  pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - HT");
+  if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+    struct mvm_statistics_rx_ht_phy_v1* ht = &mvm->rx_stats_v3.ofdm_ht;
 
-        PRINT_STATS_LE32(ht, plcp_err);
-        PRINT_STATS_LE32(ht, overrun_err);
-        PRINT_STATS_LE32(ht, early_overrun_err);
-        PRINT_STATS_LE32(ht, crc32_good);
-        PRINT_STATS_LE32(ht, crc32_err);
-        PRINT_STATS_LE32(ht, mh_format_err);
-        PRINT_STATS_LE32(ht, agg_crc32_good);
-        PRINT_STATS_LE32(ht, agg_mpdu_cnt);
-        PRINT_STATS_LE32(ht, agg_cnt);
-        PRINT_STATS_LE32(ht, unsupport_mcs);
-    } else {
-        struct mvm_statistics_rx_ht_phy* ht = &mvm->rx_stats.ofdm_ht;
+    PRINT_STATS_LE32(ht, plcp_err);
+    PRINT_STATS_LE32(ht, overrun_err);
+    PRINT_STATS_LE32(ht, early_overrun_err);
+    PRINT_STATS_LE32(ht, crc32_good);
+    PRINT_STATS_LE32(ht, crc32_err);
+    PRINT_STATS_LE32(ht, mh_format_err);
+    PRINT_STATS_LE32(ht, agg_crc32_good);
+    PRINT_STATS_LE32(ht, agg_mpdu_cnt);
+    PRINT_STATS_LE32(ht, agg_cnt);
+    PRINT_STATS_LE32(ht, unsupport_mcs);
+  } else {
+    struct mvm_statistics_rx_ht_phy* ht = &mvm->rx_stats.ofdm_ht;
 
-        PRINT_STATS_LE32(ht, mh_format_err);
-        PRINT_STATS_LE32(ht, agg_mpdu_cnt);
-        PRINT_STATS_LE32(ht, agg_cnt);
-        PRINT_STATS_LE32(ht, unsupport_mcs);
-    }
+    PRINT_STATS_LE32(ht, mh_format_err);
+    PRINT_STATS_LE32(ht, agg_mpdu_cnt);
+    PRINT_STATS_LE32(ht, agg_cnt);
+    PRINT_STATS_LE32(ht, unsupport_mcs);
+  }
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-    kfree(buf);
+  ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  kfree(buf);
 
-    return ret;
+  return ret;
 }
 #undef PRINT_STAT_LE32
 
 static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm* mvm, char __user* user_buf, size_t count,
                                           loff_t* ppos, struct iwl_mvm_frame_stats* stats) {
-    char *buff, *pos, *endpos;
-    int idx, i;
-    int ret;
-    static const size_t bufsz = 1024;
+  char *buff, *pos, *endpos;
+  int idx, i;
+  int ret;
+  static const size_t bufsz = 1024;
 
-    buff = kmalloc(bufsz, GFP_KERNEL);
-    if (!buff) { return -ENOMEM; }
+  buff = kmalloc(bufsz, GFP_KERNEL);
+  if (!buff) {
+    return -ENOMEM;
+  }
 
-    spin_lock_bh(&mvm->drv_stats_lock);
+  spin_lock_bh(&mvm->drv_stats_lock);
 
-    pos = buff;
-    endpos = pos + bufsz;
+  pos = buff;
+  endpos = pos + bufsz;
 
-    pos += scnprintf(pos, endpos - pos, "Legacy/HT/VHT\t:\t%d/%d/%d\n", stats->legacy_frames,
-                     stats->ht_frames, stats->vht_frames);
-    pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n", stats->bw_20_frames,
-                     stats->bw_40_frames, stats->bw_80_frames);
-    pos +=
-        scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n", stats->ngi_frames, stats->sgi_frames);
-    pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n", stats->siso_frames,
-                     stats->mimo2_frames);
-    pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n", stats->fail_frames,
-                     stats->success_frames);
-    pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n", stats->agg_frames);
-    pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n", stats->ampdu_count);
-    pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
-                     stats->ampdu_count > 0 ? (stats->agg_frames / stats->ampdu_count) : 0);
+  pos += scnprintf(pos, endpos - pos, "Legacy/HT/VHT\t:\t%d/%d/%d\n", stats->legacy_frames,
+                   stats->ht_frames, stats->vht_frames);
+  pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n", stats->bw_20_frames,
+                   stats->bw_40_frames, stats->bw_80_frames);
+  pos +=
+      scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n", stats->ngi_frames, stats->sgi_frames);
+  pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n", stats->siso_frames,
+                   stats->mimo2_frames);
+  pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n", stats->fail_frames,
+                   stats->success_frames);
+  pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n", stats->agg_frames);
+  pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n", stats->ampdu_count);
+  pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
+                   stats->ampdu_count > 0 ? (stats->agg_frames / stats->ampdu_count) : 0);
 
-    pos += scnprintf(pos, endpos - pos, "Last Rates\n");
+  pos += scnprintf(pos, endpos - pos, "Last Rates\n");
 
-    idx = stats->last_frame_idx - 1;
-    for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
-        idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
-        if (stats->last_rates[idx] == 0) { continue; }
-        pos += scnprintf(pos, endpos - pos, "Rate[%d]: ", (int)(ARRAY_SIZE(stats->last_rates) - i));
-        pos += rs_pretty_print_rate(pos, endpos - pos, stats->last_rates[idx]);
+  idx = stats->last_frame_idx - 1;
+  for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
+    idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
+    if (stats->last_rates[idx] == 0) {
+      continue;
     }
-    spin_unlock_bh(&mvm->drv_stats_lock);
+    pos += scnprintf(pos, endpos - pos, "Rate[%d]: ", (int)(ARRAY_SIZE(stats->last_rates) - i));
+    pos += rs_pretty_print_rate(pos, endpos - pos, stats->last_rates[idx]);
+  }
+  spin_unlock_bh(&mvm->drv_stats_lock);
 
-    ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
-    kfree(buff);
+  ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+  kfree(buff);
 
-    return ret;
+  return ret;
 }
 
 static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file* file, char __user* user_buf, size_t count,
                                            loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
+  struct iwl_mvm* mvm = file->private_data;
 
-    return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos, &mvm->drv_rx_stats);
+  return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos, &mvm->drv_rx_stats);
 }
 
 static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                           loff_t* ppos) {
-    int __maybe_unused ret;
+  int __maybe_unused ret;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    /* allow one more restart that we're provoking here */
-    if (mvm->fw_restart >= 0) { mvm->fw_restart++; }
+  /* allow one more restart that we're provoking here */
+  if (mvm->fw_restart >= 0) {
+    mvm->fw_restart++;
+  }
 
-    /* take the return value to make compiler happy - it will fail anyway */
-    ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
+  /* take the return value to make compiler happy - it will fail anyway */
+  ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm* mvm, char* buf, size_t count, loff_t* ppos) {
-    int ret;
+  int ret;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI);
-    if (ret) { return ret; }
+  ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI);
+  if (ret) {
+    return ret;
+  }
 
-    iwl_force_nmi(mvm->trans);
+  iwl_force_nmi(mvm->trans);
 
-    iwl_mvm_unref(mvm, IWL_MVM_REF_NMI);
+  iwl_mvm_unref(mvm, IWL_MVM_REF_NMI);
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_scan_ant_rxchain_read(struct file* file, char __user* user_buf,
                                                size_t count, loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    int pos = 0;
-    char buf[32];
-    const size_t bufsz = sizeof(buf);
+  struct iwl_mvm* mvm = file->private_data;
+  int pos = 0;
+  char buf[32];
+  const size_t bufsz = sizeof(buf);
 
-    /* print which antennas were set for the scan command by the user */
-    pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
-    if (mvm->scan_rx_ant & ANT_A) { pos += scnprintf(buf + pos, bufsz - pos, "A"); }
-    if (mvm->scan_rx_ant & ANT_B) { pos += scnprintf(buf + pos, bufsz - pos, "B"); }
-    if (mvm->scan_rx_ant & ANT_C) { pos += scnprintf(buf + pos, bufsz - pos, "C"); }
-    pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
+  /* print which antennas were set for the scan command by the user */
+  pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
+  if (mvm->scan_rx_ant & ANT_A) {
+    pos += scnprintf(buf + pos, bufsz - pos, "A");
+  }
+  if (mvm->scan_rx_ant & ANT_B) {
+    pos += scnprintf(buf + pos, bufsz - pos, "B");
+  }
+  if (mvm->scan_rx_ant & ANT_C) {
+    pos += scnprintf(buf + pos, bufsz - pos, "C");
+  }
+  pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                                 loff_t* ppos) {
-    uint8_t scan_rx_ant;
+  uint8_t scan_rx_ant;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    if (sscanf(buf, "%hhx", &scan_rx_ant) != 1) { return -EINVAL; }
-    if (scan_rx_ant > ANT_ABC) { return -EINVAL; }
-    if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm))) { return -EINVAL; }
+  if (sscanf(buf, "%hhx", &scan_rx_ant) != 1) {
+    return -EINVAL;
+  }
+  if (scan_rx_ant > ANT_ABC) {
+    return -EINVAL;
+  }
+  if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm))) {
+    return -EINVAL;
+  }
 
-    if (mvm->scan_rx_ant != scan_rx_ant) {
-        mvm->scan_rx_ant = scan_rx_ant;
-        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-            iwl_mvm_config_scan(mvm);
-        }
+  if (mvm->scan_rx_ant != scan_rx_ant) {
+    mvm->scan_rx_ant = scan_rx_ant;
+    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+      iwl_mvm_config_scan(mvm);
     }
+  }
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                                loff_t* ppos) {
-    struct iwl_rss_config_cmd cmd = {
-        .flags = cpu_to_le32(IWL_RSS_ENABLE),
-        .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | IWL_RSS_HASH_TYPE_IPV4_UDP |
-                     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV6_TCP |
-                     IWL_RSS_HASH_TYPE_IPV6_UDP | IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
-    };
-    int ret, i, num_repeats, nbytes = count / 2;
+  struct iwl_rss_config_cmd cmd = {
+      .flags = cpu_to_le32(IWL_RSS_ENABLE),
+      .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | IWL_RSS_HASH_TYPE_IPV4_UDP |
+                   IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV6_TCP |
+                   IWL_RSS_HASH_TYPE_IPV6_UDP | IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+  };
+  int ret, i, num_repeats, nbytes = count / 2;
 
-    ret = hex2bin(cmd.indirection_table, buf, nbytes);
-    if (ret) { return ret; }
+  ret = hex2bin(cmd.indirection_table, buf, nbytes);
+  if (ret) {
+    return ret;
+  }
 
-    /*
-     * The input is the redirection table, partial or full.
-     * Repeat the pattern if needed.
-     * For example, input of 01020F will be repeated 42 times,
-     * indirecting RSS hash results to queues 1, 2, 15 (skipping
-     * queues 3 - 14).
-     */
-    num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes;
-    for (i = 1; i < num_repeats; i++) {
-        memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, nbytes);
-    }
-    /* handle cut in the middle pattern for the last places */
-    memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
-           ARRAY_SIZE(cmd.indirection_table) % nbytes);
+  /*
+   * The input is the redirection table, partial or full.
+   * Repeat the pattern if needed.
+   * For example, input of 01020F will be repeated 42 times,
+   * indirecting RSS hash results to queues 1, 2, 15 (skipping
+   * queues 3 - 14).
+   */
+  num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes;
+  for (i = 1; i < num_repeats; i++) {
+    memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, nbytes);
+  }
+  /* handle cut in the middle pattern for the last places */
+  memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
+         ARRAY_SIZE(cmd.indirection_table) % nbytes);
 
-    netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
+  netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
 
-    mutex_lock(&mvm->mutex);
-    if (iwl_mvm_firmware_running(mvm)) {
-        ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
-    } else {
-        ret = 0;
-    }
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  if (iwl_mvm_firmware_running(mvm)) {
+    ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
+  } else {
+    ret = 0;
+  }
+  mutex_unlock(&mvm->mutex);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                              loff_t* ppos) {
-    struct iwl_rx_cmd_buffer rxb = {
-        ._rx_page_order = 0,
-        .truesize = 0, /* not used */
-        ._offset = 0,
-    };
-    struct iwl_rx_packet* pkt;
-    struct iwl_rx_mpdu_desc* desc;
-    int bin_len = count / 2;
-    int ret = -EINVAL;
-    size_t mpdu_cmd_hdr_size = (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-                                   ? sizeof(struct iwl_rx_mpdu_desc)
-                                   : IWL_RX_DESC_SIZE_V1;
+  struct iwl_rx_cmd_buffer rxb = {
+      ._rx_page_order = 0,
+      .truesize = 0, /* not used */
+      ._offset = 0,
+  };
+  struct iwl_rx_packet* pkt;
+  struct iwl_rx_mpdu_desc* desc;
+  int bin_len = count / 2;
+  int ret = -EINVAL;
+  size_t mpdu_cmd_hdr_size = (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+                                 ? sizeof(struct iwl_rx_mpdu_desc)
+                                 : IWL_RX_DESC_SIZE_V1;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    /* supporting only 9000 descriptor */
-    if (!mvm->trans->cfg->mq_rx_supported) { return -ENOTSUPP; }
+  /* supporting only 9000 descriptor */
+  if (!mvm->trans->cfg->mq_rx_supported) {
+    return -ENOTSUPP;
+  }
 
-    rxb._page = alloc_pages(GFP_ATOMIC, 0);
-    if (!rxb._page) { return -ENOMEM; }
-    pkt = rxb_addr(&rxb);
+  rxb._page = alloc_pages(GFP_ATOMIC, 0);
+  if (!rxb._page) {
+    return -ENOMEM;
+  }
+  pkt = rxb_addr(&rxb);
 
-    ret = hex2bin(page_address(rxb._page), buf, bin_len);
-    if (ret) { goto out; }
+  ret = hex2bin(page_address(rxb._page), buf, bin_len);
+  if (ret) {
+    goto out;
+  }
 
-    /* avoid invalid memory access */
-    if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size) { goto out; }
+  /* avoid invalid memory access */
+  if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size) {
+    goto out;
+  }
 
-    /* check this is RX packet */
-    if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) != WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)) {
-        goto out;
-    }
+  /* check this is RX packet */
+  if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) != WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)) {
+    goto out;
+  }
 
-    /* check the length in metadata matches actual received length */
-    desc = (void*)pkt->data;
-    if (le16_to_cpu(desc->mpdu_len) != (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt))) { goto out; }
+  /* check the length in metadata matches actual received length */
+  desc = (void*)pkt->data;
+  if (le16_to_cpu(desc->mpdu_len) != (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt))) {
+    goto out;
+  }
 
-    local_bh_disable();
-    iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0);
-    local_bh_enable();
-    ret = 0;
+  local_bh_disable();
+  iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0);
+  local_bh_enable();
+  ret = 0;
 
 out:
-    iwl_free_rxb(&rxb);
+  iwl_free_rxb(&rxb);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file* file, char __user* user_buf, size_t count,
                                           loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    int conf;
-    char buf[8];
-    const size_t bufsz = sizeof(buf);
-    int pos = 0;
+  struct iwl_mvm* mvm = file->private_data;
+  int conf;
+  char buf[8];
+  const size_t bufsz = sizeof(buf);
+  int pos = 0;
 
-    mutex_lock(&mvm->mutex);
-    conf = mvm->fwrt.dump.conf;
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  conf = mvm->fwrt.dump.conf;
+  mutex_unlock(&mvm->mutex);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
+  pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 /*
@@ -1244,637 +1348,706 @@
  */
 static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                               loff_t* ppos) {
-    struct iwl_trans* trans = mvm->trans;
-    const struct iwl_fw_dbg_dest_tlv_v1* dest = trans->dbg_dest_tlv;
-    struct iwl_continuous_record_cmd cont_rec = {};
-    int ret, rec_mode;
+  struct iwl_trans* trans = mvm->trans;
+  const struct iwl_fw_dbg_dest_tlv_v1* dest = trans->dbg_dest_tlv;
+  struct iwl_continuous_record_cmd cont_rec = {};
+  int ret, rec_mode;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    if (!dest) { return -EOPNOTSUPP; }
+  if (!dest) {
+    return -EOPNOTSUPP;
+  }
 
-    if (dest->monitor_mode != SMEM_MODE || trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
-        return -EOPNOTSUPP;
-    }
+  if (dest->monitor_mode != SMEM_MODE || trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
+    return -EOPNOTSUPP;
+  }
 
-    ret = kstrtoint(buf, 0, &rec_mode);
-    if (ret) { return ret; }
+  ret = kstrtoint(buf, 0, &rec_mode);
+  if (ret) {
+    return ret;
+  }
 
-    cont_rec.record_mode.enable_recording =
-        rec_mode ? cpu_to_le16(ENABLE_CONT_RECORDING) : cpu_to_le16(DISABLE_CONT_RECORDING);
+  cont_rec.record_mode.enable_recording =
+      rec_mode ? cpu_to_le16(ENABLE_CONT_RECORDING) : cpu_to_le16(DISABLE_CONT_RECORDING);
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd_pdu(mvm, LDBG_CONFIG_CMD, 0, sizeof(cont_rec), &cont_rec);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd_pdu(mvm, LDBG_CONFIG_CMD, 0, sizeof(cont_rec), &cont_rec);
+  mutex_unlock(&mvm->mutex);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                            loff_t* ppos) {
-    unsigned int conf_id;
-    int ret;
+  unsigned int conf_id;
+  int ret;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    ret = kstrtouint(buf, 0, &conf_id);
-    if (ret) { return ret; }
+  ret = kstrtouint(buf, 0, &conf_id);
+  if (ret) {
+    return ret;
+  }
 
-    if (WARN_ON(conf_id >= FW_DBG_CONF_MAX)) { return -EINVAL; }
+  if (WARN_ON(conf_id >= FW_DBG_CONF_MAX)) {
+    return -EINVAL;
+  }
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id);
+  mutex_unlock(&mvm->mutex);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                               loff_t* ppos) {
-    int ret;
+  int ret;
 
-    ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
-    if (ret) { return ret; }
-    if (count == 0) { return 0; }
+  ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
+  if (ret) {
+    return ret;
+  }
+  if (count == 0) {
+    return 0;
+  }
 
-    iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1));
+  iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1));
 
-    iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
+  iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                              loff_t* ppos) {
-    unsigned int max_amsdu_len;
-    int ret;
+  unsigned int max_amsdu_len;
+  int ret;
 
-    ret = kstrtouint(buf, 0, &max_amsdu_len);
-    if (ret) { return ret; }
+  ret = kstrtouint(buf, 0, &max_amsdu_len);
+  if (ret) {
+    return ret;
+  }
 
-    if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454) { return -EINVAL; }
-    mvm->max_amsdu_len = max_amsdu_len;
+  if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454) {
+    return -EINVAL;
+  }
+  mvm->max_amsdu_len = max_amsdu_len;
 
-    return count;
+  return count;
 }
 
 #define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
 #ifdef CPTCFG_IWLWIFI_BCAST_FILTERING
 static ssize_t iwl_dbgfs_bcast_filters_read(struct file* file, char __user* user_buf, size_t count,
                                             loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    struct iwl_bcast_filter_cmd cmd;
-    const struct iwl_fw_bcast_filter* filter;
-    char* buf;
-    int bufsz = 1024;
-    int i, j, pos = 0;
-    ssize_t ret;
+  struct iwl_mvm* mvm = file->private_data;
+  struct iwl_bcast_filter_cmd cmd;
+  const struct iwl_fw_bcast_filter* filter;
+  char* buf;
+  int bufsz = 1024;
+  int i, j, pos = 0;
+  ssize_t ret;
 
-    buf = kzalloc(bufsz, GFP_KERNEL);
-    if (!buf) { return -ENOMEM; }
+  buf = kzalloc(bufsz, GFP_KERNEL);
+  if (!buf) {
+    return -ENOMEM;
+  }
 
-    mutex_lock(&mvm->mutex);
-    if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
-        ADD_TEXT("None\n");
-        mutex_unlock(&mvm->mutex);
-        goto out;
-    }
+  mutex_lock(&mvm->mutex);
+  if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+    ADD_TEXT("None\n");
     mutex_unlock(&mvm->mutex);
+    goto out;
+  }
+  mutex_unlock(&mvm->mutex);
 
-    for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
-        filter = &cmd.filters[i];
+  for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
+    filter = &cmd.filters[i];
 
-        ADD_TEXT("Filter [%d]:\n", i);
-        ADD_TEXT("\tDiscard=%d\n", filter->discard);
-        ADD_TEXT("\tFrame Type: %s\n", filter->frame_type ? "IPv4" : "Generic");
+    ADD_TEXT("Filter [%d]:\n", i);
+    ADD_TEXT("\tDiscard=%d\n", filter->discard);
+    ADD_TEXT("\tFrame Type: %s\n", filter->frame_type ? "IPv4" : "Generic");
 
-        for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
-            const struct iwl_fw_bcast_filter_attr* attr;
+    for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
+      const struct iwl_fw_bcast_filter_attr* attr;
 
-            attr = &filter->attrs[j];
-            if (!attr->mask) { break; }
+      attr = &filter->attrs[j];
+      if (!attr->mask) {
+        break;
+      }
 
-            ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n", j,
-                     attr->offset, attr->offset_type ? "IP End" : "Payload Start",
-                     be32_to_cpu(attr->mask), be32_to_cpu(attr->val), le16_to_cpu(attr->reserved1));
-        }
+      ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n", j,
+               attr->offset, attr->offset_type ? "IP End" : "Payload Start",
+               be32_to_cpu(attr->mask), be32_to_cpu(attr->val), le16_to_cpu(attr->reserved1));
     }
+  }
 out:
-    ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-    kfree(buf);
-    return ret;
+  ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  kfree(buf);
+  return ret;
 }
 
 static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                              loff_t* ppos) {
-    int pos, next_pos;
-    struct iwl_fw_bcast_filter filter = {};
-    struct iwl_bcast_filter_cmd cmd;
-    uint32_t filter_id, attr_id, mask, value;
-    int err = 0;
+  int pos, next_pos;
+  struct iwl_fw_bcast_filter filter = {};
+  struct iwl_bcast_filter_cmd cmd;
+  uint32_t filter_id, attr_id, mask, value;
+  int err = 0;
 
-    if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard, &filter.frame_type, &pos) !=
-        3) {
-        return -EINVAL;
+  if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard, &filter.frame_type, &pos) != 3) {
+    return -EINVAL;
+  }
+
+  if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
+      filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4) {
+    return -EINVAL;
+  }
+
+  for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs); attr_id++) {
+    struct iwl_fw_bcast_filter_attr* attr = &filter.attrs[attr_id];
+
+    if (pos >= count) {
+      break;
     }
 
-    if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
-        filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4) {
-        return -EINVAL;
+    if (sscanf(&buf[pos], "%hhi %hhi %i %i %n", &attr->offset, &attr->offset_type, &mask, &value,
+               &next_pos) != 4) {
+      return -EINVAL;
     }
 
-    for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs); attr_id++) {
-        struct iwl_fw_bcast_filter_attr* attr = &filter.attrs[attr_id];
-
-        if (pos >= count) { break; }
-
-        if (sscanf(&buf[pos], "%hhi %hhi %i %i %n", &attr->offset, &attr->offset_type, &mask,
-                   &value, &next_pos) != 4) {
-            return -EINVAL;
-        }
-
-        attr->mask = cpu_to_be32(mask);
-        attr->val = cpu_to_be32(value);
-        if (mask) { filter.num_attrs++; }
-
-        pos += next_pos;
+    attr->mask = cpu_to_be32(mask);
+    attr->val = cpu_to_be32(value);
+    if (mask) {
+      filter.num_attrs++;
     }
 
-    mutex_lock(&mvm->mutex);
-    memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id], &filter, sizeof(filter));
+    pos += next_pos;
+  }
 
-    /* send updated bcast filtering configuration */
-    if (iwl_mvm_firmware_running(mvm) && mvm->dbgfs_bcast_filtering.override &&
-        iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
-        err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, sizeof(cmd), &cmd);
-    }
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id], &filter, sizeof(filter));
 
-    return err ?: count;
+  /* send updated bcast filtering configuration */
+  if (iwl_mvm_firmware_running(mvm) && mvm->dbgfs_bcast_filtering.override &&
+      iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+    err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, sizeof(cmd), &cmd);
+  }
+  mutex_unlock(&mvm->mutex);
+
+  return err ?: count;
 }
 
 static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file* file, char __user* user_buf,
                                                  size_t count, loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    struct iwl_bcast_filter_cmd cmd;
-    char* buf;
-    int bufsz = 1024;
-    int i, pos = 0;
-    ssize_t ret;
+  struct iwl_mvm* mvm = file->private_data;
+  struct iwl_bcast_filter_cmd cmd;
+  char* buf;
+  int bufsz = 1024;
+  int i, pos = 0;
+  ssize_t ret;
 
-    buf = kzalloc(bufsz, GFP_KERNEL);
-    if (!buf) { return -ENOMEM; }
+  buf = kzalloc(bufsz, GFP_KERNEL);
+  if (!buf) {
+    return -ENOMEM;
+  }
 
-    mutex_lock(&mvm->mutex);
-    if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
-        ADD_TEXT("None\n");
-        mutex_unlock(&mvm->mutex);
-        goto out;
-    }
+  mutex_lock(&mvm->mutex);
+  if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+    ADD_TEXT("None\n");
     mutex_unlock(&mvm->mutex);
+    goto out;
+  }
+  mutex_unlock(&mvm->mutex);
 
-    for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
-        const struct iwl_fw_bcast_mac* mac = &cmd.macs[i];
+  for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
+    const struct iwl_fw_bcast_mac* mac = &cmd.macs[i];
 
-        ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n", i, mac->default_discard,
-                 mac->attached_filters);
-    }
+    ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n", i, mac->default_discard,
+             mac->attached_filters);
+  }
 out:
-    ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-    kfree(buf);
-    return ret;
+  ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  kfree(buf);
+  return ret;
 }
 
 static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                                   loff_t* ppos) {
-    struct iwl_bcast_filter_cmd cmd;
-    struct iwl_fw_bcast_mac mac = {};
-    uint32_t mac_id, attached_filters;
-    int err = 0;
+  struct iwl_bcast_filter_cmd cmd;
+  struct iwl_fw_bcast_mac mac = {};
+  uint32_t mac_id, attached_filters;
+  int err = 0;
 
-    if (!mvm->bcast_filters) { return -ENOENT; }
+  if (!mvm->bcast_filters) {
+    return -ENOENT;
+  }
 
-    if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard, &attached_filters) != 3) {
-        return -EINVAL;
-    }
+  if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard, &attached_filters) != 3) {
+    return -EINVAL;
+  }
 
-    if (mac_id >= ARRAY_SIZE(cmd.macs) || mac.default_discard > 1 ||
-        attached_filters >= BIT(ARRAY_SIZE(cmd.filters))) {
-        return -EINVAL;
-    }
+  if (mac_id >= ARRAY_SIZE(cmd.macs) || mac.default_discard > 1 ||
+      attached_filters >= BIT(ARRAY_SIZE(cmd.filters))) {
+    return -EINVAL;
+  }
 
-    mac.attached_filters = cpu_to_le16(attached_filters);
+  mac.attached_filters = cpu_to_le16(attached_filters);
 
-    mutex_lock(&mvm->mutex);
-    memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id], &mac, sizeof(mac));
+  mutex_lock(&mvm->mutex);
+  memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id], &mac, sizeof(mac));
 
-    /* send updated bcast filtering configuration */
-    if (iwl_mvm_firmware_running(mvm) && mvm->dbgfs_bcast_filtering.override &&
-        iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
-        err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, sizeof(cmd), &cmd);
-    }
-    mutex_unlock(&mvm->mutex);
+  /* send updated bcast filtering configuration */
+  if (iwl_mvm_firmware_running(mvm) && mvm->dbgfs_bcast_filtering.override &&
+      iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+    err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, sizeof(cmd), &cmd);
+  }
+  mutex_unlock(&mvm->mutex);
 
-    return err ?: count;
+  return err ?: count;
 }
 #endif
 
 #ifdef CONFIG_PM_SLEEP
 static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm* mvm, char* buf, size_t count, loff_t* ppos) {
-    int store;
+  int store;
 
-    if (sscanf(buf, "%d", &store) != 1) { return -EINVAL; }
+  if (sscanf(buf, "%d", &store) != 1) {
+    return -EINVAL;
+  }
 
-    mvm->store_d3_resume_sram = store;
+  mvm->store_d3_resume_sram = store;
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_d3_sram_read(struct file* file, char __user* user_buf, size_t count,
                                       loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    const struct fw_img* img;
-    int ofs, len, pos = 0;
-    size_t bufsz, ret;
-    char* buf;
-    uint8_t* ptr = mvm->d3_resume_sram;
+  struct iwl_mvm* mvm = file->private_data;
+  const struct fw_img* img;
+  int ofs, len, pos = 0;
+  size_t bufsz, ret;
+  char* buf;
+  uint8_t* ptr = mvm->d3_resume_sram;
 
-    img = &mvm->fw->img[IWL_UCODE_WOWLAN];
-    len = img->sec[IWL_UCODE_SECTION_DATA].len;
+  img = &mvm->fw->img[IWL_UCODE_WOWLAN];
+  len = img->sec[IWL_UCODE_SECTION_DATA].len;
 
-    bufsz = len * 4 + 256;
-    buf = kzalloc(bufsz, GFP_KERNEL);
-    if (!buf) { return -ENOMEM; }
+  bufsz = len * 4 + 256;
+  buf = kzalloc(bufsz, GFP_KERNEL);
+  if (!buf) {
+    return -ENOMEM;
+  }
 
-    pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n",
-                     mvm->store_d3_resume_sram ? "en" : "dis");
+  pos +=
+      scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n", mvm->store_d3_resume_sram ? "en" : "dis");
 
-    if (ptr) {
-        for (ofs = 0; ofs < len; ofs += 16) {
-            pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x %16ph\n", ofs, ptr + ofs);
-        }
-    } else {
-        pos += scnprintf(buf + pos, bufsz - pos, "(no data captured)\n");
+  if (ptr) {
+    for (ofs = 0; ofs < len; ofs += 16) {
+      pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x %16ph\n", ofs, ptr + ofs);
     }
+  } else {
+    pos += scnprintf(buf + pos, bufsz - pos, "(no data captured)\n");
+  }
 
-    ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 
-    kfree(buf);
+  kfree(buf);
 
-    return ret;
+  return ret;
 }
 #endif
 
-#define PRINT_MVM_REF(ref)                                                           \
-    do {                                                                             \
-        if (mvm->refs[ref])                                                          \
-            pos += scnprintf(buf + pos, bufsz - pos, "\t(0x%lx): %d %s\n", BIT(ref), \
-                             mvm->refs[ref], #ref);                                  \
-    } while (0)
+#define PRINT_MVM_REF(ref)                                                                         \
+  do {                                                                                             \
+    if (mvm->refs[ref])                                                                            \
+      pos +=                                                                                       \
+          scnprintf(buf + pos, bufsz - pos, "\t(0x%lx): %d %s\n", BIT(ref), mvm->refs[ref], #ref); \
+  } while (0)
 
 static ssize_t iwl_dbgfs_d0i3_refs_read(struct file* file, char __user* user_buf, size_t count,
                                         loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    int i, pos = 0;
-    char buf[256];
-    const size_t bufsz = sizeof(buf);
-    uint32_t refs = 0;
+  struct iwl_mvm* mvm = file->private_data;
+  int i, pos = 0;
+  char buf[256];
+  const size_t bufsz = sizeof(buf);
+  uint32_t refs = 0;
 
-    for (i = 0; i < IWL_MVM_REF_COUNT; i++)
-        if (mvm->refs[i]) { refs |= BIT(i); }
+  for (i = 0; i < IWL_MVM_REF_COUNT; i++)
+    if (mvm->refs[i]) {
+      refs |= BIT(i);
+    }
 
-    pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%x\n", refs);
+  pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%x\n", refs);
 
-    PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
-    PRINT_MVM_REF(IWL_MVM_REF_SCAN);
-    PRINT_MVM_REF(IWL_MVM_REF_ROC);
-    PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
-    PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
-    PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
-    PRINT_MVM_REF(IWL_MVM_REF_USER);
-    PRINT_MVM_REF(IWL_MVM_REF_TX);
-    PRINT_MVM_REF(IWL_MVM_REF_TX_AGG);
-    PRINT_MVM_REF(IWL_MVM_REF_ADD_IF);
-    PRINT_MVM_REF(IWL_MVM_REF_START_AP);
-    PRINT_MVM_REF(IWL_MVM_REF_BSS_CHANGED);
-    PRINT_MVM_REF(IWL_MVM_REF_PREPARE_TX);
-    PRINT_MVM_REF(IWL_MVM_REF_PROTECT_TDLS);
-    PRINT_MVM_REF(IWL_MVM_REF_CHECK_CTKILL);
-    PRINT_MVM_REF(IWL_MVM_REF_PRPH_READ);
-    PRINT_MVM_REF(IWL_MVM_REF_PRPH_WRITE);
-    PRINT_MVM_REF(IWL_MVM_REF_NMI);
-    PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
-    PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
-    PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
-    PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
-    PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE);
-    PRINT_MVM_REF(IWL_MVM_REF_SENDING_CMD);
-    PRINT_MVM_REF(IWL_MVM_REF_RX);
+  PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
+  PRINT_MVM_REF(IWL_MVM_REF_SCAN);
+  PRINT_MVM_REF(IWL_MVM_REF_ROC);
+  PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
+  PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
+  PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
+  PRINT_MVM_REF(IWL_MVM_REF_USER);
+  PRINT_MVM_REF(IWL_MVM_REF_TX);
+  PRINT_MVM_REF(IWL_MVM_REF_TX_AGG);
+  PRINT_MVM_REF(IWL_MVM_REF_ADD_IF);
+  PRINT_MVM_REF(IWL_MVM_REF_START_AP);
+  PRINT_MVM_REF(IWL_MVM_REF_BSS_CHANGED);
+  PRINT_MVM_REF(IWL_MVM_REF_PREPARE_TX);
+  PRINT_MVM_REF(IWL_MVM_REF_PROTECT_TDLS);
+  PRINT_MVM_REF(IWL_MVM_REF_CHECK_CTKILL);
+  PRINT_MVM_REF(IWL_MVM_REF_PRPH_READ);
+  PRINT_MVM_REF(IWL_MVM_REF_PRPH_WRITE);
+  PRINT_MVM_REF(IWL_MVM_REF_NMI);
+  PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
+  PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
+  PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
+  PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
+  PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE);
+  PRINT_MVM_REF(IWL_MVM_REF_SENDING_CMD);
+  PRINT_MVM_REF(IWL_MVM_REF_RX);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                          loff_t* ppos) {
-    unsigned long value;
-    int ret;
-    bool taken;
+  unsigned long value;
+  int ret;
+  bool taken;
 
-    ret = kstrtoul(buf, 10, &value);
-    if (ret < 0) { return ret; }
+  ret = kstrtoul(buf, 10, &value);
+  if (ret < 0) {
+    return ret;
+  }
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    taken = mvm->refs[IWL_MVM_REF_USER];
-    if (value == 1 && !taken) {
-        iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
-    } else if (value == 0 && taken) {
-        iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
-    } else {
-        ret = -EINVAL;
-    }
+  taken = mvm->refs[IWL_MVM_REF_USER];
+  if (value == 1 && !taken) {
+    iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
+  } else if (value == 0 && taken) {
+    iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
+  } else {
+    ret = -EINVAL;
+  }
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    if (ret < 0) { return ret; }
-    return count;
+  if (ret < 0) {
+    return ret;
+  }
+  return count;
 }
 
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
-    _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+  _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
-    _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
-#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode)                                  \
-    do {                                                                                       \
-        if (!debugfs_create_file(alias, mode, parent, mvm, &iwl_dbgfs_##name##_ops)) goto err; \
-    } while (0)
+  _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode)                    \
+  do {                                                                           \
+    if (!debugfs_create_file(alias, mode, parent, mvm, &iwl_dbgfs_##name##_ops)) \
+      goto err;                                                                  \
+  } while (0)
 #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
-    MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+  MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
 
 #define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \
-    _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+  _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
 #define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \
-    _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+  _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
 
-#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode)                              \
-    do {                                                                                       \
-        if (!debugfs_create_file(alias, mode, parent, sta, &iwl_dbgfs_##name##_ops)) goto err; \
-    } while (0)
+#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode)                \
+  do {                                                                           \
+    if (!debugfs_create_file(alias, mode, parent, sta, &iwl_dbgfs_##name##_ops)) \
+      goto err;                                                                  \
+  } while (0)
 #define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
-    MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
+  MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
 
 static ssize_t iwl_dbgfs_prph_reg_read(struct file* file, char __user* user_buf, size_t count,
                                        loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    int pos = 0;
-    char buf[32];
-    const size_t bufsz = sizeof(buf);
-    int ret;
+  struct iwl_mvm* mvm = file->private_data;
+  int pos = 0;
+  char buf[32];
+  const size_t bufsz = sizeof(buf);
+  int ret;
 
-    if (!mvm->dbgfs_prph_reg_addr) { return -EINVAL; }
+  if (!mvm->dbgfs_prph_reg_addr) {
+    return -EINVAL;
+  }
 
-    ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_READ);
-    if (ret) { return ret; }
+  ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_READ);
+  if (ret) {
+    return ret;
+  }
 
-    pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n", mvm->dbgfs_prph_reg_addr,
-                     iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
+  pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n", mvm->dbgfs_prph_reg_addr,
+                   iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
 
-    iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_READ);
+  iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_READ);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 static ssize_t iwl_dbgfs_prph_reg_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                         loff_t* ppos) {
-    uint8_t args;
-    uint32_t value;
-    int ret;
+  uint8_t args;
+  uint32_t value;
+  int ret;
 
-    args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
-    /* if we only want to set the reg address - nothing more to do */
-    if (args == 1) { goto out; }
+  args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
+  /* if we only want to set the reg address - nothing more to do */
+  if (args == 1) {
+    goto out;
+  }
 
-    /* otherwise, make sure we have both address and value */
-    if (args != 2) { return -EINVAL; }
+  /* otherwise, make sure we have both address and value */
+  if (args != 2) {
+    return -EINVAL;
+  }
 
-    ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
-    if (ret) { return ret; }
+  ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
+  if (ret) {
+    return ret;
+  }
 
-    iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
+  iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
 
-    iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
+  iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
 out:
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                              loff_t* ppos) {
-    int ret;
+  int ret;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
+  mutex_unlock(&mvm->mutex);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                                  loff_t* ppos) {
-    struct iwl_he_monitor_cmd he_mon_cmd = {};
-    uint32_t aid;
-    int ret;
+  struct iwl_he_monitor_cmd he_mon_cmd = {};
+  uint32_t aid;
+  int ret;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid, &he_mon_cmd.bssid[0],
-                 &he_mon_cmd.bssid[1], &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3],
-                 &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]);
-    if (ret != 7) { return -EINVAL; }
+  ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid, &he_mon_cmd.bssid[0],
+               &he_mon_cmd.bssid[1], &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3],
+               &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]);
+  if (ret != 7) {
+    return -EINVAL;
+  }
 
-    he_mon_cmd.aid = cpu_to_le16(aid);
+  he_mon_cmd.aid = cpu_to_le16(aid);
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0), 0,
-                               sizeof(he_mon_cmd), &he_mon_cmd);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0), 0,
+                             sizeof(he_mon_cmd), &he_mon_cmd);
+  mutex_unlock(&mvm->mutex);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_uapsd_noagg_bssids_read(struct file* file, char __user* user_buf,
                                                  size_t count, loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    uint8_t buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1];
-    unsigned int pos = 0;
-    size_t bufsz = sizeof(buf);
-    int i;
+  struct iwl_mvm* mvm = file->private_data;
+  uint8_t buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1];
+  unsigned int pos = 0;
+  size_t bufsz = sizeof(buf);
+  int i;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) {
-        pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", mvm->uapsd_noagg_bssids[i].addr);
-    }
+  for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) {
+    pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", mvm->uapsd_noagg_bssids[i].addr);
+  }
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
 #ifdef CPTCFG_IWLMVM_VENDOR_CMDS
 static ssize_t iwl_dbgfs_tx_power_status_read(struct file* file, char __user* user_buf,
                                               size_t count, loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    char buf[64];
-    int bufsz = sizeof(buf);
-    int pos = 0;
-    uint32_t mode = le32_to_cpu(mvm->txp_cmd.v5.v3.set_mode);
-    bool txp_cmd_valid = mode == IWL_TX_POWER_MODE_SET_DEVICE;
-    uint16_t val_24 = le16_to_cpu(mvm->txp_cmd.v5.v3.dev_24);
-    uint16_t val_52l = le16_to_cpu(mvm->txp_cmd.v5.v3.dev_52_low);
-    uint16_t val_52h = le16_to_cpu(mvm->txp_cmd.v5.v3.dev_52_high);
-    char buf_24[15] = "(not limited)";
-    char buf_52l[15] = "(not limited)";
-    char buf_52h[15] = "(not limited)";
+  struct iwl_mvm* mvm = file->private_data;
+  char buf[64];
+  int bufsz = sizeof(buf);
+  int pos = 0;
+  uint32_t mode = le32_to_cpu(mvm->txp_cmd.v5.v3.set_mode);
+  bool txp_cmd_valid = mode == IWL_TX_POWER_MODE_SET_DEVICE;
+  uint16_t val_24 = le16_to_cpu(mvm->txp_cmd.v5.v3.dev_24);
+  uint16_t val_52l = le16_to_cpu(mvm->txp_cmd.v5.v3.dev_52_low);
+  uint16_t val_52h = le16_to_cpu(mvm->txp_cmd.v5.v3.dev_52_high);
+  char buf_24[15] = "(not limited)";
+  char buf_52l[15] = "(not limited)";
+  char buf_52h[15] = "(not limited)";
 
-    if (txp_cmd_valid && val_24 < IWL_DEV_MAX_TX_POWER) {
-        sprintf(buf_24, "%d.%03d dBm", val_24 >> 3, (val_24 & 7) * 125);
-    }
-    if (txp_cmd_valid && val_52l < IWL_DEV_MAX_TX_POWER) {
-        sprintf(buf_52l, "%d.%03d dBm", val_52l >> 3, (val_52l & 7) * 125);
-    }
-    if (txp_cmd_valid && val_52h < IWL_DEV_MAX_TX_POWER) {
-        sprintf(buf_52h, "%d.%03d dBm", val_52h >> 3, (val_52h & 7) * 125);
-    }
+  if (txp_cmd_valid && val_24 < IWL_DEV_MAX_TX_POWER) {
+    sprintf(buf_24, "%d.%03d dBm", val_24 >> 3, (val_24 & 7) * 125);
+  }
+  if (txp_cmd_valid && val_52l < IWL_DEV_MAX_TX_POWER) {
+    sprintf(buf_52l, "%d.%03d dBm", val_52l >> 3, (val_52l & 7) * 125);
+  }
+  if (txp_cmd_valid && val_52h < IWL_DEV_MAX_TX_POWER) {
+    sprintf(buf_52h, "%d.%03d dBm", val_52h >> 3, (val_52h & 7) * 125);
+  }
 
-    pos += scnprintf(buf + pos, bufsz - pos, "2.4 = %s\n", buf_24);
-    pos += scnprintf(buf + pos, bufsz - pos, "5.2L = %s\n", buf_52l);
-    pos += scnprintf(buf + pos, bufsz - pos, "5.2H = %s\n", buf_52h);
+  pos += scnprintf(buf + pos, bufsz - pos, "2.4 = %s\n", buf_24);
+  pos += scnprintf(buf + pos, bufsz - pos, "5.2L = %s\n", buf_52l);
+  pos += scnprintf(buf + pos, bufsz - pos, "5.2H = %s\n", buf_52h);
 
-    return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 #endif
 
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
 static ssize_t iwl_dbgfs_debug_profile_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                              loff_t* ppos) {
-    struct iwl_dhc_cmd* dhc_cmd;
-    struct iwl_dhc_profile_cmd* profile_cmd;
-    struct iwl_host_cmd hcmd = {
-        .id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0),
-    };
-    int ret;
-    uint32_t report, reset, period, metrics;
+  struct iwl_dhc_cmd* dhc_cmd;
+  struct iwl_dhc_profile_cmd* profile_cmd;
+  struct iwl_host_cmd hcmd = {
+      .id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0),
+  };
+  int ret;
+  uint32_t report, reset, period, metrics;
 
-    if (sscanf(buf, "%x,%x,%x,%x", &report, &reset, &period, &metrics) != 4) { return -EINVAL; }
+  if (sscanf(buf, "%x,%x,%x,%x", &report, &reset, &period, &metrics) != 4) {
+    return -EINVAL;
+  }
 
-    /* allocate the maximal amount of memory that can be sent */
-    dhc_cmd = kzalloc(sizeof(*dhc_cmd) + sizeof(*profile_cmd), GFP_KERNEL);
-    if (!dhc_cmd) { return -ENOMEM; }
+  /* allocate the maximal amount of memory that can be sent */
+  dhc_cmd = kzalloc(sizeof(*dhc_cmd) + sizeof(*profile_cmd), GFP_KERNEL);
+  if (!dhc_cmd) {
+    return -ENOMEM;
+  }
 
-    hcmd.len[0] = sizeof(*dhc_cmd);
-    if (report) {
-        dhc_cmd->length = cpu_to_le32(sizeof(reset) >> 2);
-        dhc_cmd->index_and_mask =
-            cpu_to_le32(DHC_TABLE_AUTOMATION | DHC_TARGET_UMAC | DHC_AUTO_UMAC_REPORT_PROFILING);
-        dhc_cmd->data[0] = cpu_to_le32(reset);
-        hcmd.len[0] += sizeof(reset);
-    } else {
-        dhc_cmd->length = cpu_to_le32(sizeof(*profile_cmd) >> 2);
-        dhc_cmd->index_and_mask = cpu_to_le32(DHC_TABLE_AUTOMATION | DHC_TARGET_UMAC |
-                                              DHC_AUTO_UMAC_SET_PROFILING_REPORT_CONF);
+  hcmd.len[0] = sizeof(*dhc_cmd);
+  if (report) {
+    dhc_cmd->length = cpu_to_le32(sizeof(reset) >> 2);
+    dhc_cmd->index_and_mask =
+        cpu_to_le32(DHC_TABLE_AUTOMATION | DHC_TARGET_UMAC | DHC_AUTO_UMAC_REPORT_PROFILING);
+    dhc_cmd->data[0] = cpu_to_le32(reset);
+    hcmd.len[0] += sizeof(reset);
+  } else {
+    dhc_cmd->length = cpu_to_le32(sizeof(*profile_cmd) >> 2);
+    dhc_cmd->index_and_mask = cpu_to_le32(DHC_TABLE_AUTOMATION | DHC_TARGET_UMAC |
+                                          DHC_AUTO_UMAC_SET_PROFILING_REPORT_CONF);
 
-        profile_cmd = (void*)dhc_cmd->data;
-        profile_cmd->reset = cpu_to_le32(reset);
-        profile_cmd->period = cpu_to_le32(period);
-        profile_cmd->enabled_metrics = cpu_to_le32(metrics);
-        hcmd.len[0] += sizeof(*profile_cmd);
-    }
-    hcmd.data[0] = dhc_cmd;
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd(mvm, &hcmd);
-    if (ret) { IWL_ERR(mvm, "failed to send DHC profiling cmd\n"); }
-    mutex_unlock(&mvm->mutex);
-    kfree(dhc_cmd);
+    profile_cmd = (void*)dhc_cmd->data;
+    profile_cmd->reset = cpu_to_le32(reset);
+    profile_cmd->period = cpu_to_le32(period);
+    profile_cmd->enabled_metrics = cpu_to_le32(metrics);
+    hcmd.len[0] += sizeof(*profile_cmd);
+  }
+  hcmd.data[0] = dhc_cmd;
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd(mvm, &hcmd);
+  if (ret) {
+    IWL_ERR(mvm, "failed to send DHC profiling cmd\n");
+  }
+  mutex_unlock(&mvm->mutex);
+  kfree(dhc_cmd);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_send_dhc(struct iwl_mvm* mvm, char* buf, uint32_t index_and_mask) {
-    int ret;
-    uint32_t user_val;
-    __le32 cmd_data;
+  int ret;
+  uint32_t user_val;
+  __le32 cmd_data;
 
-    struct iwl_dhc_cmd cmd = {
-        .length = cpu_to_le32(1),
-        .index_and_mask = cpu_to_le32(index_and_mask),
-    };
+  struct iwl_dhc_cmd cmd = {
+      .length = cpu_to_le32(1),
+      .index_and_mask = cpu_to_le32(index_and_mask),
+  };
 
-    struct iwl_host_cmd hcmd = {
-        .id = iwl_cmd_id(DEBUG_HOST_COMMAND, LEGACY_GROUP, 0),
-        .data = {&cmd, &cmd_data},
-        .len = {sizeof(cmd), sizeof(cmd_data)},
-    };
+  struct iwl_host_cmd hcmd = {
+      .id = iwl_cmd_id(DEBUG_HOST_COMMAND, LEGACY_GROUP, 0),
+      .data = {&cmd, &cmd_data},
+      .len = {sizeof(cmd), sizeof(cmd_data)},
+  };
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    ret = kstrtou32(buf, 10, &user_val);
-    cmd_data = cpu_to_le32(user_val);
+  ret = kstrtou32(buf, 10, &user_val);
+  cmd_data = cpu_to_le32(user_val);
 
-    if (ret < 0) { goto out; }
+  if (ret < 0) {
+    goto out;
+  }
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd(mvm, &hcmd);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd(mvm, &hcmd);
+  mutex_unlock(&mvm->mutex);
 
 out:
-    iwl_free_resp(&hcmd);
-    return ret;
+  iwl_free_resp(&hcmd);
+  return ret;
 }
 
 static ssize_t iwl_dbgfs_enable_adwell_fine_tune_report_write(struct iwl_mvm* mvm, char* buf,
                                                               size_t count, loff_t* ppos) {
-    int ret;
-    uint32_t index_and_mask = DHC_AUTO_UMAC_ADAPTIVE_DWELL_SCAN_FINE_TUNE_ENABLE_REPORT |
-                              DHC_TABLE_AUTOMATION | DHC_TARGET_UMAC;
+  int ret;
+  uint32_t index_and_mask = DHC_AUTO_UMAC_ADAPTIVE_DWELL_SCAN_FINE_TUNE_ENABLE_REPORT |
+                            DHC_TABLE_AUTOMATION | DHC_TARGET_UMAC;
 
-    ret = iwl_dbgfs_send_dhc(mvm, buf, index_and_mask);
+  ret = iwl_dbgfs_send_dhc(mvm, buf, index_and_mask);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_enable_adwell_channel_dwell_report_write(struct iwl_mvm* mvm, char* buf,
                                                                   size_t count, loff_t* ppos) {
-    int ret;
-    uint32_t index_and_mask =
-        DHC_AUTO_UMAC_SCAN_CHANNEL_DWELL_ENABLE_REPORT | DHC_TABLE_AUTOMATION | DHC_TARGET_UMAC;
+  int ret;
+  uint32_t index_and_mask =
+      DHC_AUTO_UMAC_SCAN_CHANNEL_DWELL_ENABLE_REPORT | DHC_TABLE_AUTOMATION | DHC_TARGET_UMAC;
 
-    ret = iwl_dbgfs_send_dhc(mvm, buf, index_and_mask);
+  ret = iwl_dbgfs_send_dhc(mvm, buf, index_and_mask);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_disable_tx_fifo_mask_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                                     loff_t* ppos) {
-    int ret;
+  int ret;
 
-    uint32_t index_and_mask = DHC_TOOLS_LMAC_TXF_FIFO_DISABLE;
+  uint32_t index_and_mask = DHC_TOOLS_LMAC_TXF_FIFO_DISABLE;
 
-    ret = iwl_dbgfs_send_dhc(mvm, buf, index_and_mask);
+  ret = iwl_dbgfs_send_dhc(mvm, buf, index_and_mask);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 static ssize_t iwl_dbgfs_ps_config_write(struct iwl_mvm* mvm, char* buf, size_t count,
                                          loff_t* ppos) {
-    int ret;
-    uint32_t index_and_mask =
-        DHC_AUTO_UMAC_CONFIGURE_POWER_FLAGS | DHC_TABLE_AUTOMATION | DHC_TARGET_UMAC;
+  int ret;
+  uint32_t index_and_mask =
+      DHC_AUTO_UMAC_CONFIGURE_POWER_FLAGS | DHC_TABLE_AUTOMATION | DHC_TARGET_UMAC;
 
-    ret = iwl_dbgfs_send_dhc(mvm, buf, index_and_mask);
+  ret = iwl_dbgfs_send_dhc(mvm, buf, index_and_mask);
 
-    return ret ?: count;
+  return ret ?: count;
 }
 
 #endif /* CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED */
@@ -1953,118 +2126,128 @@
 
 static ssize_t iwl_dbgfs_mem_read(struct file* file, char __user* user_buf, size_t count,
                                   loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    struct iwl_dbg_mem_access_cmd cmd = {};
-    struct iwl_dbg_mem_access_rsp* rsp;
-    struct iwl_host_cmd hcmd = {
-        .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
-        .data =
-            {
-                &cmd,
-            },
-        .len = {sizeof(cmd)},
-    };
-    size_t delta;
-    ssize_t ret, len;
+  struct iwl_mvm* mvm = file->private_data;
+  struct iwl_dbg_mem_access_cmd cmd = {};
+  struct iwl_dbg_mem_access_rsp* rsp;
+  struct iwl_host_cmd hcmd = {
+      .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+      .data =
+          {
+              &cmd,
+          },
+      .len = {sizeof(cmd)},
+  };
+  size_t delta;
+  ssize_t ret, len;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, DEBUG_GROUP, 0);
-    cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
+  hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, DEBUG_GROUP, 0);
+  cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
 
-    /* Take care of alignment of both the position and the length */
-    delta = *ppos & 0x3;
-    cmd.addr = cpu_to_le32(*ppos - delta);
-    cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS));
+  /* Take care of alignment of both the position and the length */
+  delta = *ppos & 0x3;
+  cmd.addr = cpu_to_le32(*ppos - delta);
+  cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS));
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd(mvm, &hcmd);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd(mvm, &hcmd);
+  mutex_unlock(&mvm->mutex);
 
-    if (ret < 0) { return ret; }
+  if (ret < 0) {
+    return ret;
+  }
 
-    rsp = (void*)hcmd.resp_pkt->data;
-    if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
-        ret = -ENXIO;
-        goto out;
-    }
+  rsp = (void*)hcmd.resp_pkt->data;
+  if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+    ret = -ENXIO;
+    goto out;
+  }
 
-    len = min((size_t)le32_to_cpu(rsp->len) << 2,
-              iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp));
-    len = min(len - delta, count);
-    if (len < 0) {
-        ret = -EFAULT;
-        goto out;
-    }
+  len = min((size_t)le32_to_cpu(rsp->len) << 2,
+            iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp));
+  len = min(len - delta, count);
+  if (len < 0) {
+    ret = -EFAULT;
+    goto out;
+  }
 
-    ret = len - copy_to_user(user_buf, (void*)rsp->data + delta, len);
-    *ppos += ret;
+  ret = len - copy_to_user(user_buf, (void*)rsp->data + delta, len);
+  *ppos += ret;
 
 out:
-    iwl_free_resp(&hcmd);
-    return ret;
+  iwl_free_resp(&hcmd);
+  return ret;
 }
 
 static ssize_t iwl_dbgfs_mem_write(struct file* file, const char __user* user_buf, size_t count,
                                    loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    struct iwl_dbg_mem_access_cmd* cmd;
-    struct iwl_dbg_mem_access_rsp* rsp;
-    struct iwl_host_cmd hcmd = {};
-    size_t cmd_size;
-    size_t data_size;
-    uint32_t op, len;
-    ssize_t ret;
+  struct iwl_mvm* mvm = file->private_data;
+  struct iwl_dbg_mem_access_cmd* cmd;
+  struct iwl_dbg_mem_access_rsp* rsp;
+  struct iwl_host_cmd hcmd = {};
+  size_t cmd_size;
+  size_t data_size;
+  uint32_t op, len;
+  ssize_t ret;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return -EIO; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return -EIO;
+  }
 
-    hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, DEBUG_GROUP, 0);
+  hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, DEBUG_GROUP, 0);
 
-    if (*ppos & 0x3 || count < 4) {
-        op = DEBUG_MEM_OP_WRITE_BYTES;
-        len = min(count, (size_t)(4 - (*ppos & 0x3)));
-        data_size = len;
-    } else {
-        op = DEBUG_MEM_OP_WRITE;
-        len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS);
-        data_size = len << 2;
-    }
+  if (*ppos & 0x3 || count < 4) {
+    op = DEBUG_MEM_OP_WRITE_BYTES;
+    len = min(count, (size_t)(4 - (*ppos & 0x3)));
+    data_size = len;
+  } else {
+    op = DEBUG_MEM_OP_WRITE;
+    len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS);
+    data_size = len << 2;
+  }
 
-    cmd_size = sizeof(*cmd) + ALIGN(data_size, 4);
-    cmd = kzalloc(cmd_size, GFP_KERNEL);
-    if (!cmd) { return -ENOMEM; }
+  cmd_size = sizeof(*cmd) + ALIGN(data_size, 4);
+  cmd = kzalloc(cmd_size, GFP_KERNEL);
+  if (!cmd) {
+    return -ENOMEM;
+  }
 
-    cmd->op = cpu_to_le32(op);
-    cmd->len = cpu_to_le32(len);
-    cmd->addr = cpu_to_le32(*ppos);
-    if (copy_from_user((void*)cmd->data, user_buf, data_size)) {
-        kfree(cmd);
-        return -EFAULT;
-    }
-
-    hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, hcmd.data[0] = (void*)cmd;
-    hcmd.len[0] = cmd_size;
-
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd(mvm, &hcmd);
-    mutex_unlock(&mvm->mutex);
-
+  cmd->op = cpu_to_le32(op);
+  cmd->len = cpu_to_le32(len);
+  cmd->addr = cpu_to_le32(*ppos);
+  if (copy_from_user((void*)cmd->data, user_buf, data_size)) {
     kfree(cmd);
+    return -EFAULT;
+  }
 
-    if (ret < 0) { return ret; }
+  hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, hcmd.data[0] = (void*)cmd;
+  hcmd.len[0] = cmd_size;
 
-    rsp = (void*)hcmd.resp_pkt->data;
-    if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
-        ret = -ENXIO;
-        goto out;
-    }
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd(mvm, &hcmd);
+  mutex_unlock(&mvm->mutex);
 
-    ret = data_size;
-    *ppos += ret;
+  kfree(cmd);
+
+  if (ret < 0) {
+    return ret;
+  }
+
+  rsp = (void*)hcmd.resp_pkt->data;
+  if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
+    ret = -ENXIO;
+    goto out;
+  }
+
+  ret = data_size;
+  *ppos += ret;
 
 out:
-    iwl_free_resp(&hcmd);
-    return ret;
+  iwl_free_resp(&hcmd);
+  return ret;
 }
 
 static const struct file_operations iwl_dbgfs_mem_ops = {
@@ -2076,184 +2259,194 @@
 
 void iwl_mvm_sta_add_debugfs(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                              struct ieee80211_sta* sta, struct dentry* dir) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
 
 #ifdef CPTCFG_IWLMVM_AX_SOFTAP_TESTMODE
-    iwl_mvm_ax_softap_testmode_sta_add_debugfs(hw, vif, sta, dir);
+  iwl_mvm_ax_softap_testmode_sta_add_debugfs(hw, vif, sta, dir);
 #endif
 
-    if (iwl_mvm_has_tlc_offload(mvm)) {
-        MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400);
+  if (iwl_mvm_has_tlc_offload(mvm)) {
+    MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400);
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
-        MVM_DEBUGFS_ADD_STA_FILE(fixed_rate, dir, 0400);
-        MVM_DEBUGFS_ADD_STA_FILE(ampdu_size, dir, 0400);
+    MVM_DEBUGFS_ADD_STA_FILE(fixed_rate, dir, 0400);
+    MVM_DEBUGFS_ADD_STA_FILE(ampdu_size, dir, 0400);
 #endif
-    }
+  }
 
-    return;
+  return;
 err:
-    IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
+  IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
 }
 
 int iwl_mvm_dbgfs_register(struct iwl_mvm* mvm, struct dentry* dbgfs_dir) {
 #ifdef CPTCFG_IWLWIFI_THERMAL_DEBUGFS
-    struct iwl_tt_params* tt_params = &mvm->thermal_throttle.params;
+  struct iwl_tt_params* tt_params = &mvm->thermal_throttle.params;
 #endif
-    struct dentry* bcast_dir __maybe_unused;
-    char buf[100];
+  struct dentry* bcast_dir __maybe_unused;
+  char buf[100];
 
-    spin_lock_init(&mvm->drv_stats_lock);
+  spin_lock_init(&mvm->drv_stats_lock);
 
-    mvm->debugfs_dir = dbgfs_dir;
+  mvm->debugfs_dir = dbgfs_dir;
 
 #ifdef CPTCFG_IWLWIFI_THERMAL_DEBUGFS
-    MVM_DEBUGFS_ADD_FILE(tt_tx_backoff, dbgfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(tt_tx_backoff, dbgfs_dir, 0400);
 #endif
-    MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, 0400);
-    MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, 0400);
-    MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(force_ctkill, dbgfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, 0400);
-    MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, 0400);
-    MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, 0400);
-    MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
-    MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
-    MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400);
-    MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(force_ctkill, dbgfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
-    MVM_DEBUGFS_ADD_FILE(debug_profile, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(enable_adwell_fine_tune_report, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(enable_adwell_channel_dwell_report, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(disable_tx_fifo_mask, mvm->debugfs_dir, 0200);
-    MVM_DEBUGFS_ADD_FILE(ps_config, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(debug_profile, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(enable_adwell_fine_tune_report, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(enable_adwell_channel_dwell_report, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(disable_tx_fifo_mask, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(ps_config, mvm->debugfs_dir, 0200);
 #endif
 #ifdef CONFIG_ACPI
-    MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
 #endif
 #ifdef CPTCFG_IWLMVM_VENDOR_CMDS
-    MVM_DEBUGFS_ADD_FILE(tx_power_status, mvm->debugfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(tx_power_status, mvm->debugfs_dir, 0400);
 #endif
 #ifdef CPTCFG_IWLMVM_AX_SOFTAP_TESTMODE
-    MVM_DEBUGFS_ADD_FILE(ax_softap_client_testmode, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(ax_softap_client_testmode, mvm->debugfs_dir, 0200);
 #endif
-    MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0200);
+  MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0200);
 
-    if (!debugfs_create_bool("enable_scan_iteration_notif", 0600, mvm->debugfs_dir,
-                             &mvm->scan_iter_notif_enabled)) {
-        goto err;
-    }
-    if (!debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir, &mvm->drop_bcn_ap_mode)) {
-        goto err;
-    }
+  if (!debugfs_create_bool("enable_scan_iteration_notif", 0600, mvm->debugfs_dir,
+                           &mvm->scan_iter_notif_enabled)) {
+    goto err;
+  }
+  if (!debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir, &mvm->drop_bcn_ap_mode)) {
+    goto err;
+  }
 
-    MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, 0400);
 
 #ifdef CPTCFG_IWLWIFI_BCAST_FILTERING
-    if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
-        bcast_dir = debugfs_create_dir("bcast_filtering", mvm->debugfs_dir);
-        if (!bcast_dir) { goto err; }
-
-        if (!debugfs_create_bool("override", 0600, bcast_dir,
-                                 &mvm->dbgfs_bcast_filtering.override)) {
-            goto err;
-        }
-
-        MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, bcast_dir, 0600);
-        MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs, bcast_dir, 0600);
+  if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
+    bcast_dir = debugfs_create_dir("bcast_filtering", mvm->debugfs_dir);
+    if (!bcast_dir) {
+      goto err;
     }
+
+    if (!debugfs_create_bool("override", 0600, bcast_dir, &mvm->dbgfs_bcast_filtering.override)) {
+      goto err;
+    }
+
+    MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, bcast_dir, 0600);
+    MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs, bcast_dir, 0600);
+  }
 #endif
 
 #ifdef CPTCFG_IWLMVM_ADVANCED_QUOTA_MGMT
-    MVM_DEBUGFS_ADD_FILE(quota_status, mvm->debugfs_dir, 0400);
+  MVM_DEBUGFS_ADD_FILE(quota_status, mvm->debugfs_dir, 0400);
 #endif
 
 #ifdef CONFIG_PM_SLEEP
-    MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, 0600);
-    MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
-    if (!debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
-                             &mvm->d3_wake_sysassert)) {
-        goto err;
-    }
-    if (!debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir,
-                            &mvm->last_netdetect_scans)) {
-        goto err;
-    }
+  MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, 0600);
+  MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
+  if (!debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir, &mvm->d3_wake_sysassert)) {
+    goto err;
+  }
+  if (!debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir,
+                          &mvm->last_netdetect_scans)) {
+    goto err;
+  }
 #endif
 
-    if (!debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir, &mvm->ps_disabled)) { goto err; }
-    if (!debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir, &mvm->nvm_hw_blob)) { goto err; }
-    if (!debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir, &mvm->nvm_sw_blob)) { goto err; }
-    if (!debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir, &mvm->nvm_calib_blob)) {
-        goto err;
-    }
-    if (!debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir, &mvm->nvm_prod_blob)) { goto err; }
-    if (!debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir, &mvm->nvm_phy_sku_blob)) {
-        goto err;
-    }
+  if (!debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir, &mvm->ps_disabled)) {
+    goto err;
+  }
+  if (!debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir, &mvm->nvm_hw_blob)) {
+    goto err;
+  }
+  if (!debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir, &mvm->nvm_sw_blob)) {
+    goto err;
+  }
+  if (!debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir, &mvm->nvm_calib_blob)) {
+    goto err;
+  }
+  if (!debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir, &mvm->nvm_prod_blob)) {
+    goto err;
+  }
+  if (!debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir, &mvm->nvm_phy_sku_blob)) {
+    goto err;
+  }
 
 #ifdef CPTCFG_IWLWIFI_THERMAL_DEBUGFS
-    if (!debugfs_create_u32("ct_kill_exit", 0600, mvm->debugfs_dir, &tt_params->ct_kill_exit)) {
-        goto err;
-    }
-    if (!debugfs_create_u32("ct_kill_entry", 0600, mvm->debugfs_dir, &tt_params->ct_kill_entry)) {
-        goto err;
-    }
-    if (!debugfs_create_u32("ct_kill_duration", 0600, mvm->debugfs_dir,
-                            &tt_params->ct_kill_duration)) {
-        goto err;
-    }
-    if (!debugfs_create_u32("dynamic_smps_entry", 0600, mvm->debugfs_dir,
-                            &tt_params->dynamic_smps_entry)) {
-        goto err;
-    }
-    if (!debugfs_create_u32("dynamic_smps_exit", 0600, mvm->debugfs_dir,
-                            &tt_params->dynamic_smps_exit)) {
-        goto err;
-    }
-    if (!debugfs_create_u32("tx_protection_entry", 0600, mvm->debugfs_dir,
-                            &tt_params->tx_protection_entry)) {
-        goto err;
-    }
-    if (!debugfs_create_u32("tx_protection_exit", 0600, mvm->debugfs_dir,
-                            &tt_params->tx_protection_exit)) {
-        goto err;
-    }
+  if (!debugfs_create_u32("ct_kill_exit", 0600, mvm->debugfs_dir, &tt_params->ct_kill_exit)) {
+    goto err;
+  }
+  if (!debugfs_create_u32("ct_kill_entry", 0600, mvm->debugfs_dir, &tt_params->ct_kill_entry)) {
+    goto err;
+  }
+  if (!debugfs_create_u32("ct_kill_duration", 0600, mvm->debugfs_dir,
+                          &tt_params->ct_kill_duration)) {
+    goto err;
+  }
+  if (!debugfs_create_u32("dynamic_smps_entry", 0600, mvm->debugfs_dir,
+                          &tt_params->dynamic_smps_entry)) {
+    goto err;
+  }
+  if (!debugfs_create_u32("dynamic_smps_exit", 0600, mvm->debugfs_dir,
+                          &tt_params->dynamic_smps_exit)) {
+    goto err;
+  }
+  if (!debugfs_create_u32("tx_protection_entry", 0600, mvm->debugfs_dir,
+                          &tt_params->tx_protection_entry)) {
+    goto err;
+  }
+  if (!debugfs_create_u32("tx_protection_exit", 0600, mvm->debugfs_dir,
+                          &tt_params->tx_protection_exit)) {
+    goto err;
+  }
 #endif
 
-    debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);
+  debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);
 
-    /*
-     * Create a symlink with mac80211. It will be removed when mac80211
-     * exists (before the opmode exists which removes the target.)
-     */
+  /*
+   * Create a symlink with mac80211. It will be removed when mac80211
+   * exists (before the opmode exists which removes the target.)
+   */
 #if LINUX_VERSION_IS_GEQ(3, 12, 0)
-    snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent);
+  snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent);
 #else
-    snprintf(buf, 100, "../../%s/%s", dbgfs_dir->d_parent->d_parent->d_name.name,
-             dbgfs_dir->d_parent->d_name.name);
+  snprintf(buf, 100, "../../%s/%s", dbgfs_dir->d_parent->d_parent->d_name.name,
+           dbgfs_dir->d_parent->d_name.name);
 #endif
-    if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf)) { goto err; }
+  if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf)) {
+    goto err;
+  }
 
-    return 0;
+  return 0;
 err:
-    IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
-    return -ENOMEM;
+  IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
+  return -ENOMEM;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs.h
index 5660ca1..dbb325b 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/debugfs.h
@@ -35,40 +35,41 @@
 #ifndef SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_MVM_DEBUGFS_H_
 #define SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_MVM_DEBUGFS_H_
 
-#define MVM_DEBUGFS_READ_FILE_OPS(name)                            \
-    static const struct file_operations iwl_dbgfs_##name##_ops = { \
-        .read = iwl_dbgfs_##name##_read,                           \
-        .open = simple_open,                                       \
-        .llseek = generic_file_llseek,                             \
-    }
+#define MVM_DEBUGFS_READ_FILE_OPS(name)                          \
+  static const struct file_operations iwl_dbgfs_##name##_ops = { \
+      .read = iwl_dbgfs_##name##_read,                           \
+      .open = simple_open,                                       \
+      .llseek = generic_file_llseek,                             \
+  }
 
-#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)                                     \
-    static ssize_t _iwl_dbgfs_##name##_write(struct file* file, const char __user* user_buf, \
-                                             size_t count, loff_t* ppos) {                   \
-        argtype* arg = file->private_data;                                                   \
-        char buf[buflen] = {};                                                               \
-        size_t buf_size = min(count, sizeof(buf) - 1);                                       \
-                                                                                             \
-        if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT;                         \
-                                                                                             \
-        return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos);                           \
-    }
+#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)                                   \
+  static ssize_t _iwl_dbgfs_##name##_write(struct file* file, const char __user* user_buf, \
+                                           size_t count, loff_t* ppos) {                   \
+    argtype* arg = file->private_data;                                                     \
+    char buf[buflen] = {};                                                                 \
+    size_t buf_size = min(count, sizeof(buf) - 1);                                         \
+                                                                                           \
+    if (copy_from_user(buf, user_buf, buf_size))                                           \
+      return -EFAULT;                                                                      \
+                                                                                           \
+    return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos);                             \
+  }
 
-#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype)    \
-    MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)               \
-    static const struct file_operations iwl_dbgfs_##name##_ops = { \
-        .write = _iwl_dbgfs_##name##_write,                        \
-        .read = iwl_dbgfs_##name##_read,                           \
-        .open = simple_open,                                       \
-        .llseek = generic_file_llseek,                             \
-    };
+#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype)  \
+  MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)               \
+  static const struct file_operations iwl_dbgfs_##name##_ops = { \
+      .write = _iwl_dbgfs_##name##_write,                        \
+      .read = iwl_dbgfs_##name##_read,                           \
+      .open = simple_open,                                       \
+      .llseek = generic_file_llseek,                             \
+  };
 
-#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype)         \
-    MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)               \
-    static const struct file_operations iwl_dbgfs_##name##_ops = { \
-        .write = _iwl_dbgfs_##name##_write,                        \
-        .open = simple_open,                                       \
-        .llseek = generic_file_llseek,                             \
-    };
+#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype)       \
+  MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)               \
+  static const struct file_operations iwl_dbgfs_##name##_ops = { \
+      .write = _iwl_dbgfs_##name##_write,                        \
+      .open = simple_open,                                       \
+      .llseek = generic_file_llseek,                             \
+  };
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_MVM_DEBUGFS_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fm-ops.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fm-ops.c
index 6789610..46d593f 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fm-ops.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fm-ops.c
@@ -32,6 +32,7 @@
  *
  *****************************************************************************/
 #include <linux/fm/iui_fm.h>
+
 #include "mvm.h"
 
 /*
@@ -44,13 +45,13 @@
 static bool debug_mode;
 
 struct chan_ifaces {
-    struct iui_fm_wlan_channel_tx_power* chan_txpwr;
-    int num_of_vif; /* for statistics */
+  struct iui_fm_wlan_channel_tx_power* chan_txpwr;
+  int num_of_vif; /* for statistics */
 };
 
 struct chan_list {
-    struct iui_fm_wlan_info* winfo;
-    enum iwl_fm_chan_change_action action;
+  struct iui_fm_wlan_info* winfo;
+  enum iwl_fm_chan_change_action action;
 };
 
 /* last reported channel notification to the FM */
@@ -65,76 +66,78 @@
  * Search for an interface with a given frequency
  */
 static void iwl_mvm_fm_iface_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct chan_ifaces* data = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct ieee80211_chanctx_conf* chanctx_conf;
+  struct chan_ifaces* data = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct ieee80211_chanctx_conf* chanctx_conf;
 
-    /* P2P device or NAN are never assigned a channel */
-    if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) || (vif->type == NL80211_IFTYPE_NAN)) { return; }
+  /* P2P device or NAN are never assigned a channel */
+  if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) || (vif->type == NL80211_IFTYPE_NAN)) {
+    return;
+  }
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    chanctx_conf = rcu_dereference(vif->chanctx_conf);
-    /* make sure the channel context is assigned */
-    if (!chanctx_conf) {
-        rcu_read_unlock();
-        return;
-    }
-
-    if (chanctx_conf->min_def.center_freq1 == KHZ_TO_MHZ(data->chan_txpwr->frequency)) {
-        mvmvif->phy_ctxt->fm_tx_power_limit = data->chan_txpwr->max_tx_pwr;
-        data->num_of_vif++;
-        rcu_read_unlock();
-        /* FM requests to remove Tx power limitation */
-        if (data->chan_txpwr->max_tx_pwr == IUI_FM_WLAN_NO_TX_PWR_LIMIT) {
-            data->chan_txpwr->max_tx_pwr = IWL_DEFAULT_MAX_TX_POWER;
-        }
-
-        iwl_mvm_fm_set_tx_power(g_mvm, vif, data->chan_txpwr->max_tx_pwr);
-        return;
-    }
-
+  chanctx_conf = rcu_dereference(vif->chanctx_conf);
+  /* make sure the channel context is assigned */
+  if (!chanctx_conf) {
     rcu_read_unlock();
+    return;
+  }
+
+  if (chanctx_conf->min_def.center_freq1 == KHZ_TO_MHZ(data->chan_txpwr->frequency)) {
+    mvmvif->phy_ctxt->fm_tx_power_limit = data->chan_txpwr->max_tx_pwr;
+    data->num_of_vif++;
+    rcu_read_unlock();
+    /* FM requests to remove Tx power limitation */
+    if (data->chan_txpwr->max_tx_pwr == IUI_FM_WLAN_NO_TX_PWR_LIMIT) {
+      data->chan_txpwr->max_tx_pwr = IWL_DEFAULT_MAX_TX_POWER;
+    }
+
+    iwl_mvm_fm_set_tx_power(g_mvm, vif, data->chan_txpwr->max_tx_pwr);
+    return;
+  }
+
+  rcu_read_unlock();
 }
 
 /*
  * Search for an interface with a given frequency
  */
 static void iwl_mvm_fm_chan_vldt_iter(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct chan_ifaces* data = _data;
-    struct ieee80211_chanctx_conf* chanctx_conf;
+  struct chan_ifaces* data = _data;
+  struct ieee80211_chanctx_conf* chanctx_conf;
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    chanctx_conf = rcu_dereference(vif->chanctx_conf);
-    /* make sure the channel context is assigned */
-    if (!chanctx_conf) {
-        rcu_read_unlock();
-        return;
-    }
-
-    if (chanctx_conf->min_def.center_freq1 == KHZ_TO_MHZ(data->chan_txpwr->frequency)) {
-        data->num_of_vif++;
-    }
-
+  chanctx_conf = rcu_dereference(vif->chanctx_conf);
+  /* make sure the channel context is assigned */
+  if (!chanctx_conf) {
     rcu_read_unlock();
+    return;
+  }
+
+  if (chanctx_conf->min_def.center_freq1 == KHZ_TO_MHZ(data->chan_txpwr->frequency)) {
+    data->num_of_vif++;
+  }
+
+  rcu_read_unlock();
 }
 
 static enum iui_fm_wlan_bandwidth iwl_mvm_fm_get_bandwidth(enum nl80211_chan_width bandwidth) {
-    switch (bandwidth) {
+  switch (bandwidth) {
     case NL80211_CHAN_WIDTH_20_NOHT:
-        return IUI_FM_WLAN_BW_20MHZ;
+      return IUI_FM_WLAN_BW_20MHZ;
     case NL80211_CHAN_WIDTH_20:
-        return IUI_FM_WLAN_BW_20MHZ;
+      return IUI_FM_WLAN_BW_20MHZ;
     case NL80211_CHAN_WIDTH_40:
-        return IUI_FM_WLAN_BW_40MHZ;
+      return IUI_FM_WLAN_BW_40MHZ;
     case NL80211_CHAN_WIDTH_80:
-        return IUI_FM_WLAN_BW_80MHZ;
+      return IUI_FM_WLAN_BW_80MHZ;
     case NL80211_CHAN_WIDTH_160:
-        return IUI_FM_WLAN_BW_160MHZ;
+      return IUI_FM_WLAN_BW_160MHZ;
     default:
-        return IUI_FM_WLAN_BW_INVALID;
-    }
+      return IUI_FM_WLAN_BW_INVALID;
+  }
 }
 
 /*
@@ -142,31 +145,33 @@
  */
 static void iwl_mvm_fm_chan_iterator(struct ieee80211_hw* hw, struct ieee80211_chanctx_conf* ctx,
                                      void* _data) {
-    int i;
-    enum nl80211_chan_width band;
-    uint32_t freq;
-    struct chan_list* data = _data;
-    struct iui_fm_wlan_info* winfo = data->winfo;
+  int i;
+  enum nl80211_chan_width band;
+  uint32_t freq;
+  struct chan_list* data = _data;
+  struct iui_fm_wlan_info* winfo = data->winfo;
 
-    if (winfo->num_channels == IUI_FM_WLAN_MAX_CHANNELS) { return; }
+  if (winfo->num_channels == IUI_FM_WLAN_MAX_CHANNELS) {
+    return;
+  }
 
-    freq = MHZ_TO_KHZ(ctx->min_def.center_freq1);
-    band = ctx->min_def.width;
+  freq = MHZ_TO_KHZ(ctx->min_def.center_freq1);
+  band = ctx->min_def.width;
 
-    for (i = 0; i < winfo->num_channels; i++)
-        if (winfo->channel_info[i].frequency == freq) {
-            /*
-             * channel exists - but bandwidth maybe invalid since
-             * we are removing a ctx that operates on this channel
-             */
-            winfo->channel_info[winfo->num_channels].bandwidth = iwl_mvm_fm_get_bandwidth(band);
-            return; /* channel already exists in list */
-        }
+  for (i = 0; i < winfo->num_channels; i++)
+    if (winfo->channel_info[i].frequency == freq) {
+      /*
+       * channel exists - but bandwidth maybe invalid since
+       * we are removing a ctx that operates on this channel
+       */
+      winfo->channel_info[winfo->num_channels].bandwidth = iwl_mvm_fm_get_bandwidth(band);
+      return; /* channel already exists in list */
+    }
 
-    winfo->channel_info[winfo->num_channels].frequency = freq;
-    winfo->channel_info[winfo->num_channels].bandwidth = iwl_mvm_fm_get_bandwidth(band);
+  winfo->channel_info[winfo->num_channels].frequency = freq;
+  winfo->channel_info[winfo->num_channels].bandwidth = iwl_mvm_fm_get_bandwidth(band);
 
-    winfo->num_channels++;
+  winfo->num_channels++;
 }
 
 /*
@@ -174,30 +179,34 @@
  * Reducing the Tx power of all interfaces that use a specific channel.
  */
 static enum iui_fm_mitigation_status iwl_mvm_fm_mitig_txpwr(struct iui_fm_wlan_mitigation* mit) {
-    int i;
-    struct chan_ifaces chan_ifaces;
-    struct iui_fm_wlan_channel_tx_power* chan_txpwr_list = mit->channel_tx_pwr;
-    uint32_t num_channels = mit->num_channels;
+  int i;
+  struct chan_ifaces chan_ifaces;
+  struct iui_fm_wlan_channel_tx_power* chan_txpwr_list = mit->channel_tx_pwr;
+  uint32_t num_channels = mit->num_channels;
 
-    /* Not required to mitigate tx power */
-    if (!(mit->bitmask & WLAN_MITI)) { goto ret; }
+  /* Not required to mitigate tx power */
+  if (!(mit->bitmask & WLAN_MITI)) {
+    goto ret;
+  }
 
-    if (IUI_FM_WLAN_MAX_CHANNELS < num_channels) { return IUI_FM_MITIGATION_ERROR_INVALID_PARAM; }
+  if (IUI_FM_WLAN_MAX_CHANNELS < num_channels) {
+    return IUI_FM_MITIGATION_ERROR_INVALID_PARAM;
+  }
 
-    for (i = 0; i < num_channels; i++) {
-        chan_ifaces.chan_txpwr = &chan_txpwr_list[i];
-        chan_ifaces.num_of_vif = 0;
-        /* find all interfaces that use this channel */
-        ieee80211_iterate_active_interfaces(g_mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                            iwl_mvm_fm_iface_iterator, &chan_ifaces);
-        IWL_DEBUG_EXTERNAL(g_mvm,
-                           "FM: Changed Tx power to %d for %d"
-                           " vifs on channel %d\n",
-                           chan_ifaces.chan_txpwr->max_tx_pwr, chan_ifaces.num_of_vif,
-                           chan_ifaces.chan_txpwr->frequency);
-    }
+  for (i = 0; i < num_channels; i++) {
+    chan_ifaces.chan_txpwr = &chan_txpwr_list[i];
+    chan_ifaces.num_of_vif = 0;
+    /* find all interfaces that use this channel */
+    ieee80211_iterate_active_interfaces(g_mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                        iwl_mvm_fm_iface_iterator, &chan_ifaces);
+    IWL_DEBUG_EXTERNAL(g_mvm,
+                       "FM: Changed Tx power to %d for %d"
+                       " vifs on channel %d\n",
+                       chan_ifaces.chan_txpwr->max_tx_pwr, chan_ifaces.num_of_vif,
+                       chan_ifaces.chan_txpwr->frequency);
+  }
 ret:
-    return IUI_FM_MITIGATION_COMPLETE_OK;
+  return IUI_FM_MITIGATION_COMPLETE_OK;
 }
 
 /*
@@ -206,16 +215,20 @@
  */
 static enum iui_fm_mitigation_status iwl_mvm_fm_mitig_adc_dac_freq(
     struct iui_fm_wlan_mitigation* mit) {
-    uint32_t adc_dac_freq = mit->wlan_adc_dac_freq;
+  uint32_t adc_dac_freq = mit->wlan_adc_dac_freq;
 
-    /* Not required to mitigate adc dac */
-    if (!(mit->bitmask & WLAN_MITI)) { goto ret; }
+  /* Not required to mitigate adc dac */
+  if (!(mit->bitmask & WLAN_MITI)) {
+    goto ret;
+  }
 
-    if (adc_dac_freq != 0) { return IUI_FM_MITIGATION_ERROR_INVALID_PARAM; }
+  if (adc_dac_freq != 0) {
+    return IUI_FM_MITIGATION_ERROR_INVALID_PARAM;
+  }
 
-    IWL_DEBUG_EXTERNAL(g_mvm, "FM: adc - dac mitigation\n");
+  IWL_DEBUG_EXTERNAL(g_mvm, "FM: adc - dac mitigation\n");
 ret:
-    return IUI_FM_MITIGATION_COMPLETE_OK;
+  return IUI_FM_MITIGATION_COMPLETE_OK;
 }
 
 /*
@@ -224,125 +237,134 @@
  */
 static enum iui_fm_mitigation_status iwl_mvm_fm_mitig_rxgain_behavior(
     struct iui_fm_wlan_mitigation* mit) {
-    enum iui_fm_wlan_rx_gain_behavior rx_gain = mit->rx_gain_behavior;
+  enum iui_fm_wlan_rx_gain_behavior rx_gain = mit->rx_gain_behavior;
 
-    /* Not required to mitigate rx gain */
-    if (!(mit->bitmask & WLAN_MITI)) { goto ret; }
+  /* Not required to mitigate rx gain */
+  if (!(mit->bitmask & WLAN_MITI)) {
+    goto ret;
+  }
 
-    if (rx_gain != IUI_FM_WLAN_RX_GAIN_NORMAL) { return IUI_FM_MITIGATION_ERROR_INVALID_PARAM; }
+  if (rx_gain != IUI_FM_WLAN_RX_GAIN_NORMAL) {
+    return IUI_FM_MITIGATION_ERROR_INVALID_PARAM;
+  }
 
-    IWL_DEBUG_EXTERNAL(g_mvm, "FM: rxgain behaviour mitigation - not implemented\n");
+  IWL_DEBUG_EXTERNAL(g_mvm, "FM: rxgain behaviour mitigation - not implemented\n");
 ret:
-    return IUI_FM_MITIGATION_COMPLETE_OK;
+  return IUI_FM_MITIGATION_COMPLETE_OK;
 }
 
 /*
  * Enables/Disable 2G coex mode -  aggregation limiting.
  */
 static enum iui_fm_mitigation_status iwl_mvm_fm_2g_coex(struct iui_fm_wlan_mitigation* mit) {
-    enum iui_fm_mitigation_status ret = IUI_FM_MITIGATION_COMPLETE_OK;
-    struct iwl_config_2g_coex_cmd cmd = {};
-    int i;
-    struct iui_fm_wlan_channel_tx_power* chan;
-    struct iui_fm_wlan_channel_tx_power* chan_txpwr_list = mit->channel_tx_pwr;
-    uint32_t num_channels = mit->num_channels;
+  enum iui_fm_mitigation_status ret = IUI_FM_MITIGATION_COMPLETE_OK;
+  struct iwl_config_2g_coex_cmd cmd = {};
+  int i;
+  struct iui_fm_wlan_channel_tx_power* chan;
+  struct iui_fm_wlan_channel_tx_power* chan_txpwr_list = mit->channel_tx_pwr;
+  uint32_t num_channels = mit->num_channels;
 
-    /* Not required to mitigate 2g coex */
-    if (!(mit->bitmask & WLAN_MITI)) { return ret; }
-
-    /* fw does not support the 2g coex cmd */
-    if (!fw_has_capa(&g_mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_2G_COEX_SUPPORT)) {
-        goto sofia_xmm;
-    }
-
-    /* No need to change 2g coex state */
-    if (g_mvm->coex_2g_enabled == mit->wlan_2g_coex_enable) { return ret; }
-
-    g_mvm->coex_2g_enabled = mit->wlan_2g_coex_enable;
-
-    cmd.enabled = cpu_to_le32(g_mvm->coex_2g_enabled);
-
-    mutex_lock(&g_mvm->mutex);
-    ret = iwl_mvm_send_cmd_pdu(g_mvm, CONFIG_2G_COEX_CMD, 0, sizeof(cmd), &cmd);
-    mutex_unlock(&g_mvm->mutex);
-    if (ret) {
-        IWL_ERR(g_mvm, "Failed to send 2g coex command(%sabling)\n",
-                g_mvm->coex_2g_enabled ? "en" : "dis");
-        return IUI_FM_MITIGATION_ERROR;
-    }
-    IWL_DEBUG_EXTERNAL(g_mvm, "FM 2G coex: %sabling 2G coex mode (sent fw cmd)\n",
-                       g_mvm->coex_2g_enabled ? "en" : "dis");
+  /* Not required to mitigate 2g coex */
+  if (!(mit->bitmask & WLAN_MITI)) {
     return ret;
+  }
+
+  /* fw does not support the 2g coex cmd */
+  if (!fw_has_capa(&g_mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_2G_COEX_SUPPORT)) {
+    goto sofia_xmm;
+  }
+
+  /* No need to change 2g coex state */
+  if (g_mvm->coex_2g_enabled == mit->wlan_2g_coex_enable) {
+    return ret;
+  }
+
+  g_mvm->coex_2g_enabled = mit->wlan_2g_coex_enable;
+
+  cmd.enabled = cpu_to_le32(g_mvm->coex_2g_enabled);
+
+  mutex_lock(&g_mvm->mutex);
+  ret = iwl_mvm_send_cmd_pdu(g_mvm, CONFIG_2G_COEX_CMD, 0, sizeof(cmd), &cmd);
+  mutex_unlock(&g_mvm->mutex);
+  if (ret) {
+    IWL_ERR(g_mvm, "Failed to send 2g coex command(%sabling)\n",
+            g_mvm->coex_2g_enabled ? "en" : "dis");
+    return IUI_FM_MITIGATION_ERROR;
+  }
+  IWL_DEBUG_EXTERNAL(g_mvm, "FM 2G coex: %sabling 2G coex mode (sent fw cmd)\n",
+                     g_mvm->coex_2g_enabled ? "en" : "dis");
+  return ret;
 
 sofia_xmm:
-    /* Flow for SOFIA 3G & XMM6321 - don't support the 2g coex cmd */
-    if (IUI_FM_WLAN_MAX_CHANNELS < num_channels) { return IUI_FM_MITIGATION_ERROR_INVALID_PARAM; }
+  /* Flow for SOFIA 3G & XMM6321 - don't support the 2g coex cmd */
+  if (IUI_FM_WLAN_MAX_CHANNELS < num_channels) {
+    return IUI_FM_MITIGATION_ERROR_INVALID_PARAM;
+  }
 
-    for (i = 0; i < num_channels; i++) {
-        chan = &chan_txpwr_list[i];
-        if (chan->frequency == FM_2G_COEX_ENABLE_DISABLE) {
-            mutex_lock(&g_mvm->mutex);
-            if (chan->max_tx_pwr == FM_2G_COEX_ENABLE) {
-                g_mvm->coex_2g_enabled = true;
-            } else if (chan->max_tx_pwr == FM_2G_COEX_DISABLE) {
-                g_mvm->coex_2g_enabled = false;
-            } else {
-                IWL_DEBUG_EXTERNAL(g_mvm,
-                                   "FM 2G coex: ERROR: Invalid paramters for enable/disable(%d)\n",
-                                   chan->max_tx_pwr);
-                ret = IUI_FM_MITIGATION_ERROR_INVALID_PARAM;
-            }
-            IWL_DEBUG_EXTERNAL(g_mvm, "FM 2G coex: %sabling 2G coex mode\n",
-                               g_mvm->coex_2g_enabled ? "en" : "dis");
-            mutex_unlock(&g_mvm->mutex);
-            break;
-        }
+  for (i = 0; i < num_channels; i++) {
+    chan = &chan_txpwr_list[i];
+    if (chan->frequency == FM_2G_COEX_ENABLE_DISABLE) {
+      mutex_lock(&g_mvm->mutex);
+      if (chan->max_tx_pwr == FM_2G_COEX_ENABLE) {
+        g_mvm->coex_2g_enabled = true;
+      } else if (chan->max_tx_pwr == FM_2G_COEX_DISABLE) {
+        g_mvm->coex_2g_enabled = false;
+      } else {
+        IWL_DEBUG_EXTERNAL(g_mvm, "FM 2G coex: ERROR: Invalid parameters for enable/disable(%d)\n",
+                           chan->max_tx_pwr);
+        ret = IUI_FM_MITIGATION_ERROR_INVALID_PARAM;
+      }
+      IWL_DEBUG_EXTERNAL(g_mvm, "FM 2G coex: %sabling 2G coex mode\n",
+                         g_mvm->coex_2g_enabled ? "en" : "dis");
+      mutex_unlock(&g_mvm->mutex);
+      break;
     }
+  }
 
-    return ret;
+  return ret;
 }
 
 static int iwl_mvm_fm_send_dcdc_cmd(uint32_t div0, uint32_t div1, uint32_t flags) {
-    int ret;
-    struct iwl_dc2dc_config_resp* resp;
-    struct iwl_rx_packet* pkt;
-    struct iwl_dc2dc_config_cmd dcdc = {
-        .flags = cpu_to_le32(flags),
-        .dc2dc_freq_tune0 = cpu_to_le32(div0),
-        .dc2dc_freq_tune1 = cpu_to_le32(div1),
-    };
-    struct iwl_host_cmd cmd = {
-        .id = DC2DC_CONFIG_CMD,
-        .flags = CMD_WANT_SKB,
-        .data = {&dcdc},
-        .len = {sizeof(struct iwl_dc2dc_config_cmd)},
-    };
+  int ret;
+  struct iwl_dc2dc_config_resp* resp;
+  struct iwl_rx_packet* pkt;
+  struct iwl_dc2dc_config_cmd dcdc = {
+      .flags = cpu_to_le32(flags),
+      .dc2dc_freq_tune0 = cpu_to_le32(div0),
+      .dc2dc_freq_tune1 = cpu_to_le32(div1),
+  };
+  struct iwl_host_cmd cmd = {
+      .id = DC2DC_CONFIG_CMD,
+      .flags = CMD_WANT_SKB,
+      .data = {&dcdc},
+      .len = {sizeof(struct iwl_dc2dc_config_cmd)},
+  };
 
-    /* fw does not support the dcdc cmd */
-    if (!fw_has_capa(&g_mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT)) {
-        return -EINVAL;
-    }
+  /* fw does not support the dcdc cmd */
+  if (!fw_has_capa(&g_mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT)) {
+    return -EINVAL;
+  }
 
-    ret = iwl_mvm_send_cmd(g_mvm, &cmd);
+  ret = iwl_mvm_send_cmd(g_mvm, &cmd);
 
-    if (ret) {
-        IWL_ERR(g_mvm, "FM: Failed to send dcdc cmd (ret = %d)\n", ret);
-        return ret;
-    }
-
-    pkt = cmd.resp_pkt;
-    if (!pkt) {
-        IWL_ERR(g_mvm, "FM: Error DCDC cmd response is NULL\n");
-        return -EIO;
-    }
-    resp = (void*)pkt->data;
-
-    /* update the current dcdc values */
-    g_dcdc_div0 = le32_to_cpu(resp->dc2dc_freq_tune0);
-    g_dcdc_div1 = le32_to_cpu(resp->dc2dc_freq_tune1);
-
-    iwl_free_resp(&cmd);
+  if (ret) {
+    IWL_ERR(g_mvm, "FM: Failed to send dcdc cmd (ret = %d)\n", ret);
     return ret;
+  }
+
+  pkt = cmd.resp_pkt;
+  if (!pkt) {
+    IWL_ERR(g_mvm, "FM: Error DCDC cmd response is NULL\n");
+    return -EIO;
+  }
+  resp = (void*)pkt->data;
+
+  /* update the current dcdc values */
+  g_dcdc_div0 = le32_to_cpu(resp->dc2dc_freq_tune0);
+  g_dcdc_div1 = le32_to_cpu(resp->dc2dc_freq_tune1);
+
+  iwl_free_resp(&cmd);
+  return ret;
 }
 
 /*
@@ -352,29 +374,33 @@
  * and therefore DCDC needs to change its clock rate.
  */
 static enum iui_fm_mitigation_status iwl_mvm_fm_mitig_dcdc(struct iui_fm_wlan_mitigation* mit) {
-    enum iui_fm_mitigation_status ret = IUI_FM_MITIGATION_COMPLETE_OK;
+  enum iui_fm_mitigation_status ret = IUI_FM_MITIGATION_COMPLETE_OK;
 
-    /* Not required to mitigate dcdc */
-    if (!(mit->bitmask & DCDC_MITI)) { return ret; }
+  /* Not required to mitigate dcdc */
+  if (!(mit->bitmask & DCDC_MITI)) {
+    return ret;
+  }
 
-    /* Current dcdc values match requested values */
-    if (mit->dcdc_div0 == g_dcdc_div0 && mit->dcdc_div1 == g_dcdc_div0) {
-        IWL_DEBUG_EXTERNAL(
-            g_mvm, "FM DCDC: Current dcdc values match requested values - not mitigating\n");
-        goto out;
-    }
+  /* Current dcdc values match requested values */
+  if (mit->dcdc_div0 == g_dcdc_div0 && mit->dcdc_div1 == g_dcdc_div1) {
+    IWL_DEBUG_EXTERNAL(g_mvm,
+                       "FM DCDC: Current dcdc values match requested values - not mitigating\n");
+    goto out;
+  }
 
-    mutex_lock(&g_mvm->mutex);
-    ret = iwl_mvm_fm_send_dcdc_cmd(mit->dcdc_div0, mit->dcdc_div1, DCDC_FREQ_TUNE_SET);
-    mutex_unlock(&g_mvm->mutex);
+  mutex_lock(&g_mvm->mutex);
+  ret = iwl_mvm_fm_send_dcdc_cmd(mit->dcdc_div0, mit->dcdc_div1, DCDC_FREQ_TUNE_SET);
+  mutex_unlock(&g_mvm->mutex);
 
-    if (ret) { ret = IUI_FM_MITIGATION_ERROR; }
+  if (ret) {
+    ret = IUI_FM_MITIGATION_ERROR;
+  }
 
 out:
-    IWL_DEBUG_EXTERNAL(g_mvm, "FM DCDC: mitigation %s (div0 = %d, div1 = %d)\n",
-                       ret ? "failed" : "succeeded", g_dcdc_div0, g_dcdc_div1);
+  IWL_DEBUG_EXTERNAL(g_mvm, "FM DCDC: mitigation %s (div0 = %d, div1 = %d)\n",
+                     ret ? "failed" : "succeeded", g_dcdc_div0, g_dcdc_div1);
 
-    return ret;
+  return ret;
 }
 
 /*
@@ -382,73 +408,76 @@
  * the DCDC mitigation.
  */
 void iwl_mvm_fm_notify_current_dcdc(void) {
-    int ret;
-    struct iui_fm_freq_notification notification;
-    struct iui_fm_wlan_info winfo;
+  int ret;
+  struct iui_fm_freq_notification notification;
+  struct iui_fm_wlan_info winfo;
 
-    if (!g_mvm) { return; }
+  if (!g_mvm) {
+    return;
+  }
 
-    memset(&winfo, 0, sizeof(struct iui_fm_wlan_info));
+  memset(&winfo, 0, sizeof(struct iui_fm_wlan_info));
 
-    /* Get current DCDC from the FW */
-    ret = iwl_mvm_fm_send_dcdc_cmd(0, 0, 0);
-    if (ret) { goto out; }
+  /* Get current DCDC from the FW */
+  ret = iwl_mvm_fm_send_dcdc_cmd(0, 0, 0);
+  if (ret) {
+    goto out;
+  }
 
-    winfo.dcdc_div0 = g_dcdc_div0;
-    winfo.dcdc_div1 = g_dcdc_div1;
+  winfo.dcdc_div0 = g_dcdc_div0;
+  winfo.dcdc_div1 = g_dcdc_div1;
 
-    /* mark the change that we are reporting */
-    winfo.bitmask |= DCDC_UPDATE;
+  /* mark the change that we are reporting */
+  winfo.bitmask |= DCDC_UPDATE;
 
-    notification.type = IUI_FM_FREQ_NOTIFICATION_TYPE_WLAN;
-    notification.info.wlan_info = &winfo;
+  notification.type = IUI_FM_FREQ_NOTIFICATION_TYPE_WLAN;
+  notification.info.wlan_info = &winfo;
 
-    ret = iwl_mvm_fm_notify_frequency(debug_mode, IUI_FM_MACRO_ID_WLAN, &notification);
+  ret = iwl_mvm_fm_notify_frequency(debug_mode, IUI_FM_MACRO_ID_WLAN, &notification);
 
 out:
-    IWL_DEBUG_EXTERNAL(g_mvm, "FM: notified fm about dcdc div0 = %d div1 = %d (fail = %d)\n",
-                       winfo.dcdc_div0, winfo.dcdc_div1, ret);
+  IWL_DEBUG_EXTERNAL(g_mvm, "FM: notified fm about dcdc div0 = %d div1 = %d (fail = %d)\n",
+                     winfo.dcdc_div0, winfo.dcdc_div1, ret);
 }
 
 /*
  * Check if the list of channels that the FM supplied is valid
  */
 static bool iwl_mvm_fm_invalid_channel_list(struct iui_fm_wlan_mitigation* mit) {
-    struct iui_fm_wlan_channel_tx_power* chan_txpwr_list = mit->channel_tx_pwr;
-    uint32_t num_channels = mit->num_channels;
-    uint8_t i, j;
+  struct iui_fm_wlan_channel_tx_power* chan_txpwr_list = mit->channel_tx_pwr;
+  uint32_t num_channels = mit->num_channels;
+  uint8_t i, j;
 
-    /* Check if the same frequency appears twice */
-    for (i = 0; i < num_channels; i++) {
-        for (j = 0; j < num_channels; j++) {
-            if (chan_txpwr_list[i].frequency == chan_txpwr_list[j].frequency && i != j) {
-                int freq;
+  /* Check if the same frequency appears twice */
+  for (i = 0; i < num_channels; i++) {
+    for (j = 0; j < num_channels; j++) {
+      if (chan_txpwr_list[i].frequency == chan_txpwr_list[j].frequency && i != j) {
+        int freq;
 
-                freq = chan_txpwr_list[i].frequency;
-                IWL_DEBUG_EXTERNAL(
-                    g_mvm, "FM: Invalid channel list: duplicated frequencies (freq = %d)\n", freq);
-                return true;
-            }
-        }
+        freq = chan_txpwr_list[i].frequency;
+        IWL_DEBUG_EXTERNAL(g_mvm, "FM: Invalid channel list: duplicated frequencies (freq = %d)\n",
+                           freq);
+        return true;
+      }
     }
+  }
 
-    /* Check that all of the channels are used */
-    for (i = 0; i < num_channels; i++) {
-        struct chan_ifaces chan_ifaces = {
-            .chan_txpwr = &chan_txpwr_list[i],
-            .num_of_vif = 0,
-        };
-        ieee80211_iterate_active_interfaces(g_mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                            iwl_mvm_fm_chan_vldt_iter, &chan_ifaces);
-        if (!chan_ifaces.num_of_vif) {
-            IWL_DEBUG_EXTERNAL(g_mvm,
-                               "FM: Invalid channel list: frequency is not in use (freq = %d)\n",
-                               chan_txpwr_list[i].frequency);
-            return true;
-        }
+  /* Check that all of the channels are used */
+  for (i = 0; i < num_channels; i++) {
+    struct chan_ifaces chan_ifaces = {
+        .chan_txpwr = &chan_txpwr_list[i],
+        .num_of_vif = 0,
+    };
+    ieee80211_iterate_active_interfaces(g_mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                        iwl_mvm_fm_chan_vldt_iter, &chan_ifaces);
+    if (!chan_ifaces.num_of_vif) {
+      IWL_DEBUG_EXTERNAL(g_mvm, "FM: Invalid channel list: frequency is not in use (freq = %d)\n",
+                         chan_txpwr_list[i].frequency);
+      return true;
     }
+  }
 
-    return false;
+  return false;
 }
 
 /*
@@ -463,44 +492,56 @@
 static enum iui_fm_mitigation_status iwl_mvm_fm_wlan_mitigation(
     const enum iui_fm_macro_id macro_id, const struct iui_fm_mitigation* mitigation,
     const uint32_t sequence) {
-    enum iui_fm_mitigation_status ret;
-    struct iui_fm_wlan_mitigation* mit;
+  enum iui_fm_mitigation_status ret;
+  struct iui_fm_wlan_mitigation* mit;
 
-    if (WARN_ON(!g_mvm)) { return IUI_FM_MITIGATION_ERROR; }
+  if (WARN_ON(!g_mvm)) {
+    return IUI_FM_MITIGATION_ERROR;
+  }
 
-    if (macro_id != IUI_FM_MACRO_ID_WLAN || mitigation->type != IUI_FM_MITIGATION_TYPE_WLAN) {
-        ret = IUI_FM_MITIGATION_ERROR_INVALID_PARAM;
-        goto end;
-    }
+  if (macro_id != IUI_FM_MACRO_ID_WLAN || mitigation->type != IUI_FM_MITIGATION_TYPE_WLAN) {
+    ret = IUI_FM_MITIGATION_ERROR_INVALID_PARAM;
+    goto end;
+  }
 
-    mit = mitigation->info.wlan_mitigation;
+  mit = mitigation->info.wlan_mitigation;
 
-    if (iwl_mvm_fm_invalid_channel_list(mit)) { return IUI_FM_MITIGATION_ERROR_INVALID_PARAM; }
+  if (iwl_mvm_fm_invalid_channel_list(mit)) {
+    return IUI_FM_MITIGATION_ERROR_INVALID_PARAM;
+  }
 
-    IWL_DEBUG_EXTERNAL(g_mvm, "FM: fm mitigation callback bit mask 0x%x\n", mit->bitmask);
+  IWL_DEBUG_EXTERNAL(g_mvm, "FM: fm mitigation callback bit mask 0x%x\n", mit->bitmask);
 
-    ret = iwl_mvm_fm_mitig_dcdc(mit);
-    if (ret) { goto end; }
+  ret = iwl_mvm_fm_mitig_dcdc(mit);
+  if (ret) {
+    goto end;
+  }
 
-    /* Enable/Disable 2G coex mode */
-    ret = iwl_mvm_fm_2g_coex(mit);
-    if (ret) { goto end; }
+  /* Enable/Disable 2G coex mode */
+  ret = iwl_mvm_fm_2g_coex(mit);
+  if (ret) {
+    goto end;
+  }
 
-    /*
-     * Going to mitigate the Tx power of all stations using the channels in
-     * the channel list mit->channel_tx_pwr received from the FM.
-     */
-    ret = iwl_mvm_fm_mitig_txpwr(mit);
-    if (ret) { goto end; }
+  /*
+   * Going to mitigate the Tx power of all stations using the channels in
+   * the channel list mit->channel_tx_pwr received from the FM.
+   */
+  ret = iwl_mvm_fm_mitig_txpwr(mit);
+  if (ret) {
+    goto end;
+  }
 
-    ret = iwl_mvm_fm_mitig_adc_dac_freq(mit);
+  ret = iwl_mvm_fm_mitig_adc_dac_freq(mit);
 
-    if (ret) { goto end; }
+  if (ret) {
+    goto end;
+  }
 
-    ret = iwl_mvm_fm_mitig_rxgain_behavior(mit);
+  ret = iwl_mvm_fm_mitig_rxgain_behavior(mit);
 end:
-    IWL_DEBUG_EXTERNAL(g_mvm, "FM: fm mitigation callback %s\n", ret ? "failed" : "succeeded");
-    return ret;
+  IWL_DEBUG_EXTERNAL(g_mvm, "FM: fm mitigation callback %s\n", ret ? "failed" : "succeeded");
+  return ret;
 }
 
 /*
@@ -509,25 +550,27 @@
  * The check includes the channel frequency & BW.
  */
 static bool iwl_mvm_fm_channel_changed(struct iui_fm_wlan_info* winfo) {
-    int i, j;
-    bool changed;
+  int i, j;
+  bool changed;
 
-    for (i = 0; i < winfo->num_channels; i++) {
-        changed = true;
-        for (j = 0; j < last_chan_notif.num_channels; j++) {
-            /* The channel was not updated */
-            if (winfo->channel_info[i].frequency == last_chan_notif.channel_info[j].frequency &&
-                winfo->channel_info[i].bandwidth == last_chan_notif.channel_info[j].bandwidth) {
-                changed = false;
-                break;
-            }
-        }
-        /* Channel was updated */
-        if (changed) { return true; }
+  for (i = 0; i < winfo->num_channels; i++) {
+    changed = true;
+    for (j = 0; j < last_chan_notif.num_channels; j++) {
+      /* The channel was not updated */
+      if (winfo->channel_info[i].frequency == last_chan_notif.channel_info[j].frequency &&
+          winfo->channel_info[i].bandwidth == last_chan_notif.channel_info[j].bandwidth) {
+        changed = false;
+        break;
+      }
     }
+    /* Channel was updated */
+    if (changed) {
+      return true;
+    }
+  }
 
-    IWL_DEBUG_EXTERNAL(g_mvm, "FM: Channel list has not changed - not reporting\n");
-    return false;
+  IWL_DEBUG_EXTERNAL(g_mvm, "FM: Channel list has not changed - not reporting\n");
+  return false;
 }
 
 /*
@@ -546,59 +589,65 @@
  *
  */
 static void iwl_mvm_fm_remove_channels(struct iui_fm_wlan_info* winfo) {
-    int i, j;
-    bool found;
+  int i, j;
+  bool found;
 
-    for (i = 0; i < last_chan_notif.num_channels; i++) {
-        /* Can't report more channels since we are in the max */
-        if (winfo->num_channels == IUI_FM_WLAN_MAX_CHANNELS) { return; }
-        found = false;
-
-        if (last_chan_notif.channel_info[i].bandwidth == IUI_FM_WLAN_BW_INVALID) { continue; }
-        /*
-         * Search for the old reported channel in the new reported
-         * channels
-         */
-        for (j = 0; j < winfo->num_channels; j++) {
-            if (last_chan_notif.channel_info[i].frequency == winfo->channel_info[j].frequency) {
-                found = true;
-                break;
-            }
-        }
-        /*
-         * The old reported channel is not in the new ones (It was
-         * removed) - Adding it to the report.
-         */
-        if (!found) {
-            winfo->channel_info[winfo->num_channels].frequency =
-                last_chan_notif.channel_info[i].frequency;
-            winfo->channel_info[winfo->num_channels].bandwidth = IUI_FM_WLAN_BW_INVALID;
-            winfo->num_channels++;
-            IWL_DEBUG_EXTERNAL(g_mvm, "FM: reporting channel %d invalid\n",
-                               last_chan_notif.channel_info[i].frequency);
-        }
+  for (i = 0; i < last_chan_notif.num_channels; i++) {
+    /* Can't report more channels since we are in the max */
+    if (winfo->num_channels == IUI_FM_WLAN_MAX_CHANNELS) {
+      return;
     }
+    found = false;
+
+    if (last_chan_notif.channel_info[i].bandwidth == IUI_FM_WLAN_BW_INVALID) {
+      continue;
+    }
+    /*
+     * Search for the old reported channel in the new reported
+     * channels
+     */
+    for (j = 0; j < winfo->num_channels; j++) {
+      if (last_chan_notif.channel_info[i].frequency == winfo->channel_info[j].frequency) {
+        found = true;
+        break;
+      }
+    }
+    /*
+     * The old reported channel is not in the new ones (It was
+     * removed) - Adding it to the report.
+     */
+    if (!found) {
+      winfo->channel_info[winfo->num_channels].frequency =
+          last_chan_notif.channel_info[i].frequency;
+      winfo->channel_info[winfo->num_channels].bandwidth = IUI_FM_WLAN_BW_INVALID;
+      winfo->num_channels++;
+      IWL_DEBUG_EXTERNAL(g_mvm, "FM: reporting channel %d invalid\n",
+                         last_chan_notif.channel_info[i].frequency);
+    }
+  }
 }
 
 static void iwl_mvm_fm_notif_chan_change_wk(struct work_struct* wk) {
-    int ret;
-    struct iui_fm_freq_notification notification;
+  int ret;
+  struct iui_fm_freq_notification notification;
 
-    /* FM is enabled - but registration failed */
-    if (!g_mvm) { return; }
+  /* FM is enabled - but registration failed */
+  if (!g_mvm) {
+    return;
+  }
 
-    mutex_lock(&g_mvm->mutex);
+  mutex_lock(&g_mvm->mutex);
 
-    notification.type = IUI_FM_FREQ_NOTIFICATION_TYPE_WLAN;
-    notification.info.wlan_info = &last_chan_notif;
-    /* parameter not yet supported */
-    notification.info.wlan_info->wlan_adc_dac_freq = 0;
+  notification.type = IUI_FM_FREQ_NOTIFICATION_TYPE_WLAN;
+  notification.info.wlan_info = &last_chan_notif;
+  /* parameter not yet supported */
+  notification.info.wlan_info->wlan_adc_dac_freq = 0;
 
-    ret = iwl_mvm_fm_notify_frequency(debug_mode, IUI_FM_MACRO_ID_WLAN, &notification);
+  ret = iwl_mvm_fm_notify_frequency(debug_mode, IUI_FM_MACRO_ID_WLAN, &notification);
 
-    IWL_DEBUG_EXTERNAL(g_mvm, "FM: notified fm about channel change (fail = %d)\n", ret);
+  IWL_DEBUG_EXTERNAL(g_mvm, "FM: notified fm about channel change (fail = %d)\n", ret);
 
-    mutex_unlock(&g_mvm->mutex);
+  mutex_unlock(&g_mvm->mutex);
 }
 
 /*
@@ -606,54 +655,58 @@
  */
 void iwl_mvm_fm_notify_channel_change(struct ieee80211_chanctx_conf* ctx,
                                       enum iwl_fm_chan_change_action action) {
-    int i;
-    struct iui_fm_wlan_info winfo = {
-        .num_channels = 0,
-    };
-    struct chan_list chan_info = {
-        .winfo = &winfo,
-        .action = action,
-    };
+  int i;
+  struct iui_fm_wlan_info winfo = {
+      .num_channels = 0,
+  };
+  struct chan_list chan_info = {
+      .winfo = &winfo,
+      .action = action,
+  };
 
-    /* FM is enabled - but registration failed */
-    if (!g_mvm) { return; }
+  /* FM is enabled - but registration failed */
+  if (!g_mvm) {
+    return;
+  }
 
-    lockdep_assert_held(&g_mvm->mutex);
+  lockdep_assert_held(&g_mvm->mutex);
 
-    /*
-     * if notifying the FM about adding/removing a channel ctx we
-     * need to add this channel to the list before iterating over
-     * the channel list since the list is updated only after this
-     * function is called.
-     */
-    if (action != IWL_FM_CHANGE_CHANCTX) {
-        winfo.channel_info[0].frequency = MHZ_TO_KHZ(ctx->min_def.center_freq1);
-        winfo.channel_info[0].bandwidth = iwl_mvm_fm_get_bandwidth(ctx->min_def.width);
-        /* when removing a channel - report the BW invalid */
-        winfo.num_channels++;
-        if (action == IWL_FM_REMOVE_CHANCTX) {
-            winfo.channel_info[0].bandwidth = IUI_FM_WLAN_BW_INVALID;
-        }
+  /*
+   * if notifying the FM about adding/removing a channel ctx we
+   * need to add this channel to the list before iterating over
+   * the channel list since the list is updated only after this
+   * function is called.
+   */
+  if (action != IWL_FM_CHANGE_CHANCTX) {
+    winfo.channel_info[0].frequency = MHZ_TO_KHZ(ctx->min_def.center_freq1);
+    winfo.channel_info[0].bandwidth = iwl_mvm_fm_get_bandwidth(ctx->min_def.width);
+    /* when removing a channel - report the BW invalid */
+    winfo.num_channels++;
+    if (action == IWL_FM_REMOVE_CHANCTX) {
+      winfo.channel_info[0].bandwidth = IUI_FM_WLAN_BW_INVALID;
     }
+  }
 
-    /* finding all bandwidths of used channels for FM notification */
-    ieee80211_iter_chan_contexts_atomic(g_mvm->hw, iwl_mvm_fm_chan_iterator, &chan_info);
+  /* finding all bandwidths of used channels for FM notification */
+  ieee80211_iter_chan_contexts_atomic(g_mvm->hw, iwl_mvm_fm_chan_iterator, &chan_info);
 
-    iwl_mvm_fm_remove_channels(&winfo);
+  iwl_mvm_fm_remove_channels(&winfo);
 
-    for (i = 0; i < winfo.num_channels; i++)
-        IWL_DEBUG_EXTERNAL(g_mvm, "FM: notifying fm about: channel=%d bandwith=%d\n",
-                           winfo.channel_info[i].frequency, winfo.channel_info[i].bandwidth);
+  for (i = 0; i < winfo.num_channels; i++)
+    IWL_DEBUG_EXTERNAL(g_mvm, "FM: notifying fm about: channel=%d bandwidth=%d\n",
+                       winfo.channel_info[i].frequency, winfo.channel_info[i].bandwidth);
 
-    /* Do not report to FM if no change happened */
-    if (!iwl_mvm_fm_channel_changed(&winfo)) { return; }
+  /* Do not report to FM if no change happened */
+  if (!iwl_mvm_fm_channel_changed(&winfo)) {
+    return;
+  }
 
-    /* mark the change that we are reporting */
-    winfo.bitmask = WLAN_UPDATE;
-    /* Update the last notification to the FM */
-    memcpy(&last_chan_notif, &winfo, sizeof(struct iui_fm_wlan_info));
+  /* mark the change that we are reporting */
+  winfo.bitmask = WLAN_UPDATE;
+  /* Update the last notification to the FM */
+  memcpy(&last_chan_notif, &winfo, sizeof(struct iui_fm_wlan_info));
 
-    schedule_work(&fm_chan_notif_wk);
+  schedule_work(&fm_chan_notif_wk);
 }
 
 /*
@@ -662,42 +715,47 @@
  * Manager.
  */
 int iwl_mvm_fm_register(struct iwl_mvm* mvm) {
-    int ret;
+  int ret;
 
-    if (g_mvm) { return -EINVAL; }
+  if (g_mvm) {
+    return -EINVAL;
+  }
 
-    g_mvm = mvm;
-    INIT_WORK(&fm_chan_notif_wk, iwl_mvm_fm_notif_chan_change_wk);
+  g_mvm = mvm;
+  INIT_WORK(&fm_chan_notif_wk, iwl_mvm_fm_notif_chan_change_wk);
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    debug_mode = g_mvm->trans->dbg_cfg.fm_debug_mode;
+  debug_mode = g_mvm->trans->dbg_cfg.fm_debug_mode;
 #else
-    debug_mode = false;
+  debug_mode = false;
 #endif
 
-    ret =
-        iwl_mvm_fm_register_callback(debug_mode, IUI_FM_MACRO_ID_WLAN, iwl_mvm_fm_wlan_mitigation);
+  ret = iwl_mvm_fm_register_callback(debug_mode, IUI_FM_MACRO_ID_WLAN, iwl_mvm_fm_wlan_mitigation);
 
-    IWL_DEBUG_EXTERNAL(g_mvm, "FM: registering fm callback function (fail = %d)\n", ret);
-    if (ret) { g_mvm = NULL; }
+  IWL_DEBUG_EXTERNAL(g_mvm, "FM: registering fm callback function (fail = %d)\n", ret);
+  if (ret) {
+    g_mvm = NULL;
+  }
 
-    return ret ? -EINVAL : ret;
+  return ret ? -EINVAL : ret;
 }
 
 /*
  * Unregister the Frequency Mitigation Callback function implementation
  */
 int iwl_mvm_fm_unregister(struct iwl_mvm* mvm) {
-    int ret;
+  int ret;
 
-    if (g_mvm != mvm) { return 0; }
+  if (g_mvm != mvm) {
+    return 0;
+  }
 
-    ret = iwl_mvm_fm_register_callback(debug_mode, IUI_FM_MACRO_ID_WLAN, NULL);
+  ret = iwl_mvm_fm_register_callback(debug_mode, IUI_FM_MACRO_ID_WLAN, NULL);
 
-    IWL_DEBUG_EXTERNAL(g_mvm, "FM: unregistering fm callback function (fail = %d)\n", ret);
+  IWL_DEBUG_EXTERNAL(g_mvm, "FM: unregistering fm callback function (fail = %d)\n", ret);
 
-    cancel_work_sync(&fm_chan_notif_wk);
-    g_mvm = NULL;
+  cancel_work_sync(&fm_chan_notif_wk);
+  g_mvm = NULL;
 
-    return ret ? -EINVAL : ret;
+  return ret ? -EINVAL : ret;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fm-test.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fm-test.c
index 40a0b7a..2428e47 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fm-test.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fm-test.c
@@ -69,70 +69,77 @@
 
 static ssize_t iwl_mvm_fm_debug_mitigate_write(struct file* file, const char __user* user_buf,
                                                size_t count, loff_t* ppos) {
-    struct iui_fm_mitigation mitigation;
-    struct iui_fm_wlan_mitigation wm;
-    char buf[128];
-    size_t buf_size = sizeof(buf);
-    int mitigate_2g;
-    int ret;
-    int mitigate_dcdc;
+  struct iui_fm_mitigation mitigation;
+  struct iui_fm_wlan_mitigation wm;
+  char buf[128];
+  size_t buf_size = sizeof(buf);
+  int mitigate_2g;
+  int ret;
+  int mitigate_dcdc;
 
-    mitigation.info.wlan_mitigation = &wm;
-    mitigation.type = IUI_FM_MITIGATION_TYPE_WLAN;
+  mitigation.info.wlan_mitigation = &wm;
+  mitigation.type = IUI_FM_MITIGATION_TYPE_WLAN;
 
-    if (copy_from_user(buf, user_buf, buf_size)) { return -EFAULT; }
+  if (copy_from_user(buf, user_buf, buf_size)) {
+    return -EFAULT;
+  }
 
-    /* All platforms that are not xmm6321 & SOFIA 3G */
-    if (IUI_FM_WLAN_MAX_CHANNELS == 4) {
-        if (sscanf(buf, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d", &wm.num_channels,
-                   &wm.channel_tx_pwr[0].frequency, &wm.channel_tx_pwr[0].max_tx_pwr,
-                   &wm.channel_tx_pwr[1].frequency, &wm.channel_tx_pwr[1].max_tx_pwr,
-                   &wm.channel_tx_pwr[2].frequency, &wm.channel_tx_pwr[2].max_tx_pwr,
-                   &wm.channel_tx_pwr[3].frequency, &wm.channel_tx_pwr[3].max_tx_pwr,
-                   &mitigate_dcdc, &wm.dcdc_div0, &wm.dcdc_div1, &mitigate_2g,
-                   &wm.wlan_2g_coex_enable) != 14) {
-            return -EINVAL;
-        }
-    } else if (sscanf(buf, "%d,%d,%d,%d,%d", &wm.num_channels, &wm.channel_tx_pwr[0].frequency,
-                      &wm.channel_tx_pwr[0].max_tx_pwr, &wm.channel_tx_pwr[1].frequency,
-                      &wm.channel_tx_pwr[1].max_tx_pwr) != 5) {
-        return -EINVAL;
+  /* All platforms that are not xmm6321 & SOFIA 3G */
+  if (IUI_FM_WLAN_MAX_CHANNELS == 4) {
+    if (sscanf(buf, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d", &wm.num_channels,
+               &wm.channel_tx_pwr[0].frequency, &wm.channel_tx_pwr[0].max_tx_pwr,
+               &wm.channel_tx_pwr[1].frequency, &wm.channel_tx_pwr[1].max_tx_pwr,
+               &wm.channel_tx_pwr[2].frequency, &wm.channel_tx_pwr[2].max_tx_pwr,
+               &wm.channel_tx_pwr[3].frequency, &wm.channel_tx_pwr[3].max_tx_pwr, &mitigate_dcdc,
+               &wm.dcdc_div0, &wm.dcdc_div1, &mitigate_2g, &wm.wlan_2g_coex_enable) != 14) {
+      return -EINVAL;
     }
+  } else if (sscanf(buf, "%d,%d,%d,%d,%d", &wm.num_channels, &wm.channel_tx_pwr[0].frequency,
+                    &wm.channel_tx_pwr[0].max_tx_pwr, &wm.channel_tx_pwr[1].frequency,
+                    &wm.channel_tx_pwr[1].max_tx_pwr) != 5) {
+    return -EINVAL;
+  }
 
-    if (IUI_FM_WLAN_MAX_CHANNELS < wm.num_channels) { return -EINVAL; }
+  if (IUI_FM_WLAN_MAX_CHANNELS < wm.num_channels) {
+    return -EINVAL;
+  }
 
-    wm.wlan_adc_dac_freq = 0;
-    wm.rx_gain_behavior = IUI_FM_WLAN_RX_GAIN_NORMAL;
+  wm.wlan_adc_dac_freq = 0;
+  wm.rx_gain_behavior = IUI_FM_WLAN_RX_GAIN_NORMAL;
 
-    wm.bitmask = 0;
+  wm.bitmask = 0;
 
-    /* Set bit bitmask to indicate the required mitigations */
-    if (wm.num_channels || mitigate_2g) { wm.bitmask |= WLAN_MITI; }
-    if (mitigate_dcdc) { wm.bitmask |= DCDC_MITI; }
+  /* Set bit bitmask to indicate the required mitigations */
+  if (wm.num_channels || mitigate_2g) {
+    wm.bitmask |= WLAN_MITI;
+  }
+  if (mitigate_dcdc) {
+    wm.bitmask |= DCDC_MITI;
+  }
 
-    ret = fm_callback(IUI_FM_MACRO_ID_WLAN, &mitigation, 0);
-    pr_info("FM[test-mode]: mitigation callback %s (bitmask = 0x%x)\n",
-            ret ? "failed" : "succeeded", wm.bitmask);
+  ret = fm_callback(IUI_FM_MACRO_ID_WLAN, &mitigation, 0);
+  pr_info("FM[test-mode]: mitigation callback %s (bitmask = 0x%x)\n", ret ? "failed" : "succeeded",
+          wm.bitmask);
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_mvm_fm_debug_notify_read(struct file* file, char __user* userbuf, size_t count,
                                             loff_t* ppos) {
-    char buf[512];
-    int bufsz = sizeof(buf);
-    int pos = 0;
-    uint8_t i;
+  char buf[512];
+  int bufsz = sizeof(buf);
+  int pos = 0;
+  uint8_t i;
 
-    pos += scnprintf(buf + pos, bufsz - pos, "num_channels=%d\n", fm_notif.num_channels);
-    for (i = 0; i < fm_notif.num_channels; i++)
-        pos += scnprintf(buf + pos, bufsz - pos, "channel=%d, bandwidth=%d\n",
-                         fm_notif.channel_info[i].frequency, fm_notif.channel_info[i].bandwidth);
+  pos += scnprintf(buf + pos, bufsz - pos, "num_channels=%d\n", fm_notif.num_channels);
+  for (i = 0; i < fm_notif.num_channels; i++)
+    pos += scnprintf(buf + pos, bufsz - pos, "channel=%d, bandwidth=%d\n",
+                     fm_notif.channel_info[i].frequency, fm_notif.channel_info[i].bandwidth);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "dcdc_div0=%d\n", fm_notif.dcdc_div0);
-    pos += scnprintf(buf + pos, bufsz - pos, "dcdc_div1=%d\n", fm_notif.dcdc_div1);
+  pos += scnprintf(buf + pos, bufsz - pos, "dcdc_div0=%d\n", fm_notif.dcdc_div0);
+  pos += scnprintf(buf + pos, bufsz - pos, "dcdc_div1=%d\n", fm_notif.dcdc_div1);
 
-    return simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+  return simple_read_from_buffer(userbuf, count, ppos, buf, pos);
 }
 
 static const struct file_operations fm_debug_mitigate_ops = {
@@ -148,57 +155,62 @@
 };
 
 static int iwl_mvm_fm_create_debugfs(void) {
-    struct dentry* entry;
+  struct dentry* entry;
 
-    fm_debug_dir = debugfs_create_dir("frq_mgr", NULL);
+  fm_debug_dir = debugfs_create_dir("frq_mgr", NULL);
 
-    if (!fm_debug_dir) { goto err; }
+  if (!fm_debug_dir) {
+    goto err;
+  }
 
-    entry = debugfs_create_file("mitigate", S_IWUSR, fm_debug_dir, NULL, &fm_debug_mitigate_ops);
-    if (!entry) { goto err; }
+  entry = debugfs_create_file("mitigate", S_IWUSR, fm_debug_dir, NULL, &fm_debug_mitigate_ops);
+  if (!entry) {
+    goto err;
+  }
 
-    entry = debugfs_create_file("notify", S_IRUSR, fm_debug_dir, NULL, &fm_debug_notify_ops);
-    if (!entry) { goto err; }
+  entry = debugfs_create_file("notify", S_IRUSR, fm_debug_dir, NULL, &fm_debug_notify_ops);
+  if (!entry) {
+    goto err;
+  }
 
-    return 0;
+  return 0;
 err:
-    pr_info("FM: Could not create debugfs entries\n");
-    debugfs_remove_recursive(fm_debug_dir);
-    return -1;
+  pr_info("FM: Could not create debugfs entries\n");
+  debugfs_remove_recursive(fm_debug_dir);
+  return -1;
 }
 
 int32_t iwl_mvm_fm_test_register_callback(const enum iui_fm_macro_id macro_id,
                                           const iui_fm_mitigation_cb mitigation_cb) {
-    int ret = 0;
+  int ret = 0;
 
-    fm_callback = mitigation_cb;
+  fm_callback = mitigation_cb;
 
-    /* Unregister fm callback */
-    if (!mitigation_cb) {
-        debugfs_remove_recursive(fm_debug_dir);
-        goto end;
-    }
+  /* Unregister fm callback */
+  if (!mitigation_cb) {
+    debugfs_remove_recursive(fm_debug_dir);
+    goto end;
+  }
 
-    /* Register fm callback */
-    if (iwl_mvm_fm_create_debugfs()) {
-        ret = -1;
-        goto end;
-    }
+  /* Register fm callback */
+  if (iwl_mvm_fm_create_debugfs()) {
+    ret = -1;
+    goto end;
+  }
 
 end:
-    pr_info("FM[test-mode]: %sregistering fm callback function (fail = %d)\n", ret ? "un" : "",
-            ret);
-    return ret;
+  pr_info("FM[test-mode]: %sregistering fm callback function (fail = %d)\n", ret ? "un" : "", ret);
+  return ret;
 }
 
 int32_t iwl_mvm_fm_test_notify_frequency(
     const enum iui_fm_macro_id macro_id,
     const struct iui_fm_freq_notification* const notification) {
-    /* Platform does not have a FM or test mode was requested */
-    memcpy(&fm_notif, notification->info.wlan_info, sizeof(struct iui_fm_wlan_info));
+  /* Platform does not have a FM or test mode was requested */
+  memcpy(&fm_notif, notification->info.wlan_info, sizeof(struct iui_fm_wlan_info));
 
-    pr_info("FM[test-mode]: notifying fm about change (mask = 0x%x)\n",
-            notification->info.wlan_info->bitmask);
+  pr_info("FM[test-mode]: notifying fm about change (mask = 0x%x)\n",
+          notification->info.wlan_info->bitmask);
 
-    return 0;
+  return 0;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fw.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fw.c
index 0df2bf6..b2b2245 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fw.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fw.c
@@ -34,18 +34,17 @@
  *
  *****************************************************************************/
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/acpi.h"
+#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/dbg.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/img.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-debug.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
-#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-op-mode.h"
-#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-prph.h"
-#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h"
-
-#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/dbg.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-modparams.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-nvm-parse.h"
+#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-op-mode.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-phy-db.h"
+#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-prph.h"
+#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mvm.h"
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/testmode.h"
@@ -58,11 +57,11 @@
 #define UCODE_VALID_OK cpu_to_le32(0x1)
 
 struct iwl_mvm_alive_data {
-    bool valid;
-    uint32_t scd_base_addr;
+  bool valid;
+  uint32_t scd_base_addr;
 };
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 /* set device type and latency */
 static int iwl_set_soc_latency(struct iwl_mvm* mvm) {
     struct iwl_soc_configuration_cmd cmd;
@@ -494,8 +493,8 @@
 #endif  // NEEDS_PORTING
 
 zx_status_t iwl_run_init_mvm_ucode(struct iwl_mvm* mvm, bool read_nvm) {
-    return ZX_ERR_NOT_SUPPORTED;
-#if 0   // NEEDS_PORTING
+  return ZX_ERR_NOT_SUPPORTED;
+#if 0  // NEEDS_PORTING
     struct iwl_notification_wait calib_wait;
     static const uint16_t init_complete[] = {INIT_COMPLETE_NOTIF, CALIB_RES_NOTIF_PHY_DB};
     int ret;
@@ -598,7 +597,7 @@
 #endif  // NEEDS_PORTING
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static int iwl_mvm_config_ltr(struct iwl_mvm* mvm) {
     struct iwl_ltr_config_cmd cmd = {
         .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/led.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/led.c
index aa49693..3b71403 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/led.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/led.c
@@ -33,103 +33,114 @@
  *****************************************************************************/
 
 #include <linux/leds.h>
+
 #include "iwl-csr.h"
 #include "iwl-io.h"
 #include "mvm.h"
 
 static void iwl_mvm_send_led_fw_cmd(struct iwl_mvm* mvm, bool on) {
-    struct iwl_led_cmd led_cmd = {
-        .status = cpu_to_le32(on),
-    };
-    struct iwl_host_cmd cmd = {
-        .id = WIDE_ID(LONG_GROUP, LEDS_CMD),
-        .len =
-            {
-                sizeof(led_cmd),
-            },
-        .data =
-            {
-                &led_cmd,
-            },
-        .flags = CMD_ASYNC,
-    };
-    int err;
+  struct iwl_led_cmd led_cmd = {
+      .status = cpu_to_le32(on),
+  };
+  struct iwl_host_cmd cmd = {
+      .id = WIDE_ID(LONG_GROUP, LEDS_CMD),
+      .len =
+          {
+              sizeof(led_cmd),
+          },
+      .data =
+          {
+              &led_cmd,
+          },
+      .flags = CMD_ASYNC,
+  };
+  int err;
 
-    if (!iwl_mvm_firmware_running(mvm)) { return; }
+  if (!iwl_mvm_firmware_running(mvm)) {
+    return;
+  }
 
-    err = iwl_mvm_send_cmd(mvm, &cmd);
+  err = iwl_mvm_send_cmd(mvm, &cmd);
 
-    if (err) { IWL_WARN(mvm, "LED command failed: %d\n", err); }
+  if (err) {
+    IWL_WARN(mvm, "LED command failed: %d\n", err);
+  }
 }
 
 static void iwl_mvm_led_set(struct iwl_mvm* mvm, bool on) {
-    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT)) {
-        iwl_mvm_send_led_fw_cmd(mvm, on);
-        return;
-    }
+  if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT)) {
+    iwl_mvm_send_led_fw_cmd(mvm, on);
+    return;
+  }
 
-    iwl_write32(mvm->trans, CSR_LED_REG, on ? CSR_LED_REG_TURN_ON : CSR_LED_REG_TURN_OFF);
+  iwl_write32(mvm->trans, CSR_LED_REG, on ? CSR_LED_REG_TURN_ON : CSR_LED_REG_TURN_OFF);
 }
 
 static void iwl_led_brightness_set(struct led_classdev* led_cdev, enum led_brightness brightness) {
-    struct iwl_mvm* mvm = container_of(led_cdev, struct iwl_mvm, led);
+  struct iwl_mvm* mvm = container_of(led_cdev, struct iwl_mvm, led);
 
-    iwl_mvm_led_set(mvm, brightness > 0);
+  iwl_mvm_led_set(mvm, brightness > 0);
 }
 
 int iwl_mvm_leds_init(struct iwl_mvm* mvm) {
-    int mode = iwlwifi_mod_params.led_mode;
-    int ret;
+  int mode = iwlwifi_mod_params.led_mode;
+  int ret;
 
-    switch (mode) {
+  switch (mode) {
     case IWL_LED_BLINK:
-        IWL_ERR(mvm, "Blink led mode not supported, used default\n");
+      IWL_ERR(mvm, "Blink led mode not supported, used default\n");
     case IWL_LED_DEFAULT:
     case IWL_LED_RF_STATE:
-        mode = IWL_LED_RF_STATE;
-        break;
+      mode = IWL_LED_RF_STATE;
+      break;
     case IWL_LED_DISABLE:
-        IWL_INFO(mvm, "Led disabled\n");
-        return 0;
+      IWL_INFO(mvm, "Led disabled\n");
+      return 0;
     default:
-        return -EINVAL;
-    }
+      return -EINVAL;
+  }
 
-    mvm->led.name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name(mvm->hw->wiphy));
-    mvm->led.brightness_set = iwl_led_brightness_set;
-    mvm->led.max_brightness = 1;
+  mvm->led.name = kasprintf(GFP_KERNEL, "%s-led", wiphy_name(mvm->hw->wiphy));
+  mvm->led.brightness_set = iwl_led_brightness_set;
+  mvm->led.max_brightness = 1;
 
-    if (mode == IWL_LED_RF_STATE) {
-        mvm->led.default_trigger = ieee80211_get_radio_led_name(mvm->hw);
-    }
+  if (mode == IWL_LED_RF_STATE) {
+    mvm->led.default_trigger = ieee80211_get_radio_led_name(mvm->hw);
+  }
 
-    ret = led_classdev_register(mvm->trans->dev, &mvm->led);
-    if (ret) {
-        kfree(mvm->led.name);
-        IWL_INFO(mvm, "Failed to enable led\n");
-        return ret;
-    }
+  ret = led_classdev_register(mvm->trans->dev, &mvm->led);
+  if (ret) {
+    kfree(mvm->led.name);
+    IWL_INFO(mvm, "Failed to enable led\n");
+    return ret;
+  }
 
-    mvm->init_status |= IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
-    return 0;
+  mvm->init_status |= IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
+  return 0;
 }
 
 void iwl_mvm_leds_sync(struct iwl_mvm* mvm) {
-    if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE)) { return; }
+  if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE)) {
+    return;
+  }
 
-    /*
-     * if we control through the register, we're doing it
-     * even when the firmware isn't up, so no need to sync
-     */
-    if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) { return; }
+  /*
+   * if we control through the register, we're doing it
+   * even when the firmware isn't up, so no need to sync
+   */
+  if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
+    return;
+  }
 
-    iwl_mvm_led_set(mvm, mvm->led.brightness > 0);
+  iwl_mvm_led_set(mvm, mvm->led.brightness > 0);
 }
 
 void iwl_mvm_leds_exit(struct iwl_mvm* mvm) {
-    if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE)) { return; }
+  if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE)) {
+    return;
+  }
 
-    led_classdev_unregister(&mvm->led);
-    kfree(mvm->led.name);
-    mvm->init_status &= ~IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
+  led_classdev_unregister(&mvm->led);
+  kfree(mvm->led.name);
+  mvm->init_status &= ~IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/lte-coex.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/lte-coex.h
index 5899289..37f3a24 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/lte-coex.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/lte-coex.h
@@ -53,7 +53,7 @@
 #define LTE_OTHER_ERR 4
 
 struct lte_coex_state_cmd {
-    __u8 lte_state;
+  __u8 lte_state;
 } __packed;
 
 #define LTE_MWS_CONF_LENGTH 12
@@ -61,8 +61,8 @@
 #define LTE_SAFE_PT_FIRST -128
 #define LTE_SAFE_PT_LAST 127
 struct lte_coex_config_info_cmd {
-    __u32 mws_conf_data[LTE_MWS_CONF_LENGTH];
-    __s8 safe_power_table[LTE_SAFE_PT_LENGTH];
+  __u32 mws_conf_data[LTE_MWS_CONF_LENGTH];
+  __s8 safe_power_table[LTE_SAFE_PT_LENGTH];
 } __packed;
 
 #define LTE_CONNECTED_BANDS_LENGTH 8
@@ -73,17 +73,17 @@
 #define LTE_MAX_TX_MIN 0
 #define LTE_MAX_TX_MAX 31
 struct lte_coex_dynamic_info_cmd {
-    __u32 lte_connected_bands[LTE_CONNECTED_BANDS_LENGTH];
-    __u32 lte_frame_structure[LTE_FRAME_STRUCT_LENGTH];
-    __u16 wifi_tx_safe_freq_min;
-    __u16 wifi_tx_safe_freq_max;
-    __u16 wifi_rx_safe_freq_min;
-    __u16 wifi_rx_safe_freq_max;
-    __u8 wifi_max_tx_power[LTE_TX_POWER_LENGTH];
+  __u32 lte_connected_bands[LTE_CONNECTED_BANDS_LENGTH];
+  __u32 lte_frame_structure[LTE_FRAME_STRUCT_LENGTH];
+  __u16 wifi_tx_safe_freq_min;
+  __u16 wifi_tx_safe_freq_max;
+  __u16 wifi_rx_safe_freq_min;
+  __u16 wifi_rx_safe_freq_max;
+  __u8 wifi_max_tx_power[LTE_TX_POWER_LENGTH];
 } __packed;
 
 struct lte_coex_sps_info_cmd {
-    __u32 sps_info;
+  __u32 sps_info;
 } __packed;
 
 #define LTE_RC_CHAN_MIN 1
@@ -91,8 +91,8 @@
 #define LTE_RC_BW_MIN 0
 #define LTE_RC_BW_MAX 3
 struct lte_coex_wifi_reported_chan_cmd {
-    __u8 chan;
-    __u8 bandwidth;
+  __u8 chan;
+  __u8 bandwidth;
 } __packed;
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_MVM_LTE_COEX_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mac-ctxt.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mac-ctxt.c
index 90b825f..2188b07 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mac-ctxt.c
@@ -36,6 +36,7 @@
 
 #include <linux/etherdevice.h>
 #include <net/mac80211.h>
+
 #include "fw-api.h"
 #include "iwl-io.h"
 #include "iwl-prph.h"
@@ -56,895 +57,934 @@
 };
 
 struct iwl_mvm_mac_iface_iterator_data {
-    struct iwl_mvm* mvm;
-    struct ieee80211_vif* vif;
-    unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
-    unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
-    enum iwl_tsf_id preferred_tsf;
-    bool found_vif;
+  struct iwl_mvm* mvm;
+  struct ieee80211_vif* vif;
+  unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
+  unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
+  enum iwl_tsf_id preferred_tsf;
+  bool found_vif;
 };
 
 static void iwl_mvm_mac_tsf_id_iter(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_mac_iface_iterator_data* data = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    uint16_t min_bi;
+  struct iwl_mvm_mac_iface_iterator_data* data = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  uint16_t min_bi;
 
-    /* Skip the interface for which we are trying to assign a tsf_id  */
-    if (vif == data->vif) { return; }
+  /* Skip the interface for which we are trying to assign a tsf_id  */
+  if (vif == data->vif) {
+    return;
+  }
 
-    /*
-     * The TSF is a hardware/firmware resource, there are 4 and
-     * the driver should assign and free them as needed. However,
-     * there are cases where 2 MACs should share the same TSF ID
-     * for the purpose of clock sync, an optimization to avoid
-     * clock drift causing overlapping TBTTs/DTIMs for a GO and
-     * client in the system.
-     *
-     * The firmware will decide according to the MAC type which
-     * will be the master and slave. Clients that need to sync
-     * with a remote station will be the master, and an AP or GO
-     * will be the slave.
-     *
-     * Depending on the new interface type it can be slaved to
-     * or become the master of an existing interface.
-     */
-    switch (data->vif->type) {
+  /*
+   * The TSF is a hardware/firmware resource, there are 4 and
+   * the driver should assign and free them as needed. However,
+   * there are cases where 2 MACs should share the same TSF ID
+   * for the purpose of clock sync, an optimization to avoid
+   * clock drift causing overlapping TBTTs/DTIMs for a GO and
+   * client in the system.
+   *
+   * The firmware will decide according to the MAC type which
+   * will be the master and slave. Clients that need to sync
+   * with a remote station will be the master, and an AP or GO
+   * will be the slave.
+   *
+   * Depending on the new interface type it can be slaved to
+   * or become the master of an existing interface.
+   */
+  switch (data->vif->type) {
     case NL80211_IFTYPE_STATION:
-        /*
-         * The new interface is a client, so if the one we're iterating
-         * is an AP, and the beacon interval of the AP is a multiple or
-         * divisor of the beacon interval of the client, the same TSF
-         * should be used to avoid drift between the new client and
-         * existing AP. The existing AP will get drift updates from the
-         * new client context in this case.
-         */
-        if (vif->type != NL80211_IFTYPE_AP || data->preferred_tsf != NUM_TSF_IDS ||
-            !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) {
-            break;
-        }
-
-        min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int);
-
-        if (!min_bi) { break; }
-
-        if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) {
-            data->preferred_tsf = mvmvif->tsf_id;
-            return;
-        }
+      /*
+       * The new interface is a client, so if the one we're iterating
+       * is an AP, and the beacon interval of the AP is a multiple or
+       * divisor of the beacon interval of the client, the same TSF
+       * should be used to avoid drift between the new client and
+       * existing AP. The existing AP will get drift updates from the
+       * new client context in this case.
+       */
+      if (vif->type != NL80211_IFTYPE_AP || data->preferred_tsf != NUM_TSF_IDS ||
+          !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) {
         break;
+      }
+
+      min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int);
+
+      if (!min_bi) {
+        break;
+      }
+
+      if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) {
+        data->preferred_tsf = mvmvif->tsf_id;
+        return;
+      }
+      break;
 
     case NL80211_IFTYPE_AP:
-        /*
-         * The new interface is AP/GO, so if its beacon interval is a
-         * multiple or a divisor of the beacon interval of an existing
-         * interface, it should get drift updates from an existing
-         * client or use the same TSF as an existing GO. There's no
-         * drift between TSFs internally but if they used different
-         * TSFs then a new client MAC could update one of them and
-         * cause drift that way.
-         */
-        if ((vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_STATION) ||
-            data->preferred_tsf != NUM_TSF_IDS ||
-            !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) {
-            break;
-        }
-
-        min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int);
-
-        if (!min_bi) { break; }
-
-        if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) {
-            data->preferred_tsf = mvmvif->tsf_id;
-            return;
-        }
+      /*
+       * The new interface is AP/GO, so if its beacon interval is a
+       * multiple or a divisor of the beacon interval of an existing
+       * interface, it should get drift updates from an existing
+       * client or use the same TSF as an existing GO. There's no
+       * drift between TSFs internally but if they used different
+       * TSFs then a new client MAC could update one of them and
+       * cause drift that way.
+       */
+      if ((vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_STATION) ||
+          data->preferred_tsf != NUM_TSF_IDS ||
+          !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) {
         break;
+      }
+
+      min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int);
+
+      if (!min_bi) {
+        break;
+      }
+
+      if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) {
+        data->preferred_tsf = mvmvif->tsf_id;
+        return;
+      }
+      break;
     default:
-        /*
-         * For all other interface types there's no need to
-         * take drift into account. Either they're exclusive
-         * like IBSS and monitor, or we don't care much about
-         * their TSF (like P2P Device), but we won't be able
-         * to share the TSF resource.
-         */
-        break;
-    }
+      /*
+       * For all other interface types there's no need to
+       * take drift into account. Either they're exclusive
+       * like IBSS and monitor, or we don't care much about
+       * their TSF (like P2P Device), but we won't be able
+       * to share the TSF resource.
+       */
+      break;
+  }
 
-    /*
-     * Unless we exited above, we can't share the TSF resource
-     * that the virtual interface we're iterating over is using
-     * with the new one, so clear the available bit and if this
-     * was the preferred one, reset that as well.
-     */
-    __clear_bit(mvmvif->tsf_id, data->available_tsf_ids);
+  /*
+   * Unless we exited above, we can't share the TSF resource
+   * that the virtual interface we're iterating over is using
+   * with the new one, so clear the available bit and if this
+   * was the preferred one, reset that as well.
+   */
+  __clear_bit(mvmvif->tsf_id, data->available_tsf_ids);
 
-    if (data->preferred_tsf == mvmvif->tsf_id) { data->preferred_tsf = NUM_TSF_IDS; }
+  if (data->preferred_tsf == mvmvif->tsf_id) {
+    data->preferred_tsf = NUM_TSF_IDS;
+  }
 }
 
 static void iwl_mvm_mac_iface_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_mac_iface_iterator_data* data = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_mac_iface_iterator_data* data = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    /* Iterator may already find the interface being added -- skip it */
-    if (vif == data->vif) {
-        data->found_vif = true;
-        return;
-    }
+  /* Iterator may already find the interface being added -- skip it */
+  if (vif == data->vif) {
+    data->found_vif = true;
+    return;
+  }
 
-    /* Mark MAC IDs as used by clearing the available bit, and
-     * (below) mark TSFs as used if their existing use is not
-     * compatible with the new interface type.
-     * No locking or atomic bit operations are needed since the
-     * data is on the stack of the caller function.
-     */
-    __clear_bit(mvmvif->id, data->available_mac_ids);
+  /* Mark MAC IDs as used by clearing the available bit, and
+   * (below) mark TSFs as used if their existing use is not
+   * compatible with the new interface type.
+   * No locking or atomic bit operations are needed since the
+   * data is on the stack of the caller function.
+   */
+  __clear_bit(mvmvif->id, data->available_mac_ids);
 
-    /* find a suitable tsf_id */
-    iwl_mvm_mac_tsf_id_iter(_data, mac, vif);
+  /* find a suitable tsf_id */
+  iwl_mvm_mac_tsf_id_iter(_data, mac, vif);
 }
 
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_mac_iface_iterator_data data = {
-        .mvm = mvm,
-        .vif = vif,
-        .available_tsf_ids = {(1 << NUM_TSF_IDS) - 1},
-        /* no preference yet */
-        .preferred_tsf = NUM_TSF_IDS,
-    };
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_mac_iface_iterator_data data = {
+      .mvm = mvm,
+      .vif = vif,
+      .available_tsf_ids = {(1 << NUM_TSF_IDS) - 1},
+      /* no preference yet */
+      .preferred_tsf = NUM_TSF_IDS,
+  };
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-                                               iwl_mvm_mac_tsf_id_iter, &data);
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+                                             iwl_mvm_mac_tsf_id_iter, &data);
 
-    if (data.preferred_tsf != NUM_TSF_IDS) {
-        mvmvif->tsf_id = data.preferred_tsf;
-    } else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids)) {
-        mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, NUM_TSF_IDS);
-    }
+  if (data.preferred_tsf != NUM_TSF_IDS) {
+    mvmvif->tsf_id = data.preferred_tsf;
+  } else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids)) {
+    mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, NUM_TSF_IDS);
+  }
 }
 
 int iwl_mvm_mac_ctxt_init(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_mac_iface_iterator_data data = {
-        .mvm = mvm,
-        .vif = vif,
-        .available_mac_ids = {(1 << NUM_MAC_INDEX_DRIVER) - 1},
-        .available_tsf_ids = {(1 << NUM_TSF_IDS) - 1},
-        /* no preference yet */
-        .preferred_tsf = NUM_TSF_IDS,
-        .found_vif = false,
-    };
-    uint32_t ac;
-    int ret, i, queue_limit;
-    unsigned long used_hw_queues;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_mac_iface_iterator_data data = {
+      .mvm = mvm,
+      .vif = vif,
+      .available_mac_ids = {(1 << NUM_MAC_INDEX_DRIVER) - 1},
+      .available_tsf_ids = {(1 << NUM_TSF_IDS) - 1},
+      /* no preference yet */
+      .preferred_tsf = NUM_TSF_IDS,
+      .found_vif = false,
+  };
+  uint32_t ac;
+  int ret, i, queue_limit;
+  unsigned long used_hw_queues;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /*
-     * Allocate a MAC ID and a TSF for this MAC, along with the queues
-     * and other resources.
-     */
+  /*
+   * Allocate a MAC ID and a TSF for this MAC, along with the queues
+   * and other resources.
+   */
 
-    /*
-     * Before the iterator, we start with all MAC IDs and TSFs available.
-     *
-     * During iteration, all MAC IDs are cleared that are in use by other
-     * virtual interfaces, and all TSF IDs are cleared that can't be used
-     * by this new virtual interface because they're used by an interface
-     * that can't share it with the new one.
-     * At the same time, we check if there's a preferred TSF in the case
-     * that we should share it with another interface.
-     */
+  /*
+   * Before the iterator, we start with all MAC IDs and TSFs available.
+   *
+   * During iteration, all MAC IDs are cleared that are in use by other
+   * virtual interfaces, and all TSF IDs are cleared that can't be used
+   * by this new virtual interface because they're used by an interface
+   * that can't share it with the new one.
+   * At the same time, we check if there's a preferred TSF in the case
+   * that we should share it with another interface.
+   */
 
-    /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
-    switch (vif->type) {
+  /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
+  switch (vif->type) {
     case NL80211_IFTYPE_ADHOC:
-        break;
+      break;
     case NL80211_IFTYPE_STATION:
-        if (!vif->p2p) { break; }
+      if (!vif->p2p) {
+        break;
+      }
     /* fall through */
     default:
-        __clear_bit(0, data.available_mac_ids);
-    }
+      __clear_bit(0, data.available_mac_ids);
+  }
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-                                               iwl_mvm_mac_iface_iterator, &data);
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+                                             iwl_mvm_mac_iface_iterator, &data);
 
-    /*
-     * In the case we're getting here during resume, it's similar to
-     * firmware restart, and with RESUME_ALL the iterator will find
-     * the vif being added already.
-     * We don't want to reassign any IDs in either case since doing
-     * so would probably assign different IDs (as interfaces aren't
-     * necessarily added in the same order), but the old IDs were
-     * preserved anyway, so skip ID assignment for both resume and
-     * recovery.
-     */
-    if (data.found_vif) { return 0; }
+  /*
+   * In the case we're getting here during resume, it's similar to
+   * firmware restart, and with RESUME_ALL the iterator will find
+   * the vif being added already.
+   * We don't want to reassign any IDs in either case since doing
+   * so would probably assign different IDs (as interfaces aren't
+   * necessarily added in the same order), but the old IDs were
+   * preserved anyway, so skip ID assignment for both resume and
+   * recovery.
+   */
+  if (data.found_vif) {
+    return 0;
+  }
 
-    /* Therefore, in recovery, we can't get here */
-    if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) { return -EBUSY; }
+  /* Therefore, in recovery, we can't get here */
+  if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) {
+    return -EBUSY;
+  }
 
-    mvmvif->id = find_first_bit(data.available_mac_ids, NUM_MAC_INDEX_DRIVER);
-    if (mvmvif->id == NUM_MAC_INDEX_DRIVER) {
-        IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n");
-        ret = -EIO;
-        goto exit_fail;
-    }
+  mvmvif->id = find_first_bit(data.available_mac_ids, NUM_MAC_INDEX_DRIVER);
+  if (mvmvif->id == NUM_MAC_INDEX_DRIVER) {
+    IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n");
+    ret = -EIO;
+    goto exit_fail;
+  }
 
-    if (data.preferred_tsf != NUM_TSF_IDS) {
-        mvmvif->tsf_id = data.preferred_tsf;
-    } else {
-        mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, NUM_TSF_IDS);
-    }
-    if (mvmvif->tsf_id == NUM_TSF_IDS) {
-        IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n");
-        ret = -EIO;
-        goto exit_fail;
-    }
+  if (data.preferred_tsf != NUM_TSF_IDS) {
+    mvmvif->tsf_id = data.preferred_tsf;
+  } else {
+    mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, NUM_TSF_IDS);
+  }
+  if (mvmvif->tsf_id == NUM_TSF_IDS) {
+    IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n");
+    ret = -EIO;
+    goto exit_fail;
+  }
 
-    mvmvif->color = 0;
+  mvmvif->color = 0;
 
-    INIT_LIST_HEAD(&mvmvif->time_event_data.list);
-    mvmvif->time_event_data.id = TE_MAX;
+  INIT_LIST_HEAD(&mvmvif->time_event_data.list);
+  mvmvif->time_event_data.id = TE_MAX;
 
-    /* No need to allocate data queues to P2P Device MAC and NAN.*/
-    if (vif->type == NL80211_IFTYPE_P2P_DEVICE || vif->type == NL80211_IFTYPE_NAN) {
-        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-            vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
-        }
-
-        return 0;
-    }
-
-    /*
-     * queues in mac80211 almost entirely independent of
-     * the ones here - no real limit
-     */
-    queue_limit = IEEE80211_MAX_QUEUES;
-
-    /*
-     * Find available queues, and allocate them to the ACs. When in
-     * DQA-mode they aren't really used, and this is done only so the
-     * mac80211 ieee80211_check_queues() function won't fail
-     */
+  /* No need to allocate data queues to P2P Device MAC and NAN.*/
+  if (vif->type == NL80211_IFTYPE_P2P_DEVICE || vif->type == NL80211_IFTYPE_NAN) {
     for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-        uint8_t queue = find_first_zero_bit(&used_hw_queues, queue_limit);
-
-        if (queue >= queue_limit) {
-            IWL_ERR(mvm, "Failed to allocate queue\n");
-            ret = -EIO;
-            goto exit_fail;
-        }
-
-        __set_bit(queue, &used_hw_queues);
-        vif->hw_queue[ac] = queue;
-    }
-
-    /* Allocate the CAB queue for softAP and GO interfaces */
-    if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) {
-        /*
-         * For TVQM this will be overwritten later with the FW assigned
-         * queue value (when queue is enabled).
-         */
-        mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
-    }
-
-    mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
-    mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA;
-    mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
-
-    for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
-        mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
+      vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
     }
 
     return 0;
+  }
+
+  /*
+   * queues in mac80211 are almost entirely independent of
+   * the ones here - no real limit
+   */
+  queue_limit = IEEE80211_MAX_QUEUES;
+
+  /*
+   * Find available queues, and allocate them to the ACs. When in
+   * DQA-mode they aren't really used, and this is done only so the
+   * mac80211 ieee80211_check_queues() function won't fail
+   */
+  for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+    uint8_t queue = find_first_zero_bit(&used_hw_queues, queue_limit);
+
+    if (queue >= queue_limit) {
+      IWL_ERR(mvm, "Failed to allocate queue\n");
+      ret = -EIO;
+      goto exit_fail;
+    }
+
+    __set_bit(queue, &used_hw_queues);
+    vif->hw_queue[ac] = queue;
+  }
+
+  /* Allocate the CAB queue for softAP and GO interfaces */
+  if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) {
+    /*
+     * For TVQM this will be overwritten later with the FW assigned
+     * queue value (when queue is enabled).
+     */
+    mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+  }
+
+  mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
+  mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA;
+  mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+
+  for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+    mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
+  }
+
+  return 0;
 
 exit_fail:
-    memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
-    return ret;
+  memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
+  return ret;
 }
 
 static void iwl_mvm_ack_rates(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                               enum nl80211_band band, uint8_t* cck_rates, uint8_t* ofdm_rates) {
-    struct ieee80211_supported_band* sband;
-    unsigned long basic = vif->bss_conf.basic_rates;
-    int lowest_present_ofdm = 100;
-    int lowest_present_cck = 100;
-    uint8_t cck = 0;
-    uint8_t ofdm = 0;
-    int i;
+  struct ieee80211_supported_band* sband;
+  unsigned long basic = vif->bss_conf.basic_rates;
+  int lowest_present_ofdm = 100;
+  int lowest_present_cck = 100;
+  uint8_t cck = 0;
+  uint8_t ofdm = 0;
+  int i;
 
-    sband = mvm->hw->wiphy->bands[band];
+  sband = mvm->hw->wiphy->bands[band];
 
-    for_each_set_bit(i, &basic, BITS_PER_LONG) {
-        int hw = sband->bitrates[i].hw_value;
-        if (hw >= IWL_FIRST_OFDM_RATE) {
-            ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
-            if (lowest_present_ofdm > hw) { lowest_present_ofdm = hw; }
-        } else {
-            BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+  for_each_set_bit(i, &basic, BITS_PER_LONG) {
+    int hw = sband->bitrates[i].hw_value;
+    if (hw >= IWL_FIRST_OFDM_RATE) {
+      ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+      if (lowest_present_ofdm > hw) {
+        lowest_present_ofdm = hw;
+      }
+    } else {
+      BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
 
-            cck |= BIT(hw);
-            if (lowest_present_cck > hw) { lowest_present_cck = hw; }
-        }
+      cck |= BIT(hw);
+      if (lowest_present_cck > hw) {
+        lowest_present_cck = hw;
+      }
     }
+  }
 
-    /*
-     * Now we've got the basic rates as bitmaps in the ofdm and cck
-     * variables. This isn't sufficient though, as there might not
-     * be all the right rates in the bitmap. E.g. if the only basic
-     * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
-     * and 6 Mbps because the 802.11-2007 standard says in 9.6:
-     *
-     *    [...] a STA responding to a received frame shall transmit
-     *    its Control Response frame [...] at the highest rate in the
-     *    BSSBasicRateSet parameter that is less than or equal to the
-     *    rate of the immediately previous frame in the frame exchange
-     *    sequence ([...]) and that is of the same modulation class
-     *    ([...]) as the received frame. If no rate contained in the
-     *    BSSBasicRateSet parameter meets these conditions, then the
-     *    control frame sent in response to a received frame shall be
-     *    transmitted at the highest mandatory rate of the PHY that is
-     *    less than or equal to the rate of the received frame, and
-     *    that is of the same modulation class as the received frame.
-     *
-     * As a consequence, we need to add all mandatory rates that are
-     * lower than all of the basic rates to these bitmaps.
-     */
+  /*
+   * Now we've got the basic rates as bitmaps in the ofdm and cck
+   * variables. This isn't sufficient though, as there might not
+   * be all the right rates in the bitmap. E.g. if the only basic
+   * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+   * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+   *
+   *    [...] a STA responding to a received frame shall transmit
+   *    its Control Response frame [...] at the highest rate in the
+   *    BSSBasicRateSet parameter that is less than or equal to the
+   *    rate of the immediately previous frame in the frame exchange
+   *    sequence ([...]) and that is of the same modulation class
+   *    ([...]) as the received frame. If no rate contained in the
+   *    BSSBasicRateSet parameter meets these conditions, then the
+   *    control frame sent in response to a received frame shall be
+   *    transmitted at the highest mandatory rate of the PHY that is
+   *    less than or equal to the rate of the received frame, and
+   *    that is of the same modulation class as the received frame.
+   *
+   * As a consequence, we need to add all mandatory rates that are
+   * lower than all of the basic rates to these bitmaps.
+   */
 
-    if (IWL_RATE_24M_INDEX < lowest_present_ofdm) {
-        ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
-    }
-    if (IWL_RATE_12M_INDEX < lowest_present_ofdm) {
-        ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
-    }
-    /* 6M already there or needed so always add */
-    ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
+  if (IWL_RATE_24M_INDEX < lowest_present_ofdm) {
+    ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
+  }
+  if (IWL_RATE_12M_INDEX < lowest_present_ofdm) {
+    ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
+  }
+  /* 6M already there or needed so always add */
+  ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
 
-    /*
-     * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
-     * Note, however:
-     *  - if no CCK rates are basic, it must be ERP since there must
-     *    be some basic rates at all, so they're OFDM => ERP PHY
-     *    (or we're in 5 GHz, and the cck bitmap will never be used)
-     *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
-     *  - if 5.5M is basic, 1M and 2M are mandatory
-     *  - if 2M is basic, 1M is mandatory
-     *  - if 1M is basic, that's the only valid ACK rate.
-     * As a consequence, it's not as complicated as it sounds, just add
-     * any lower rates to the ACK rate bitmap.
-     */
-    if (IWL_RATE_11M_INDEX < lowest_present_cck) {
-        cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
-    }
-    if (IWL_RATE_5M_INDEX < lowest_present_cck) {
-        cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
-    }
-    if (IWL_RATE_2M_INDEX < lowest_present_cck) {
-        cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
-    }
-    /* 1M already there or needed so always add */
-    cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
+  /*
+   * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+   * Note, however:
+   *  - if no CCK rates are basic, it must be ERP since there must
+   *    be some basic rates at all, so they're OFDM => ERP PHY
+   *    (or we're in 5 GHz, and the cck bitmap will never be used)
+   *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+   *  - if 5.5M is basic, 1M and 2M are mandatory
+   *  - if 2M is basic, 1M is mandatory
+   *  - if 1M is basic, that's the only valid ACK rate.
+   * As a consequence, it's not as complicated as it sounds, just add
+   * any lower rates to the ACK rate bitmap.
+   */
+  if (IWL_RATE_11M_INDEX < lowest_present_cck) {
+    cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
+  }
+  if (IWL_RATE_5M_INDEX < lowest_present_cck) {
+    cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
+  }
+  if (IWL_RATE_2M_INDEX < lowest_present_cck) {
+    cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
+  }
+  /* 1M already there or needed so always add */
+  cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
 
-    *cck_rates = cck;
-    *ofdm_rates = ofdm;
+  *cck_rates = cck;
+  *ofdm_rates = ofdm;
 }
 
 static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                           struct iwl_mac_ctx_cmd* cmd) {
-    /* for both sta and ap, ht_operation_mode hold the protection_mode */
-    uint8_t protection_mode = vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
-    /* The fw does not distinguish between ht and fat */
-    uint32_t ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT;
+  /* for both sta and ap, ht_operation_mode holds the protection_mode */
+  uint8_t protection_mode = vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+  /* The fw does not distinguish between ht and fat */
+  uint32_t ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT;
 
-    IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode);
-    /*
-     * See section 9.23.3.1 of IEEE 80211-2012.
-     * Nongreenfield HT STAs Present is not supported.
-     */
-    switch (protection_mode) {
+  IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode);
+  /*
+   * See section 9.23.3.1 of IEEE 80211-2012.
+   * Nongreenfield HT STAs Present is not supported.
+   */
+  switch (protection_mode) {
     case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
-        break;
+      break;
     case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
     case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
-        cmd->protection_flags |= cpu_to_le32(ht_flag);
-        break;
+      cmd->protection_flags |= cpu_to_le32(ht_flag);
+      break;
     case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
-        /* Protect when channel wider than 20MHz */
-        if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) {
-            cmd->protection_flags |= cpu_to_le32(ht_flag);
-        }
-        break;
+      /* Protect when channel wider than 20MHz */
+      if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) {
+        cmd->protection_flags |= cpu_to_le32(ht_flag);
+      }
+      break;
     default:
-        IWL_ERR(mvm, "Illegal protection mode %d\n", protection_mode);
-        break;
-    }
+      IWL_ERR(mvm, "Illegal protection mode %d\n", protection_mode);
+      break;
+  }
 }
 
 static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                         struct iwl_mac_ctx_cmd* cmd, const uint8_t* bssid_override,
                                         uint32_t action) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct ieee80211_chanctx_conf* chanctx;
-    bool ht_enabled = !!(vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
-    uint8_t cck_ack_rates, ofdm_ack_rates;
-    const uint8_t* bssid = bssid_override ?: vif->bss_conf.bssid;
-    int i;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct ieee80211_chanctx_conf* chanctx;
+  bool ht_enabled = !!(vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
+  uint8_t cck_ack_rates, ofdm_ack_rates;
+  const uint8_t* bssid = bssid_override ?: vif->bss_conf.bssid;
+  int i;
 
-    cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
-    cmd->action = cpu_to_le32(action);
+  cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+  cmd->action = cpu_to_le32(action);
 
-    switch (vif->type) {
+  switch (vif->type) {
     case NL80211_IFTYPE_STATION:
-        if (vif->p2p) {
-            cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA);
-        } else {
-            cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA);
-        }
-        break;
+      if (vif->p2p) {
+        cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA);
+      } else {
+        cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA);
+      }
+      break;
     case NL80211_IFTYPE_AP:
-        cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO);
-        break;
+      cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO);
+      break;
     case NL80211_IFTYPE_MONITOR:
-        cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER);
-        break;
+      cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER);
+      break;
     case NL80211_IFTYPE_P2P_DEVICE:
-        cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE);
-        break;
+      cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE);
+      break;
     case NL80211_IFTYPE_ADHOC:
-        cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS);
-        break;
+      cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS);
+      break;
     default:
-        WARN_ON_ONCE(1);
-    }
+      WARN_ON_ONCE(1);
+  }
 
-    cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id);
+  cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id);
 
-    memcpy(cmd->node_addr, vif->addr, ETH_ALEN);
+  memcpy(cmd->node_addr, vif->addr, ETH_ALEN);
 
-    if (bssid) {
-        memcpy(cmd->bssid_addr, bssid, ETH_ALEN);
-    } else {
-        eth_broadcast_addr(cmd->bssid_addr);
-    }
+  if (bssid) {
+    memcpy(cmd->bssid_addr, bssid, ETH_ALEN);
+  } else {
+    eth_broadcast_addr(cmd->bssid_addr);
+  }
 
-    rcu_read_lock();
-    chanctx = rcu_dereference(vif->chanctx_conf);
-    iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band : NL80211_BAND_2GHZ,
-                      &cck_ack_rates, &ofdm_ack_rates);
-    rcu_read_unlock();
+  rcu_read_lock();
+  chanctx = rcu_dereference(vif->chanctx_conf);
+  iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band : NL80211_BAND_2GHZ, &cck_ack_rates,
+                    &ofdm_ack_rates);
+  rcu_read_unlock();
 
-    cmd->cck_rates = cpu_to_le32((uint32_t)cck_ack_rates);
-    cmd->ofdm_rates = cpu_to_le32((uint32_t)ofdm_ack_rates);
+  cmd->cck_rates = cpu_to_le32((uint32_t)cck_ack_rates);
+  cmd->ofdm_rates = cpu_to_le32((uint32_t)ofdm_ack_rates);
 
-    cmd->cck_short_preamble =
-        cpu_to_le32(vif->bss_conf.use_short_preamble ? MAC_FLG_SHORT_PREAMBLE : 0);
-    cmd->short_slot = cpu_to_le32(vif->bss_conf.use_short_slot ? MAC_FLG_SHORT_SLOT : 0);
+  cmd->cck_short_preamble =
+      cpu_to_le32(vif->bss_conf.use_short_preamble ? MAC_FLG_SHORT_PREAMBLE : 0);
+  cmd->short_slot = cpu_to_le32(vif->bss_conf.use_short_slot ? MAC_FLG_SHORT_SLOT : 0);
 
-    cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+  cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
 
-    for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-        uint8_t txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
+  for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+    uint8_t txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
 
-        cmd->ac[txf].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min);
-        cmd->ac[txf].cw_max = cpu_to_le16(mvmvif->queue_params[i].cw_max);
-        cmd->ac[txf].edca_txop = cpu_to_le16(mvmvif->queue_params[i].txop * 32);
-        cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs;
-        cmd->ac[txf].fifos_mask = BIT(txf);
-    }
+    cmd->ac[txf].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min);
+    cmd->ac[txf].cw_max = cpu_to_le16(mvmvif->queue_params[i].cw_max);
+    cmd->ac[txf].edca_txop = cpu_to_le16(mvmvif->queue_params[i].txop * 32);
+    cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs;
+    cmd->ac[txf].fifos_mask = BIT(txf);
+  }
 
-    if (vif->bss_conf.qos) { cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); }
+  if (vif->bss_conf.qos) {
+    cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+  }
 
-    if (vif->bss_conf.use_cts_prot) {
-        cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
-    }
+  if (vif->bss_conf.use_cts_prot) {
+    cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+  }
 
-    IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", vif->bss_conf.use_cts_prot,
-                   vif->bss_conf.ht_operation_mode);
-    if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
-        cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
-    }
-    if (ht_enabled) { iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd); }
+  IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", vif->bss_conf.use_cts_prot,
+                 vif->bss_conf.ht_operation_mode);
+  if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
+    cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
+  }
+  if (ht_enabled) {
+    iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd);
+  }
 }
 
 static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm* mvm, struct iwl_mac_ctx_cmd* cmd) {
-    int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, sizeof(*cmd), cmd);
-    if (ret) {
-        IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n", le32_to_cpu(cmd->action), ret);
-    }
-    return ret;
+  int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, sizeof(*cmd), cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n", le32_to_cpu(cmd->action), ret);
+  }
+  return ret;
 }
 
 static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif, uint32_t action,
                                     bool force_assoc_off, const uint8_t* bssid_override) {
-    struct iwl_mac_ctx_cmd cmd = {};
-    struct iwl_mac_data_sta* ctxt_sta;
+  struct iwl_mac_ctx_cmd cmd = {};
+  struct iwl_mac_data_sta* ctxt_sta;
 
-    WARN_ON(vif->type != NL80211_IFTYPE_STATION);
+  WARN_ON(vif->type != NL80211_IFTYPE_STATION);
 
-    /* Fill the common data for all mac context types */
-    iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action);
+  /* Fill the common data for all mac context types */
+  iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action);
 
-    if (vif->p2p) {
-        struct ieee80211_p2p_noa_attr* noa = &vif->bss_conf.p2p_noa_attr;
+  if (vif->p2p) {
+    struct ieee80211_p2p_noa_attr* noa = &vif->bss_conf.p2p_noa_attr;
 #ifdef CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA
-        /*
-         * Pass CT window including OPPPS enable flag as part of a WA
-         * to pass P2P OPPPS certification test. Refer to
-         * IWLMVM_P2P_OPPPS_TEST_WA description in Kconfig.noupstream.
-         */
-        if (mvm->p2p_opps_test_wa_vif) {
-            cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow);
-        } else
+    /*
+     * Pass CT window including OPPPS enable flag as part of a WA
+     * to pass P2P OPPPS certification test. Refer to
+     * IWLMVM_P2P_OPPPS_TEST_WA description in Kconfig.noupstream.
+     */
+    if (mvm->p2p_opps_test_wa_vif) {
+      cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow);
+    } else
 #endif
-            cmd.p2p_sta.ctwin =
-                cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
-        ctxt_sta = &cmd.p2p_sta.sta;
-    } else {
-        ctxt_sta = &cmd.sta;
+      cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+    ctxt_sta = &cmd.p2p_sta.sta;
+  } else {
+    ctxt_sta = &cmd.sta;
+  }
+
+  /* We need the dtim_period to set the MAC as associated */
+  if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && !force_assoc_off) {
+    uint32_t dtim_offs;
+
+    /*
+     * The DTIM count counts down, so when it is N that means N
+     * more beacon intervals happen until the DTIM TBTT. Therefore
+     * add this to the current time. If that ends up being in the
+     * future, the firmware will handle it.
+     *
+     * Also note that the system_timestamp (which we get here as
+     * "sync_device_ts") and TSF timestamp aren't at exactly the
+     * same offset in the frame -- the TSF is at the first symbol
+     * of the TSF, the system timestamp is at signal acquisition
+     * time. This means there's an offset between them of at most
+     * a few hundred microseconds (24 * 8 bits + PLCP time gives
+     * 384us in the longest case), this is currently not relevant
+     * as the firmware wakes up around 2ms before the TBTT.
+     */
+    dtim_offs = vif->bss_conf.sync_dtim_count * vif->bss_conf.beacon_int;
+    /* convert TU to usecs */
+    dtim_offs *= 1024;
+
+    ctxt_sta->dtim_tsf = cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs);
+    ctxt_sta->dtim_time = cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs);
+    ctxt_sta->assoc_beacon_arrive_time = cpu_to_le32(vif->bss_conf.sync_device_ts);
+
+    IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n", le64_to_cpu(ctxt_sta->dtim_tsf),
+                   le32_to_cpu(ctxt_sta->dtim_time), dtim_offs);
+
+    ctxt_sta->is_assoc = cpu_to_le32(1);
+  } else {
+    ctxt_sta->is_assoc = cpu_to_le32(0);
+
+    /* Allow beacons to pass through as long as we are not
+     * associated, or we do not have dtim period information.
+     */
+    cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+  }
+
+  ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+  ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period);
+
+  ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
+  ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
+
+  if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) {
+    cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+  }
+
+  if (vif->bss_conf.assoc && vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
+    cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+    if (vif->bss_conf.twt_requester) {
+      ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
     }
+  }
 
-    /* We need the dtim_period to set the MAC as associated */
-    if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && !force_assoc_off) {
-        uint32_t dtim_offs;
-
-        /*
-         * The DTIM count counts down, so when it is N that means N
-         * more beacon intervals happen until the DTIM TBTT. Therefore
-         * add this to the current time. If that ends up being in the
-         * future, the firmware will handle it.
-         *
-         * Also note that the system_timestamp (which we get here as
-         * "sync_device_ts") and TSF timestamp aren't at exactly the
-         * same offset in the frame -- the TSF is at the first symbol
-         * of the TSF, the system timestamp is at signal acquisition
-         * time. This means there's an offset between them of at most
-         * a few hundred microseconds (24 * 8 bits + PLCP time gives
-         * 384us in the longest case), this is currently not relevant
-         * as the firmware wakes up around 2ms before the TBTT.
-         */
-        dtim_offs = vif->bss_conf.sync_dtim_count * vif->bss_conf.beacon_int;
-        /* convert TU to usecs */
-        dtim_offs *= 1024;
-
-        ctxt_sta->dtim_tsf = cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs);
-        ctxt_sta->dtim_time = cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs);
-        ctxt_sta->assoc_beacon_arrive_time = cpu_to_le32(vif->bss_conf.sync_device_ts);
-
-        IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n",
-                       le64_to_cpu(ctxt_sta->dtim_tsf), le32_to_cpu(ctxt_sta->dtim_time),
-                       dtim_offs);
-
-        ctxt_sta->is_assoc = cpu_to_le32(1);
-    } else {
-        ctxt_sta->is_assoc = cpu_to_le32(0);
-
-        /* Allow beacons to pass through as long as we are not
-         * associated, or we do not have dtim period information.
-         */
-        cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
-    }
-
-    ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
-    ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period);
-
-    ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
-    ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
-
-    if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) {
-        cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
-    }
-
-    if (vif->bss_conf.assoc && vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
-        cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
-        if (vif->bss_conf.twt_requester) { ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED); }
-    }
-
-    return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+  return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
 
 static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                          uint32_t action) {
-    struct iwl_mac_ctx_cmd cmd = {};
-    uint32_t tfd_queue_msk = BIT(mvm->snif_queue);
-    int ret;
+  struct iwl_mac_ctx_cmd cmd = {};
+  uint32_t tfd_queue_msk = BIT(mvm->snif_queue);
+  int ret;
 
-    WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
+  WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
 
-    iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+  iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
 
-    cmd.filter_flags =
-        cpu_to_le32(MAC_FILTER_IN_PROMISC | MAC_FILTER_IN_CONTROL_AND_MGMT | MAC_FILTER_IN_BEACON |
-                    MAC_FILTER_IN_PROBE_REQUEST | MAC_FILTER_IN_CRC32);
-    ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
+  cmd.filter_flags =
+      cpu_to_le32(MAC_FILTER_IN_PROMISC | MAC_FILTER_IN_CONTROL_AND_MGMT | MAC_FILTER_IN_BEACON |
+                  MAC_FILTER_IN_PROBE_REQUEST | MAC_FILTER_IN_CRC32);
+  ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
 
-    /* Allocate sniffer station */
-    ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk, vif->type,
-                                   IWL_STA_GENERAL_PURPOSE);
-    if (ret) { return ret; }
+  /* Allocate sniffer station */
+  ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk, vif->type,
+                                 IWL_STA_GENERAL_PURPOSE);
+  if (ret) {
+    return ret;
+  }
 
-    return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+  return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
 
 static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                      uint32_t action) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mac_ctx_cmd cmd = {};
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mac_ctx_cmd cmd = {};
 
-    WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+  WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
 
-    iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+  iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
 
-    cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | MAC_FILTER_IN_PROBE_REQUEST);
+  cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | MAC_FILTER_IN_PROBE_REQUEST);
 
-    /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */
-    cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
+  /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
+  cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
 
-    /* TODO: Assumes that the beacon id == mac context id */
-    cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
+  /* TODO: Assumes that the beacon id == mac context id */
+  cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
 
-    return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+  return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
 
 struct iwl_mvm_go_iterator_data {
-    bool go_active;
+  bool go_active;
 };
 
 static void iwl_mvm_go_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_go_iterator_data* data = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_go_iterator_data* data = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_ibss_active) {
-        data->go_active = true;
-    }
+  if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_ibss_active) {
+    data->go_active = true;
+  }
 }
 
 static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                            uint32_t action) {
-    struct iwl_mac_ctx_cmd cmd = {};
-    struct iwl_mvm_go_iterator_data data = {};
+  struct iwl_mac_ctx_cmd cmd = {};
+  struct iwl_mvm_go_iterator_data data = {};
 
-    WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);
+  WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);
 
-    iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+  iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
 
-    /* Override the filter flags to accept only probe requests */
-    cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+  /* Override the filter flags to accept only probe requests */
+  cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
 
-    /*
-     * This flag should be set to true when the P2P Device is
-     * discoverable and there is at least another active P2P GO. Settings
-     * this flag will allow the P2P Device to be discoverable on other
-     * channels in addition to its listen channel.
-     * Note that this flag should not be set in other cases as it opens the
-     * Rx filters on all MAC and increases the number of interrupts.
-     */
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-                                               iwl_mvm_go_iterator, &data);
+  /*
+   * This flag should be set to true when the P2P Device is
+   * discoverable and there is at least another active P2P GO. Settings
+   * this flag will allow the P2P Device to be discoverable on other
+   * channels in addition to its listen channel.
+   * Note that this flag should not be set in other cases as it opens the
+   * Rx filters on all MAC and increases the number of interrupts.
+   */
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+                                             iwl_mvm_go_iterator, &data);
 
-    cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0);
-    return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+  cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0);
+  return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
 
 static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm* mvm, __le32* tim_index, __le32* tim_size,
                                      uint8_t* beacon, uint32_t frame_size) {
-    uint32_t tim_idx;
-    struct ieee80211_mgmt* mgmt = (struct ieee80211_mgmt*)beacon;
+  uint32_t tim_idx;
+  struct ieee80211_mgmt* mgmt = (struct ieee80211_mgmt*)beacon;
 
-    /* The index is relative to frame start but we start looking at the
-     * variable-length part of the beacon. */
-    tim_idx = mgmt->u.beacon.variable - beacon;
+  /* The index is relative to frame start but we start looking at the
+   * variable-length part of the beacon. */
+  tim_idx = mgmt->u.beacon.variable - beacon;
 
-    /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
-    while ((tim_idx < (frame_size - 2)) && (beacon[tim_idx] != WLAN_EID_TIM)) {
-        tim_idx += beacon[tim_idx + 1] + 2;
-    }
+  /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+  while ((tim_idx < (frame_size - 2)) && (beacon[tim_idx] != WLAN_EID_TIM)) {
+    tim_idx += beacon[tim_idx + 1] + 2;
+  }
 
-    /* If TIM field was found, set variables */
-    if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
-        *tim_index = cpu_to_le32(tim_idx);
-        *tim_size = cpu_to_le32((uint32_t)beacon[tim_idx + 1]);
-    } else {
-        IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
-    }
+  /* If TIM field was found, set variables */
+  if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+    *tim_index = cpu_to_le32(tim_idx);
+    *tim_size = cpu_to_le32((uint32_t)beacon[tim_idx + 1]);
+  } else {
+    IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
+  }
 }
 
 static uint32_t iwl_mvm_find_ie_offset(uint8_t* beacon, uint8_t eid, uint32_t frame_size) {
-    struct ieee80211_mgmt* mgmt = (void*)beacon;
-    const uint8_t* ie;
+  struct ieee80211_mgmt* mgmt = (void*)beacon;
+  const uint8_t* ie;
 
-    if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon))) { return 0; }
+  if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon))) {
+    return 0;
+  }
 
-    frame_size -= mgmt->u.beacon.variable - beacon;
+  frame_size -= mgmt->u.beacon.variable - beacon;
 
-    ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
-    if (!ie) { return 0; }
+  ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
+  if (!ie) {
+    return 0;
+  }
 
-    return ie - beacon;
+  return ie - beacon;
 }
 
 static uint8_t iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info* info,
                                                 struct ieee80211_vif* vif) {
-    uint8_t rate;
+  uint8_t rate;
 
-    if (info->band == NL80211_BAND_5GHZ || vif->p2p) {
-        rate = IWL_FIRST_OFDM_RATE;
-    } else {
-        rate = IWL_FIRST_CCK_RATE;
-    }
+  if (info->band == NL80211_BAND_5GHZ || vif->p2p) {
+    rate = IWL_FIRST_OFDM_RATE;
+  } else {
+    rate = IWL_FIRST_CCK_RATE;
+  }
 
 #ifdef CPTCFG_IWLWIFI_FORCE_OFDM_RATE
-    rate = IWL_FIRST_OFDM_RATE;
+  rate = IWL_FIRST_OFDM_RATE;
 #endif
-    return rate;
+  return rate;
 }
 
 static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                     struct sk_buff* beacon, struct iwl_tx_cmd* tx) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct ieee80211_tx_info* info;
-    uint8_t rate;
-    uint32_t tx_flags;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct ieee80211_tx_info* info;
+  uint8_t rate;
+  uint32_t tx_flags;
 
-    info = IEEE80211_SKB_CB(beacon);
+  info = IEEE80211_SKB_CB(beacon);
 
-    /* Set up TX command fields */
-    tx->len = cpu_to_le16((uint16_t)beacon->len);
-    tx->sta_id = mvmvif->bcast_sta.sta_id;
-    tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
-    tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
-    tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, (void*)beacon->data, info, 0)
-                << TX_CMD_FLG_BT_PRIO_POS;
-    tx->tx_flags = cpu_to_le32(tx_flags);
+  /* Set up TX command fields */
+  tx->len = cpu_to_le16((uint16_t)beacon->len);
+  tx->sta_id = mvmvif->bcast_sta.sta_id;
+  tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+  tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
+  tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, (void*)beacon->data, info, 0) << TX_CMD_FLG_BT_PRIO_POS;
+  tx->tx_flags = cpu_to_le32(tx_flags);
 
-    /*
-     * TODO: the firwmare advertises this, but has a bug. We should revert
-     *   this when the firmware will be fixed.
-     */
-    if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION) || true) {
-        iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+  /*
+   * TODO: the firmware advertises this, but has a bug. We should revert
+   *   this when the firmware is fixed.
+   */
+  if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION) || true) {
+    iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
 
-        tx->rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS);
-    }
+    tx->rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS);
+  }
 
-    rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
+  rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
 
-    tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
-    if (rate == IWL_FIRST_CCK_RATE) { tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); }
+  tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
+  if (rate == IWL_FIRST_CCK_RATE) {
+    tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK);
+  }
 }
 
 static int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm* mvm, struct sk_buff* beacon, void* data,
                                             int len) {
-    struct iwl_host_cmd cmd = {
-        .id = BEACON_TEMPLATE_CMD,
-        .flags = CMD_ASYNC,
-    };
+  struct iwl_host_cmd cmd = {
+      .id = BEACON_TEMPLATE_CMD,
+      .flags = CMD_ASYNC,
+  };
 
-    cmd.len[0] = len;
-    cmd.data[0] = data;
-    cmd.dataflags[0] = 0;
-    cmd.len[1] = beacon->len;
-    cmd.data[1] = beacon->data;
-    cmd.dataflags[1] = IWL_HCMD_DFL_DUP;
+  cmd.len[0] = len;
+  cmd.data[0] = data;
+  cmd.dataflags[0] = 0;
+  cmd.len[1] = beacon->len;
+  cmd.data[1] = beacon->data;
+  cmd.dataflags[1] = IWL_HCMD_DFL_DUP;
 
-    return iwl_mvm_send_cmd(mvm, &cmd);
+  return iwl_mvm_send_cmd(mvm, &cmd);
 }
 
 static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                            struct sk_buff* beacon) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mac_beacon_cmd_v6 beacon_cmd = {};
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mac_beacon_cmd_v6 beacon_cmd = {};
 
-    iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
+  iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
 
-    beacon_cmd.template_id = cpu_to_le32((uint32_t)mvmvif->id);
+  beacon_cmd.template_id = cpu_to_le32((uint32_t)mvmvif->id);
 
-    if (vif->type == NL80211_IFTYPE_AP)
-        iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data,
-                                 beacon->len);
+  if (vif->type == NL80211_IFTYPE_AP)
+    iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data,
+                             beacon->len);
 
-    return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd));
+  return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd));
 }
 
 static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                            struct sk_buff* beacon) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mac_beacon_cmd_v7 beacon_cmd = {};
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mac_beacon_cmd_v7 beacon_cmd = {};
 
-    iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
+  iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
 
-    beacon_cmd.template_id = cpu_to_le32((uint32_t)mvmvif->id);
+  beacon_cmd.template_id = cpu_to_le32((uint32_t)mvmvif->id);
 
-    if (vif->type == NL80211_IFTYPE_AP)
-        iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data,
-                                 beacon->len);
+  if (vif->type == NL80211_IFTYPE_AP)
+    iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data,
+                             beacon->len);
 
-    beacon_cmd.csa_offset =
-        cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len));
-    beacon_cmd.ecsa_offset =
-        cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len));
+  beacon_cmd.csa_offset =
+      cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len));
+  beacon_cmd.ecsa_offset =
+      cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len));
 
-    return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd));
+  return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd));
 }
 
 static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                            struct sk_buff* beacon) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct ieee80211_tx_info* info = IEEE80211_SKB_CB(beacon);
-    struct iwl_mac_beacon_cmd beacon_cmd = {};
-    uint8_t rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
-    uint16_t flags;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct ieee80211_tx_info* info = IEEE80211_SKB_CB(beacon);
+  struct iwl_mac_beacon_cmd beacon_cmd = {};
+  uint8_t rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
+  uint16_t flags;
 
-    flags = iwl_mvm_mac80211_idx_to_hwrate(rate);
+  flags = iwl_mvm_mac80211_idx_to_hwrate(rate);
 
-    if (rate == IWL_FIRST_CCK_RATE) { flags |= IWL_MAC_BEACON_CCK; }
+  if (rate == IWL_FIRST_CCK_RATE) {
+    flags |= IWL_MAC_BEACON_CCK;
+  }
 
-    beacon_cmd.flags = cpu_to_le16(flags);
-    beacon_cmd.byte_cnt = cpu_to_le16((uint16_t)beacon->len);
-    beacon_cmd.template_id = cpu_to_le32((uint32_t)mvmvif->id);
+  beacon_cmd.flags = cpu_to_le16(flags);
+  beacon_cmd.byte_cnt = cpu_to_le16((uint16_t)beacon->len);
+  beacon_cmd.template_id = cpu_to_le32((uint32_t)mvmvif->id);
 
-    if (vif->type == NL80211_IFTYPE_AP)
-        iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data,
-                                 beacon->len);
+  if (vif->type == NL80211_IFTYPE_AP)
+    iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data,
+                             beacon->len);
 
-    beacon_cmd.csa_offset =
-        cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len));
-    beacon_cmd.ecsa_offset =
-        cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len));
+  beacon_cmd.csa_offset =
+      cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len));
+  beacon_cmd.ecsa_offset =
+      cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len));
 
-    return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd));
+  return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd));
 }
 
 static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                         struct sk_buff* beacon) {
-    if (WARN_ON(!beacon)) { return -EINVAL; }
+  if (WARN_ON(!beacon)) {
+    return -EINVAL;
+  }
 
-    if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
-        return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon);
-    }
+  if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
+    return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon);
+  }
 
-    /* TODO: remove first condition once FW merge new TLV */
-    if (iwl_mvm_has_new_tx_api(mvm) ||
-        fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) {
-        return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon);
-    }
+  /* TODO: remove first condition once FW merge new TLV */
+  if (iwl_mvm_has_new_tx_api(mvm) ||
+      fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) {
+    return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon);
+  }
 
-    return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon);
+  return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon);
 }
 
 /* The beacon template for the AP/GO/IBSS has changed and needs update */
 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct sk_buff* beacon;
-    int ret;
+  struct sk_buff* beacon;
+  int ret;
 
-    WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC);
+  WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC);
 
-    beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
-    if (!beacon) { return -ENOMEM; }
+  beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
+  if (!beacon) {
+    return -ENOMEM;
+  }
 
-    ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
-    dev_kfree_skb(beacon);
-    return ret;
+  ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
+  dev_kfree_skb(beacon);
+  return ret;
 }
 
 struct iwl_mvm_mac_ap_iterator_data {
-    struct iwl_mvm* mvm;
-    struct ieee80211_vif* vif;
-    uint32_t beacon_device_ts;
-    uint16_t beacon_int;
+  struct iwl_mvm* mvm;
+  struct ieee80211_vif* vif;
+  uint32_t beacon_device_ts;
+  uint16_t beacon_int;
 };
 
 /* Find the beacon_device_ts and beacon_int for a managed interface */
 static void iwl_mvm_mac_ap_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_mac_ap_iterator_data* data = _data;
+  struct iwl_mvm_mac_ap_iterator_data* data = _data;
 
-    if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) { return; }
+  if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) {
+    return;
+  }
 
-    /* Station client has higher priority over P2P client*/
-    if (vif->p2p && data->beacon_device_ts) { return; }
+  /* Station client has higher priority over P2P client */
+  if (vif->p2p && data->beacon_device_ts) {
+    return;
+  }
 
-    data->beacon_device_ts = vif->bss_conf.sync_device_ts;
-    data->beacon_int = vif->bss_conf.beacon_int;
+  data->beacon_device_ts = vif->bss_conf.sync_device_ts;
+  data->beacon_int = vif->bss_conf.beacon_int;
 }
 
 /*
@@ -953,446 +993,469 @@
 static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                          struct iwl_mac_ctx_cmd* cmd,
                                          struct iwl_mac_data_ap* ctxt_ap, bool add) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_mac_ap_iterator_data data = {.mvm = mvm, .vif = vif, .beacon_device_ts = 0};
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_mac_ap_iterator_data data = {.mvm = mvm, .vif = vif, .beacon_device_ts = 0};
 
-    /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
-    cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+  /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
+  cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
 
+  /*
+   * in AP mode, pass probe requests and beacons from other APs
+   * (needed for ht protection); when there are no associated
+   * stations, don't ask FW to pass beacons to prevent unnecessary
+   * wake-ups.
+   */
+  cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+  if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
+    cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+    IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
+  } else {
+    IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
+  }
+
+  if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
+    cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+  }
+
+  ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+  ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period);
+
+  if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+    ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue);
+  }
+
+  /*
+   * Only set the beacon time when the MAC is being added, when we
+   * just modify the MAC then we should keep the time -- the firmware
+   * can otherwise have a "jumping" TBTT.
+   */
+  if (add) {
     /*
-     * in AP mode, pass probe requests and beacons from other APs
-     * (needed for ht protection); when there're no any associated
-     * station don't ask FW to pass beacons to prevent unnecessary
-     * wake-ups.
+     * If there is a station/P2P client interface which is
+     * associated, set the AP's TBTT far enough from the station's
+     * TBTT. Otherwise, set it to the current system time
      */
-    cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
-    if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
-        cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
-        IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
+    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+                                               iwl_mvm_mac_ap_iterator, &data);
+
+    if (data.beacon_device_ts) {
+      uint32_t rand = (prandom_u32() % (64 - 36)) + 36;
+      mvmvif->ap_beacon_time =
+          data.beacon_device_ts + ieee80211_tu_to_usec(data.beacon_int * rand / 100);
     } else {
-        IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
+      mvmvif->ap_beacon_time = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
     }
+  }
 
-    if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
-        cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
-    }
+  ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
+  ctxt_ap->beacon_tsf = 0; /* unused */
 
-    ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
-    ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period);
-
-    if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
-        ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue);
-    }
-
-    /*
-     * Only set the beacon time when the MAC is being added, when we
-     * just modify the MAC then we should keep the time -- the firmware
-     * can otherwise have a "jumping" TBTT.
-     */
-    if (add) {
-        /*
-         * If there is a station/P2P client interface which is
-         * associated, set the AP's TBTT far enough from the station's
-         * TBTT. Otherwise, set it to the current system time
-         */
-        ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-                                                   iwl_mvm_mac_ap_iterator, &data);
-
-        if (data.beacon_device_ts) {
-            uint32_t rand = (prandom_u32() % (64 - 36)) + 36;
-            mvmvif->ap_beacon_time =
-                data.beacon_device_ts + ieee80211_tu_to_usec(data.beacon_int * rand / 100);
-        } else {
-            mvmvif->ap_beacon_time = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
-        }
-    }
-
-    ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
-    ctxt_ap->beacon_tsf = 0; /* unused */
-
-    /* TODO: Assume that the beacon id == mac context id */
-    ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
+  /* TODO: Assume that the beacon id == mac context id */
+  ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
 }
 
 static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                    uint32_t action) {
-    struct iwl_mac_ctx_cmd cmd = {};
+  struct iwl_mac_ctx_cmd cmd = {};
 
-    WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);
+  WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);
 
-    /* Fill the common data for all mac context types */
-    iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+  /* Fill the common data for all mac context types */
+  iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
 
-    /* Fill the data specific for ap mode */
-    iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap, action == FW_CTXT_ACTION_ADD);
+  /* Fill the data specific for ap mode */
+  iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap, action == FW_CTXT_ACTION_ADD);
 
-    return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+  return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
 
 static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                    uint32_t action) {
-    struct iwl_mac_ctx_cmd cmd = {};
-    struct ieee80211_p2p_noa_attr* noa = &vif->bss_conf.p2p_noa_attr;
+  struct iwl_mac_ctx_cmd cmd = {};
+  struct ieee80211_p2p_noa_attr* noa = &vif->bss_conf.p2p_noa_attr;
 
-    WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p);
+  WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p);
 
-    /* Fill the common data for all mac context types */
-    iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+  /* Fill the common data for all mac context types */
+  iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
 
-    /* Fill the data specific for GO mode */
-    iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap, action == FW_CTXT_ACTION_ADD);
+  /* Fill the data specific for GO mode */
+  iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap, action == FW_CTXT_ACTION_ADD);
 
-    cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
-    cmd.go.opp_ps_enabled = cpu_to_le32(!!(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT));
+  cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+  cmd.go.opp_ps_enabled = cpu_to_le32(!!(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT));
 
-    return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+  return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
 
 static int iwl_mvm_mac_ctx_send(struct iwl_mvm* mvm, struct ieee80211_vif* vif, uint32_t action,
                                 bool force_assoc_off, const uint8_t* bssid_override) {
-    switch (vif->type) {
+  switch (vif->type) {
     case NL80211_IFTYPE_STATION:
-        return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action, force_assoc_off, bssid_override);
-        break;
+      return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action, force_assoc_off, bssid_override);
+      break;
     case NL80211_IFTYPE_AP:
-        if (!vif->p2p) {
-            return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
-        } else {
-            return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
-        }
-        break;
+      if (!vif->p2p) {
+        return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
+      } else {
+        return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
+      }
+      break;
     case NL80211_IFTYPE_MONITOR:
-        return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
+      return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
     case NL80211_IFTYPE_P2P_DEVICE:
-        return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+      return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
     case NL80211_IFTYPE_ADHOC:
-        return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
+      return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
     default:
-        break;
-    }
+      break;
+  }
 
-    return -EOPNOTSUPP;
+  return -EOPNOTSUPP;
 }
 
 int iwl_mvm_mac_ctxt_add(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int ret;
 
-    if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) { return -EOPNOTSUPP; }
+  if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) {
+    return -EOPNOTSUPP;
+  }
 
-    if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n", vif->addr,
-                  ieee80211_vif_type_p2p(vif))) {
-        return -EIO;
-    }
+  if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n", vif->addr,
+                ieee80211_vif_type_p2p(vif))) {
+    return -EIO;
+  }
 
-    ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD, true, NULL);
-    if (ret) { return ret; }
+  ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD, true, NULL);
+  if (ret) {
+    return ret;
+  }
 
-    /* will only do anything at resume from D3 time */
-    iwl_mvm_set_last_nonqos_seq(mvm, vif);
+  /* will only do anything at resume from D3 time */
+  iwl_mvm_set_last_nonqos_seq(mvm, vif);
 
-    mvmvif->uploaded = true;
-    return 0;
+  mvmvif->uploaded = true;
+  return 0;
 }
 
 int iwl_mvm_mac_ctxt_changed(struct iwl_mvm* mvm, struct ieee80211_vif* vif, bool force_assoc_off,
                              const uint8_t* bssid_override) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) { return -EOPNOTSUPP; }
+  if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) {
+    return -EOPNOTSUPP;
+  }
 
-    if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n", vif->addr,
-                  ieee80211_vif_type_p2p(vif))) {
-        return -EIO;
-    }
+  if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n", vif->addr,
+                ieee80211_vif_type_p2p(vif))) {
+    return -EIO;
+  }
 
-    return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY, force_assoc_off, bssid_override);
+  return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY, force_assoc_off, bssid_override);
 }
 
 int iwl_mvm_mac_ctxt_remove(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mac_ctx_cmd cmd;
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mac_ctx_cmd cmd;
+  int ret;
 
-    if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) { return -EOPNOTSUPP; }
+  if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) {
+    return -EOPNOTSUPP;
+  }
 
-    if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n", vif->addr,
-                  ieee80211_vif_type_p2p(vif))) {
-        return -EIO;
-    }
+  if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n", vif->addr,
+                ieee80211_vif_type_p2p(vif))) {
+    return -EIO;
+  }
 
-    memset(&cmd, 0, sizeof(cmd));
+  memset(&cmd, 0, sizeof(cmd));
 
-    cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
-    cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+  cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+  cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
-    if (ret) {
-        IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
-        return ret;
-    }
+  ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
+    return ret;
+  }
 
-    mvmvif->uploaded = false;
+  mvmvif->uploaded = false;
 
-    if (vif->type == NL80211_IFTYPE_MONITOR) {
-        __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
-        iwl_mvm_dealloc_snif_sta(mvm);
-    }
+  if (vif->type == NL80211_IFTYPE_MONITOR) {
+    __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
+    iwl_mvm_dealloc_snif_sta(mvm);
+  }
 
-    return 0;
+  return 0;
 }
 
 static void iwl_mvm_csa_count_down(struct iwl_mvm* mvm, struct ieee80211_vif* csa_vif, uint32_t gp2,
                                    bool tx_success) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
 
-    /* Don't start to countdown from a failed beacon */
-    if (!tx_success && !mvmvif->csa_countdown) { return; }
+  /* Don't start to countdown from a failed beacon */
+  if (!tx_success && !mvmvif->csa_countdown) {
+    return;
+  }
 
-    mvmvif->csa_countdown = true;
+  mvmvif->csa_countdown = true;
 
-    if (!ieee80211_csa_is_complete(csa_vif)) {
-        int c = ieee80211_csa_update_counter(csa_vif);
+  if (!ieee80211_csa_is_complete(csa_vif)) {
+    int c = ieee80211_csa_update_counter(csa_vif);
 
-        iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
-        if (csa_vif->p2p && !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 && tx_success) {
-            uint32_t rel_time =
-                (c + 1) * csa_vif->bss_conf.beacon_int - IWL_MVM_CHANNEL_SWITCH_TIME_GO;
-            uint32_t apply_time = gp2 + rel_time * 1024;
+    iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
+    if (csa_vif->p2p && !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 && tx_success) {
+      uint32_t rel_time = (c + 1) * csa_vif->bss_conf.beacon_int - IWL_MVM_CHANNEL_SWITCH_TIME_GO;
+      uint32_t apply_time = gp2 + rel_time * 1024;
 
-            iwl_mvm_schedule_csa_period(
-                mvm, csa_vif, IWL_MVM_CHANNEL_SWITCH_TIME_GO - IWL_MVM_CHANNEL_SWITCH_MARGIN,
-                apply_time);
-        }
-    } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
-        /* we don't have CSA NoA scheduled yet, switch now */
-        ieee80211_csa_finish(csa_vif);
-        RCU_INIT_POINTER(mvm->csa_vif, NULL);
+      iwl_mvm_schedule_csa_period(
+          mvm, csa_vif, IWL_MVM_CHANNEL_SWITCH_TIME_GO - IWL_MVM_CHANNEL_SWITCH_MARGIN, apply_time);
     }
+  } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
+    /* we don't have CSA NoA scheduled yet, switch now */
+    ieee80211_csa_finish(csa_vif);
+    RCU_INIT_POINTER(mvm->csa_vif, NULL);
+  }
 }
 
 void iwl_mvm_rx_beacon_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_extended_beacon_notif* beacon = (void*)pkt->data;
-    struct iwl_mvm_tx_resp* beacon_notify_hdr;
-    struct ieee80211_vif* csa_vif;
-    struct ieee80211_vif* tx_blocked_vif;
-    struct agg_tx_status* agg_status;
-    uint16_t status;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_extended_beacon_notif* beacon = (void*)pkt->data;
+  struct iwl_mvm_tx_resp* beacon_notify_hdr;
+  struct ieee80211_vif* csa_vif;
+  struct ieee80211_vif* tx_blocked_vif;
+  struct agg_tx_status* agg_status;
+  uint16_t status;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    beacon_notify_hdr = &beacon->beacon_notify_hdr;
-    mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
-    mvm->ibss_manager = beacon->ibss_mgr_status != 0;
+  beacon_notify_hdr = &beacon->beacon_notify_hdr;
+  mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
+  mvm->ibss_manager = beacon->ibss_mgr_status != 0;
 
-    agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
-    status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
-    IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n", status,
-                 beacon_notify_hdr->failure_frame, le64_to_cpu(beacon->tsf),
-                 mvm->ap_last_beacon_gp2, le32_to_cpu(beacon_notify_hdr->initial_rate));
+  agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
+  status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
+  IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n", status,
+               beacon_notify_hdr->failure_frame, le64_to_cpu(beacon->tsf), mvm->ap_last_beacon_gp2,
+               le32_to_cpu(beacon_notify_hdr->initial_rate));
 
-    csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex));
-    if (unlikely(csa_vif && csa_vif->csa_active))
-        iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
-                               (status == TX_STATUS_SUCCESS));
+  csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex));
+  if (unlikely(csa_vif && csa_vif->csa_active))
+    iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2, (status == TX_STATUS_SUCCESS));
 
-    tx_blocked_vif =
-        rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex));
-    if (unlikely(tx_blocked_vif)) {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+  tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex));
+  if (unlikely(tx_blocked_vif)) {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
 
-        /*
-         * The channel switch is started and we have blocked the
-         * stations. If this is the first beacon (the timeout wasn't
-         * set), set the unblock timeout, otherwise countdown
-         */
-        if (!mvm->csa_tx_block_bcn_timeout) {
-            mvm->csa_tx_block_bcn_timeout = IWL_MVM_CS_UNBLOCK_TX_TIMEOUT;
-        } else {
-            mvm->csa_tx_block_bcn_timeout--;
-        }
-
-        /* Check if the timeout is expired, and unblock tx */
-        if (mvm->csa_tx_block_bcn_timeout == 0) {
-            iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
-            RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
-        }
+    /*
+     * The channel switch is started and we have blocked the
+     * stations. If this is the first beacon (the timeout wasn't
+     * set), set the unblock timeout, otherwise countdown
+     */
+    if (!mvm->csa_tx_block_bcn_timeout) {
+      mvm->csa_tx_block_bcn_timeout = IWL_MVM_CS_UNBLOCK_TX_TIMEOUT;
+    } else {
+      mvm->csa_tx_block_bcn_timeout--;
     }
+
+    /* Check if the timeout is expired, and unblock tx */
+    if (mvm->csa_tx_block_bcn_timeout == 0) {
+      iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+      RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+    }
+  }
 }
 
 static void iwl_mvm_beacon_loss_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_missed_beacons_notif* missed_beacons = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm* mvm = mvmvif->mvm;
-    struct iwl_fw_dbg_trigger_missed_bcon* bcon_trig;
-    struct iwl_fw_dbg_trigger_tlv* trigger;
-    uint32_t stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
-    uint32_t rx_missed_bcon, rx_missed_bcon_since_rx;
+  struct iwl_missed_beacons_notif* missed_beacons = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm* mvm = mvmvif->mvm;
+  struct iwl_fw_dbg_trigger_missed_bcon* bcon_trig;
+  struct iwl_fw_dbg_trigger_tlv* trigger;
+  uint32_t stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
+  uint32_t rx_missed_bcon, rx_missed_bcon_since_rx;
 
-    if (mvmvif->id != (uint16_t)le32_to_cpu(missed_beacons->mac_id)) { return; }
+  if (mvmvif->id != (uint16_t)le32_to_cpu(missed_beacons->mac_id)) {
+    return;
+  }
 
-    rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
-    rx_missed_bcon_since_rx = le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
-    /*
-     * TODO: the threshold should be adjusted based on latency conditions,
-     * and/or in case of a CS flow on one of the other AP vifs.
-     */
-    if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
-        IWL_MVM_MISSED_BEACONS_THRESHOLD) {
-        ieee80211_beacon_loss(vif);
-    }
+  rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
+  rx_missed_bcon_since_rx = le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
+  /*
+   * TODO: the threshold should be adjusted based on latency conditions,
+   * and/or in case of a CS flow on one of the other AP vifs.
+   */
+  if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
+      IWL_MVM_MISSED_BEACONS_THRESHOLD) {
+    ieee80211_beacon_loss(vif);
+  }
 
-    trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
-                                    FW_DBG_TRIGGER_MISSED_BEACONS);
-    if (!trigger) { return; }
+  trigger =
+      iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MISSED_BEACONS);
+  if (!trigger) {
+    return;
+  }
 
-    bcon_trig = (void*)trigger->data;
-    stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
-    stop_trig_missed_bcon_since_rx = le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);
+  bcon_trig = (void*)trigger->data;
+  stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
+  stop_trig_missed_bcon_since_rx = le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);
 
-    /* TODO: implement start trigger */
+  /* TODO: implement start trigger */
 
-    if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
-        rx_missed_bcon >= stop_trig_missed_bcon) {
-        iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
-    }
+  if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
+      rx_missed_bcon >= stop_trig_missed_bcon) {
+    iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
+  }
 }
 
 void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_missed_beacons_notif* mb = (void*)pkt->data;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_missed_beacons_notif* mb = (void*)pkt->data;
 
-    IWL_DEBUG_INFO(mvm, "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
-                   le32_to_cpu(mb->mac_id), le32_to_cpu(mb->consec_missed_beacons),
-                   le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
-                   le32_to_cpu(mb->num_recvd_beacons), le32_to_cpu(mb->num_expected_beacons));
+  IWL_DEBUG_INFO(mvm, "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
+                 le32_to_cpu(mb->mac_id), le32_to_cpu(mb->consec_missed_beacons),
+                 le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
+                 le32_to_cpu(mb->num_recvd_beacons), le32_to_cpu(mb->num_expected_beacons));
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_beacon_loss_iterator, mb);
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_beacon_loss_iterator, mb);
 
-    iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_MISSED_BEACONS);
+  iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_MISSED_BEACONS);
 }
 
 void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_stored_beacon_notif* sb = (void*)pkt->data;
-    struct ieee80211_rx_status rx_status;
-    struct sk_buff* skb;
-    uint32_t size = le32_to_cpu(sb->byte_count);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_stored_beacon_notif* sb = (void*)pkt->data;
+  struct ieee80211_rx_status rx_status;
+  struct sk_buff* skb;
+  uint32_t size = le32_to_cpu(sb->byte_count);
 
-    if (size == 0) { return; }
+  if (size == 0) {
+    return;
+  }
 
-    skb = alloc_skb(size, GFP_ATOMIC);
-    if (!skb) {
-        IWL_ERR(mvm, "alloc_skb failed\n");
-        return;
-    }
+  skb = alloc_skb(size, GFP_ATOMIC);
+  if (!skb) {
+    IWL_ERR(mvm, "alloc_skb failed\n");
+    return;
+  }
 
-    /* update rx_status according to the notification's metadata */
-    memset(&rx_status, 0, sizeof(rx_status));
-    rx_status.mactime = le64_to_cpu(sb->tsf);
-    /* TSF as indicated by the firmware  is at INA time */
-    rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
-    rx_status.device_timestamp = le32_to_cpu(sb->system_time);
-    rx_status.band =
-        (sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
-    rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), rx_status.band);
+  /* update rx_status according to the notification's metadata */
+  memset(&rx_status, 0, sizeof(rx_status));
+  rx_status.mactime = le64_to_cpu(sb->tsf);
+  /* TSF as indicated by the firmware  is at INA time */
+  rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
+  rx_status.device_timestamp = le32_to_cpu(sb->system_time);
+  rx_status.band =
+      (sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+  rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), rx_status.band);
 
-    /* copy the data */
-    skb_put_data(skb, sb->data, size);
-    memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+  /* copy the data */
+  skb_put_data(skb, sb->data, size);
+  memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
 
-    /* pass it as regular rx to mac80211 */
-    ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
+  /* pass it as regular rx to mac80211 */
+  ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
 }
 
 static void iwl_mvm_probe_resp_data_iter(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_probe_resp_data_notif* notif = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_probe_resp_data *old_data, *new_data;
+  struct iwl_probe_resp_data_notif* notif = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_probe_resp_data *old_data, *new_data;
 
-    if (mvmvif->id != (uint16_t)le32_to_cpu(notif->mac_id)) { return; }
+  if (mvmvif->id != (uint16_t)le32_to_cpu(notif->mac_id)) {
+    return;
+  }
 
-    new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
-    if (!new_data) { return; }
+  new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
+  if (!new_data) {
+    return;
+  }
 
-    memcpy(&new_data->notif, notif, sizeof(new_data->notif));
+  memcpy(&new_data->notif, notif, sizeof(new_data->notif));
 
-    /* noa_attr contains 1 reserved byte, need to substruct it */
-    new_data->noa_len = sizeof(struct ieee80211_vendor_ie) + sizeof(new_data->notif.noa_attr) - 1;
+  /* noa_attr contains 1 reserved byte, need to substruct it */
+  new_data->noa_len = sizeof(struct ieee80211_vendor_ie) + sizeof(new_data->notif.noa_attr) - 1;
 
-    /*
-     * If it's a one time NoA, only one descriptor is needed,
-     * adjust the length according to len_low.
-     */
-    if (new_data->notif.noa_attr.len_low == sizeof(struct ieee80211_p2p_noa_desc) + 2) {
-        new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc);
-    }
+  /*
+   * If it's a one time NoA, only one descriptor is needed,
+   * adjust the length according to len_low.
+   */
+  if (new_data->notif.noa_attr.len_low == sizeof(struct ieee80211_p2p_noa_desc) + 2) {
+    new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc);
+  }
 
-    old_data =
-        rcu_dereference_protected(mvmvif->probe_resp_data, lockdep_is_held(&mvmvif->mvm->mutex));
-    rcu_assign_pointer(mvmvif->probe_resp_data, new_data);
+  old_data =
+      rcu_dereference_protected(mvmvif->probe_resp_data, lockdep_is_held(&mvmvif->mvm->mutex));
+  rcu_assign_pointer(mvmvif->probe_resp_data, new_data);
 
-    if (old_data) { kfree_rcu(old_data, rcu_head); }
+  if (old_data) {
+    kfree_rcu(old_data, rcu_head);
+  }
 
-    if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA && notif->csa_counter >= 1) {
-        ieee80211_csa_set_counter(vif, notif->csa_counter);
-    }
+  if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA && notif->csa_counter >= 1) {
+    ieee80211_csa_set_counter(vif, notif->csa_counter);
+  }
 }
 
 void iwl_mvm_probe_resp_data_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_probe_resp_data_notif* notif = (void*)pkt->data;
-    int len = iwl_rx_packet_payload_len(pkt);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_probe_resp_data_notif* notif = (void*)pkt->data;
+  int len = iwl_rx_packet_payload_len(pkt);
 
-    if (WARN_ON_ONCE(len < sizeof(*notif))) { return; }
+  if (WARN_ON_ONCE(len < sizeof(*notif))) {
+    return;
+  }
 
-    IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n", notif->noa_active,
-                   notif->csa_counter);
+  IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n", notif->noa_active,
+                 notif->csa_counter);
 
-    ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_ACTIVE,
-                                        iwl_mvm_probe_resp_data_iter, notif);
+  ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_ACTIVE,
+                                      iwl_mvm_probe_resp_data_iter, notif);
 }
 
 void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_channel_switch_noa_notif* notif = (void*)pkt->data;
-    struct ieee80211_vif* csa_vif;
-    struct iwl_mvm_vif* mvmvif;
-    int len = iwl_rx_packet_payload_len(pkt);
-    uint32_t id_n_color;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_channel_switch_noa_notif* notif = (void*)pkt->data;
+  struct ieee80211_vif* csa_vif;
+  struct iwl_mvm_vif* mvmvif;
+  int len = iwl_rx_packet_payload_len(pkt);
+  uint32_t id_n_color;
 
-    if (WARN_ON_ONCE(len < sizeof(*notif))) { return; }
-
-    rcu_read_lock();
-
-    csa_vif = rcu_dereference(mvm->csa_vif);
-    if (WARN_ON(!csa_vif || !csa_vif->csa_active)) { goto out_unlock; }
-
-    id_n_color = le32_to_cpu(notif->id_and_color);
-
-    mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
-    if (WARN(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color) != id_n_color,
-             "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
-             FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color), id_n_color)) {
-        goto out_unlock;
-    }
-
-    IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");
-
-    schedule_delayed_work(
-        &mvm->cs_tx_unblock_dwork,
-        msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT * csa_vif->bss_conf.beacon_int));
-
-    ieee80211_csa_finish(csa_vif);
-
-    rcu_read_unlock();
-
-    RCU_INIT_POINTER(mvm->csa_vif, NULL);
-
+  if (WARN_ON_ONCE(len < sizeof(*notif))) {
     return;
+  }
+
+  rcu_read_lock();
+
+  csa_vif = rcu_dereference(mvm->csa_vif);
+  if (WARN_ON(!csa_vif || !csa_vif->csa_active)) {
+    goto out_unlock;
+  }
+
+  id_n_color = le32_to_cpu(notif->id_and_color);
+
+  mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
+  if (WARN(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color) != id_n_color,
+           "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
+           FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color), id_n_color)) {
+    goto out_unlock;
+  }
+
+  IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");
+
+  schedule_delayed_work(&mvm->cs_tx_unblock_dwork, msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
+                                                                    csa_vif->bss_conf.beacon_int));
+
+  ieee80211_csa_finish(csa_vif);
+
+  rcu_read_unlock();
+
+  RCU_INIT_POINTER(mvm->csa_vif, NULL);
+
+  return;
 
 out_unlock:
-    rcu_read_unlock();
+  rcu_read_unlock();
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mac80211.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mac80211.c
index a6a0cd6..70e77e6 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mac80211.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mac80211.c
@@ -33,7 +33,11 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+
+// This file must be included before all header files.
+// clang-format off
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h"
+// clang-format on
 
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/error-dump.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-eeprom-parse.h"
@@ -56,7 +60,7 @@
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/nan.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tof.h"
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
     {
         .max = CPTCFG_IWLWIFI_NUM_STA_INTERFACES,
@@ -184,27 +188,31 @@
 #endif  // NEEDS_PORTING
 
 void iwl_mvm_ref(struct iwl_mvm* mvm, enum iwl_mvm_ref_type ref_type) {
-    if (!iwl_mvm_is_d0i3_supported(mvm)) { return; }
+  if (!iwl_mvm_is_d0i3_supported(mvm)) {
+    return;
+  }
 
-    IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
-    mtx_lock(&mvm->refs_lock);
-    mvm->refs[ref_type]++;
-    mtx_unlock(&mvm->refs_lock);
-    iwl_trans_ref(mvm->trans);
+  IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
+  mtx_lock(&mvm->refs_lock);
+  mvm->refs[ref_type]++;
+  mtx_unlock(&mvm->refs_lock);
+  iwl_trans_ref(mvm->trans);
 }
 
 void iwl_mvm_unref(struct iwl_mvm* mvm, enum iwl_mvm_ref_type ref_type) {
-    if (!iwl_mvm_is_d0i3_supported(mvm)) { return; }
+  if (!iwl_mvm_is_d0i3_supported(mvm)) {
+    return;
+  }
 
-    IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
-    mtx_lock(&mvm->refs_lock);
-    if (WARN_ON(!mvm->refs[ref_type])) {
-        mtx_unlock(&mvm->refs_lock);
-        return;
-    }
-    mvm->refs[ref_type]--;
+  IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
+  mtx_lock(&mvm->refs_lock);
+  if (WARN_ON(!mvm->refs[ref_type])) {
     mtx_unlock(&mvm->refs_lock);
-    iwl_trans_unref(mvm->trans);
+    return;
+  }
+  mvm->refs[ref_type]--;
+  mtx_unlock(&mvm->refs_lock);
+  iwl_trans_unref(mvm->trans);
 }
 
 #if 0   // NEEDS_PORTING
@@ -228,25 +236,27 @@
 #endif  // NEEDS_PORTING
 
 bool iwl_mvm_ref_taken(struct iwl_mvm* mvm) {
-    int i;
-    bool taken = false;
+  int i;
+  bool taken = false;
 
-    if (!iwl_mvm_is_d0i3_supported(mvm)) { return true; }
+  if (!iwl_mvm_is_d0i3_supported(mvm)) {
+    return true;
+  }
 
-    mtx_lock(&mvm->refs_lock);
-    for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
-        if (mvm->refs[i]) {
-            taken = true;
-            break;
-        }
+  mtx_lock(&mvm->refs_lock);
+  for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
+    if (mvm->refs[i]) {
+      taken = true;
+      break;
     }
-    mtx_unlock(&mvm->refs_lock);
+  }
+  mtx_unlock(&mvm->refs_lock);
 
-    return taken;
+  return taken;
 }
 
 zx_status_t iwl_mvm_ref_sync(struct iwl_mvm* mvm, enum iwl_mvm_ref_type ref_type) {
-    iwl_mvm_ref(mvm, ref_type);
+  iwl_mvm_ref(mvm, ref_type);
 
 #if 0   // NEEDS_PORTING
     if (!wait_event_timeout(mvm->d0i3_exit_waitq, !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
@@ -257,10 +267,10 @@
     }
 #endif  // NEEDS_PORTING
 
-    return ZX_OK;
+  return ZX_OK;
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm* mvm) {
     int i;
 
@@ -395,13 +405,13 @@
         .extended_capabilities_mask = he_if_types_ext_capa_ap,
         .extended_capabilities_len = sizeof(he_if_types_ext_capa_ap),
     },
-#endif /* CPTCFG_IWLMVM_AX_SOFTAP_TESTMODE */
+#endif  /* CPTCFG_IWLMVM_AX_SOFTAP_TESTMODE */
 };
 #endif  // NEEDS_PORTING
 
 zx_status_t iwl_mvm_mac_setup_register(struct iwl_mvm* mvm) {
-    return ZX_OK;
-#if 0   // NEEDS_PORTING
+  return ZX_OK;
+#if 0  // NEEDS_PORTING
     struct ieee80211_hw* hw = mvm->hw;
     int num_mac, ret, i;
     static const uint32_t mvm_ciphers[] = {
@@ -854,7 +864,7 @@
 #endif  // NEEDS_PORTING
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw* hw, struct ieee80211_txq* txq) {
     struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
     struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_mac80211(txq);
@@ -909,10 +919,11 @@
 }
 
 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
-    do {                                                      \
-        if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) break;       \
-        iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt);  \
-    } while (0)
+  do {                                                        \
+    if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))                  \
+      break;                                                  \
+    iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt);      \
+  } while (0)
 
 static void iwl_mvm_ampdu_check_trigger(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                         struct ieee80211_sta* sta, uint16_t tid, uint16_t rx_ba_ssn,
@@ -4228,11 +4239,12 @@
 
 static void iwl_mvm_event_mlme_callback(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                         const struct ieee80211_event* event) {
-#define CHECK_MLME_TRIGGER(_cnt, _fmt...)                    \
-    do {                                                     \
-        if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) break; \
-        iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt);   \
-    } while (0)
+#define CHECK_MLME_TRIGGER(_cnt, _fmt...)              \
+  do {                                                 \
+    if ((trig_mlme->_cnt) && --(trig_mlme->_cnt))      \
+      break;                                           \
+    iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \
+  } while (0)
 
     struct iwl_fw_dbg_trigger_tlv* trig;
     struct iwl_fw_dbg_trigger_mlme* trig_mlme;
@@ -4584,7 +4596,7 @@
 #endif  // NEEDS_PORTING
 
 const struct ieee80211_ops iwl_mvm_hw_ops = {
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
     .tx = iwl_mvm_mac_tx,
     .wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
     .ampdu_action = iwl_mvm_mac_ampdu_action,
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mvm.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mvm.h
index 3619b0a..08cf7f3 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mvm.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mvm.h
@@ -160,11 +160,7 @@
  * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default)
  * @IWL_POWER_LEVEL_LP  - Low Power
  */
-enum iwl_power_scheme {
-  IWL_POWER_SCHEME_CAM = 1,
-  IWL_POWER_SCHEME_BPS,
-  IWL_POWER_SCHEME_LP
-};
+enum iwl_power_scheme { IWL_POWER_SCHEME_CAM = 1, IWL_POWER_SCHEME_BPS, IWL_POWER_SCHEME_LP };
 
 #define IWL_CONN_MAX_LISTEN_INTERVAL 10
 #define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -420,8 +416,7 @@
 #if IS_ENABLED(CONFIG_IPV6)
   /* IPv6 addresses for WoWLAN */
   struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
-  unsigned long
-      tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)];
+  unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)];
   int num_target_ipv6_addrs;
 #endif
 
@@ -460,8 +455,7 @@
   struct ieee80211_key_conf* ap_wep_key;
 };
 
-static inline struct iwl_mvm_vif* iwl_mvm_vif_from_mac80211(
-    struct ieee80211_vif* vif) {
+static inline struct iwl_mvm_vif* iwl_mvm_vif_from_mac80211(struct ieee80211_vif* vif) {
   if (!vif) {
     return NULL;
   }
@@ -481,11 +475,9 @@
   IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
   IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10),
 
-  IWL_MVM_SCAN_REGULAR_MASK =
-      IWL_MVM_SCAN_REGULAR | IWL_MVM_SCAN_STOPPING_REGULAR,
+  IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR | IWL_MVM_SCAN_STOPPING_REGULAR,
   IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_STOPPING_SCHED,
-  IWL_MVM_SCAN_NETDETECT_MASK =
-      IWL_MVM_SCAN_NETDETECT | IWL_MVM_SCAN_STOPPING_NETDETECT,
+  IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT | IWL_MVM_SCAN_STOPPING_NETDETECT,
 
   IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
   IWL_MVM_SCAN_MASK = 0xff,
@@ -751,8 +743,7 @@
 static inline struct iwl_mvm_baid_data* iwl_mvm_baid_data_from_reorder_buf(
     struct iwl_mvm_reorder_buffer* buf) {
   return (struct iwl_mvm_baid_data*)((uint8_t*)buf -
-                                     offsetof(struct iwl_mvm_baid_data,
-                                              reorder_buf) -
+                                     offsetof(struct iwl_mvm_baid_data, reorder_buf) -
                                      sizeof(*buf) * buf->queue);
 }
 
@@ -805,13 +796,11 @@
   bool stopped;
 };
 
-static inline struct iwl_mvm_txq* iwl_mvm_txq_from_mac80211(
-    struct ieee80211_txq* txq) {
+static inline struct iwl_mvm_txq* iwl_mvm_txq_from_mac80211(struct ieee80211_txq* txq) {
   return (struct iwl_mvm_txq*)txq->drv_priv;
 }
 
-static inline struct iwl_mvm_txq* iwl_mvm_txq_from_tid(
-    struct ieee80211_sta* sta, uint8_t tid) {
+static inline struct iwl_mvm_txq* iwl_mvm_txq_from_tid(struct ieee80211_sta* sta, uint8_t tid) {
   if (tid == IWL_MAX_TID_COUNT) {
     tid = IEEE80211_NUM_TIDS;
   }
@@ -1099,8 +1088,7 @@
   unsigned long bt_coex_last_tcm_ts;
 
   uint8_t uapsd_noagg_bssid_write_idx;
-  struct mac_address
-      uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM] __aligned(2);
+  struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM] __aligned(2);
 
   struct iwl_mvm_tcm tcm;
 
@@ -1123,8 +1111,7 @@
   uint16_t p2p_dev_queue;
 
   /* Indicate if device power save is allowed */
-  uint8_t
-      ps_disabled; /* uint8_t instead of bool to ease debugfs_create_* usage */
+  uint8_t ps_disabled;        /* uint8_t instead of bool to ease debugfs_create_* usage */
   unsigned int max_amsdu_len; /* used for debugfs only */
 
   struct ieee80211_vif __rcu* csa_vif;
@@ -1219,11 +1206,9 @@
 };
 
 /* Extract MVM priv from op_mode and _hw */
-#define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \
-  ((struct iwl_mvm*)(_iwl_op_mode)->op_mode_specific)
+#define IWL_OP_MODE_GET_MVM(_iwl_op_mode) ((struct iwl_mvm*)(_iwl_op_mode)->op_mode_specific)
 
-#define IWL_MAC80211_GET_MVM(_hw) \
-  IWL_OP_MODE_GET_MVM((struct iwl_op_mode*)((_hw)->priv))
+#define IWL_MAC80211_GET_MVM(_hw) IWL_OP_MODE_GET_MVM((struct iwl_op_mode*)((_hw)->priv))
 
 /**
  * enum iwl_mvm_status - MVM status bits
@@ -1275,8 +1260,7 @@
 /* Must be called with rcu_read_lock() held and it can only be
  * released when mvmsta is not needed anymore.
  */
-static inline struct iwl_mvm_sta* iwl_mvm_sta_from_staid_rcu(
-    struct iwl_mvm* mvm, uint8_t sta_id) {
+static inline struct iwl_mvm_sta* iwl_mvm_sta_from_staid_rcu(struct iwl_mvm* mvm, uint8_t sta_id) {
   struct ieee80211_sta* sta;
 
   if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)) {
@@ -1293,16 +1277,15 @@
   return iwl_mvm_sta_from_mac80211(sta);
 }
 
-static inline struct iwl_mvm_sta* iwl_mvm_sta_from_staid_protected(
-    struct iwl_mvm* mvm, uint8_t sta_id) {
+static inline struct iwl_mvm_sta* iwl_mvm_sta_from_staid_protected(struct iwl_mvm* mvm,
+                                                                   uint8_t sta_id) {
   struct ieee80211_sta* sta;
 
   if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)) {
     return NULL;
   }
 
-  sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
-                                  lockdep_is_held(&mvm->mutex));
+  sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
 
   /* This can happen if the station has been removed right now */
   if (IS_ERR_OR_NULL(sta)) {
@@ -1346,22 +1329,17 @@
          (mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3);
 }
 
-static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm* mvm,
-                                             uint8_t queue) {
-  return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
-         (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
+static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm* mvm, uint8_t queue) {
+  return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) && (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
 }
 
-static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm* mvm,
-                                             uint8_t queue) {
-  return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
-         (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
+static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm* mvm, uint8_t queue) {
+  return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) && (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
 }
 
 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm* mvm) {
   bool nvm_lar = mvm->nvm_data->lar_enabled;
-  bool tlv_lar =
-      fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+  bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
   if (iwlwifi_mod_params.lar_disable) {
     return false;
@@ -1384,8 +1362,7 @@
 }
 
 static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm* mvm) {
-  return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
-         IWL_MVM_BT_COEX_RRC;
+  return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && IWL_MVM_BT_COEX_RRC;
 }
 
 static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm* mvm) {
@@ -1394,8 +1371,7 @@
 }
 
 static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm* mvm) {
-  return fw_has_capa(&mvm->fw->ucode_capa,
-                     IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) &&
+  return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) &&
          IWL_MVM_BT_COEX_MPLUT;
 }
 
@@ -1405,8 +1381,7 @@
 }
 
 static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm* mvm) {
-  return fw_has_capa(&mvm->fw->ucode_capa,
-                     IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
+  return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
 }
 
 static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm* mvm) {
@@ -1453,8 +1428,7 @@
   return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD);
 }
 
-static inline struct agg_tx_status* iwl_mvm_get_agg_status(struct iwl_mvm* mvm,
-                                                           void* tx_resp) {
+static inline struct agg_tx_status* iwl_mvm_get_agg_status(struct iwl_mvm* mvm, void* tx_resp) {
   if (iwl_mvm_has_new_tx_api(mvm)) {
     return &((struct iwl_mvm_tx_resp*)tx_resp)->status;
   } else {
@@ -1469,8 +1443,7 @@
    * temperature THs report.
    */
   return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) &&
-         fw_has_capa(&mvm->fw->ucode_capa,
-                     IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT);
+         fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT);
 #else  /* CONFIG_THERMAL */
   return false;
 #endif /* CONFIG_THERMAL */
@@ -1483,10 +1456,8 @@
 extern const uint8_t iwl_mvm_ac_to_tx_fifo[];
 extern const uint8_t iwl_mvm_ac_to_gen2_tx_fifo[];
 
-static inline uint8_t iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm* mvm,
-                                                enum ieee80211_ac_numbers ac) {
-  return iwl_mvm_has_new_tx_api(mvm) ? iwl_mvm_ac_to_gen2_tx_fifo[ac]
-                                     : iwl_mvm_ac_to_tx_fifo[ac];
+static inline uint8_t iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm* mvm, enum ieee80211_ac_numbers ac) {
+  return iwl_mvm_has_new_tx_api(mvm) ? iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
 }
 
 struct iwl_rate_info {
@@ -1507,58 +1478,42 @@
 zx_status_t iwl_run_init_mvm_ucode(struct iwl_mvm* mvm, bool read_nvm);
 
 /* Utils */
-zx_status_t iwl_mvm_legacy_rate_to_mac80211_idx(uint32_t rate_n_flags,
-                                                enum nl80211_band band,
+zx_status_t iwl_mvm_legacy_rate_to_mac80211_idx(uint32_t rate_n_flags, enum nl80211_band band,
                                                 int* idx);
 void iwl_mvm_hwrate_to_tx_rate(uint32_t rate_n_flags, enum nl80211_band band,
                                struct ieee80211_tx_rate* r);
 uint8_t iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm* mvm);
 uint8_t first_antenna(uint8_t mask);
-uint8_t iwl_mvm_next_antenna(struct iwl_mvm* mvm, uint8_t valid,
-                             uint8_t last_idx);
-void iwl_mvm_get_sync_time(struct iwl_mvm* mvm, uint32_t* gp2,
-                           uint64_t* boottime);
+uint8_t iwl_mvm_next_antenna(struct iwl_mvm* mvm, uint8_t valid, uint8_t last_idx);
+void iwl_mvm_get_sync_time(struct iwl_mvm* mvm, uint32_t* gp2, uint64_t* boottime);
 
 /* Tx / Host Commands */
-zx_status_t __must_check iwl_mvm_send_cmd(struct iwl_mvm* mvm,
-                                          struct iwl_host_cmd* cmd);
-int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm* mvm, uint32_t id,
-                                      uint32_t flags, uint16_t len,
-                                      const void* data);
-zx_status_t __must_check iwl_mvm_send_cmd_status(struct iwl_mvm* mvm,
-                                                 struct iwl_host_cmd* cmd,
+zx_status_t __must_check iwl_mvm_send_cmd(struct iwl_mvm* mvm, struct iwl_host_cmd* cmd);
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm* mvm, uint32_t id, uint32_t flags,
+                                      uint16_t len, const void* data);
+zx_status_t __must_check iwl_mvm_send_cmd_status(struct iwl_mvm* mvm, struct iwl_host_cmd* cmd,
                                                  uint32_t* status);
-int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm* mvm, uint32_t id,
-                                             uint16_t len, const void* data,
-                                             uint32_t* status);
-int iwl_mvm_tx_skb(struct iwl_mvm* mvm, struct sk_buff* skb,
-                   struct ieee80211_sta* sta);
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm* mvm, uint32_t id, uint16_t len,
+                                             const void* data, uint32_t* status);
+int iwl_mvm_tx_skb(struct iwl_mvm* mvm, struct sk_buff* skb, struct ieee80211_sta* sta);
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm* mvm, struct sk_buff* skb);
-void iwl_mvm_set_tx_cmd(struct iwl_mvm* mvm, struct sk_buff* skb,
-                        struct iwl_tx_cmd* tx_cmd,
+void iwl_mvm_set_tx_cmd(struct iwl_mvm* mvm, struct sk_buff* skb, struct iwl_tx_cmd* tx_cmd,
                         struct ieee80211_tx_info* info, uint8_t sta_id);
 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm* mvm, struct iwl_tx_cmd* tx_cmd,
-                             struct ieee80211_tx_info* info,
-                             struct ieee80211_sta* sta, __le16 fc);
+                             struct ieee80211_tx_info* info, struct ieee80211_sta* sta, __le16 fc);
 void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw* hw, struct ieee80211_txq* txq);
-unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm* mvm,
-                                    struct ieee80211_sta* sta,
+unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm* mvm, struct ieee80211_sta* sta,
                                     unsigned int tid);
 
 #ifdef CPTCFG_IWLWIFI_DEBUG
 const char* iwl_mvm_get_tx_fail_reason(uint32_t status);
 #else
-static inline const char* iwl_mvm_get_tx_fail_reason(uint32_t status) {
-  return "";
-}
+static inline const char* iwl_mvm_get_tx_fail_reason(uint32_t status) { return ""; }
 #endif
-int iwl_mvm_flush_tx_path(struct iwl_mvm* mvm, uint32_t tfd_msk,
-                          uint32_t flags);
-int iwl_mvm_flush_sta(struct iwl_mvm* mvm, void* sta, bool internal,
-                      uint32_t flags);
-int iwl_mvm_flush_sta_tids(struct iwl_mvm* mvm, uint32_t sta_id, uint16_t tids,
-                           uint32_t flags);
+int iwl_mvm_flush_tx_path(struct iwl_mvm* mvm, uint32_t tfd_msk, uint32_t flags);
+int iwl_mvm_flush_sta(struct iwl_mvm* mvm, void* sta, bool internal, uint32_t flags);
+int iwl_mvm_flush_sta_tids(struct iwl_mvm* mvm, uint32_t sta_id, uint16_t tids, uint32_t flags);
 
 void iwl_mvm_async_handlers_purge(struct iwl_mvm* mvm);
 
@@ -1579,8 +1534,7 @@
 }
 
 /* Statistics */
-void iwl_mvm_handle_rx_statistics(struct iwl_mvm* mvm,
-                                  struct iwl_rx_packet* pkt);
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm* mvm, struct iwl_rx_packet* pkt);
 void iwl_mvm_rx_statistics(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 int iwl_mvm_request_statistics(struct iwl_mvm* mvm, bool clear);
 void iwl_mvm_accu_radio_stats(struct iwl_mvm* mvm);
@@ -1612,8 +1566,7 @@
   uint32_t valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
   uint32_t valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
 
-  phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS |
-                valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
+  phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS | valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
 
   return mvm->fw->phy_config & phy_config;
 }
@@ -1622,8 +1575,7 @@
 int iwl_mvm_load_d3_fw(struct iwl_mvm* mvm);
 
 int iwl_mvm_mac_setup_register(struct iwl_mvm* mvm);
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm* mvm,
-                                    struct iwl_bcast_filter_cmd* cmd);
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm* mvm, struct iwl_bcast_filter_cmd* cmd);
 #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS
 void iwl_mvm_tx_latency_wk(struct work_struct* wk);
 void iwl_mvm_tx_latency_watchdog_wk(struct work_struct* wk);
@@ -1642,31 +1594,25 @@
                             struct iwl_rx_cmd_buffer* rxb, int queue);
 void iwl_mvm_rx_frame_release(struct iwl_mvm* mvm, struct napi_struct* napi,
                               struct iwl_rx_cmd_buffer* rxb, int queue);
-int iwl_mvm_notify_rx_queue(struct iwl_mvm* mvm, uint32_t rxq_mask,
-                            const uint8_t* data, uint32_t count);
-void iwl_mvm_rx_queue_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb,
-                            int queue);
+int iwl_mvm_notify_rx_queue(struct iwl_mvm* mvm, uint32_t rxq_mask, const uint8_t* data,
+                            uint32_t count);
+void iwl_mvm_rx_queue_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb, int queue);
 void iwl_mvm_rx_tx_cmd(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm* mvm,
-                                   struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 void iwl_mvm_rx_ba_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm* mvm,
-                                   struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 void iwl_mvm_rx_fw_error(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_rx_card_state_notif(struct iwl_mvm* mvm,
-                                 struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_rx_mfuart_notif(struct iwl_mvm* mvm,
-                             struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm* mvm,
-                                     struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 
 /* MVM PHY */
 int iwl_mvm_phy_ctxt_add(struct iwl_mvm* mvm, struct iwl_mvm_phy_ctxt* ctxt,
-                         struct cfg80211_chan_def* chandef,
-                         uint8_t chains_static, uint8_t chains_dynamic);
+                         struct cfg80211_chan_def* chandef, uint8_t chains_static,
+                         uint8_t chains_dynamic);
 int iwl_mvm_phy_ctxt_changed(struct iwl_mvm* mvm, struct iwl_mvm_phy_ctxt* ctxt,
-                             struct cfg80211_chan_def* chandef,
-                             uint8_t chains_static, uint8_t chains_dynamic);
+                             struct cfg80211_chan_def* chandef, uint8_t chains_static,
+                             uint8_t chains_dynamic);
 void iwl_mvm_phy_ctxt_ref(struct iwl_mvm* mvm, struct iwl_mvm_phy_ctxt* ctxt);
 void iwl_mvm_phy_ctxt_unref(struct iwl_mvm* mvm, struct iwl_mvm_phy_ctxt* ctxt);
 int iwl_mvm_phy_ctx_count(struct iwl_mvm* mvm);
@@ -1676,38 +1622,27 @@
 /* MAC (virtual interface) programming */
 int iwl_mvm_mac_ctxt_init(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
 int iwl_mvm_mac_ctxt_add(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
-int iwl_mvm_mac_ctxt_changed(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                             bool force_assoc_off,
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm* mvm, struct ieee80211_vif* vif, bool force_assoc_off,
                              const uint8_t* bssid_override);
 int iwl_mvm_mac_ctxt_remove(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
-int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm* mvm,
-                                    struct ieee80211_vif* vif);
-void iwl_mvm_rx_beacon_notif(struct iwl_mvm* mvm,
-                             struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm* mvm,
-                                     struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm* mvm,
-                                    struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm* mvm,
-                               struct iwl_rx_cmd_buffer* rxb);
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 void iwl_mvm_sta_pm_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_window_status_notif(struct iwl_mvm* mvm,
-                                 struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm* mvm,
-                                    struct ieee80211_vif* vif);
-void iwl_mvm_probe_resp_data_notif(struct iwl_mvm* mvm,
-                                   struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm* mvm,
-                                      struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_window_status_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
+void iwl_mvm_probe_resp_data_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 /* Bindings */
 int iwl_mvm_binding_add_vif(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
 int iwl_mvm_binding_remove_vif(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
 
 /* Quota management */
 static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm* mvm) {
-  return iwl_mvm_has_quota_low_latency(mvm)
-             ? sizeof(struct iwl_time_quota_cmd)
-             : sizeof(struct iwl_time_quota_cmd_v1);
+  return iwl_mvm_has_quota_low_latency(mvm) ? sizeof(struct iwl_time_quota_cmd)
+                                            : sizeof(struct iwl_time_quota_cmd_v1);
 }
 
 static inline struct iwl_time_quota_data* iwl_mvm_quota_cmd_get_quota(
@@ -1726,8 +1661,7 @@
                           struct ieee80211_vif* disabled_vif);
 
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
-int iwl_mvm_dhc_quota_enforce(struct iwl_mvm* mvm, struct iwl_mvm_vif* vif,
-                              int quota_percent);
+int iwl_mvm_dhc_quota_enforce(struct iwl_mvm* mvm, struct iwl_mvm_vif* vif, int quota_percent);
 #endif
 
 #ifdef CPTCFG_IWLMVM_ADVANCED_QUOTA_MGMT
@@ -1743,17 +1677,17 @@
   IWL_MVM_QUOTA_ERROR,
 };
 
-enum iwl_mvm_quota_result iwl_mvm_calculate_advanced_quotas(
-    struct iwl_mvm* mvm, struct ieee80211_vif* disabled_vif, bool force_update,
-    struct iwl_time_quota_cmd* cmd);
-ssize_t iwl_dbgfs_quota_status_read(struct file* file, char __user* user_buf,
-                                    size_t count, loff_t* ppos);
+enum iwl_mvm_quota_result iwl_mvm_calculate_advanced_quotas(struct iwl_mvm* mvm,
+                                                            struct ieee80211_vif* disabled_vif,
+                                                            bool force_update,
+                                                            struct iwl_time_quota_cmd* cmd);
+ssize_t iwl_dbgfs_quota_status_read(struct file* file, char __user* user_buf, size_t count,
+                                    loff_t* ppos);
 #endif
 
 /* Scanning */
 int iwl_mvm_reg_scan_start(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                           struct cfg80211_scan_request* req,
-                           struct ieee80211_scan_ies* ies);
+                           struct cfg80211_scan_request* req, struct ieee80211_scan_ies* ies);
 int iwl_mvm_scan_size(struct iwl_mvm* mvm);
 int iwl_mvm_scan_stop(struct iwl_mvm* mvm, int type, bool notify);
 int iwl_mvm_max_scan_ie_len(struct iwl_mvm* mvm);
@@ -1761,22 +1695,17 @@
 void iwl_mvm_scan_timeout_wk(struct work_struct* work);
 
 /* Scheduled scan */
-void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm* mvm,
-                                         struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm* mvm,
-                                              struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 int iwl_mvm_sched_scan_start(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                              struct cfg80211_sched_scan_request* req,
                              struct ieee80211_scan_ies* ies, int type);
-void iwl_mvm_rx_scan_match_found(struct iwl_mvm* mvm,
-                                 struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 
 /* UMAC scan */
 int iwl_mvm_config_scan(struct iwl_mvm* mvm);
-void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm* mvm,
-                                         struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm* mvm,
-                                              struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 
 /* MVM debugfs */
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
@@ -1784,14 +1713,11 @@
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
 void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
 #else
-static inline int iwl_mvm_dbgfs_register(struct iwl_mvm* mvm,
-                                         struct dentry* dbgfs_dir) {
+static inline int iwl_mvm_dbgfs_register(struct iwl_mvm* mvm, struct dentry* dbgfs_dir) {
   return 0;
 }
-static inline void iwl_mvm_vif_dbgfs_register(struct iwl_mvm* mvm,
-                                              struct ieee80211_vif* vif) {}
-static inline void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm* mvm,
-                                           struct ieee80211_vif* vif) {}
+static inline void iwl_mvm_vif_dbgfs_register(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {}
+static inline void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {}
 #endif /* CPTCFG_IWLWIFI_DEBUGFS */
 
 /* rate scaling */
@@ -1805,12 +1731,11 @@
 int iwl_mvm_power_update_device(struct iwl_mvm* mvm);
 int iwl_mvm_power_update_mac(struct iwl_mvm* mvm);
 int iwl_mvm_power_update_ps(struct iwl_mvm* mvm);
-int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                                 char* buf, int bufsz);
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm* mvm, struct ieee80211_vif* vif, char* buf,
+                                 int bufsz);
 
 void iwl_mvm_power_vif_assoc(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
-void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm* mvm,
-                                              struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 
 #ifdef CPTCFG_IWLWIFI_LEDS
 int iwl_mvm_leds_init(struct iwl_mvm* mvm);
@@ -1828,41 +1753,31 @@
 void iwl_mvm_set_wakeup(struct ieee80211_hw* hw, bool enabled);
 void iwl_mvm_set_rekey_data(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                             struct cfg80211_gtk_rekey_data* data);
-void iwl_mvm_ipv6_addr_change(struct ieee80211_hw* hw,
-                              struct ieee80211_vif* vif,
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                               struct inet6_dev* idev);
-void iwl_mvm_set_default_unicast_key(struct ieee80211_hw* hw,
-                                     struct ieee80211_vif* vif, int idx);
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw* hw, struct ieee80211_vif* vif, int idx);
 extern const struct file_operations iwl_dbgfs_d3_test_ops;
 struct iwl_wowlan_status* iwl_mvm_send_wowlan_get_status(struct iwl_mvm* mvm);
 #ifdef CONFIG_PM
-int iwl_mvm_wowlan_config_key_params(struct iwl_mvm* mvm,
-                                     struct ieee80211_vif* vif, bool host_awake,
-                                     uint32_t cmd_flags);
+int iwl_mvm_wowlan_config_key_params(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
+                                     bool host_awake, uint32_t cmd_flags);
 void iwl_mvm_d0i3_update_keys(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                               struct iwl_wowlan_status* status);
-void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm* mvm,
-                                 struct ieee80211_vif* vif);
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
 #else
-static inline int iwl_mvm_wowlan_config_key_params(struct iwl_mvm* mvm,
-                                                   struct ieee80211_vif* vif,
-                                                   bool host_awake,
-                                                   uint32_t cmd_flags) {
+static inline int iwl_mvm_wowlan_config_key_params(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
+                                                   bool host_awake, uint32_t cmd_flags) {
   return 0;
 }
 
-static inline void iwl_mvm_d0i3_update_keys(struct iwl_mvm* mvm,
-                                            struct ieee80211_vif* vif,
+static inline void iwl_mvm_d0i3_update_keys(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                             struct iwl_wowlan_status* status) {}
 
-static inline void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm* mvm,
-                                               struct ieee80211_vif* vif) {}
+static inline void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {}
 #endif
-void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta* mvm_ap_sta,
-                                struct iwl_wowlan_config_cmd* cmd);
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta* mvm_ap_sta, struct iwl_wowlan_config_cmd* cmd);
 int iwl_mvm_send_proto_offload(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                               bool disable_offloading, bool offload_ns,
-                               uint32_t cmd_flags);
+                               bool disable_offloading, bool offload_ns, uint32_t cmd_flags);
 
 /* D0i3 */
 void iwl_mvm_ref(struct iwl_mvm* mvm, enum iwl_mvm_ref_type ref_type);
@@ -1879,39 +1794,31 @@
 
 /* BT Coex */
 int iwl_mvm_send_bt_init_conf(struct iwl_mvm* mvm);
-void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm* mvm,
-                              struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 void iwl_mvm_bt_rssi_event(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                            enum ieee80211_rssi_event_data);
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm* mvm);
-uint16_t iwl_mvm_coex_agg_time_limit(struct iwl_mvm* mvm,
-                                     struct ieee80211_sta* sta);
-bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm* mvm,
-                                     struct ieee80211_sta* sta);
+uint16_t iwl_mvm_coex_agg_time_limit(struct iwl_mvm* mvm, struct ieee80211_sta* sta);
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm* mvm, struct ieee80211_sta* sta);
 bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm* mvm, uint8_t ant);
 bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm* mvm);
-bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm* mvm,
-                                    enum nl80211_band band);
-uint8_t iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm* mvm,
-                                           uint8_t enabled_ants);
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm* mvm, enum nl80211_band band);
+uint8_t iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm* mvm, uint8_t enabled_ants);
 uint8_t iwl_mvm_bt_coex_tx_prio(struct iwl_mvm* mvm, struct ieee80211_hdr* hdr,
                                 struct ieee80211_tx_info* info, uint8_t ac);
 
 /* beacon filtering */
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-void iwl_mvm_beacon_filter_debugfs_parameters(
-    struct ieee80211_vif* vif, struct iwl_beacon_filter_cmd* cmd);
+void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif* vif,
+                                              struct iwl_beacon_filter_cmd* cmd);
 #else
-static inline void iwl_mvm_beacon_filter_debugfs_parameters(
-    struct ieee80211_vif* vif, struct iwl_beacon_filter_cmd* cmd) {}
+static inline void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif* vif,
+                                                            struct iwl_beacon_filter_cmd* cmd) {}
 #endif
-int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm* mvm,
-                                   struct ieee80211_vif* vif, bool enable,
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm* mvm, struct ieee80211_vif* vif, bool enable,
                                    uint32_t flags);
-int iwl_mvm_enable_beacon_filter(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                                 uint32_t flags);
-int iwl_mvm_disable_beacon_filter(struct iwl_mvm* mvm,
-                                  struct ieee80211_vif* vif, uint32_t flags);
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm* mvm, struct ieee80211_vif* vif, uint32_t flags);
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm* mvm, struct ieee80211_vif* vif, uint32_t flags);
 /* SMPS */
 void iwl_mvm_update_smps(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                          enum iwl_mvm_smps_type_request req_type,
@@ -1919,14 +1826,12 @@
 bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm* mvm);
 
 /* Low latency */
-int iwl_mvm_update_low_latency(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                               bool low_latency,
+int iwl_mvm_update_low_latency(struct iwl_mvm* mvm, struct ieee80211_vif* vif, bool low_latency,
                                enum iwl_mvm_low_latency_cause cause);
 /* get SystemLowLatencyMode - only needed for beacon threshold? */
 bool iwl_mvm_low_latency(struct iwl_mvm* mvm);
 bool iwl_mvm_low_latency_band(struct iwl_mvm* mvm, enum nl80211_band band);
-void iwl_mvm_send_low_latency_cmd(struct iwl_mvm* mvm, bool low_latency,
-                                  uint16_t mac_id);
+void iwl_mvm_send_low_latency_cmd(struct iwl_mvm* mvm, bool low_latency, uint16_t mac_id);
 
 /* get VMACLowLatencyMode */
 static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif* mvmvif) {
@@ -1943,9 +1848,8 @@
   return mvmvif->low_latency;
 }
 
-static inline void iwl_mvm_vif_set_low_latency(
-    struct iwl_mvm_vif* mvmvif, bool set,
-    enum iwl_mvm_low_latency_cause cause) {
+static inline void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif* mvmvif, bool set,
+                                               enum iwl_mvm_low_latency_cause cause) {
   if (set) {
     mvmvif->low_latency |= cause;
   } else {
@@ -1957,8 +1861,7 @@
  * command queue, which can't be flushed.
  */
 static inline uint32_t iwl_mvm_flushable_queues(struct iwl_mvm* mvm) {
-  return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
-          ~BIT(IWL_MVM_DQA_CMD_QUEUE));
+  return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & ~BIT(IWL_MVM_DQA_CMD_QUEUE));
 }
 
 static inline void iwl_mvm_stop_device(struct iwl_mvm* mvm) {
@@ -1977,8 +1880,8 @@
 }
 
 /* Re-configure the SCD for a queue that has already been configured */
-zx_status_t iwl_mvm_reconfig_scd(struct iwl_mvm* mvm, int queue, int fifo, int sta_id,
-                                 int tid, int frame_limit, uint16_t ssn);
+zx_status_t iwl_mvm_reconfig_scd(struct iwl_mvm* mvm, int queue, int fifo, int sta_id, int tid,
+                                 int frame_limit, uint16_t ssn);
 
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm* mvm, uint32_t backoff);
@@ -2006,8 +1909,7 @@
   IWL_FM_CHANGE_CHANCTX = 2,
 };
 
-int iwl_mvm_fm_set_tx_power(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                            int8_t txpower);
+int iwl_mvm_fm_set_tx_power(struct iwl_mvm* mvm, struct ieee80211_vif* vif, int8_t txpower);
 void iwl_mvm_fm_notify_channel_change(struct ieee80211_chanctx_conf* ctx,
                                       enum iwl_fm_chan_change_action action);
 void iwl_mvm_fm_notify_current_dcdc(void);
@@ -2016,24 +1918,18 @@
 #endif
 
 /* Location Aware Regulatory */
-struct iwl_mcc_update_resp* iwl_mvm_update_mcc(struct iwl_mvm* mvm,
-                                               const char* alpha2,
+struct iwl_mcc_update_resp* iwl_mvm_update_mcc(struct iwl_mvm* mvm, const char* alpha2,
                                                enum iwl_mcc_source src_id);
 int iwl_mvm_init_mcc(struct iwl_mvm* mvm);
-void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm* mvm,
-                                struct iwl_rx_cmd_buffer* rxb);
-struct ieee80211_regdomain* iwl_mvm_get_regdomain(struct wiphy* wiphy,
-                                                  const char* alpha2,
-                                                  enum iwl_mcc_source src_id,
-                                                  bool* changed);
-struct ieee80211_regdomain* iwl_mvm_get_current_regdomain(struct iwl_mvm* mvm,
-                                                          bool* changed);
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+struct ieee80211_regdomain* iwl_mvm_get_regdomain(struct wiphy* wiphy, const char* alpha2,
+                                                  enum iwl_mcc_source src_id, bool* changed);
+struct ieee80211_regdomain* iwl_mvm_get_current_regdomain(struct iwl_mvm* mvm, bool* changed);
 int iwl_mvm_init_fw_regd(struct iwl_mvm* mvm);
 void iwl_mvm_update_changed_regdom(struct iwl_mvm* mvm);
 
 /* smart fifo */
-int iwl_mvm_sf_update(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                      bool added_vif);
+int iwl_mvm_sf_update(struct iwl_mvm* mvm, struct ieee80211_vif* vif, bool added_vif);
 
 void iwl_mvm_set_wiphy_vendor_commands(struct wiphy* wiphy);
 
@@ -2047,34 +1943,27 @@
 
 int iwl_mvm_tdls_sta_count(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
 void iwl_mvm_teardown_tdls_peers(struct iwl_mvm* mvm);
-void iwl_mvm_recalc_tdls_state(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                               bool sta_added);
-void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw* hw,
-                                           struct ieee80211_vif* vif);
-int iwl_mvm_tdls_channel_switch(struct ieee80211_hw* hw,
-                                struct ieee80211_vif* vif,
+void iwl_mvm_recalc_tdls_state(struct iwl_mvm* mvm, struct ieee80211_vif* vif, bool sta_added);
+void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw* hw, struct ieee80211_vif* vif);
+int iwl_mvm_tdls_channel_switch(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                                 struct ieee80211_sta* sta, uint8_t oper_class,
-                                struct cfg80211_chan_def* chandef,
-                                struct sk_buff* tmpl_skb, uint32_t ch_sw_tm_ie);
-void iwl_mvm_tdls_recv_channel_switch(
-    struct ieee80211_hw* hw, struct ieee80211_vif* vif,
-    struct ieee80211_tdls_ch_sw_params* params);
-void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw* hw,
-                                        struct ieee80211_vif* vif,
+                                struct cfg80211_chan_def* chandef, struct sk_buff* tmpl_skb,
+                                uint32_t ch_sw_tm_ie);
+void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
+                                      struct ieee80211_tdls_ch_sw_params* params);
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                                         struct ieee80211_sta* sta);
 void iwl_mvm_rx_tdls_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 void iwl_mvm_tdls_ch_switch_work(struct work_struct* work);
 
 #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE
-void iwl_mvm_tdls_peer_cache_pkt(struct iwl_mvm* mvm, struct ieee80211_hdr* hdr,
-                                 uint32_t len, int rxq);
-void iwl_mvm_tdls_peer_cache_clear(struct iwl_mvm* mvm,
-                                   struct ieee80211_vif* vif);
-struct iwl_mvm_tdls_peer_counter* iwl_mvm_tdls_peer_cache_find(
-    struct iwl_mvm* mvm, const uint8_t* addr);
+void iwl_mvm_tdls_peer_cache_pkt(struct iwl_mvm* mvm, struct ieee80211_hdr* hdr, uint32_t len,
+                                 int rxq);
+void iwl_mvm_tdls_peer_cache_clear(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
+struct iwl_mvm_tdls_peer_counter* iwl_mvm_tdls_peer_cache_find(struct iwl_mvm* mvm,
+                                                               const uint8_t* addr);
 #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */
-void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm* mvm,
-                                     struct iwl_mvm_internal_rxq_notif* notif,
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm* mvm, struct iwl_mvm_internal_rxq_notif* notif,
                                      uint32_t size);
 void iwl_mvm_reorder_timer_expired(struct timer_list* t);
 struct ieee80211_vif* iwl_mvm_get_bss_vif(struct iwl_mvm* mvm);
@@ -2102,15 +1991,11 @@
 #endif
 
 void iwl_mvm_nic_restart(struct iwl_mvm* mvm, bool fw_error);
-unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm* mvm,
-                                    struct ieee80211_vif* vif, bool tdls,
+unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm* mvm, struct ieee80211_vif* vif, bool tdls,
                                     bool cmd_q);
-void iwl_mvm_connection_loss(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                             const char* errmsg);
-void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm* mvm,
-                                          struct ieee80211_vif* vif,
-                                          const struct ieee80211_sta* sta,
-                                          uint16_t tid);
+void iwl_mvm_connection_loss(struct iwl_mvm* mvm, struct ieee80211_vif* vif, const char* errmsg);
+void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
+                                          const struct ieee80211_sta* sta, uint16_t tid);
 
 #ifdef CPTCFG_IWLMVM_VENDOR_CMDS
 void iwl_mvm_send_tcm_event(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
@@ -2123,17 +2008,14 @@
 
 /* NAN */
 void iwl_mvm_nan_match(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
-void iwl_mvm_nan_de_term_notif(struct iwl_mvm* mvm,
-                               struct iwl_rx_cmd_buffer* rxb);
+void iwl_mvm_nan_de_term_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 int iwl_mvm_start_nan(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                       struct cfg80211_nan_conf* conf);
 int iwl_mvm_stop_nan(struct ieee80211_hw* hw, struct ieee80211_vif* vif);
 int iwl_mvm_add_nan_func(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                          const struct cfg80211_nan_func* nan_func);
-void iwl_mvm_del_nan_func(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
-                          uint8_t instance_id);
-int iwl_mvm_nan_config_nan_faw_cmd(struct iwl_mvm* mvm,
-                                   struct cfg80211_chan_def* chandef,
+void iwl_mvm_del_nan_func(struct ieee80211_hw* hw, struct ieee80211_vif* vif, uint8_t instance_id);
+int iwl_mvm_nan_config_nan_faw_cmd(struct iwl_mvm* mvm, struct cfg80211_chan_def* chandef,
                                    uint8_t slots);
 
 int iwl_mvm_sar_select_profile(struct iwl_mvm* mvm, int prof_a, int prof_b);
@@ -2145,10 +2027,8 @@
 
 /* 11ax Softap Test Mode */
 #ifdef CPTCFG_IWLMVM_AX_SOFTAP_TESTMODE
-void iwl_mvm_ax_softap_testmode_sta_add_debugfs(struct ieee80211_hw* hw,
-                                                struct ieee80211_vif* vif,
-                                                struct ieee80211_sta* sta,
-                                                struct dentry* dir);
+void iwl_mvm_ax_softap_testmode_sta_add_debugfs(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
+                                                struct ieee80211_sta* sta, struct dentry* dir);
 #endif
 
 // The entry point for ops.c.
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/nan.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/nan.c
index 8cb57ea..4bfc4ce 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/nan.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/nan.c
@@ -31,10 +31,11 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include "fw/api/nan.h"
+
 #include <linux/etherdevice.h>
 #include <net/cfg80211.h>
 
-#include "fw/api/nan.h"
 #include "mvm.h"
 
 #define NAN_WARMUP_TIMEOUT_USEC (120000000ULL)
@@ -42,575 +43,603 @@
 #define NAN_CHANNEL_52 (149)
 
 enum srf_type {
-    SRF_BF_TYPE = BIT(0),
-    SRF_INCLUDE = BIT(1),
-    SRF_BLOOM_FILTER_IDX = BIT(2) | BIT(3),
+  SRF_BF_TYPE = BIT(0),
+  SRF_INCLUDE = BIT(1),
+  SRF_BLOOM_FILTER_IDX = BIT(2) | BIT(3),
 };
 
 static bool iwl_mvm_can_beacon(struct ieee80211_vif* vif, enum nl80211_band band, uint8_t channel) {
-    struct wiphy* wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
-    int freq = ieee80211_channel_to_frequency(channel, band);
-    struct ieee80211_channel* chan = ieee80211_get_channel(wiphy, freq);
-    struct cfg80211_chan_def def;
+  struct wiphy* wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
+  int freq = ieee80211_channel_to_frequency(channel, band);
+  struct ieee80211_channel* chan = ieee80211_get_channel(wiphy, freq);
+  struct cfg80211_chan_def def;
 
-    if (!chan) { return false; }
+  if (!chan) {
+    return false;
+  }
 
-    cfg80211_chandef_create(&def, chan, NL80211_CHAN_NO_HT);
-    return cfg80211_reg_can_beacon(wiphy, &def, vif->type);
+  cfg80211_chandef_create(&def, chan, NL80211_CHAN_NO_HT);
+  return cfg80211_reg_can_beacon(wiphy, &def, vif->type);
 }
 
 static inline bool iwl_mvm_nan_is_ver2(struct ieee80211_hw* hw) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
 
-    return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NAN2_VER2);
+  return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NAN2_VER2);
 }
 
 static inline size_t iwl_mvm_nan_cfg_cmd_len(struct ieee80211_hw* hw) {
-    return iwl_mvm_nan_is_ver2(hw) ? sizeof(struct iwl_nan_cfg_cmd_v2)
-                                   : sizeof(struct iwl_nan_cfg_cmd);
+  return iwl_mvm_nan_is_ver2(hw) ? sizeof(struct iwl_nan_cfg_cmd_v2)
+                                 : sizeof(struct iwl_nan_cfg_cmd);
 }
 
 static inline struct iwl_nan_umac_cfg* iwl_mvm_nan_get_umac_cfg(struct ieee80211_hw* hw,
                                                                 void* nan_cfg_cmd) {
-    return iwl_mvm_nan_is_ver2(hw) ? &((struct iwl_nan_cfg_cmd_v2*)nan_cfg_cmd)->umac_cfg
-                                   : &((struct iwl_nan_cfg_cmd*)nan_cfg_cmd)->umac_cfg;
+  return iwl_mvm_nan_is_ver2(hw) ? &((struct iwl_nan_cfg_cmd_v2*)nan_cfg_cmd)->umac_cfg
+                                 : &((struct iwl_nan_cfg_cmd*)nan_cfg_cmd)->umac_cfg;
 }
 
 static inline struct iwl_nan_testbed_cfg* iwl_mvm_nan_get_tb_cfg(struct ieee80211_hw* hw,
                                                                  void* nan_cfg_cmd) {
-    return iwl_mvm_nan_is_ver2(hw) ? &((struct iwl_nan_cfg_cmd_v2*)nan_cfg_cmd)->tb_cfg
-                                   : &((struct iwl_nan_cfg_cmd*)nan_cfg_cmd)->tb_cfg;
+  return iwl_mvm_nan_is_ver2(hw) ? &((struct iwl_nan_cfg_cmd_v2*)nan_cfg_cmd)->tb_cfg
+                                 : &((struct iwl_nan_cfg_cmd*)nan_cfg_cmd)->tb_cfg;
 }
 
 static inline struct iwl_nan_nan2_cfg* iwl_mvm_nan_get_nan2_cfg(struct ieee80211_hw* hw,
                                                                 void* nan_cfg_cmd) {
-    return iwl_mvm_nan_is_ver2(hw) ? &((struct iwl_nan_cfg_cmd_v2*)nan_cfg_cmd)->nan2_cfg
-                                   : &((struct iwl_nan_cfg_cmd*)nan_cfg_cmd)->nan2_cfg;
+  return iwl_mvm_nan_is_ver2(hw) ? &((struct iwl_nan_cfg_cmd_v2*)nan_cfg_cmd)->nan2_cfg
+                                 : &((struct iwl_nan_cfg_cmd*)nan_cfg_cmd)->nan2_cfg;
 }
 
 int iwl_mvm_start_nan(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                       struct cfg80211_nan_conf* conf) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    void* cmd;
-    struct iwl_nan_umac_cfg* umac_cfg;
-    struct iwl_nan_testbed_cfg* tb_cfg;
-    struct iwl_nan_nan2_cfg* nan2_cfg;
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    int ret = 0;
-    uint16_t cdw = 0;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  void* cmd;
+  struct iwl_nan_umac_cfg* umac_cfg;
+  struct iwl_nan_testbed_cfg* tb_cfg;
+  struct iwl_nan_nan2_cfg* nan2_cfg;
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  int ret = 0;
+  uint16_t cdw = 0;
 
-    IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Start NAN\n");
+  IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Start NAN\n");
 
-    /* 2GHz is mandatory and nl80211 should make sure it is set.
-     * Warn and add 2GHz if this happens anyway.
-     */
-    if (WARN_ON(conf->bands && !(conf->bands & BIT(NL80211_BAND_2GHZ)))) { return -EINVAL; }
+  /* 2GHz is mandatory and nl80211 should make sure it is set.
+   * Warn and add 2GHz if this happens anyway.
+   */
+  if (WARN_ON(conf->bands && !(conf->bands & BIT(NL80211_BAND_2GHZ)))) {
+    return -EINVAL;
+  }
 
-    conf->bands |= BIT(NL80211_BAND_2GHZ);
-    cmd = kzalloc(iwl_mvm_nan_cfg_cmd_len(hw), GFP_KERNEL);
-    if (!cmd) { return -ENOMEM; }
+  conf->bands |= BIT(NL80211_BAND_2GHZ);
+  cmd = kzalloc(iwl_mvm_nan_cfg_cmd_len(hw), GFP_KERNEL);
+  if (!cmd) {
+    return -ENOMEM;
+  }
 
-    umac_cfg = iwl_mvm_nan_get_umac_cfg(hw, cmd);
-    tb_cfg = iwl_mvm_nan_get_tb_cfg(hw, cmd);
-    nan2_cfg = iwl_mvm_nan_get_nan2_cfg(hw, cmd);
+  umac_cfg = iwl_mvm_nan_get_umac_cfg(hw, cmd);
+  tb_cfg = iwl_mvm_nan_get_tb_cfg(hw, cmd);
+  nan2_cfg = iwl_mvm_nan_get_nan2_cfg(hw, cmd);
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    umac_cfg->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
-    umac_cfg->tsf_id = cpu_to_le32(mvmvif->tsf_id);
-    umac_cfg->beacon_template_id = cpu_to_le32(mvmvif->id);
+  umac_cfg->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+  umac_cfg->tsf_id = cpu_to_le32(mvmvif->tsf_id);
+  umac_cfg->beacon_template_id = cpu_to_le32(mvmvif->id);
 
-    ether_addr_copy(umac_cfg->node_addr, vif->addr);
-    umac_cfg->sta_id = cpu_to_le32(mvm->aux_sta.sta_id);
-    umac_cfg->master_pref = conf->master_pref;
+  ether_addr_copy(umac_cfg->node_addr, vif->addr);
+  umac_cfg->sta_id = cpu_to_le32(mvm->aux_sta.sta_id);
+  umac_cfg->master_pref = conf->master_pref;
 
-    if (conf->bands & BIT(NL80211_BAND_2GHZ)) {
-        if (!iwl_mvm_can_beacon(vif, NL80211_BAND_2GHZ, NAN_CHANNEL_24)) {
-            IWL_ERR(mvm, "Can't beacon on %d\n", NAN_CHANNEL_24);
-            ret = -EINVAL;
-            goto out;
-        }
-
-        tb_cfg->chan24 = NAN_CHANNEL_24;
-        cdw |= conf->cdw_2g;
+  if (conf->bands & BIT(NL80211_BAND_2GHZ)) {
+    if (!iwl_mvm_can_beacon(vif, NL80211_BAND_2GHZ, NAN_CHANNEL_24)) {
+      IWL_ERR(mvm, "Can't beacon on %d\n", NAN_CHANNEL_24);
+      ret = -EINVAL;
+      goto out;
     }
 
-    if (conf->bands & BIT(NL80211_BAND_5GHZ)) {
-        if (!iwl_mvm_can_beacon(vif, NL80211_BAND_5GHZ, NAN_CHANNEL_52)) {
-            IWL_ERR(mvm, "Can't beacon on %d\n", NAN_CHANNEL_52);
-            ret = -EINVAL;
-            goto out;
-        }
+    tb_cfg->chan24 = NAN_CHANNEL_24;
+    cdw |= conf->cdw_2g;
+  }
 
-        tb_cfg->chan52 = NAN_CHANNEL_52;
-        cdw |= conf->cdw_5g << 3;
+  if (conf->bands & BIT(NL80211_BAND_5GHZ)) {
+    if (!iwl_mvm_can_beacon(vif, NL80211_BAND_5GHZ, NAN_CHANNEL_52)) {
+      IWL_ERR(mvm, "Can't beacon on %d\n", NAN_CHANNEL_52);
+      ret = -EINVAL;
+      goto out;
     }
 
-    tb_cfg->warmup_timer = cpu_to_le32(NAN_WARMUP_TIMEOUT_USEC);
-    tb_cfg->op_bands = 3;
-    nan2_cfg->cdw = cpu_to_le16(cdw);
+    tb_cfg->chan52 = NAN_CHANNEL_52;
+    cdw |= conf->cdw_5g << 3;
+  }
 
-    if ((conf->bands & BIT(NL80211_BAND_2GHZ)) && (conf->bands & BIT(NL80211_BAND_5GHZ))) {
-        umac_cfg->dual_band = cpu_to_le32(1);
-    }
+  tb_cfg->warmup_timer = cpu_to_le32(NAN_WARMUP_TIMEOUT_USEC);
+  tb_cfg->op_bands = 3;
+  nan2_cfg->cdw = cpu_to_le16(cdw);
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_CONFIG_CMD, NAN_GROUP, 0), 0,
-                               iwl_mvm_nan_cfg_cmd_len(hw), cmd);
+  if ((conf->bands & BIT(NL80211_BAND_2GHZ)) && (conf->bands & BIT(NL80211_BAND_5GHZ))) {
+    umac_cfg->dual_band = cpu_to_le32(1);
+  }
 
-    if (!ret) { mvm->nan_vif = vif; }
+  ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_CONFIG_CMD, NAN_GROUP, 0), 0,
+                             iwl_mvm_nan_cfg_cmd_len(hw), cmd);
+
+  if (!ret) {
+    mvm->nan_vif = vif;
+  }
 
 out:
-    mutex_unlock(&mvm->mutex);
-    kfree(cmd);
+  mutex_unlock(&mvm->mutex);
+  kfree(cmd);
 
-    return ret;
+  return ret;
 }
 
 int iwl_mvm_stop_nan(struct ieee80211_hw* hw, struct ieee80211_vif* vif) {
-    void* cmd;
-    struct iwl_nan_umac_cfg* umac_cfg;
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    int ret = 0;
+  void* cmd;
+  struct iwl_nan_umac_cfg* umac_cfg;
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  int ret = 0;
 
-    IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Stop NAN\n");
+  IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Stop NAN\n");
 
-    cmd = kzalloc(iwl_mvm_nan_cfg_cmd_len(hw), GFP_KERNEL);
-    if (!cmd) { return -ENOMEM; }
+  cmd = kzalloc(iwl_mvm_nan_cfg_cmd_len(hw), GFP_KERNEL);
+  if (!cmd) {
+    return -ENOMEM;
+  }
 
-    umac_cfg = iwl_mvm_nan_get_umac_cfg(hw, cmd);
+  umac_cfg = iwl_mvm_nan_get_umac_cfg(hw, cmd);
 
-    mutex_lock(&mvm->mutex);
-    umac_cfg->action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+  mutex_lock(&mvm->mutex);
+  umac_cfg->action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_CONFIG_CMD, NAN_GROUP, 0), 0,
-                               iwl_mvm_nan_cfg_cmd_len(hw), cmd);
+  ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_CONFIG_CMD, NAN_GROUP, 0), 0,
+                             iwl_mvm_nan_cfg_cmd_len(hw), cmd);
 
-    if (!ret) { mvm->nan_vif = NULL; }
-    mutex_unlock(&mvm->mutex);
-    kfree(cmd);
+  if (!ret) {
+    mvm->nan_vif = NULL;
+  }
+  mutex_unlock(&mvm->mutex);
+  kfree(cmd);
 
-    return ret;
+  return ret;
 }
 
 static enum iwl_fw_nan_func_type iwl_fw_nan_func_type(enum nl80211_nan_function_type type) {
-    switch (type) {
+  switch (type) {
     case NL80211_NAN_FUNC_PUBLISH:
-        return IWL_NAN_DE_FUNC_PUBLISH;
+      return IWL_NAN_DE_FUNC_PUBLISH;
     case NL80211_NAN_FUNC_SUBSCRIBE:
-        return IWL_NAN_DE_FUNC_SUBSCRIBE;
+      return IWL_NAN_DE_FUNC_SUBSCRIBE;
     case NL80211_NAN_FUNC_FOLLOW_UP:
-        return IWL_NAN_DE_FUNC_FOLLOW_UP;
+      return IWL_NAN_DE_FUNC_FOLLOW_UP;
     default:
-        return IWL_NAN_DE_FUNC_NOT_VALID;
-    }
+      return IWL_NAN_DE_FUNC_NOT_VALID;
+  }
 }
 
 static uint8_t iwl_mvm_get_match_filter_len(struct cfg80211_nan_func_filter* filters,
                                             uint8_t num_filters) {
-    int i;
-    unsigned int len = 0;
+  int i;
+  unsigned int len = 0;
 
-    len += num_filters;
-    for (i = 0; i < num_filters; i++) {
-        len += filters[i].len;
-    }
+  len += num_filters;
+  for (i = 0; i < num_filters; i++) {
+    len += filters[i].len;
+  }
 
-    if (WARN_ON_ONCE(len > U8_MAX)) { return 0; }
+  if (WARN_ON_ONCE(len > U8_MAX)) {
+    return 0;
+  }
 
-    return len;
+  return len;
 }
 
 static void iwl_mvm_copy_filters(struct cfg80211_nan_func_filter* filters, uint8_t num_filters,
                                  uint8_t* cmd_data) {
-    int i;
-    uint8_t offset = 0;
+  int i;
+  uint8_t offset = 0;
 
-    for (i = 0; i < num_filters; i++) {
-        memcpy(cmd_data + offset, &filters[i].len, sizeof(uint8_t));
-        offset++;
-        if (filters[i].len > 0) { memcpy(cmd_data + offset, filters[i].filter, filters[i].len); }
-
-        offset += filters[i].len;
+  for (i = 0; i < num_filters; i++) {
+    memcpy(cmd_data + offset, &filters[i].len, sizeof(uint8_t));
+    offset++;
+    if (filters[i].len > 0) {
+      memcpy(cmd_data + offset, filters[i].filter, filters[i].len);
     }
+
+    offset += filters[i].len;
+  }
 }
 
 static inline size_t iwl_mvm_nan_add_func_cmd_len(struct ieee80211_hw* hw) {
-    return iwl_mvm_nan_is_ver2(hw) ? sizeof(struct iwl_nan_add_func_cmd_v2)
-                                   : sizeof(struct iwl_nan_add_func_cmd);
+  return iwl_mvm_nan_is_ver2(hw) ? sizeof(struct iwl_nan_add_func_cmd_v2)
+                                 : sizeof(struct iwl_nan_add_func_cmd);
 }
 
 static inline struct iwl_nan_add_func_common* iwl_mvm_nan_get_add_func_common(
     struct ieee80211_hw* hw, void* nan_add_func_cmd) {
-    return iwl_mvm_nan_is_ver2(hw) ? &((struct iwl_nan_add_func_cmd_v2*)nan_add_func_cmd)->cmn
-                                   : &((struct iwl_nan_add_func_cmd*)nan_add_func_cmd)->cmn;
+  return iwl_mvm_nan_is_ver2(hw) ? &((struct iwl_nan_add_func_cmd_v2*)nan_add_func_cmd)->cmn
+                                 : &((struct iwl_nan_add_func_cmd*)nan_add_func_cmd)->cmn;
 }
 
 static inline uint8_t* iwl_mvm_nan_get_add_func_data(struct ieee80211_hw* hw,
                                                      void* nan_add_func_cmd) {
-    return iwl_mvm_nan_is_ver2(hw) ? ((struct iwl_nan_add_func_cmd_v2*)nan_add_func_cmd)->data
-                                   : ((struct iwl_nan_add_func_cmd*)nan_add_func_cmd)->data;
+  return iwl_mvm_nan_is_ver2(hw) ? ((struct iwl_nan_add_func_cmd_v2*)nan_add_func_cmd)->data
+                                 : ((struct iwl_nan_add_func_cmd*)nan_add_func_cmd)->data;
 }
 
 int iwl_mvm_add_nan_func(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                          const struct cfg80211_nan_func* nan_func) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    void* cmd;
-    struct iwl_nan_add_func_common* cmn;
-    struct iwl_host_cmd hcmd = {
-        .id = iwl_cmd_id(NAN_DISCOVERY_FUNC_CMD, NAN_GROUP, 0),
-        .flags = CMD_WANT_SKB,
-    };
-    struct iwl_nan_add_func_res* resp;
-    struct iwl_rx_packet* pkt;
-    uint8_t* cmd_data;
-    uint16_t flags = 0;
-    uint8_t tx_filt_len, rx_filt_len;
-    size_t cmd_len;
-    int ret = 0;
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  void* cmd;
+  struct iwl_nan_add_func_common* cmn;
+  struct iwl_host_cmd hcmd = {
+      .id = iwl_cmd_id(NAN_DISCOVERY_FUNC_CMD, NAN_GROUP, 0),
+      .flags = CMD_WANT_SKB,
+  };
+  struct iwl_nan_add_func_res* resp;
+  struct iwl_rx_packet* pkt;
+  uint8_t* cmd_data;
+  uint16_t flags = 0;
+  uint8_t tx_filt_len, rx_filt_len;
+  size_t cmd_len;
+  int ret = 0;
 
-    IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Add NAN func\n");
+  IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Add NAN func\n");
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    /* We assume here that mac80211 properly validated the nan_func */
-    cmd_len = iwl_mvm_nan_add_func_cmd_len(hw) + ALIGN(nan_func->serv_spec_info_len, 4);
-    if (nan_func->srf_bf_len) {
-        cmd_len += ALIGN(nan_func->srf_bf_len + 1, 4);
-    } else if (nan_func->srf_num_macs) {
-        cmd_len += ALIGN(nan_func->srf_num_macs * ETH_ALEN + 1, 4);
+  /* We assume here that mac80211 properly validated the nan_func */
+  cmd_len = iwl_mvm_nan_add_func_cmd_len(hw) + ALIGN(nan_func->serv_spec_info_len, 4);
+  if (nan_func->srf_bf_len) {
+    cmd_len += ALIGN(nan_func->srf_bf_len + 1, 4);
+  } else if (nan_func->srf_num_macs) {
+    cmd_len += ALIGN(nan_func->srf_num_macs * ETH_ALEN + 1, 4);
+  }
+
+  rx_filt_len = iwl_mvm_get_match_filter_len(nan_func->rx_filters, nan_func->num_rx_filters);
+
+  tx_filt_len = iwl_mvm_get_match_filter_len(nan_func->tx_filters, nan_func->num_tx_filters);
+
+  cmd_len += ALIGN(rx_filt_len, 4);
+  cmd_len += ALIGN(tx_filt_len, 4);
+
+  cmd = kzalloc(cmd_len, GFP_KERNEL);
+
+  if (!cmd) {
+    ret = -ENOBUFS;
+    goto unlock;
+  }
+
+  hcmd.len[0] = cmd_len;
+  hcmd.data[0] = cmd;
+
+  cmn = iwl_mvm_nan_get_add_func_common(hw, cmd);
+
+  cmd_data = iwl_mvm_nan_get_add_func_data(hw, cmd);
+  cmn->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+  cmn->type = iwl_fw_nan_func_type(nan_func->type);
+  cmn->instance_id = nan_func->instance_id;
+  cmn->dw_interval = 1;
+
+  memcpy(&cmn->service_id, nan_func->service_id, sizeof(cmn->service_id));
+
+  /*
+   * TODO: Currently we want all the events, however we might need to be
+   * able to unset this flag for solicited publish to disable "Replied"
+   * events.
+   */
+  flags |= IWL_NAN_DE_FUNC_FLAG_RAISE_EVENTS;
+  if (nan_func->subscribe_active || nan_func->publish_type == NL80211_NAN_UNSOLICITED_PUBLISH) {
+    flags |= IWL_NAN_DE_FUNC_FLAG_UNSOLICITED_OR_ACTIVE;
+  }
+
+  if (nan_func->close_range) {
+    flags |= IWL_NAN_DE_FUNC_FLAG_CLOSE_RANGE;
+  }
+
+  if (nan_func->type == NL80211_NAN_FUNC_FOLLOW_UP ||
+      (nan_func->type == NL80211_NAN_FUNC_PUBLISH && !nan_func->publish_bcast)) {
+    flags |= IWL_NAN_DE_FUNC_FLAG_UNICAST;
+  }
+
+  if (nan_func->publish_type == NL80211_NAN_SOLICITED_PUBLISH) {
+    flags |= IWL_NAN_DE_FUNC_FLAG_SOLICITED;
+  }
+
+  cmn->flags = cpu_to_le16(flags);
+  cmn->ttl = cpu_to_le32(nan_func->ttl);
+  cmn->serv_info_len = nan_func->serv_spec_info_len;
+  if (nan_func->serv_spec_info_len) {
+    memcpy(cmd_data, nan_func->serv_spec_info, nan_func->serv_spec_info_len);
+  }
+
+  if (nan_func->type == NL80211_NAN_FUNC_FOLLOW_UP) {
+    cmn->flw_up_id = nan_func->followup_id;
+    cmn->flw_up_req_id = nan_func->followup_reqid;
+    memcpy(cmn->flw_up_addr, nan_func->followup_dest.addr, ETH_ALEN);
+    cmn->ttl = cpu_to_le32(1);
+  }
+
+  cmd_data += ALIGN(cmn->serv_info_len, 4);
+  if (nan_func->srf_bf_len) {
+    uint8_t srf_ctl = 0;
+
+    srf_ctl |= SRF_BF_TYPE;
+    srf_ctl |= (nan_func->srf_bf_idx << 2) & SRF_BLOOM_FILTER_IDX;
+    if (nan_func->srf_include) {
+      srf_ctl |= SRF_INCLUDE;
     }
 
-    rx_filt_len = iwl_mvm_get_match_filter_len(nan_func->rx_filters, nan_func->num_rx_filters);
+    cmn->srf_len = nan_func->srf_bf_len + 1;
+    memcpy(cmd_data, &srf_ctl, sizeof(srf_ctl));
+    memcpy(cmd_data + 1, nan_func->srf_bf, nan_func->srf_bf_len);
+  } else if (nan_func->srf_num_macs) {
+    uint8_t srf_ctl = 0;
+    int i;
 
-    tx_filt_len = iwl_mvm_get_match_filter_len(nan_func->tx_filters, nan_func->num_tx_filters);
-
-    cmd_len += ALIGN(rx_filt_len, 4);
-    cmd_len += ALIGN(tx_filt_len, 4);
-
-    cmd = kzalloc(cmd_len, GFP_KERNEL);
-
-    if (!cmd) {
-        ret = -ENOBUFS;
-        goto unlock;
+    if (nan_func->srf_include) {
+      srf_ctl |= SRF_INCLUDE;
     }
 
-    hcmd.len[0] = cmd_len;
-    hcmd.data[0] = cmd;
+    cmn->srf_len = nan_func->srf_num_macs * ETH_ALEN + 1;
+    memcpy(cmd_data, &srf_ctl, sizeof(srf_ctl));
 
-    cmn = iwl_mvm_nan_get_add_func_common(hw, cmd);
-
-    cmd_data = iwl_mvm_nan_get_add_func_data(hw, cmd);
-    cmn->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
-    cmn->type = iwl_fw_nan_func_type(nan_func->type);
-    cmn->instance_id = nan_func->instance_id;
-    cmn->dw_interval = 1;
-
-    memcpy(&cmn->service_id, nan_func->service_id, sizeof(cmn->service_id));
-
-    /*
-     * TODO: Currently we want all the events, however we might need to be
-     * able to unset this flag for solicited publish to disable "Replied"
-     * events.
-     */
-    flags |= IWL_NAN_DE_FUNC_FLAG_RAISE_EVENTS;
-    if (nan_func->subscribe_active || nan_func->publish_type == NL80211_NAN_UNSOLICITED_PUBLISH) {
-        flags |= IWL_NAN_DE_FUNC_FLAG_UNSOLICITED_OR_ACTIVE;
+    for (i = 0; i < nan_func->srf_num_macs; i++) {
+      memcpy(cmd_data + 1 + i * ETH_ALEN, nan_func->srf_macs[i].addr, ETH_ALEN);
     }
+  }
 
-    if (nan_func->close_range) { flags |= IWL_NAN_DE_FUNC_FLAG_CLOSE_RANGE; }
+  cmd_data += ALIGN(cmn->srf_len, 4);
 
-    if (nan_func->type == NL80211_NAN_FUNC_FOLLOW_UP ||
-        (nan_func->type == NL80211_NAN_FUNC_PUBLISH && !nan_func->publish_bcast)) {
-        flags |= IWL_NAN_DE_FUNC_FLAG_UNICAST;
-    }
+  if (rx_filt_len > 0) {
+    iwl_mvm_copy_filters(nan_func->rx_filters, nan_func->num_rx_filters, cmd_data);
+  }
 
-    if (nan_func->publish_type == NL80211_NAN_SOLICITED_PUBLISH) {
-        flags |= IWL_NAN_DE_FUNC_FLAG_SOLICITED;
-    }
+  cmn->rx_filter_len = rx_filt_len;
+  cmd_data += ALIGN(cmn->rx_filter_len, 4);
 
-    cmn->flags = cpu_to_le16(flags);
-    cmn->ttl = cpu_to_le32(nan_func->ttl);
-    cmn->serv_info_len = nan_func->serv_spec_info_len;
-    if (nan_func->serv_spec_info_len) {
-        memcpy(cmd_data, nan_func->serv_spec_info, nan_func->serv_spec_info_len);
-    }
+  if (tx_filt_len > 0) {
+    iwl_mvm_copy_filters(nan_func->tx_filters, nan_func->num_tx_filters, cmd_data);
+  }
 
-    if (nan_func->type == NL80211_NAN_FUNC_FOLLOW_UP) {
-        cmn->flw_up_id = nan_func->followup_id;
-        cmn->flw_up_req_id = nan_func->followup_reqid;
-        memcpy(cmn->flw_up_addr, nan_func->followup_dest.addr, ETH_ALEN);
-        cmn->ttl = cpu_to_le32(1);
-    }
+  cmn->tx_filter_len = tx_filt_len;
 
-    cmd_data += ALIGN(cmn->serv_info_len, 4);
-    if (nan_func->srf_bf_len) {
-        uint8_t srf_ctl = 0;
+  ret = iwl_mvm_send_cmd(mvm, &hcmd);
 
-        srf_ctl |= SRF_BF_TYPE;
-        srf_ctl |= (nan_func->srf_bf_idx << 2) & SRF_BLOOM_FILTER_IDX;
-        if (nan_func->srf_include) { srf_ctl |= SRF_INCLUDE; }
+  if (ret) {
+    IWL_ERR(mvm, "Couldn't send NAN_DISCOVERY_FUNC_CMD: %d\n", ret);
+    goto out_free;
+  }
 
-        cmn->srf_len = nan_func->srf_bf_len + 1;
-        memcpy(cmd_data, &srf_ctl, sizeof(srf_ctl));
-        memcpy(cmd_data + 1, nan_func->srf_bf, nan_func->srf_bf_len);
-    } else if (nan_func->srf_num_macs) {
-        uint8_t srf_ctl = 0;
-        int i;
+  pkt = hcmd.resp_pkt;
 
-        if (nan_func->srf_include) { srf_ctl |= SRF_INCLUDE; }
+  if (WARN_ON(iwl_rx_packet_payload_len(pkt) != sizeof(*resp))) {
+    ret = -EIO;
+    goto out_free_resp;
+  }
 
-        cmn->srf_len = nan_func->srf_num_macs * ETH_ALEN + 1;
-        memcpy(cmd_data, &srf_ctl, sizeof(srf_ctl));
+  resp = (void*)pkt->data;
 
-        for (i = 0; i < nan_func->srf_num_macs; i++) {
-            memcpy(cmd_data + 1 + i * ETH_ALEN, nan_func->srf_macs[i].addr, ETH_ALEN);
-        }
-    }
+  IWL_DEBUG_MAC80211(mvm, "Add NAN func response status: %d, instance_id: %d\n", resp->status,
+                     resp->instance_id);
 
-    cmd_data += ALIGN(cmn->srf_len, 4);
+  if (resp->status == IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_ENTRIES ||
+      resp->status == IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_MEMORY) {
+    ret = -ENOBUFS;
+    goto out_free_resp;
+  }
 
-    if (rx_filt_len > 0) {
-        iwl_mvm_copy_filters(nan_func->rx_filters, nan_func->num_rx_filters, cmd_data);
-    }
+  if (resp->status != IWL_NAN_DE_FUNC_STATUS_SUCCESSFUL) {
+    ret = -EIO;
+    goto out_free_resp;
+  }
 
-    cmn->rx_filter_len = rx_filt_len;
-    cmd_data += ALIGN(cmn->rx_filter_len, 4);
+  if (cmn->instance_id && WARN_ON(resp->instance_id != cmn->instance_id)) {
+    ret = -EIO;
+    goto out_free_resp;
+  }
 
-    if (tx_filt_len > 0) {
-        iwl_mvm_copy_filters(nan_func->tx_filters, nan_func->num_tx_filters, cmd_data);
-    }
-
-    cmn->tx_filter_len = tx_filt_len;
-
-    ret = iwl_mvm_send_cmd(mvm, &hcmd);
-
-    if (ret) {
-        IWL_ERR(mvm, "Couldn't send NAN_DISCOVERY_FUNC_CMD: %d\n", ret);
-        goto out_free;
-    }
-
-    pkt = hcmd.resp_pkt;
-
-    if (WARN_ON(iwl_rx_packet_payload_len(pkt) != sizeof(*resp))) {
-        ret = -EIO;
-        goto out_free_resp;
-    }
-
-    resp = (void*)pkt->data;
-
-    IWL_DEBUG_MAC80211(mvm, "Add NAN func response status: %d, instance_id: %d\n", resp->status,
-                       resp->instance_id);
-
-    if (resp->status == IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_ENTRIES ||
-        resp->status == IWL_NAN_DE_FUNC_STATUS_INSUFFICIENT_MEMORY) {
-        ret = -ENOBUFS;
-        goto out_free_resp;
-    }
-
-    if (resp->status != IWL_NAN_DE_FUNC_STATUS_SUCCESSFUL) {
-        ret = -EIO;
-        goto out_free_resp;
-    }
-
-    if (cmn->instance_id && WARN_ON(resp->instance_id != cmn->instance_id)) {
-        ret = -EIO;
-        goto out_free_resp;
-    }
-
-    ret = 0;
+  ret = 0;
 out_free_resp:
-    iwl_free_resp(&hcmd);
+  iwl_free_resp(&hcmd);
 out_free:
-    kfree(cmd);
+  kfree(cmd);
 unlock:
-    mutex_unlock(&mvm->mutex);
-    return ret;
+  mutex_unlock(&mvm->mutex);
+  return ret;
 }
 
 void iwl_mvm_del_nan_func(struct ieee80211_hw* hw, struct ieee80211_vif* vif, uint8_t instance_id) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    void* cmd;
-    struct iwl_nan_add_func_common* cmn;
-    int ret;
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  void* cmd;
+  struct iwl_nan_add_func_common* cmn;
+  int ret;
 
-    IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Remove NAN func\n");
+  IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "Remove NAN func\n");
 
-    cmd = kzalloc(iwl_mvm_nan_add_func_cmd_len(hw), GFP_KERNEL);
-    if (!cmd) {
-        IWL_ERR(mvm, "Failed to allocate command to remove NAN func instance_id: %d\n",
-                instance_id);
-        return;
-    }
+  cmd = kzalloc(iwl_mvm_nan_add_func_cmd_len(hw), GFP_KERNEL);
+  if (!cmd) {
+    IWL_ERR(mvm, "Failed to allocate command to remove NAN func instance_id: %d\n", instance_id);
+    return;
+  }
 
-    cmn = iwl_mvm_nan_get_add_func_common(hw, cmd);
+  cmn = iwl_mvm_nan_get_add_func_common(hw, cmd);
 
-    mutex_lock(&mvm->mutex);
-    cmn->action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
-    cmn->instance_id = instance_id;
+  mutex_lock(&mvm->mutex);
+  cmn->action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+  cmn->instance_id = instance_id;
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_DISCOVERY_FUNC_CMD, NAN_GROUP, 0), 0,
-                               iwl_mvm_nan_add_func_cmd_len(hw), cmd);
-    if (ret) { IWL_ERR(mvm, "Failed to remove NAN func instance_id: %d\n", instance_id); }
+  ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_DISCOVERY_FUNC_CMD, NAN_GROUP, 0), 0,
+                             iwl_mvm_nan_add_func_cmd_len(hw), cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to remove NAN func instance_id: %d\n", instance_id);
+  }
 
-    mutex_unlock(&mvm->mutex);
-    kfree(cmd);
+  mutex_unlock(&mvm->mutex);
+  kfree(cmd);
 }
 
 static uint8_t iwl_cfg_nan_func_type(uint8_t fw_type) {
-    switch (fw_type) {
+  switch (fw_type) {
     case IWL_NAN_DE_FUNC_PUBLISH:
-        return NL80211_NAN_FUNC_PUBLISH;
+      return NL80211_NAN_FUNC_PUBLISH;
     case IWL_NAN_DE_FUNC_SUBSCRIBE:
-        return NL80211_NAN_FUNC_SUBSCRIBE;
+      return NL80211_NAN_FUNC_SUBSCRIBE;
     case IWL_NAN_DE_FUNC_FOLLOW_UP:
-        return NL80211_NAN_FUNC_FOLLOW_UP;
+      return NL80211_NAN_FUNC_FOLLOW_UP;
     default:
-        return NL80211_NAN_FUNC_MAX_TYPE + 1;
-    }
+      return NL80211_NAN_FUNC_MAX_TYPE + 1;
+  }
 }
 
 static void iwl_mvm_nan_match_v1(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_nan_disc_evt_notify_v1* ev = (void*)pkt->data;
-    struct cfg80211_nan_match_params match = {0};
-    int len = iwl_rx_packet_payload_len(pkt);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_nan_disc_evt_notify_v1* ev = (void*)pkt->data;
+  struct cfg80211_nan_match_params match = {0};
+  int len = iwl_rx_packet_payload_len(pkt);
 
-    if (WARN_ON_ONCE(!mvm->nan_vif)) {
-        IWL_ERR(mvm, "NAN vif is NULL\n");
-        return;
-    }
+  if (WARN_ON_ONCE(!mvm->nan_vif)) {
+    IWL_ERR(mvm, "NAN vif is NULL\n");
+    return;
+  }
 
-    if (WARN_ON_ONCE(len < sizeof(*ev))) {
-        IWL_ERR(mvm, "Invalid NAN match event length: %d\n", len);
-        return;
-    }
+  if (WARN_ON_ONCE(len < sizeof(*ev))) {
+    IWL_ERR(mvm, "Invalid NAN match event length: %d\n", len);
+    return;
+  }
 
-    if (WARN_ON_ONCE(len < sizeof(*ev) + ev->service_info_len)) {
-        IWL_ERR(mvm, "Invalid NAN match event length: %d, info_len: %d\n", len,
-                ev->service_info_len);
-        return;
-    }
+  if (WARN_ON_ONCE(len < sizeof(*ev) + ev->service_info_len)) {
+    IWL_ERR(mvm, "Invalid NAN match event length: %d, info_len: %d\n", len, ev->service_info_len);
+    return;
+  }
 
-    match.type = iwl_cfg_nan_func_type(ev->type);
+  match.type = iwl_cfg_nan_func_type(ev->type);
 
-    if (WARN_ON_ONCE(match.type > NL80211_NAN_FUNC_MAX_TYPE)) {
-        IWL_ERR(mvm, "Invalid func type\n");
-        return;
-    }
+  if (WARN_ON_ONCE(match.type > NL80211_NAN_FUNC_MAX_TYPE)) {
+    IWL_ERR(mvm, "Invalid func type\n");
+    return;
+  }
 
-    match.inst_id = ev->instance_id;
-    match.peer_inst_id = ev->peer_instance;
-    match.addr = ev->peer_mac_addr;
-    match.info = ev->buf;
-    match.info_len = ev->service_info_len;
-    ieee80211_nan_func_match(mvm->nan_vif, &match, GFP_ATOMIC);
+  match.inst_id = ev->instance_id;
+  match.peer_inst_id = ev->peer_instance;
+  match.addr = ev->peer_mac_addr;
+  match.info = ev->buf;
+  match.info_len = ev->service_info_len;
+  ieee80211_nan_func_match(mvm->nan_vif, &match, GFP_ATOMIC);
 }
 
 static void iwl_mvm_nan_match_v2(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_nan_disc_evt_notify_v2* ev = (void*)pkt->data;
-    uint32_t len = iwl_rx_packet_payload_len(pkt);
-    uint32_t i = 0;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_nan_disc_evt_notify_v2* ev = (void*)pkt->data;
+  uint32_t len = iwl_rx_packet_payload_len(pkt);
+  uint32_t i = 0;
 
-    if (WARN_ONCE(!mvm->nan_vif, "NAN vif is NULL")) { return; }
+  if (WARN_ONCE(!mvm->nan_vif, "NAN vif is NULL")) {
+    return;
+  }
 
-    if (WARN_ONCE(len < sizeof(*ev), "Invalid NAN match event length=%u", len)) { return; }
+  if (WARN_ONCE(len < sizeof(*ev), "Invalid NAN match event length=%u", len)) {
+    return;
+  }
 
-    if (WARN_ONCE(len < sizeof(*ev) + le32_to_cpu(ev->match_len) + le32_to_cpu(ev->avail_attrs_len),
-                  "Bad NAN match event: len=%u, match=%u, attrs=%u\n", len, ev->match_len,
-                  ev->avail_attrs_len)) {
-        return;
-    }
+  if (WARN_ONCE(len < sizeof(*ev) + le32_to_cpu(ev->match_len) + le32_to_cpu(ev->avail_attrs_len),
+                "Bad NAN match event: len=%u, match=%u, attrs=%u\n", len, ev->match_len,
+                ev->avail_attrs_len)) {
+    return;
+  }
 
-    i = 0;
-    while (i < le32_to_cpu(ev->match_len)) {
-        struct cfg80211_nan_match_params match = {0};
-        struct iwl_nan_disc_info* disc_info = (struct iwl_nan_disc_info*)(((uint8_t*)(ev + 1)) + i);
+  i = 0;
+  while (i < le32_to_cpu(ev->match_len)) {
+    struct cfg80211_nan_match_params match = {0};
+    struct iwl_nan_disc_info* disc_info = (struct iwl_nan_disc_info*)(((uint8_t*)(ev + 1)) + i);
 
-        match.type = iwl_cfg_nan_func_type(disc_info->type);
-        match.inst_id = disc_info->instance_id;
-        match.peer_inst_id = disc_info->peer_instance;
-        match.addr = ev->peer_mac_addr;
-        match.info = disc_info->buf;
-        match.info_len = disc_info->service_info_len;
-        ieee80211_nan_func_match(mvm->nan_vif, &match, GFP_ATOMIC);
+    match.type = iwl_cfg_nan_func_type(disc_info->type);
+    match.inst_id = disc_info->instance_id;
+    match.peer_inst_id = disc_info->peer_instance;
+    match.addr = ev->peer_mac_addr;
+    match.info = disc_info->buf;
+    match.info_len = disc_info->service_info_len;
+    ieee80211_nan_func_match(mvm->nan_vif, &match, GFP_ATOMIC);
 
-        i += ALIGN(sizeof(*disc_info) + disc_info->service_info_len +
-                       le16_to_cpu(disc_info->sdea_service_info_len) +
-                       le16_to_cpu(disc_info->sec_ctxt_len),
-                   4);
-    }
+    i += ALIGN(sizeof(*disc_info) + disc_info->service_info_len +
+                   le16_to_cpu(disc_info->sdea_service_info_len) +
+                   le16_to_cpu(disc_info->sec_ctxt_len),
+               4);
+  }
 }
 
 void iwl_mvm_nan_match(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NAN_NOTIF_V2)) {
-        iwl_mvm_nan_match_v2(mvm, rxb);
-    } else {
-        iwl_mvm_nan_match_v1(mvm, rxb);
-    }
+  if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NAN_NOTIF_V2)) {
+    iwl_mvm_nan_match_v2(mvm, rxb);
+  } else {
+    iwl_mvm_nan_match_v1(mvm, rxb);
+  }
 }
 
 void iwl_mvm_nan_de_term_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_nan_de_term* ev = (void*)pkt->data;
-    int len = iwl_rx_packet_payload_len(pkt);
-    enum nl80211_nan_func_term_reason nl_reason;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_nan_de_term* ev = (void*)pkt->data;
+  int len = iwl_rx_packet_payload_len(pkt);
+  enum nl80211_nan_func_term_reason nl_reason;
 
-    if (WARN_ON_ONCE(!mvm->nan_vif)) {
-        IWL_ERR(mvm, "NAN vif is NULL\n");
-        return;
-    }
+  if (WARN_ON_ONCE(!mvm->nan_vif)) {
+    IWL_ERR(mvm, "NAN vif is NULL\n");
+    return;
+  }
 
-    if (WARN_ON_ONCE(len != sizeof(*ev))) {
-        IWL_ERR(mvm, "NAN DE termination event bad length: %d\n", len);
-        return;
-    }
+  if (WARN_ON_ONCE(len != sizeof(*ev))) {
+    IWL_ERR(mvm, "NAN DE termination event bad length: %d\n", len);
+    return;
+  }
 
-    switch (ev->reason) {
+  switch (ev->reason) {
     case IWL_NAN_DE_TERM_TTL_REACHED:
-        nl_reason = NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED;
-        break;
+      nl_reason = NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED;
+      break;
     case IWL_NAN_DE_TERM_USER_REQUEST:
-        nl_reason = NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST;
-        break;
+      nl_reason = NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST;
+      break;
     case IWL_NAN_DE_TERM_FAILURE:
-        nl_reason = NL80211_NAN_FUNC_TERM_REASON_ERROR;
-        break;
+      nl_reason = NL80211_NAN_FUNC_TERM_REASON_ERROR;
+      break;
     default:
-        WARN_ON_ONCE(1);
-        return;
-    }
+      WARN_ON_ONCE(1);
+      return;
+  }
 
-    ieee80211_nan_func_terminated(mvm->nan_vif, ev->instance_id, nl_reason, GFP_ATOMIC);
+  ieee80211_nan_func_terminated(mvm->nan_vif, ev->instance_id, nl_reason, GFP_ATOMIC);
 }
 
 int iwl_mvm_nan_config_nan_faw_cmd(struct iwl_mvm* mvm, struct cfg80211_chan_def* chandef,
                                    uint8_t slots) {
-    struct iwl_nan_faw_config cmd = {};
-    struct iwl_mvm_vif* mvmvif;
-    int ret;
+  struct iwl_nan_faw_config cmd = {};
+  struct iwl_mvm_vif* mvmvif;
+  int ret;
 
-    if (WARN_ON(!mvm->nan_vif)) { return -EINVAL; }
+  if (WARN_ON(!mvm->nan_vif)) {
+    return -EINVAL;
+  }
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    mvmvif = iwl_mvm_vif_from_mac80211(mvm->nan_vif);
+  mvmvif = iwl_mvm_vif_from_mac80211(mvm->nan_vif);
 
-    /* Set the channel info data */
-    cmd.faw_ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? PHY_BAND_24 : PHY_BAND_5);
+  /* Set the channel info data */
+  cmd.faw_ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? PHY_BAND_24 : PHY_BAND_5);
 
-    cmd.faw_ci.channel = chandef->chan->hw_value;
-    cmd.faw_ci.width = iwl_mvm_get_channel_width(chandef);
-    cmd.faw_ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
-    ieee80211_chandef_to_operating_class(chandef, &cmd.op_class);
-    cmd.slots = slots;
-    cmd.type = IWL_NAN_POST_NAN_ATTR_FURTHER_NAN;
-    cmd.id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+  cmd.faw_ci.channel = chandef->chan->hw_value;
+  cmd.faw_ci.width = iwl_mvm_get_channel_width(chandef);
+  cmd.faw_ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+  ieee80211_chandef_to_operating_class(chandef, &cmd.op_class);
+  cmd.slots = slots;
+  cmd.type = IWL_NAN_POST_NAN_ATTR_FURTHER_NAN;
+  cmd.id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_FAW_CONFIG_CMD, NAN_GROUP, 0), 0, sizeof(cmd),
-                               &cmd);
+  ret =
+      iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(NAN_FAW_CONFIG_CMD, NAN_GROUP, 0), 0, sizeof(cmd), &cmd);
 
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    return ret;
+  return ret;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/nvm.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/nvm.c
index 03eddac..a59a4a1 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/nvm.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/nvm.c
@@ -35,6 +35,7 @@
  *****************************************************************************/
 #include <linux/firmware.h>
 #include <linux/rtnetlink.h>
+
 #include "fw/acpi.h"
 #include "iwl-csr.h"
 #include "iwl-eeprom-parse.h"
@@ -59,131 +60,136 @@
  */
 static int iwl_nvm_write_chunk(struct iwl_mvm* mvm, uint16_t section, uint16_t offset,
                                uint16_t length, const uint8_t* data) {
-    struct iwl_nvm_access_cmd nvm_access_cmd = {
-        .offset = cpu_to_le16(offset),
-        .length = cpu_to_le16(length),
-        .type = cpu_to_le16(section),
-        .op_code = NVM_WRITE_OPCODE,
-    };
-    struct iwl_host_cmd cmd = {
-        .id = NVM_ACCESS_CMD,
-        .len = {sizeof(struct iwl_nvm_access_cmd), length},
-        .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
-        .data = {&nvm_access_cmd, data},
-        /* data may come from vmalloc, so use _DUP */
-        .dataflags = {0, IWL_HCMD_DFL_DUP},
-    };
-    struct iwl_rx_packet* pkt;
-    struct iwl_nvm_access_resp* nvm_resp;
-    int ret;
+  struct iwl_nvm_access_cmd nvm_access_cmd = {
+      .offset = cpu_to_le16(offset),
+      .length = cpu_to_le16(length),
+      .type = cpu_to_le16(section),
+      .op_code = NVM_WRITE_OPCODE,
+  };
+  struct iwl_host_cmd cmd = {
+      .id = NVM_ACCESS_CMD,
+      .len = {sizeof(struct iwl_nvm_access_cmd), length},
+      .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+      .data = {&nvm_access_cmd, data},
+      /* data may come from vmalloc, so use _DUP */
+      .dataflags = {0, IWL_HCMD_DFL_DUP},
+  };
+  struct iwl_rx_packet* pkt;
+  struct iwl_nvm_access_resp* nvm_resp;
+  int ret;
 
-    ret = iwl_mvm_send_cmd(mvm, &cmd);
-    if (ret) { return ret; }
-
-    pkt = cmd.resp_pkt;
-    /* Extract & check NVM write response */
-    nvm_resp = (void*)pkt->data;
-    if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) {
-        IWL_ERR(mvm, "NVM access write command failed for section %u (status = 0x%x)\n", section,
-                le16_to_cpu(nvm_resp->status));
-        ret = -EIO;
-    }
-
-    iwl_free_resp(&cmd);
+  ret = iwl_mvm_send_cmd(mvm, &cmd);
+  if (ret) {
     return ret;
+  }
+
+  pkt = cmd.resp_pkt;
+  /* Extract & check NVM write response */
+  nvm_resp = (void*)pkt->data;
+  if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) {
+    IWL_ERR(mvm, "NVM access write command failed for section %u (status = 0x%x)\n", section,
+            le16_to_cpu(nvm_resp->status));
+    ret = -EIO;
+  }
+
+  iwl_free_resp(&cmd);
+  return ret;
 }
 
 static int iwl_nvm_read_chunk(struct iwl_mvm* mvm, uint16_t section, uint16_t offset,
                               uint16_t length, uint8_t* data) {
-    struct iwl_nvm_access_cmd nvm_access_cmd = {
-        .offset = cpu_to_le16(offset),
-        .length = cpu_to_le16(length),
-        .type = cpu_to_le16(section),
-        .op_code = NVM_READ_OPCODE,
-    };
-    struct iwl_nvm_access_resp* nvm_resp;
-    struct iwl_rx_packet* pkt;
-    struct iwl_host_cmd cmd = {
-        .id = NVM_ACCESS_CMD,
-        .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
-        .data =
-            {
-                &nvm_access_cmd,
-            },
-    };
-    int ret, bytes_read, offset_read;
-    uint8_t* resp_data;
+  struct iwl_nvm_access_cmd nvm_access_cmd = {
+      .offset = cpu_to_le16(offset),
+      .length = cpu_to_le16(length),
+      .type = cpu_to_le16(section),
+      .op_code = NVM_READ_OPCODE,
+  };
+  struct iwl_nvm_access_resp* nvm_resp;
+  struct iwl_rx_packet* pkt;
+  struct iwl_host_cmd cmd = {
+      .id = NVM_ACCESS_CMD,
+      .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+      .data =
+          {
+              &nvm_access_cmd,
+          },
+  };
+  int ret, bytes_read, offset_read;
+  uint8_t* resp_data;
 
-    cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);
+  cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);
 
-    ret = iwl_mvm_send_cmd(mvm, &cmd);
-    if (ret) { return ret; }
+  ret = iwl_mvm_send_cmd(mvm, &cmd);
+  if (ret) {
+    return ret;
+  }
 
-    pkt = cmd.resp_pkt;
+  pkt = cmd.resp_pkt;
 
-    /* Extract NVM response */
-    nvm_resp = (void*)pkt->data;
-    ret = le16_to_cpu(nvm_resp->status);
-    bytes_read = le16_to_cpu(nvm_resp->length);
-    offset_read = le16_to_cpu(nvm_resp->offset);
-    resp_data = nvm_resp->data;
-    if (ret) {
-        if ((offset != 0) && (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
-            /*
-             * meaning of NOT_VALID_ADDRESS:
-             * driver try to read chunk from address that is
-             * multiple of 2K and got an error since addr is empty.
-             * meaning of (offset != 0): driver already
-             * read valid data from another chunk so this case
-             * is not an error.
-             */
-            IWL_DEBUG_EEPROM(
-                mvm->trans->dev,
-                "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
-                offset);
-            ret = 0;
-        } else {
-            IWL_DEBUG_EEPROM(mvm->trans->dev,
-                             "NVM access command failed with status %d (device: %s)\n", ret,
-                             mvm->cfg->name);
-            ret = -ENODATA;
-        }
-        goto exit;
+  /* Extract NVM response */
+  nvm_resp = (void*)pkt->data;
+  ret = le16_to_cpu(nvm_resp->status);
+  bytes_read = le16_to_cpu(nvm_resp->length);
+  offset_read = le16_to_cpu(nvm_resp->offset);
+  resp_data = nvm_resp->data;
+  if (ret) {
+    if ((offset != 0) && (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
+      /*
+       * meaning of NOT_VALID_ADDRESS:
+       * driver try to read chunk from address that is
+       * multiple of 2K and got an error since addr is empty.
+       * meaning of (offset != 0): driver already
+       * read valid data from another chunk so this case
+       * is not an error.
+       */
+      IWL_DEBUG_EEPROM(
+          mvm->trans->dev,
+          "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
+          offset);
+      ret = 0;
+    } else {
+      IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM access command failed with status %d (device: %s)\n",
+                       ret, mvm->cfg->name);
+      ret = -ENODATA;
     }
+    goto exit;
+  }
 
-    if (offset_read != offset) {
-        IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n", offset_read);
-        ret = -EINVAL;
-        goto exit;
-    }
+  if (offset_read != offset) {
+    IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n", offset_read);
+    ret = -EINVAL;
+    goto exit;
+  }
 
-    /* Write data to NVM */
-    memcpy(data + offset, resp_data, bytes_read);
-    ret = bytes_read;
+  /* Write data to NVM */
+  memcpy(data + offset, resp_data, bytes_read);
+  ret = bytes_read;
 
 exit:
-    iwl_free_resp(&cmd);
-    return ret;
+  iwl_free_resp(&cmd);
+  return ret;
 }
 
 static int iwl_nvm_write_section(struct iwl_mvm* mvm, uint16_t section, const uint8_t* data,
                                  uint16_t length) {
-    int offset = 0;
+  int offset = 0;
 
-    /* copy data in chunks of 2k (and remainder if any) */
+  /* copy data in chunks of 2k (and remainder if any) */
 
-    while (offset < length) {
-        int chunk_size, ret;
+  while (offset < length) {
+    int chunk_size, ret;
 
-        chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE, length - offset);
+    chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE, length - offset);
 
-        ret = iwl_nvm_write_chunk(mvm, section, offset, chunk_size, data + offset);
-        if (ret < 0) { return ret; }
-
-        offset += chunk_size;
+    ret = iwl_nvm_write_chunk(mvm, section, offset, chunk_size, data + offset);
+    if (ret < 0) {
+      return ret;
     }
 
-    return 0;
+    offset += chunk_size;
+  }
+
+  return 0;
 }
 
 /*
@@ -198,355 +204,382 @@
  */
 static int iwl_nvm_read_section(struct iwl_mvm* mvm, uint16_t section, uint8_t* data,
                                 uint32_t size_read) {
-    uint16_t length, offset = 0;
-    int ret;
+  uint16_t length, offset = 0;
+  int ret;
 
-    /* Set nvm section read length */
-    length = IWL_NVM_DEFAULT_CHUNK_SIZE;
+  /* Set nvm section read length */
+  length = IWL_NVM_DEFAULT_CHUNK_SIZE;
 
-    ret = length;
+  ret = length;
 
-    /* Read the NVM until exhausted (reading less than requested) */
-    while (ret == length) {
-        /* Check no memory assumptions fail and cause an overflow */
-        if ((size_read + offset + length) > mvm->cfg->base_params->eeprom_size) {
-            IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
-            return -ENOBUFS;
-        }
-
-        ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
-        if (ret < 0) {
-            IWL_DEBUG_EEPROM(mvm->trans->dev,
-                             "Cannot read NVM from section %d offset %d, length %d\n", section,
-                             offset, length);
-            return ret;
-        }
-        offset += ret;
+  /* Read the NVM until exhausted (reading less than requested) */
+  while (ret == length) {
+    /* Check no memory assumptions fail and cause an overflow */
+    if ((size_read + offset + length) > mvm->cfg->base_params->eeprom_size) {
+      IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
+      return -ENOBUFS;
     }
 
-    iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset);
+    ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
+    if (ret < 0) {
+      IWL_DEBUG_EEPROM(mvm->trans->dev, "Cannot read NVM from section %d offset %d, length %d\n",
+                       section, offset, length);
+      return ret;
+    }
+    offset += ret;
+  }
 
-    IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM section %d read completed\n", section);
-    return offset;
+  iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset);
+
+  IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM section %d read completed\n", section);
+  return offset;
 }
 
 static struct iwl_nvm_data* iwl_parse_nvm_sections(struct iwl_mvm* mvm) {
-    struct iwl_nvm_section* sections = mvm->nvm_sections;
-    const __be16* hw;
-    const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
-    bool lar_enabled;
-    int regulatory_type;
+  struct iwl_nvm_section* sections = mvm->nvm_sections;
+  const __be16* hw;
+  const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
+  bool lar_enabled;
+  int regulatory_type;
 
-    /* Checking for required sections */
-    if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
-        if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
-            !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
-            IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
-            return NULL;
-        }
+  /* Checking for required sections */
+  if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+    if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+        !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
+      IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
+      return NULL;
+    }
+  } else {
+    if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP) {
+      regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
     } else {
-        if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP) {
-            regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
-        } else {
-            regulatory_type = NVM_SECTION_TYPE_REGULATORY;
-        }
-
-        /* SW and REGULATORY sections are mandatory */
-        if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
-            !mvm->nvm_sections[regulatory_type].data) {
-            IWL_ERR(mvm, "Can't parse empty family 8000 OTP/NVM sections\n");
-            return NULL;
-        }
-        /* MAC_OVERRIDE or at least HW section must exist */
-        if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
-            !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
-            IWL_ERR(mvm, "Can't parse mac_address, empty sections\n");
-            return NULL;
-        }
-
-        /* PHY_SKU section is mandatory in B0 */
-        if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
-            IWL_ERR(mvm, "Can't parse phy_sku in B0, empty sections\n");
-            return NULL;
-        }
+      regulatory_type = NVM_SECTION_TYPE_REGULATORY;
     }
 
-    hw = (const __be16*)sections[mvm->cfg->nvm_hw_section_num].data;
-    sw = (const __le16*)sections[NVM_SECTION_TYPE_SW].data;
-    calib = (const __le16*)sections[NVM_SECTION_TYPE_CALIBRATION].data;
-    mac_override = (const __le16*)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
-    phy_sku = (const __le16*)sections[NVM_SECTION_TYPE_PHY_SKU].data;
+    /* SW and REGULATORY sections are mandatory */
+    if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || !mvm->nvm_sections[regulatory_type].data) {
+      IWL_ERR(mvm, "Can't parse empty family 8000 OTP/NVM sections\n");
+      return NULL;
+    }
+    /* MAC_OVERRIDE or at least HW section must exist */
+    if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
+        !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
+      IWL_ERR(mvm, "Can't parse mac_address, empty sections\n");
+      return NULL;
+    }
 
-    regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP
-                     ? (const __le16*)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data
-                     : (const __le16*)sections[NVM_SECTION_TYPE_REGULATORY].data;
+    /* PHY_SKU section is mandatory in B0 */
+    if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
+      IWL_ERR(mvm, "Can't parse phy_sku in B0, empty sections\n");
+      return NULL;
+    }
+  }
 
-    lar_enabled = !iwlwifi_mod_params.lar_disable &&
-                  fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+  hw = (const __be16*)sections[mvm->cfg->nvm_hw_section_num].data;
+  sw = (const __le16*)sections[NVM_SECTION_TYPE_SW].data;
+  calib = (const __le16*)sections[NVM_SECTION_TYPE_CALIBRATION].data;
+  mac_override = (const __le16*)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+  phy_sku = (const __le16*)sections[NVM_SECTION_TYPE_PHY_SKU].data;
 
-    return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib, regulatory, mac_override,
-                              phy_sku, mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, lar_enabled);
+  regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP
+                   ? (const __le16*)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data
+                   : (const __le16*)sections[NVM_SECTION_TYPE_REGULATORY].data;
+
+  lar_enabled = !iwlwifi_mod_params.lar_disable &&
+                fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+
+  return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib, regulatory, mac_override, phy_sku,
+                            mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, lar_enabled);
 }
 
 /* Loads the NVM data stored in mvm->nvm_sections into the NIC */
 int iwl_mvm_load_nvm_to_nic(struct iwl_mvm* mvm) {
-    int i, ret = 0;
-    struct iwl_nvm_section* sections = mvm->nvm_sections;
+  int i, ret = 0;
+  struct iwl_nvm_section* sections = mvm->nvm_sections;
 
-    IWL_DEBUG_EEPROM(mvm->trans->dev, "'Write to NVM\n");
+  IWL_DEBUG_EEPROM(mvm->trans->dev, "'Write to NVM\n");
 
-    for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
-        if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length) { continue; }
-        ret = iwl_nvm_write_section(mvm, i, sections[i].data, sections[i].length);
-        if (ret < 0) {
-            IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
-            break;
-        }
+  for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
+    if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length) {
+      continue;
     }
-    return ret;
+    ret = iwl_nvm_write_section(mvm, i, sections[i].data, sections[i].length);
+    if (ret < 0) {
+      IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+      break;
+    }
+  }
+  return ret;
 }
 
 int iwl_nvm_init(struct iwl_mvm* mvm) {
-    int ret, section;
-    uint32_t size_read = 0;
-    uint8_t *nvm_buffer, *temp;
-    const char* nvm_file_C = mvm->cfg->default_nvm_file_C_step;
+  int ret, section;
+  uint32_t size_read = 0;
+  uint8_t *nvm_buffer, *temp;
+  const char* nvm_file_C = mvm->cfg->default_nvm_file_C_step;
 
-    if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS)) { return -EINVAL; }
+  if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS)) {
+    return -EINVAL;
+  }
 
-    /* load NVM values from nic */
-    /* Read From FW NVM */
-    IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
+  /* load NVM values from nic */
+  /* Read From FW NVM */
+  IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
 
-    nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, GFP_KERNEL);
-    if (!nvm_buffer) { return -ENOMEM; }
-    for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
-        /* we override the constness for initial read */
-        ret = iwl_nvm_read_section(mvm, section, nvm_buffer, size_read);
-        if (ret == -ENODATA) {
-            ret = 0;
-            continue;
-        }
-        if (ret < 0) { break; }
-        size_read += ret;
-        temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
-        if (!temp) {
-            ret = -ENOMEM;
-            break;
-        }
+  nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, GFP_KERNEL);
+  if (!nvm_buffer) {
+    return -ENOMEM;
+  }
+  for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
+    /* we override the constness for initial read */
+    ret = iwl_nvm_read_section(mvm, section, nvm_buffer, size_read);
+    if (ret == -ENODATA) {
+      ret = 0;
+      continue;
+    }
+    if (ret < 0) {
+      break;
+    }
+    size_read += ret;
+    temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+    if (!temp) {
+      ret = -ENOMEM;
+      break;
+    }
 
-        iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret);
+    iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret);
 
-        mvm->nvm_sections[section].data = temp;
-        mvm->nvm_sections[section].length = ret;
+    mvm->nvm_sections[section].data = temp;
+    mvm->nvm_sections[section].length = ret;
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-        switch (section) {
-        case NVM_SECTION_TYPE_SW:
-            mvm->nvm_sw_blob.data = temp;
-            mvm->nvm_sw_blob.size = ret;
-            break;
-        case NVM_SECTION_TYPE_CALIBRATION:
-            mvm->nvm_calib_blob.data = temp;
-            mvm->nvm_calib_blob.size = ret;
-            break;
-        case NVM_SECTION_TYPE_PRODUCTION:
-            mvm->nvm_prod_blob.data = temp;
-            mvm->nvm_prod_blob.size = ret;
-            break;
-        case NVM_SECTION_TYPE_PHY_SKU:
-            mvm->nvm_phy_sku_blob.data = temp;
-            mvm->nvm_phy_sku_blob.size = ret;
-            break;
-        default:
-            if (section == mvm->cfg->nvm_hw_section_num) {
-                mvm->nvm_hw_blob.data = temp;
-                mvm->nvm_hw_blob.size = ret;
-                break;
-            }
+    switch (section) {
+      case NVM_SECTION_TYPE_SW:
+        mvm->nvm_sw_blob.data = temp;
+        mvm->nvm_sw_blob.size = ret;
+        break;
+      case NVM_SECTION_TYPE_CALIBRATION:
+        mvm->nvm_calib_blob.data = temp;
+        mvm->nvm_calib_blob.size = ret;
+        break;
+      case NVM_SECTION_TYPE_PRODUCTION:
+        mvm->nvm_prod_blob.data = temp;
+        mvm->nvm_prod_blob.size = ret;
+        break;
+      case NVM_SECTION_TYPE_PHY_SKU:
+        mvm->nvm_phy_sku_blob.data = temp;
+        mvm->nvm_phy_sku_blob.size = ret;
+        break;
+      default:
+        if (section == mvm->cfg->nvm_hw_section_num) {
+          mvm->nvm_hw_blob.data = temp;
+          mvm->nvm_hw_blob.size = ret;
+          break;
         }
-#endif
     }
-    if (!size_read) { IWL_ERR(mvm, "OTP is blank\n"); }
-    kfree(nvm_buffer);
+#endif
+  }
+  if (!size_read) {
+    IWL_ERR(mvm, "OTP is blank\n");
+  }
+  kfree(nvm_buffer);
 
-    /* Only if PNVM selected in the mod param - load external NVM  */
-    if (mvm->nvm_file_name) {
-        /* read External NVM file from the mod param */
+  /* Only if PNVM selected in the mod param - load external NVM  */
+  if (mvm->nvm_file_name) {
+    /* read External NVM file from the mod param */
+    ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, mvm->nvm_sections);
+    if (ret) {
+      mvm->nvm_file_name = nvm_file_C;
+
+      if ((ret == -EFAULT || ret == -ENOENT) && mvm->nvm_file_name) {
+        /* in case nvm file was failed try again */
         ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, mvm->nvm_sections);
         if (ret) {
-            mvm->nvm_file_name = nvm_file_C;
-
-            if ((ret == -EFAULT || ret == -ENOENT) && mvm->nvm_file_name) {
-                /* in case nvm file was failed try again */
-                ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, mvm->nvm_sections);
-                if (ret) { return ret; }
-            } else {
-                return ret;
-            }
+          return ret;
         }
+      } else {
+        return ret;
+      }
     }
+  }
 
-    /* parse the relevant nvm sections */
-    mvm->nvm_data = iwl_parse_nvm_sections(mvm);
-    if (!mvm->nvm_data) { return -ENODATA; }
-    IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n", mvm->nvm_data->nvm_version);
+  /* parse the relevant nvm sections */
+  mvm->nvm_data = iwl_parse_nvm_sections(mvm);
+  if (!mvm->nvm_data) {
+    return -ENODATA;
+  }
+  IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n", mvm->nvm_data->nvm_version);
 
-    return ret < 0 ? ret : 0;
+  return ret < 0 ? ret : 0;
 }
 
 struct iwl_mcc_update_resp* iwl_mvm_update_mcc(struct iwl_mvm* mvm, const char* alpha2,
                                                enum iwl_mcc_source src_id) {
-    struct iwl_mcc_update_cmd mcc_update_cmd = {
-        .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
-        .source_id = (uint8_t)src_id,
-    };
-    struct iwl_mcc_update_resp* resp_cp;
-    struct iwl_rx_packet* pkt;
-    struct iwl_host_cmd cmd = {
-        .id = MCC_UPDATE_CMD,
-        .flags = CMD_WANT_SKB,
-        .data = {&mcc_update_cmd},
-    };
+  struct iwl_mcc_update_cmd mcc_update_cmd = {
+      .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
+      .source_id = (uint8_t)src_id,
+  };
+  struct iwl_mcc_update_resp* resp_cp;
+  struct iwl_rx_packet* pkt;
+  struct iwl_host_cmd cmd = {
+      .id = MCC_UPDATE_CMD,
+      .flags = CMD_WANT_SKB,
+      .data = {&mcc_update_cmd},
+  };
 
-    int ret;
-    uint32_t status;
-    int resp_len, n_channels;
-    uint16_t mcc;
+  int ret;
+  uint32_t status;
+  int resp_len, n_channels;
+  uint16_t mcc;
 
-    if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) { return ERR_PTR(-EOPNOTSUPP); }
+  if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) {
+    return ERR_PTR(-EOPNOTSUPP);
+  }
 
-    cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
+  cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
 
-    IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n", alpha2[0], alpha2[1],
-                  src_id);
+  IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n", alpha2[0], alpha2[1], src_id);
 
-    ret = iwl_mvm_send_cmd(mvm, &cmd);
-    if (ret) { return ERR_PTR(ret); }
+  ret = iwl_mvm_send_cmd(mvm, &cmd);
+  if (ret) {
+    return ERR_PTR(ret);
+  }
 
-    pkt = cmd.resp_pkt;
+  pkt = cmd.resp_pkt;
 
-    /* Extract MCC response */
-    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) {
-        struct iwl_mcc_update_resp* mcc_resp = (void*)pkt->data;
+  /* Extract MCC response */
+  if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) {
+    struct iwl_mcc_update_resp* mcc_resp = (void*)pkt->data;
 
-        n_channels = __le32_to_cpu(mcc_resp->n_channels);
-        resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32);
-        resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
-        if (!resp_cp) {
-            resp_cp = ERR_PTR(-ENOMEM);
-            goto exit;
-        }
-    } else {
-        struct iwl_mcc_update_resp_v3* mcc_resp_v3 = (void*)pkt->data;
+    n_channels = __le32_to_cpu(mcc_resp->n_channels);
+    resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32);
+    resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
+    if (!resp_cp) {
+      resp_cp = ERR_PTR(-ENOMEM);
+      goto exit;
+    }
+  } else {
+    struct iwl_mcc_update_resp_v3* mcc_resp_v3 = (void*)pkt->data;
 
-        n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
-        resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32);
-        resp_cp = kzalloc(resp_len, GFP_KERNEL);
-        if (!resp_cp) {
-            resp_cp = ERR_PTR(-ENOMEM);
-            goto exit;
-        }
-
-        resp_cp->status = mcc_resp_v3->status;
-        resp_cp->mcc = mcc_resp_v3->mcc;
-        resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap);
-        resp_cp->source_id = mcc_resp_v3->source_id;
-        resp_cp->time = mcc_resp_v3->time;
-        resp_cp->geo_info = mcc_resp_v3->geo_info;
-        resp_cp->n_channels = mcc_resp_v3->n_channels;
-        memcpy(resp_cp->channels, mcc_resp_v3->channels, n_channels * sizeof(__le32));
+    n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
+    resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32);
+    resp_cp = kzalloc(resp_len, GFP_KERNEL);
+    if (!resp_cp) {
+      resp_cp = ERR_PTR(-ENOMEM);
+      goto exit;
     }
 
-    status = le32_to_cpu(resp_cp->status);
+    resp_cp->status = mcc_resp_v3->status;
+    resp_cp->mcc = mcc_resp_v3->mcc;
+    resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap);
+    resp_cp->source_id = mcc_resp_v3->source_id;
+    resp_cp->time = mcc_resp_v3->time;
+    resp_cp->geo_info = mcc_resp_v3->geo_info;
+    resp_cp->n_channels = mcc_resp_v3->n_channels;
+    memcpy(resp_cp->channels, mcc_resp_v3->channels, n_channels * sizeof(__le32));
+  }
 
-    mcc = le16_to_cpu(resp_cp->mcc);
+  status = le32_to_cpu(resp_cp->status);
 
-    /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
-    if (mcc == 0) {
-        mcc = 0x3030; /* "00" - world */
-        resp_cp->mcc = cpu_to_le16(mcc);
-    }
+  mcc = le16_to_cpu(resp_cp->mcc);
 
-    IWL_DEBUG_LAR(mvm, "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n", status,
-                  mcc, mcc >> 8, mcc & 0xff, n_channels);
+  /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
+  if (mcc == 0) {
+    mcc = 0x3030; /* "00" - world */
+    resp_cp->mcc = cpu_to_le16(mcc);
+  }
+
+  IWL_DEBUG_LAR(mvm, "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n", status, mcc,
+                mcc >> 8, mcc & 0xff, n_channels);
 
 exit:
-    iwl_free_resp(&cmd);
-    return resp_cp;
+  iwl_free_resp(&cmd);
+  return resp_cp;
 }
 
 int iwl_mvm_init_mcc(struct iwl_mvm* mvm) {
-    bool tlv_lar;
-    bool nvm_lar;
-    int retval;
-    struct ieee80211_regdomain* regd;
-    char mcc[3];
+  bool tlv_lar;
+  bool nvm_lar;
+  int retval;
+  struct ieee80211_regdomain* regd;
+  char mcc[3];
 
-    if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
-        tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-        nvm_lar = mvm->nvm_data->lar_enabled;
-        if (tlv_lar != nvm_lar)
-            IWL_INFO(mvm, "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
-                     tlv_lar ? "enabled" : "disabled", nvm_lar ? "enabled" : "disabled");
-    }
+  if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
+    tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+    nvm_lar = mvm->nvm_data->lar_enabled;
+    if (tlv_lar != nvm_lar)
+      IWL_INFO(mvm, "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
+               tlv_lar ? "enabled" : "disabled", nvm_lar ? "enabled" : "disabled");
+  }
 
-    if (!iwl_mvm_is_lar_supported(mvm)) { return 0; }
+  if (!iwl_mvm_is_lar_supported(mvm)) {
+    return 0;
+  }
 
-    /*
-     * try to replay the last set MCC to FW. If it doesn't exist,
-     * queue an update to cfg80211 to retrieve the default alpha2 from FW.
-     */
-    retval = iwl_mvm_init_fw_regd(mvm);
-    if (retval != -ENOENT) { return retval; }
-
-    /*
-     * Driver regulatory hint for initial update, this also informs the
-     * firmware we support wifi location updates.
-     * Disallow scans that might crash the FW while the LAR regdomain
-     * is not set.
-     */
-    mvm->lar_regdom_set = false;
-
-    regd = iwl_mvm_get_current_regdomain(mvm, NULL);
-    if (IS_ERR_OR_NULL(regd)) { return -EIO; }
-
-    if (iwl_mvm_is_wifi_mcc_supported(mvm) && !iwl_acpi_get_mcc(mvm->dev, mcc)) {
-        kfree(regd);
-        regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, MCC_SOURCE_BIOS, NULL);
-        if (IS_ERR_OR_NULL(regd)) { return -EIO; }
-    }
-
-    retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
-    kfree(regd);
+  /*
+   * try to replay the last set MCC to FW. If it doesn't exist,
+   * queue an update to cfg80211 to retrieve the default alpha2 from FW.
+   */
+  retval = iwl_mvm_init_fw_regd(mvm);
+  if (retval != -ENOENT) {
     return retval;
+  }
+
+  /*
+   * Driver regulatory hint for initial update, this also informs the
+   * firmware we support wifi location updates.
+   * Disallow scans that might crash the FW while the LAR regdomain
+   * is not set.
+   */
+  mvm->lar_regdom_set = false;
+
+  regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+  if (IS_ERR_OR_NULL(regd)) {
+    return -EIO;
+  }
+
+  if (iwl_mvm_is_wifi_mcc_supported(mvm) && !iwl_acpi_get_mcc(mvm->dev, mcc)) {
+    kfree(regd);
+    regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, MCC_SOURCE_BIOS, NULL);
+    if (IS_ERR_OR_NULL(regd)) {
+      return -EIO;
+    }
+  }
+
+  retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+  kfree(regd);
+  return retval;
 }
 
 void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_mcc_chub_notif* notif = (void*)pkt->data;
-    enum iwl_mcc_source src;
-    char mcc[3];
-    struct ieee80211_regdomain* regd;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_mcc_chub_notif* notif = (void*)pkt->data;
+  enum iwl_mcc_source src;
+  char mcc[3];
+  struct ieee80211_regdomain* regd;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) {
-        IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n");
-        return;
-    }
+  if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) {
+    IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n");
+    return;
+  }
 
-    if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) { return; }
+  if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) {
+    return;
+  }
 
-    mcc[0] = le16_to_cpu(notif->mcc) >> 8;
-    mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
-    mcc[2] = '\0';
-    src = notif->source_id;
+  mcc[0] = le16_to_cpu(notif->mcc) >> 8;
+  mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
+  mcc[2] = '\0';
+  src = notif->source_id;
 
-    IWL_DEBUG_LAR(mvm, "RX: received chub update mcc cmd (mcc '%s' src %d)\n", mcc, src);
-    regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
-    if (IS_ERR_OR_NULL(regd)) { return; }
+  IWL_DEBUG_LAR(mvm, "RX: received chub update mcc cmd (mcc '%s' src %d)\n", mcc, src);
+  regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
+  if (IS_ERR_OR_NULL(regd)) {
+    return;
+  }
 
-    regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
-    kfree(regd);
+  regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+  kfree(regd);
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/offloading.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/offloading.c
index 29d8750..1116019 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/offloading.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/offloading.c
@@ -35,164 +35,177 @@
 #include <linux/bitops.h>
 #include <net/addrconf.h>
 #include <net/ipv6.h>
+
 #include "mvm.h"
 
 void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta* mvm_ap_sta, struct iwl_wowlan_config_cmd* cmd) {
-    int i;
+  int i;
 
-    /*
-     * For QoS counters, we store the one to use next, so subtract 0x10
-     * since the uCode will add 0x10 *before* using the value while we
-     * increment after using the value (i.e. store the next value to use).
-     */
-    for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-        uint16_t seq = mvm_ap_sta->tid_data[i].seq_number;
-        seq -= 0x10;
-        cmd->qos_seq[i] = cpu_to_le16(seq);
-    }
+  /*
+   * For QoS counters, we store the one to use next, so subtract 0x10
+   * since the uCode will add 0x10 *before* using the value while we
+   * increment after using the value (i.e. store the next value to use).
+   */
+  for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+    uint16_t seq = mvm_ap_sta->tid_data[i].seq_number;
+    seq -= 0x10;
+    cmd->qos_seq[i] = cpu_to_le16(seq);
+  }
 }
 
 int iwl_mvm_send_proto_offload(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                bool disable_offloading, bool offload_ns, uint32_t cmd_flags) {
-    union {
-        struct iwl_proto_offload_cmd_v1 v1;
-        struct iwl_proto_offload_cmd_v2 v2;
-        struct iwl_proto_offload_cmd_v3_small v3s;
-        struct iwl_proto_offload_cmd_v3_large v3l;
-    } cmd = {};
-    struct iwl_host_cmd hcmd = {
-        .id = PROT_OFFLOAD_CONFIG_CMD,
-        .flags = cmd_flags,
-        .data[0] = &cmd,
-        .dataflags[0] = IWL_HCMD_DFL_DUP,
-    };
-    struct iwl_proto_offload_cmd_common* common;
-    uint32_t enabled = 0, size;
-    uint32_t capa_flags = mvm->fw->ucode_capa.flags;
+  union {
+    struct iwl_proto_offload_cmd_v1 v1;
+    struct iwl_proto_offload_cmd_v2 v2;
+    struct iwl_proto_offload_cmd_v3_small v3s;
+    struct iwl_proto_offload_cmd_v3_large v3l;
+  } cmd = {};
+  struct iwl_host_cmd hcmd = {
+      .id = PROT_OFFLOAD_CONFIG_CMD,
+      .flags = cmd_flags,
+      .data[0] = &cmd,
+      .dataflags[0] = IWL_HCMD_DFL_DUP,
+  };
+  struct iwl_proto_offload_cmd_common* common;
+  uint32_t enabled = 0, size;
+  uint32_t capa_flags = mvm->fw->ucode_capa.flags;
 #if IS_ENABLED(CONFIG_IPV6)
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int i;
-    /*
-     * Skip tentative address when ns offload is enabled to avoid
-     * violating RFC4862.
-     * Keep tentative address when ns offload is disabled so the NS packets
-     * will not be filtered out and will wake up the host.
-     */
-    bool skip_tentative = offload_ns;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int i;
+  /*
+   * Skip tentative address when ns offload is enabled to avoid
+   * violating RFC4862.
+   * Keep tentative address when ns offload is disabled so the NS packets
+   * will not be filtered out and will wake up the host.
+   */
+  bool skip_tentative = offload_ns;
 
-    if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
-        capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
-        struct iwl_ns_config* nsc;
-        struct iwl_targ_addr* addrs;
-        int n_nsc, n_addrs;
-        int c;
-        int num_skipped = 0;
+  if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+      capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+    struct iwl_ns_config* nsc;
+    struct iwl_targ_addr* addrs;
+    int n_nsc, n_addrs;
+    int c;
+    int num_skipped = 0;
 
-        if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
-            nsc = cmd.v3s.ns_config;
-            n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
-            addrs = cmd.v3s.targ_addrs;
-            n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
-        } else {
-            nsc = cmd.v3l.ns_config;
-            n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
-            addrs = cmd.v3l.targ_addrs;
-            n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
-        }
-
-        /*
-         * For each address we have (and that will fit) fill a target
-         * address struct and combine for NS offload structs with the
-         * solicited node addresses.
-         */
-        for (i = 0, c = 0; i < mvmvif->num_target_ipv6_addrs && i < n_addrs && c < n_nsc; i++) {
-            struct in6_addr solicited_addr;
-            int j;
-
-            if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) {
-                num_skipped++;
-                continue;
-            }
-
-            addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i], &solicited_addr);
-            for (j = 0; j < c; j++)
-                if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr, &solicited_addr) == 0) { break; }
-            if (j == c) { c++; }
-            addrs[i].addr = mvmvif->target_ipv6_addrs[i];
-            addrs[i].config_num = cpu_to_le32(j);
-            nsc[j].dest_ipv6_addr = solicited_addr;
-            memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
-        }
-
-        if (mvmvif->num_target_ipv6_addrs - num_skipped) { enabled |= IWL_D3_PROTO_IPV6_VALID; }
-
-        if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
-            cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped);
-        } else {
-            cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped);
-        }
-    } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
-        bool found = false;
-
-        BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) != sizeof(mvmvif->target_ipv6_addrs[0]));
-
-        for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2);
-             i++) {
-            if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) { continue; }
-
-            memcpy(cmd.v2.target_ipv6_addr[i], &mvmvif->target_ipv6_addrs[i],
-                   sizeof(cmd.v2.target_ipv6_addr[i]));
-
-            found = true;
-        }
-        if (found) {
-            enabled |= IWL_D3_PROTO_IPV6_VALID;
-            memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
-        }
-    } else {
-        bool found = false;
-        BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) != sizeof(mvmvif->target_ipv6_addrs[0]));
-
-        for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1);
-             i++) {
-            if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) { continue; }
-
-            memcpy(cmd.v1.target_ipv6_addr[i], &mvmvif->target_ipv6_addrs[i],
-                   sizeof(cmd.v1.target_ipv6_addr[i]));
-
-            found = true;
-        }
-
-        if (found) {
-            enabled |= IWL_D3_PROTO_IPV6_VALID;
-            memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
-        }
-    }
-
-    if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID)) { enabled |= IWL_D3_PROTO_OFFLOAD_NS; }
-#endif
     if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
-        common = &cmd.v3s.common;
-        size = sizeof(cmd.v3s);
-    } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
-        common = &cmd.v3l.common;
-        size = sizeof(cmd.v3l);
-    } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
-        common = &cmd.v2.common;
-        size = sizeof(cmd.v2);
+      nsc = cmd.v3s.ns_config;
+      n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+      addrs = cmd.v3s.targ_addrs;
+      n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
     } else {
-        common = &cmd.v1.common;
-        size = sizeof(cmd.v1);
+      nsc = cmd.v3l.ns_config;
+      n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+      addrs = cmd.v3l.targ_addrs;
+      n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
     }
 
-    if (vif->bss_conf.arp_addr_cnt) {
-        enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
-        common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
-        memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
+    /*
+     * For each address we have (and that will fit) fill a target
+     * address struct and combine for NS offload structs with the
+     * solicited node addresses.
+     */
+    for (i = 0, c = 0; i < mvmvif->num_target_ipv6_addrs && i < n_addrs && c < n_nsc; i++) {
+      struct in6_addr solicited_addr;
+      int j;
+
+      if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) {
+        num_skipped++;
+        continue;
+      }
+
+      addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i], &solicited_addr);
+      for (j = 0; j < c; j++)
+        if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr, &solicited_addr) == 0) {
+          break;
+        }
+      if (j == c) {
+        c++;
+      }
+      addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+      addrs[i].config_num = cpu_to_le32(j);
+      nsc[j].dest_ipv6_addr = solicited_addr;
+      memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
     }
 
-    if (!disable_offloading) { common->enabled = cpu_to_le32(enabled); }
+    if (mvmvif->num_target_ipv6_addrs - num_skipped) {
+      enabled |= IWL_D3_PROTO_IPV6_VALID;
+    }
 
-    hcmd.len[0] = size;
-    return iwl_mvm_send_cmd(mvm, &hcmd);
+    if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+      cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped);
+    } else {
+      cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped);
+    }
+  } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+    bool found = false;
+
+    BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) != sizeof(mvmvif->target_ipv6_addrs[0]));
+
+    for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) {
+      if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) {
+        continue;
+      }
+
+      memcpy(cmd.v2.target_ipv6_addr[i], &mvmvif->target_ipv6_addrs[i],
+             sizeof(cmd.v2.target_ipv6_addr[i]));
+
+      found = true;
+    }
+    if (found) {
+      enabled |= IWL_D3_PROTO_IPV6_VALID;
+      memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
+    }
+  } else {
+    bool found = false;
+    BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) != sizeof(mvmvif->target_ipv6_addrs[0]));
+
+    for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) {
+      if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) {
+        continue;
+      }
+
+      memcpy(cmd.v1.target_ipv6_addr[i], &mvmvif->target_ipv6_addrs[i],
+             sizeof(cmd.v1.target_ipv6_addr[i]));
+
+      found = true;
+    }
+
+    if (found) {
+      enabled |= IWL_D3_PROTO_IPV6_VALID;
+      memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
+    }
+  }
+
+  if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID)) {
+    enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+  }
+#endif
+  if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+    common = &cmd.v3s.common;
+    size = sizeof(cmd.v3s);
+  } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+    common = &cmd.v3l.common;
+    size = sizeof(cmd.v3l);
+  } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+    common = &cmd.v2.common;
+    size = sizeof(cmd.v2);
+  } else {
+    common = &cmd.v1.common;
+    size = sizeof(cmd.v1);
+  }
+
+  if (vif->bss_conf.arp_addr_cnt) {
+    enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
+    common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+    memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
+  }
+
+  if (!disable_offloading) {
+    common->enabled = cpu_to_le32(enabled);
+  }
+
+  hcmd.len[0] = size;
+  return iwl_mvm_send_cmd(mvm, &hcmd);
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/ops.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/ops.c
index 26a7bac..8af864d 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/ops.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/ops.c
@@ -37,7 +37,7 @@
 #include <stdbool.h>
 #include <threads.h>
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 #include "fw-api.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/acpi.h"
 #endif  // NEEDS_PORTING
@@ -88,7 +88,7 @@
 
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
 static void iwl_mvm_rx_fw_logs(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    iwl_dnt_dispatch_collect_ucode_message(mvm->trans, rxb);
+  iwl_dnt_dispatch_collect_ucode_message(mvm->trans, rxb);
 }
 #endif
 
@@ -96,7 +96,7 @@
  * module init and exit functions
  */
 zx_status_t iwl_mvm_init(void) {
-    zx_status_t ret;
+  zx_status_t ret;
 
 #if 0   // NEEDS_PORTING
     ret = iwl_mvm_rate_control_register();
@@ -106,65 +106,69 @@
     }
 #endif  // NEEDS_PORTING
 
-    ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
-    if (ret) { pr_err("Unable to register MVM op_mode: %d\n", ret); }
+  ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
+  if (ret) {
+    pr_err("Unable to register MVM op_mode: %d\n", ret);
+  }
 
-    return ret;
+  return ret;
 }
 
 void __exit iwl_mvm_exit(void) {
-    iwl_opmode_deregister("iwlmvm");
+  iwl_opmode_deregister("iwlmvm");
 #if 0   // NEEDS_PORTING
     iwl_mvm_rate_control_unregister();
 #endif  // NEEDS_PORTING
 }
 
 static void iwl_mvm_nic_config(struct iwl_op_mode* op_mode) {
-    struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
-    uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
-    uint32_t reg_val = 0;
-    uint32_t phy_config = iwl_mvm_get_phy_config(mvm);
+  struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
+  uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
+  uint32_t reg_val = 0;
+  uint32_t phy_config = iwl_mvm_get_phy_config(mvm);
 
-    radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >> FW_PHY_CFG_RADIO_TYPE_POS;
-    radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >> FW_PHY_CFG_RADIO_STEP_POS;
-    radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >> FW_PHY_CFG_RADIO_DASH_POS;
+  radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >> FW_PHY_CFG_RADIO_TYPE_POS;
+  radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >> FW_PHY_CFG_RADIO_STEP_POS;
+  radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >> FW_PHY_CFG_RADIO_DASH_POS;
 
-    /* SKU control */
-    reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) << CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
-    reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) << CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
+  /* SKU control */
+  reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) << CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
+  reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) << CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
 
-    /* radio configuration */
-    reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
-    reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
-    reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+  /* radio configuration */
+  reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
+  reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
+  reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
 
-    WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
-            ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
+  WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
+          ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
 
-    /*
-     * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
-     * sampling, and shouldn't be set to any non-zero value.
-     * The same is supposed to be true of the other HW, but unsetting
-     * them (such as the 7260) causes automatic tests to fail on seemingly
-     * unrelated errors. Need to further investigate this, but for now
-     * we'll separate cases.
-     */
-    if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
-        reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
-    }
+  /*
+   * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
+   * sampling, and shouldn't be set to any non-zero value.
+   * The same is supposed to be true of the other HW, but unsetting
+   * them (such as the 7260) causes automatic tests to fail on seemingly
+   * unrelated errors. Need to further investigate this, but for now
+   * we'll separate cases.
+   */
+  if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
+    reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
+  }
 
-    if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) { reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG; }
+  if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) {
+    reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;
+  }
 
-    iwl_trans_set_bits_mask(
-        mvm->trans, CSR_HW_IF_CONFIG_REG,
-        CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
-            CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
-            CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
-            CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | CSR_HW_IF_CONFIG_REG_D3_DEBUG,
-        reg_val);
+  iwl_trans_set_bits_mask(
+      mvm->trans, CSR_HW_IF_CONFIG_REG,
+      CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
+          CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+          CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+          CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | CSR_HW_IF_CONFIG_REG_D3_DEBUG,
+      reg_val);
 
-    IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, radio_cfg_step,
-                   radio_cfg_dash);
+  IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, radio_cfg_step,
+                 radio_cfg_dash);
 
 #if 0   // NEEDS_PORTING
     /*
@@ -184,17 +188,17 @@
  * trace-cmd and not by the driver
  */
 static void iwl_mvm_rx_dhn(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_dhn_hdr* notif = (void*)pkt->data;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_dhn_hdr* notif = (void*)pkt->data;
 
-    uint32_t length = le32_to_cpu(notif->length);
-    uint32_t index_and_mask = le32_to_cpu(notif->index_and_mask);
+  uint32_t length = le32_to_cpu(notif->length);
+  uint32_t index_and_mask = le32_to_cpu(notif->index_and_mask);
 
-    IWL_DEBUG_INFO(mvm,
-                   "Received Debug Host Notifcation:\n"
-                   "length: %u\n"
-                   "index and mask: 0x%x\n",
-                   length, index_and_mask);
+  IWL_DEBUG_INFO(mvm,
+                 "Received Debug Host Notification:\n"
+                 "length: %u\n"
+                 "index and mask: 0x%x\n",
+                 length, index_and_mask);
 }
 #endif /* CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED */
 
@@ -209,9 +213,9 @@
  *  mutex itself, it will be called from a worker without mvm->mutex held.
  */
 enum iwl_rx_handler_context {
-    RX_HANDLER_SYNC,
-    RX_HANDLER_ASYNC_LOCKED,
-    RX_HANDLER_ASYNC_UNLOCKED,
+  RX_HANDLER_SYNC,
+  RX_HANDLER_ASYNC_LOCKED,
+  RX_HANDLER_ASYNC_UNLOCKED,
 };
 
 /**
@@ -221,15 +225,15 @@
  * @fn: the function is called when notification is received
  */
 struct iwl_rx_handlers {
-    uint16_t cmd_id;
-    enum iwl_rx_handler_context context;
-    void (*fn)(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+  uint16_t cmd_id;
+  enum iwl_rx_handler_context context;
+  void (*fn)(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 };
 
 #define RX_HANDLER(_cmd_id, _fn, _context) \
-    { .cmd_id = _cmd_id, .fn = _fn, .context = _context }
+  { .cmd_id = _cmd_id, .fn = _fn, .context = _context }
 #define RX_HANDLER_GRP(_grp, _cmd, _fn, _context) \
-    { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
+  { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
 
 /*
  * Handlers for fw notifications
@@ -242,7 +246,7 @@
     RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
     RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
     RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, iwl_mvm_tlc_update_notif,
                    RX_HANDLER_SYNC),
 
@@ -528,20 +532,24 @@
 #endif
 
 static uint32_t iwl_mvm_min_backoff(struct iwl_mvm* mvm) {
-    const struct iwl_pwr_tx_backoff* backoff = mvm->cfg->pwr_tx_backoffs;
-    uint64_t dflt_pwr_limit;
+  const struct iwl_pwr_tx_backoff* backoff = mvm->cfg->pwr_tx_backoffs;
+  uint64_t dflt_pwr_limit;
 
-    if (!backoff) { return 0; }
+  if (!backoff) {
+    return 0;
+  }
 
-    dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
+  dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
 
-    while (backoff->pwr) {
-        if (dflt_pwr_limit >= backoff->pwr) { return backoff->backoff; }
-
-        backoff++;
+  while (backoff->pwr) {
+    if (dflt_pwr_limit >= backoff->pwr) {
+      return backoff->backoff;
     }
 
-    return 0;
+    backoff++;
+  }
+
+  return 0;
 }
 
 #if 0   // NEEDS_PORTING
@@ -574,8 +582,9 @@
 #define IWL_DBG_CFG_BINA(n, max)          /* nothing */
 #define IWL_DBG_CFG_RANGE(t, n, min, max) /* nothing */
 #define IWL_MOD_PARAM(t, n)               /* nothing */
-#define IWL_MVM_MOD_PARAM(t, n) \
-    if (mvm->trans->dbg_cfg.__mvm_mod_param_##n) iwlmvm_mod_params.n = mvm->trans->dbg_cfg.mvm_##n;
+#define IWL_MVM_MOD_PARAM(t, n)                \
+  if (mvm->trans->dbg_cfg.__mvm_mod_param_##n) \
+    iwlmvm_mod_params.n = mvm->trans->dbg_cfg.mvm_##n;
 #define DBG_CFG_REINCLUDE
 #include "iwl-dbg-cfg.h"
 #undef IWL_DBG_CFG
@@ -590,31 +599,31 @@
 #endif
 
 static int iwl_mvm_fwrt_dump_start(void* ctx) {
-    struct iwl_mvm* mvm = ctx;
-    int ret;
+  struct iwl_mvm* mvm = ctx;
+  int ret;
 
-    ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
-    if (ret) { return ret; }
+  ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
+  if (ret) {
+    return ret;
+  }
 
-    mtx_lock(&mvm->mutex);
+  mtx_lock(&mvm->mutex);
 
-    return 0;
+  return 0;
 }
 
 static void iwl_mvm_fwrt_dump_end(void* ctx) {
-    struct iwl_mvm* mvm = ctx;
+  struct iwl_mvm* mvm = ctx;
 
-    mtx_unlock(&mvm->mutex);
+  mtx_unlock(&mvm->mutex);
 
-    iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
+  iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
 }
 
-static bool iwl_mvm_fwrt_fw_running(void* ctx) {
-    return iwl_mvm_firmware_running(ctx);
-}
+static bool iwl_mvm_fwrt_fw_running(void* ctx) { return iwl_mvm_firmware_running(ctx); }
 
 static zx_status_t iwl_mvm_fwrt_send_hcmd(void* ctx, struct iwl_host_cmd* host_cmd) {
-    return ZX_ERR_NOT_SUPPORTED;
+  return ZX_ERR_NOT_SUPPORTED;
 #if 0   // NEEDS_PORTING
     struct iwl_mvm* mvm = (struct iwl_mvm*)ctx;
     int ret;
@@ -636,59 +645,61 @@
 
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
 static int iwl_mvm_tm_send_hcmd(void* op_mode, struct iwl_host_cmd* host_cmd) {
-    struct iwl_mvm* mvm = (struct iwl_mvm*)op_mode;
+  struct iwl_mvm* mvm = (struct iwl_mvm*)op_mode;
 
-    if (WARN_ON_ONCE(!op_mode)) { return -EINVAL; }
+  if (WARN_ON_ONCE(!op_mode)) {
+    return -EINVAL;
+  }
 
-    return iwl_mvm_send_cmd(mvm, host_cmd);
+  return iwl_mvm_send_cmd(mvm, host_cmd);
 }
 
 static int iwl_mvm_tm_cmd_exec_start(struct iwl_testmode* testmode) {
-    struct iwl_mvm* mvm = testmode->op_mode;
+  struct iwl_mvm* mvm = testmode->op_mode;
 
-    return iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TM_CMD);
+  return iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TM_CMD);
 }
 
 static void iwl_mvm_tm_cmd_exec_end(struct iwl_testmode* testmode) {
-    struct iwl_mvm* mvm = testmode->op_mode;
+  struct iwl_mvm* mvm = testmode->op_mode;
 
-    iwl_mvm_unref(mvm, IWL_MVM_REF_TM_CMD);
+  iwl_mvm_unref(mvm, IWL_MVM_REF_TM_CMD);
 }
 #endif
 
 static struct iwl_op_mode* iwl_op_mode_mvm_start(struct iwl_trans* trans, const struct iwl_cfg* cfg,
                                                  const struct iwl_fw* fw,
                                                  struct dentry* dbgfs_dir) {
-    struct iwl_op_mode* op_mode;
-    struct iwl_mvm* mvm;
-    struct iwl_trans_config trans_cfg = {};
-    static const uint8_t no_reclaim_cmds[] = {
-        TX_CMD,
-    };
-    int err, scan_size;
-    uint32_t min_backoff;
-    enum iwl_amsdu_size rb_size_default;
+  struct iwl_op_mode* op_mode;
+  struct iwl_mvm* mvm;
+  struct iwl_trans_config trans_cfg = {};
+  static const uint8_t no_reclaim_cmds[] = {
+      TX_CMD,
+  };
+  int err, scan_size;
+  uint32_t min_backoff;
+  enum iwl_amsdu_size rb_size_default;
 
-    /*
-     * We use IWL_MVM_STATION_COUNT to check the validity of the station
-     * index all over the driver - check that its value corresponds to the
-     * array size.
-     */
-    BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
+  /*
+   * We use IWL_MVM_STATION_COUNT to check the validity of the station
+   * index all over the driver - check that its value corresponds to the
+   * array size.
+   */
+  BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
 
-    // Allocate op_mode and mvm. In the Linux driver these are private members of a ieee80211_hw
-    // struct |hw|, which also contains iwl_mvm_hw_ops. The equivalent functionality of ieee80211_hw 
-    // has not yet been ported.
-    op_mode = calloc(1, sizeof(struct iwl_op_mode));
-    if (!op_mode) {
-        IWL_ERR(trans, "Failed to allocate op mode\n");
-    }
-    mvm = calloc(1, sizeof(struct iwl_mvm));
-    if (!mvm) {
-        IWL_ERR(trans, "Failed to allocate mvm\n");
-    }
+  // Allocate op_mode and mvm. In the Linux driver these are private members of a ieee80211_hw
+  // struct |hw|, which also contains iwl_mvm_hw_ops. The equivalent functionality of ieee80211_hw
+  // has not yet been ported.
+  op_mode = calloc(1, sizeof(struct iwl_op_mode));
+  if (!op_mode) {
+    IWL_ERR(trans, "Failed to allocate op mode\n");
+  }
+  mvm = calloc(1, sizeof(struct iwl_mvm));
+  if (!mvm) {
+    IWL_ERR(trans, "Failed to allocate mvm\n");
+  }
 
-    op_mode->op_mode_specific = mvm;
+  op_mode->op_mode_specific = mvm;
 
 #if 0   // NEEDS_PORTING
     if (cfg->max_rx_agg_size) {
@@ -704,51 +715,53 @@
     }
 #endif  // NEEDS_PORTING
 
-    mvm->dev = trans->dev;
-    mvm->trans = trans;
-    mvm->cfg = cfg;
-    mvm->fw = fw;
+  mvm->dev = trans->dev;
+  mvm->trans = trans;
+  mvm->cfg = cfg;
+  mvm->fw = fw;
 
-    iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, dbgfs_dir);
+  iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, dbgfs_dir);
 
-    mvm->init_status = 0;
+  mvm->init_status = 0;
 
-    if (iwl_mvm_has_new_rx_api(mvm)) {
-        op_mode->ops = &iwl_mvm_ops_mq;
-        trans->rx_mpdu_cmd_hdr_size = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-                                          ? sizeof(struct iwl_rx_mpdu_desc)
-                                          : IWL_RX_DESC_SIZE_V1;
-    } else {
-        op_mode->ops = &iwl_mvm_ops;
-        trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+  if (iwl_mvm_has_new_rx_api(mvm)) {
+    op_mode->ops = &iwl_mvm_ops_mq;
+    trans->rx_mpdu_cmd_hdr_size = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+                                      ? sizeof(struct iwl_rx_mpdu_desc)
+                                      : IWL_RX_DESC_SIZE_V1;
+  } else {
+    op_mode->ops = &iwl_mvm_ops;
+    trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
 
-        if (WARN_ON(trans->num_rx_queues > 1)) { goto out_free; }
+    if (WARN_ON(trans->num_rx_queues > 1)) {
+      goto out_free;
     }
+  }
 
-    mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
+  mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
 
-    mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
-    mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
-    mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
-    mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+  mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+  mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
+  mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+  mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
 
-    mvm->sf_state = SF_UNINIT;
-    if (iwl_mvm_has_unified_ucode(mvm)) {
-        iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
-    } else {
-        iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
-    }
-    mvm->drop_bcn_ap_mode = true;
+  mvm->sf_state = SF_UNINIT;
+  if (iwl_mvm_has_unified_ucode(mvm)) {
+    iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
+  } else {
+    iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
+  }
+  mvm->drop_bcn_ap_mode = true;
 
-    mtx_init(&mvm->mutex, mtx_plain);
-    mtx_init(&mvm->d0i3_suspend_mutex, mtx_plain);
-    mtx_init(&mvm->async_handlers_lock, mtx_plain);
-    list_initialize(&mvm->time_event_list);
-    list_initialize(&mvm->aux_roc_te_list);
-    list_initialize(&mvm->async_handlers_list);
-    mtx_init(&mvm->time_event_lock, mtx_plain);
+  mtx_init(&mvm->mutex, mtx_plain);
+  mtx_init(&mvm->d0i3_suspend_mutex, mtx_plain);
+  mtx_init(&mvm->async_handlers_lock, mtx_plain);
+  list_initialize(&mvm->time_event_list);
+  list_initialize(&mvm->aux_roc_te_list);
+  list_initialize(&mvm->async_handlers_list);
+  mtx_init(&mvm->time_event_lock, mtx_plain);
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
     INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
     INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
 #ifdef CPTCFG_MAC80211_LATENCY_MEASUREMENTS
@@ -762,23 +775,23 @@
     INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
     INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
 #endif  // NEEDS_PORTING
-    list_initialize(&mvm->add_stream_txqs);
+  list_initialize(&mvm->add_stream_txqs);
 
-    mtx_init(&mvm->d0i3_tx_lock, mtx_plain);
-    mtx_init(&mvm->refs_lock, mtx_plain);
+  mtx_init(&mvm->d0i3_tx_lock, mtx_plain);
+  mtx_init(&mvm->refs_lock, mtx_plain);
 #if 0   // NEEDS_PORTING
     skb_queue_head_init(&mvm->d0i3_tx);
     init_waitqueue_head(&mvm->d0i3_exit_waitq);
     init_waitqueue_head(&mvm->rx_sync_waitq);
 #endif  // NEEDS_PORTING
 
-    atomic_store(&mvm->queue_sync_counter, 0);
+  atomic_store(&mvm->queue_sync_counter, 0);
 
 #if 0   // NEEDS_PORTING
     SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
 #endif  // NEEDS_PORTING
 
-    mtx_init(&mvm->tcm.lock, mtx_plain);
+  mtx_init(&mvm->tcm.lock, mtx_plain);
 #if 0   // NEEDS_PORTING
     INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
     mvm->tcm.ts = jiffies;
@@ -787,64 +800,64 @@
 #endif  // NEEDS_PORTING
 
 #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE
-    INIT_LIST_HEAD(&mvm->tdls_peer_cache_list);
+  INIT_LIST_HEAD(&mvm->tdls_peer_cache_list);
 #endif
 
 #ifdef CPTCFG_IWLMVM_VENDOR_CMDS
-    mvm->rx_filters = IWL_MVM_VENDOR_RXFILTER_EINVAL;
+  mvm->rx_filters = IWL_MVM_VENDOR_RXFILTER_EINVAL;
 #endif
 
 #if 0   // NEEDS_PORTING
     INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
 #endif  // NEEDS_PORTING
 
-    /*
-     * Populate the state variables that the transport layer needs
-     * to know about.
-     */
-    trans_cfg.op_mode = op_mode;
-    trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
-    trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+  /*
+   * Populate the state variables that the transport layer needs
+   * to know about.
+   */
+  trans_cfg.op_mode = op_mode;
+  trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
+  trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
 
-    if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-        rb_size_default = IWL_AMSDU_2K;
-    } else {
-        rb_size_default = IWL_AMSDU_4K;
-    }
+  if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+    rb_size_default = IWL_AMSDU_2K;
+  } else {
+    rb_size_default = IWL_AMSDU_4K;
+  }
 
-    switch (iwlwifi_mod_params.amsdu_size) {
+  switch (iwlwifi_mod_params.amsdu_size) {
     case IWL_AMSDU_DEF:
-        trans_cfg.rx_buf_size = rb_size_default;
-        break;
+      trans_cfg.rx_buf_size = rb_size_default;
+      break;
     case IWL_AMSDU_4K:
-        trans_cfg.rx_buf_size = IWL_AMSDU_4K;
-        break;
+      trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+      break;
     case IWL_AMSDU_8K:
-        trans_cfg.rx_buf_size = IWL_AMSDU_8K;
-        break;
+      trans_cfg.rx_buf_size = IWL_AMSDU_8K;
+      break;
     case IWL_AMSDU_12K:
-        trans_cfg.rx_buf_size = IWL_AMSDU_12K;
-        break;
+      trans_cfg.rx_buf_size = IWL_AMSDU_12K;
+      break;
     default:
-        pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, iwlwifi_mod_params.amsdu_size);
-        trans_cfg.rx_buf_size = rb_size_default;
-    }
+      pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, iwlwifi_mod_params.amsdu_size);
+      trans_cfg.rx_buf_size = rb_size_default;
+  }
 
-    trans->wide_cmd_header = true;
-    trans_cfg.bc_table_dword = mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
+  trans->wide_cmd_header = true;
+  trans_cfg.bc_table_dword = mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
 
-    trans_cfg.command_groups = iwl_mvm_groups;
-    trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
+  trans_cfg.command_groups = iwl_mvm_groups;
+  trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
 
-    trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
-    trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
-    trans_cfg.scd_set_active = true;
+  trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+  trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
+  trans_cfg.scd_set_active = true;
 
 #if 0   // NEEDS_PORTING
     trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, driver_data[2]);
 #endif  // NEEDS_PORTING
 
-    trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
+  trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
 
 #if 0   // NEEDS_PORTING
     /* Set a short watchdog for the command queue */
@@ -855,17 +868,17 @@
     snprintf(mvm->hw->wiphy->fw_version, sizeof(mvm->hw->wiphy->fw_version), "%s", fw->fw_version);
 #endif  // NEEDS_PORTING
 
-    /* Configure transport layer */
-    iwl_trans_configure(mvm->trans, &trans_cfg);
+  /* Configure transport layer */
+  iwl_trans_configure(mvm->trans, &trans_cfg);
 
-    trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
-    trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
-    trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
-    memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv, sizeof(trans->dbg_conf_tlv));
-    trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
+  trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+  trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
+  trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
+  memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv, sizeof(trans->dbg_conf_tlv));
+  trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
 
-    trans->iml = mvm->fw->iml;
-    trans->iml_len = mvm->fw->iml_len;
+  trans->iml = mvm->fw->iml;
+  trans->iml_len = mvm->fw->iml_len;
 
 #if 0   // NEEDS_PORTING
     /* set up notification wait support */
@@ -873,213 +886,233 @@
 #endif  // NEEDS_PORTING
 
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
-    iwl_dnt_init(mvm->trans, dbgfs_dir);
-    iwl_tm_init(trans, mvm->fw, &mvm->mutex, mvm);
+  iwl_dnt_init(mvm->trans, dbgfs_dir);
+  iwl_tm_init(trans, mvm->fw, &mvm->mutex, mvm);
 #endif
 
-    /* Init phy db */
-    mvm->phy_db = iwl_phy_db_init(trans);
-    if (!mvm->phy_db) {
-        IWL_ERR(mvm, "Cannot init phy_db\n");
-        goto out_free;
-    }
+  /* Init phy db */
+  mvm->phy_db = iwl_phy_db_init(trans);
+  if (!mvm->phy_db) {
+    IWL_ERR(mvm, "Cannot init phy_db\n");
+    goto out_free;
+  }
 
-    IWL_INFO(mvm, "Detected %s, REV=0x%X\n", mvm->cfg->name, mvm->trans->hw_rev);
+  IWL_INFO(mvm, "Detected %s, REV=0x%X\n", mvm->cfg->name, mvm->trans->hw_rev);
 
-    if (iwlwifi_mod_params.nvm_file) {
-        mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
-    }
+  if (iwlwifi_mod_params.nvm_file) {
+    mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
+  }
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    else if (trans->dbg_cfg.nvm_file) {
-        mvm->nvm_file_name = trans->dbg_cfg.nvm_file;
-    }
+  else if (trans->dbg_cfg.nvm_file) {
+    mvm->nvm_file_name = trans->dbg_cfg.nvm_file;
+  }
 #endif
-    else {
-        IWL_DEBUG_EEPROM(mvm->trans->dev, "working without external nvm file%s\n", "");
-    }
+  else {
+    IWL_DEBUG_EEPROM(mvm->trans->dev, "working without external nvm file%s\n", "");
+  }
 
-    err = iwl_trans_start_hw(mvm->trans);
-    if (err) { goto out_free; }
+  err = iwl_trans_start_hw(mvm->trans);
+  if (err) {
+    goto out_free;
+  }
 
-    mtx_lock(&mvm->mutex);
-    iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
-    err = iwl_run_init_mvm_ucode(mvm, true);
+  mtx_lock(&mvm->mutex);
+  iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
+  err = iwl_run_init_mvm_ucode(mvm, true);
 #if 0   // NEEDS_PORTING
     if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status)) {
         iwl_fw_alive_error_dump(&mvm->fwrt);
     }
 #endif  // NEEDS_PORTING
-    if (!iwlmvm_mod_params.init_dbg || !err) { iwl_mvm_stop_device(mvm); }
-    iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
-    mtx_unlock(&mvm->mutex);
-    if (err < 0) {
-        IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
-        goto out_free;
-    }
+  if (!iwlmvm_mod_params.init_dbg || !err) {
+    iwl_mvm_stop_device(mvm);
+  }
+  iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
+  mtx_unlock(&mvm->mutex);
+  if (err < 0) {
+    IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+    goto out_free;
+  }
 
-    scan_size = iwl_mvm_scan_size(mvm);
+  scan_size = iwl_mvm_scan_size(mvm);
 
-    mvm->scan_cmd = malloc(scan_size);
-    if (!mvm->scan_cmd) { goto out_free; }
+  mvm->scan_cmd = malloc(scan_size);
+  if (!mvm->scan_cmd) {
+    goto out_free;
+  }
 
-    /* Set EBS as successful as long as not stated otherwise by the FW. */
-    mvm->last_ebs_successful = true;
+  /* Set EBS as successful as long as not stated otherwise by the FW. */
+  mvm->last_ebs_successful = true;
 
-    err = iwl_mvm_mac_setup_register(mvm);
-    if (err) { goto out_free; }
-    mvm->hw_registered = true;
+  err = iwl_mvm_mac_setup_register(mvm);
+  if (err) {
+    goto out_free;
+  }
+  mvm->hw_registered = true;
 
-    min_backoff = iwl_mvm_min_backoff(mvm);
-    iwl_mvm_thermal_initialize(mvm, min_backoff);
+  min_backoff = iwl_mvm_min_backoff(mvm);
+  iwl_mvm_thermal_initialize(mvm, min_backoff);
 
-    err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
-    if (err) { goto out_unregister; }
+  err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
+  if (err) {
+    goto out_unregister;
+  }
 
-    if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
-        memset(&mvm->rx_stats_v3, 0, sizeof(struct mvm_statistics_rx_v3));
-    } else {
-        memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
-    }
+  if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+    memset(&mvm->rx_stats_v3, 0, sizeof(struct mvm_statistics_rx_v3));
+  } else {
+    memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+  }
 
 #ifdef CPTCFG_IWLWIFI_FRQ_MGR
-    err = iwl_mvm_fm_register(mvm);
-    if (err) { pr_err("Unable to register with Frequency Manager: %d\n", err); }
+  err = iwl_mvm_fm_register(mvm);
+  if (err) {
+    pr_err("Unable to register with Frequency Manager: %d\n", err);
+  }
 #endif
 
-    /* The transport always starts with a taken reference, we can
-     * release it now if d0i3 is supported */
-    if (iwl_mvm_is_d0i3_supported(mvm)) { iwl_trans_unref(mvm->trans); }
+  /* The transport always starts with a taken reference, we can
+   * release it now if d0i3 is supported */
+  if (iwl_mvm_is_d0i3_supported(mvm)) {
+    iwl_trans_unref(mvm->trans);
+  }
 
-    iwl_mvm_tof_init(mvm);
+  iwl_mvm_tof_init(mvm);
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    iwl_mvm_init_modparams(mvm);
+  iwl_mvm_init_modparams(mvm);
 #endif
 
 #ifdef CPTCFG_IWLMVM_AX_SOFTAP_TESTMODE
-    mvm->is_bar_enabled = true;
+  mvm->is_bar_enabled = true;
 #endif
 
-    iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+  iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
 
-    return op_mode;
+  return op_mode;
 
 out_unregister:
-    if (iwlmvm_mod_params.init_dbg) { return op_mode; }
+  if (iwlmvm_mod_params.init_dbg) {
+    return op_mode;
+  }
 
 #if 0   // NEEDS_PORTING
     ieee80211_unregister_hw(mvm->hw);
 #endif  // NEEDS_PORTING
-    mvm->hw_registered = false;
-    iwl_mvm_leds_exit(mvm);
-    iwl_mvm_thermal_exit(mvm);
+  mvm->hw_registered = false;
+  iwl_mvm_leds_exit(mvm);
+  iwl_mvm_thermal_exit(mvm);
 out_free:
 #if 0   // NEEDS_PORTING
     iwl_fw_flush_dump(&mvm->fwrt);
 #endif  // NEEDS_PORTING
-    iwl_fw_runtime_free(&mvm->fwrt);
+  iwl_fw_runtime_free(&mvm->fwrt);
 
-    if (iwlmvm_mod_params.init_dbg) { return op_mode; }
+  if (iwlmvm_mod_params.init_dbg) {
+    return op_mode;
+  }
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
-    iwl_dnt_free(trans);
+  iwl_dnt_free(trans);
 #endif
-    iwl_phy_db_free(mvm->phy_db);
-    kfree(mvm->scan_cmd);
-    iwl_trans_op_mode_leave(trans);
+  iwl_phy_db_free(mvm->phy_db);
+  kfree(mvm->scan_cmd);
+  iwl_trans_op_mode_leave(trans);
 
-    free(op_mode);
-    free(mvm);
-    return NULL;
+  free(op_mode);
+  free(mvm);
+  return NULL;
 }
 
 static void iwl_op_mode_mvm_stop(struct iwl_op_mode* op_mode) {
-    struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
-    int i;
+  struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
+  int i;
 
-    /* If d0i3 is supported, we have released the reference that
-     * the transport started with, so we should take it back now
-     * that we are leaving.
-     */
-    if (iwl_mvm_is_d0i3_supported(mvm)) { iwl_trans_ref(mvm->trans); }
+  /* If d0i3 is supported, we have released the reference that
+   * the transport started with, so we should take it back now
+   * that we are leaving.
+   */
+  if (iwl_mvm_is_d0i3_supported(mvm)) {
+    iwl_trans_ref(mvm->trans);
+  }
 
-    iwl_mvm_leds_exit(mvm);
+  iwl_mvm_leds_exit(mvm);
 
-    iwl_mvm_thermal_exit(mvm);
+  iwl_mvm_thermal_exit(mvm);
 
-    if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
+  if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
 #if 0   // NEEDS_PORTING
         ieee80211_unregister_hw(mvm->hw);
 #endif  // NEEDS_PORTING
-        mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
-    }
+    mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
+  }
 
 #ifdef CPTCFG_IWLWIFI_FRQ_MGR
-    iwl_mvm_fm_unregister(mvm);
+  iwl_mvm_fm_unregister(mvm);
 #endif
 
-    kfree(mvm->scan_cmd);
-    kfree(mvm->mcast_filter_cmd);
-    mvm->mcast_filter_cmd = NULL;
+  kfree(mvm->scan_cmd);
+  kfree(mvm->mcast_filter_cmd);
+  mvm->mcast_filter_cmd = NULL;
 
 #ifdef CPTCFG_IWLMVM_VENDOR_CMDS
-    kfree(mvm->mcast_active_filter_cmd);
-    mvm->mcast_active_filter_cmd = NULL;
+  kfree(mvm->mcast_active_filter_cmd);
+  mvm->mcast_active_filter_cmd = NULL;
 #endif
 
 #if defined(CONFIG_PM_SLEEP) && defined(CPTCFG_IWLWIFI_DEBUGFS)
-    kfree(mvm->d3_resume_sram);
+  kfree(mvm->d3_resume_sram);
 #endif
-    iwl_trans_op_mode_leave(mvm->trans);
+  iwl_trans_op_mode_leave(mvm->trans);
 
-    iwl_phy_db_free(mvm->phy_db);
-    mvm->phy_db = NULL;
+  iwl_phy_db_free(mvm->phy_db);
+  mvm->phy_db = NULL;
 
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
-    iwl_dnt_free(mvm->trans);
+  iwl_dnt_free(mvm->trans);
 #endif
-    kfree(mvm->nvm_data);
-    for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) {
-        kfree((void*)mvm->nvm_sections[i].data);
-    }
+  kfree(mvm->nvm_data);
+  for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) {
+    kfree((void*)mvm->nvm_sections[i].data);
+  }
 
 #if 0   // NEEDS_PORTING
     cancel_delayed_work_sync(&mvm->tcm.work);
 #endif  // NEEDS_PORTING
 
 #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE
-    iwl_mvm_tdls_peer_cache_clear(mvm, NULL);
+  iwl_mvm_tdls_peer_cache_clear(mvm, NULL);
 #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */
 
-    iwl_mvm_tof_clean(mvm);
+  iwl_mvm_tof_clean(mvm);
 
-    iwl_fw_runtime_free(&mvm->fwrt);
-    mtx_destroy(&mvm->mutex);
-    mtx_destroy(&mvm->d0i3_suspend_mutex);
+  iwl_fw_runtime_free(&mvm->fwrt);
+  mtx_destroy(&mvm->mutex);
+  mtx_destroy(&mvm->d0i3_suspend_mutex);
 
-    free(op_mode);
-    free(mvm);
+  free(op_mode);
+  free(mvm);
 }
 
 struct iwl_async_handler_entry {
-    list_node_t list;
-    struct iwl_rx_cmd_buffer rxb;
-    enum iwl_rx_handler_context context;
-    void (*fn)(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
+  list_node_t list;
+  struct iwl_rx_cmd_buffer rxb;
+  enum iwl_rx_handler_context context;
+  void (*fn)(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 };
 
 void iwl_mvm_async_handlers_purge(struct iwl_mvm* mvm) {
-    struct iwl_async_handler_entry *entry, *tmp;
+  struct iwl_async_handler_entry *entry, *tmp;
 
-    mtx_lock(&mvm->async_handlers_lock);
-    list_for_every_entry_safe(&mvm->async_handlers_list, entry, tmp,
-                              struct iwl_async_handler_entry, list) {
+  mtx_lock(&mvm->async_handlers_lock);
+  list_for_every_entry_safe (&mvm->async_handlers_list, entry, tmp, struct iwl_async_handler_entry,
+                             list) {
 #if 0   // NEEDS_PORTING
         iwl_free_rxb(&entry->rxb);
 #endif  // NEEDS_PORTING
-        list_delete(&entry->list);
-        kfree(entry);
-    }
-    mtx_unlock(&mvm->async_handlers_lock);
+    list_delete(&entry->list);
+    kfree(entry);
+  }
+  mtx_unlock(&mvm->async_handlers_lock);
 }
 
 #if 0   // NEEDS_PORTING
@@ -1138,50 +1171,54 @@
 
 static void iwl_mvm_rx_common(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb,
                               struct iwl_rx_packet* pkt) {
-    size_t i;
+  size_t i;
 
-    iwl_mvm_rx_check_trigger(mvm, pkt);
+  iwl_mvm_rx_check_trigger(mvm, pkt);
 
-    /*
-     * Do the notification wait before RX handlers so
-     * even if the RX handler consumes the RXB we have
-     * access to it in the notification wait entry.
-     */
-    iwl_notification_wait_notify(&mvm->notif_wait, pkt);
+  /*
+   * Do the notification wait before RX handlers so
+   * even if the RX handler consumes the RXB we have
+   * access to it in the notification wait entry.
+   */
+  iwl_notification_wait_notify(&mvm->notif_wait, pkt);
 
-    for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
-        const struct iwl_rx_handlers* rx_h = &iwl_mvm_rx_handlers[i];
-        struct iwl_async_handler_entry* entry;
+  for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
+    const struct iwl_rx_handlers* rx_h = &iwl_mvm_rx_handlers[i];
+    struct iwl_async_handler_entry* entry;
 
-        if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) { continue; }
+    if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+      continue;
+    }
 
-        if (rx_h->context == RX_HANDLER_SYNC) {
-            rx_h->fn(mvm, rxb);
-            return;
-        }
+    if (rx_h->context == RX_HANDLER_SYNC) {
+      rx_h->fn(mvm, rxb);
+      return;
+    }
 
-        entry = calloc(1, sizeof(*entry));
-        /* we can't do much... */
-        if (!entry) { return; }
+    entry = calloc(1, sizeof(*entry));
+    /* we can't do much... */
+    if (!entry) {
+      return;
+    }
 
-        entry->rxb._page = rxb_steal_page(rxb);
-        entry->rxb._offset = rxb->_offset;
-        entry->rxb._rx_page_order = rxb->_rx_page_order;
-        entry->fn = rx_h->fn;
-        entry->context = rx_h->context;
-        mtx_lock(&mvm->async_handlers_lock);
-        list_add_tail(&entry->list, &mvm->async_handlers_list);
-        mtx_unlock(&mvm->async_handlers_lock);
+    entry->rxb._page = rxb_steal_page(rxb);
+    entry->rxb._offset = rxb->_offset;
+    entry->rxb._rx_page_order = rxb->_rx_page_order;
+    entry->fn = rx_h->fn;
+    entry->context = rx_h->context;
+    mtx_lock(&mvm->async_handlers_lock);
+    list_add_tail(&entry->list, &mvm->async_handlers_list);
+    mtx_unlock(&mvm->async_handlers_lock);
 #if 0   // NEEDS_PORTING
         schedule_work(&mvm->async_handlers_wk);
 #endif  // NEEDS_PORTING
-        break;
-    }
+    break;
+  }
 }
 
 static void iwl_mvm_rx(struct iwl_op_mode* op_mode, struct napi_struct* napi,
                        struct iwl_rx_cmd_buffer* rxb) {
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
     struct iwl_rx_packet* pkt = rxb_addr(rxb);
     struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
     uint16_t cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
@@ -1208,95 +1245,101 @@
 
 static void iwl_mvm_rx_mq(struct iwl_op_mode* op_mode, struct napi_struct* napi,
                           struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
-    uint16_t cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
+  uint16_t cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
 
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
-    /*
-     * RX data may be forwarded to userspace in case the user
-     * requested to monitor the rx w/o affecting the regular flow.
-     * In this case the iwl_test object will handle forwarding the rx
-     * data to user space.
-     */
-    iwl_tm_gnl_send_rx(mvm->trans, rxb);
+  /*
+   * RX data may be forwarded to userspace in case the user
+   * requested to monitor the rx w/o affecting the regular flow.
+   * In this case the iwl_test object will handle forwarding the rx
+   * data to user space.
+   */
+  iwl_tm_gnl_send_rx(mvm->trans, rxb);
 #endif
 
-    if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) {
-        iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
-    } else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))) {
-        iwl_mvm_rx_queue_notif(mvm, rxb, 0);
-    } else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) {
-        iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
-    } else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) {
-        iwl_mvm_rx_monitor_ndp(mvm, napi, rxb, 0);
-    } else {
-        iwl_mvm_rx_common(mvm, rxb, pkt);
-    }
+  if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) {
+    iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
+  } else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))) {
+    iwl_mvm_rx_queue_notif(mvm, rxb, 0);
+  } else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) {
+    iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
+  } else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) {
+    iwl_mvm_rx_monitor_ndp(mvm, napi, rxb, 0);
+  } else {
+    iwl_mvm_rx_common(mvm, rxb, pkt);
+  }
 }
 
 static void iwl_mvm_async_cb(struct iwl_op_mode* op_mode, const struct iwl_device_cmd* cmd) {
-    struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
+  struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
-    /*
-     * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
-     * commands that need to block the Tx queues.
-     */
-    iwl_trans_block_txq_ptrs(mvm->trans, false);
+  /*
+   * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
+   * commands that need to block the Tx queues.
+   */
+  iwl_trans_block_txq_ptrs(mvm->trans, false);
 }
 
 static void iwl_mvm_queue_state_change(struct iwl_op_mode* op_mode, int hw_queue, bool start) {
-    struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
-    uint8_t sta_id = mvm->queue_info[hw_queue].ra_sta_id;
-    struct ieee80211_sta* sta;
-    struct ieee80211_txq* txq;
-    struct iwl_mvm_txq* mvmtxq;
-    int i;
-    unsigned long tid_bitmap;
+  struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
+  uint8_t sta_id = mvm->queue_info[hw_queue].ra_sta_id;
+  struct ieee80211_sta* sta;
+  struct ieee80211_txq* txq;
+  struct iwl_mvm_txq* mvmtxq;
+  int i;
+  unsigned long tid_bitmap;
 
-    if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) { return; }
+  if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
+    return;
+  }
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-    if (IS_ERR_OR_NULL(sta)) { goto out; }
+  sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+  if (IS_ERR_OR_NULL(sta)) {
+    goto out;
+  }
 
-    if (iwl_mvm_has_new_tx_api(mvm)) {
-        int tid = mvm->tvqm_info[hw_queue].txq_tid;
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    int tid = mvm->tvqm_info[hw_queue].txq_tid;
 
-        tid_bitmap = BIT(tid);
-    } else {
-        tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
+    tid_bitmap = BIT(tid);
+  } else {
+    tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
+  }
+
+  for (i = 0; i < (IWL_MAX_TID_COUNT + 1); i++) {
+    if (!(tid_bitmap & (1 << i))) {
+      continue;
     }
 
-    for(i = 0; i < (IWL_MAX_TID_COUNT + 1); i++) {
-        if (!(tid_bitmap & (1 << i))) {
-            continue;
-        }
+    int tid = i;
 
-        int tid = i;
+    if (tid == IWL_MAX_TID_COUNT) {
+      tid = IEEE80211_NUM_TIDS;
+    }
 
-        if (tid == IWL_MAX_TID_COUNT) { tid = IEEE80211_NUM_TIDS; }
-
-        txq = sta->txq[tid];
-        mvmtxq = iwl_mvm_txq_from_mac80211(txq);
-        mvmtxq->stopped = !start;
+    txq = sta->txq[tid];
+    mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+    mvmtxq->stopped = !start;
 
 #if 0   // NEEDS_PORTING
         if (start) { iwl_mvm_mac_itxq_xmit(mvm->hw, txq); }
 #endif  // NEEDS_PORTING
-    }
+  }
 
 out:
-    rcu_read_unlock();
+  rcu_read_unlock();
 }
 
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode* op_mode, int hw_queue) {
-    iwl_mvm_queue_state_change(op_mode, hw_queue, false);
+  iwl_mvm_queue_state_change(op_mode, hw_queue, false);
 }
 
 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode* op_mode, int hw_queue) {
-    iwl_mvm_queue_state_change(op_mode, hw_queue, true);
+  iwl_mvm_queue_state_change(op_mode, hw_queue, true);
 }
 
 static void iwl_mvm_set_rfkill_state(struct iwl_mvm* mvm) {
@@ -1310,37 +1353,37 @@
 }
 
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm* mvm, bool state) {
-    if (state) {
-        set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
-    } else {
-        clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
-    }
+  if (state) {
+    set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+  } else {
+    clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+  }
 
-    iwl_mvm_set_rfkill_state(mvm);
+  iwl_mvm_set_rfkill_state(mvm);
 }
 
 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode* op_mode, bool state) {
-    struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
-    bool calibrating = READ_ONCE(mvm->calibrating);
+  struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
+  bool calibrating = READ_ONCE(mvm->calibrating);
 
-    if (state) {
-        set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
-    } else {
-        clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
-    }
+  if (state) {
+    set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+  } else {
+    clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+  }
 
-    iwl_mvm_set_rfkill_state(mvm);
+  iwl_mvm_set_rfkill_state(mvm);
 
 #if 0   // NEEDS_PORTING
     /* iwl_run_init_mvm_ucode is waiting for results, abort it */
     if (calibrating) { iwl_abort_notification_waits(&mvm->notif_wait); }
 #endif  // NEEDS_PORTING
 
-    /*
-     * Stop the device if we run OPERATIONAL firmware or if we are in the
-     * middle of the calibrations.
-     */
-    return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
+  /*
+   * Stop the device if we run OPERATIONAL firmware or if we are in the
+   * middle of the calibrations.
+   */
+  return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
 }
 
 static void iwl_mvm_free_skb(struct iwl_op_mode* op_mode, struct sk_buff* skb) {
@@ -1355,8 +1398,8 @@
 }
 
 struct iwl_mvm_reprobe {
-    struct device* dev;
-    struct work_struct work;
+  struct device* dev;
+  struct work_struct work;
 };
 
 #if 0   // NEEDS_PORTING
@@ -1373,7 +1416,7 @@
 #endif  // NEEDS_PORTING
 
 void iwl_mvm_nic_restart(struct iwl_mvm* mvm, bool fw_error) {
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
     iwl_abort_notification_waits(&mvm->notif_wait);
 
     /*
@@ -1440,379 +1483,413 @@
 }
 
 static void iwl_mvm_nic_error(struct iwl_op_mode* op_mode) {
-    struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
+  struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
 #if 0   // NEEDS_PORTING
     if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { iwl_mvm_dump_nic_error_log(mvm); }
 #endif  // NEEDS_PORTING
 
-    iwl_mvm_nic_restart(mvm, true);
+  iwl_mvm_nic_restart(mvm, true);
 }
 
 static void iwl_mvm_cmd_queue_full(struct iwl_op_mode* op_mode) {
-    struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
+  struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
-    WARN_ON(1);
-    iwl_mvm_nic_restart(mvm, true);
+  WARN_ON(1);
+  iwl_mvm_nic_restart(mvm, true);
 }
 
 #ifdef CONFIG_PM
 struct iwl_d0i3_iter_data {
-    struct iwl_mvm* mvm;
-    struct ieee80211_vif* connected_vif;
-    uint8_t ap_sta_id;
-    uint8_t vif_count;
-    uint8_t offloading_tid;
-    bool disable_offloading;
+  struct iwl_mvm* mvm;
+  struct ieee80211_vif* connected_vif;
+  uint8_t ap_sta_id;
+  uint8_t vif_count;
+  uint8_t offloading_tid;
+  bool disable_offloading;
 };
 
 static bool iwl_mvm_disallow_offloading(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                         struct iwl_d0i3_iter_data* iter_data) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_sta* mvmsta;
-    uint32_t available_tids = 0;
-    uint8_t tid;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_sta* mvmsta;
+  uint32_t available_tids = 0;
+  uint8_t tid;
 
-    if (WARN_ON(vif->type != NL80211_IFTYPE_STATION || mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)) {
-        return false;
-    }
+  if (WARN_ON(vif->type != NL80211_IFTYPE_STATION || mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)) {
+    return false;
+  }
 
-    mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
-    if (!mvmsta) { return false; }
+  mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
+  if (!mvmsta) {
+    return false;
+  }
 
-    spin_lock_bh(&mvmsta->lock);
-    for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
-        struct iwl_mvm_tid_data* tid_data = &mvmsta->tid_data[tid];
-
-        /*
-         * in case of pending tx packets, don't use this tid
-         * for offloading in order to prevent reuse of the same
-         * qos seq counters.
-         */
-        if (iwl_mvm_tid_queued(mvm, tid_data)) { continue; }
-
-        if (tid_data->state != IWL_AGG_OFF) { continue; }
-
-        available_tids |= BIT(tid);
-    }
-    spin_unlock_bh(&mvmsta->lock);
+  spin_lock_bh(&mvmsta->lock);
+  for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+    struct iwl_mvm_tid_data* tid_data = &mvmsta->tid_data[tid];
 
     /*
-     * disallow protocol offloading if we have no available tid
-     * (with no pending frames and no active aggregation,
-     * as we don't handle "holes" properly - the scheduler needs the
-     * frame's seq number and TFD index to match)
+     * in case of pending tx packets, don't use this tid
+     * for offloading in order to prevent reuse of the same
+     * qos seq counters.
      */
-    if (!available_tids) { return true; }
+    if (iwl_mvm_tid_queued(mvm, tid_data)) {
+      continue;
+    }
 
-    /* for simplicity, just use the first available tid */
-    iter_data->offloading_tid = ffs(available_tids) - 1;
-    return false;
+    if (tid_data->state != IWL_AGG_OFF) {
+      continue;
+    }
+
+    available_tids |= BIT(tid);
+  }
+  spin_unlock_bh(&mvmsta->lock);
+
+  /*
+   * disallow protocol offloading if we have no available tid
+   * (with no pending frames and no active aggregation,
+   * as we don't handle "holes" properly - the scheduler needs the
+   * frame's seq number and TFD index to match)
+   */
+  if (!available_tids) {
+    return true;
+  }
+
+  /* for simplicity, just use the first available tid */
+  iter_data->offloading_tid = ffs(available_tids) - 1;
+  return false;
 }
 
 static void iwl_mvm_enter_d0i3_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_d0i3_iter_data* data = _data;
-    struct iwl_mvm* mvm = data->mvm;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    uint32_t flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+  struct iwl_d0i3_iter_data* data = _data;
+  struct iwl_mvm* mvm = data->mvm;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  uint32_t flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
 
-    IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
-    if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) { return; }
+  IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
+  if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) {
+    return;
+  }
 
-    /*
-     * in case of pending tx packets or active aggregations,
-     * avoid offloading features in order to prevent reuse of
-     * the same qos seq counters.
-     */
-    if (iwl_mvm_disallow_offloading(mvm, vif, data)) { data->disable_offloading = true; }
+  /*
+   * in case of pending tx packets or active aggregations,
+   * avoid offloading features in order to prevent reuse of
+   * the same qos seq counters.
+   */
+  if (iwl_mvm_disallow_offloading(mvm, vif, data)) {
+    data->disable_offloading = true;
+  }
 
-    iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
-    iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, false, flags);
+  iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
+  iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, false, flags);
 
-    /*
-     * on init/association, mvm already configures POWER_TABLE_CMD
-     * and REPLY_MCAST_FILTER_CMD, so currently don't
-     * reconfigure them (we might want to use different
-     * params later on, though).
-     */
-    data->ap_sta_id = mvmvif->ap_sta_id;
-    data->vif_count++;
+  /*
+   * on init/association, mvm already configures POWER_TABLE_CMD
+   * and REPLY_MCAST_FILTER_CMD, so currently don't
+   * reconfigure them (we might want to use different
+   * params later on, though).
+   */
+  data->ap_sta_id = mvmvif->ap_sta_id;
+  data->vif_count++;
 
-    /*
-     * no new commands can be sent at this stage, so it's safe
-     * to save the vif pointer during d0i3 entrance.
-     */
-    data->connected_vif = vif;
+  /*
+   * no new commands can be sent at this stage, so it's safe
+   * to save the vif pointer during d0i3 entrance.
+   */
+  data->connected_vif = vif;
 }
 
 static void iwl_mvm_set_wowlan_data(struct iwl_mvm* mvm, struct iwl_wowlan_config_cmd* cmd,
                                     struct iwl_d0i3_iter_data* iter_data) {
-    struct ieee80211_sta* ap_sta;
-    struct iwl_mvm_sta* mvm_ap_sta;
+  struct ieee80211_sta* ap_sta;
+  struct iwl_mvm_sta* mvm_ap_sta;
 
-    if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA) { return; }
+  if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA) {
+    return;
+  }
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
-    if (IS_ERR_OR_NULL(ap_sta)) { goto out; }
+  ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
+  if (IS_ERR_OR_NULL(ap_sta)) {
+    goto out;
+  }
 
-    mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
-    cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
-    cmd->offloading_tid = iter_data->offloading_tid;
-    cmd->flags =
-        ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
-    /*
-     * The d0i3 uCode takes care of the nonqos counters,
-     * so configure only the qos seq ones.
-     */
-    iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
+  mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
+  cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
+  cmd->offloading_tid = iter_data->offloading_tid;
+  cmd->flags =
+      ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
+  /*
+   * The d0i3 uCode takes care of the nonqos counters,
+   * so configure only the qos seq ones.
+   */
+  iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
 out:
-    rcu_read_unlock();
+  rcu_read_unlock();
 }
 
 int iwl_mvm_enter_d0i3(struct iwl_op_mode* op_mode) {
-    struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
-    uint32_t flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
-    int ret;
-    struct iwl_d0i3_iter_data d0i3_iter_data = {
-        .mvm = mvm,
-    };
-    struct iwl_wowlan_config_cmd wowlan_config_cmd = {
-        .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | IWL_WOWLAN_WAKEUP_BEACON_MISS |
-                                     IWL_WOWLAN_WAKEUP_LINK_CHANGE),
-    };
-    struct iwl_d3_manager_config d3_cfg_cmd = {
-        .min_sleep_time = cpu_to_le32(1000),
-        .wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
-    };
+  struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
+  uint32_t flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+  int ret;
+  struct iwl_d0i3_iter_data d0i3_iter_data = {
+      .mvm = mvm,
+  };
+  struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+      .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | IWL_WOWLAN_WAKEUP_BEACON_MISS |
+                                   IWL_WOWLAN_WAKEUP_LINK_CHANGE),
+  };
+  struct iwl_d3_manager_config d3_cfg_cmd = {
+      .min_sleep_time = cpu_to_le32(1000),
+      .wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
+  };
 
-    IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
+  IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
 
-    if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) { return -EINVAL; }
+  if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) {
+    return -EINVAL;
+  }
 
-    set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+  set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
 
-    /*
-     * iwl_mvm_ref_sync takes a reference before checking the flag.
-     * so by checking there is no held reference we prevent a state
-     * in which iwl_mvm_ref_sync continues successfully while we
-     * configure the firmware to enter d0i3
-     */
-    if (iwl_mvm_ref_taken(mvm)) {
-        IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
-        clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-        wake_up(&mvm->d0i3_exit_waitq);
-        return 1;
+  /*
+   * iwl_mvm_ref_sync takes a reference before checking the flag.
+   * so by checking there is no held reference we prevent a state
+   * in which iwl_mvm_ref_sync continues successfully while we
+   * configure the firmware to enter d0i3
+   */
+  if (iwl_mvm_ref_taken(mvm)) {
+    IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
+    clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+    wake_up(&mvm->d0i3_exit_waitq);
+    return 1;
+  }
+
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_enter_d0i3_iterator, &d0i3_iter_data);
+  if (d0i3_iter_data.vif_count == 1) {
+    mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
+    mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
+  } else {
+    WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
+    mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+    mvm->d0i3_offloading = false;
+  }
+
+  iwl_mvm_pause_tcm(mvm, true);
+  /* make sure we have no running tx while configuring the seqno */
+  synchronize_net();
+
+  /* Flush the hw queues, in case something got queued during entry */
+  /* TODO new tx api */
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
+  } else {
+    ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm), flags);
+    if (ret) {
+      return ret;
+    }
+  }
+
+  /* configure wowlan configuration only if needed */
+  if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
+    /* wake on beacons only if beacon storing isn't supported */
+    if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_STORING)) {
+      wowlan_config_cmd.wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
     }
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_enter_d0i3_iterator, &d0i3_iter_data);
-    if (d0i3_iter_data.vif_count == 1) {
-        mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
-        mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
-    } else {
-        WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
-        mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
-        mvm->d0i3_offloading = false;
+    iwl_mvm_wowlan_config_key_params(mvm, d0i3_iter_data.connected_vif, true, flags);
+
+    iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
+
+    ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags, sizeof(wowlan_config_cmd),
+                               &wowlan_config_cmd);
+    if (ret) {
+      return ret;
     }
+  }
 
-    iwl_mvm_pause_tcm(mvm, true);
-    /* make sure we have no running tx while configuring the seqno */
-    synchronize_net();
-
-    /* Flush the hw queues, in case something got queued during entry */
-    /* TODO new tx api */
-    if (iwl_mvm_has_new_tx_api(mvm)) {
-        WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
-    } else {
-        ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm), flags);
-        if (ret) { return ret; }
-    }
-
-    /* configure wowlan configuration only if needed */
-    if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
-        /* wake on beacons only if beacon storing isn't supported */
-        if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_STORING)) {
-            wowlan_config_cmd.wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
-        }
-
-        iwl_mvm_wowlan_config_key_params(mvm, d0i3_iter_data.connected_vif, true, flags);
-
-        iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
-
-        ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags, sizeof(wowlan_config_cmd),
-                                   &wowlan_config_cmd);
-        if (ret) { return ret; }
-    }
-
-    return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, flags | CMD_MAKE_TRANS_IDLE, sizeof(d3_cfg_cmd),
-                                &d3_cfg_cmd);
+  return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, flags | CMD_MAKE_TRANS_IDLE, sizeof(d3_cfg_cmd),
+                              &d3_cfg_cmd);
 }
 
 static void iwl_mvm_exit_d0i3_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm* mvm = _data;
-    uint32_t flags = CMD_ASYNC | CMD_HIGH_PRIO;
+  struct iwl_mvm* mvm = _data;
+  uint32_t flags = CMD_ASYNC | CMD_HIGH_PRIO;
 
-    IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
-    if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) { return; }
+  IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
+  if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) {
+    return;
+  }
 
-    iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
+  iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
 }
 
 struct iwl_mvm_d0i3_exit_work_iter_data {
-    struct iwl_mvm* mvm;
-    struct iwl_wowlan_status* status;
-    uint32_t wakeup_reasons;
+  struct iwl_mvm* mvm;
+  struct iwl_wowlan_status* status;
+  uint32_t wakeup_reasons;
 };
 
 static void iwl_mvm_d0i3_exit_work_iter(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_d0i3_exit_work_iter_data* data = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    uint32_t reasons = data->wakeup_reasons;
+  struct iwl_mvm_d0i3_exit_work_iter_data* data = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  uint32_t reasons = data->wakeup_reasons;
 
-    /* consider only the relevant station interface */
-    if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
-        data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id) {
-        return;
-    }
+  /* consider only the relevant station interface */
+  if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
+      data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id) {
+    return;
+  }
 
-    if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH) {
-        iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
-    } else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON) {
-        ieee80211_beacon_loss(vif);
-    } else {
-        iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
-    }
+  if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH) {
+    iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
+  } else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON) {
+    ieee80211_beacon_loss(vif);
+  } else {
+    iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
+  }
 }
 
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm* mvm, __le16* qos_seq) {
-    struct ieee80211_sta* sta = NULL;
-    struct iwl_mvm_sta* mvm_ap_sta;
-    int i;
-    bool wake_queues = false;
+  struct ieee80211_sta* sta = NULL;
+  struct iwl_mvm_sta* mvm_ap_sta;
+  int i;
+  bool wake_queues = false;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    spin_lock_bh(&mvm->d0i3_tx_lock);
+  spin_lock_bh(&mvm->d0i3_tx_lock);
 
-    if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA) { goto out; }
+  if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA) {
+    goto out;
+  }
 
-    IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
+  IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
 
-    /* get the sta in order to update seq numbers and re-enqueue skbs */
-    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
-                                    lockdep_is_held(&mvm->mutex));
+  /* get the sta in order to update seq numbers and re-enqueue skbs */
+  sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
+                                  lockdep_is_held(&mvm->mutex));
 
-    if (IS_ERR_OR_NULL(sta)) {
-        sta = NULL;
-        goto out;
+  if (IS_ERR_OR_NULL(sta)) {
+    sta = NULL;
+    goto out;
+  }
+
+  if (mvm->d0i3_offloading && qos_seq) {
+    /* update qos seq numbers if offloading was enabled */
+    mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
+    for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+      uint16_t seq = le16_to_cpu(qos_seq[i]);
+      /* firmware stores last-used one, we store next one */
+      seq += 0x10;
+      mvm_ap_sta->tid_data[i].seq_number = seq;
     }
-
-    if (mvm->d0i3_offloading && qos_seq) {
-        /* update qos seq numbers if offloading was enabled */
-        mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
-        for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-            uint16_t seq = le16_to_cpu(qos_seq[i]);
-            /* firmware stores last-used one, we store next one */
-            seq += 0x10;
-            mvm_ap_sta->tid_data[i].seq_number = seq;
-        }
-    }
+  }
 out:
-    /* re-enqueue (or drop) all packets */
-    while (!skb_queue_empty(&mvm->d0i3_tx)) {
-        struct sk_buff* skb = __skb_dequeue(&mvm->d0i3_tx);
+  /* re-enqueue (or drop) all packets */
+  while (!skb_queue_empty(&mvm->d0i3_tx)) {
+    struct sk_buff* skb = __skb_dequeue(&mvm->d0i3_tx);
 
-        if (!sta || iwl_mvm_tx_skb(mvm, skb, sta)) { ieee80211_free_txskb(mvm->hw, skb); }
-
-        /* if the skb_queue is not empty, we need to wake queues */
-        wake_queues = true;
+    if (!sta || iwl_mvm_tx_skb(mvm, skb, sta)) {
+      ieee80211_free_txskb(mvm->hw, skb);
     }
-    clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-    wake_up(&mvm->d0i3_exit_waitq);
-    mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
-    if (wake_queues) { ieee80211_wake_queues(mvm->hw); }
 
-    spin_unlock_bh(&mvm->d0i3_tx_lock);
+    /* if the skb_queue is not empty, we need to wake queues */
+    wake_queues = true;
+  }
+  clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+  wake_up(&mvm->d0i3_exit_waitq);
+  mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+  if (wake_queues) {
+    ieee80211_wake_queues(mvm->hw);
+  }
+
+  spin_unlock_bh(&mvm->d0i3_tx_lock);
 }
 
 static void iwl_mvm_d0i3_exit_work(struct work_struct* wk) {
-    struct iwl_mvm* mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
-    struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
-        .mvm = mvm,
-    };
+  struct iwl_mvm* mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
+  struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
+      .mvm = mvm,
+  };
 
-    struct iwl_wowlan_status* status;
-    uint32_t wakeup_reasons = 0;
-    __le16* qos_seq = NULL;
+  struct iwl_wowlan_status* status;
+  uint32_t wakeup_reasons = 0;
+  __le16* qos_seq = NULL;
 
-    mtx_lock(&mvm->mutex);
+  mtx_lock(&mvm->mutex);
 
-    status = iwl_mvm_send_wowlan_get_status(mvm);
-    if (IS_ERR_OR_NULL(status)) {
-        /* set to NULL so we don't need to check before kfree'ing */
-        status = NULL;
-        goto out;
-    }
+  status = iwl_mvm_send_wowlan_get_status(mvm);
+  if (IS_ERR_OR_NULL(status)) {
+    /* set to NULL so we don't need to check before kfree'ing */
+    status = NULL;
+    goto out;
+  }
 
-    wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
-    qos_seq = status->qos_seq_ctr;
+  wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
+  qos_seq = status->qos_seq_ctr;
 
-    IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
+  IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
 
-    iter_data.wakeup_reasons = wakeup_reasons;
-    iter_data.status = status;
-    ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                        iwl_mvm_d0i3_exit_work_iter, &iter_data);
+  iter_data.wakeup_reasons = wakeup_reasons;
+  iter_data.status = status;
+  ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                      iwl_mvm_d0i3_exit_work_iter, &iter_data);
 out:
-    iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+  iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
 
-    IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n", wakeup_reasons);
+  IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n", wakeup_reasons);
 
-    /* qos_seq might point inside resp_pkt, so free it only now */
-    kfree(status);
+  /* qos_seq might point inside resp_pkt, so free it only now */
+  kfree(status);
 
-    /* the FW might have updated the regdomain */
-    iwl_mvm_update_changed_regdom(mvm);
+  /* the FW might have updated the regdomain */
+  iwl_mvm_update_changed_regdom(mvm);
 
-    iwl_mvm_resume_tcm(mvm);
-    iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
-    mtx_unlock(&mvm->mutex);
+  iwl_mvm_resume_tcm(mvm);
+  iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
+  mtx_unlock(&mvm->mutex);
 }
 
 int _iwl_mvm_exit_d0i3(struct iwl_mvm* mvm) {
-    uint32_t flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | CMD_WAKE_UP_TRANS;
-    int ret;
+  uint32_t flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | CMD_WAKE_UP_TRANS;
+  int ret;
 
-    IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
+  IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
 
-    if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) { return -EINVAL; }
+  if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) {
+    return -EINVAL;
+  }
 
-    mtx_lock(&mvm->d0i3_suspend_mutex);
-    if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
-        IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
-        __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
-        mtx_unlock(&mvm->d0i3_suspend_mutex);
-        return 0;
-    }
+  mtx_lock(&mvm->d0i3_suspend_mutex);
+  if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
+    IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
+    __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
     mtx_unlock(&mvm->d0i3_suspend_mutex);
+    return 0;
+  }
+  mtx_unlock(&mvm->d0i3_suspend_mutex);
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
-    if (ret) { goto out; }
+  ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
+  if (ret) {
+    goto out;
+  }
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_exit_d0i3_iterator, mvm);
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_exit_d0i3_iterator, mvm);
 out:
-    schedule_work(&mvm->d0i3_exit_work);
-    return ret;
+  schedule_work(&mvm->d0i3_exit_work);
+  return ret;
 }
 
 int iwl_mvm_exit_d0i3(struct iwl_op_mode* op_mode) {
-    struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
+  struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
-    iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
-    return _iwl_mvm_exit_d0i3(mvm);
+  iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
+  return _iwl_mvm_exit_d0i3(mvm);
 }
 
 #define IWL_MVM_D0I3_OPS .enter_d0i3 = iwl_mvm_enter_d0i3, .exit_d0i3 = iwl_mvm_exit_d0i3,
@@ -1820,22 +1897,22 @@
 #define IWL_MVM_D0I3_OPS
 #endif /* CONFIG_PM */
 
-#define IWL_MVM_COMMON_OPS                                                              \
-    /* these could be differentiated */                                                 \
-    .async_cb = iwl_mvm_async_cb, .queue_full = iwl_mvm_stop_sw_queue,                  \
-    .queue_not_full = iwl_mvm_wake_sw_queue, .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \
-    .free_skb = iwl_mvm_free_skb, .nic_error = iwl_mvm_nic_error,                       \
-    .cmd_queue_full = iwl_mvm_cmd_queue_full, .nic_config = iwl_mvm_nic_config,         \
-    IWL_MVM_D0I3_OPS /* as we only register one, these MUST be common! */               \
-        .start = iwl_op_mode_mvm_start,                                                 \
-    .stop = iwl_op_mode_mvm_stop
+#define IWL_MVM_COMMON_OPS                                                            \
+  /* these could be differentiated */                                                 \
+  .async_cb = iwl_mvm_async_cb, .queue_full = iwl_mvm_stop_sw_queue,                  \
+  .queue_not_full = iwl_mvm_wake_sw_queue, .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \
+  .free_skb = iwl_mvm_free_skb, .nic_error = iwl_mvm_nic_error,                       \
+  .cmd_queue_full = iwl_mvm_cmd_queue_full, .nic_config = iwl_mvm_nic_config,         \
+  IWL_MVM_D0I3_OPS /* as we only register one, these MUST be common! */               \
+      .start = iwl_op_mode_mvm_start,                                                 \
+  .stop = iwl_op_mode_mvm_stop
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
-#define IWL_MVM_COMMON_TEST_OPS                      \
-    .test_ops = {                                    \
-        .send_hcmd = iwl_mvm_tm_send_hcmd,           \
-        .cmd_exec_start = iwl_mvm_tm_cmd_exec_start, \
-        .cmd_exec_end = iwl_mvm_tm_cmd_exec_end,     \
-    },
+#define IWL_MVM_COMMON_TEST_OPS                    \
+  .test_ops = {                                    \
+      .send_hcmd = iwl_mvm_tm_send_hcmd,           \
+      .cmd_exec_start = iwl_mvm_tm_cmd_exec_start, \
+      .cmd_exec_end = iwl_mvm_tm_cmd_exec_end,     \
+  },
 #else
 #define IWL_MVM_COMMON_TEST_OPS
 #endif
@@ -1847,17 +1924,17 @@
 
 static void iwl_mvm_rx_mq_rss(struct iwl_op_mode* op_mode, struct napi_struct* napi,
                               struct iwl_rx_cmd_buffer* rxb, unsigned int queue) {
-    struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    uint16_t cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+  struct iwl_mvm* mvm = IWL_OP_MODE_GET_MVM(op_mode);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  uint16_t cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
 
-    if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) {
-        iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
-    } else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))) {
-        iwl_mvm_rx_queue_notif(mvm, rxb, queue);
-    } else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) {
-        iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
-    }
+  if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) {
+    iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
+  } else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))) {
+    iwl_mvm_rx_queue_notif(mvm, rxb, queue);
+  } else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) {
+    iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
+  }
 }
 
 static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/phy-ctxt.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/phy-ctxt.c
index b2d84db..582d2ae1 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/phy-ctxt.c
@@ -34,25 +34,26 @@
  *****************************************************************************/
 
 #include <net/mac80211.h>
+
 #include "fw-api.h"
 #include "mvm.h"
 
 /* Maps the driver specific channel width definition to the fw values */
 uint8_t iwl_mvm_get_channel_width(struct cfg80211_chan_def* chandef) {
-    switch (chandef->width) {
+  switch (chandef->width) {
     case NL80211_CHAN_WIDTH_20_NOHT:
     case NL80211_CHAN_WIDTH_20:
-        return PHY_VHT_CHANNEL_MODE20;
+      return PHY_VHT_CHANNEL_MODE20;
     case NL80211_CHAN_WIDTH_40:
-        return PHY_VHT_CHANNEL_MODE40;
+      return PHY_VHT_CHANNEL_MODE40;
     case NL80211_CHAN_WIDTH_80:
-        return PHY_VHT_CHANNEL_MODE80;
+      return PHY_VHT_CHANNEL_MODE80;
     case NL80211_CHAN_WIDTH_160:
-        return PHY_VHT_CHANNEL_MODE160;
+      return PHY_VHT_CHANNEL_MODE160;
     default:
-        WARN(1, "Invalid channel width=%u", chandef->width);
-        return PHY_VHT_CHANNEL_MODE20;
-    }
+      WARN(1, "Invalid channel width=%u", chandef->width);
+      return PHY_VHT_CHANNEL_MODE20;
+  }
 }
 
 /*
@@ -60,33 +61,33 @@
  * freq) definitions to the the fw values
  */
 uint8_t iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def* chandef) {
-    switch (chandef->chan->center_freq - chandef->center_freq1) {
+  switch (chandef->chan->center_freq - chandef->center_freq1) {
     case -70:
-        return PHY_VHT_CTRL_POS_4_BELOW;
+      return PHY_VHT_CTRL_POS_4_BELOW;
     case -50:
-        return PHY_VHT_CTRL_POS_3_BELOW;
+      return PHY_VHT_CTRL_POS_3_BELOW;
     case -30:
-        return PHY_VHT_CTRL_POS_2_BELOW;
+      return PHY_VHT_CTRL_POS_2_BELOW;
     case -10:
-        return PHY_VHT_CTRL_POS_1_BELOW;
+      return PHY_VHT_CTRL_POS_1_BELOW;
     case 10:
-        return PHY_VHT_CTRL_POS_1_ABOVE;
+      return PHY_VHT_CTRL_POS_1_ABOVE;
     case 30:
-        return PHY_VHT_CTRL_POS_2_ABOVE;
+      return PHY_VHT_CTRL_POS_2_ABOVE;
     case 50:
-        return PHY_VHT_CTRL_POS_3_ABOVE;
+      return PHY_VHT_CTRL_POS_3_ABOVE;
     case 70:
-        return PHY_VHT_CTRL_POS_4_ABOVE;
+      return PHY_VHT_CTRL_POS_4_ABOVE;
     default:
-        WARN(1, "Invalid channel definition");
+      WARN(1, "Invalid channel definition");
     case 0:
-        /*
-         * The FW is expected to check the control channel position only
-         * when in HT/VHT and the channel width is not 20MHz. Return
-         * this value as the default one.
-         */
-        return PHY_VHT_CTRL_POS_1_BELOW;
-    }
+      /*
+       * The FW is expected to check the control channel position only
+       * when in HT/VHT and the channel width is not 20MHz. Return
+       * this value as the default one.
+       */
+      return PHY_VHT_CTRL_POS_1_BELOW;
+  }
 }
 
 /*
@@ -94,11 +95,11 @@
  */
 static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt* ctxt, struct iwl_phy_context_cmd* cmd,
                                      uint32_t action, uint32_t apply_time) {
-    memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
+  memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
 
-    cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, ctxt->color));
-    cmd->action = cpu_to_le32(action);
-    cmd->apply_time = cpu_to_le32(apply_time);
+  cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, ctxt->color));
+  cmd->action = cpu_to_le32(action);
+  cmd->apply_time = cpu_to_le32(apply_time);
 }
 
 /*
@@ -107,39 +108,41 @@
 static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm* mvm, struct iwl_phy_context_cmd* cmd,
                                       struct cfg80211_chan_def* chandef, uint8_t chains_static,
                                       uint8_t chains_dynamic) {
-    uint8_t active_cnt, idle_cnt;
+  uint8_t active_cnt, idle_cnt;
 
-    /* Set the channel info data */
-    cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? PHY_BAND_24 : PHY_BAND_5);
+  /* Set the channel info data */
+  cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? PHY_BAND_24 : PHY_BAND_5);
 
-    cmd->ci.channel = chandef->chan->hw_value;
-    cmd->ci.width = iwl_mvm_get_channel_width(chandef);
-    cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+  cmd->ci.channel = chandef->chan->hw_value;
+  cmd->ci.width = iwl_mvm_get_channel_width(chandef);
+  cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
 
-    /* Set rx the chains */
-    idle_cnt = chains_static;
-    active_cnt = chains_dynamic;
+  /* Set the rx chains */
+  idle_cnt = chains_static;
+  active_cnt = chains_dynamic;
 
-    /* In scenarios where we only ever use a single-stream rates,
-     * i.e. legacy 11b/g/a associations, single-stream APs or even
-     * static SMPS, enable both chains to get diversity, improving
-     * the case where we're far enough from the AP that attenuation
-     * between the two antennas is sufficiently different to impact
-     * performance.
-     */
-    if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
-        idle_cnt = 2;
-        active_cnt = 2;
-    }
+  /* In scenarios where we only ever use single-stream rates,
+   * i.e. legacy 11b/g/a associations, single-stream APs or even
+   * static SMPS, enable both chains to get diversity, improving
+   * the case where we're far enough from the AP that attenuation
+   * between the two antennas is sufficiently different to impact
+   * performance.
+   */
+  if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
+    idle_cnt = 2;
+    active_cnt = 2;
+  }
 
-    cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << PHY_RX_CHAIN_VALID_POS);
-    cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
-    cmd->rxchain_info |= cpu_to_le32(active_cnt << PHY_RX_CHAIN_MIMO_CNT_POS);
+  cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << PHY_RX_CHAIN_VALID_POS);
+  cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+  cmd->rxchain_info |= cpu_to_le32(active_cnt << PHY_RX_CHAIN_MIMO_CNT_POS);
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    if (unlikely(mvm->dbgfs_rx_phyinfo)) { cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo); }
+  if (unlikely(mvm->dbgfs_rx_phyinfo)) {
+    cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+  }
 #endif
 
-    cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+  cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
 }
 
 /*
@@ -151,18 +154,20 @@
 static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm* mvm, struct iwl_mvm_phy_ctxt* ctxt,
                                   struct cfg80211_chan_def* chandef, uint8_t chains_static,
                                   uint8_t chains_dynamic, uint32_t action, uint32_t apply_time) {
-    struct iwl_phy_context_cmd cmd;
-    int ret;
+  struct iwl_phy_context_cmd cmd;
+  int ret;
 
-    /* Set the command header fields */
-    iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
+  /* Set the command header fields */
+  iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
 
-    /* Set the command data */
-    iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef, chains_static, chains_dynamic);
+  /* Set the command data */
+  iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef, chains_static, chains_dynamic);
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, sizeof(struct iwl_phy_context_cmd), &cmd);
-    if (ret) { IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret); }
-    return ret;
+  ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, sizeof(struct iwl_phy_context_cmd), &cmd);
+  if (ret) {
+    IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
+  }
+  return ret;
 }
 
 /*
@@ -171,17 +176,17 @@
 int iwl_mvm_phy_ctxt_add(struct iwl_mvm* mvm, struct iwl_mvm_phy_ctxt* ctxt,
                          struct cfg80211_chan_def* chandef, uint8_t chains_static,
                          uint8_t chains_dynamic) {
-    WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && ctxt->ref);
-    lockdep_assert_held(&mvm->mutex);
+  WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && ctxt->ref);
+  lockdep_assert_held(&mvm->mutex);
 
-    ctxt->channel = chandef->chan;
+  ctxt->channel = chandef->chan;
 
 #ifdef CPTCFG_IWLWIFI_FRQ_MGR
-    ctxt->fm_tx_power_limit = IWL_DEFAULT_MAX_TX_POWER;
+  ctxt->fm_tx_power_limit = IWL_DEFAULT_MAX_TX_POWER;
 #endif
 
-    return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic,
-                                  FW_CTXT_ACTION_ADD, 0);
+  return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic,
+                                FW_CTXT_ACTION_ADD, 0);
 }
 
 /*
@@ -189,8 +194,8 @@
  * in case the PHY context was already created, i.e., its reference count > 0.
  */
 void iwl_mvm_phy_ctxt_ref(struct iwl_mvm* mvm, struct iwl_mvm_phy_ctxt* ctxt) {
-    lockdep_assert_held(&mvm->mutex);
-    ctxt->ref++;
+  lockdep_assert_held(&mvm->mutex);
+  ctxt->ref++;
 }
 
 /*
@@ -201,66 +206,72 @@
 int iwl_mvm_phy_ctxt_changed(struct iwl_mvm* mvm, struct iwl_mvm_phy_ctxt* ctxt,
                              struct cfg80211_chan_def* chandef, uint8_t chains_static,
                              uint8_t chains_dynamic) {
-    enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;
+  enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
-        ctxt->channel->band != chandef->chan->band) {
-        int ret;
+  if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+      ctxt->channel->band != chandef->chan->band) {
+    int ret;
 
-        /* ... remove it here ...*/
-        ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic,
-                                     FW_CTXT_ACTION_REMOVE, 0);
-        if (ret) { return ret; }
-
-        /* ... and proceed to add it again */
-        action = FW_CTXT_ACTION_ADD;
+    /* ... remove it here ... */
+    ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic,
+                                 FW_CTXT_ACTION_REMOVE, 0);
+    if (ret) {
+      return ret;
     }
 
-    ctxt->channel = chandef->chan;
-    ctxt->width = chandef->width;
-    return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, action, 0);
+    /* ... and proceed to add it again */
+    action = FW_CTXT_ACTION_ADD;
+  }
+
+  ctxt->channel = chandef->chan;
+  ctxt->width = chandef->width;
+  return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, action, 0);
 }
 
 void iwl_mvm_phy_ctxt_unref(struct iwl_mvm* mvm, struct iwl_mvm_phy_ctxt* ctxt) {
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (WARN_ON_ONCE(!ctxt)) { return; }
+  if (WARN_ON_ONCE(!ctxt)) {
+    return;
+  }
 
-    ctxt->ref--;
+  ctxt->ref--;
 
-    /*
-     * Move unused phy's to a default channel. When the phy is moved the,
-     * fw will cleanup immediate quiet bit if it was previously set,
-     * otherwise we might not be able to reuse this phy.
-     */
-    if (ctxt->ref == 0) {
-        struct ieee80211_channel* chan;
-        struct cfg80211_chan_def chandef;
+  /*
+   * Move unused phys to a default channel. When the phy is moved, the
+   * fw will clean up the immediate quiet bit if it was previously set;
+   * otherwise we might not be able to reuse this phy.
+   */
+  if (ctxt->ref == 0) {
+    struct ieee80211_channel* chan;
+    struct cfg80211_chan_def chandef;
 
-        chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
-        cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
-        iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
-    }
+    chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
+    cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+    iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
+  }
 }
 
 static void iwl_mvm_binding_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    unsigned long* data = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  unsigned long* data = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (!mvmvif->phy_ctxt) { return; }
+  if (!mvmvif->phy_ctxt) {
+    return;
+  }
 
-    if (vif->type == NL80211_IFTYPE_STATION || vif->type == NL80211_IFTYPE_AP) {
-        __set_bit(mvmvif->phy_ctxt->id, data);
-    }
+  if (vif->type == NL80211_IFTYPE_STATION || vif->type == NL80211_IFTYPE_AP) {
+    __set_bit(mvmvif->phy_ctxt->id, data);
+  }
 }
 
 int iwl_mvm_phy_ctx_count(struct iwl_mvm* mvm) {
-    unsigned long phy_ctxt_counter = 0;
+  unsigned long phy_ctxt_counter = 0;
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_binding_iterator, &phy_ctxt_counter);
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_binding_iterator, &phy_ctxt_counter);
 
-    return hweight8(phy_ctxt_counter);
+  return hweight8(phy_ctxt_counter);
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/power.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/power.c
index 09b4272..efacad6 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/power.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/power.c
@@ -33,14 +33,14 @@
  *
  *****************************************************************************/
 
+#include "fw/api/power.h"
+
 #include <linux/etherdevice.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-
 #include <net/mac80211.h>
 
-#include "fw/api/power.h"
 #include "iwl-debug.h"
 #include "iwl-modparams.h"
 #include "mvm.h"
@@ -49,839 +49,917 @@
 
 static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm* mvm, struct iwl_beacon_filter_cmd* cmd,
                                           uint32_t flags) {
-    IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
-                    le32_to_cpu(cmd->ba_enable_beacon_abort));
-    IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", le32_to_cpu(cmd->ba_escape_timer));
-    IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", le32_to_cpu(cmd->bf_debug_flag));
-    IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
-                    le32_to_cpu(cmd->bf_enable_beacon_filter));
-    IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", le32_to_cpu(cmd->bf_energy_delta));
-    IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", le32_to_cpu(cmd->bf_escape_timer));
-    IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
-                    le32_to_cpu(cmd->bf_roaming_energy_delta));
-    IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", le32_to_cpu(cmd->bf_roaming_state));
-    IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n", le32_to_cpu(cmd->bf_temp_threshold));
-    IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n", le32_to_cpu(cmd->bf_temp_fast_filter));
-    IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n", le32_to_cpu(cmd->bf_temp_slow_filter));
+  IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", le32_to_cpu(cmd->ba_enable_beacon_abort));
+  IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", le32_to_cpu(cmd->ba_escape_timer));
+  IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", le32_to_cpu(cmd->bf_debug_flag));
+  IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
+                  le32_to_cpu(cmd->bf_enable_beacon_filter));
+  IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", le32_to_cpu(cmd->bf_energy_delta));
+  IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", le32_to_cpu(cmd->bf_escape_timer));
+  IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
+                  le32_to_cpu(cmd->bf_roaming_energy_delta));
+  IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", le32_to_cpu(cmd->bf_roaming_state));
+  IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n", le32_to_cpu(cmd->bf_temp_threshold));
+  IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n", le32_to_cpu(cmd->bf_temp_fast_filter));
+  IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n", le32_to_cpu(cmd->bf_temp_slow_filter));
 
-    return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
-                                sizeof(struct iwl_beacon_filter_cmd), cmd);
+  return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+                              sizeof(struct iwl_beacon_filter_cmd), cmd);
 }
 
 static void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                                  struct iwl_beacon_filter_cmd* cmd, bool d0i3) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
-        cmd->bf_energy_delta = cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
-        /* fw uses an absolute value for this */
-        cmd->bf_roaming_state = cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
-    }
-    cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
+  if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
+    cmd->bf_energy_delta = cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
+    /* fw uses an absolute value for this */
+    cmd->bf_roaming_state = cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
+  }
+  cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
 }
 
 static void iwl_mvm_power_log(struct iwl_mvm* mvm, struct iwl_mac_power_cmd* cmd) {
-    IWL_DEBUG_POWER(mvm,
-                    "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
-                    cmd->id_and_color, iwlmvm_mod_params.power_scheme, le16_to_cpu(cmd->flags));
-    IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", le16_to_cpu(cmd->keep_alive_seconds));
+  IWL_DEBUG_POWER(mvm,
+                  "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+                  cmd->id_and_color, iwlmvm_mod_params.power_scheme, le16_to_cpu(cmd->flags));
+  IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", le16_to_cpu(cmd->keep_alive_seconds));
 
-    if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
-        IWL_DEBUG_POWER(mvm, "Disable power management\n");
-        return;
-    }
+  if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
+    IWL_DEBUG_POWER(mvm, "Disable power management\n");
+    return;
+  }
 
-    IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", le32_to_cpu(cmd->rx_data_timeout));
-    IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", le32_to_cpu(cmd->tx_data_timeout));
-    if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) {
-        IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", cmd->skip_dtim_periods);
-    }
-    if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) {
-        IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", cmd->lprx_rssi_threshold);
-    }
-    if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
-        IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
-        IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
-                        le32_to_cpu(cmd->rx_data_timeout_uapsd));
-        IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
-                        le32_to_cpu(cmd->tx_data_timeout_uapsd));
-        IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
-        IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
-        IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
-    }
+  IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", le32_to_cpu(cmd->rx_data_timeout));
+  IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", le32_to_cpu(cmd->tx_data_timeout));
+  if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) {
+    IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", cmd->skip_dtim_periods);
+  }
+  if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) {
+    IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", cmd->lprx_rssi_threshold);
+  }
+  if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+    IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
+    IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n", le32_to_cpu(cmd->rx_data_timeout_uapsd));
+    IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n", le32_to_cpu(cmd->tx_data_timeout_uapsd));
+    IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
+    IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
+    IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
+  }
 }
 
 static void iwl_mvm_power_configure_uapsd(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                           struct iwl_mac_power_cmd* cmd) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    enum ieee80211_ac_numbers ac;
-    bool tid_found = false;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  enum ieee80211_ac_numbers ac;
+  bool tid_found = false;
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    /* set advanced pm flag with no uapsd ACs to enable ps-poll */
-    if (mvmvif->dbgfs_pm.use_ps_poll) {
-        cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
-        return;
-    }
+  /* set advanced pm flag with no uapsd ACs to enable ps-poll */
+  if (mvmvif->dbgfs_pm.use_ps_poll) {
+    cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+    return;
+  }
 #endif
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    if (mvm->trans->dbg_cfg.MVM_USE_PS_POLL) {
-        cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
-        return;
-    }
+  if (mvm->trans->dbg_cfg.MVM_USE_PS_POLL) {
+    cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+    return;
+  }
 #endif
 
-    for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
-        if (!mvmvif->queue_params[ac].uapsd) { continue; }
-
-        if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN) {
-            cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
-        }
-
-        cmd->uapsd_ac_flags |= BIT(ac);
-
-        /* QNDP TID - the highest TID with no admission control */
-        if (!tid_found && !mvmvif->queue_params[ac].acm) {
-            tid_found = true;
-            switch (ac) {
-            case IEEE80211_AC_VO:
-                cmd->qndp_tid = 6;
-                break;
-            case IEEE80211_AC_VI:
-                cmd->qndp_tid = 5;
-                break;
-            case IEEE80211_AC_BE:
-                cmd->qndp_tid = 0;
-                break;
-            case IEEE80211_AC_BK:
-                cmd->qndp_tid = 1;
-                break;
-            }
-        }
+  for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+    if (!mvmvif->queue_params[ac].uapsd) {
+      continue;
     }
 
-    cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
-
-    if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) | BIT(IEEE80211_AC_VI) | BIT(IEEE80211_AC_BE) |
-                                BIT(IEEE80211_AC_BK))) {
-        cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
-        cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
-        cmd->snooze_window = (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN)
-                                 ? cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW)
-                                 : cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+    if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN) {
+      cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
     }
 
-    cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
+    cmd->uapsd_ac_flags |= BIT(ac);
 
-    if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN ||
-        cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
-        cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
-        cmd->tx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
-    } else {
-        cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
-        cmd->tx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+    /* QNDP TID - the highest TID with no admission control */
+    if (!tid_found && !mvmvif->queue_params[ac].acm) {
+      tid_found = true;
+      switch (ac) {
+        case IEEE80211_AC_VO:
+          cmd->qndp_tid = 6;
+          break;
+        case IEEE80211_AC_VI:
+          cmd->qndp_tid = 5;
+          break;
+        case IEEE80211_AC_BE:
+          cmd->qndp_tid = 0;
+          break;
+        case IEEE80211_AC_BK:
+          cmd->qndp_tid = 1;
+          break;
+      }
     }
+  }
 
-    if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
-        cmd->heavy_tx_thld_packets = IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
-        cmd->heavy_rx_thld_packets = IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
-    } else {
-        cmd->heavy_tx_thld_packets = IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
-        cmd->heavy_rx_thld_packets = IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
-    }
-    cmd->heavy_tx_thld_percentage = IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
-    cmd->heavy_rx_thld_percentage = IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
+  cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
+
+  if (cmd->uapsd_ac_flags ==
+      (BIT(IEEE80211_AC_VO) | BIT(IEEE80211_AC_VI) | BIT(IEEE80211_AC_BE) | BIT(IEEE80211_AC_BK))) {
+    cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+    cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+    cmd->snooze_window = (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN)
+                             ? cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW)
+                             : cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+  }
+
+  cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
+
+  if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN ||
+      cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+    cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+    cmd->tx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+  } else {
+    cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+    cmd->tx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+  }
+
+  if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+    cmd->heavy_tx_thld_packets = IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+    cmd->heavy_rx_thld_packets = IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+  } else {
+    cmd->heavy_tx_thld_packets = IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+    cmd->heavy_rx_thld_packets = IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+  }
+  cmd->heavy_tx_thld_percentage = IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+  cmd->heavy_rx_thld_percentage = IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
 }
 
 struct iwl_allow_uapsd_iface_iterator_data {
-    struct ieee80211_vif* current_vif;
-    bool allow_uapsd;
+  struct ieee80211_vif* current_vif;
+  bool allow_uapsd;
 };
 
 static void iwl_mvm_allow_uapsd_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_allow_uapsd_iface_iterator_data* data = _data;
-    struct iwl_mvm_vif* other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_vif* curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
+  struct iwl_allow_uapsd_iface_iterator_data* data = _data;
+  struct iwl_mvm_vif* other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
 
-    /* exclude the given vif */
-    if (vif == data->current_vif) { return; }
+  /* exclude the given vif */
+  if (vif == data->current_vif) {
+    return;
+  }
 
-    switch (vif->type) {
+  switch (vif->type) {
     case NL80211_IFTYPE_AP:
     case NL80211_IFTYPE_ADHOC:
     case NL80211_IFTYPE_NAN:
-        data->allow_uapsd = false;
-        break;
+      data->allow_uapsd = false;
+      break;
     case NL80211_IFTYPE_STATION:
-        /* allow UAPSD if P2P interface and BSS station interface share
-         * the same channel.
-         */
-        if (vif->bss_conf.assoc && other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
-            (other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)) {
-            data->allow_uapsd = false;
-        }
-        break;
+      /* allow UAPSD if P2P interface and BSS station interface share
+       * the same channel.
+       */
+      if (vif->bss_conf.assoc && other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
+          (other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)) {
+        data->allow_uapsd = false;
+      }
+      break;
 
     default:
-        break;
-    }
+      break;
+  }
 }
 
 static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_allow_uapsd_iface_iterator_data data = {
-        .current_vif = vif,
-        .allow_uapsd = true,
-    };
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_allow_uapsd_iface_iterator_data data = {
+      .current_vif = vif,
+      .allow_uapsd = true,
+  };
 
-    if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, ETH_ALEN)) { return false; }
+  if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, ETH_ALEN)) {
+    return false;
+  }
 
-    /*
-     * Avoid using uAPSD if P2P client is associated to GO that uses
-     * opportunistic power save. This is due to current FW limitation.
-     */
-    if (vif->p2p && (vif->bss_conf.p2p_noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT)) {
-        return false;
-    }
+  /*
+   * Avoid using uAPSD if P2P client is associated to GO that uses
+   * opportunistic power save. This is due to current FW limitation.
+   */
+  if (vif->p2p && (vif->bss_conf.p2p_noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT)) {
+    return false;
+  }
 
-    if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { return false; }
+  if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
+    return false;
+  }
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_allow_uapsd_iterator, &data);
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_allow_uapsd_iterator, &data);
 
-    return data.allow_uapsd;
+  return data.allow_uapsd;
 }
 
 static bool iwl_mvm_power_is_radar(struct ieee80211_vif* vif) {
-    struct ieee80211_chanctx_conf* chanctx_conf;
-    struct ieee80211_channel* chan;
-    bool radar_detect = false;
+  struct ieee80211_chanctx_conf* chanctx_conf;
+  struct ieee80211_channel* chan;
+  bool radar_detect = false;
 
-    rcu_read_lock();
-    chanctx_conf = rcu_dereference(vif->chanctx_conf);
-    WARN_ON(!chanctx_conf);
-    if (chanctx_conf) {
-        chan = chanctx_conf->def.chan;
-        radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
-    }
-    rcu_read_unlock();
+  rcu_read_lock();
+  chanctx_conf = rcu_dereference(vif->chanctx_conf);
+  WARN_ON(!chanctx_conf);
+  if (chanctx_conf) {
+    chan = chanctx_conf->def.chan;
+    radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
+  }
+  rcu_read_unlock();
 
-    return radar_detect;
+  return radar_detect;
 }
 
 static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                            struct iwl_mac_power_cmd* cmd, bool host_awake) {
-    int dtimper = vif->bss_conf.dtim_period ?: 1;
-    int skip;
+  int dtimper = vif->bss_conf.dtim_period ?: 1;
+  int skip;
 
-    /* disable, in case we're supposed to override */
-    cmd->skip_dtim_periods = 0;
-    cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+  /* disable, in case we're supposed to override */
+  cmd->skip_dtim_periods = 0;
+  cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
 
-    if (iwl_mvm_power_is_radar(vif)) { return; }
+  if (iwl_mvm_power_is_radar(vif)) {
+    return;
+  }
 
-    if (dtimper >= 10) { return; }
+  if (dtimper >= 10) {
+    return;
+  }
 
-    /* TODO: check that multicast wake lock is off */
+  /* TODO: check that multicast wake lock is off */
 
-    if (host_awake) {
-        if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP) { return; }
-        skip = 2;
-    } else {
-        int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
-
-        if (WARN_ON(!dtimper_tu)) { return; }
-        /* configure skip over dtim up to 306TU - 314 msec */
-        skip = max_t(uint8_t, 1, 306 / dtimper_tu);
+  if (host_awake) {
+    if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP) {
+      return;
     }
+    skip = 2;
+  } else {
+    int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
 
-    /* the firmware really expects "look at every X DTIMs", so add 1 */
-    cmd->skip_dtim_periods = 1 + skip;
-    cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+    if (WARN_ON(!dtimper_tu)) {
+      return;
+    }
+    /* configure skip over dtim up to 306TU - 314 msec */
+    skip = max_t(uint8_t, 1, 306 / dtimper_tu);
+  }
+
+  /* the firmware really expects "look at every X DTIMs", so add 1 */
+  cmd->skip_dtim_periods = 1 + skip;
+  cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
 }
 
 static void iwl_mvm_power_build_cmd(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                     struct iwl_mac_power_cmd* cmd, bool host_awake) {
-    int dtimper, bi;
-    int keep_alive;
-    struct iwl_mvm_vif* mvmvif __maybe_unused = iwl_mvm_vif_from_mac80211(vif);
+  int dtimper, bi;
+  int keep_alive;
+  struct iwl_mvm_vif* mvmvif __maybe_unused = iwl_mvm_vif_from_mac80211(vif);
 
-    cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
-    dtimper = vif->bss_conf.dtim_period;
-    bi = vif->bss_conf.beacon_int;
+  cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+  dtimper = vif->bss_conf.dtim_period;
+  bi = vif->bss_conf.beacon_int;
 
-    /*
-     * Regardless of power management state the driver must set
-     * keep alive period. FW will use it for sending keep alive NDPs
-     * immediately after association. Check that keep alive period
-     * is at least 3 * DTIM
-     */
-    keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi), USEC_PER_SEC);
-    keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
-    cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
+  /*
+   * Regardless of power management state the driver must set
+   * keep alive period. FW will use it for sending keep alive NDPs
+   * immediately after association. Check that keep alive period
+   * is at least 3 * DTIM
+   */
+  keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi), USEC_PER_SEC);
+  keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
+  cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
 
-    if (mvm->ps_disabled) { return; }
+  if (mvm->ps_disabled) {
+    return;
+  }
 
-    cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+  cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
-    if (!vif->bss_conf.ps || !mvmvif->pm_enabled) { return; }
+  if (!vif->bss_conf.ps || !mvmvif->pm_enabled) {
+    return;
+  }
 
-    if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
-        (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) ||
-         !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE)) {
-        return;
-    }
+  if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+      (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) ||
+       !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE)) {
+    return;
+  }
 
-    cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+  cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
 
-    if (vif->bss_conf.beacon_rate &&
-        (vif->bss_conf.beacon_rate->bitrate == 10 || vif->bss_conf.beacon_rate->bitrate == 60)) {
-        cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
-        cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
-    }
+  if (vif->bss_conf.beacon_rate &&
+      (vif->bss_conf.beacon_rate->bitrate == 10 || vif->bss_conf.beacon_rate->bitrate == 60)) {
+    cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+    cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
+  }
 
-    iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake);
+  iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake);
 
-    if (!host_awake) {
-        cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
-        cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
-    } else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
-               fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) {
-        cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT);
-        cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT);
-    } else {
-        cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
-        cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
-    }
+  if (!host_awake) {
+    cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+    cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+  } else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+             fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) {
+    cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT);
+    cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT);
+  } else {
+    cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
+    cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
+  }
 
-    if (iwl_mvm_power_allow_uapsd(mvm, vif)) { iwl_mvm_power_configure_uapsd(mvm, vif, cmd); }
+  if (iwl_mvm_power_allow_uapsd(mvm, vif)) {
+    iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
+  }
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) {
-        cmd->keep_alive_seconds = cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
+  if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) {
+    cmd->keep_alive_seconds = cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
+  }
+  if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
+    if (mvmvif->dbgfs_pm.skip_over_dtim) {
+      cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+    } else {
+      cmd->flags &= cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
     }
-    if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
-        if (mvmvif->dbgfs_pm.skip_over_dtim) {
-            cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-        } else {
-            cmd->flags &= cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-        }
+  }
+  if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT) {
+    cmd->rx_data_timeout = cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
+  }
+  if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT) {
+    cmd->tx_data_timeout = cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
+  }
+  if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) {
+    cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
+  }
+  if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
+    if (mvmvif->dbgfs_pm.lprx_ena) {
+      cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+    } else {
+      cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
     }
-    if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT) {
-        cmd->rx_data_timeout = cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
+  }
+  if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD) {
+    cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
+  }
+  if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
+    if (mvmvif->dbgfs_pm.snooze_ena) {
+      cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+    } else {
+      cmd->flags &= cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
     }
-    if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT) {
-        cmd->tx_data_timeout = cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
+  }
+  if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) {
+    uint16_t flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK;
+    if (mvmvif->dbgfs_pm.uapsd_misbehaving) {
+      cmd->flags |= cpu_to_le16(flag);
+    } else {
+      cmd->flags &= cpu_to_le16(flag);
     }
-    if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) {
-        cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
-    }
-    if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
-        if (mvmvif->dbgfs_pm.lprx_ena) {
-            cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
-        } else {
-            cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
-        }
-    }
-    if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD) {
-        cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
-    }
-    if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
-        if (mvmvif->dbgfs_pm.snooze_ena) {
-            cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
-        } else {
-            cmd->flags &= cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
-        }
-    }
-    if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) {
-        uint16_t flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK;
-        if (mvmvif->dbgfs_pm.uapsd_misbehaving) {
-            cmd->flags |= cpu_to_le16(flag);
-        } else {
-            cmd->flags &= cpu_to_le16(flag);
-        }
-    }
+  }
 #endif /* CPTCFG_IWLWIFI_DEBUGFS */
 }
 
 static int iwl_mvm_power_send_cmd(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mac_power_cmd cmd = {};
+  struct iwl_mac_power_cmd cmd = {};
 
-    iwl_mvm_power_build_cmd(mvm, vif, &cmd, mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN);
-    iwl_mvm_power_log(mvm, &cmd);
+  iwl_mvm_power_build_cmd(mvm, vif, &cmd, mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN);
+  iwl_mvm_power_log(mvm, &cmd);
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
+  memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
 #endif
 
-    return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0, sizeof(cmd), &cmd);
+  return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0, sizeof(cmd), &cmd);
 }
 
 int iwl_mvm_power_update_device(struct iwl_mvm* mvm) {
-    struct iwl_device_power_cmd cmd = {
-        .flags = 0,
-    };
+  struct iwl_device_power_cmd cmd = {
+      .flags = 0,
+  };
 
-    if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) { mvm->ps_disabled = true; }
+  if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
+    mvm->ps_disabled = true;
+  }
 
-    if (!mvm->ps_disabled) { cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); }
+  if (!mvm->ps_disabled) {
+    cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+  }
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3
-                                                   : mvm->disable_power_off) {
-        cmd.flags &= cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
-    }
+  if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3
+                                                 : mvm->disable_power_off) {
+    cmd.flags &= cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+  }
 #endif
-    IWL_DEBUG_POWER(mvm, "Sending device power command with flags = 0x%X\n", cmd.flags);
+  IWL_DEBUG_POWER(mvm, "Sending device power command with flags = 0x%X\n", cmd.flags);
 
-    return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
+  return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
 }
 
 void iwl_mvm_power_vif_assoc(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid, ETH_ALEN)) {
-        eth_zero_addr(mvmvif->uapsd_misbehaving_bssid);
-    }
+  if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid, ETH_ALEN)) {
+    eth_zero_addr(mvmvif->uapsd_misbehaving_bssid);
+  }
 }
 
 static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void* _data, uint8_t* mac,
                                                      struct ieee80211_vif* vif) {
-    uint8_t* ap_sta_id = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  uint8_t* ap_sta_id = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    /* The ap_sta_id is not expected to change during current association
-     * so no explicit protection is needed
-     */
-    if (mvmvif->ap_sta_id == *ap_sta_id) {
-        memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, ETH_ALEN);
-    }
+  /* The ap_sta_id is not expected to change during current association
+   * so no explicit protection is needed
+   */
+  if (mvmvif->ap_sta_id == *ap_sta_id) {
+    memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, ETH_ALEN);
+  }
 }
 
 void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_uapsd_misbehaving_ap_notif* notif = (void*)pkt->data;
-    uint8_t ap_sta_id = le32_to_cpu(notif->sta_id);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_uapsd_misbehaving_ap_notif* notif = (void*)pkt->data;
+  uint8_t ap_sta_id = le32_to_cpu(notif->sta_id);
 
-    ieee80211_iterate_active_interfaces_atomic(
-        mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
 }
 
 struct iwl_power_vifs {
-    struct iwl_mvm* mvm;
-    struct ieee80211_vif* bss_vif;
-    struct ieee80211_vif* p2p_vif;
-    struct ieee80211_vif* ap_vif;
-    struct ieee80211_vif* monitor_vif;
-    bool p2p_active;
-    bool bss_active;
-    bool ap_active;
-    bool monitor_active;
+  struct iwl_mvm* mvm;
+  struct ieee80211_vif* bss_vif;
+  struct ieee80211_vif* p2p_vif;
+  struct ieee80211_vif* ap_vif;
+  struct ieee80211_vif* monitor_vif;
+  bool p2p_active;
+  bool bss_active;
+  bool ap_active;
+  bool monitor_active;
 };
 
 static void iwl_mvm_power_disable_pm_iterator(void* _data, uint8_t* mac,
                                               struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    mvmvif->pm_enabled = false;
+  mvmvif->pm_enabled = false;
 }
 
 static void iwl_mvm_power_ps_disabled_iterator(void* _data, uint8_t* mac,
                                                struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    bool* disable_ps = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  bool* disable_ps = _data;
 
-    if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX) {
-        *disable_ps |= mvmvif->ps_disabled;
-    }
+  if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX) {
+    *disable_ps |= mvmvif->ps_disabled;
+  }
 }
 
 static void iwl_mvm_power_get_vifs_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_power_vifs* power_iterator = _data;
-    bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_power_vifs* power_iterator = _data;
+  bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
 
-    switch (ieee80211_vif_type_p2p(vif)) {
+  switch (ieee80211_vif_type_p2p(vif)) {
     case NL80211_IFTYPE_P2P_DEVICE:
     case NL80211_IFTYPE_NAN:
-        break;
+      break;
 
     case NL80211_IFTYPE_P2P_GO:
     case NL80211_IFTYPE_AP:
-        /* only a single MAC of the same type */
-        WARN_ON(power_iterator->ap_vif);
-        power_iterator->ap_vif = vif;
-        if (active) { power_iterator->ap_active = true; }
-        break;
+      /* only a single MAC of the same type */
+      WARN_ON(power_iterator->ap_vif);
+      power_iterator->ap_vif = vif;
+      if (active) {
+        power_iterator->ap_active = true;
+      }
+      break;
 
     case NL80211_IFTYPE_MONITOR:
-        /* only a single MAC of the same type */
-        WARN_ON(power_iterator->monitor_vif);
-        power_iterator->monitor_vif = vif;
-        if (active) { power_iterator->monitor_active = true; }
-        break;
+      /* only a single MAC of the same type */
+      WARN_ON(power_iterator->monitor_vif);
+      power_iterator->monitor_vif = vif;
+      if (active) {
+        power_iterator->monitor_active = true;
+      }
+      break;
 
     case NL80211_IFTYPE_P2P_CLIENT:
-        /* only a single MAC of the same type */
-        WARN_ON(power_iterator->p2p_vif);
-        power_iterator->p2p_vif = vif;
-        if (active) { power_iterator->p2p_active = true; }
-        break;
+      /* only a single MAC of the same type */
+      WARN_ON(power_iterator->p2p_vif);
+      power_iterator->p2p_vif = vif;
+      if (active) {
+        power_iterator->p2p_active = true;
+      }
+      break;
 
     case NL80211_IFTYPE_STATION:
-        power_iterator->bss_vif = vif;
-        if (active) { power_iterator->bss_active = true; }
-        break;
+      power_iterator->bss_vif = vif;
+      if (active) {
+        power_iterator->bss_active = true;
+      }
+      break;
 
     default:
-        break;
-    }
+      break;
+  }
 }
 
 static void iwl_mvm_power_set_pm(struct iwl_mvm* mvm, struct iwl_power_vifs* vifs) {
-    struct iwl_mvm_vif* bss_mvmvif = NULL;
-    struct iwl_mvm_vif* p2p_mvmvif = NULL;
-    struct iwl_mvm_vif* ap_mvmvif = NULL;
-    bool client_same_channel = false;
-    bool ap_same_channel = false;
+  struct iwl_mvm_vif* bss_mvmvif = NULL;
+  struct iwl_mvm_vif* p2p_mvmvif = NULL;
+  struct iwl_mvm_vif* ap_mvmvif = NULL;
+  bool client_same_channel = false;
+  bool ap_same_channel = false;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* set pm_enable to false */
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_power_disable_pm_iterator, NULL);
+  /* set pm_enable to false */
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_power_disable_pm_iterator, NULL);
 
-    if (vifs->bss_vif) { bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif); }
+  if (vifs->bss_vif) {
+    bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
+  }
 
-    if (vifs->p2p_vif) { p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif); }
+  if (vifs->p2p_vif) {
+    p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
+  }
 
-    if (vifs->ap_vif) { ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif); }
+  if (vifs->ap_vif) {
+    ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
+  }
 
-    /* don't allow PM if any TDLS stations exist */
-    if (iwl_mvm_tdls_sta_count(mvm, NULL)) { return; }
+  /* don't allow PM if any TDLS stations exist */
+  if (iwl_mvm_tdls_sta_count(mvm, NULL)) {
+    return;
+  }
 
-    /* enable PM on bss if bss stand alone */
-    if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
-        bss_mvmvif->pm_enabled = true;
-        return;
+  /* enable PM on bss if bss stand alone */
+  if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
+    bss_mvmvif->pm_enabled = true;
+    return;
+  }
+
+  /* enable PM on p2p if p2p stand alone */
+  if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
+    p2p_mvmvif->pm_enabled = true;
+    return;
+  }
+
+  if (vifs->bss_active && vifs->p2p_active) {
+    client_same_channel = (bss_mvmvif->phy_ctxt->id == p2p_mvmvif->phy_ctxt->id);
+  }
+  if (vifs->bss_active && vifs->ap_active) {
+    ap_same_channel = (bss_mvmvif->phy_ctxt->id == ap_mvmvif->phy_ctxt->id);
+  }
+
+  /* clients are not stand alone: enable PM if DCM */
+  if (!(client_same_channel || ap_same_channel)) {
+    if (vifs->bss_active) {
+      bss_mvmvif->pm_enabled = true;
     }
+    if (vifs->p2p_active) {
+      p2p_mvmvif->pm_enabled = true;
+    }
+    return;
+  }
 
-    /* enable PM on p2p if p2p stand alone */
-    if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
-        p2p_mvmvif->pm_enabled = true;
-        return;
-    }
-
-    if (vifs->bss_active && vifs->p2p_active) {
-        client_same_channel = (bss_mvmvif->phy_ctxt->id == p2p_mvmvif->phy_ctxt->id);
-    }
-    if (vifs->bss_active && vifs->ap_active) {
-        ap_same_channel = (bss_mvmvif->phy_ctxt->id == ap_mvmvif->phy_ctxt->id);
-    }
-
-    /* clients are not stand alone: enable PM if DCM */
-    if (!(client_same_channel || ap_same_channel)) {
-        if (vifs->bss_active) { bss_mvmvif->pm_enabled = true; }
-        if (vifs->p2p_active) { p2p_mvmvif->pm_enabled = true; }
-        return;
-    }
-
-    /*
-     * There is only one channel in the system and there are only
-     * bss and p2p clients that share it
-     */
-    if (client_same_channel && !vifs->ap_active) {
-        /* share same channel*/
-        bss_mvmvif->pm_enabled = true;
-        p2p_mvmvif->pm_enabled = true;
-    }
+  /*
+   * There is only one channel in the system and there are only
+   * bss and p2p clients that share it
+   */
+  if (client_same_channel && !vifs->ap_active) {
+    /* share same channel*/
+    bss_mvmvif->pm_enabled = true;
+    p2p_mvmvif->pm_enabled = true;
+  }
 }
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
 int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm* mvm, struct ieee80211_vif* vif, char* buf,
                                  int bufsz) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mac_power_cmd cmd = {};
-    int pos = 0;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mac_power_cmd cmd = {};
+  int pos = 0;
 
-    mutex_lock(&mvm->mutex);
-    memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
+  mutex_unlock(&mvm->mutex);
 
-    pos += scnprintf(buf + pos, bufsz - pos, "power_scheme = %d\n", iwlmvm_mod_params.power_scheme);
-    pos += scnprintf(buf + pos, bufsz - pos, "flags = 0x%x\n", le16_to_cpu(cmd.flags));
-    pos +=
-        scnprintf(buf + pos, bufsz - pos, "keep_alive = %d\n", le16_to_cpu(cmd.keep_alive_seconds));
+  pos += scnprintf(buf + pos, bufsz - pos, "power_scheme = %d\n", iwlmvm_mod_params.power_scheme);
+  pos += scnprintf(buf + pos, bufsz - pos, "flags = 0x%x\n", le16_to_cpu(cmd.flags));
+  pos +=
+      scnprintf(buf + pos, bufsz - pos, "keep_alive = %d\n", le16_to_cpu(cmd.keep_alive_seconds));
 
-    if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) { return pos; }
-
-    pos += scnprintf(buf + pos, bufsz - pos, "skip_over_dtim = %d\n",
-                     (cmd.flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0);
-    pos += scnprintf(buf + pos, bufsz - pos, "skip_dtim_periods = %d\n", cmd.skip_dtim_periods);
-    if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
-        pos += scnprintf(buf + pos, bufsz - pos, "rx_data_timeout = %d\n",
-                         le32_to_cpu(cmd.rx_data_timeout));
-        pos += scnprintf(buf + pos, bufsz - pos, "tx_data_timeout = %d\n",
-                         le32_to_cpu(cmd.tx_data_timeout));
-    }
-    if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
-        pos += scnprintf(buf + pos, bufsz - pos, "lprx_rssi_threshold = %d\n",
-                         cmd.lprx_rssi_threshold);
-
-    if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { return pos; }
-
-    pos += scnprintf(buf + pos, bufsz - pos, "rx_data_timeout_uapsd = %d\n",
-                     le32_to_cpu(cmd.rx_data_timeout_uapsd));
-    pos += scnprintf(buf + pos, bufsz - pos, "tx_data_timeout_uapsd = %d\n",
-                     le32_to_cpu(cmd.tx_data_timeout_uapsd));
-    pos += scnprintf(buf + pos, bufsz - pos, "qndp_tid = %d\n", cmd.qndp_tid);
-    pos += scnprintf(buf + pos, bufsz - pos, "uapsd_ac_flags = 0x%x\n", cmd.uapsd_ac_flags);
-    pos += scnprintf(buf + pos, bufsz - pos, "uapsd_max_sp = %d\n", cmd.uapsd_max_sp);
-    pos += scnprintf(buf + pos, bufsz - pos, "heavy_tx_thld_packets = %d\n",
-                     cmd.heavy_tx_thld_packets);
-    pos += scnprintf(buf + pos, bufsz - pos, "heavy_rx_thld_packets = %d\n",
-                     cmd.heavy_rx_thld_packets);
-    pos += scnprintf(buf + pos, bufsz - pos, "heavy_tx_thld_percentage = %d\n",
-                     cmd.heavy_tx_thld_percentage);
-    pos += scnprintf(buf + pos, bufsz - pos, "heavy_rx_thld_percentage = %d\n",
-                     cmd.heavy_rx_thld_percentage);
-    pos += scnprintf(buf + pos, bufsz - pos, "uapsd_misbehaving_enable = %d\n",
-                     (cmd.flags & cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ? 1 : 0);
-
-    if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK))) { return pos; }
-
-    pos += scnprintf(buf + pos, bufsz - pos, "snooze_interval = %d\n", cmd.snooze_interval);
-    pos += scnprintf(buf + pos, bufsz - pos, "snooze_window = %d\n", cmd.snooze_window);
-
+  if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
     return pos;
+  }
+
+  pos += scnprintf(buf + pos, bufsz - pos, "skip_over_dtim = %d\n",
+                   (cmd.flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0);
+  pos += scnprintf(buf + pos, bufsz - pos, "skip_dtim_periods = %d\n", cmd.skip_dtim_periods);
+  if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+    pos += scnprintf(buf + pos, bufsz - pos, "rx_data_timeout = %d\n",
+                     le32_to_cpu(cmd.rx_data_timeout));
+    pos += scnprintf(buf + pos, bufsz - pos, "tx_data_timeout = %d\n",
+                     le32_to_cpu(cmd.tx_data_timeout));
+  }
+  if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+    pos += scnprintf(buf + pos, bufsz - pos, "lprx_rssi_threshold = %d\n", cmd.lprx_rssi_threshold);
+
+  if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+    return pos;
+  }
+
+  pos += scnprintf(buf + pos, bufsz - pos, "rx_data_timeout_uapsd = %d\n",
+                   le32_to_cpu(cmd.rx_data_timeout_uapsd));
+  pos += scnprintf(buf + pos, bufsz - pos, "tx_data_timeout_uapsd = %d\n",
+                   le32_to_cpu(cmd.tx_data_timeout_uapsd));
+  pos += scnprintf(buf + pos, bufsz - pos, "qndp_tid = %d\n", cmd.qndp_tid);
+  pos += scnprintf(buf + pos, bufsz - pos, "uapsd_ac_flags = 0x%x\n", cmd.uapsd_ac_flags);
+  pos += scnprintf(buf + pos, bufsz - pos, "uapsd_max_sp = %d\n", cmd.uapsd_max_sp);
+  pos +=
+      scnprintf(buf + pos, bufsz - pos, "heavy_tx_thld_packets = %d\n", cmd.heavy_tx_thld_packets);
+  pos +=
+      scnprintf(buf + pos, bufsz - pos, "heavy_rx_thld_packets = %d\n", cmd.heavy_rx_thld_packets);
+  pos += scnprintf(buf + pos, bufsz - pos, "heavy_tx_thld_percentage = %d\n",
+                   cmd.heavy_tx_thld_percentage);
+  pos += scnprintf(buf + pos, bufsz - pos, "heavy_rx_thld_percentage = %d\n",
+                   cmd.heavy_rx_thld_percentage);
+  pos += scnprintf(buf + pos, bufsz - pos, "uapsd_misbehaving_enable = %d\n",
+                   (cmd.flags & cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ? 1 : 0);
+
+  if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK))) {
+    return pos;
+  }
+
+  pos += scnprintf(buf + pos, bufsz - pos, "snooze_interval = %d\n", cmd.snooze_interval);
+  pos += scnprintf(buf + pos, bufsz - pos, "snooze_window = %d\n", cmd.snooze_window);
+
+  return pos;
 }
 
 void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif* vif,
                                               struct iwl_beacon_filter_cmd* cmd) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_dbgfs_bf* dbgfs_bf = &mvmvif->dbgfs_bf;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_dbgfs_bf* dbgfs_bf = &mvmvif->dbgfs_bf;
 
-    if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA) {
-        cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
-    }
-    if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA) {
-        cmd->bf_roaming_energy_delta = cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
-    }
-    if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE) {
-        cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
-    }
-    if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD) {
-        cmd->bf_temp_threshold = cpu_to_le32(dbgfs_bf->bf_temp_threshold);
-    }
-    if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER) {
-        cmd->bf_temp_fast_filter = cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
-    }
-    if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER) {
-        cmd->bf_temp_slow_filter = cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
-    }
-    if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG) {
-        cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
-    }
-    if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER) {
-        cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
-    }
-    if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER) {
-        cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
-    }
-    if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT) {
-        cmd->ba_enable_beacon_abort = cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
-    }
+  if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA) {
+    cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
+  }
+  if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA) {
+    cmd->bf_roaming_energy_delta = cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
+  }
+  if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE) {
+    cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
+  }
+  if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD) {
+    cmd->bf_temp_threshold = cpu_to_le32(dbgfs_bf->bf_temp_threshold);
+  }
+  if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER) {
+    cmd->bf_temp_fast_filter = cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
+  }
+  if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER) {
+    cmd->bf_temp_slow_filter = cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
+  }
+  if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG) {
+    cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
+  }
+  if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER) {
+    cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
+  }
+  if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER) {
+    cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
+  }
+  if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT) {
+    cmd->ba_enable_beacon_abort = cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
+  }
 }
 #endif
 
 static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                          struct iwl_beacon_filter_cmd* cmd, uint32_t cmd_flags,
                                          bool d0i3) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int ret;
 
-    if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
-        vif->type != NL80211_IFTYPE_STATION || vif->p2p) {
-        return 0;
-    }
+  if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
+      vif->type != NL80211_IFTYPE_STATION || vif->p2p) {
+    return 0;
+  }
 
-    iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
-    if (!d0i3) { iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd); }
-    ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
+  iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
+  if (!d0i3) {
+    iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
+  }
+  ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
 
-    /* don't change bf_enabled in case of temporary d0i3 configuration */
-    if (!ret && !d0i3) { mvmvif->bf_data.bf_enabled = true; }
+  /* don't change bf_enabled in case of temporary d0i3 configuration */
+  if (!ret && !d0i3) {
+    mvmvif->bf_data.bf_enabled = true;
+  }
 
-    return ret;
+  return ret;
 }
 
 int iwl_mvm_enable_beacon_filter(struct iwl_mvm* mvm, struct ieee80211_vif* vif, uint32_t flags) {
-    struct iwl_beacon_filter_cmd cmd = {
-        IWL_BF_CMD_CONFIG_DEFAULTS,
-        .bf_enable_beacon_filter = cpu_to_le32(1),
-    };
+  struct iwl_beacon_filter_cmd cmd = {
+      IWL_BF_CMD_CONFIG_DEFAULTS,
+      .bf_enable_beacon_filter = cpu_to_le32(1),
+  };
 
-    return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
+  return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
 }
 
 static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                           uint32_t flags, bool d0i3) {
-    struct iwl_beacon_filter_cmd cmd = {};
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int ret;
+  struct iwl_beacon_filter_cmd cmd = {};
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int ret;
 
-    if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) { return 0; }
+  if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) {
+    return 0;
+  }
 
-    ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
+  ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
 
-    /* don't change bf_enabled in case of temporary d0i3 configuration */
-    if (!ret && !d0i3) { mvmvif->bf_data.bf_enabled = false; }
+  /* don't change bf_enabled in case of temporary d0i3 configuration */
+  if (!ret && !d0i3) {
+    mvmvif->bf_data.bf_enabled = false;
+  }
 
-    return ret;
+  return ret;
 }
 
 int iwl_mvm_disable_beacon_filter(struct iwl_mvm* mvm, struct ieee80211_vif* vif, uint32_t flags) {
-    return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, false);
+  return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, false);
 }
 
 static int iwl_mvm_power_set_ps(struct iwl_mvm* mvm) {
-    bool disable_ps;
-    int ret;
+  bool disable_ps;
+  int ret;
 
-    /* disable PS if CAM */
-    disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
-    /* ...or if any of the vifs require PS to be off */
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_power_ps_disabled_iterator, &disable_ps);
+  /* disable PS if CAM */
+  disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
+  /* ...or if any of the vifs require PS to be off */
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_power_ps_disabled_iterator, &disable_ps);
 
-    /* update device power state if it has changed */
-    if (mvm->ps_disabled != disable_ps) {
-        bool old_ps_disabled = mvm->ps_disabled;
+  /* update device power state if it has changed */
+  if (mvm->ps_disabled != disable_ps) {
+    bool old_ps_disabled = mvm->ps_disabled;
 
-        mvm->ps_disabled = disable_ps;
-        ret = iwl_mvm_power_update_device(mvm);
-        if (ret) {
-            mvm->ps_disabled = old_ps_disabled;
-            return ret;
-        }
+    mvm->ps_disabled = disable_ps;
+    ret = iwl_mvm_power_update_device(mvm);
+    if (ret) {
+      mvm->ps_disabled = old_ps_disabled;
+      return ret;
     }
+  }
 
-    return 0;
+  return 0;
 }
 
 static int iwl_mvm_power_set_ba(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_beacon_filter_cmd cmd = {
-        IWL_BF_CMD_CONFIG_DEFAULTS,
-        .bf_enable_beacon_filter = cpu_to_le32(1),
-    };
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_beacon_filter_cmd cmd = {
+      IWL_BF_CMD_CONFIG_DEFAULTS,
+      .bf_enable_beacon_filter = cpu_to_le32(1),
+  };
 
-    if (!mvmvif->bf_data.bf_enabled) { return 0; }
+  if (!mvmvif->bf_data.bf_enabled) {
+    return 0;
+  }
 
-    if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) {
-        cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
-    }
+  if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) {
+    cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+  }
 
-    mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || mvm->ps_disabled || !vif->bss_conf.ps ||
-                                   iwl_mvm_vif_low_latency(mvmvif));
+  mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || mvm->ps_disabled || !vif->bss_conf.ps ||
+                                 iwl_mvm_vif_low_latency(mvmvif));
 
-    return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
+  return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
 }
 
 int iwl_mvm_power_update_ps(struct iwl_mvm* mvm) {
-    struct iwl_power_vifs vifs = {
-        .mvm = mvm,
-    };
-    int ret;
+  struct iwl_power_vifs vifs = {
+      .mvm = mvm,
+  };
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* get vifs info */
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_power_get_vifs_iterator, &vifs);
+  /* get vifs info */
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_power_get_vifs_iterator, &vifs);
 
-    ret = iwl_mvm_power_set_ps(mvm);
-    if (ret) { return ret; }
+  ret = iwl_mvm_power_set_ps(mvm);
+  if (ret) {
+    return ret;
+  }
 
-    if (vifs.bss_vif) { return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); }
+  if (vifs.bss_vif) {
+    return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+  }
 
-    return 0;
+  return 0;
 }
 
 int iwl_mvm_power_update_mac(struct iwl_mvm* mvm) {
-    struct iwl_power_vifs vifs = {
-        .mvm = mvm,
-    };
-    int ret;
+  struct iwl_power_vifs vifs = {
+      .mvm = mvm,
+  };
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* get vifs info */
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_power_get_vifs_iterator, &vifs);
+  /* get vifs info */
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_power_get_vifs_iterator, &vifs);
 
-    iwl_mvm_power_set_pm(mvm, &vifs);
+  iwl_mvm_power_set_pm(mvm, &vifs);
 
-    ret = iwl_mvm_power_set_ps(mvm);
-    if (ret) { return ret; }
+  ret = iwl_mvm_power_set_ps(mvm);
+  if (ret) {
+    return ret;
+  }
 
-    if (vifs.bss_vif) {
-        ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
-        if (ret) { return ret; }
+  if (vifs.bss_vif) {
+    ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
+    if (ret) {
+      return ret;
     }
+  }
 
-    if (vifs.p2p_vif) {
-        ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
-        if (ret) { return ret; }
+  if (vifs.p2p_vif) {
+    ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
+    if (ret) {
+      return ret;
     }
+  }
 
-    if (vifs.bss_vif) { return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); }
+  if (vifs.bss_vif) {
+    return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+  }
 
-    return 0;
+  return 0;
 }
 
 int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm* mvm, struct ieee80211_vif* vif, bool enable,
                                    uint32_t flags) {
-    int ret;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mac_power_cmd cmd = {};
+  int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mac_power_cmd cmd = {};
 
-    if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) { return 0; }
+  if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) {
+    return 0;
+  }
 
-    if (!vif->bss_conf.assoc) { return 0; }
+  if (!vif->bss_conf.assoc) {
+    return 0;
+  }
 
-    iwl_mvm_power_build_cmd(mvm, vif, &cmd, !enable);
+  iwl_mvm_power_build_cmd(mvm, vif, &cmd, !enable);
 
-    iwl_mvm_power_log(mvm, &cmd);
+  iwl_mvm_power_log(mvm, &cmd);
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
+  memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
 #endif
-    ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags, sizeof(cmd), &cmd);
-    if (ret) { return ret; }
-
-    /* configure beacon filtering */
-    if (mvmvif != mvm->bf_allowed_vif) { return 0; }
-
-    if (enable) {
-        struct iwl_beacon_filter_cmd cmd_bf = {
-            IWL_BF_CMD_CONFIG_D0I3,
-            .bf_enable_beacon_filter = cpu_to_le32(1),
-        };
-        /*
-         * When beacon storing is supported - disable beacon filtering
-         * altogether - the latest beacon will be sent when exiting d0i3
-         */
-        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_STORING)) {
-            ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags, true);
-        } else {
-            ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, flags, true);
-        }
-    } else {
-        if (mvmvif->bf_data.bf_enabled) {
-            ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
-        } else {
-            ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
-        }
-    }
-
+  ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags, sizeof(cmd), &cmd);
+  if (ret) {
     return ret;
+  }
+
+  /* configure beacon filtering */
+  if (mvmvif != mvm->bf_allowed_vif) {
+    return 0;
+  }
+
+  if (enable) {
+    struct iwl_beacon_filter_cmd cmd_bf = {
+        IWL_BF_CMD_CONFIG_D0I3,
+        .bf_enable_beacon_filter = cpu_to_le32(1),
+    };
+    /*
+     * When beacon storing is supported - disable beacon filtering
+     * altogether - the latest beacon will be sent when exiting d0i3
+     */
+    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_STORING)) {
+      ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags, true);
+    } else {
+      ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, flags, true);
+    }
+  } else {
+    if (mvmvif->bf_data.bf_enabled) {
+      ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+    } else {
+      ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+    }
+  }
+
+  return ret;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/quota-adv.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/quota-adv.c
index d7b591d..1d00778 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/quota-adv.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/quota-adv.c
@@ -34,6 +34,7 @@
  *****************************************************************************/
 
 #include <net/mac80211.h>
+
 #include "constants.h"
 #include "fw-api.h"
 #include "mvm.h"
@@ -42,425 +43,448 @@
  * fractions of 128, so provide a conversion.
  */
 static inline uint32_t iwl_mvm_quota_from_pct(uint32_t pct) {
-    return (pct * IWL_MVM_MAX_QUOTA) / 100;
+  return (pct * IWL_MVM_MAX_QUOTA) / 100;
 }
 
 struct iwl_mvm_quota_iterator_data {
-    struct ieee80211_vif* disabled_vif;
-    /*
-     * Normally, transferring pointers from inside the iteration
-     * to outside is a bug, but all the code here is protected by
-     * the mvm mutex, so nothing can be added/removed and race.
-     */
-    uint32_t num_active_macs;
-    struct ieee80211_vif* vifs[NUM_MAC_INDEX_DRIVER];
-    bool monitor;
+  struct ieee80211_vif* disabled_vif;
+  /*
+   * Normally, transferring pointers from inside the iteration
+   * to outside is a bug, but all the code here is protected by
+   * the mvm mutex, so nothing can be added/removed and race.
+   */
+  uint32_t num_active_macs;
+  struct ieee80211_vif* vifs[NUM_MAC_INDEX_DRIVER];
+  bool monitor;
 };
 
 static void iwl_mvm_quota_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_quota_iterator_data* data = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    uint32_t num_active_macs = data->num_active_macs;
-    uint16_t id;
+  struct iwl_mvm_quota_iterator_data* data = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  uint32_t num_active_macs = data->num_active_macs;
+  uint16_t id;
 
-    /* reset tracked quota but otherwise skip interfaces being disabled */
-    if (vif == data->disabled_vif) { goto out; }
+  /* reset tracked quota but otherwise skip interfaces being disabled */
+  if (vif == data->disabled_vif) {
+    goto out;
+  }
 
-    if (!mvmvif->phy_ctxt) { goto out; }
+  if (!mvmvif->phy_ctxt) {
+    goto out;
+  }
 
-    /* currently, PHY ID == binding ID */
-    id = mvmvif->phy_ctxt->id;
+  /* currently, PHY ID == binding ID */
+  id = mvmvif->phy_ctxt->id;
 
-    /* need at least one binding per PHY */
-    BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);
+  /* need at least one binding per PHY */
+  BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);
 
-    if (WARN_ON_ONCE(id >= MAX_BINDINGS)) { goto out; }
+  if (WARN_ON_ONCE(id >= MAX_BINDINGS)) {
+    goto out;
+  }
 
-    switch (vif->type) {
+  switch (vif->type) {
     case NL80211_IFTYPE_STATION:
-        if (vif->bss_conf.assoc) {
-            data->vifs[data->num_active_macs] = vif;
-            data->num_active_macs++;
-        }
-        break;
+      if (vif->bss_conf.assoc) {
+        data->vifs[data->num_active_macs] = vif;
+        data->num_active_macs++;
+      }
+      break;
     case NL80211_IFTYPE_AP:
     case NL80211_IFTYPE_ADHOC:
-        if (mvmvif->ap_ibss_active) {
-            data->vifs[data->num_active_macs] = vif;
-            data->num_active_macs++;
-        }
-        break;
+      if (mvmvif->ap_ibss_active) {
+        data->vifs[data->num_active_macs] = vif;
+        data->num_active_macs++;
+      }
+      break;
     case NL80211_IFTYPE_MONITOR:
-        if (mvmvif->monitor_active) {
-            data->vifs[data->num_active_macs] = vif;
-            data->num_active_macs++;
-            data->monitor = true;
-        }
-        break;
+      if (mvmvif->monitor_active) {
+        data->vifs[data->num_active_macs] = vif;
+        data->num_active_macs++;
+        data->monitor = true;
+      }
+      break;
     case NL80211_IFTYPE_P2P_DEVICE:
     case NL80211_IFTYPE_NAN:
-        break;
+      break;
     default:
-        WARN_ON_ONCE(1);
-        break;
-    }
+      WARN_ON_ONCE(1);
+      break;
+  }
 
 out:
-    /* If this interface isn't considered now always reset its
-     * assigned quota so the next time it's considered it will
-     * be handled properly.
-     */
-    if (num_active_macs == data->num_active_macs) { mvmvif->pct_quota = 0; }
+  /* If this interface isn't considered now always reset its
+   * assigned quota so the next time it's considered it will
+   * be handled properly.
+   */
+  if (num_active_macs == data->num_active_macs) {
+    mvmvif->pct_quota = 0;
+  }
 }
 
 static uint32_t iwl_mvm_next_quota(struct iwl_mvm* mvm, uint32_t usage, uint32_t alloc,
                                    uint32_t unused, uint32_t n_vifs) {
-    uint32_t result;
-    uint32_t m;
+  uint32_t result;
+  uint32_t m;
 
-    IWL_DEBUG_QUOTA(mvm, "next_quota usage=%d, alloc=%d, unused=%d, n_vifs=%d\n", usage, alloc,
-                    unused, n_vifs);
+  IWL_DEBUG_QUOTA(mvm, "next_quota usage=%d, alloc=%d, unused=%d, n_vifs=%d\n", usage, alloc,
+                  unused, n_vifs);
 
-    if (alloc == 0) {
-        IWL_DEBUG_QUOTA(mvm, "new interface - next=%d\n", IWL_MVM_DYNQUOTA_START_PERCENT);
-        return IWL_MVM_DYNQUOTA_START_PERCENT;
+  if (alloc == 0) {
+    IWL_DEBUG_QUOTA(mvm, "new interface - next=%d\n", IWL_MVM_DYNQUOTA_START_PERCENT);
+    return IWL_MVM_DYNQUOTA_START_PERCENT;
+  }
+
+  if (usage > IWL_MVM_DYNQUOTA_HIGH_WM_PERCENT) {
+    if (unused > 0) {
+      result = alloc + (unused / n_vifs) + IWL_MVM_DYNQUOTA_INC_HIGH_PERCENT;
+      IWL_DEBUG_QUOTA(mvm, "high usage boost - next=%d\n", result);
+      return result;
     }
-
-    if (usage > IWL_MVM_DYNQUOTA_HIGH_WM_PERCENT) {
-        if (unused > 0) {
-            result = alloc + (unused / n_vifs) + IWL_MVM_DYNQUOTA_INC_HIGH_PERCENT;
-            IWL_DEBUG_QUOTA(mvm, "high usage boost - next=%d\n", result);
-            return result;
-        }
-        result = 100 / n_vifs;
-        IWL_DEBUG_QUOTA(mvm, "high usage w/o boost - next=%d\n", result);
-        return result;
-    }
-
-    if (usage > IWL_MVM_DYNQUOTA_LOW_WM_PERCENT) {
-        IWL_DEBUG_QUOTA(mvm, "medium usage - next=%d\n", alloc);
-        return alloc;
-    }
-
-    m = min_t(uint32_t, IWL_MVM_DYNQUOTA_MIN_PERCENT + unused / n_vifs,
-              IWL_MVM_DYNQUOTA_LOW_WM_PERCENT - usage);
-    if (alloc > IWL_MVM_DYNQUOTA_MIN_PERCENT + m) {
-        result = alloc - m;
-    } else {
-        result = IWL_MVM_DYNQUOTA_MIN_PERCENT;
-    }
-    IWL_DEBUG_QUOTA(mvm, "low usage - next=%d\n", result);
+    result = 100 / n_vifs;
+    IWL_DEBUG_QUOTA(mvm, "high usage w/o boost - next=%d\n", result);
     return result;
+  }
+
+  if (usage > IWL_MVM_DYNQUOTA_LOW_WM_PERCENT) {
+    IWL_DEBUG_QUOTA(mvm, "medium usage - next=%d\n", alloc);
+    return alloc;
+  }
+
+  m = min_t(uint32_t, IWL_MVM_DYNQUOTA_MIN_PERCENT + unused / n_vifs,
+            IWL_MVM_DYNQUOTA_LOW_WM_PERCENT - usage);
+  if (alloc > IWL_MVM_DYNQUOTA_MIN_PERCENT + m) {
+    result = alloc - m;
+  } else {
+    result = IWL_MVM_DYNQUOTA_MIN_PERCENT;
+  }
+  IWL_DEBUG_QUOTA(mvm, "low usage - next=%d\n", result);
+  return result;
 }
 
 enum iwl_mvm_quota_result iwl_mvm_calculate_advanced_quotas(struct iwl_mvm* mvm,
                                                             struct ieee80211_vif* disabled_vif,
                                                             bool force_update,
                                                             struct iwl_time_quota_cmd* cmd) {
-    int i, idx;
-    struct iwl_mvm_quota_iterator_data data = {
-        .disabled_vif = disabled_vif,
-    };
-    struct iwl_time_quota_data* quota;
-    uint32_t usage[NUM_MAC_INDEX_DRIVER];
-    uint32_t unused;
-    uint32_t total;
-    int n_lowlat = 0;
-    int iter;
-    uint32_t new_quota[NUM_MAC_INDEX_DRIVER];
-    bool significant_change = false;
+  int i, idx;
+  struct iwl_mvm_quota_iterator_data data = {
+      .disabled_vif = disabled_vif,
+  };
+  struct iwl_time_quota_data* quota;
+  uint32_t usage[NUM_MAC_INDEX_DRIVER];
+  uint32_t unused;
+  uint32_t total;
+  int n_lowlat = 0;
+  int iter;
+  uint32_t new_quota[NUM_MAC_INDEX_DRIVER];
+  bool significant_change = false;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    if (IWL_MVM_DYNQUOTA_DISABLED) { return IWL_MVM_QUOTA_ERROR; }
+  if (IWL_MVM_DYNQUOTA_DISABLED) {
+    return IWL_MVM_QUOTA_ERROR;
+  }
 #endif
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_quota_iterator, &data);
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_quota_iterator, &data);
 
-    if (!data.num_active_macs) { return IWL_MVM_QUOTA_ERROR; }
+  if (!data.num_active_macs) {
+    return IWL_MVM_QUOTA_ERROR;
+  }
 
-    if (WARN_ON(data.num_active_macs * IWL_MVM_DYNQUOTA_MIN_PERCENT > 100)) {
-        return IWL_MVM_QUOTA_ERROR;
+  if (WARN_ON(data.num_active_macs * IWL_MVM_DYNQUOTA_MIN_PERCENT > 100)) {
+    return IWL_MVM_QUOTA_ERROR;
+  }
+
+  if (data.monitor) {
+    WARN(data.num_active_macs != 1, "unexpectedly have %d MACs active despite monitor\n",
+         data.num_active_macs);
+    return IWL_MVM_QUOTA_ERROR;
+  }
+
+  unused = 0;
+
+  spin_lock_bh(&mvm->tcm.lock);
+  for (i = 0; i < data.num_active_macs; i++) {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
+    int id = mvmvif->id;
+
+    if (!mvmvif->pct_quota) {
+      continue;
     }
 
-    if (data.monitor) {
-        WARN(data.num_active_macs != 1, "unexpectedly have %d MACs active despite monitor\n",
-             data.num_active_macs);
-        return IWL_MVM_QUOTA_ERROR;
-    }
-
-    unused = 0;
-
-    spin_lock_bh(&mvm->tcm.lock);
-    for (i = 0; i < data.num_active_macs; i++) {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
-        int id = mvmvif->id;
-
-        if (!mvmvif->pct_quota) { continue; }
-
-        /* load percentage of the total elapsed time */
-        usage[id] =
-            iwl_mvm_tcm_load_percentage(mvm->tcm.result.airtime[id], mvm->tcm.result.elapsed);
-        /* expressed as percentage of the assigned quota */
-        usage[id] = (100 * usage[id]) / mvmvif->pct_quota;
-        /* can be > 1 when sharing channel contexts */
-        usage[id] = min_t(uint32_t, 100, usage[id]);
+    /* load percentage of the total elapsed time */
+    usage[id] = iwl_mvm_tcm_load_percentage(mvm->tcm.result.airtime[id], mvm->tcm.result.elapsed);
+    /* expressed as percentage of the assigned quota */
+    usage[id] = (100 * usage[id]) / mvmvif->pct_quota;
+    /* can be > 1 when sharing channel contexts */
+    usage[id] = min_t(uint32_t, 100, usage[id]);
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-        mvm->quotadbg.quota_used[id] = usage[id];
+    mvm->quotadbg.quota_used[id] = usage[id];
 #endif
-        unused += (mvmvif->pct_quota * (100 - usage[id])) / 100;
+    unused += (mvmvif->pct_quota * (100 - usage[id])) / 100;
+  }
+  spin_unlock_bh(&mvm->tcm.lock);
+
+  total = 0;
+
+  for (i = 0; i < data.num_active_macs; i++) {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
+    int id = mvmvif->id;
+
+    new_quota[id] =
+        iwl_mvm_next_quota(mvm, usage[id], mvmvif->pct_quota, unused, data.num_active_macs);
+
+    if (iwl_mvm_vif_low_latency(mvmvif)) {
+      uint32_t ll_min;
+
+      switch (ieee80211_vif_type_p2p(data.vifs[i])) {
+        case NL80211_IFTYPE_P2P_CLIENT:
+          ll_min = IWL_MVM_LOWLAT_QUOTA_MIN_PCT_P2PCLIENT;
+          break;
+        case NL80211_IFTYPE_P2P_GO:
+        case NL80211_IFTYPE_AP:
+          ll_min = IWL_MVM_LOWLAT_QUOTA_MIN_PCT_P2PGO;
+          break;
+        default:
+          ll_min = IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT;
+          break;
+      }
+      new_quota[id] = max_t(uint32_t, ll_min, new_quota[id]);
+      n_lowlat++;
     }
-    spin_unlock_bh(&mvm->tcm.lock);
+    total += new_quota[id];
+  }
+
+  /* take away evenly if > 100 */
+  if (total > 100) {
+    for (i = 0; i < data.num_active_macs; i++) {
+      struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
+
+      new_quota[mvmvif->id] = (new_quota[mvmvif->id] * 100) / total;
+    }
 
     total = 0;
-
     for (i = 0; i < data.num_active_macs; i++) {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
-        int id = mvmvif->id;
+      struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
 
-        new_quota[id] =
-            iwl_mvm_next_quota(mvm, usage[id], mvmvif->pct_quota, unused, data.num_active_macs);
+      total += new_quota[mvmvif->id];
+    }
+  }
 
-        if (iwl_mvm_vif_low_latency(mvmvif)) {
-            uint32_t ll_min;
+  /* distribute the remainder if any - preferably to low-latency */
+  i = 0;
+  while (total < 100) {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
 
-            switch (ieee80211_vif_type_p2p(data.vifs[i])) {
-            case NL80211_IFTYPE_P2P_CLIENT:
-                ll_min = IWL_MVM_LOWLAT_QUOTA_MIN_PCT_P2PCLIENT;
-                break;
-            case NL80211_IFTYPE_P2P_GO:
-            case NL80211_IFTYPE_AP:
-                ll_min = IWL_MVM_LOWLAT_QUOTA_MIN_PCT_P2PGO;
-                break;
-            default:
-                ll_min = IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT;
-                break;
-            }
-            new_quota[id] = max_t(uint32_t, ll_min, new_quota[id]);
-            n_lowlat++;
-        }
-        total += new_quota[id];
+    if (n_lowlat == 0 || iwl_mvm_vif_low_latency(mvmvif)) {
+      new_quota[mvmvif->id]++;
+      total++;
+    }
+    i = (i + 1) % data.num_active_macs;
+  }
+
+  /* ensure minimum allocation for each */
+  total = 0;
+  for (i = 0; i < data.num_active_macs; i++) {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
+
+    if (new_quota[mvmvif->id] < IWL_MVM_DYNQUOTA_MIN_PERCENT) {
+      new_quota[mvmvif->id] = IWL_MVM_DYNQUOTA_MIN_PERCENT;
+    }
+    total += new_quota[mvmvif->id];
+  }
+
+  iter = 0;
+  i = 0;
+  while (total > 100 && iter < 100) {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
+
+    if (new_quota[mvmvif->id] > IWL_MVM_DYNQUOTA_MIN_PERCENT) {
+      new_quota[mvmvif->id]--;
+      total--;
+    }
+    i = (i + 1) % data.num_active_macs;
+  }
+
+  if (WARN_ON(iter >= 100)) {
+    return IWL_MVM_QUOTA_ERROR;
+  }
+
+  for (i = 0; i < data.num_active_macs; i++) {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
+
+    if (abs((int32_t)new_quota[mvmvif->id] - (int32_t)mvmvif->pct_quota) >
+        IWL_MVM_QUOTA_THRESHOLD) {
+      significant_change = true;
+      mvmvif->pct_quota = new_quota[mvmvif->id];
+    }
+  }
+
+  if (!significant_change && !force_update) {
+    return IWL_MVM_QUOTA_SKIP;
+  }
+
+  /* prepare command to upload to device */
+  for (i = 0; i < MAX_BINDINGS; i++) {
+    quota = iwl_mvm_quota_cmd_get_quota(mvm, cmd, i);
+    quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+    quota->quota = cpu_to_le32(0);
+    quota->max_duration = cpu_to_le32(0);
+  }
+
+  for (i = 0; i < data.num_active_macs; i++) {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
+    uint32_t color;
+
+    /* we always set binding id/color == phy id/color */
+    idx = mvmvif->phy_ctxt->id;
+    color = mvmvif->phy_ctxt->color;
+
+    if (WARN_ON_ONCE(idx >= MAX_BINDINGS)) {
+      continue;
     }
 
-    /* take away evenly if > 100 */
-    if (total > 100) {
-        for (i = 0; i < data.num_active_macs; i++) {
-            struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
+    quota = iwl_mvm_quota_cmd_get_quota(mvm, cmd, idx);
+    quota->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(idx, color));
+    le32_add_cpu(&quota->quota, iwl_mvm_quota_from_pct(mvmvif->pct_quota));
+  }
 
-            new_quota[mvmvif->id] = (new_quota[mvmvif->id] * 100) / total;
-        }
+  /* due to the 100/128 mix between the calculation and the firmware,
+   * we can get errors. Distribute those among the bindings.
+   */
+  total = 0;
+  for (i = 0; i < MAX_BINDINGS; i++) {
+    quota = iwl_mvm_quota_cmd_get_quota(mvm, cmd, i);
+    total += le32_to_cpu(quota->quota);
+  }
+  if (WARN(total > IWL_MVM_MAX_QUOTA, "total (%d) too big\n", total)) {
+    memset(cmd, 0, sizeof(*cmd));
+    return IWL_MVM_QUOTA_ERROR;
+  }
 
-        total = 0;
-        for (i = 0; i < data.num_active_macs; i++) {
-            struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
-
-            total += new_quota[mvmvif->id];
-        }
+  idx = 0;
+  while (total < IWL_MVM_MAX_QUOTA) {
+    quota = iwl_mvm_quota_cmd_get_quota(mvm, cmd, idx);
+    if (quota->quota) {
+      le32_add_cpu(&quota->quota, 1);
+      total += 1;
     }
-
-    /* distribute the remainder if any - preferably to low-latency */
-    i = 0;
-    while (total < 100) {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
-
-        if (n_lowlat == 0 || iwl_mvm_vif_low_latency(mvmvif)) {
-            new_quota[mvmvif->id]++;
-            total++;
-        }
-        i = (i + 1) % data.num_active_macs;
-    }
-
-    /* ensure minimum allocation for each */
-    total = 0;
-    for (i = 0; i < data.num_active_macs; i++) {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
-
-        if (new_quota[mvmvif->id] < IWL_MVM_DYNQUOTA_MIN_PERCENT) {
-            new_quota[mvmvif->id] = IWL_MVM_DYNQUOTA_MIN_PERCENT;
-        }
-        total += new_quota[mvmvif->id];
-    }
-
-    iter = 0;
-    i = 0;
-    while (total > 100 && iter < 100) {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
-
-        if (new_quota[mvmvif->id] > IWL_MVM_DYNQUOTA_MIN_PERCENT) {
-            new_quota[mvmvif->id]--;
-            total--;
-        }
-        i = (i + 1) % data.num_active_macs;
-    }
-
-    if (WARN_ON(iter >= 100)) { return IWL_MVM_QUOTA_ERROR; }
-
-    for (i = 0; i < data.num_active_macs; i++) {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
-
-        if (abs((int32_t)new_quota[mvmvif->id] - (int32_t)mvmvif->pct_quota) >
-            IWL_MVM_QUOTA_THRESHOLD) {
-            significant_change = true;
-            mvmvif->pct_quota = new_quota[mvmvif->id];
-        }
-    }
-
-    if (!significant_change && !force_update) { return IWL_MVM_QUOTA_SKIP; }
-
-    /* prepare command to upload to device */
-    for (i = 0; i < MAX_BINDINGS; i++) {
-        quota = iwl_mvm_quota_cmd_get_quota(mvm, cmd, i);
-        quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
-        quota->quota = cpu_to_le32(0);
-        quota->max_duration = cpu_to_le32(0);
-    }
-
-    for (i = 0; i < data.num_active_macs; i++) {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(data.vifs[i]);
-        uint32_t color;
-
-        /* we always set binding id/color == phy id/color */
-        idx = mvmvif->phy_ctxt->id;
-        color = mvmvif->phy_ctxt->color;
-
-        if (WARN_ON_ONCE(idx >= MAX_BINDINGS)) { continue; }
-
-        quota = iwl_mvm_quota_cmd_get_quota(mvm, cmd, idx);
-        quota->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(idx, color));
-        le32_add_cpu(&quota->quota, iwl_mvm_quota_from_pct(mvmvif->pct_quota));
-    }
-
-    /* due to the 100/128 mix between the calculation and the firmware,
-     * we can get errors. Distribute those among the bindings.
-     */
-    total = 0;
-    for (i = 0; i < MAX_BINDINGS; i++) {
-        quota = iwl_mvm_quota_cmd_get_quota(mvm, cmd, i);
-        total += le32_to_cpu(quota->quota);
-    }
-    if (WARN(total > IWL_MVM_MAX_QUOTA, "total (%d) too big\n", total)) {
-        memset(cmd, 0, sizeof(*cmd));
-        return IWL_MVM_QUOTA_ERROR;
-    }
-
-    idx = 0;
-    while (total < IWL_MVM_MAX_QUOTA) {
-        quota = iwl_mvm_quota_cmd_get_quota(mvm, cmd, idx);
-        if (quota->quota) {
-            le32_add_cpu(&quota->quota, 1);
-            total += 1;
-        }
-        idx = (idx + 1) % MAX_BINDINGS;
-    }
+    idx = (idx + 1) % MAX_BINDINGS;
+  }
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    mvm->quotadbg.cmd = *cmd;
-    mvm->quotadbg.last_update = jiffies;
+  mvm->quotadbg.cmd = *cmd;
+  mvm->quotadbg.last_update = jiffies;
 #endif
 
-    return IWL_MVM_QUOTA_OK;
+  return IWL_MVM_QUOTA_OK;
 }
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
 struct quota_mac_data {
-    struct {
-        enum nl80211_iftype iftype;
-        int32_t phy_id;
-        bool low_latency;
-        uint32_t quota;
-    } macs[NUM_MAC_INDEX_DRIVER];
+  struct {
+    enum nl80211_iftype iftype;
+    int32_t phy_id;
+    bool low_latency;
+    uint32_t quota;
+  } macs[NUM_MAC_INDEX_DRIVER];
 };
 
 static void iwl_mvm_quota_dbg_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct quota_mac_data* md = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct quota_mac_data* md = _data;
 
-    if (WARN_ON(mvmvif->id >= NUM_MAC_INDEX_DRIVER)) { return; }
+  if (WARN_ON(mvmvif->id >= NUM_MAC_INDEX_DRIVER)) {
+    return;
+  }
 
-    md->macs[mvmvif->id].iftype = ieee80211_iftype_p2p(vif->type, vif->p2p);
-    md->macs[mvmvif->id].phy_id = mvmvif->phy_ctxt ? mvmvif->phy_ctxt->id : -1;
-    md->macs[mvmvif->id].low_latency = iwl_mvm_vif_low_latency(mvmvif);
-    md->macs[mvmvif->id].quota = mvmvif->pct_quota;
+  md->macs[mvmvif->id].iftype = ieee80211_iftype_p2p(vif->type, vif->p2p);
+  md->macs[mvmvif->id].phy_id = mvmvif->phy_ctxt ? mvmvif->phy_ctxt->id : -1;
+  md->macs[mvmvif->id].low_latency = iwl_mvm_vif_low_latency(mvmvif);
+  md->macs[mvmvif->id].quota = mvmvif->pct_quota;
 }
 
 ssize_t iwl_dbgfs_quota_status_read(struct file* file, char __user* user_buf, size_t count,
                                     loff_t* ppos) {
-    struct iwl_mvm* mvm = file->private_data;
-    static const char* iftypes[NUM_NL80211_IFTYPES] = {
-        "<unused>", "ADHOC",      "STATION",    "AP",     "AP_VLAN",    "WDS",
-        "MONITOR",  "MESH_POINT", "P2P_CLIENT", "P2P_GO", "P2P_DEVICE",
-    };
-    struct quota_mac_data md = {};
-    char* buf = (void*)get_zeroed_page(GFP_KERNEL);
-    size_t pos = 0, bufsz = PAGE_SIZE;
-    ssize_t ret;
-    int i;
+  struct iwl_mvm* mvm = file->private_data;
+  static const char* iftypes[NUM_NL80211_IFTYPES] = {
+      "<unused>", "ADHOC",      "STATION",    "AP",     "AP_VLAN",    "WDS",
+      "MONITOR",  "MESH_POINT", "P2P_CLIENT", "P2P_GO", "P2P_DEVICE",
+  };
+  struct quota_mac_data md = {};
+  char* buf = (void*)get_zeroed_page(GFP_KERNEL);
+  size_t pos = 0, bufsz = PAGE_SIZE;
+  ssize_t ret;
+  int i;
 
-    if (!buf) { return -ENOMEM; }
+  if (!buf) {
+    return -ENOMEM;
+  }
 
-    mutex_lock(&mvm->mutex);
-    ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                        iwl_mvm_quota_dbg_iterator, &md);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                      iwl_mvm_quota_dbg_iterator, &md);
+  mutex_unlock(&mvm->mutex);
 
 #ifdef CPTCFG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES
-    if (IWL_MVM_DYNQUOTA_DISABLED)
-        pos += scnprintf(buf + pos, bufsz - pos,
-                         "dynamic quota is disabled - values marked * are incorrect!\n\n");
+  if (IWL_MVM_DYNQUOTA_DISABLED)
+    pos += scnprintf(buf + pos, bufsz - pos,
+                     "dynamic quota is disabled - values marked * are incorrect!\n\n");
 #endif
 
 #define DESC_ROW 20
 #define VAL_ROW 11
-#define ADD_ROW(name, valfmt, val)                                                     \
-    do {                                                                               \
-        int _m;                                                                        \
-        pos += scnprintf(buf + pos, bufsz - pos, "%-*s |", DESC_ROW, name);            \
-        for (_m = 0; _m < NUM_MAC_INDEX_DRIVER; _m++)                                  \
-            pos += scnprintf(buf + pos, bufsz - pos, " %*" valfmt " |", VAL_ROW, val); \
-        pos += scnprintf(buf + pos, bufsz - pos, "\n");                                \
-    } while (0)
+#define ADD_ROW(name, valfmt, val)                                               \
+  do {                                                                           \
+    int _m;                                                                      \
+    pos += scnprintf(buf + pos, bufsz - pos, "%-*s |", DESC_ROW, name);          \
+    for (_m = 0; _m < NUM_MAC_INDEX_DRIVER; _m++)                                \
+      pos += scnprintf(buf + pos, bufsz - pos, " %*" valfmt " |", VAL_ROW, val); \
+    pos += scnprintf(buf + pos, bufsz - pos, "\n");                              \
+  } while (0)
 
-    ADD_ROW("MAC data", "d", _m);
-    pos += scnprintf(
-        buf + pos, bufsz - pos,
-        "------------------------------------------------------------------------------\n");
-    ADD_ROW("iftype", "s", iftypes[md.macs[_m].iftype]);
-    ADD_ROW("PHY index", "d", md.macs[_m].phy_id);
-    ADD_ROW("channel busy time", "d", mvm->tcm.result.airtime[_m]);
-    ADD_ROW("low latency", "d", md.macs[_m].low_latency);
-    ADD_ROW("*quota (%)", "d", md.macs[_m].quota);
-    ADD_ROW("*quota used (%)", "d", mvm->quotadbg.quota_used[_m]);
-    pos += scnprintf(buf + pos, bufsz - pos, "\n");
+  ADD_ROW("MAC data", "d", _m);
+  pos +=
+      scnprintf(buf + pos, bufsz - pos,
+                "------------------------------------------------------------------------------\n");
+  ADD_ROW("iftype", "s", iftypes[md.macs[_m].iftype]);
+  ADD_ROW("PHY index", "d", md.macs[_m].phy_id);
+  ADD_ROW("channel busy time", "d", mvm->tcm.result.airtime[_m]);
+  ADD_ROW("low latency", "d", md.macs[_m].low_latency);
+  ADD_ROW("*quota (%)", "d", md.macs[_m].quota);
+  ADD_ROW("*quota used (%)", "d", mvm->quotadbg.quota_used[_m]);
+  pos += scnprintf(buf + pos, bufsz - pos, "\n");
 
-    pos += scnprintf(buf + pos, bufsz - pos, "elapsed time since update: %d ms\n",
-                     jiffies_to_msecs(jiffies - mvm->quotadbg.last_update));
-    pos += scnprintf(buf + pos, bufsz - pos, "elapsed in last complete TCM period: %d ms\n",
-                     mvm->tcm.result.elapsed);
-    pos += scnprintf(buf + pos, bufsz - pos, "elapsed in current open TCM period: %d ms\n",
-                     jiffies_to_msecs(jiffies - mvm->tcm.ts));
+  pos += scnprintf(buf + pos, bufsz - pos, "elapsed time since update: %d ms\n",
+                   jiffies_to_msecs(jiffies - mvm->quotadbg.last_update));
+  pos += scnprintf(buf + pos, bufsz - pos, "elapsed in last complete TCM period: %d ms\n",
+                   mvm->tcm.result.elapsed);
+  pos += scnprintf(buf + pos, bufsz - pos, "elapsed in current open TCM period: %d ms\n",
+                   jiffies_to_msecs(jiffies - mvm->tcm.ts));
 
-    pos += scnprintf(buf + pos, bufsz - pos, "\n*last calculated quota cmd:\n");
-    for (i = 0; i < MAX_BINDINGS; i++) {
-        struct iwl_time_quota_data* q = iwl_mvm_quota_cmd_get_quota(mvm, &mvm->quotadbg.cmd, i);
-        pos += scnprintf(buf + pos, bufsz - pos,
-                         "binding #%d |  Quota:% 4d | MaxDuration:% 4d | id_and_color: 0x%.8x\n", i,
-                         le32_to_cpu(q->quota), le32_to_cpu(q->max_duration),
-                         le32_to_cpu(q->id_and_color));
-    }
+  pos += scnprintf(buf + pos, bufsz - pos, "\n*last calculated quota cmd:\n");
+  for (i = 0; i < MAX_BINDINGS; i++) {
+    struct iwl_time_quota_data* q = iwl_mvm_quota_cmd_get_quota(mvm, &mvm->quotadbg.cmd, i);
+    pos += scnprintf(buf + pos, bufsz - pos,
+                     "binding #%d |  Quota:% 4d | MaxDuration:% 4d | id_and_color: 0x%.8x\n", i,
+                     le32_to_cpu(q->quota), le32_to_cpu(q->max_duration),
+                     le32_to_cpu(q->id_and_color));
+  }
 
-    pos += scnprintf(buf + pos, bufsz - pos, "\ncurrent quota sent to firmware:\n");
-    for (i = 0; i < MAX_BINDINGS; i++) {
-        struct iwl_time_quota_data* q = iwl_mvm_quota_cmd_get_quota(mvm, &mvm->quotadbg.cmd, i);
-        pos += scnprintf(buf + pos, bufsz - pos,
-                         "binding #%d |  Quota:% 4d | MaxDuration:% 4d | id_and_color: 0x%.8x\n", i,
-                         le32_to_cpu(q->quota), le32_to_cpu(q->max_duration),
-                         le32_to_cpu(q->id_and_color));
-    }
+  pos += scnprintf(buf + pos, bufsz - pos, "\ncurrent quota sent to firmware:\n");
+  for (i = 0; i < MAX_BINDINGS; i++) {
+    struct iwl_time_quota_data* q = iwl_mvm_quota_cmd_get_quota(mvm, &mvm->quotadbg.cmd, i);
+    pos += scnprintf(buf + pos, bufsz - pos,
+                     "binding #%d |  Quota:% 4d | MaxDuration:% 4d | id_and_color: 0x%.8x\n", i,
+                     le32_to_cpu(q->quota), le32_to_cpu(q->max_duration),
+                     le32_to_cpu(q->id_and_color));
+  }
 
-    ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-    free_page((unsigned long)buf);
-    return ret;
+  ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+  free_page((unsigned long)buf);
+  return ret;
 }
 #endif /* CPTCFG_IWLWIFI_DEBUGFS */
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/quota.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/quota.c
index 682ea4d..a8acd7a 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/quota.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/quota.c
@@ -35,6 +35,7 @@
  *****************************************************************************/
 
 #include <net/mac80211.h>
+
 #include "fw-api.h"
 #include "mvm.h"
 
@@ -42,71 +43,83 @@
 #define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
 
 struct iwl_mvm_quota_iterator_data {
-    int n_interfaces[MAX_BINDINGS];
-    int colors[MAX_BINDINGS];
-    int low_latency[MAX_BINDINGS];
+  int n_interfaces[MAX_BINDINGS];
+  int colors[MAX_BINDINGS];
+  int low_latency[MAX_BINDINGS];
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    int dbgfs_min[MAX_BINDINGS];
+  int dbgfs_min[MAX_BINDINGS];
 #endif
-    int n_low_latency_bindings;
-    struct ieee80211_vif* disabled_vif;
+  int n_low_latency_bindings;
+  struct ieee80211_vif* disabled_vif;
 };
 
 static void iwl_mvm_quota_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_quota_iterator_data* data = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    uint16_t id;
+  struct iwl_mvm_quota_iterator_data* data = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  uint16_t id;
 
-    /* skip disabled interfaces here immediately */
-    if (vif == data->disabled_vif) { return; }
+  /* skip disabled interfaces here immediately */
+  if (vif == data->disabled_vif) {
+    return;
+  }
 
-    if (!mvmvif->phy_ctxt) { return; }
+  if (!mvmvif->phy_ctxt) {
+    return;
+  }
 
-    /* currently, PHY ID == binding ID */
-    id = mvmvif->phy_ctxt->id;
+  /* currently, PHY ID == binding ID */
+  id = mvmvif->phy_ctxt->id;
 
-    /* need at least one binding per PHY */
-    BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);
+  /* need at least one binding per PHY */
+  BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);
 
-    if (WARN_ON_ONCE(id >= MAX_BINDINGS)) { return; }
+  if (WARN_ON_ONCE(id >= MAX_BINDINGS)) {
+    return;
+  }
 
-    switch (vif->type) {
+  switch (vif->type) {
     case NL80211_IFTYPE_STATION:
-        if (vif->bss_conf.assoc) { break; }
-        return;
+      if (vif->bss_conf.assoc) {
+        break;
+      }
+      return;
     case NL80211_IFTYPE_AP:
     case NL80211_IFTYPE_ADHOC:
-        if (mvmvif->ap_ibss_active) { break; }
-        return;
+      if (mvmvif->ap_ibss_active) {
+        break;
+      }
+      return;
     case NL80211_IFTYPE_MONITOR:
-        if (mvmvif->monitor_active) { break; }
-        return;
+      if (mvmvif->monitor_active) {
+        break;
+      }
+      return;
     case NL80211_IFTYPE_P2P_DEVICE:
     case NL80211_IFTYPE_NAN:
-        return;
+      return;
     default:
-        WARN_ON_ONCE(1);
-        return;
-    }
+      WARN_ON_ONCE(1);
+      return;
+  }
 
-    if (data->colors[id] < 0) {
-        data->colors[id] = mvmvif->phy_ctxt->color;
-    } else {
-        WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color);
-    }
+  if (data->colors[id] < 0) {
+    data->colors[id] = mvmvif->phy_ctxt->color;
+  } else {
+    WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color);
+  }
 
-    data->n_interfaces[id]++;
+  data->n_interfaces[id]++;
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    if (mvmvif->dbgfs_quota_min) {
-        data->dbgfs_min[id] = max(data->dbgfs_min[id], mvmvif->dbgfs_quota_min);
-    }
+  if (mvmvif->dbgfs_quota_min) {
+    data->dbgfs_min[id] = max(data->dbgfs_min[id], mvmvif->dbgfs_quota_min);
+  }
 #endif
 
-    if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
-        data->n_low_latency_bindings++;
-        data->low_latency[id] = true;
-    }
+  if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
+    data->n_low_latency_bindings++;
+    data->low_latency[id] = true;
+  }
 }
 
 #ifdef CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA
@@ -116,56 +129,65 @@
  * details.
  */
 static void iwl_mvm_adjust_quota_for_p2p_wa(struct iwl_mvm* mvm, struct iwl_time_quota_cmd* cmd) {
-    struct iwl_time_quota_data* quota;
-    int i, phy_id = -1;
+  struct iwl_time_quota_data* quota;
+  int i, phy_id = -1;
 
-    if (!mvm->p2p_opps_test_wa_vif || !mvm->p2p_opps_test_wa_vif->phy_ctxt) { return; }
+  if (!mvm->p2p_opps_test_wa_vif || !mvm->p2p_opps_test_wa_vif->phy_ctxt) {
+    return;
+  }
 
-    phy_id = mvm->p2p_opps_test_wa_vif->phy_ctxt->id;
-    for (i = 0; i < MAX_BINDINGS; i++) {
-        uint32_t id;
-        uint32_t id_n_c;
+  phy_id = mvm->p2p_opps_test_wa_vif->phy_ctxt->id;
+  for (i = 0; i < MAX_BINDINGS; i++) {
+    uint32_t id;
+    uint32_t id_n_c;
 
-        quota = iwl_mvm_quota_cmd_get_quota(mvm, cmd, i);
-        id_n_c = le32_to_cpu(quota->id_and_color);
-        id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+    quota = iwl_mvm_quota_cmd_get_quota(mvm, cmd, i);
+    id_n_c = le32_to_cpu(quota->id_and_color);
+    id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
 
-        if (id != phy_id) { continue; }
-
-        quota->quota = 0;
+    if (id != phy_id) {
+      continue;
     }
+
+    quota->quota = 0;
+  }
 }
 #endif
 
 static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm* mvm, struct iwl_time_quota_cmd* cmd) {
 #ifdef CPTCFG_NL80211_TESTMODE
-    struct iwl_mvm_vif* mvmvif;
-    int i, phy_id = -1, beacon_int = 0;
+  struct iwl_mvm_vif* mvmvif;
+  int i, phy_id = -1, beacon_int = 0;
 
-    if (!mvm->noa_duration || !mvm->noa_vif) { return; }
+  if (!mvm->noa_duration || !mvm->noa_vif) {
+    return;
+  }
 
-    mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
-    if (!mvmvif->ap_ibss_active) { return; }
+  mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
+  if (!mvmvif->ap_ibss_active) {
+    return;
+  }
 
-    phy_id = mvmvif->phy_ctxt->id;
-    beacon_int = mvm->noa_vif->bss_conf.beacon_int;
+  phy_id = mvmvif->phy_ctxt->id;
+  beacon_int = mvm->noa_vif->bss_conf.beacon_int;
 
-    for (i = 0; i < MAX_BINDINGS; i++) {
-        struct iwl_time_quota_data* data = iwl_mvm_quota_cmd_get_quota(mvm, cmd, i);
-        uint32_t id_n_c = le32_to_cpu(data->id_and_color);
-        uint32_t id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
-        uint32_t quota = le32_to_cpu(data->quota);
+  for (i = 0; i < MAX_BINDINGS; i++) {
+    struct iwl_time_quota_data* data = iwl_mvm_quota_cmd_get_quota(mvm, cmd, i);
+    uint32_t id_n_c = le32_to_cpu(data->id_and_color);
+    uint32_t id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+    uint32_t quota = le32_to_cpu(data->quota);
 
-        if (id != phy_id) { continue; }
-
-        quota *= (beacon_int - mvm->noa_duration);
-        quota /= beacon_int;
-
-        IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n", le32_to_cpu(data->quota),
-                        quota);
-
-        data->quota = cpu_to_le32(quota);
+    if (id != phy_id) {
+      continue;
     }
+
+    quota *= (beacon_int - mvm->noa_duration);
+    quota /= beacon_int;
+
+    IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n", le32_to_cpu(data->quota), quota);
+
+    data->quota = cpu_to_le32(quota);
+  }
 #endif
 }
 
@@ -175,214 +197,229 @@
  * Set vif to NULL to cancel a previous enforcement
  */
 int iwl_mvm_dhc_quota_enforce(struct iwl_mvm* mvm, struct iwl_mvm_vif* vif, int quota_percent) {
-    struct iwl_dhc_cmd* dhc_cmd;
-    struct iwl_dhc_quota_enforce* dhc_quota_cmd;
-    uint32_t cmd_id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0);
-    int ret;
+  struct iwl_dhc_cmd* dhc_cmd;
+  struct iwl_dhc_quota_enforce* dhc_quota_cmd;
+  uint32_t cmd_id = iwl_cmd_id(DEBUG_HOST_COMMAND, IWL_ALWAYS_LONG_GROUP, 0);
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    dhc_cmd = kzalloc(sizeof(*dhc_cmd) + sizeof(*dhc_quota_cmd), GFP_KERNEL);
-    if (!dhc_cmd) { return -ENOMEM; }
+  dhc_cmd = kzalloc(sizeof(*dhc_cmd) + sizeof(*dhc_quota_cmd), GFP_KERNEL);
+  if (!dhc_cmd) {
+    return -ENOMEM;
+  }
 
-    IWL_DEBUG_QUOTA(mvm, "quota enforce: enforce %d, percent %d\n", vif ? 1 : 0, quota_percent);
+  IWL_DEBUG_QUOTA(mvm, "quota enforce: enforce %d, percent %d\n", vif ? 1 : 0, quota_percent);
 
-    dhc_quota_cmd = (void*)dhc_cmd->data;
-    dhc_quota_cmd->quota_enforce_type = QUOTA_ENFORCE_TYPE_LIMITATION;
-    if (vif) {
-        dhc_quota_cmd->macs = BIT(vif->id);
-        dhc_quota_cmd->quota_percentage[vif->id] = cpu_to_le32(quota_percent);
-    }
+  dhc_quota_cmd = (void*)dhc_cmd->data;
+  dhc_quota_cmd->quota_enforce_type = QUOTA_ENFORCE_TYPE_LIMITATION;
+  if (vif) {
+    dhc_quota_cmd->macs = BIT(vif->id);
+    dhc_quota_cmd->quota_percentage[vif->id] = cpu_to_le32(quota_percent);
+  }
 
-    dhc_cmd->length = cpu_to_le32(sizeof(*dhc_quota_cmd) >> 2);
-    dhc_cmd->index_and_mask =
-        cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC | DHC_INTEGRATION_QUOTA_ENFORCE);
+  dhc_cmd->length = cpu_to_le32(sizeof(*dhc_quota_cmd) >> 2);
+  dhc_cmd->index_and_mask =
+      cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC | DHC_INTEGRATION_QUOTA_ENFORCE);
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(*dhc_cmd) + sizeof(*dhc_quota_cmd), dhc_cmd);
-    kfree(dhc_cmd);
+  ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(*dhc_cmd) + sizeof(*dhc_quota_cmd), dhc_cmd);
+  kfree(dhc_cmd);
 
-    return ret;
+  return ret;
 }
 #endif
 
 int iwl_mvm_update_quotas(struct iwl_mvm* mvm, bool force_update,
                           struct ieee80211_vif* disabled_vif) {
-    struct iwl_time_quota_cmd cmd = {};
-    int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat;
-    struct iwl_mvm_quota_iterator_data data = {
-        .n_interfaces = {},
-        .colors = {-1, -1, -1, -1},
-        .disabled_vif = disabled_vif,
-    };
-    struct iwl_time_quota_cmd* last = &mvm->last_quota_cmd;
-    struct iwl_time_quota_data *qdata, *last_data;
-    bool send = false;
+  struct iwl_time_quota_cmd cmd = {};
+  int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat;
+  struct iwl_mvm_quota_iterator_data data = {
+      .n_interfaces = {},
+      .colors = {-1, -1, -1, -1},
+      .disabled_vif = disabled_vif,
+  };
+  struct iwl_time_quota_cmd* last = &mvm->last_quota_cmd;
+  struct iwl_time_quota_data *qdata, *last_data;
+  bool send = false;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) { return 0; }
+  if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
+    return 0;
+  }
 
-    /* update all upon completion */
-    if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { return 0; }
+  /* update all upon completion */
+  if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+    return 0;
+  }
 
-    /* iterator data above must match */
-    BUILD_BUG_ON(MAX_BINDINGS != 4);
+  /* iterator data above must match */
+  BUILD_BUG_ON(MAX_BINDINGS != 4);
 
 #ifdef CPTCFG_IWLMVM_ADVANCED_QUOTA_MGMT
-    switch (iwl_mvm_calculate_advanced_quotas(mvm, disabled_vif, force_update, &cmd)) {
+  switch (iwl_mvm_calculate_advanced_quotas(mvm, disabled_vif, force_update, &cmd)) {
     case IWL_MVM_QUOTA_OK:
-        /* override send - advanced calculation checked already */
-        send = true;
-        goto out;
+      /* override send - advanced calculation checked already */
+      send = true;
+      goto out;
     case IWL_MVM_QUOTA_SKIP:
-        return 0;
+      return 0;
     case IWL_MVM_QUOTA_ERROR:
-        /* continue with static allocation */
+      /* continue with static allocation */
+      break;
+  }
+#endif
+
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_quota_iterator, &data);
+
+  /*
+   * The FW's scheduling session consists of
+   * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
+   * equally between all the bindings that require quota
+   */
+  num_active_macs = 0;
+  for (i = 0; i < MAX_BINDINGS; i++) {
+    qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+    qdata->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+    num_active_macs += data.n_interfaces[i];
+  }
+
+  n_non_lowlat = num_active_macs;
+
+  if (data.n_low_latency_bindings == 1) {
+    for (i = 0; i < MAX_BINDINGS; i++) {
+      if (data.low_latency[i]) {
+        n_non_lowlat -= data.n_interfaces[i];
         break;
+      }
     }
-#endif
+  }
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_quota_iterator, &data);
-
+  if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
     /*
-     * The FW's scheduling session consists of
-     * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
-     * equally between all the bindings that require quota
+     * Reserve quota for the low latency binding in case that
+     * there are several data bindings but only a single
+     * low latency one. Split the rest of the quota equally
+     * between the other data interfaces.
      */
-    num_active_macs = 0;
-    for (i = 0; i < MAX_BINDINGS; i++) {
-        qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
-        qdata->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
-        num_active_macs += data.n_interfaces[i];
+    quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
+    quota_rem = QUOTA_100 - n_non_lowlat * quota - QUOTA_LOWLAT_MIN;
+    IWL_DEBUG_QUOTA(
+        mvm, "quota: low-latency binding active, remaining quota per other binding: %d\n", quota);
+  } else if (num_active_macs) {
+    /*
+     * There are 0 or more than 1 low latency bindings, or all the
+     * data interfaces belong to the single low latency binding.
+     * Split the quota equally between the data interfaces.
+     */
+    quota = QUOTA_100 / num_active_macs;
+    quota_rem = QUOTA_100 % num_active_macs;
+    IWL_DEBUG_QUOTA(mvm, "quota: splitting evenly per binding: %d\n", quota);
+  } else {
+    /* values don't really matter - won't be used */
+    quota = 0;
+    quota_rem = 0;
+  }
+
+  for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
+    if (data.colors[i] < 0) {
+      continue;
     }
 
-    n_non_lowlat = num_active_macs;
+    qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, idx);
 
-    if (data.n_low_latency_bindings == 1) {
-        for (i = 0; i < MAX_BINDINGS; i++) {
-            if (data.low_latency[i]) {
-                n_non_lowlat -= data.n_interfaces[i];
-                break;
-            }
-        }
+    qdata->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
+
+    if (data.n_interfaces[i] <= 0) {
+      qdata->quota = cpu_to_le32(0);
     }
-
-    if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
-        /*
-         * Reserve quota for the low latency binding in case that
-         * there are several data bindings but only a single
-         * low latency one. Split the rest of the quota equally
-         * between the other data interfaces.
-         */
-        quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
-        quota_rem = QUOTA_100 - n_non_lowlat * quota - QUOTA_LOWLAT_MIN;
-        IWL_DEBUG_QUOTA(
-            mvm, "quota: low-latency binding active, remaining quota per other binding: %d\n",
-            quota);
-    } else if (num_active_macs) {
-        /*
-         * There are 0 or more than 1 low latency bindings, or all the
-         * data interfaces belong to the single low latency binding.
-         * Split the quota equally between the data interfaces.
-         */
-        quota = QUOTA_100 / num_active_macs;
-        quota_rem = QUOTA_100 % num_active_macs;
-        IWL_DEBUG_QUOTA(mvm, "quota: splitting evenly per binding: %d\n", quota);
-    } else {
-        /* values don't really matter - won't be used */
-        quota = 0;
-        quota_rem = 0;
-    }
-
-    for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
-        if (data.colors[i] < 0) { continue; }
-
-        qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, idx);
-
-        qdata->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
-
-        if (data.n_interfaces[i] <= 0) {
-            qdata->quota = cpu_to_le32(0);
-        }
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-        else if (data.dbgfs_min[i]) {
-            qdata->quota = cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100);
-        }
+    else if (data.dbgfs_min[i]) {
+      qdata->quota = cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100);
+    }
 #endif
-        else if (data.n_low_latency_bindings == 1 && n_non_lowlat && data.low_latency[i])
-        /*
-         * There is more than one binding, but only one of the
-         * bindings is in low latency. For this case, allocate
-         * the minimal required quota for the low latency
-         * binding.
-         */
-        {
-            qdata->quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
-        } else {
-            qdata->quota = cpu_to_le32(quota * data.n_interfaces[i]);
-        }
-
-        WARN_ONCE(le32_to_cpu(qdata->quota) > QUOTA_100, "Binding=%d, quota=%u > max=%u\n", idx,
-                  le32_to_cpu(qdata->quota), QUOTA_100);
-
-        qdata->max_duration = cpu_to_le32(0);
-
-        idx++;
+    else if (data.n_low_latency_bindings == 1 && n_non_lowlat && data.low_latency[i])
+    /*
+     * There is more than one binding, but only one of the
+     * bindings is in low latency. For this case, allocate
+     * the minimal required quota for the low latency
+     * binding.
+     */
+    {
+      qdata->quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
+    } else {
+      qdata->quota = cpu_to_le32(quota * data.n_interfaces[i]);
     }
 
-    /* Give the remainder of the session to the first data binding */
-    for (i = 0; i < MAX_BINDINGS; i++) {
-        qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
-        if (le32_to_cpu(qdata->quota) != 0) {
-            le32_add_cpu(&qdata->quota, quota_rem);
-            IWL_DEBUG_QUOTA(mvm, "quota: giving remainder of %d to binding %d\n", quota_rem, i);
-            break;
-        }
+    WARN_ONCE(le32_to_cpu(qdata->quota) > QUOTA_100, "Binding=%d, quota=%u > max=%u\n", idx,
+              le32_to_cpu(qdata->quota), QUOTA_100);
+
+    qdata->max_duration = cpu_to_le32(0);
+
+    idx++;
+  }
+
+  /* Give the remainder of the session to the first data binding */
+  for (i = 0; i < MAX_BINDINGS; i++) {
+    qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+    if (le32_to_cpu(qdata->quota) != 0) {
+      le32_add_cpu(&qdata->quota, quota_rem);
+      IWL_DEBUG_QUOTA(mvm, "quota: giving remainder of %d to binding %d\n", quota_rem, i);
+      break;
     }
+  }
 
 #ifdef CPTCFG_IWLMVM_ADVANCED_QUOTA_MGMT
 out:
 #endif
-    iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
+  iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
 
-    /* check that we have non-zero quota for all valid bindings */
-    for (i = 0; i < MAX_BINDINGS; i++) {
-        qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
-        last_data = iwl_mvm_quota_cmd_get_quota(mvm, last, i);
-        if (qdata->id_and_color != last_data->id_and_color) { send = true; }
-        if (qdata->max_duration != last_data->max_duration) { send = true; }
-        if (abs((int)le32_to_cpu(qdata->quota) - (int)le32_to_cpu(last_data->quota)) >
-            IWL_MVM_QUOTA_THRESHOLD) {
-            send = true;
-        }
-        if (qdata->id_and_color == cpu_to_le32(FW_CTXT_INVALID)) { continue; }
-        WARN_ONCE(qdata->quota == 0, "zero quota on binding %d\n", i);
+  /* check that we have non-zero quota for all valid bindings */
+  for (i = 0; i < MAX_BINDINGS; i++) {
+    qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+    last_data = iwl_mvm_quota_cmd_get_quota(mvm, last, i);
+    if (qdata->id_and_color != last_data->id_and_color) {
+      send = true;
     }
+    if (qdata->max_duration != last_data->max_duration) {
+      send = true;
+    }
+    if (abs((int)le32_to_cpu(qdata->quota) - (int)le32_to_cpu(last_data->quota)) >
+        IWL_MVM_QUOTA_THRESHOLD) {
+      send = true;
+    }
+    if (qdata->id_and_color == cpu_to_le32(FW_CTXT_INVALID)) {
+      continue;
+    }
+    WARN_ONCE(qdata->quota == 0, "zero quota on binding %d\n", i);
+  }
 
 #ifdef CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA
-    /*
-     * Zero quota for P2P client MAC as part of a WA to pass P2P OPPPS
-     * certification test. Refer to IWLMVM_P2P_OPPPS_TEST_WA description in
-     * Kconfig.noupstream for details.
-     */
-    if (mvm->p2p_opps_test_wa_vif) { iwl_mvm_adjust_quota_for_p2p_wa(mvm, &cmd); }
+  /*
+   * Zero quota for P2P client MAC as part of a WA to pass P2P OPPPS
+   * certification test. Refer to IWLMVM_P2P_OPPPS_TEST_WA description in
+   * Kconfig.noupstream for details.
+   */
+  if (mvm->p2p_opps_test_wa_vif) {
+    iwl_mvm_adjust_quota_for_p2p_wa(mvm, &cmd);
+  }
 #endif
 
-    if (!send && !force_update) {
-        /* don't send a practically unchanged command, the firmware has
-         * to re-initialize a lot of state and that can have an adverse
-         * impact on it
-         */
-        return 0;
-    }
+  if (!send && !force_update) {
+    /* don't send a practically unchanged command, the firmware has
+     * to re-initialize a lot of state and that can have an adverse
+     * impact on it
+     */
+    return 0;
+  }
 
-    err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, iwl_mvm_quota_cmd_size(mvm), &cmd);
+  err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, iwl_mvm_quota_cmd_size(mvm), &cmd);
 
-    if (err) {
-        IWL_ERR(mvm, "Failed to send quota: %d\n", err);
-    } else {
-        mvm->last_quota_cmd = cmd;
-    }
-    return err;
+  if (err) {
+    IWL_ERR(mvm, "Failed to send quota: %d\n", err);
+  } else {
+    mvm->last_quota_cmd = cmd;
+  }
+  return err;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rateScaleMng.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rateScaleMng.c
index 8b8db65..19e7ed9 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rateScaleMng.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rateScaleMng.c
@@ -49,43 +49,43 @@
 #define MSB2ORD msb2ord
 #define LSB2ORD lsb2ord
 
-static inline unsigned long msb2ord(unsigned long x) {
-    return find_last_bit(&x, BITS_PER_LONG);
-}
+static inline unsigned long msb2ord(unsigned long x) { return find_last_bit(&x, BITS_PER_LONG); }
 
-static inline unsigned long lsb2ord(unsigned long x) {
-    return find_first_bit(&x, BITS_PER_LONG);
-}
+static inline unsigned long lsb2ord(unsigned long x) { return find_first_bit(&x, BITS_PER_LONG); }
 
 // TODO - move to coex.c
 static bool btCoexManagerIsAntAvailable(struct iwl_mvm* mvm, uint8_t ant) {
-    if (mvm->cfg->bt_shared_single_ant) { return true; }
+  if (mvm->cfg->bt_shared_single_ant) {
+    return true;
+  }
 
-    if (!(ant & ~mvm->cfg->non_shared_ant)) { return true; }
+  if (!(ant & ~mvm->cfg->non_shared_ant)) {
+    return true;
+  }
 
-    return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
+  return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
 }
 
 static bool btCoexManagerBtOwnsAnt(struct iwl_mvm* mvm) {
-    return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >= BT_HIGH_TRAFFIC;
+  return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >= BT_HIGH_TRAFFIC;
 }
 
 typedef struct _TLC_STAT_COMMON_API_S {
-    /* @brief number of packets sent
-     * txed[0] - rate index 0
-     * txed[1] - rate index != 0
-     */
-    U16 txed[2];
-    /**
-     * @brief number of packets we got acknowledgment for packet sent
-     * acked[0] - rate index 0
-     * acked[1] - rate index != 0
-     */
-    U16 acked[2];
-    /* Number of frames we got BA response for (regardless of success) */
-    U16 trafficLoad;
-    U16 baTxed;
-    U16 baAcked;
+  /* @brief number of packets sent
+   * txed[0] - rate index 0
+   * txed[1] - rate index != 0
+   */
+  U16 txed[2];
+  /**
+   * @brief number of packets we got acknowledgment for packet sent
+   * acked[0] - rate index 0
+   * acked[1] - rate index != 0
+   */
+  U16 acked[2];
+  /* Number of frames we got BA response for (regardless of success) */
+  U16 trafficLoad;
+  U16 baTxed;
+  U16 baAcked;
 } TLC_STAT_COMMON_API_S;
 
 static const U08 RS_NON_HT_RATE_TO_API_RATE[] = {
@@ -130,14 +130,14 @@
 };
 
 typedef struct _RS_MNG_DYN_BW_STAY {
-    RS_MCS_E lowestStayMcs;
-    RS_MCS_E highestStayMcs;
+  RS_MCS_E lowestStayMcs;
+  RS_MCS_E highestStayMcs;
 } RS_MNG_DYN_BW_STAY;
 
-#define RS_MNG_DYN_BW_STAY_MCS(_bw, _nss1min, _nss1max, _nss2min, _nss2max)                \
-    [CHANNEL_WIDTH##                                                                       \
-        _bw] = {{.lowestStayMcs = RS_MCS_##_nss1min, .highestStayMcs = RS_MCS_##_nss1max}, \
-                {.lowestStayMcs = RS_MCS_##_nss2min, .highestStayMcs = RS_MCS_##_nss2max}}
+#define RS_MNG_DYN_BW_STAY_MCS(_bw, _nss1min, _nss1max, _nss2min, _nss2max)              \
+  [CHANNEL_WIDTH##                                                                       \
+      _bw] = {{.lowestStayMcs = RS_MCS_##_nss1min, .highestStayMcs = RS_MCS_##_nss1max}, \
+              {.lowestStayMcs = RS_MCS_##_nss2min, .highestStayMcs = RS_MCS_##_nss2max}}
 
 // thresholds above/below which bandwidth will be increase/decreased
 static RS_MNG_DYN_BW_STAY g_rsMngDynBwStayMcs[][2] = {
@@ -913,346 +913,356 @@
 
 static void _rsMngRateCheckSet(const RS_MNG_RATE_S* rsMngRate,
                                RS_MNG_RATE_SETTING_BITMAP_E setting) {
-    WARN_ON(rsMngRate->unset & setting);
+  WARN_ON(rsMngRate->unset & setting);
 }
 
 static RS_MNG_MODULATION_E _rsMngRateGetModulation(const RS_MNG_RATE_S* rsMngRate) {
-    RS_MNG_MODULATION_E modulation;
+  RS_MNG_MODULATION_E modulation;
 
-    if (IS_RATE_OFDM_VHT_API_M(rsMngRate->rate) || IS_RATE_OFDM_HT_API_M(rsMngRate->rate) ||
-        IS_RATE_OFDM_HE_API_M(rsMngRate->rate)) {
-        if (GET_MIMO_INDEX_API_M(rsMngRate->rate) == SISO_INDX) {
-            modulation = RS_MNG_MODUL_SISO;
-        } else {
-            modulation = RS_MNG_MODUL_MIMO2;
-        }
+  if (IS_RATE_OFDM_VHT_API_M(rsMngRate->rate) || IS_RATE_OFDM_HT_API_M(rsMngRate->rate) ||
+      IS_RATE_OFDM_HE_API_M(rsMngRate->rate)) {
+    if (GET_MIMO_INDEX_API_M(rsMngRate->rate) == SISO_INDX) {
+      modulation = RS_MNG_MODUL_SISO;
     } else {
-        modulation = RS_MNG_MODUL_LEGACY;
+      modulation = RS_MNG_MODUL_MIMO2;
     }
+  } else {
+    modulation = RS_MNG_MODUL_LEGACY;
+  }
 
-    _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_MODULATION);
-    return modulation;
+  _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_MODULATION);
+  return modulation;
 }
 
 static void _rsMngRateSetModulation(RS_MNG_RATE_S* rsMngRate, RS_MNG_MODULATION_E mod) {
-    if (mod == RS_MNG_MODUL_MIMO2) {
-        if (IS_RATE_OFDM_VHT_API_M(rsMngRate->rate) || IS_RATE_OFDM_HE_API_M(rsMngRate->rate)) {
-            rsMngRate->rate.rate_n_flags |= RATE_MCS_VHT_MIMO2;
-        } else {
-            rsMngRate->rate.rate_n_flags |= RATE_MCS_HT_MIMO2_MSK;
-        }
+  if (mod == RS_MNG_MODUL_MIMO2) {
+    if (IS_RATE_OFDM_VHT_API_M(rsMngRate->rate) || IS_RATE_OFDM_HE_API_M(rsMngRate->rate)) {
+      rsMngRate->rate.rate_n_flags |= RATE_MCS_VHT_MIMO2;
+    } else {
+      rsMngRate->rate.rate_n_flags |= RATE_MCS_HT_MIMO2_MSK;
     }
+  }
 
-    if (mod == RS_MNG_MODUL_SISO) {
-        if (IS_RATE_OFDM_VHT_API_M(rsMngRate->rate) || IS_RATE_OFDM_HE_API_M(rsMngRate->rate)) {
-            rsMngRate->rate.rate_n_flags &= ~RATE_MCS_VHT_MIMO2;
-        } else {
-            rsMngRate->rate.rate_n_flags &= ~RATE_MCS_HT_MIMO2_MSK;
-        }
+  if (mod == RS_MNG_MODUL_SISO) {
+    if (IS_RATE_OFDM_VHT_API_M(rsMngRate->rate) || IS_RATE_OFDM_HE_API_M(rsMngRate->rate)) {
+      rsMngRate->rate.rate_n_flags &= ~RATE_MCS_VHT_MIMO2;
+    } else {
+      rsMngRate->rate.rate_n_flags &= ~RATE_MCS_HT_MIMO2_MSK;
     }
+  }
 
-    rsMngRate->unset &= ~RS_MNG_RATE_MODULATION;
-    rsMngRate->unset |= RS_MNG_RATE_STBC;
+  rsMngRate->unset &= ~RS_MNG_RATE_MODULATION;
+  rsMngRate->unset |= RS_MNG_RATE_STBC;
 }
 
 static TLC_MNG_MODE_E _rsMngRateGetMode(const RS_MNG_RATE_S* rsMngRate) {
-    TLC_MNG_MODE_E rateMode;
+  TLC_MNG_MODE_E rateMode;
 
-    _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_MODE);
+  _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_MODE);
 
-    if (IS_RATE_OFDM_HE_API_M(rsMngRate->rate)) {
-        rateMode = TLC_MNG_MODE_HE;
-    } else if (IS_RATE_OFDM_VHT_API_M(rsMngRate->rate)) {
-        rateMode = TLC_MNG_MODE_VHT;
-    } else if (IS_RATE_OFDM_HT_API_M(rsMngRate->rate)) {
-        rateMode = TLC_MNG_MODE_HT;
-    } else {
-        rateMode = TLC_MNG_MODE_LEGACY;
-    }
+  if (IS_RATE_OFDM_HE_API_M(rsMngRate->rate)) {
+    rateMode = TLC_MNG_MODE_HE;
+  } else if (IS_RATE_OFDM_VHT_API_M(rsMngRate->rate)) {
+    rateMode = TLC_MNG_MODE_VHT;
+  } else if (IS_RATE_OFDM_HT_API_M(rsMngRate->rate)) {
+    rateMode = TLC_MNG_MODE_HT;
+  } else {
+    rateMode = TLC_MNG_MODE_LEGACY;
+  }
 
-    return rateMode;
+  return rateMode;
 }
 
 static void _rsMngRateSetMode(RS_MNG_RATE_S* rsMngRate, TLC_MNG_MODE_E mode) {
-    /* This resets the rate completely */
-    rsMngRate->rate.rate_n_flags = 0;
-    /* We don't really use bfer, so don't mark it as reset */
-    rsMngRate->unset = RS_MNG_RATE_SET_ALL & ~(RS_MNG_RATE_MODE | RS_MNG_RATE_BFER);
+  /* This resets the rate completely */
+  rsMngRate->rate.rate_n_flags = 0;
+  /* We don't really use bfer, so don't mark it as reset */
+  rsMngRate->unset = RS_MNG_RATE_SET_ALL & ~(RS_MNG_RATE_MODE | RS_MNG_RATE_BFER);
 
-    switch (mode) {
+  switch (mode) {
     case TLC_MNG_MODE_LEGACY:
-        break;
+      break;
     case TLC_MNG_MODE_HT:
-        rsMngRate->rate.rate_n_flags |= RATE_MCS_HT_MSK;
-        break;
+      rsMngRate->rate.rate_n_flags |= RATE_MCS_HT_MSK;
+      break;
     case TLC_MNG_MODE_VHT:
-        rsMngRate->rate.rate_n_flags |= RATE_MCS_VHT_MSK;
-        break;
+      rsMngRate->rate.rate_n_flags |= RATE_MCS_VHT_MSK;
+      break;
     case TLC_MNG_MODE_HE:
-        rsMngRate->rate.rate_n_flags |= RATE_MCS_HE_MSK;
-        break;
+      rsMngRate->rate.rate_n_flags |= RATE_MCS_HE_MSK;
+      break;
     default:
-        break;
-    }
+      break;
+  }
 }
 
 static U32 _rsMngRateGetBw(const RS_MNG_RATE_S* rsMngRate) {
-    U32 bw = GET_BW_INDEX_API_M(rsMngRate->rate);
+  U32 bw = GET_BW_INDEX_API_M(rsMngRate->rate);
 
-    _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_BW);
-    return bw;
+  _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_BW);
+  return bw;
 }
 
 static void _rsMngRateSetBw(RS_MNG_RATE_S* rsMngRate, U32 bw) {
-    rsMngRate->rate.rate_n_flags &= ~RATE_MCS_FAT_MSK_API_D;
-    rsMngRate->rate.rate_n_flags |= bw << RATE_MCS_FAT_POS;
-    rsMngRate->unset &= ~RS_MNG_RATE_BW;
+  rsMngRate->rate.rate_n_flags &= ~RATE_MCS_FAT_MSK_API_D;
+  rsMngRate->rate.rate_n_flags |= bw << RATE_MCS_FAT_POS;
+  rsMngRate->unset &= ~RS_MNG_RATE_BW;
 }
 
 static RS_MNG_GI_E _rsMngRateGetGi(const RS_MNG_RATE_S* rsMngRate) {
-    _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_GI);
+  _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_GI);
 
-    if (_rsMngRateGetMode(rsMngRate) != TLC_MNG_MODE_HE) {
-        return (rsMngRate->rate.rate_n_flags & RATE_MCS_SGI_MSK) ? HT_VHT_SGI : HT_VHT_NGI;
-    }
+  if (_rsMngRateGetMode(rsMngRate) != TLC_MNG_MODE_HE) {
+    return (rsMngRate->rate.rate_n_flags & RATE_MCS_SGI_MSK) ? HT_VHT_SGI : HT_VHT_NGI;
+  }
 
-    switch (GET_OFDM_HE_GI_LTF_INDX_API_M(rsMngRate->rate)) {
+  switch (GET_OFDM_HE_GI_LTF_INDX_API_M(rsMngRate->rate)) {
     case 0:
     case 1:
-        return HE_0_8_GI;
+      return HE_0_8_GI;
     case 2:
-        return HE_1_6_GI;
+      return HE_1_6_GI;
     case 3:
-        return HE_3_2_GI;
-    }
+      return HE_3_2_GI;
+  }
 
-    return HT_VHT_NGI;  // impossible
+  return HT_VHT_NGI;  // impossible
 }
 
 static void _rsMngRateSetGi(RS_MNG_RATE_S* rsMngRate, RS_MNG_GI_E gi) {
-    if (_rsMngRateGetMode(rsMngRate) != TLC_MNG_MODE_HE) {
-        WARN_ON(!(gi == HT_VHT_NGI || gi == HT_VHT_SGI));
-        rsMngRate->rate.rate_n_flags &= ~RATE_MCS_SGI_MSK;
-        rsMngRate->rate.rate_n_flags |= (gi == HT_VHT_SGI) << RATE_MCS_SGI_POS;
-    } else {
-        rsMngRate->rate.rate_n_flags &= ~RATE_MCS_HE_GI_LTF_MSK;
-        switch (gi) {
-        case HT_VHT_NGI:
-        case HT_VHT_SGI:
-            WARN_ON(1);
-            break;
-        case HE_0_8_GI:
-            // 2xLTF
-            rsMngRate->rate.rate_n_flags |= 1 << RATE_MCS_HE_GI_LTF_POS;
-            break;
-        case HE_1_6_GI:
-            // 2xLTF
-            rsMngRate->rate.rate_n_flags |= 2 << RATE_MCS_HE_GI_LTF_POS;
-            break;
-        case HE_3_2_GI:
-            // 4xLTF
-            rsMngRate->rate.rate_n_flags |= 3 << RATE_MCS_HE_GI_LTF_POS;
-            break;
-        }
+  if (_rsMngRateGetMode(rsMngRate) != TLC_MNG_MODE_HE) {
+    WARN_ON(!(gi == HT_VHT_NGI || gi == HT_VHT_SGI));
+    rsMngRate->rate.rate_n_flags &= ~RATE_MCS_SGI_MSK;
+    rsMngRate->rate.rate_n_flags |= (gi == HT_VHT_SGI) << RATE_MCS_SGI_POS;
+  } else {
+    rsMngRate->rate.rate_n_flags &= ~RATE_MCS_HE_GI_LTF_MSK;
+    switch (gi) {
+      case HT_VHT_NGI:
+      case HT_VHT_SGI:
+        WARN_ON(1);
+        break;
+      case HE_0_8_GI:
+        // 2xLTF
+        rsMngRate->rate.rate_n_flags |= 1 << RATE_MCS_HE_GI_LTF_POS;
+        break;
+      case HE_1_6_GI:
+        // 2xLTF
+        rsMngRate->rate.rate_n_flags |= 2 << RATE_MCS_HE_GI_LTF_POS;
+        break;
+      case HE_3_2_GI:
+        // 4xLTF
+        rsMngRate->rate.rate_n_flags |= 3 << RATE_MCS_HE_GI_LTF_POS;
+        break;
     }
+  }
 
-    rsMngRate->unset &= ~RS_MNG_RATE_GI;
+  rsMngRate->unset &= ~RS_MNG_RATE_GI;
 }
 
 static void _rsMngRateSetLdpc(RS_MNG_RATE_S* rsMngRate, BOOLEAN ldpc) {
-    rsMngRate->rate.rate_n_flags &= ~RATE_MCS_LDPC_MSK;
-    rsMngRate->rate.rate_n_flags |= (!!ldpc) << RATE_MCS_LDPC_POS;
-    rsMngRate->unset &= ~RS_MNG_RATE_LDPC;
+  rsMngRate->rate.rate_n_flags &= ~RATE_MCS_LDPC_MSK;
+  rsMngRate->rate.rate_n_flags |= (!!ldpc) << RATE_MCS_LDPC_POS;
+  rsMngRate->unset &= ~RS_MNG_RATE_LDPC;
 }
 
 static BOOLEAN _rsMngRateGetStbc(const RS_MNG_RATE_S* rsMngRate) {
-    _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_STBC);
-    return !!(rsMngRate->rate.rate_n_flags & RATE_MCS_STBC_MSK);
+  _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_STBC);
+  return !!(rsMngRate->rate.rate_n_flags & RATE_MCS_STBC_MSK);
 }
 
 static void _rsMngRateSetStbc(RS_MNG_RATE_S* rsMngRate, BOOLEAN stbc) {
-    WARN_ON(!(!stbc || !(rsMngRate->rate.rate_n_flags & RATE_MCS_HE_DCM_MSK)));
+  WARN_ON(!(!stbc || !(rsMngRate->rate.rate_n_flags & RATE_MCS_HE_DCM_MSK)));
 
-    rsMngRate->rate.rate_n_flags &= ~RATE_MCS_STBC_MSK;
-    rsMngRate->rate.rate_n_flags |= (!!stbc) << RATE_MCS_STBC_POS;
-    rsMngRate->unset &= ~RS_MNG_RATE_STBC;
-    rsMngRate->unset |= RS_MNG_RATE_ANT;
+  rsMngRate->rate.rate_n_flags &= ~RATE_MCS_STBC_MSK;
+  rsMngRate->rate.rate_n_flags |= (!!stbc) << RATE_MCS_STBC_POS;
+  rsMngRate->unset &= ~RS_MNG_RATE_STBC;
+  rsMngRate->unset |= RS_MNG_RATE_ANT;
 }
 
 static void _rsMngRateSetBfer(RS_MNG_RATE_S* rsMngRate, BOOLEAN bfer) {
-    rsMngRate->rate.rate_n_flags &= ~RATE_MCS_BF_MSK;
-    rsMngRate->rate.rate_n_flags |= (!!bfer) << RATE_MCS_BF_POS;
-    rsMngRate->unset &= ~RS_MNG_RATE_BFER;
+  rsMngRate->rate.rate_n_flags &= ~RATE_MCS_BF_MSK;
+  rsMngRate->rate.rate_n_flags |= (!!bfer) << RATE_MCS_BF_POS;
+  rsMngRate->unset &= ~RS_MNG_RATE_BFER;
 }
 
 static U08 _rsMngRateGetAnt(const RS_MNG_RATE_S* rsMngRate) {
-    _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_ANT);
-    return (U08)GET_ANT_CHAIN_API_M(rsMngRate->rate);
-    ;
+  _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_ANT);
+  return (U08)GET_ANT_CHAIN_API_M(rsMngRate->rate);
+  ;
 }
 
 static void _rsMngRateSetAnt(RS_MNG_RATE_S* rsMngRate, U08 ant) {
-    // compilation asserts to make sure tlc offload api and rate api agree
-    BUILD_BUG_ON(!(TLC_MNG_CHAIN_A_MSK ==
-                   SHIFT_AND_MASK(RATE_MCS_ANT_A_MSK, RATE_MCS_ANT_ABC_MSK, RATE_MCS_ANT_A_POS)));
-    BUILD_BUG_ON(!(TLC_MNG_CHAIN_B_MSK ==
-                   SHIFT_AND_MASK(RATE_MCS_ANT_B_MSK, RATE_MCS_ANT_ABC_MSK, RATE_MCS_ANT_A_POS)));
+  // compilation asserts to make sure tlc offload api and rate api agree
+  BUILD_BUG_ON(!(TLC_MNG_CHAIN_A_MSK ==
+                 SHIFT_AND_MASK(RATE_MCS_ANT_A_MSK, RATE_MCS_ANT_ABC_MSK, RATE_MCS_ANT_A_POS)));
+  BUILD_BUG_ON(!(TLC_MNG_CHAIN_B_MSK ==
+                 SHIFT_AND_MASK(RATE_MCS_ANT_B_MSK, RATE_MCS_ANT_ABC_MSK, RATE_MCS_ANT_A_POS)));
 
-    rsMngRate->rate.rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
-    rsMngRate->rate.rate_n_flags |= ant << RATE_MCS_ANT_A_POS;
+  rsMngRate->rate.rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+  rsMngRate->rate.rate_n_flags |= ant << RATE_MCS_ANT_A_POS;
 
-    rsMngRate->unset &= ~RS_MNG_RATE_ANT;
+  rsMngRate->unset &= ~RS_MNG_RATE_ANT;
 }
 
-static U08 _rsMngRateGetIdx(const RS_MNG_RATE_S* rsMngRate) {
-    return rsMngRate->idx.idx;
-}
+static U08 _rsMngRateGetIdx(const RS_MNG_RATE_S* rsMngRate) { return rsMngRate->idx.idx; }
 
 static void _rsMngRateSetIdx(RS_MNG_RATE_S* rsMngRate, U08 idx) {
-    rsMngRate->idx.idx = idx;
+  rsMngRate->idx.idx = idx;
 
-    // DCM and STBC can't coexist. Since DCM is set here, make sure stbc has been set before setting
-    // the index, so the stbc setting could be overriden here without issue
-    _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_STBC);
+  // DCM and STBC can't coexist. Since DCM is set here, make sure stbc has been set before setting
+  // the index, so the stbc setting could be overridden here without issue
+  _rsMngRateCheckSet(rsMngRate, RS_MNG_RATE_STBC);
 
-    switch (_rsMngRateGetMode(rsMngRate)) {
+  switch (_rsMngRateGetMode(rsMngRate)) {
     case TLC_MNG_MODE_HE:
-        rsMngRate->rate.rate_n_flags &= ~RATE_MCS_VHT_RATE_CODE_MSK;
-        if (idx == RS_MCS_0_HE_ER_AND_DCM) {
-            rsMngRate->rate.rate_n_flags |=
-                RATE_MCS_HE_DCM_MSK | (RATE_MCS_HE_EXT_RANGE << RATE_MCS_VHT_HE_TYPE_POS);
-            rsMngRate->rate.rate_n_flags &= ~RATE_MCS_STBC_MSK;
-        } else {
-            rsMngRate->rate.rate_n_flags &= ~(RATE_MCS_HE_DCM_MSK | RATE_MCS_VHT_HE_TYPE_MSK);
-            rsMngRate->rate.rate_n_flags |= idx;
-        }
-        break;
+      rsMngRate->rate.rate_n_flags &= ~RATE_MCS_VHT_RATE_CODE_MSK;
+      if (idx == RS_MCS_0_HE_ER_AND_DCM) {
+        rsMngRate->rate.rate_n_flags |=
+            RATE_MCS_HE_DCM_MSK | (RATE_MCS_HE_EXT_RANGE << RATE_MCS_VHT_HE_TYPE_POS);
+        rsMngRate->rate.rate_n_flags &= ~RATE_MCS_STBC_MSK;
+      } else {
+        rsMngRate->rate.rate_n_flags &= ~(RATE_MCS_HE_DCM_MSK | RATE_MCS_VHT_HE_TYPE_MSK);
+        rsMngRate->rate.rate_n_flags |= idx;
+      }
+      break;
     case TLC_MNG_MODE_VHT:
-        rsMngRate->rate.rate_n_flags &= ~RATE_MCS_VHT_RATE_CODE_MSK;
-        rsMngRate->rate.rate_n_flags |= idx;
-        break;
+      rsMngRate->rate.rate_n_flags &= ~RATE_MCS_VHT_RATE_CODE_MSK;
+      rsMngRate->rate.rate_n_flags |= idx;
+      break;
     case TLC_MNG_MODE_HT:
-        rsMngRate->rate.rate_n_flags &= ~RATE_MCS_HT_RATE_CODE_MSK;
-        rsMngRate->rate.rate_n_flags |= idx;
-        break;
+      rsMngRate->rate.rate_n_flags &= ~RATE_MCS_HT_RATE_CODE_MSK;
+      rsMngRate->rate.rate_n_flags |= idx;
+      break;
     case TLC_MNG_MODE_LEGACY: {
-        RS_NON_HT_RATES_E nonHtIdx = (RS_NON_HT_RATES_E)idx;
+      RS_NON_HT_RATES_E nonHtIdx = (RS_NON_HT_RATES_E)idx;
 
-        rsMngRate->rate.rate_n_flags &= ~0xff;
-        rsMngRate->rate.rate_n_flags |= RS_NON_HT_RATE_TO_API_RATE[nonHtIdx];
-        if (nonHtIdx <= RS_NON_HT_RATE_CCK_LAST) {
-            rsMngRate->rate.rate_n_flags |= RATE_MCS_CCK_MSK;
-        } else {
-            rsMngRate->rate.rate_n_flags &= ~RATE_MCS_CCK_MSK;
-        }
-        break;
+      rsMngRate->rate.rate_n_flags &= ~0xff;
+      rsMngRate->rate.rate_n_flags |= RS_NON_HT_RATE_TO_API_RATE[nonHtIdx];
+      if (nonHtIdx <= RS_NON_HT_RATE_CCK_LAST) {
+        rsMngRate->rate.rate_n_flags |= RATE_MCS_CCK_MSK;
+      } else {
+        rsMngRate->rate.rate_n_flags &= ~RATE_MCS_CCK_MSK;
+      }
+      break;
     }
     default:
-        // shouldn't happen. return now so the index remains unset and an assert will be hit when
-        // trying to build the rate table with this rate
-        return;
-    }
+      // shouldn't happen. return now so the index remains unset and an assert will be hit when
+      // trying to build the rate table with this rate
+      return;
+  }
 
-    rsMngRate->unset &= ~RS_MNG_RATE_U_IDX;
+  rsMngRate->unset &= ~RS_MNG_RATE_U_IDX;
 }
 
 static void _rsMngRateInvalidate(RS_MNG_RATE_S* rsMngRate) {
-    rsMngRate->unset = RS_MNG_RATE_SET_ALL;
+  rsMngRate->unset = RS_MNG_RATE_SET_ALL;
 }
 
 static U16 _rsMngGetSupportedRatesByModeAndBw(const RS_MNG_STA_INFO_S* staInfo,
                                               RS_MNG_MODULATION_E modulation,
                                               TLC_MNG_CH_WIDTH_E bw) {
-    BOOLEAN isBw160;
-    U32 supportedRates;
+  BOOLEAN isBw160;
+  U32 supportedRates;
 
-    if (modulation == RS_MNG_MODUL_LEGACY) { return staInfo->config.nonHt; }
+  if (modulation == RS_MNG_MODUL_LEGACY) {
+    return staInfo->config.nonHt;
+  }
 
-    isBw160 = (bw == TLC_MNG_CH_WIDTH_160MHZ);
-    supportedRates =
-        (modulation == RS_MNG_MODUL_SISO ? staInfo->config.mcs[TLC_MNG_NSS_1][isBw160]
-                                         : staInfo->config.mcs[TLC_MNG_NSS_2][isBw160]);
+  isBw160 = (bw == TLC_MNG_CH_WIDTH_160MHZ);
+  supportedRates = (modulation == RS_MNG_MODUL_SISO ? staInfo->config.mcs[TLC_MNG_NSS_1][isBw160]
+                                                    : staInfo->config.mcs[TLC_MNG_NSS_2][isBw160]);
 
-    if (staInfo->config.bestSuppMode == TLC_MNG_MODE_VHT && bw == CHANNEL_WIDTH20) {
-        // In VHT, mcs 9 is never posible at 20mhz bandwidth
-        supportedRates &= ~BIT(RS_MCS_9);
-    }
+  if (staInfo->config.bestSuppMode == TLC_MNG_MODE_VHT && bw == CHANNEL_WIDTH20) {
+    // In VHT, mcs 9 is never possible at 20mhz bandwidth
+    supportedRates &= ~BIT(RS_MCS_9);
+  }
 
-    return (U16)supportedRates;
+  return (U16)supportedRates;
 }
 
 static U16 _rsMngGetSuppRatesSameMode(const RS_MNG_STA_INFO_S* staInfo,
                                       const RS_MNG_RATE_S* rsMngRate) {
-    return _rsMngGetSupportedRatesByModeAndBw(staInfo, _rsMngRateGetModulation(rsMngRate),
-                                              _rsMngRateGetBw(rsMngRate));
+  return _rsMngGetSupportedRatesByModeAndBw(staInfo, _rsMngRateGetModulation(rsMngRate),
+                                            _rsMngRateGetBw(rsMngRate));
 }
 
 static TLC_MNG_CH_WIDTH_E _rsMngGetMaxChWidth(const RS_MNG_STA_INFO_S* staInfo) {
-    return (TLC_MNG_CH_WIDTH_E)staInfo->config.maxChWidth;
+  return (TLC_MNG_CH_WIDTH_E)staInfo->config.maxChWidth;
 }
 
 static BOOLEAN _rsMngAreAggsSupported(TLC_MNG_MODE_E bestSuppMode) {
-    return bestSuppMode > TLC_MNG_MODE_LEGACY;
+  return bestSuppMode > TLC_MNG_MODE_LEGACY;
 }
 
 static BOOLEAN _rsMngIsDcmSupported(const RS_MNG_STA_INFO_S* staInfo, BOOLEAN isMimo) {
-    if (isMimo) { return !!(staInfo->config.configFlags & TLC_MNG_CONFIG_FLAGS_HE_DCM_NSS_2_MSK); }
+  if (isMimo) {
+    return !!(staInfo->config.configFlags & TLC_MNG_CONFIG_FLAGS_HE_DCM_NSS_2_MSK);
+  }
 
-    return !!(staInfo->config.configFlags & TLC_MNG_CONFIG_FLAGS_HE_DCM_NSS_1_MSK);
+  return !!(staInfo->config.configFlags & TLC_MNG_CONFIG_FLAGS_HE_DCM_NSS_1_MSK);
 }
 
 static BOOLEAN _rsMngRateIsOptimal(const RS_MNG_STA_INFO_S* staInfo,
                                    const RS_MNG_RATE_S* rsMngRate) {
-    U32 bw = _rsMngRateGetBw(rsMngRate);
-    BOOLEAN mimoAllowed = staInfo->config.mcs[TLC_MNG_NSS_2][bw == CHANNEL_WIDTH160];
+  U32 bw = _rsMngRateGetBw(rsMngRate);
+  BOOLEAN mimoAllowed = staInfo->config.mcs[TLC_MNG_NSS_2][bw == CHANNEL_WIDTH160];
 
-    if (_rsMngRateGetMode(rsMngRate) != staInfo->config.bestSuppMode) { return FALSE; }
+  if (_rsMngRateGetMode(rsMngRate) != staInfo->config.bestSuppMode) {
+    return FALSE;
+  }
 
-    if (_rsMngRateGetIdx(rsMngRate) != MSB2ORD(_rsMngGetSuppRatesSameMode(staInfo, rsMngRate))) {
-        return FALSE;
-    }
+  if (_rsMngRateGetIdx(rsMngRate) != MSB2ORD(_rsMngGetSuppRatesSameMode(staInfo, rsMngRate))) {
+    return FALSE;
+  }
 
-    // TODO: check for best ltf/gi in HE. This condition currently means that tpc won't be enabled
-    // in HE.
-    if ((staInfo->config.sgiChWidthSupport & BIT(bw)) && _rsMngRateGetGi(rsMngRate) != HT_VHT_SGI) {
-        return FALSE;
-    }
+  // TODO: check for best ltf/gi in HE. This condition currently means that tpc won't be enabled
+  // in HE.
+  if ((staInfo->config.sgiChWidthSupport & BIT(bw)) && _rsMngRateGetGi(rsMngRate) != HT_VHT_SGI) {
+    return FALSE;
+  }
 
-    if (mimoAllowed && _rsMngRateGetModulation(rsMngRate) != RS_MNG_MODUL_MIMO2) { return FALSE; }
+  if (mimoAllowed && _rsMngRateGetModulation(rsMngRate) != RS_MNG_MODUL_MIMO2) {
+    return FALSE;
+  }
 
-    if (bw != _rsMngGetMaxChWidth(staInfo)) { return FALSE; }
+  if (bw != _rsMngGetMaxChWidth(staInfo)) {
+    return FALSE;
+  }
 
-    return TRUE;
+  return TRUE;
 }
 
 static U08 _rsMngGetHigherRateIdx(U08 initRateIdx, U32 supportedRatesMsk) {
-    U32 tmpRateMsk;
+  U32 tmpRateMsk;
 
-    if (initRateIdx == RS_MCS_0_HE_ER_AND_DCM) { return (U08)LSB2ORD(supportedRatesMsk); }
+  if (initRateIdx == RS_MCS_0_HE_ER_AND_DCM) {
+    return (U08)LSB2ORD(supportedRatesMsk);
+  }
 
-    tmpRateMsk = supportedRatesMsk & (0xFFFFFFFF << (initRateIdx + 1));
+  tmpRateMsk = supportedRatesMsk & (0xFFFFFFFF << (initRateIdx + 1));
 
-    return (U08)(tmpRateMsk == 0 ? RS_MNG_INVALID_RATE_IDX : LSB2ORD(tmpRateMsk));
+  return (U08)(tmpRateMsk == 0 ? RS_MNG_INVALID_RATE_IDX : LSB2ORD(tmpRateMsk));
 }
 
 static U08 _rsMngGetLowerRateIdx(const RS_MNG_STA_INFO_S* staInfo, const RS_MNG_RATE_S* rate,
                                  U32 supportedRatesMsk) {
-    U08 idx = _rsMngRateGetIdx(rate);
-    U32 tmpRateMsk = (supportedRatesMsk & ((1 << idx) - 1));
+  U08 idx = _rsMngRateGetIdx(rate);
+  U32 tmpRateMsk = (supportedRatesMsk & ((1 << idx) - 1));
 
-    if (idx == RS_MCS_0_HE_ER_AND_DCM) { return RS_MNG_INVALID_RATE_IDX; }
+  if (idx == RS_MCS_0_HE_ER_AND_DCM) {
+    return RS_MNG_INVALID_RATE_IDX;
+  }
 
-    if (tmpRateMsk == 0) {
-        if (_rsMngRateGetMode(rate) == TLC_MNG_MODE_HE &&
-            _rsMngRateGetBw(rate) == CHANNEL_WIDTH20 &&
-            _rsMngIsDcmSupported(staInfo, _rsMngRateGetModulation(rate) == RS_MNG_MODUL_MIMO2)) {
-            return RS_MCS_0_HE_ER_AND_DCM;
-        }
-
-        return RS_MNG_INVALID_RATE_IDX;
+  if (tmpRateMsk == 0) {
+    if (_rsMngRateGetMode(rate) == TLC_MNG_MODE_HE && _rsMngRateGetBw(rate) == CHANNEL_WIDTH20 &&
+        _rsMngIsDcmSupported(staInfo, _rsMngRateGetModulation(rate) == RS_MNG_MODUL_MIMO2)) {
+      return RS_MCS_0_HE_ER_AND_DCM;
     }
 
-    return (U08)MSB2ORD(tmpRateMsk);
+    return RS_MNG_INVALID_RATE_IDX;
+  }
+
+  return (U08)MSB2ORD(tmpRateMsk);
 }
 
 // rs_get_adjacent_rate
@@ -1260,41 +1270,45 @@
 // suppRateDir : use GET_HIGHER_SUPPORTED_RATE or GET_LOWER_SUPPORTED_RATE
 static U08 _rsMngGetAdjacentRateIdx(const RS_MNG_STA_INFO_S* staInfo, const RS_MNG_RATE_S* initRate,
                                     U08 suppRateDir) {
-    U08 initRateIdx = _rsMngRateGetIdx(initRate);
-    U32 supportedRatesMsk = _rsMngGetSuppRatesSameMode(staInfo, initRate);
+  U08 initRateIdx = _rsMngRateGetIdx(initRate);
+  U32 supportedRatesMsk = _rsMngGetSuppRatesSameMode(staInfo, initRate);
 
-    return (U08)(suppRateDir == GET_LOWER_SUPPORTED_RATE
-                     ? _rsMngGetLowerRateIdx(staInfo, initRate, supportedRatesMsk)
-                     : _rsMngGetHigherRateIdx(initRateIdx, supportedRatesMsk));
+  return (U08)(suppRateDir == GET_LOWER_SUPPORTED_RATE
+                   ? _rsMngGetLowerRateIdx(staInfo, initRate, supportedRatesMsk)
+                   : _rsMngGetHigherRateIdx(initRateIdx, supportedRatesMsk));
 }
 
 // TODO - check. what if bt doesn't allow?
 static BOOLEAN _rsMngIsStbcSupported(const RS_MNG_STA_INFO_S* staInfo) {
-    return !!(staInfo->config.configFlags & TLC_MNG_CONFIG_FLAGS_STBC_MSK);
+  return !!(staInfo->config.configFlags & TLC_MNG_CONFIG_FLAGS_STBC_MSK);
 }
 
 static BOOLEAN _rsMngIsStbcAllowed(const RS_MNG_STA_INFO_S* staInfo, const RS_MNG_RATE_S* rate) {
-    if ((iwl_mvm_get_valid_tx_ant(staInfo->mvm) & rsMngGetDualAntMsk()) != rsMngGetDualAntMsk()) {
-        return FALSE;
-    }
-    return _rsMngIsStbcSupported(staInfo) && !(rate->rate.rate_n_flags & RATE_MCS_HE_DCM_MSK);
+  if ((iwl_mvm_get_valid_tx_ant(staInfo->mvm) & rsMngGetDualAntMsk()) != rsMngGetDualAntMsk()) {
+    return FALSE;
+  }
+  return _rsMngIsStbcSupported(staInfo) && !(rate->rate.rate_n_flags & RATE_MCS_HE_DCM_MSK);
 }
 
 static BOOLEAN _rsMngCoexIsLongAggAllowed(const RS_MNG_STA_INFO_S* staInfo) {
-    if (staInfo->config.band != NL80211_BAND_2GHZ) { return TRUE; }
-
-    if (btCoexManagerBtOwnsAnt(staInfo->mvm)) { return FALSE; }
-
+  if (staInfo->config.band != NL80211_BAND_2GHZ) {
     return TRUE;
+  }
+
+  if (btCoexManagerBtOwnsAnt(staInfo->mvm)) {
+    return FALSE;
+  }
+
+  return TRUE;
 }
 
 static BOOLEAN _rsMngIsLdpcAllowed(const RS_MNG_STA_INFO_S* staInfo) {
-    return !!(staInfo->config.configFlags & TLC_MNG_CONFIG_FLAGS_LDPC_MSK);
+  return !!(staInfo->config.configFlags & TLC_MNG_CONFIG_FLAGS_LDPC_MSK);
 }
 
 static BOOLEAN _rsMngIsAntSupported(const RS_MNG_STA_INFO_S* staInfo, U08 ant) {
-    return (ant & staInfo->config.chainsEnabled) == ant &&
-           (iwl_mvm_get_valid_tx_ant(staInfo->mvm) & ant) == ant;
+  return (ant & staInfo->config.chainsEnabled) == ant &&
+         (iwl_mvm_get_valid_tx_ant(staInfo->mvm) & ant) == ant;
 }
 /*******************************************************************************/
 
@@ -1304,101 +1318,113 @@
 
 static BOOLEAN _allowColAnt(const RS_MNG_STA_INFO_S* staInfo, U32 bw,
                             const RS_MNG_COL_ELEM_S* nextCol) {
-    if (!_rsMngIsAntSupported(staInfo, nextCol->ant)) { return FALSE; }
-
-    if (!_rsMngIsAntSupported(staInfo, (U08)(nextCol->ant ^ rsMngGetDualAntMsk()))) {
-        // If the other antenna is disabled for some reason, this antenna is the only one allowed so
-        // we must ignore possible BT-Coex restrictions. Also note that this function is only called
-        // for siso columns, so nextCol->ant always has just one bit set so the xor makes sense.
-        return TRUE;
-    }
-
-    if (staInfo->config.band != NL80211_BAND_2GHZ) { return TRUE; }
-
-    if (btCoexManagerIsAntAvailable(staInfo->mvm, nextCol->ant)) { return TRUE; }
-
+  if (!_rsMngIsAntSupported(staInfo, nextCol->ant)) {
     return FALSE;
+  }
+
+  if (!_rsMngIsAntSupported(staInfo, (U08)(nextCol->ant ^ rsMngGetDualAntMsk()))) {
+    // If the other antenna is disabled for some reason, this antenna is the only one allowed so
+    // we must ignore possible BT-Coex restrictions. Also note that this function is only called
+    // for siso columns, so nextCol->ant always has just one bit set so the xor makes sense.
+    return TRUE;
+  }
+
+  if (staInfo->config.band != NL80211_BAND_2GHZ) {
+    return TRUE;
+  }
+
+  if (btCoexManagerIsAntAvailable(staInfo->mvm, nextCol->ant)) {
+    return TRUE;
+  }
+
+  return FALSE;
 }
 
 static U16 _rsMngGetAggTimeLimit(RS_MNG_STA_INFO_S* staInfo) {
-    // Someone configured debug values, use them no matter what
-    if (staInfo->aggDurationLimit != RS_MNG_AGG_DURATION_LIMIT) {
-        return staInfo->aggDurationLimit;
-    }
+  // Someone configured debug values, use them no matter what
+  if (staInfo->aggDurationLimit != RS_MNG_AGG_DURATION_LIMIT) {
+    return staInfo->aggDurationLimit;
+  }
 
-    if (_rsMngCoexIsLongAggAllowed(staInfo)) {
-        staInfo->longAggEnabled = TRUE;
-        return RS_MNG_AGG_DURATION_LIMIT;
-    }
+  if (_rsMngCoexIsLongAggAllowed(staInfo)) {
+    staInfo->longAggEnabled = TRUE;
+    return RS_MNG_AGG_DURATION_LIMIT;
+  }
 
-    staInfo->longAggEnabled = FALSE;
-    return RS_MNG_AGG_DURATION_LIMIT_SHORT;
+  staInfo->longAggEnabled = FALSE;
+  return RS_MNG_AGG_DURATION_LIMIT_SHORT;
 }
 
 static BOOLEAN _allowColMimo(const RS_MNG_STA_INFO_S* staInfo, U32 bw,
                              const RS_MNG_COL_ELEM_S* nextCol) {
-    BOOLEAN isBw160 = (bw == TLC_MNG_CH_WIDTH_160MHZ);
+  BOOLEAN isBw160 = (bw == TLC_MNG_CH_WIDTH_160MHZ);
 
-    // TODO - check if ht/vht supported? redundent
-    // if no mimo rate is supported
-    if (!(staInfo->config.mcs[TLC_MNG_NSS_2][isBw160])) { return FALSE; }
+  // TODO - check if ht/vht supported? redundant
+  // if no mimo rate is supported
+  if (!(staInfo->config.mcs[TLC_MNG_NSS_2][isBw160])) {
+    return FALSE;
+  }
 
-    if (staInfo->config.chainsEnabled != rsMngGetDualAntMsk()) { return FALSE; }
+  if (staInfo->config.chainsEnabled != rsMngGetDualAntMsk()) {
+    return FALSE;
+  }
 
-    if (iwl_mvm_get_valid_tx_ant(staInfo->mvm) != rsMngGetDualAntMsk()) { return FALSE; }
+  if (iwl_mvm_get_valid_tx_ant(staInfo->mvm) != rsMngGetDualAntMsk()) {
+    return FALSE;
+  }
 
-    return TRUE;
+  return TRUE;
 }
 
 static BOOLEAN _allowColSiso(const RS_MNG_STA_INFO_S* staInfo, U32 bw,
                              const RS_MNG_COL_ELEM_S* nextCol) {
-    BOOLEAN isBw160 = (bw == TLC_MNG_CH_WIDTH_160MHZ);
+  BOOLEAN isBw160 = (bw == TLC_MNG_CH_WIDTH_160MHZ);
 
-    // if there are supported SISO rates - return true. else - return false
-    return (!!(staInfo->config.mcs[TLC_MNG_NSS_1][isBw160]));
+  // if there are supported SISO rates - return true. else - return false
+  return (!!(staInfo->config.mcs[TLC_MNG_NSS_1][isBw160]));
 }
 
 static BOOLEAN _allowColHe(const RS_MNG_STA_INFO_S* staInfo, U32 bw,
                            const RS_MNG_COL_ELEM_S* nextCol) {
-    return !!(staInfo->config.bestSuppMode == TLC_MNG_MODE_HE);
+  return !!(staInfo->config.bestSuppMode == TLC_MNG_MODE_HE);
 }
 
 static BOOLEAN _allowColHtVht(const RS_MNG_STA_INFO_S* staInfo, U32 bw,
                               const RS_MNG_COL_ELEM_S* nextCol) {
-    return !!(staInfo->config.bestSuppMode == TLC_MNG_MODE_HT ||
-              staInfo->config.bestSuppMode == TLC_MNG_MODE_VHT);
+  return !!(staInfo->config.bestSuppMode == TLC_MNG_MODE_HT ||
+            staInfo->config.bestSuppMode == TLC_MNG_MODE_VHT);
 }
 
 static BOOLEAN _allowColSgi(const RS_MNG_STA_INFO_S* staInfo, U32 bw,
                             const RS_MNG_COL_ELEM_S* nextCol) {
-    U08 sgiChWidthSupport = staInfo->config.sgiChWidthSupport;
+  U08 sgiChWidthSupport = staInfo->config.sgiChWidthSupport;
 
-    return !!(sgiChWidthSupport & BIT(bw));
+  return !!(sgiChWidthSupport & BIT(bw));
 }
 
 static BOOLEAN _alloCol2xLTF(const RS_MNG_STA_INFO_S* staInfo, U32 bw,
                              const RS_MNG_COL_ELEM_S* nextCol) {
-    return !(staInfo->config.configFlags & TLC_MNG_CONFIG_FLAGS_HE_BLOCK_2X_LTF_MSK);
+  return !(staInfo->config.configFlags & TLC_MNG_CONFIG_FLAGS_HE_BLOCK_2X_LTF_MSK);
 }
 
 /***************************************************************/
 
 static BOOLEAN _rsMngTpcIsActive(const RS_MNG_STA_INFO_S* staInfo) {
-    // There are 2 values for currStep that mean tpc isn't working currently - RS_MNG_TPC_INACTIVE
-    // and RS_MNG_TPC_DISABLED.
-    return staInfo->tpcTable.currStep < RS_MNG_TPC_NUM_STEPS;
+  // There are 2 values for currStep that mean tpc isn't working currently - RS_MNG_TPC_INACTIVE
+  // and RS_MNG_TPC_DISABLED.
+  return staInfo->tpcTable.currStep < RS_MNG_TPC_NUM_STEPS;
 }
 static BOOLEAN _rsMngIsTestWindow(const RS_MNG_STA_INFO_S* staInfo) {
-    return staInfo->tryingRateUpscale || staInfo->searchBetterTbl || staInfo->tpcTable.testing;
+  return staInfo->tryingRateUpscale || staInfo->searchBetterTbl || staInfo->tpcTable.testing;
 }
 
 static void _rsMngFillAggParamsLQCmd(RS_MNG_STA_INFO_S* staInfo, struct iwl_lq_cmd* lqCmd) {
-    lqCmd->agg_time_limit = cpu_to_le16(_rsMngGetAggTimeLimit(staInfo));
-    lqCmd->agg_disable_start_th = RS_MNG_AGG_DISABLE_START_TH;
+  lqCmd->agg_time_limit = cpu_to_le16(_rsMngGetAggTimeLimit(staInfo));
+  lqCmd->agg_disable_start_th = RS_MNG_AGG_DISABLE_START_TH;
 
-    // W/A for a HW bug that causes it to not prepare a second burst if the first one uses
-    // all frames in the Fifo. W/A this by making sure there's always at least one frame left.
-    lqCmd->agg_frame_cnt_limit = (U08)(staInfo->staBuffSize - 1);
+  // W/A for a HW bug that causes it to not prepare a second burst if the first one uses
+  // all frames in the Fifo. W/A this by making sure there's always at least one frame left.
+  lqCmd->agg_frame_cnt_limit = (U08)(staInfo->staBuffSize - 1);
 }
 
 // Get the next supported lower rate in the current column.
@@ -1406,419 +1432,456 @@
 // the found rate index, or
 // RS_MNG_INVALID_RATE_IDX if no such rate exists
 static U08 _rsMngSetLowerRate(const RS_MNG_STA_INFO_S* staInfo, RS_MNG_RATE_S* rsMngRate) {
-    U08 lowerSuppRateIdx = _rsMngGetAdjacentRateIdx(staInfo, rsMngRate, GET_LOWER_SUPPORTED_RATE);
+  U08 lowerSuppRateIdx = _rsMngGetAdjacentRateIdx(staInfo, rsMngRate, GET_LOWER_SUPPORTED_RATE);
 
-    // if this is the lowest rate possible and this is not legacy rate - break;
-    if (RS_MNG_INVALID_RATE_IDX != lowerSuppRateIdx) {
-        _rsMngRateSetIdx(rsMngRate, lowerSuppRateIdx);
-    }
+  // if this is the lowest rate possible and this is not legacy rate - break;
+  if (RS_MNG_INVALID_RATE_IDX != lowerSuppRateIdx) {
+    _rsMngRateSetIdx(rsMngRate, lowerSuppRateIdx);
+  }
 
-    return lowerSuppRateIdx;
+  return lowerSuppRateIdx;
 }
 
 static void tlcMngNotifyAmsdu(const RS_MNG_STA_INFO_S* staInfo, U16 amsduSize, U16 tidBitmap) {
-    int i;
+  int i;
 
-    staInfo->mvmsta->amsdu_enabled = tidBitmap;
-    staInfo->mvmsta->max_amsdu_len = amsduSize;
-    staInfo->sta->max_rc_amsdu_len = staInfo->mvmsta->max_amsdu_len;
+  staInfo->mvmsta->amsdu_enabled = tidBitmap;
+  staInfo->mvmsta->max_amsdu_len = amsduSize;
+  staInfo->sta->max_rc_amsdu_len = staInfo->mvmsta->max_amsdu_len;
 
-    for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-        if (staInfo->mvmsta->amsdu_enabled & BIT(i))
-            staInfo->sta->max_tid_amsdu_len[i] =
-                iwl_mvm_max_amsdu_size(staInfo->mvm, staInfo->sta, i);
-        else
-        /*
-         * Not so elegant, but this will effectively
-         * prevent AMSDU on this TID
-         */
-        {
-            staInfo->sta->max_tid_amsdu_len[i] = 1;
-        }
+  for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+    if (staInfo->mvmsta->amsdu_enabled & BIT(i))
+      staInfo->sta->max_tid_amsdu_len[i] = iwl_mvm_max_amsdu_size(staInfo->mvm, staInfo->sta, i);
+    else
+    /*
+     * Not so elegant, but this will effectively
+     * prevent AMSDU on this TID
+     */
+    {
+      staInfo->sta->max_tid_amsdu_len[i] = 1;
     }
+  }
 }
 
 static void _rsMngFillNonHtRates(const RS_MNG_STA_INFO_S* staInfo, struct iwl_lq_cmd* lqCmd, U08 i,
                                  RS_MNG_RATE_S* rsMngRate) {
-    BOOLEAN togglingPossible = _rsMngIsAntSupported(staInfo, rsMngGetDualAntMsk()) &&
-                               btCoexManagerIsAntAvailable(staInfo->mvm, BT_COEX_SHARED_ANT_ID);
+  BOOLEAN togglingPossible = _rsMngIsAntSupported(staInfo, rsMngGetDualAntMsk()) &&
+                             btCoexManagerIsAntAvailable(staInfo->mvm, BT_COEX_SHARED_ANT_ID);
 
-    if (_rsMngRateGetMode(rsMngRate) != TLC_MNG_MODE_LEGACY) {
-        U08 currIdx = _rsMngRateGetIdx(rsMngRate);
+  if (_rsMngRateGetMode(rsMngRate) != TLC_MNG_MODE_LEGACY) {
+    U08 currIdx = _rsMngRateGetIdx(rsMngRate);
 
-        _rsMngRateSetMode(rsMngRate, TLC_MNG_MODE_LEGACY);
-        _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_LEGACY);
-        _rsMngRateSetBw(rsMngRate, CHANNEL_WIDTH20);
-        _rsMngRateSetLdpc(rsMngRate, FALSE);
-        _rsMngRateSetStbc(rsMngRate, FALSE);
+    _rsMngRateSetMode(rsMngRate, TLC_MNG_MODE_LEGACY);
+    _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_LEGACY);
+    _rsMngRateSetBw(rsMngRate, CHANNEL_WIDTH20);
+    _rsMngRateSetLdpc(rsMngRate, FALSE);
+    _rsMngRateSetStbc(rsMngRate, FALSE);
 
-        // Always start with the non-shared antenna if it's available. If there's toggling, it
-        // doesn't make much difference, and if there's no toggling due to bt-coex it promises we'll
-        // stay on the non-shared antenna.
-        _rsMngRateSetAnt(rsMngRate, rsMngGetSingleAntMsk(staInfo->config.chainsEnabled));
-        _rsMngRateSetIdx(rsMngRate, downColMcsToLegacy[currIdx]);
-    } else {
-        _rsMngSetLowerRate(staInfo, rsMngRate);
+    // Always start with the non-shared antenna if it's available. If there's toggling, it
+    // doesn't make much difference, and if there's no toggling due to bt-coex it promises we'll
+    // stay on the non-shared antenna.
+    _rsMngRateSetAnt(rsMngRate, rsMngGetSingleAntMsk(staInfo->config.chainsEnabled));
+    _rsMngRateSetIdx(rsMngRate, downColMcsToLegacy[currIdx]);
+  } else {
+    _rsMngSetLowerRate(staInfo, rsMngRate);
+  }
+
+  for (; i < LQ_MAX_RETRY_NUM; i++) {
+    lqCmd->rs_table[i] = cpu_to_le32(rsMngRate->rate.rate_n_flags);
+
+    _rsMngSetLowerRate(staInfo, rsMngRate);
+
+    if (togglingPossible) {
+      _rsMngRateSetAnt(rsMngRate, (U08)(_rsMngRateGetAnt(rsMngRate) ^ rsMngGetDualAntMsk()));
     }
-
-    for (; i < LQ_MAX_RETRY_NUM; i++) {
-        lqCmd->rs_table[i] = cpu_to_le32(rsMngRate->rate.rate_n_flags);
-
-        _rsMngSetLowerRate(staInfo, rsMngRate);
-
-        if (togglingPossible) {
-            _rsMngRateSetAnt(rsMngRate, (U08)(_rsMngRateGetAnt(rsMngRate) ^ rsMngGetDualAntMsk()));
-        }
-    }
+  }
 }
 
 static void _rsMngBuildRatesTbl(const RS_MNG_STA_INFO_S* staInfo, struct iwl_lq_cmd* lqCmd) {
-    RS_MNG_RATE_S rsMngRate;
-    U08 i = 0;
-    U08 j;
+  RS_MNG_RATE_S rsMngRate;
+  U08 i = 0;
+  U08 j;
 
-    memcpy(&rsMngRate, &staInfo->rateTblInfo.rsMngRate, sizeof(rsMngRate));
+  memcpy(&rsMngRate, &staInfo->rateTblInfo.rsMngRate, sizeof(rsMngRate));
 
-    if (staInfo->searchBetterTbl) {
-        // When trying a new column, only the initial rate should be of that column. The rest of the
-        // table is constructed from the "stable" column.
-        lqCmd->rs_table[0] = cpu_to_le32(staInfo->searchColData.rsMngRate.rate.rate_n_flags);
-        i++;
-    } else if (staInfo->tryingRateUpscale) {
-        // When trying a higher mcs, try it only once. The next retries will be from the previous
-        // mcs which is known to be good (otherwise wouldn't be trying a higher one).
-        lqCmd->rs_table[0] = cpu_to_le32(staInfo->rateTblInfo.rsMngRate.rate.rate_n_flags);
-        i++;
-        _rsMngSetLowerRate(staInfo, &rsMngRate);
-    }
+  if (staInfo->searchBetterTbl) {
+    // When trying a new column, only the initial rate should be of that column. The rest of the
+    // table is constructed from the "stable" column.
+    lqCmd->rs_table[0] = cpu_to_le32(staInfo->searchColData.rsMngRate.rate.rate_n_flags);
+    i++;
+  } else if (staInfo->tryingRateUpscale) {
+    // When trying a higher mcs, try it only once. The next retries will be from the previous
+    // mcs which is known to be good (otherwise wouldn't be trying a higher one).
+    lqCmd->rs_table[0] = cpu_to_le32(staInfo->rateTblInfo.rsMngRate.rate.rate_n_flags);
+    i++;
+    _rsMngSetLowerRate(staInfo, &rsMngRate);
+  }
 
-    // Fill RS_MNG_RETRY_TABLE_INITIAL_RATE_NUM copies of the best known stable rate
-    for (j = 0; j < RS_MNG_RETRY_TABLE_INITIAL_RATE_NUM; j++) {
-        lqCmd->rs_table[i + j] = cpu_to_le32(rsMngRate.rate.rate_n_flags);
+  // Fill RS_MNG_RETRY_TABLE_INITIAL_RATE_NUM copies of the best known stable rate
+  for (j = 0; j < RS_MNG_RETRY_TABLE_INITIAL_RATE_NUM; j++) {
+    lqCmd->rs_table[i + j] = cpu_to_le32(rsMngRate.rate.rate_n_flags);
+  }
+  i += j;
+
+  if (!(staInfo->searchBetterTbl || staInfo->tryingRateUpscale) &&
+      _rsMngSetLowerRate(staInfo, &rsMngRate) != RS_MNG_INVALID_RATE_IDX) {
+    // In case the first rate is not a test rate, put here RS_MNG_RETRY_TABLE_SECONDARY_RATE_NUM
+    // copies of the initial rate but mcs-1
+    // Note that a tpc test window is not treated as a test rate for the purpose of construction
+    // of the retry table.
+    for (j = 0; j < RS_MNG_RETRY_TABLE_SECONDARY_RATE_NUM; j++) {
+      lqCmd->rs_table[i + j] = cpu_to_le32(rsMngRate.rate.rate_n_flags);
     }
     i += j;
 
-    if (!(staInfo->searchBetterTbl || staInfo->tryingRateUpscale) &&
-        _rsMngSetLowerRate(staInfo, &rsMngRate) != RS_MNG_INVALID_RATE_IDX) {
-        // In case the first rate is not a test rate, put here RS_MNG_RETRY_TABLE_SECONDARY_RATE_NUM
-        // copies of the initial rate but mcs-1
-        // Note that a tpc test window is not treated as a test rate for the purpose of construction
-        // of the retry table.
-        for (j = 0; j < RS_MNG_RETRY_TABLE_SECONDARY_RATE_NUM; j++) {
-            lqCmd->rs_table[i + j] = cpu_to_le32(rsMngRate.rate.rate_n_flags);
-        }
-        i += j;
-
-        // Now put RS_MNG_RETRY_TABLE_SECONDARY_RATE_20MHZ_NUM copies of the secondary rate with
-        // 20mhz bandwidth.
-        _rsMngRateSetBw(&rsMngRate, CHANNEL_WIDTH20);
-        for (j = 0; j < RS_MNG_RETRY_TABLE_SECONDARY_RATE_20MHZ_NUM; j++) {
-            lqCmd->rs_table[i + j] = cpu_to_le32(rsMngRate.rate.rate_n_flags);
-        }
-        i += j;
+    // Now put RS_MNG_RETRY_TABLE_SECONDARY_RATE_20MHZ_NUM copies of the secondary rate with
+    // 20mhz bandwidth.
+    _rsMngRateSetBw(&rsMngRate, CHANNEL_WIDTH20);
+    for (j = 0; j < RS_MNG_RETRY_TABLE_SECONDARY_RATE_20MHZ_NUM; j++) {
+      lqCmd->rs_table[i + j] = cpu_to_le32(rsMngRate.rate.rate_n_flags);
     }
+    i += j;
+  }
 
-    // Fill the rest of the retry table with non-ht rates
-    _rsMngFillNonHtRates(staInfo, lqCmd, i, &rsMngRate);
+  // Fill the rest of the retry table with non-ht rates
+  _rsMngFillNonHtRates(staInfo, lqCmd, i, &rsMngRate);
 }
 
 static void _rsMngFillLQCmd(RS_MNG_STA_INFO_S* staInfo, struct iwl_lq_cmd* lqCmd) {
-    int i;
+  int i;
 
-    memset(lqCmd, 0, sizeof(*lqCmd));
-    lqCmd->sta_id = staInfo->mvmsta->sta_id;
+  memset(lqCmd, 0, sizeof(*lqCmd));
+  lqCmd->sta_id = staInfo->mvmsta->sta_id;
 
-    if (_rsMngTpcIsActive(staInfo)) {
-        // reduce Tx power in steps of 3db. Note that currStep == 0 means reduce 3db, hence the '+1'
-        lqCmd->reduced_tpc = (U08)(RS_MNG_TPC_STEP_SIZE * (staInfo->tpcTable.currStep + 1));
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngFillLQCmd: reducing tx power by %d db",
-                   lqCmd->reduced_tpc);
+  if (_rsMngTpcIsActive(staInfo)) {
+    // reduce Tx power in steps of 3db. Note that currStep == 0 means reduce 3db, hence the '+1'
+    lqCmd->reduced_tpc = (U08)(RS_MNG_TPC_STEP_SIZE * (staInfo->tpcTable.currStep + 1));
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngFillLQCmd: reducing tx power by %d db",
+               lqCmd->reduced_tpc);
+  }
+
+  _rsMngFillAggParamsLQCmd(staInfo, lqCmd);
+
+  _rsMngBuildRatesTbl(staInfo, lqCmd);
+
+  lqCmd->single_stream_ant_msk = rsMngGetSingleAntMsk(staInfo->config.chainsEnabled);
+  lqCmd->dual_stream_ant_msk = rsMngGetDualAntMsk();
+
+  if (_rsMngIsTestWindow(staInfo)) {
+    if (IS_RATE_OFDM_HT_VHT_HE_API_M(le32_to_cpu(lqCmd->rs_table[0]))) {
+      // For 11a/b/g rates, where there are no aggregations anyway, RTS protection just hurts
+      // the tpt.
+      lqCmd->rs_table[0] |= cpu_to_le32(RATE_MCS_RTS_REQUIRED_MSK);
+      // TODO: lqCmd->agg_params.uAggFrameCntInTestWin = RS_MNG_UPSCALE_AGG_FRAME_COUNT;
     }
+    // TODO: lqCmd->general_params.flags |= LINK_QUAL_FLAGS_TEST_WINDOWS_MSK;
+  }
 
-    _rsMngFillAggParamsLQCmd(staInfo, lqCmd);
+  if (staInfo->mvmsta->tx_protection) {
+    lqCmd->flags |= LQ_FLAG_USE_RTS_MSK;
+  }
 
-    _rsMngBuildRatesTbl(staInfo, lqCmd);
-
-    lqCmd->single_stream_ant_msk = rsMngGetSingleAntMsk(staInfo->config.chainsEnabled);
-    lqCmd->dual_stream_ant_msk = rsMngGetDualAntMsk();
-
-    if (_rsMngIsTestWindow(staInfo)) {
-        if (IS_RATE_OFDM_HT_VHT_HE_API_M(le32_to_cpu(lqCmd->rs_table[0]))) {
-            // For 11a/b/g rates, where there are no aggregations anyway, RTS protection just hurts
-            // the tpt.
-            lqCmd->rs_table[0] |= cpu_to_le32(RATE_MCS_RTS_REQUIRED_MSK);
-            // TODO: lqCmd->agg_params.uAggFrameCntInTestWin = RS_MNG_UPSCALE_AGG_FRAME_COUNT;
-        }
-        // TODO: lqCmd->general_params.flags |= LINK_QUAL_FLAGS_TEST_WINDOWS_MSK;
+  // When Amsdu's are enabled, enable RTS protection for all rates that use A-MPDUs, since in this
+  // case there could be really long frames and this should help reduce collisions.
+  if (staInfo->amsduEnabledSize != RS_MNG_AMSDU_INVALID) {
+    for (i = 0; i < RS_MNG_AGG_DISABLE_START_TH; i++) {
+      lqCmd->rs_table[i] |= cpu_to_le32(RATE_MCS_RTS_REQUIRED_MSK);
     }
-
-    if (staInfo->mvmsta->tx_protection) { lqCmd->flags |= LQ_FLAG_USE_RTS_MSK; }
-
-    // When Amsdu's are enabled, enable RTS protection for all rates that use A-MPDUs, since in this
-    // case there could be really long frames and this should help reduce collisions.
-    if (staInfo->amsduEnabledSize != RS_MNG_AMSDU_INVALID) {
-        for (i = 0; i < RS_MNG_AGG_DISABLE_START_TH; i++) {
-            lqCmd->rs_table[i] |= cpu_to_le32(RATE_MCS_RTS_REQUIRED_MSK);
-        }
-    }
+  }
 }
 
 // rs_update_rate_tbl
 static void _rsMngUpdateRateTbl(RS_MNG_STA_INFO_S* staInfo, BOOLEAN notifyHost) {
-    _rsMngFillLQCmd(staInfo, &staInfo->mvmsta->lq_sta.rs_drv.lq);
+  _rsMngFillLQCmd(staInfo, &staInfo->mvmsta->lq_sta.rs_drv.lq);
 
-    iwl_mvm_send_lq_cmd(staInfo->mvm, &staInfo->mvmsta->lq_sta.rs_drv.lq, !staInfo->enabled);
+  iwl_mvm_send_lq_cmd(staInfo->mvm, &staInfo->mvmsta->lq_sta.rs_drv.lq, !staInfo->enabled);
 }
 
 static void _rsMngClearWinArr(RS_MNG_WIN_STAT_S* winArr, U08 numWin) {
-    U08 i;
+  U08 i;
 
-    _memclr(winArr, (sizeof(*winArr) * numWin));
+  _memclr(winArr, (sizeof(*winArr) * numWin));
 
-    for (i = 0; i < numWin; i++) {
-        winArr[i].successRatio = RS_MNG_INVALID_VAL;
-        winArr[i].averageTpt = RS_MNG_INVALID_VAL;
-    }
+  for (i = 0; i < numWin; i++) {
+    winArr[i].successRatio = RS_MNG_INVALID_VAL;
+    winArr[i].averageTpt = RS_MNG_INVALID_VAL;
+  }
 }
 
 static void _rsMngClearTblWindows(RS_MNG_STA_INFO_S* staInfo) {
-    _rsMngClearWinArr(staInfo->rateTblInfo.win, RS_MNG_MAX_RATES_NUM);
+  _rsMngClearWinArr(staInfo->rateTblInfo.win, RS_MNG_MAX_RATES_NUM);
 
-    _rsMngClearWinArr(staInfo->tpcTable.windows, RS_MNG_TPC_NUM_STEPS);
+  _rsMngClearWinArr(staInfo->tpcTable.windows, RS_MNG_TPC_NUM_STEPS);
 }
 
 static void _rsMngSetVisitedColumn(RS_MNG_STA_INFO_S* staInfo, RS_MNG_COLUMN_DESC_E colId) {
-    // to make the code for setting both siso columns in case of stbc simpler, make sure that each
-    // such pair of columns has only bit 0 different.
-    BUILD_BUG_ON(!((RS_MNG_COL_SISO_ANT_A ^ RS_MNG_COL_SISO_ANT_B) == 1));
-    BUILD_BUG_ON(!((RS_MNG_COL_SISO_ANT_A_SGI ^ RS_MNG_COL_SISO_ANT_B_SGI) == 1));
-    BUILD_BUG_ON(!((RS_MNG_COL_HE_3_2_SISO_ANT_A ^ RS_MNG_COL_HE_3_2_SISO_ANT_B) == 1));
-    BUILD_BUG_ON(!((RS_MNG_COL_HE_1_6_SISO_ANT_A ^ RS_MNG_COL_HE_1_6_SISO_ANT_B) == 1));
-    BUILD_BUG_ON(!((RS_MNG_COL_HE_0_8_SISO_ANT_A ^ RS_MNG_COL_HE_0_8_SISO_ANT_B) == 1));
+  // to make the code for setting both siso columns in case of stbc simpler, make sure that each
+  // such pair of columns has only bit 0 different.
+  BUILD_BUG_ON(!((RS_MNG_COL_SISO_ANT_A ^ RS_MNG_COL_SISO_ANT_B) == 1));
+  BUILD_BUG_ON(!((RS_MNG_COL_SISO_ANT_A_SGI ^ RS_MNG_COL_SISO_ANT_B_SGI) == 1));
+  BUILD_BUG_ON(!((RS_MNG_COL_HE_3_2_SISO_ANT_A ^ RS_MNG_COL_HE_3_2_SISO_ANT_B) == 1));
+  BUILD_BUG_ON(!((RS_MNG_COL_HE_1_6_SISO_ANT_A ^ RS_MNG_COL_HE_1_6_SISO_ANT_B) == 1));
+  BUILD_BUG_ON(!((RS_MNG_COL_HE_0_8_SISO_ANT_A ^ RS_MNG_COL_HE_0_8_SISO_ANT_B) == 1));
 
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngSetVisitedColumn: colId %d, stbc allowed %d, visited columns 0x%x", colId,
-               _rsMngIsStbcSupported(staInfo), staInfo->visitedColumns);
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngSetVisitedColumn: colId %d, stbc allowed %d, visited columns 0x%x", colId,
+             _rsMngIsStbcSupported(staInfo), staInfo->visitedColumns);
 
-    staInfo->visitedColumns |= BIT(colId);
-    if (rsMngColumns[colId].mode == RS_MNG_MODUL_SISO && _rsMngIsStbcSupported(staInfo)) {
-        staInfo->visitedColumns |= BIT(colId ^ 1);
-    }
+  staInfo->visitedColumns |= BIT(colId);
+  if (rsMngColumns[colId].mode == RS_MNG_MODUL_SISO && _rsMngIsStbcSupported(staInfo)) {
+    staInfo->visitedColumns |= BIT(colId ^ 1);
+  }
 
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngSetVisitedColumn: visited columns 0x%x",
-               staInfo->visitedColumns);
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngSetVisitedColumn: visited columns 0x%x",
+             staInfo->visitedColumns);
 }
 
 static U32 _rsMngVhtRateToPhyRate(U32 bw, RS_MCS_E mcs, RS_MNG_GI_E gi, RS_MNG_MODULATION_E nss) {
-    U32 bitrate;
+  U32 bitrate;
 
-    if (WARN_ON(!(mcs < 10 && bw < MAX_CHANNEL_BW_INDX && nss >= RS_MNG_MODUL_SISO))) { return 1; }
+  if (WARN_ON(!(mcs < 10 && bw < MAX_CHANNEL_BW_INDX && nss >= RS_MNG_MODUL_SISO))) {
+    return 1;
+  }
 
-    bitrate = rsMngVhtRateToBps[bw][mcs];
+  bitrate = rsMngVhtRateToBps[bw][mcs];
 
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngAmsduRate: bw %d, mcs %d sgi %d nss %d", bw, mcs,
-               gi, nss);
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngAmsduRate: bw %d, mcs %d sgi %d nss %d", bw, mcs, gi,
+             nss);
 
-    if (nss == RS_MNG_MODUL_MIMO2) { bitrate *= 2; }
+  if (nss == RS_MNG_MODUL_MIMO2) {
+    bitrate *= 2;
+  }
 
-    if (gi == HT_VHT_SGI) { bitrate = bitrate + (bitrate / 9); }
+  if (gi == HT_VHT_SGI) {
+    bitrate = bitrate + (bitrate / 9);
+  }
 
-    return bitrate >> 20;
+  return bitrate >> 20;
 }
 
 static U32 _rsMngHeRateToPhyRate(U32 bw, RS_MCS_E mcs, RS_MNG_GI_E gi, RS_MNG_MODULATION_E nss) {
 #define RATIO_SCALE 2048
 #define MBPS_X_10_TO_KBPS(x) (((x) << 10) / 10)
-    static const U16 mcsRatios[12] = {
-        34133, /* 16.666666... */
-        17067, /*  8.333333... */
-        11378, /*  5.555555... */
-        8533,  /*  4.166666... */
-        5689,  /*  2.777777... */
-        4267,  /*  2.083333... */
-        3923,  /*  1.851851... */
-        3413,  /*  1.666666... */
-        2844,  /*  1.388888... */
-        2560,  /*  1.250000... */
-        2276,  /*  1.111111... */
-        2048,  /*  1.000000... */
-    };
-    static const U32 ratesPerGi[][3] = {
-        // phy rate in kbps per GI for mcs 11 with 2 ss
-        //                     HE_3_2_GI                  HE_1_6_GI                 HE_0_8_GI
-        [CHANNEL_WIDTH20] = {MBPS_X_10_TO_KBPS(2438), MBPS_X_10_TO_KBPS(2708),
-                             MBPS_X_10_TO_KBPS(2868)},
-        [CHANNEL_WIDTH40] = {MBPS_X_10_TO_KBPS(4875), MBPS_X_10_TO_KBPS(5417),
-                             MBPS_X_10_TO_KBPS(5735)},
-        [CHANNEL_WIDTH80] = {MBPS_X_10_TO_KBPS(10208), MBPS_X_10_TO_KBPS(11343),
-                             MBPS_X_10_TO_KBPS(12010)},
-        [CHANNEL_WIDTH160] = {MBPS_X_10_TO_KBPS(20416), MBPS_X_10_TO_KBPS(22685),
-                              MBPS_X_10_TO_KBPS(24019)},
-    };
-    U64 tmp;
-    U32 bitrate;
-    BOOLEAN isDcm = FALSE;
+  static const U16 mcsRatios[12] = {
+      34133, /* 16.666666... */
+      17067, /*  8.333333... */
+      11378, /*  5.555555... */
+      8533,  /*  4.166666... */
+      5689,  /*  2.777777... */
+      4267,  /*  2.083333... */
+      3923,  /*  1.851851... */
+      3413,  /*  1.666666... */
+      2844,  /*  1.388888... */
+      2560,  /*  1.250000... */
+      2276,  /*  1.111111... */
+      2048,  /*  1.000000... */
+  };
+  static const U32 ratesPerGi[][3] = {
+      // phy rate in kbps per GI for mcs 11 with 2 ss
+      //                     HE_3_2_GI                  HE_1_6_GI                 HE_0_8_GI
+      [CHANNEL_WIDTH20] = {MBPS_X_10_TO_KBPS(2438), MBPS_X_10_TO_KBPS(2708),
+                           MBPS_X_10_TO_KBPS(2868)},
+      [CHANNEL_WIDTH40] = {MBPS_X_10_TO_KBPS(4875), MBPS_X_10_TO_KBPS(5417),
+                           MBPS_X_10_TO_KBPS(5735)},
+      [CHANNEL_WIDTH80] = {MBPS_X_10_TO_KBPS(10208), MBPS_X_10_TO_KBPS(11343),
+                           MBPS_X_10_TO_KBPS(12010)},
+      [CHANNEL_WIDTH160] = {MBPS_X_10_TO_KBPS(20416), MBPS_X_10_TO_KBPS(22685),
+                            MBPS_X_10_TO_KBPS(24019)},
+  };
+  U64 tmp;
+  U32 bitrate;
+  BOOLEAN isDcm = FALSE;
 
-    if (mcs == RS_MCS_0_HE_ER_AND_DCM) {
-        isDcm = TRUE;
-        mcs = RS_MCS_0;
-    }
+  if (mcs == RS_MCS_0_HE_ER_AND_DCM) {
+    isDcm = TRUE;
+    mcs = RS_MCS_0;
+  }
 
-    if (WARN_ON(!(mcs < 12 && bw < MAX_CHANNEL_BW_INDX && gi >= HE_FIRST_GI &&
-                  nss >= RS_MNG_MODUL_SISO))) {
-        return 1;
-    }
+  if (WARN_ON(!(mcs < 12 && bw < MAX_CHANNEL_BW_INDX && gi >= HE_FIRST_GI &&
+                nss >= RS_MNG_MODUL_SISO))) {
+    return 1;
+  }
 
-    bitrate = ratesPerGi[bw][gi - HE_FIRST_GI];
-    tmp = bitrate;
-    tmp *= RATIO_SCALE;
-    tmp /= mcsRatios[mcs];
-    bitrate = (U32)tmp;
+  bitrate = ratesPerGi[bw][gi - HE_FIRST_GI];
+  tmp = bitrate;
+  tmp *= RATIO_SCALE;
+  tmp /= mcsRatios[mcs];
+  bitrate = (U32)tmp;
 
-    if (nss == RS_MNG_MODUL_SISO) { bitrate /= 2; }
+  if (nss == RS_MNG_MODUL_SISO) {
+    bitrate /= 2;
+  }
 
-    if (isDcm) { bitrate /= 2; }
+  if (isDcm) {
+    bitrate /= 2;
+  }
 
-    return bitrate >> 10;
+  return bitrate >> 10;
 }
 
 static U32 _rsMngRateToPhyRate(TLC_MNG_MODE_E mode, U32 bw, RS_MCS_E mcs, RS_MNG_GI_E gi,
                                RS_MNG_MODULATION_E nss) {
-    if (mode == TLC_MNG_MODE_VHT) { return _rsMngVhtRateToPhyRate(bw, mcs, gi, nss); }
-    if (mode == TLC_MNG_MODE_HE) { return _rsMngHeRateToPhyRate(bw, mcs, gi, nss); }
+  if (mode == TLC_MNG_MODE_VHT) {
+    return _rsMngVhtRateToPhyRate(bw, mcs, gi, nss);
+  }
+  if (mode == TLC_MNG_MODE_HE) {
+    return _rsMngHeRateToPhyRate(bw, mcs, gi, nss);
+  }
 
-    return 0;
+  return 0;
 }
 
 static RS_MNG_TX_AMSDU_SIZE_E _rsMngAmsduSize(const RS_MNG_STA_INFO_S* staInfo, TLC_MNG_MODE_E mode,
                                               U32 bw, RS_MCS_E mcs, RS_MNG_GI_E gi,
                                               RS_MNG_MODULATION_E nss) {
-    RS_MNG_TX_AMSDU_SIZE_E amsdu_3k, amsdu_5k, amsdu_6k, amsdu_8k;
-    U32 phyRate = _rsMngRateToPhyRate(mode, bw, mcs, gi, nss);
+  RS_MNG_TX_AMSDU_SIZE_E amsdu_3k, amsdu_5k, amsdu_6k, amsdu_8k;
+  U32 phyRate = _rsMngRateToPhyRate(mode, bw, mcs, gi, nss);
 
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngAmsduRate: sta %d, phyRate %d, blacklist bitmap 0x%X",
-               _rsMngStaInfoToStaId(staInfo), phyRate, staInfo->amsduBlacklist);
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngAmsduRate: sta %d, phyRate %d, blacklist bitmap 0x%X",
+             _rsMngStaInfoToStaId(staInfo), phyRate, staInfo->amsduBlacklist);
 
-    amsdu_3k = RS_MNG_AMSDU_3500B;
-    amsdu_5k = RS_MNG_AMSDU_5000B;
+  amsdu_3k = RS_MNG_AMSDU_3500B;
+  amsdu_5k = RS_MNG_AMSDU_5000B;
 
-    if (staInfo->amsduBlacklist) {
-        // If we disabled 3k AMSDU - don't use it at all
-        if (staInfo->amsduBlacklist & BIT(RS_MNG_AMSDU_3500B)) { amsdu_3k = RS_MNG_AMSDU_INVALID; }
-
-        // If we disabled some amsdu size, use a smaller size.
-        // Note that smaller sizes that are blacklisted as well will still not be used.
-        if (staInfo->amsduBlacklist & BIT(RS_MNG_AMSDU_5000B)) { amsdu_5k = amsdu_3k; }
+  if (staInfo->amsduBlacklist) {
+    // If we disabled 3k AMSDU - don't use it at all
+    if (staInfo->amsduBlacklist & BIT(RS_MNG_AMSDU_3500B)) {
+      amsdu_3k = RS_MNG_AMSDU_INVALID;
     }
 
-    if (mode == TLC_MNG_MODE_HE) {
-        amsdu_6k = RS_MNG_AMSDU_6500B;
-        amsdu_8k = RS_MNG_AMSDU_8000B;
+    // If we disabled some amsdu size, use a smaller size.
+    // Note that smaller sizes that are blacklisted as well will still not be used.
+    if (staInfo->amsduBlacklist & BIT(RS_MNG_AMSDU_5000B)) {
+      amsdu_5k = amsdu_3k;
+    }
+  }
 
-        if (staInfo->amsduBlacklist & BIT(RS_MNG_AMSDU_6500B)) { amsdu_6k = amsdu_5k; }
+  if (mode == TLC_MNG_MODE_HE) {
+    amsdu_6k = RS_MNG_AMSDU_6500B;
+    amsdu_8k = RS_MNG_AMSDU_8000B;
 
-        if (staInfo->amsduBlacklist & BIT(RS_MNG_AMSDU_8000B)) { amsdu_8k = amsdu_6k; }
-
-        if (phyRate > RS_MNG_AMSDU_HE_8K_THRESHOLD) { return amsdu_8k; }
-
-        if (phyRate > RS_MNG_AMSDU_HE_6K_THRESHOLD) { return amsdu_6k; }
+    if (staInfo->amsduBlacklist & BIT(RS_MNG_AMSDU_6500B)) {
+      amsdu_6k = amsdu_5k;
     }
 
-    if (phyRate > RS_MNG_AMSDU_5K_THRESHOLD) { return amsdu_5k; }
+    if (staInfo->amsduBlacklist & BIT(RS_MNG_AMSDU_8000B)) {
+      amsdu_8k = amsdu_6k;
+    }
 
-    if (phyRate > RS_MNG_AMSDU_3K_THRESHOLD) { return amsdu_3k; }
+    if (phyRate > RS_MNG_AMSDU_HE_8K_THRESHOLD) {
+      return amsdu_8k;
+    }
 
-    return RS_MNG_AMSDU_INVALID;
+    if (phyRate > RS_MNG_AMSDU_HE_6K_THRESHOLD) {
+      return amsdu_6k;
+    }
+  }
+
+  if (phyRate > RS_MNG_AMSDU_5K_THRESHOLD) {
+    return amsdu_5k;
+  }
+
+  if (phyRate > RS_MNG_AMSDU_3K_THRESHOLD) {
+    return amsdu_3k;
+  }
+
+  return RS_MNG_AMSDU_INVALID;
 }
 
 static const TPT_BY_RATE_ARR* _rsMngGetExpectedTptTable(const RS_MNG_COL_ELEM_S* col,
                                                         TLC_MNG_CH_WIDTH_E bw, BOOLEAN isAgg) {
-    U32 nss;
-    U32 gi;
+  U32 nss;
+  U32 gi;
 
-    if (col->mode == RS_MNG_MODUL_LEGACY) { return &expectedTptNonHt; }
+  if (col->mode == RS_MNG_MODUL_LEGACY) {
+    return &expectedTptNonHt;
+  }
 
-    nss = col->mode == RS_MNG_MODUL_SISO ? RS_MNG_SISO : RS_MNG_MIMO;
+  nss = col->mode == RS_MNG_MODUL_SISO ? RS_MNG_SISO : RS_MNG_MIMO;
 
-    switch (col->gi) {
+  switch (col->gi) {
     case HT_VHT_NGI:
-        gi = RS_MNG_NGI;
-        break;
+      gi = RS_MNG_NGI;
+      break;
     case HT_VHT_SGI:
-        gi = RS_MNG_SGI;
-        break;
+      gi = RS_MNG_SGI;
+      break;
     case HE_3_2_GI:
-        gi = RS_MNG_GI_3_2;
-        break;
+      gi = RS_MNG_GI_3_2;
+      break;
     case HE_1_6_GI:
-        gi = RS_MNG_GI_1_6;
-        break;
+      gi = RS_MNG_GI_1_6;
+      break;
     case HE_0_8_GI:
-        gi = RS_MNG_GI_0_8;
-        break;
+      gi = RS_MNG_GI_0_8;
+      break;
     default:
-        WARN_ON(1);
-        gi = 0;
-    }
+      WARN_ON(1);
+      gi = 0;
+  }
 
-    DBG_PRINTF(
-        UT, TLC_OFFLOAD_DBG, INFO,
-        "_rsMngGetExpectedTptTable: expected Tpt table - isHE %d, isAgg %d, BW %d, GI %d, NSS %d",
-        col->gi >= HE_FIRST_GI, isAgg, bw, gi, nss);
+  DBG_PRINTF(
+      UT, TLC_OFFLOAD_DBG, INFO,
+      "_rsMngGetExpectedTptTable: expected Tpt table - isHE %d, isAgg %d, BW %d, GI %d, NSS %d",
+      col->gi >= HE_FIRST_GI, isAgg, bw, gi, nss);
 
-    if (col->gi < HE_FIRST_GI) { return &expectedTptHtVht[isAgg][bw][gi][nss]; }
+  if (col->gi < HE_FIRST_GI) {
+    return &expectedTptHtVht[isAgg][bw][gi][nss];
+  }
 
-    return &expectedTptHe[isAgg][bw][gi][nss];
+  return &expectedTptHe[isAgg][bw][gi][nss];
 }
 
 static U32 _rsMngGetExpectedTpt(const RS_MNG_STA_INFO_S* staInfo, const RS_MNG_COL_ELEM_S* col,
                                 TLC_MNG_CH_WIDTH_E bw, BOOLEAN isAgg, RS_MCS_E rateIdx) {
-    const TPT_BY_RATE_ARR* expectedTptTable = _rsMngGetExpectedTptTable(col, bw, isAgg);
-    U32 ret;
+  const TPT_BY_RATE_ARR* expectedTptTable = _rsMngGetExpectedTptTable(col, bw, isAgg);
+  U32 ret;
 
-    if (expectedTptTable == &expectedTptNonHt) { return (*expectedTptTable)[rateIdx]; }
-    if (rateIdx == RS_MCS_0_HE_ER_AND_DCM) {
-        // rateIdx == RS_MCS_0_HE_ER_AND_DCM. DCM cuts expected tpt in half.
-        // TODO: add an additional small penalty for ER
-        return (*expectedTptTable)[RS_MCS_0] / 2;
+  if (expectedTptTable == &expectedTptNonHt) {
+    return (*expectedTptTable)[rateIdx];
+  }
+  if (rateIdx == RS_MCS_0_HE_ER_AND_DCM) {
+    // rateIdx == RS_MCS_0_HE_ER_AND_DCM. DCM cuts expected tpt in half.
+    // TODO: add an additional small penalty for ER
+    return (*expectedTptTable)[RS_MCS_0] / 2;
+  }
+
+  ret = (*expectedTptTable)[rateIdx];
+
+  if (staInfo->amsduSupport && staInfo->mvmsta->agg_tids && staInfo->amsduInAmpdu) {
+    switch (staInfo->amsduEnabledSize) {
+      case RS_MNG_AMSDU_8000B:
+        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                   "_rsMngGetExpectedTpt: adding 50%% thanks to 8k amsdu");
+        ret += (ret / 2);
+        break;
+      case RS_MNG_AMSDU_6500B:
+        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                   "_rsMngGetExpectedTpt: adding 37.5%% thanks to 6.5k amsdu");
+        ret += (ret / 4) + (ret / 8);
+        break;
+      case RS_MNG_AMSDU_5000B:
+        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                   "_rsMngGetExpectedTpt: adding 25%% thanks to 5k amsdu");
+        ret += (ret / 4);
+        break;
+      case RS_MNG_AMSDU_3500B:
+        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                   "_rsMngGetExpectedTpt: adding 12.5%% thanks to 3.5k amsdu");
+        ret += (ret / 8);
+        break;
+      default:
+        break;
     }
+  }
 
-    ret = (*expectedTptTable)[rateIdx];
-
-    if (staInfo->amsduSupport && staInfo->mvmsta->agg_tids && staInfo->amsduInAmpdu) {
-        switch (staInfo->amsduEnabledSize) {
-        case RS_MNG_AMSDU_8000B:
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngGetExpectedTpt: adding 50%% thanks to 8k amsdu");
-            ret += (ret / 2);
-            break;
-        case RS_MNG_AMSDU_6500B:
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngGetExpectedTpt: adding 37.5%% thanks to 6.5k amsdu");
-            ret += (ret / 4) + (ret / 8);
-            break;
-        case RS_MNG_AMSDU_5000B:
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngGetExpectedTpt: adding 25%% thanks to 5k amsdu");
-            ret += (ret / 4);
-            break;
-        case RS_MNG_AMSDU_3500B:
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngGetExpectedTpt: adding 12.5%% thanks to 3.5k amsdu");
-            ret += (ret / 8);
-            break;
-        default:
-            break;
-        }
-    }
-
-    return ret;
+  return ret;
 }
 
 static BOOLEAN _isAvgTptCalcPossible(RS_MNG_WIN_STAT_S* win) {
-    return ((win->successCounter >= RS_MNG_RATE_MIN_SUCCESS_TH) ||
-            ((win->framesCounter - win->successCounter) >= RS_MNG_RATE_MIN_FAILURE_TH));
+  return ((win->successCounter >= RS_MNG_RATE_MIN_SUCCESS_TH) ||
+          ((win->framesCounter - win->successCounter) >= RS_MNG_RATE_MIN_FAILURE_TH));
 }
 
 // rs_get_rate_action
@@ -1847,115 +1910,115 @@
 static RS_MNG_ACTION_E _rsMngGetScaleAction(const RS_MNG_STA_INFO_S* staInfo,
                                             const RS_MNG_WIN_STAT_S* currWin, U32 lowerRateIdx,
                                             U32 higherRateIdx) {
-    const RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
-    U32 currTpt;
-    U32 lowTpt;
-    U32 highTpt;
-    RS_MNG_ACTION_E action = RS_MNG_ACTION_STAY;
-    enum {
-        RS_MNG_SCALE_REASON_BELOW_FORCE_DECREASE,
-        RS_MNG_SCALE_REASON_NO_DATA_ON_HIGHER_RATE,
-        RS_MNG_SCALE_REASON_HIGHER_RATE_HAS_HIGHER_TPT,
-        RS_MNG_SCALE_REASON_CURRENT_RATE_HAS_HIGHEST_TPT,
-        RS_MNG_SCALE_REASON_SR_ABOVE_NO_DECREASE_THRESHOLD,
-        RS_MNG_SCALE_REASON_LOWER_RATE_TPT_UNKOWN_OR_BETTER,
-        RS_MNG_SCALE_REASON_DEFAULT,
-    };
+  const RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
+  U32 currTpt;
+  U32 lowTpt;
+  U32 highTpt;
+  RS_MNG_ACTION_E action = RS_MNG_ACTION_STAY;
+  enum {
+    RS_MNG_SCALE_REASON_BELOW_FORCE_DECREASE,
+    RS_MNG_SCALE_REASON_NO_DATA_ON_HIGHER_RATE,
+    RS_MNG_SCALE_REASON_HIGHER_RATE_HAS_HIGHER_TPT,
+    RS_MNG_SCALE_REASON_CURRENT_RATE_HAS_HIGHEST_TPT,
+    RS_MNG_SCALE_REASON_SR_ABOVE_NO_DECREASE_THRESHOLD,
+    RS_MNG_SCALE_REASON_LOWER_RATE_TPT_UNKOWN_OR_BETTER,
+    RS_MNG_SCALE_REASON_DEFAULT,
+  };
 
-    currTpt = currWin->averageTpt;
-    lowTpt = ((lowerRateIdx == RS_MNG_INVALID_RATE_IDX) ? RS_MNG_INVALID_VAL
-                                                        : tblInfo->win[lowerRateIdx].averageTpt);
-    highTpt = ((higherRateIdx == RS_MNG_INVALID_RATE_IDX) ? RS_MNG_INVALID_VAL
-                                                          : tblInfo->win[higherRateIdx].averageTpt);
+  currTpt = currWin->averageTpt;
+  lowTpt = ((lowerRateIdx == RS_MNG_INVALID_RATE_IDX) ? RS_MNG_INVALID_VAL
+                                                      : tblInfo->win[lowerRateIdx].averageTpt);
+  highTpt = ((higherRateIdx == RS_MNG_INVALID_RATE_IDX) ? RS_MNG_INVALID_VAL
+                                                        : tblInfo->win[higherRateIdx].averageTpt);
 
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngGetScaleAction:\ncurrTpt: %d, successRatio: %d,\nlowerRateIdx: %d, lowTpt: "
+             "%d,\nhigherRateIdx: %d, highTpt: %d",
+             currTpt, currWin->successRatio, lowerRateIdx, lowTpt, higherRateIdx, highTpt);
+
+  // current Success ratio is insufficient or Tpt for the current window is 0 => downscale
+  if ((currWin->successRatio <= RS_MNG_PERCENT(RS_MNG_SR_FORCE_DECREASE)) || (0 == currTpt)) {
     DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngGetScaleAction:\ncurrTpt: %d, successRatio: %d,\nlowerRateIdx: %d, lowTpt: "
-               "%d,\nhigherRateIdx: %d, highTpt: %d",
-               currTpt, currWin->successRatio, lowerRateIdx, lowTpt, higherRateIdx, highTpt);
+               "_rsMngGetScaleAction: DOWNSCALE due to insufficient success ratio or 0 tpt");
+    action = RS_MNG_ACTION_DOWNSCALE;
+    goto out;
+  }
 
-    // current Success ratio is insufficient or Tpt for the current window is 0 => downscale
-    if ((currWin->successRatio <= RS_MNG_PERCENT(RS_MNG_SR_FORCE_DECREASE)) || (0 == currTpt)) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngGetScaleAction: DOWNSCALE due to insufficient success ratio or 0 tpt");
-        action = RS_MNG_ACTION_DOWNSCALE;
-        goto out;
-    }
+  // No Tpt data about high/low rate => upscale
+  if ((RS_MNG_INVALID_VAL == lowTpt) && (RS_MNG_INVALID_VAL == highTpt) &&
+      (RS_MNG_INVALID_RATE_IDX != higherRateIdx)) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngGetScaleAction: UPSCALE due to no data about higher or lower rates");
+    action = RS_MNG_ACTION_UPSCALE;
+    goto out;
+  }
 
-    // No Tpt data about high/low rate => upscale
-    if ((RS_MNG_INVALID_VAL == lowTpt) && (RS_MNG_INVALID_VAL == highTpt) &&
-        (RS_MNG_INVALID_RATE_IDX != higherRateIdx)) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngGetScaleAction: UPSCALE due to no data about higher or lower rates");
-        action = RS_MNG_ACTION_UPSCALE;
-        goto out;
-    }
+  // if there's no Tpt data about the higherRateIdx but the Tpt for the lowerRateIdx is worse than
+  // the curr tpt => Upscale
+  if (((RS_MNG_INVALID_VAL == highTpt) && (RS_MNG_INVALID_RATE_IDX != higherRateIdx)) &&
+      ((RS_MNG_INVALID_VAL != lowTpt) && (lowTpt < currTpt))) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngGetScaleAction: UPSCALE due to no data on higher rate and lower rate has "
+               "worse tpt");
+    action = RS_MNG_ACTION_UPSCALE;
+    goto out;
+  }
 
-    // if there's no Tpt data about the higerRateIdx but the Tpt for the lowerRateIdx is worse then
-    // the curr tpt => Upscale
-    if (((RS_MNG_INVALID_VAL == highTpt) && (RS_MNG_INVALID_RATE_IDX != higherRateIdx)) &&
-        ((RS_MNG_INVALID_VAL != lowTpt) && (lowTpt < currTpt))) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngGetScaleAction: UPSCALE due to no data on higher rate and lower rate has "
-                   "worse tpt");
-        action = RS_MNG_ACTION_UPSCALE;
-        goto out;
-    }
+  // if higherRateIdx tpt > currTpt => upscale
+  if ((RS_MNG_INVALID_VAL != highTpt) && (highTpt > currTpt)) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngGetScaleAction: UPSCALE due to higher rate having higher tpt");
+    action = RS_MNG_ACTION_UPSCALE;
+    goto out;
+  }
 
-    // if higherRateIdx tpt > currTpt => upscale
-    if ((RS_MNG_INVALID_VAL != highTpt) && (highTpt > currTpt)) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngGetScaleAction: UPSCALE due to higher rate having higher tpt");
-        action = RS_MNG_ACTION_UPSCALE;
-        goto out;
-    }
-
-    // if Tpt for the higherRateIdx and for LowerRateIdx are both worse then the current Tpt => stay
-    if (((RS_MNG_INVALID_VAL != highTpt) && (highTpt < currTpt)) &&
-        ((RS_MNG_INVALID_VAL != lowTpt) && (lowTpt < currTpt))) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngGetScaleAction: STAY due to current rate having better tpt that both "
-                   "higher and lower rates");
-        action = RS_MNG_ACTION_STAY;
-        goto out;
-    }
-
-    // If Tpt for LowerRateIdx > currTpt, or it is unknown but lowerRateIdx is valid
-    if (((RS_MNG_INVALID_VAL != lowTpt) && (lowTpt > currTpt)) ||
-        ((RS_MNG_INVALID_VAL == lowTpt) && (RS_MNG_INVALID_RATE_IDX != lowerRateIdx))) {
-        U32 lowerRateExpectedTpt = _rsMngGetExpectedTpt(
-            staInfo, &rsMngColumns[tblInfo->column], _rsMngRateGetBw(&tblInfo->rsMngRate),
-            !!(staInfo->mvmsta->agg_tids), lowerRateIdx);
-
-        // if CurrWin success ratio reached the no decrease TH, or currTpt is higher then expected
-        // Tpt => stay
-        if ((RS_MNG_INVALID_RATE_IDX != lowerRateIdx) &&
-            ((currWin->successRatio >= RS_MNG_PERCENT(RS_MNG_SR_NO_DECREASE)) ||
-             (currTpt > lowerRateExpectedTpt))) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngGetScaleAction: STAY due to lower rate having expected tpt lower "
-                       "than current tpt or current success ratio is above the 'no-decrease' "
-                       "threshold. lower rate expected tpt: %d",
-                       lowerRateExpectedTpt);
-            action = RS_MNG_ACTION_STAY;
-            goto out;
-        }
-        // curr SR is insufficient and either lowTpt is valid and > currTpt or lowerRateIdx is valid
-        // => downscale
-        else {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngGetScaleAction: DOWNSCALE due to low success ratio or lower rate "
-                       "having higher tpt than current rate. lower rate expected tpt: %d",
-                       lowerRateExpectedTpt);
-            action = RS_MNG_ACTION_DOWNSCALE;
-            goto out;
-        }
-    }
-
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngGetScaleAction: STAY (default)");
+  // if Tpt for the higherRateIdx and for LowerRateIdx are both worse than the current Tpt => stay
+  if (((RS_MNG_INVALID_VAL != highTpt) && (highTpt < currTpt)) &&
+      ((RS_MNG_INVALID_VAL != lowTpt) && (lowTpt < currTpt))) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngGetScaleAction: STAY due to current rate having better tpt that both "
+               "higher and lower rates");
     action = RS_MNG_ACTION_STAY;
+    goto out;
+  }
+
+  // If Tpt for LowerRateIdx > currTpt, or it is unknown but lowerRateIdx is valid
+  if (((RS_MNG_INVALID_VAL != lowTpt) && (lowTpt > currTpt)) ||
+      ((RS_MNG_INVALID_VAL == lowTpt) && (RS_MNG_INVALID_RATE_IDX != lowerRateIdx))) {
+    U32 lowerRateExpectedTpt = _rsMngGetExpectedTpt(staInfo, &rsMngColumns[tblInfo->column],
+                                                    _rsMngRateGetBw(&tblInfo->rsMngRate),
+                                                    !!(staInfo->mvmsta->agg_tids), lowerRateIdx);
+
+    // if CurrWin success ratio reached the no decrease TH, or currTpt is higher than expected
+    // Tpt => stay
+    if ((RS_MNG_INVALID_RATE_IDX != lowerRateIdx) &&
+        ((currWin->successRatio >= RS_MNG_PERCENT(RS_MNG_SR_NO_DECREASE)) ||
+         (currTpt > lowerRateExpectedTpt))) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngGetScaleAction: STAY due to lower rate having expected tpt lower "
+                 "than current tpt or current success ratio is above the 'no-decrease' "
+                 "threshold. lower rate expected tpt: %d",
+                 lowerRateExpectedTpt);
+      action = RS_MNG_ACTION_STAY;
+      goto out;
+    }
+    // curr SR is insufficient and either lowTpt is valid and > currTpt or lowerRateIdx is valid
+    // => downscale
+    else {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngGetScaleAction: DOWNSCALE due to low success ratio or lower rate "
+                 "having higher tpt than current rate. lower rate expected tpt: %d",
+                 lowerRateExpectedTpt);
+      action = RS_MNG_ACTION_DOWNSCALE;
+      goto out;
+    }
+  }
+
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngGetScaleAction: STAY (default)");
+  action = RS_MNG_ACTION_STAY;
 
 out:
-    return action;
+  return action;
 }
 
 // return: TRUE if there is a better start rate, so need to send LQ command
@@ -1965,68 +2028,68 @@
 static RS_MNG_ACTION_E _rsMngSearchBetterStartRate(const RS_MNG_STA_INFO_S* staInfo,
                                                    RS_MNG_WIN_STAT_S* currWin,
                                                    const RS_MNG_RATE_S* currRate, U08* newIdx) {
-    U08 lowerSuppRateIdx;
-    U08 higherSuppRateIdx;
-    RS_MNG_ACTION_E scaleAction;
+  U08 lowerSuppRateIdx;
+  U08 higherSuppRateIdx;
+  RS_MNG_ACTION_E scaleAction;
 
-    lowerSuppRateIdx = _rsMngGetAdjacentRateIdx(staInfo, currRate, GET_LOWER_SUPPORTED_RATE);
-    higherSuppRateIdx = _rsMngGetAdjacentRateIdx(staInfo, currRate, GET_HIGHER_SUPPORTED_RATE);
+  lowerSuppRateIdx = _rsMngGetAdjacentRateIdx(staInfo, currRate, GET_LOWER_SUPPORTED_RATE);
+  higherSuppRateIdx = _rsMngGetAdjacentRateIdx(staInfo, currRate, GET_HIGHER_SUPPORTED_RATE);
 
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngSearchBetterStartRate: curr rate idx: %d, lower: %d, higher: %d. supported "
-               "rates mask: 0x%x",
-               _rsMngRateGetIdx(currRate), lowerSuppRateIdx, higherSuppRateIdx, supportedRatesMsk);
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngSearchBetterStartRate: curr rate idx: %d, lower: %d, higher: %d. supported "
+             "rates mask: 0x%x",
+             _rsMngRateGetIdx(currRate), lowerSuppRateIdx, higherSuppRateIdx, supportedRatesMsk);
 
-    scaleAction = _rsMngGetScaleAction(staInfo, currWin, lowerSuppRateIdx, higherSuppRateIdx);
+  scaleAction = _rsMngGetScaleAction(staInfo, currWin, lowerSuppRateIdx, higherSuppRateIdx);
 
-    // FMAC TODO - add 'fix' in case we are in MIMO and BT doesn't 'allow' MIMO ? (currently a dead
-    // code in fmac)
+  // FMAC TODO - add 'fix' in case we are in MIMO and BT doesn't 'allow' MIMO ? (currently a dead
+  // code in fmac)
 
-    // Set the new rate index + tx power reduction, if needed
-    switch (scaleAction) {
+  // Set the new rate index + tx power reduction, if needed
+  switch (scaleAction) {
     case RS_MNG_ACTION_STAY:
-        if (staInfo->rsMngState == RS_MNG_STATE_STAY_IN_COLUMN) {
-            // TODO - add tpc support
-            // rs_tpc_perform(sta, lq_sta, tbl);
-        }
-        break;
+      if (staInfo->rsMngState == RS_MNG_STATE_STAY_IN_COLUMN) {
+        // TODO - add tpc support
+        // rs_tpc_perform(sta, lq_sta, tbl);
+      }
+      break;
     case RS_MNG_ACTION_DOWNSCALE:
-        if (RS_MNG_INVALID_RATE_IDX != lowerSuppRateIdx) {
-            // TODO - add tpc
-            *newIdx = lowerSuppRateIdx;
-        }
-        // else - already at the lowest possible rate -> can't downscale
-        else {
-            scaleAction = RS_MNG_ACTION_STAY;
-        }
-        break;
+      if (RS_MNG_INVALID_RATE_IDX != lowerSuppRateIdx) {
+        // TODO - add tpc
+        *newIdx = lowerSuppRateIdx;
+      }
+      // else - already at the lowest possible rate -> can't downscale
+      else {
+        scaleAction = RS_MNG_ACTION_STAY;
+      }
+      break;
     case RS_MNG_ACTION_UPSCALE:
-        if (RS_MNG_INVALID_RATE_IDX != higherSuppRateIdx) {
-            // TODO - add tpc
-            *newIdx = higherSuppRateIdx;
-        }
-        // else - already at the highest possible rate -> can't upscale
-        else {
-            scaleAction = RS_MNG_ACTION_STAY;
-        }
-        break;
+      if (RS_MNG_INVALID_RATE_IDX != higherSuppRateIdx) {
+        // TODO - add tpc
+        *newIdx = higherSuppRateIdx;
+      }
+      // else - already at the highest possible rate -> can't upscale
+      else {
+        scaleAction = RS_MNG_ACTION_STAY;
+      }
+      break;
     default:
-        // TODO add assert?
-        break;
-    }
+      // TODO add assert?
+      break;
+  }
 
-    return scaleAction;
+  return scaleAction;
 }
 
 static U08 _rsMngGetLowestSupportedRate(const RS_MNG_STA_INFO_S* staInfo,
                                         RS_MNG_MODULATION_E modulation, U32 bw,
                                         U16 supportedRates) {
-    if (modulation != RS_MNG_MODUL_LEGACY && staInfo->config.bestSuppMode == TLC_MNG_MODE_HE &&
-        bw == CHANNEL_WIDTH20 && _rsMngIsDcmSupported(staInfo, modulation == RS_MNG_MODUL_MIMO2)) {
-        return RS_MCS_0_HE_ER_AND_DCM;
-    }
+  if (modulation != RS_MNG_MODUL_LEGACY && staInfo->config.bestSuppMode == TLC_MNG_MODE_HE &&
+      bw == CHANNEL_WIDTH20 && _rsMngIsDcmSupported(staInfo, modulation == RS_MNG_MODUL_MIMO2)) {
+    return RS_MCS_0_HE_ER_AND_DCM;
+  }
 
-    return (U08)LSB2ORD(supportedRates);
+  return (U08)LSB2ORD(supportedRates);
 }
 //
 // Returns the index of the lowest rate in the given column with expected tpt higher
@@ -2034,40 +2097,41 @@
 //
 static U08 _rsMngGetBestRate(const RS_MNG_STA_INFO_S* staInfo, const RS_MNG_TBL_INFO_S* activeTbl,
                              const RS_MNG_COL_ELEM_S* col, U32 bw, U32 targetTpt) {
-    U16 supportedRates = _rsMngGetSupportedRatesByModeAndBw(staInfo, col->mode, bw);
-    U08 rateIdx = _rsMngGetLowestSupportedRate(staInfo, col->mode, bw, supportedRates);
-    U32 expectedTpt = 0;
+  U16 supportedRates = _rsMngGetSupportedRatesByModeAndBw(staInfo, col->mode, bw);
+  U08 rateIdx = _rsMngGetLowestSupportedRate(staInfo, col->mode, bw, supportedRates);
+  U32 expectedTpt = 0;
 
-    while (rateIdx != RS_MNG_INVALID_RATE_IDX) {
-        expectedTpt = _rsMngGetExpectedTpt(staInfo, col, bw, !!staInfo->mvmsta->agg_tids, rateIdx);
+  while (rateIdx != RS_MNG_INVALID_RATE_IDX) {
+    expectedTpt = _rsMngGetExpectedTpt(staInfo, col, bw, !!staInfo->mvmsta->agg_tids, rateIdx);
 
-        if (targetTpt <= expectedTpt) { break; }
-
-        rateIdx = _rsMngGetHigherRateIdx(rateIdx, supportedRates);
+    if (targetTpt <= expectedTpt) {
+      break;
     }
 
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngGetBestRate: best rateIdx %d. targetTpt: %d, new expected tpt: %d", rateIdx,
-               targetTpt, expectedTpt);
+    rateIdx = _rsMngGetHigherRateIdx(rateIdx, supportedRates);
+  }
 
-    return rateIdx;
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngGetBestRate: best rateIdx %d. targetTpt: %d, new expected tpt: %d", rateIdx,
+             targetTpt, expectedTpt);
+
+  return rateIdx;
 }
 
 static BOOLEAN _rsMngIsColAllowed(const RS_MNG_STA_INFO_S* staInfo, U32 bw,
                                   const RS_MNG_COL_ELEM_S* nextCol) {
-    int i;
+  int i;
 
-    for (i = 0; i < MAX_COLUMN_CHECKS; i++) {
-        ALLOW_COL_FUNC_F allowColFunc = nextCol->checks[i];
+  for (i = 0; i < MAX_COLUMN_CHECKS; i++) {
+    ALLOW_COL_FUNC_F allowColFunc = nextCol->checks[i];
 
-        if (allowColFunc && !allowColFunc(staInfo, bw, nextCol)) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngIsColAllowed: Function[%d] check failed",
-                       i);
-            return FALSE;
-        }
+    if (allowColFunc && !allowColFunc(staInfo, bw, nextCol)) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngIsColAllowed: Function[%d] check failed", i);
+      return FALSE;
     }
+  }
 
-    return TRUE;
+  return TRUE;
 }
 
 // rs_get_next_column
@@ -2093,819 +2157,823 @@
 static RS_MNG_COLUMN_DESC_E _rsMngGetNextColId(const RS_MNG_STA_INFO_S* staInfo,
                                                const RS_MNG_TBL_INFO_S* tblInfo, U32 targetTpt,
                                                U32* bw, U08* rateIdx) {
-    const RS_MNG_COL_ELEM_S* currCol = &rsMngColumns[tblInfo->column];
-    const RS_MNG_COL_ELEM_S* nextCol;
-    RS_MNG_COLUMN_DESC_E nextColId = RS_MNG_COL_INVALID;  // for compilation. won't be used
-    int i;
+  const RS_MNG_COL_ELEM_S* currCol = &rsMngColumns[tblInfo->column];
+  const RS_MNG_COL_ELEM_S* nextCol;
+  RS_MNG_COLUMN_DESC_E nextColId = RS_MNG_COL_INVALID;  // for compilation. won't be used
+  int i;
 
-    // Check that the defines' value allow to assume that we can add NON_SHARED_ANT_RFIC_ID to
-    // RS_MNG_COL_SISO_ANT_A to get the shared antenna
-    BUILD_BUG_ON(!(RS_MNG_COL_SISO_ANT_A + 1 == RS_MNG_COL_SISO_ANT_B));
-    BUILD_BUG_ON(!(RS_MNG_COL_SISO_ANT_A_SGI + 1 == RS_MNG_COL_SISO_ANT_B_SGI));
-    BUILD_BUG_ON(!(RS_MNG_COL_HE_3_2_SISO_ANT_A + 1 == RS_MNG_COL_HE_3_2_SISO_ANT_B));
-    BUILD_BUG_ON(!(RS_MNG_COL_HE_1_6_SISO_ANT_A + 1 == RS_MNG_COL_HE_1_6_SISO_ANT_B));
-    BUILD_BUG_ON(!(RS_MNG_COL_HE_0_8_SISO_ANT_A + 1 == RS_MNG_COL_HE_0_8_SISO_ANT_B));
+  // Check that the defines' value allow to assume that we can add NON_SHARED_ANT_RFIC_ID to
+  // RS_MNG_COL_SISO_ANT_A to get the shared antenna
+  BUILD_BUG_ON(!(RS_MNG_COL_SISO_ANT_A + 1 == RS_MNG_COL_SISO_ANT_B));
+  BUILD_BUG_ON(!(RS_MNG_COL_SISO_ANT_A_SGI + 1 == RS_MNG_COL_SISO_ANT_B_SGI));
+  BUILD_BUG_ON(!(RS_MNG_COL_HE_3_2_SISO_ANT_A + 1 == RS_MNG_COL_HE_3_2_SISO_ANT_B));
+  BUILD_BUG_ON(!(RS_MNG_COL_HE_1_6_SISO_ANT_A + 1 == RS_MNG_COL_HE_1_6_SISO_ANT_B));
+  BUILD_BUG_ON(!(RS_MNG_COL_HE_0_8_SISO_ANT_A + 1 == RS_MNG_COL_HE_0_8_SISO_ANT_B));
 
-    for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
-        nextColId = currCol->nextCols[i];
+  for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
+    nextColId = currCol->nextCols[i];
 
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngGetNextColId: Checking nextCol %d (column id %d)", i, nextColId);
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngGetNextColId: Checking nextCol %d (column id %d)",
+               i, nextColId);
 
-        if (RS_MNG_COL_INVALID == nextColId) {
-            DBG_PRINTF(
-                UT, TLC_OFFLOAD_DBG, INFO,
-                "_rsMngGetNextColId: invalid column. next columns are also invalid, so break");
-            break;  // if column is invalid, all the following ones will be invalid as well
-        }
-
-        if (0 == i && RS_MNG_MODUL_MIMO2 == currCol->mode) { nextColId += NON_SHARED_ANT_RFIC_ID; }
-
-        if (staInfo->visitedColumns & BIT(nextColId)) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngGetNextColId: This column was already visited. continue");
-            continue;
-        }
-
-        nextCol = &rsMngColumns[nextColId];
-        // when moving to a new column, if it's a non-HT column the bw must be 20. If attempting to
-        // leave non-HT, jump to the highest available width, otherwise keep the current bw (bw
-        // changes are handled separately)
-        *bw = (nextCol->mode == RS_MNG_MODUL_LEGACY
-                   ? CHANNEL_WIDTH20
-                   : (currCol->mode == RS_MNG_MODUL_LEGACY ? _rsMngGetMaxChWidth(staInfo)
-                                                           : _rsMngRateGetBw(&tblInfo->rsMngRate)));
-
-        if (!_rsMngIsColAllowed(staInfo, *bw, nextCol)) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngGetNextColId: column not allowed. continue");
-            continue;
-        }
-
-        if ((*rateIdx = _rsMngGetBestRate(staInfo, tblInfo, nextCol, *bw, targetTpt)) ==
-            RS_MNG_INVALID_RATE_IDX) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngGetNextColId: currTpt >= maxTpt of potential column. continue");
-            continue;
-        }
-
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngGetNextColId: Found potential column %d, with chosen bw %d and potential "
-                   "rateIdx %d",
-                   nextColId, *bw, *rateIdx);
-
-        // Found potential column.
-        break;
+    if (RS_MNG_COL_INVALID == nextColId) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngGetNextColId: invalid column. next columns are also invalid, so break");
+      break;  // if column is invalid, all the following ones will be invalid as well
     }
 
-    if (MAX_NEXT_COLUMNS == i) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngGetNextColId: Couldn't find a better column, staying in this one");
-        return RS_MNG_COL_INVALID;
+    if (0 == i && RS_MNG_MODUL_MIMO2 == currCol->mode) {
+      nextColId += NON_SHARED_ANT_RFIC_ID;
     }
 
-    return nextColId;
+    if (staInfo->visitedColumns & BIT(nextColId)) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngGetNextColId: This column was already visited. continue");
+      continue;
+    }
+
+    nextCol = &rsMngColumns[nextColId];
+    // when moving to a new column, if it's a non-HT column the bw must be 20. If attempting to
+    // leave non-HT, jump to the highest available width, otherwise keep the current bw (bw
+    // changes are handled separately)
+    *bw = (nextCol->mode == RS_MNG_MODUL_LEGACY
+               ? CHANNEL_WIDTH20
+               : (currCol->mode == RS_MNG_MODUL_LEGACY ? _rsMngGetMaxChWidth(staInfo)
+                                                       : _rsMngRateGetBw(&tblInfo->rsMngRate)));
+
+    if (!_rsMngIsColAllowed(staInfo, *bw, nextCol)) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngGetNextColId: column not allowed. continue");
+      continue;
+    }
+
+    if ((*rateIdx = _rsMngGetBestRate(staInfo, tblInfo, nextCol, *bw, targetTpt)) ==
+        RS_MNG_INVALID_RATE_IDX) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngGetNextColId: currTpt >= maxTpt of potential column. continue");
+      continue;
+    }
+
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngGetNextColId: Found potential column %d, with chosen bw %d and potential "
+               "rateIdx %d",
+               nextColId, *bw, *rateIdx);
+
+    // Found potential column.
+    break;
+  }
+
+  if (MAX_NEXT_COLUMNS == i) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngGetNextColId: Couldn't find a better column, staying in this one");
+    return RS_MNG_COL_INVALID;
+  }
+
+  return nextColId;
 }
 
 // rs_switch_to_column
 static void _rsMngSetUpSearchColData(RS_MNG_STA_INFO_S* staInfo, RS_MNG_COLUMN_DESC_E nextColId,
                                      U32 bw, U08 rateIdx) {
-    RS_MNG_SEARCH_COL_DATA* searchData = &staInfo->searchColData;
-    RS_MNG_RATE_S* rsMngRate = &searchData->rsMngRate;
-    const RS_MNG_COL_ELEM_S* nextCol = &rsMngColumns[nextColId];
+  RS_MNG_SEARCH_COL_DATA* searchData = &staInfo->searchColData;
+  RS_MNG_RATE_S* rsMngRate = &searchData->rsMngRate;
+  const RS_MNG_COL_ELEM_S* nextCol = &rsMngColumns[nextColId];
 
-    switch (nextCol->mode) {
+  switch (nextCol->mode) {
     case RS_MNG_MODUL_LEGACY:
-        _rsMngRateSetMode(rsMngRate, TLC_MNG_MODE_LEGACY);
-        _rsMngRateSetLdpc(rsMngRate, FALSE);
-        _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_LEGACY);
-        break;
+      _rsMngRateSetMode(rsMngRate, TLC_MNG_MODE_LEGACY);
+      _rsMngRateSetLdpc(rsMngRate, FALSE);
+      _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_LEGACY);
+      break;
     case RS_MNG_MODUL_SISO:
-        _rsMngRateSetMode(rsMngRate, staInfo->config.bestSuppMode);
-        _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_SISO);
-        break;
+      _rsMngRateSetMode(rsMngRate, staInfo->config.bestSuppMode);
+      _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_SISO);
+      break;
     case RS_MNG_MODUL_MIMO2:
-        _rsMngRateSetMode(rsMngRate, staInfo->config.bestSuppMode);
-        _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_MIMO2);
-        break;
+      _rsMngRateSetMode(rsMngRate, staInfo->config.bestSuppMode);
+      _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_MIMO2);
+      break;
     default:
-        WARN_ON(1);
-    }
+      WARN_ON(1);
+  }
 
-    if (nextCol->mode != RS_MNG_MODUL_LEGACY) {
-        _rsMngRateSetLdpc(rsMngRate, _rsMngIsLdpcAllowed(staInfo));
-    }
+  if (nextCol->mode != RS_MNG_MODUL_LEGACY) {
+    _rsMngRateSetLdpc(rsMngRate, _rsMngIsLdpcAllowed(staInfo));
+  }
 
-    _rsMngRateSetBw(rsMngRate, bw);
-    // Set the search rate according to the new column and station info
-    _rsMngRateSetGi(rsMngRate, nextCol->gi);
-    if (nextCol->mode == RS_MNG_MODUL_SISO && _rsMngIsStbcAllowed(staInfo, rsMngRate)) {
-        _rsMngRateSetStbc(rsMngRate, TRUE);
-        _rsMngRateSetAnt(rsMngRate, TLC_MNG_CHAIN_A_MSK | TLC_MNG_CHAIN_B_MSK);
-    } else {
-        _rsMngRateSetStbc(rsMngRate, FALSE);
-        _rsMngRateSetAnt(rsMngRate, nextCol->ant);
-    }
-    _rsMngRateSetIdx(rsMngRate, rateIdx);
+  _rsMngRateSetBw(rsMngRate, bw);
+  // Set the search rate according to the new column and station info
+  _rsMngRateSetGi(rsMngRate, nextCol->gi);
+  if (nextCol->mode == RS_MNG_MODUL_SISO && _rsMngIsStbcAllowed(staInfo, rsMngRate)) {
+    _rsMngRateSetStbc(rsMngRate, TRUE);
+    _rsMngRateSetAnt(rsMngRate, TLC_MNG_CHAIN_A_MSK | TLC_MNG_CHAIN_B_MSK);
+  } else {
+    _rsMngRateSetStbc(rsMngRate, FALSE);
+    _rsMngRateSetAnt(rsMngRate, nextCol->ant);
+  }
+  _rsMngRateSetIdx(rsMngRate, rateIdx);
 
-    _memclr(&searchData->win, sizeof(searchData->win));
-    searchData->column = nextColId;
-    searchData->expectedTpt = _rsMngGetExpectedTpt(staInfo, &rsMngColumns[nextColId], bw,
-                                                   !!staInfo->mvmsta->agg_tids, rateIdx);
-    _rsMngSetVisitedColumn(staInfo, nextColId);
+  _memclr(&searchData->win, sizeof(searchData->win));
+  searchData->column = nextColId;
+  searchData->expectedTpt = _rsMngGetExpectedTpt(staInfo, &rsMngColumns[nextColId], bw,
+                                                 !!staInfo->mvmsta->agg_tids, rateIdx);
+  _rsMngSetVisitedColumn(staInfo, nextColId);
 
-    DBG_PRINTF(
-        UT, TLC_OFFLOAD_DBG, INFO,
-        "_rsMngSetUpSearchColData: starting new col at rate index %d (visited columns: 0x%X)",
-        _rsMngRateGetIdx(rsMngRate), staInfo->visitedColumns);
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngSetUpSearchColData: starting new col at rate index %d (visited columns: 0x%X)",
+             _rsMngRateGetIdx(rsMngRate), staInfo->visitedColumns);
 }
 
 static RS_MNG_COLUMN_DESC_E _rsMngGetMatchingMimoColumn(RS_MNG_COLUMN_DESC_E col) {
-    switch (col) {
+  switch (col) {
     case RS_MNG_COL_SISO_ANT_A:
     case RS_MNG_COL_SISO_ANT_B:
-        return RS_MNG_COL_MIMO2;
+      return RS_MNG_COL_MIMO2;
     case RS_MNG_COL_SISO_ANT_A_SGI:
     case RS_MNG_COL_SISO_ANT_B_SGI:
-        return RS_MNG_COL_MIMO2_SGI;
+      return RS_MNG_COL_MIMO2_SGI;
     case RS_MNG_COL_HE_3_2_SISO_ANT_A:
     case RS_MNG_COL_HE_3_2_SISO_ANT_B:
-        return RS_MNG_COL_HE_3_2_MIMO;
+      return RS_MNG_COL_HE_3_2_MIMO;
     case RS_MNG_COL_HE_1_6_SISO_ANT_A:
     case RS_MNG_COL_HE_1_6_SISO_ANT_B:
-        return RS_MNG_COL_HE_1_6_MIMO;
+      return RS_MNG_COL_HE_1_6_MIMO;
     case RS_MNG_COL_HE_0_8_SISO_ANT_A:
     case RS_MNG_COL_HE_0_8_SISO_ANT_B:
-        return RS_MNG_COL_HE_0_8_MIMO;
+      return RS_MNG_COL_HE_0_8_MIMO;
     default:
-        return RS_MNG_COL_INVALID;
-    }
+      return RS_MNG_COL_INVALID;
+  }
 }
 
 static BOOLEAN _rsMngSearchBetterCol(RS_MNG_STA_INFO_S* staInfo, const RS_MNG_TBL_INFO_S* tblInfo,
                                      U32 currTpt) {
-    BOOLEAN ret = FALSE;
-    RS_MNG_COLUMN_DESC_E nextColId;
-    RS_MNG_COLUMN_DESC_E mimoCol;
-    U08 currRateIdx = _rsMngRateGetIdx(&tblInfo->rsMngRate);
-    U32 successRatio = tblInfo->win[currRateIdx].successRatio;
-    // set target_tpt:
-    // - if the current success ratio >= 85% -> keep the expected_tpt for this idx
-    // - if the success ratio is too low     -> revert to last_tpt (current ?)
-    U32 targetTpt = (successRatio <= RS_MNG_PERCENT(RS_MNG_SR_NO_DECREASE))
-                        ? currTpt
-                        : _rsMngGetExpectedTpt(staInfo, &rsMngColumns[tblInfo->column],
-                                               _rsMngRateGetBw(&tblInfo->rsMngRate),
-                                               !!(staInfo->mvmsta->agg_tids), currRateIdx);
-    U32 bw;
-    U08 rateIdx;
+  BOOLEAN ret = FALSE;
+  RS_MNG_COLUMN_DESC_E nextColId;
+  RS_MNG_COLUMN_DESC_E mimoCol;
+  U08 currRateIdx = _rsMngRateGetIdx(&tblInfo->rsMngRate);
+  U32 successRatio = tblInfo->win[currRateIdx].successRatio;
+  // set target_tpt:
+  // - if the current success ratio >= 85% -> keep the expected_tpt for this idx
+  // - if the success ratio is too low     -> revert to last_tpt (current ?)
+  U32 targetTpt = (successRatio <= RS_MNG_PERCENT(RS_MNG_SR_NO_DECREASE))
+                      ? currTpt
+                      : _rsMngGetExpectedTpt(staInfo, &rsMngColumns[tblInfo->column],
+                                             _rsMngRateGetBw(&tblInfo->rsMngRate),
+                                             !!(staInfo->mvmsta->agg_tids), currRateIdx);
+  U32 bw;
+  U08 rateIdx;
 
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngSearchBetterCol: starting search. currTpt=%d targetTpt=%d", currTpt, targetTpt);
+
+  // return column with expected better throughput, or invalid column if such doesn't exist
+  nextColId = _rsMngGetNextColId(staInfo, tblInfo, targetTpt, &bw, &rateIdx);
+  if (RS_MNG_COL_INVALID != nextColId) {
+    _rsMngSetUpSearchColData(staInfo, nextColId, bw, rateIdx);
+    ret = TRUE;
+  } else if (staInfo->searchBw != MAX_CHANNEL_BW_INDX) {
     DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngSearchBetterCol: starting search. currTpt=%d targetTpt=%d", currTpt,
-               targetTpt);
+               "_rsMngSearchBetterCol: checking if bw change could help. new bw %d",
+               staInfo->searchBw);
 
-    // return column with expected better throughput, or invalid column if such doesn't exist
-    nextColId = _rsMngGetNextColId(staInfo, tblInfo, targetTpt, &bw, &rateIdx);
-    if (RS_MNG_COL_INVALID != nextColId) {
-        _rsMngSetUpSearchColData(staInfo, nextColId, bw, rateIdx);
-        ret = TRUE;
-    } else if (staInfo->searchBw != MAX_CHANNEL_BW_INDX) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngSearchBetterCol: checking if bw change could help. new bw %d",
-                   staInfo->searchBw);
-
-        if ((rateIdx = _rsMngGetBestRate(staInfo, tblInfo, &rsMngColumns[staInfo->stableColumn],
-                                         staInfo->searchBw, targetTpt)) !=
-            RS_MNG_INVALID_RATE_IDX) {
-            nextColId = staInfo->stableColumn;
-        } else if ((mimoCol = _rsMngGetMatchingMimoColumn(staInfo->stableColumn)) !=
-                       RS_MNG_COL_INVALID &&
-                   (rateIdx = _rsMngGetBestRate(staInfo, tblInfo, &rsMngColumns[mimoCol],
-                                                staInfo->searchBw, targetTpt)) !=
-                       RS_MNG_INVALID_RATE_IDX) {
-            nextColId = mimoCol;
-        }
-
-        if (nextColId != RS_MNG_COL_INVALID &&
-            _rsMngIsColAllowed(staInfo, staInfo->searchBw, &rsMngColumns[nextColId])) {
-            _rsMngSetUpSearchColData(staInfo, nextColId, staInfo->searchBw, rateIdx);
-            ret = TRUE;
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngSearchBetterCol: trying col %d with the new bw", nextColId);
-        } else {
-            DBG_PRINTF(
-                UT, TLC_OFFLOAD_DBG, INFO,
-                "_rsMngSearchBetterCol: New bw can't improve tpt or isn't allowed, so not trying "
-                "it. col %d, allowed: %d",
-                nextColId,
-                nextColId == RS_MNG_COL_INVALID
-                    ? 0
-                    : _rsMngIsColAllowed(staInfo, staInfo->searchBw, &rsMngColumns[nextColId]));
-        }
+    if ((rateIdx = _rsMngGetBestRate(staInfo, tblInfo, &rsMngColumns[staInfo->stableColumn],
+                                     staInfo->searchBw, targetTpt)) != RS_MNG_INVALID_RATE_IDX) {
+      nextColId = staInfo->stableColumn;
+    } else if ((mimoCol = _rsMngGetMatchingMimoColumn(staInfo->stableColumn)) !=
+                   RS_MNG_COL_INVALID &&
+               (rateIdx = _rsMngGetBestRate(staInfo, tblInfo, &rsMngColumns[mimoCol],
+                                            staInfo->searchBw, targetTpt)) !=
+                   RS_MNG_INVALID_RATE_IDX) {
+      nextColId = mimoCol;
     }
 
-    return ret;
+    if (nextColId != RS_MNG_COL_INVALID &&
+        _rsMngIsColAllowed(staInfo, staInfo->searchBw, &rsMngColumns[nextColId])) {
+      _rsMngSetUpSearchColData(staInfo, nextColId, staInfo->searchBw, rateIdx);
+      ret = TRUE;
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngSearchBetterCol: trying col %d with the new bw",
+                 nextColId);
+    } else {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngSearchBetterCol: New bw can't improve tpt or isn't allowed, so not trying "
+                 "it. col %d, allowed: %d",
+                 nextColId,
+                 nextColId == RS_MNG_COL_INVALID
+                     ? 0
+                     : _rsMngIsColAllowed(staInfo, staInfo->searchBw, &rsMngColumns[nextColId]));
+    }
+  }
+
+  return ret;
 }
 
 static BOOLEAN _rsMngShouldStartUpscaleSearchCycle(const RS_MNG_STA_INFO_S* staInfo,
                                                    const RS_MNG_STA_LIMITS_S* staLimits,
                                                    unsigned long timeLastSearch) {
-    // isUpscaleSearchCycle here is referring to the type of the previous search cycle.
-    // This is here to prevent two consecutive upscale search cycles (i.e. started because of
-    // passing the successFramesLimit threshold) within too short a time.
-    return staInfo->totalFramesSuccess > staLimits->successFramesLimit &&
-           (!staInfo->isUpscaleSearchCycle ||
-            time_after(jiffies,
-                       timeLastSearch + usecs_to_jiffies(RS_MNG_UPSCALE_SEARCH_CYCLE_MAX_FREQ)));
+  // isUpscaleSearchCycle here is referring to the type of the previous search cycle.
+  // This is here to prevent two consecutive upscale search cycles (i.e. started because of
+  // passing the successFramesLimit threshold) within too short a time.
+  return staInfo->totalFramesSuccess > staLimits->successFramesLimit &&
+         (!staInfo->isUpscaleSearchCycle ||
+          time_after(jiffies,
+                     timeLastSearch + usecs_to_jiffies(RS_MNG_UPSCALE_SEARCH_CYCLE_MAX_FREQ)));
 }
 
 static BOOLEAN _rsMngShouldStartDownscaleSearchCycle(const RS_MNG_STA_INFO_S* staInfo,
                                                      const RS_MNG_STA_LIMITS_S* staLimits,
                                                      unsigned long timeLastSearch) {
-    return time_after(jiffies, timeLastSearch + usecs_to_jiffies(staLimits->statsFlushTimeLimit)) ||
-           staInfo->totalFramesFailed > staLimits->failedFramesLimit;
+  return time_after(jiffies, timeLastSearch + usecs_to_jiffies(staLimits->statsFlushTimeLimit)) ||
+         staInfo->totalFramesFailed > staLimits->failedFramesLimit;
 }
 
 static BOOLEAN _rsMngShouldStartSearchCycle(const RS_MNG_STA_INFO_S* staInfo,
                                             BOOLEAN* isUpscaleSearchCycle) {
-    const RS_MNG_RATE_S* rsMngRate = &staInfo->rateTblInfo.rsMngRate;
-    BOOLEAN isNonHt = _rsMngRateGetMode(rsMngRate) == TLC_MNG_MODE_LEGACY;
-    const RS_MNG_STA_LIMITS_S* staLimits = &g_rsMngStaModLimits[isNonHt];
-    unsigned long timeLastSearch = staInfo->lastSearchCycleEndTimeJiffies;
+  const RS_MNG_RATE_S* rsMngRate = &staInfo->rateTblInfo.rsMngRate;
+  BOOLEAN isNonHt = _rsMngRateGetMode(rsMngRate) == TLC_MNG_MODE_LEGACY;
+  const RS_MNG_STA_LIMITS_S* staLimits = &g_rsMngStaModLimits[isNonHt];
+  unsigned long timeLastSearch = staInfo->lastSearchCycleEndTimeJiffies;
 
-    if (_rsMngShouldStartUpscaleSearchCycle(staInfo, staLimits, timeLastSearch)) {
-        *isUpscaleSearchCycle = TRUE;
-        return TRUE;
-    }
+  if (_rsMngShouldStartUpscaleSearchCycle(staInfo, staLimits, timeLastSearch)) {
+    *isUpscaleSearchCycle = TRUE;
+    return TRUE;
+  }
 
-    if (_rsMngShouldStartDownscaleSearchCycle(staInfo, staLimits, timeLastSearch)) {
-        *isUpscaleSearchCycle = FALSE;
-        return TRUE;
-    }
+  if (_rsMngShouldStartDownscaleSearchCycle(staInfo, staLimits, timeLastSearch)) {
+    *isUpscaleSearchCycle = FALSE;
+    return TRUE;
+  }
 
-    return FALSE;
+  return FALSE;
 }
 
 static void _rsMngPrepareForBwChangeAttempt(RS_MNG_STA_INFO_S* staInfo,
                                             const RS_MNG_RATE_S* rsMngRate) {
-    BOOLEAN isNonHt = _rsMngRateGetMode(rsMngRate) == TLC_MNG_MODE_LEGACY;
-    RS_MCS_E mcs;
-    U32 bw;
-    U32 isMimo;
+  BOOLEAN isNonHt = _rsMngRateGetMode(rsMngRate) == TLC_MNG_MODE_LEGACY;
+  RS_MCS_E mcs;
+  U32 bw;
+  U32 isMimo;
 
-    staInfo->searchBw = MAX_CHANNEL_BW_INDX;
-    if (isNonHt) { return; }
+  staInfo->searchBw = MAX_CHANNEL_BW_INDX;
+  if (isNonHt) {
+    return;
+  }
 
-    mcs = _rsMngRateGetIdx(rsMngRate);
-    bw = _rsMngRateGetBw(rsMngRate);
-    isMimo = _rsMngRateGetModulation(rsMngRate) == RS_MNG_MODUL_MIMO2;
+  mcs = _rsMngRateGetIdx(rsMngRate);
+  bw = _rsMngRateGetBw(rsMngRate);
+  isMimo = _rsMngRateGetModulation(rsMngRate) == RS_MNG_MODUL_MIMO2;
 
-    if (mcs > g_rsMngDynBwStayMcs[bw][isMimo].highestStayMcs && _rsMngGetMaxChWidth(staInfo) > bw) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngPrepareForBwChangeAttempt: will try higher bandwidth (mcs %d, isMimo %d, "
-                   "bw %d->%d)",
-                   mcs, isMimo, bw, bw + 1);
-        staInfo->searchBw = bw + 1;
-    } else if (mcs < g_rsMngDynBwStayMcs[bw][isMimo].lowestStayMcs) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngPrepareForBwChangeAttempt: will try lower bandwidth (mcs %d, isMimo %d, "
-                   "bw %d->%d)",
-                   mcs, isMimo, bw, bw - 1);
-        staInfo->searchBw = bw - 1;
-    }
+  if (mcs > g_rsMngDynBwStayMcs[bw][isMimo].highestStayMcs && _rsMngGetMaxChWidth(staInfo) > bw) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngPrepareForBwChangeAttempt: will try higher bandwidth (mcs %d, isMimo %d, "
+               "bw %d->%d)",
+               mcs, isMimo, bw, bw + 1);
+    staInfo->searchBw = bw + 1;
+  } else if (mcs < g_rsMngDynBwStayMcs[bw][isMimo].lowestStayMcs) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngPrepareForBwChangeAttempt: will try lower bandwidth (mcs %d, isMimo %d, "
+               "bw %d->%d)",
+               mcs, isMimo, bw, bw - 1);
+    staInfo->searchBw = bw - 1;
+  }
 }
 
 static void _rsMngSetStayInCol(RS_MNG_STA_INFO_S* staInfo) {
-    staInfo->rsMngState = RS_MNG_STATE_STAY_IN_COLUMN;
-    staInfo->stableColumn = staInfo->rateTblInfo.column;
+  staInfo->rsMngState = RS_MNG_STATE_STAY_IN_COLUMN;
+  staInfo->stableColumn = staInfo->rateTblInfo.column;
 
-    staInfo->totalFramesFailed = 0;
-    staInfo->totalFramesSuccess = 0;
-    staInfo->lastSearchCycleEndTimeJiffies = jiffies;
-    staInfo->txedFrames = 0;
-    staInfo->visitedColumns = 0;
+  staInfo->totalFramesFailed = 0;
+  staInfo->totalFramesSuccess = 0;
+  staInfo->lastSearchCycleEndTimeJiffies = jiffies;
+  staInfo->txedFrames = 0;
+  staInfo->visitedColumns = 0;
 }
 
 static BOOLEAN _rsMngTryColumnSwitch(RS_MNG_STA_INFO_S* staInfo, U32 currTpt, BOOLEAN* updateHost) {
-    const RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
+  const RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
 
-    if (_rsMngSearchBetterCol(staInfo, tblInfo, currTpt)) {
-        staInfo->searchBetterTbl = TRUE;
-        *updateHost = FALSE;
+  if (_rsMngSearchBetterCol(staInfo, tblInfo, currTpt)) {
+    staInfo->searchBetterTbl = TRUE;
+    *updateHost = FALSE;
 
-        return TRUE;
-    } else {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngTryColumnSwitch: No potential column found, change state to "
-                   "RS_MNG_STATE_STAY_IN_COLUMN");
-        _rsMngSetStayInCol(staInfo);
-        *updateHost = TRUE;
+    return TRUE;
+  } else {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngTryColumnSwitch: No potential column found, change state to "
+               "RS_MNG_STATE_STAY_IN_COLUMN");
+    _rsMngSetStayInCol(staInfo);
+    *updateHost = TRUE;
 
-        return FALSE;
-    }
+    return FALSE;
+  }
 }
 
 static BOOLEAN _rsMngStartSearchCycle(RS_MNG_STA_INFO_S* staInfo, U32 currTpt,
                                       BOOLEAN* updateHost) {
-    RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
-    RS_MNG_RATE_S* rsMngRate = &staInfo->rateTblInfo.rsMngRate;
+  RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
+  RS_MNG_RATE_S* rsMngRate = &staInfo->rateTblInfo.rsMngRate;
 
-    staInfo->rsMngState = RS_MNG_STATE_SEARCH_CYCLE_STARTED;
-    _rsMngSetVisitedColumn(staInfo, tblInfo->column);
+  staInfo->rsMngState = RS_MNG_STATE_SEARCH_CYCLE_STARTED;
+  _rsMngSetVisitedColumn(staInfo, tblInfo->column);
 
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngStartSearchCycle: Moving to state SEARCH_CYCLE_STARTED, upscale search cycle "
-               "%d, visited cols bitmask: 0x%X, curr rate: 0x%X",
-               staInfo->isUpscaleSearchCycle, staInfo->visitedColumns,
-               rsMngRate->rate.rate_n_flags);
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngStartSearchCycle: Moving to state SEARCH_CYCLE_STARTED, upscale search cycle "
+             "%d, visited cols bitmask: 0x%X, curr rate: 0x%X",
+             staInfo->isUpscaleSearchCycle, staInfo->visitedColumns, rsMngRate->rate.rate_n_flags);
 
-    // If we're in HT/VHT/HE, we may want to test a different bandwidth during the search cycle.
-    // According to requirements, this is decided based on the reason the search cycle started
-    // (upscale or downscale) and the configuration and mcs when the sycle begins.
-    // The new bandwidth will be tested on the column from the start of the search cycle.
-    // Do that logic now.
-    _rsMngPrepareForBwChangeAttempt(staInfo, rsMngRate);
+  // If we're in HT/VHT/HE, we may want to test a different bandwidth during the search cycle.
+  // According to requirements, this is decided based on the reason the search cycle started
+  // (upscale or downscale) and the configuration and mcs when the cycle begins.
+  // The new bandwidth will be tested on the column from the start of the search cycle.
+  // Do that logic now.
+  _rsMngPrepareForBwChangeAttempt(staInfo, rsMngRate);
 
-    return _rsMngTryColumnSwitch(staInfo, currTpt, updateHost);
+  return _rsMngTryColumnSwitch(staInfo, currTpt, updateHost);
 }
 
 static void _rsMngSwitchToSearchCol(RS_MNG_STA_INFO_S* staInfo) {
-    RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
-    RS_MNG_SEARCH_COL_DATA* searchData = &staInfo->searchColData;
+  RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
+  RS_MNG_SEARCH_COL_DATA* searchData = &staInfo->searchColData;
 
-    tblInfo->rsMngRate = searchData->rsMngRate;
-    _rsMngClearTblWindows(staInfo);
-    tblInfo->win[_rsMngRateGetIdx(&searchData->rsMngRate)] = searchData->win;
-    tblInfo->column = searchData->column;
+  tblInfo->rsMngRate = searchData->rsMngRate;
+  _rsMngClearTblWindows(staInfo);
+  tblInfo->win[_rsMngRateGetIdx(&searchData->rsMngRate)] = searchData->win;
+  tblInfo->column = searchData->column;
 }
 
 static BOOLEAN _rsMngTryScaleWithinColumn(RS_MNG_STA_INFO_S* staInfo, RS_MNG_WIN_STAT_S* currWin,
                                           BOOLEAN* updateHost) {
-    RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
-    RS_MNG_ACTION_E action;
-    U08 newIdx = RS_MNG_INVALID_RATE_IDX;
+  RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
+  RS_MNG_ACTION_E action;
+  U08 newIdx = RS_MNG_INVALID_RATE_IDX;
 
-    action = _rsMngSearchBetterStartRate(staInfo, currWin, &tblInfo->rsMngRate, &newIdx);
-    if (action == RS_MNG_ACTION_UPSCALE) {
-        if (staInfo->rsMngState == RS_MNG_STATE_SEARCH_CYCLE_STARTED ||
-            time_after(jiffies, staInfo->lastRateUpscaleTimeJiffies +
-                                    usecs_to_jiffies(RS_MNG_UPSCALE_MAX_FREQUENCY))) {
-            // Rate upscaling could happen both during a search cycle and while in STAY_IN_COLUMN
-            // state. When in stay in column the upscaling frequency is limited to no more than once
-            // per 100ms. When in search cycle there is no limit because search cycles need to be as
-            // short as possible.
-            staInfo->lastRateUpscaleTimeJiffies = jiffies;
-        } else {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngTryScaleWithinColumn: not upscaling because not enough time passed "
-                       "since last upscale (%d usec)",
-                       systemTimeGetUsecDiffTime(staInfo->lastRateUpscaleTimeJiffies));
-            action = RS_MNG_ACTION_STAY;
-        }
+  action = _rsMngSearchBetterStartRate(staInfo, currWin, &tblInfo->rsMngRate, &newIdx);
+  if (action == RS_MNG_ACTION_UPSCALE) {
+    if (staInfo->rsMngState == RS_MNG_STATE_SEARCH_CYCLE_STARTED ||
+        time_after(jiffies, staInfo->lastRateUpscaleTimeJiffies +
+                                usecs_to_jiffies(RS_MNG_UPSCALE_MAX_FREQUENCY))) {
+      // Rate upscaling could happen both during a search cycle and while in STAY_IN_COLUMN
+      // state. When in stay in column the upscaling frequency is limited to no more than once
+      // per 100ms. When in search cycle there is no limit because search cycles need to be as
+      // short as possible.
+      staInfo->lastRateUpscaleTimeJiffies = jiffies;
+    } else {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngTryScaleWithinColumn: not upscaling because not enough time passed "
+                 "since last upscale (%d usec)",
+                 systemTimeGetUsecDiffTime(staInfo->lastRateUpscaleTimeJiffies));
+      action = RS_MNG_ACTION_STAY;
     }
+  }
 
-    staInfo->tryingRateUpscale = (U08)(action == RS_MNG_ACTION_UPSCALE);
+  staInfo->tryingRateUpscale = (U08)(action == RS_MNG_ACTION_UPSCALE);
 
-    if (action != RS_MNG_ACTION_STAY) {
-        _rsMngRateSetIdx(&tblInfo->rsMngRate, newIdx);
-        return TRUE;
-    }
+  if (action != RS_MNG_ACTION_STAY) {
+    _rsMngRateSetIdx(&tblInfo->rsMngRate, newIdx);
+    return TRUE;
+  }
 
-    return FALSE;
+  return FALSE;
 }
 
 static void _rsMngLookForNextSearchRate(RS_MNG_STA_INFO_S* staInfo, RS_MNG_WIN_STAT_S* currWin,
                                         BOOLEAN* updateHost) {
-    // During a search cycle first search for the optimal rate within the current column, and once
-    // that is found (i.e. _rsMngTryScaleWithinColumn returns FALSE), search for a better column to
-    // switch to.
-    if (!_rsMngTryScaleWithinColumn(staInfo, currWin, updateHost)) {
-        _rsMngTryColumnSwitch(staInfo, currWin->averageTpt, updateHost);
-    }
+  // During a search cycle first search for the optimal rate within the current column, and once
+  // that is found (i.e. _rsMngTryScaleWithinColumn returns FALSE), search for a better column to
+  // switch to.
+  if (!_rsMngTryScaleWithinColumn(staInfo, currWin, updateHost)) {
+    _rsMngTryColumnSwitch(staInfo, currWin->averageTpt, updateHost);
+  }
 }
 
 typedef enum _RS_MNG_TPC_ALLOWED_REASON_E {
-    RS_MNG_TPC_DISALLOWED_DEBUG_HOOK,
-    RS_MNG_TPC_DISALLOWED_SLEEP_DISALLOWED,
-    RS_MNG_TPC_DISALLOWED_IN_SEARCH_CYCLE,
-    RS_MNG_TPC_DISALLOWED_RATE_IS_NON_HT,
-    RS_MNG_TPC_DISALLOWED_RATE_IS_NOT_OPTIMAL,
-    RS_MNG_TPC_DISALLOWED_TEST_RATE,
-    RS_MNG_TPC_DISALLOWED_AMSDU_INACTIVE,
-    RS_MNG_TPC_DISALLOWED_AMSDU_TIME_LIMIT,
-    RS_MNG_TPC_ALLOWED,
+  RS_MNG_TPC_DISALLOWED_DEBUG_HOOK,
+  RS_MNG_TPC_DISALLOWED_SLEEP_DISALLOWED,
+  RS_MNG_TPC_DISALLOWED_IN_SEARCH_CYCLE,
+  RS_MNG_TPC_DISALLOWED_RATE_IS_NON_HT,
+  RS_MNG_TPC_DISALLOWED_RATE_IS_NOT_OPTIMAL,
+  RS_MNG_TPC_DISALLOWED_TEST_RATE,
+  RS_MNG_TPC_DISALLOWED_AMSDU_INACTIVE,
+  RS_MNG_TPC_DISALLOWED_AMSDU_TIME_LIMIT,
+  RS_MNG_TPC_ALLOWED,
 } RS_MNG_TPC_ALLOWED_REASON_E;
 
 static RS_MNG_TPC_ALLOWED_REASON_E _rsMngTpcAllowed(const RS_MNG_STA_INFO_S* staInfo,
                                                     U32* tpcAllowedData) {
-    const RS_MNG_RATE_S* rsMngRate = &staInfo->rateTblInfo.rsMngRate;
-    TLC_MNG_MODE_E mode;
-    RS_MNG_TX_AMSDU_SIZE_E amsduSize;
+  const RS_MNG_RATE_S* rsMngRate = &staInfo->rateTblInfo.rsMngRate;
+  TLC_MNG_MODE_E mode;
+  RS_MNG_TX_AMSDU_SIZE_E amsduSize;
 
-    if (!PWR_IS_SLEEP_ALLOWED) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngTpcAllowed: TPC disallowed because sleep disallowed");
-        return RS_MNG_TPC_DISALLOWED_SLEEP_DISALLOWED;
+  if (!PWR_IS_SLEEP_ALLOWED) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngTpcAllowed: TPC disallowed because sleep disallowed");
+    return RS_MNG_TPC_DISALLOWED_SLEEP_DISALLOWED;
+  }
+
+  if (staInfo->rsMngState == RS_MNG_STATE_SEARCH_CYCLE_STARTED) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngTpcAllowed: TPC disallowed because in search cycle");
+    return RS_MNG_TPC_DISALLOWED_IN_SEARCH_CYCLE;
+  }
+
+  mode = _rsMngRateGetMode(rsMngRate);
+  if (mode == TLC_MNG_MODE_LEGACY) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngTpcAllowed: TPC disallowed because in non-HT rate");
+    return RS_MNG_TPC_DISALLOWED_RATE_IS_NON_HT;
+  }
+
+  if (!_rsMngRateIsOptimal(staInfo, rsMngRate)) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngTpcAllowed: TPC disallowed because rate is not optimal");
+    *tpcAllowedData = rsMngRate->rate.rate_n_flags;
+    return RS_MNG_TPC_DISALLOWED_RATE_IS_NOT_OPTIMAL;
+  }
+
+  if (staInfo->tryingRateUpscale) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngTpcAllowed: TPC disallowed because current rate is test rate");
+    return RS_MNG_TPC_DISALLOWED_TEST_RATE;
+  }
+
+  amsduSize =
+      _rsMngAmsduSize(staInfo, mode, _rsMngRateGetBw(rsMngRate), _rsMngRateGetIdx(rsMngRate),
+                      _rsMngRateGetGi(rsMngRate), _rsMngRateGetModulation(rsMngRate));
+  if (staInfo->amsduSupport && amsduSize != RS_MNG_AMSDU_INVALID) {
+    // amsdu is supported in general, and specifically for this rate (i.e. the phy rate of the
+    // optimal rate is above the amsdu min phy rate threshold)
+    if (staInfo->amsduEnabledSize == RS_MNG_AMSDU_INVALID) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngTpcAllowed: TPC disallowed because amsdu is not yet active");
+      return RS_MNG_TPC_DISALLOWED_AMSDU_INACTIVE;
     }
 
-    if (staInfo->rsMngState == RS_MNG_STATE_SEARCH_CYCLE_STARTED) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngTpcAllowed: TPC disallowed because in search cycle");
-        return RS_MNG_TPC_DISALLOWED_IN_SEARCH_CYCLE;
+    if (time_before(jiffies,
+                    staInfo->lastEnableJiffies + usecs_to_jiffies(RS_MNG_TPC_AMSDU_ENABLE))) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngTpcAllowed: TPC disallowed because not enough time elapsed since "
+                 "amsdu enablement. time elapsed: %u",
+                 time);
+      return RS_MNG_TPC_DISALLOWED_AMSDU_TIME_LIMIT;
     }
+  }
 
-    mode = _rsMngRateGetMode(rsMngRate);
-    if (mode == TLC_MNG_MODE_LEGACY) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngTpcAllowed: TPC disallowed because in non-HT rate");
-        return RS_MNG_TPC_DISALLOWED_RATE_IS_NON_HT;
-    }
-
-    if (!_rsMngRateIsOptimal(staInfo, rsMngRate)) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngTpcAllowed: TPC disallowed because rate is not optimal");
-        *tpcAllowedData = rsMngRate->rate.rate_n_flags;
-        return RS_MNG_TPC_DISALLOWED_RATE_IS_NOT_OPTIMAL;
-    }
-
-    if (staInfo->tryingRateUpscale) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngTpcAllowed: TPC disallowed because current rate is test rate");
-        return RS_MNG_TPC_DISALLOWED_TEST_RATE;
-    }
-
-    amsduSize =
-        _rsMngAmsduSize(staInfo, mode, _rsMngRateGetBw(rsMngRate), _rsMngRateGetIdx(rsMngRate),
-                        _rsMngRateGetGi(rsMngRate), _rsMngRateGetModulation(rsMngRate));
-    if (staInfo->amsduSupport && amsduSize != RS_MNG_AMSDU_INVALID) {
-        // amsdu is supported in general, and specifically for this rate (i.e. the phy rate of the
-        // optimal rate is above the amsdu min phy rate threshold)
-        if (staInfo->amsduEnabledSize == RS_MNG_AMSDU_INVALID) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngTpcAllowed: TPC disallowed because amsdu is not yet active");
-            return RS_MNG_TPC_DISALLOWED_AMSDU_INACTIVE;
-        }
-
-        if (time_before(jiffies,
-                        staInfo->lastEnableJiffies + usecs_to_jiffies(RS_MNG_TPC_AMSDU_ENABLE))) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngTpcAllowed: TPC disallowed because not enough time elapsed since "
-                       "amsdu enablement. time elapsed: %u",
-                       time);
-            return RS_MNG_TPC_DISALLOWED_AMSDU_TIME_LIMIT;
-        }
-    }
-
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngTpcAllowed: TPC is allowed");
-    return RS_MNG_TPC_ALLOWED;
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngTpcAllowed: TPC is allowed");
+  return RS_MNG_TPC_ALLOWED;
 }
 
 typedef enum _RS_MNG_TPC_ACTION {
-    RS_MNG_TPC_ACTION_STAY,
-    RS_MNG_TPC_ACTION_INCREASE,
-    RS_MNG_TPC_ACTION_DECREASE,
-    RS_MNG_TPC_ACTION_DISABLE,
+  RS_MNG_TPC_ACTION_STAY,
+  RS_MNG_TPC_ACTION_INCREASE,
+  RS_MNG_TPC_ACTION_DECREASE,
+  RS_MNG_TPC_ACTION_DISABLE,
 } RS_MNG_TPC_ACTION;
 
 static RS_MNG_TPC_ACTION _rsMngTpcGetAction(const RS_MNG_STA_INFO_S* staInfo) {
-    const RS_MNG_TPC_TBL_S* tpcTbl = &staInfo->tpcTable;
-    U32 currStepSR;
-    U08 currStep = tpcTbl->currStep;
-    BOOLEAN tpcInactive = currStep == RS_MNG_TPC_INACTIVE;
+  const RS_MNG_TPC_TBL_S* tpcTbl = &staInfo->tpcTable;
+  U32 currStepSR;
+  U08 currStep = tpcTbl->currStep;
+  BOOLEAN tpcInactive = currStep == RS_MNG_TPC_INACTIVE;
 
-    if (WARN_ON(!(currStep < RS_MNG_TPC_DISABLED))) { return RS_MNG_TPC_ACTION_STAY; }
-
-    if (!tpcInactive) {
-        currStepSR = tpcTbl->windows[currStep].successRatio;
-    } else {
-        currStepSR = staInfo->rateTblInfo.win[_rsMngRateGetIdx(&staInfo->rateTblInfo.rsMngRate)]
-                         .successRatio;
-    }
-
-    if (!tpcInactive) {
-        // if testing is true, then we are now operating on results from a tpc_action_increase test
-        // window. In this case we don't want to completely disable tpc even if the current SR is
-        // relatively bad. Instead we will just decrease back to the last good step.
-        if (currStepSR <= RS_MNG_TPC_SR_DISABLE && !tpcTbl->testing) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngTpcGetAction: DISABLING tpc. currStep = %d, currStepSR = %d",
-                       currStep, currStepSR);
-            return RS_MNG_TPC_ACTION_DISABLE;
-        }
-        if (currStepSR <= RS_MNG_TPC_SR_DECREASE) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngTpcGetAction: DECREASING tpc. currStep = %d, currStepSR = %d",
-                       currStep, currStepSR);
-            return RS_MNG_TPC_ACTION_DECREASE;
-        }
-    }
-
-    if (tpcInactive || currStep < RS_MNG_TPC_NUM_STEPS - 1) {
-        U08 higherStep = (U08)(tpcInactive ? 0 : currStep + 1);
-        U32 higherStepSR = tpcTbl->windows[higherStep].successRatio;
-
-        // only attempt to increase power reduction if this hasn't been tried since the last time
-        // statistics for the higher step were cleared
-        if (currStepSR >= RS_MNG_TPC_SR_INCREASE && higherStepSR == RS_MNG_INVALID_VAL) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngTpcGetAction: INCREASING tpc. currStep = %d, currStepSR = %d",
-                       currStep, currStepSR);
-            return RS_MNG_TPC_ACTION_INCREASE;
-        }
-    }
-
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngTpcGetAction: STAY tpc. currStep = %d, currStepSR = %d", currStep,
-               currStepSR);
+  if (WARN_ON(!(currStep < RS_MNG_TPC_DISABLED))) {
     return RS_MNG_TPC_ACTION_STAY;
+  }
+
+  if (!tpcInactive) {
+    currStepSR = tpcTbl->windows[currStep].successRatio;
+  } else {
+    currStepSR =
+        staInfo->rateTblInfo.win[_rsMngRateGetIdx(&staInfo->rateTblInfo.rsMngRate)].successRatio;
+  }
+
+  if (!tpcInactive) {
+    // if testing is true, then we are now operating on results from a tpc_action_increase test
+    // window. In this case we don't want to completely disable tpc even if the current SR is
+    // relatively bad. Instead we will just decrease back to the last good step.
+    if (currStepSR <= RS_MNG_TPC_SR_DISABLE && !tpcTbl->testing) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngTpcGetAction: DISABLING tpc. currStep = %d, currStepSR = %d", currStep,
+                 currStepSR);
+      return RS_MNG_TPC_ACTION_DISABLE;
+    }
+    if (currStepSR <= RS_MNG_TPC_SR_DECREASE) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngTpcGetAction: DECREASING tpc. currStep = %d, currStepSR = %d", currStep,
+                 currStepSR);
+      return RS_MNG_TPC_ACTION_DECREASE;
+    }
+  }
+
+  if (tpcInactive || currStep < RS_MNG_TPC_NUM_STEPS - 1) {
+    U08 higherStep = (U08)(tpcInactive ? 0 : currStep + 1);
+    U32 higherStepSR = tpcTbl->windows[higherStep].successRatio;
+
+    // only attempt to increase power reduction if this hasn't been tried since the last time
+    // statistics for the higher step were cleared
+    if (currStepSR >= RS_MNG_TPC_SR_INCREASE && higherStepSR == RS_MNG_INVALID_VAL) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngTpcGetAction: INCREASING tpc. currStep = %d, currStepSR = %d", currStep,
+                 currStepSR);
+      return RS_MNG_TPC_ACTION_INCREASE;
+    }
+  }
+
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngTpcGetAction: STAY tpc. currStep = %d, currStepSR = %d", currStep, currStepSR);
+  return RS_MNG_TPC_ACTION_STAY;
 }
 
 static void _rsMngTpcDoAction(RS_MNG_STA_INFO_S* staInfo, RS_MNG_TPC_ACTION action) {
-    RS_MNG_TPC_TBL_S* tpcTbl = &staInfo->tpcTable;
+  RS_MNG_TPC_TBL_S* tpcTbl = &staInfo->tpcTable;
 
-    switch (action) {
+  switch (action) {
     case RS_MNG_TPC_ACTION_INCREASE:
-        if (tpcTbl->currStep == RS_MNG_TPC_INACTIVE) {
-            tpcTbl->currStep = 0;
-        } else {
-            tpcTbl->currStep++;
-        }
-        tpcTbl->testing = TRUE;
-        break;
+      if (tpcTbl->currStep == RS_MNG_TPC_INACTIVE) {
+        tpcTbl->currStep = 0;
+      } else {
+        tpcTbl->currStep++;
+      }
+      tpcTbl->testing = TRUE;
+      break;
     case RS_MNG_TPC_ACTION_DECREASE:
-        if (tpcTbl->currStep > 0) {
-            tpcTbl->currStep--;
-        } else {
-            tpcTbl->currStep = RS_MNG_TPC_INACTIVE;
-        }
-        tpcTbl->testing = FALSE;
-        break;
-    case RS_MNG_TPC_ACTION_DISABLE:
+      if (tpcTbl->currStep > 0) {
+        tpcTbl->currStep--;
+      } else {
         tpcTbl->currStep = RS_MNG_TPC_INACTIVE;
+      }
+      tpcTbl->testing = FALSE;
+      break;
+    case RS_MNG_TPC_ACTION_DISABLE:
+      tpcTbl->currStep = RS_MNG_TPC_INACTIVE;
     /* fall through */
     case RS_MNG_TPC_ACTION_STAY:
     /* fall through */
     default:
-        tpcTbl->testing = FALSE;
-        break;
-    }
+      tpcTbl->testing = FALSE;
+      break;
+  }
 }
 
 static void _rsMngHandleBwChange(RS_MNG_STA_INFO_S* staInfo) {
-    RS_MNG_SEARCH_COL_DATA* searchData = &staInfo->searchColData;
-    RS_MNG_RATE_S* prevRate = &staInfo->rateTblInfo.rsMngRate;
+  RS_MNG_SEARCH_COL_DATA* searchData = &staInfo->searchColData;
+  RS_MNG_RATE_S* prevRate = &staInfo->rateTblInfo.rsMngRate;
 
-    // check if this was a bandwidth search
-    if (_rsMngRateGetModulation(&searchData->rsMngRate) != RS_MNG_MODUL_LEGACY &&
-        staInfo->stableColumn >= RS_MNG_COL_FIRST_HT_VHT &&
-        _rsMngRateGetBw(&searchData->rsMngRate) != _rsMngRateGetBw(prevRate)) {
-        staInfo->searchBw = MAX_CHANNEL_BW_INDX;
+  // check if this was a bandwidth search
+  if (_rsMngRateGetModulation(&searchData->rsMngRate) != RS_MNG_MODUL_LEGACY &&
+      staInfo->stableColumn >= RS_MNG_COL_FIRST_HT_VHT &&
+      _rsMngRateGetBw(&searchData->rsMngRate) != _rsMngRateGetBw(prevRate)) {
+    staInfo->searchBw = MAX_CHANNEL_BW_INDX;
 
-        // attempting a bandwidth change is always the very last configuration change attempt in a
-        // search cycle. mark all columns as visited in order to make sure the search cycle will
-        // end.
-        staInfo->visitedColumns = (U32)-1;
-    }
+    // attempting a bandwidth change is always the very last configuration change attempt in a
+    // search cycle. mark all columns as visited in order to make sure the search cycle will
+    // end.
+    staInfo->visitedColumns = (U32)-1;
+  }
 }
 
 static BOOLEAN _rsMngHandleBtCoex(const RS_MNG_STA_INFO_S* staInfo, RS_MNG_RATE_S* rate,
                                   RS_MNG_COLUMN_DESC_E* col) {
+  /*
+   * BT-Coex only restricts use of the shared antenna if the rate is a SISO non-STBC rate. In case
+   * of MIMO (or SISO with STBC) nothing needs to be done.
+   * So check here only if the column uses only the shared antenna.
+   */
+  if (rsMngColumns[*col].ant != BT_COEX_SHARED_ANT_ID) {
+    return FALSE;
+  }
+
+  if (!_rsMngRateGetStbc(rate)) {
+    U08 singleAnt = rsMngGetSingleAntMsk(staInfo->config.chainsEnabled);
+
     /*
-     * BT-Coex only restricts use of the shared antenna if the rate is a SISO non-STBC rate. In case
-     * of MIMO (or SISO with STBC) nothing needs to be done.
-     * So check here only if the column uses only the shared antenna.
+     * Set the antenna to the non-shared one if possible.
+     * In case of SAR limitation, for example, it may be that BT is in a high activity grading
+     * and would prefer that wifi use the other antenna, but due to the SAR restriction this is
+     * not possible. Because rsMngGetSingleAntMsk always returns the non-shared antenna if it's
+     * available (i.e. enabled by configuration and not restricted due to SAR limitation), using
+     * the antenna returned by that function handles this type of case too.
      */
-    if (rsMngColumns[*col].ant != BT_COEX_SHARED_ANT_ID) { return FALSE; }
-
-    if (!_rsMngRateGetStbc(rate)) {
-        U08 singleAnt = rsMngGetSingleAntMsk(staInfo->config.chainsEnabled);
-
-        /*
-         * Set the antenna to the non-shared one if possible.
-         * In case of SAR limitation, for example, it may be that BT is in a high activity grading
-         * and would prefer that wifi use the other antenna, but due to the SAR restriction this is
-         * not possible. Because rsMngGetSingleAntMsk always returns the non-shared antenna if it's
-         * available (i.e. enabled by configuration and not restricted due to SAR limitation), using
-         * the antenna returned by that function handles this type of case too.
-         */
-        if (_rsMngRateGetAnt(rate) == singleAnt) { return FALSE; }
-
-        _rsMngRateSetAnt(rate, singleAnt);
-        *col ^= 1;
-        return TRUE;
+    if (_rsMngRateGetAnt(rate) == singleAnt) {
+      return FALSE;
     }
 
-    /*
-     * Just to avoid reaching here again the next time around, set the column to the non-shared-ant
-     * one. In reality there's no difference because with STBC both antennas are used in any case.
-     * The xor here does the trick thanks to the compilation asserts near _rsMngSetVisitedColumn.
-     */
+    _rsMngRateSetAnt(rate, singleAnt);
     *col ^= 1;
+    return TRUE;
+  }
 
-    return FALSE;
+  /*
+   * Just to avoid reaching here again the next time around, set the column to the non-shared-ant
+   * one. In reality there's no difference because with STBC both antennas are used in any case.
+   * The xor here does the trick thanks to the compilation asserts near _rsMngSetVisitedColumn.
+   */
+  *col ^= 1;
+
+  return FALSE;
 }
 
 static void _rsMngRateScalePerform(RS_MNG_STA_INFO_S* staInfo, BOOLEAN forceUpdate) {
-    RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
-    RS_MNG_WIN_STAT_S* currWin;
-    BOOLEAN updateLmac = forceUpdate, updateHost = FALSE;
-    RS_MNG_TPC_ALLOWED_REASON_E tpcAllowed;
-    U32 tpcAllowedData = 0;
+  RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
+  RS_MNG_WIN_STAT_S* currWin;
+  BOOLEAN updateLmac = forceUpdate, updateHost = FALSE;
+  RS_MNG_TPC_ALLOWED_REASON_E tpcAllowed;
+  U32 tpcAllowedData = 0;
 
-    if (_rsMngCoexIsLongAggAllowed(staInfo) != staInfo->longAggEnabled) { updateLmac = TRUE; }
+  if (_rsMngCoexIsLongAggAllowed(staInfo) != staInfo->longAggEnabled) {
+    updateLmac = TRUE;
+  }
 
-    if (btCoexManagerBtOwnsAnt(staInfo->mvm)) {
-        updateLmac |= _rsMngHandleBtCoex(staInfo, &tblInfo->rsMngRate, &tblInfo->column);
+  if (btCoexManagerBtOwnsAnt(staInfo->mvm)) {
+    updateLmac |= _rsMngHandleBtCoex(staInfo, &tblInfo->rsMngRate, &tblInfo->column);
 
-        /*
-         * If these stats are on a search rate, and that rate just happens to be siso on the shared
-         * antenna, change it to the non-shared one too.
-         */
-        if (staInfo->rsMngState == RS_MNG_STATE_SEARCH_CYCLE_STARTED && staInfo->searchBetterTbl) {
-            _rsMngHandleBtCoex(staInfo, &staInfo->searchColData.rsMngRate,
-                               &staInfo->searchColData.column);
-        }
+    /*
+     * If these stats are on a search rate, and that rate just happens to be siso on the shared
+     * antenna, change it to the non-shared one too.
+     */
+    if (staInfo->rsMngState == RS_MNG_STATE_SEARCH_CYCLE_STARTED && staInfo->searchBetterTbl) {
+      _rsMngHandleBtCoex(staInfo, &staInfo->searchColData.rsMngRate,
+                         &staInfo->searchColData.column);
     }
+  }
 
-    if (staInfo->rsMngState == RS_MNG_STATE_SEARCH_CYCLE_STARTED) {
-        updateLmac = TRUE;
+  if (staInfo->rsMngState == RS_MNG_STATE_SEARCH_CYCLE_STARTED) {
+    updateLmac = TRUE;
 
-        if (staInfo->searchBetterTbl) {
-            // Last rate sent to lmac was of a test for a new column. Check if it's any good.
-            U32 stableRateTpt = tblInfo->win[_rsMngRateGetIdx(&tblInfo->rsMngRate)].averageTpt;
+    if (staInfo->searchBetterTbl) {
+      // Last rate sent to lmac was of a test for a new column. Check if it's any good.
+      U32 stableRateTpt = tblInfo->win[_rsMngRateGetIdx(&tblInfo->rsMngRate)].averageTpt;
 
-            staInfo->searchBetterTbl = FALSE;
-            currWin = &staInfo->searchColData.win;
+      staInfo->searchBetterTbl = FALSE;
+      currWin = &staInfo->searchColData.win;
 
-            _rsMngHandleBwChange(staInfo);
+      _rsMngHandleBwChange(staInfo);
 
-            if (currWin->averageTpt >= stableRateTpt) {
-                // Yay! The search column is better! switch over to it.
-                DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                           "_rsMngRateScalePerform: search col Tpt %d > lastTpt %d. swapping to "
-                           "the search table",
-                           currWin->averageTpt, stableRateTpt);
+      if (currWin->averageTpt >= stableRateTpt) {
+        // Yay! The search column is better! switch over to it.
+        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                   "_rsMngRateScalePerform: search col Tpt %d > lastTpt %d. swapping to "
+                   "the search table",
+                   currWin->averageTpt, stableRateTpt);
 
-                _rsMngSwitchToSearchCol(staInfo);
+        _rsMngSwitchToSearchCol(staInfo);
 
-                _rsMngLookForNextSearchRate(staInfo, currWin, &updateHost);
-            } else {
-                // The search col didn't turn out to improve tpt, so search for another new column.
-                DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                           "_rsMngRateScalePerform: search col Tpt %d <= lastTpt %d. Searching for "
-                           "another column",
-                           currWin->averageTpt, stableRateTpt);
+        _rsMngLookForNextSearchRate(staInfo, currWin, &updateHost);
+      } else {
+        // The search col didn't turn out to improve tpt, so search for another new column.
+        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                   "_rsMngRateScalePerform: search col Tpt %d <= lastTpt %d. Searching for "
+                   "another column",
+                   currWin->averageTpt, stableRateTpt);
 
-                _rsMngTryColumnSwitch(staInfo, stableRateTpt, &updateHost);
-            }
-        } else {
-            // In the middle of searching for an optimal rate within a column. Continue this effort.
-            currWin = &tblInfo->win[_rsMngRateGetIdx(&tblInfo->rsMngRate)];
-
-            _rsMngLookForNextSearchRate(staInfo, currWin, &updateHost);
-        }
+        _rsMngTryColumnSwitch(staInfo, stableRateTpt, &updateHost);
+      }
     } else {
-        // Not in a search cycle.
+      // In the middle of searching for an optimal rate within a column. Continue this effort.
+      currWin = &tblInfo->win[_rsMngRateGetIdx(&tblInfo->rsMngRate)];
 
-        if (!_rsMngTpcIsActive(staInfo)) {
-            // If TPC is active (i.e. actually reducing Tx power), we don't try to scale within the
-            // column, will only do tpc scaling farther down in this function.
+      _rsMngLookForNextSearchRate(staInfo, currWin, &updateHost);
+    }
+  } else {
+    // Not in a search cycle.
 
-            // If previous rate was an upscale test, need to update lmac regardless of the next rate
-            // because even if it's the same rate again, need to send the lq command without the
-            // test-rate bit set.
-            updateLmac |= staInfo->tryingRateUpscale;
+    if (!_rsMngTpcIsActive(staInfo)) {
+      // If TPC is active (i.e. actually reducing Tx power), we don't try to scale within the
+      // column, will only do tpc scaling farther down in this function.
 
-            currWin = &tblInfo->win[_rsMngRateGetIdx(&tblInfo->rsMngRate)];
+      // If previous rate was an upscale test, need to update lmac regardless of the next rate
+      // because even if it's the same rate again, need to send the lq command without the
+      // test-rate bit set.
+      updateLmac |= staInfo->tryingRateUpscale;
 
-            // Check if to try changing rate within the column. If so - update lmac. Otherwise -
-            // check if it's time to start a new search cycle and act upon that decision
-            if (_rsMngTryScaleWithinColumn(staInfo, currWin, &updateHost)) {
-                updateLmac = TRUE;
-            } else if (_rsMngShouldStartSearchCycle(staInfo, &staInfo->isUpscaleSearchCycle)) {
-                updateLmac |= _rsMngStartSearchCycle(staInfo, currWin->averageTpt, &updateHost);
-            } else {
-                // Note that if the rate didn't really change the host-update function will not send
-                // a notification to host regadless of the value of this boolean.
-                updateHost = TRUE;
-            }
-        }
+      currWin = &tblInfo->win[_rsMngRateGetIdx(&tblInfo->rsMngRate)];
+
+      // Check if to try changing rate within the column. If so - update lmac. Otherwise -
+      // check if it's time to start a new search cycle and act upon that decision
+      if (_rsMngTryScaleWithinColumn(staInfo, currWin, &updateHost)) {
+        updateLmac = TRUE;
+      } else if (_rsMngShouldStartSearchCycle(staInfo, &staInfo->isUpscaleSearchCycle)) {
+        updateLmac |= _rsMngStartSearchCycle(staInfo, currWin->averageTpt, &updateHost);
+      } else {
+        // Note that if the rate didn't really change the host-update function will not send
+        // a notification to host regardless of the value of this boolean.
+        updateHost = TRUE;
+      }
+    }
+  }
+
+  tpcAllowed = _rsMngTpcAllowed(staInfo, &tpcAllowedData);
+  if (tpcAllowed == RS_MNG_TPC_ALLOWED) {
+    RS_MNG_TPC_ACTION action;
+
+    if (staInfo->tpcTable.currStep == RS_MNG_TPC_DISABLED) {
+      // First time tpc is allowed, start a tpc search cycle (quick movement between tpc
+      // steps)
+      _rsMngClearWinArr(staInfo->tpcTable.windows, RS_MNG_TPC_NUM_STEPS);
+      staInfo->tpcTable.currStep = RS_MNG_TPC_INACTIVE;
+      staInfo->rsMngState = RS_MNG_STATE_TPC_SEARCH;
+
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngRateScalePerform: starting tpc search");
     }
 
-    tpcAllowed = _rsMngTpcAllowed(staInfo, &tpcAllowedData);
-    if (tpcAllowed == RS_MNG_TPC_ALLOWED) {
-        RS_MNG_TPC_ACTION action;
+    action = _rsMngTpcGetAction(staInfo);
 
-        if (staInfo->tpcTable.currStep == RS_MNG_TPC_DISABLED) {
-            // First time tpc is allowed, start a tpc search cycle (quick movement between tpc
-            // steps)
-            _rsMngClearWinArr(staInfo->tpcTable.windows, RS_MNG_TPC_NUM_STEPS);
-            staInfo->tpcTable.currStep = RS_MNG_TPC_INACTIVE;
-            staInfo->rsMngState = RS_MNG_STATE_TPC_SEARCH;
+    updateLmac |= staInfo->tpcTable.testing;
 
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngRateScalePerform: starting tpc search");
-        }
+    if (staInfo->rsMngState == RS_MNG_STATE_TPC_SEARCH) {
+      if (action == RS_MNG_TPC_ACTION_STAY) {
+        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                   "_rsMngRateScalePerform: change state to RS_MNG_STATE_STAY_IN_COLUMN");
 
-        action = _rsMngTpcGetAction(staInfo);
-
-        updateLmac |= staInfo->tpcTable.testing;
-
-        if (staInfo->rsMngState == RS_MNG_STATE_TPC_SEARCH) {
-            if (action == RS_MNG_TPC_ACTION_STAY) {
-                DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                           "_rsMngRateScalePerform: change state to RS_MNG_STATE_STAY_IN_COLUMN");
-
-                staInfo->rsMngState = RS_MNG_STATE_STAY_IN_COLUMN;
-                updateHost = TRUE;
-            }
+        staInfo->rsMngState = RS_MNG_STATE_STAY_IN_COLUMN;
+        updateHost = TRUE;
+      }
+    } else {
+      if (action == RS_MNG_TPC_ACTION_INCREASE) {
+        if (time_after(jiffies, staInfo->lastRateUpscaleTimeJiffies +
+                                    usecs_to_jiffies(RS_MNG_UPSCALE_MAX_FREQUENCY))) {
+          staInfo->lastRateUpscaleTimeJiffies = jiffies;
         } else {
-            if (action == RS_MNG_TPC_ACTION_INCREASE) {
-                if (time_after(jiffies, staInfo->lastRateUpscaleTimeJiffies +
-                                            usecs_to_jiffies(RS_MNG_UPSCALE_MAX_FREQUENCY))) {
-                    staInfo->lastRateUpscaleTimeJiffies = jiffies;
-                } else {
-                    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                               "_rsMngTryScaleWithinColumn: overriding tpc increase. time since "
-                               "last increase %dusec",
-                               systemTimeGetUsecDiffTime(staInfo->lastRateUpscaleTimeJiffies));
-                    action = RS_MNG_TPC_ACTION_STAY;
-                }
-            }
+          DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                     "_rsMngTryScaleWithinColumn: overriding tpc increase. time since "
+                     "last increase %dusec",
+                     systemTimeGetUsecDiffTime(staInfo->lastRateUpscaleTimeJiffies));
+          action = RS_MNG_TPC_ACTION_STAY;
         }
-
-        _rsMngTpcDoAction(staInfo, action);
-        updateLmac |= (action != RS_MNG_TPC_ACTION_STAY);
-    } else if (staInfo->tpcTable.currStep != RS_MNG_TPC_DISABLED) {
-        // TPC was just disallowed. Reset tpc state.
-
-        // If TPC was active or was testing a new step, need to resend an lq with no power reduction
-        // and no test-window bit.
-        updateLmac |= (_rsMngTpcIsActive(staInfo) || staInfo->tpcTable.testing);
-
-        staInfo->tpcTable.testing = FALSE;
-        staInfo->tpcTable.currStep = RS_MNG_TPC_DISABLED;
-
-        if (staInfo->rsMngState == RS_MNG_STATE_TPC_SEARCH) {
-            staInfo->rsMngState = RS_MNG_STATE_STAY_IN_COLUMN;
-            updateHost = TRUE;
-        }
+      }
     }
 
-    if (updateLmac) {
-        _rsMngUpdateRateTbl(staInfo,
-                            staInfo->rsMngState == RS_MNG_STATE_STAY_IN_COLUMN && updateHost);
+    _rsMngTpcDoAction(staInfo, action);
+    updateLmac |= (action != RS_MNG_TPC_ACTION_STAY);
+  } else if (staInfo->tpcTable.currStep != RS_MNG_TPC_DISABLED) {
+    // TPC was just disallowed. Reset tpc state.
+
+    // If TPC was active or was testing a new step, need to resend an lq with no power reduction
+    // and no test-window bit.
+    updateLmac |= (_rsMngTpcIsActive(staInfo) || staInfo->tpcTable.testing);
+
+    staInfo->tpcTable.testing = FALSE;
+    staInfo->tpcTable.currStep = RS_MNG_TPC_DISABLED;
+
+    if (staInfo->rsMngState == RS_MNG_STATE_TPC_SEARCH) {
+      staInfo->rsMngState = RS_MNG_STATE_STAY_IN_COLUMN;
+      updateHost = TRUE;
     }
+  }
+
+  if (updateLmac) {
+    _rsMngUpdateRateTbl(staInfo, staInfo->rsMngState == RS_MNG_STATE_STAY_IN_COLUMN && updateHost);
+  }
 }
 
 static void rsMngInitAmsdu(RS_MNG_STA_INFO_S* staInfo) {
-    if (staInfo->config.amsduSupported && staInfo->config.bestSuppMode >= TLC_MNG_MODE_VHT &&
-        _rsMngGetMaxChWidth(staInfo) >= TLC_MNG_CH_WIDTH_40MHZ) {
-        staInfo->amsduSupport = TRUE;
-    } else {
-        staInfo->amsduSupport = FALSE;
-    }
+  if (staInfo->config.amsduSupported && staInfo->config.bestSuppMode >= TLC_MNG_MODE_VHT &&
+      _rsMngGetMaxChWidth(staInfo) >= TLC_MNG_CH_WIDTH_40MHZ) {
+    staInfo->amsduSupport = TRUE;
+  } else {
+    staInfo->amsduSupport = FALSE;
+  }
 
-    staInfo->amsduEnabledSize = RS_MNG_AMSDU_INVALID;
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "rsMngInitAmsdu: sta %d, AMSDU support %d",
-               _rsMngStaInfoToStaId(staInfo), staInfo->amsduSupport);
+  staInfo->amsduEnabledSize = RS_MNG_AMSDU_INVALID;
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "rsMngInitAmsdu: sta %d, AMSDU support %d",
+             _rsMngStaInfoToStaId(staInfo), staInfo->amsduSupport);
 
-    staInfo->lastTrafficLoadStatJiffies = jiffies;
+  staInfo->lastTrafficLoadStatJiffies = jiffies;
 }
 
 static U16 _rsMngAmsduEnumToSize(RS_MNG_TX_AMSDU_SIZE_E value) {
-    switch (value) {
+  switch (value) {
     case RS_MNG_AMSDU_3500B:
-        return 3500;
+      return 3500;
     case RS_MNG_AMSDU_5000B:
-        return 5000;
+      return 5000;
     case RS_MNG_AMSDU_6500B:
-        return 6500;
+      return 6500;
     case RS_MNG_AMSDU_8000B:
-        return 8000;
+      return 8000;
     default:
-        return 0;
-    }
+      return 0;
+  }
 }
 
 static void _rsMngNotifAmsdu(RS_MNG_STA_INFO_S* staInfo, U32 successRatio, U32 trafficLoadPerSec) {
-    U16 bitmap = 0;
-    U16 size = 0;
+  U16 bitmap = 0;
+  U16 size = 0;
 
-    if (staInfo->amsduEnabledSize != RS_MNG_AMSDU_INVALID) {
-        // Check which TIDs are not low latency and have an AMSDU in AMPDU session.
-        // We activate AMSDU only for those.
-        bitmap =
-            (U16)(RS_MNG_AMSDU_VALID_TIDS_MSK & staInfo->mvmsta->agg_tids & staInfo->amsduInAmpdu);
-        size = _rsMngAmsduEnumToSize(staInfo->amsduEnabledSize);
-        if (size > staInfo->config.maxMpduLen) { size = staInfo->config.maxMpduLen; }
+  if (staInfo->amsduEnabledSize != RS_MNG_AMSDU_INVALID) {
+    // Check which TIDs are not low latency and have an AMSDU in AMPDU session.
+    // We activate AMSDU only for those.
+    bitmap = (U16)(RS_MNG_AMSDU_VALID_TIDS_MSK & staInfo->mvmsta->agg_tids & staInfo->amsduInAmpdu);
+    size = _rsMngAmsduEnumToSize(staInfo->amsduEnabledSize);
+    if (size > staInfo->config.maxMpduLen) {
+      size = staInfo->config.maxMpduLen;
     }
+  }
 
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngNotifAmsdu: sta %d AMSDU enabled ? %u. bitmap %x, size %d",
-               _rsMngStaInfoToStaId(staInfo), staInfo->amsduEnabledSize != RS_MNG_AMSDU_INVALID,
-               bitmap, size);
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngNotifAmsdu: sta %d AMSDU enabled ? %u. bitmap %x, size %d",
+             _rsMngStaInfoToStaId(staInfo), staInfo->amsduEnabledSize != RS_MNG_AMSDU_INVALID,
+             bitmap, size);
 
-    tlcMngNotifyAmsdu(staInfo, size, bitmap);
+  tlcMngNotifyAmsdu(staInfo, size, bitmap);
 }
 
 // Activate fail safe mechanism:
@@ -2915,39 +2983,45 @@
 // In case we consecutively toggle to disable within a FAIL_TIME_THRESHOLD from
 // enablement FAIL_CONSEC_THRESHOLD times - blacklist this AMSDU size permanently.
 static void _rsMngAmsduFailSafe(RS_MNG_STA_INFO_S* staInfo, unsigned long currentTime) {
-    // Since traffic load 1 sec window is not exactly one sec, check to see if we
-    // are within its window by comparing the update of the stat timestamp with
-    // the enable event.
-    // If the "1 sec" window has passed, then the timestamp was updated.
-    if (time_before(currentTime, staInfo->lastEnableJiffies +
-                                     usecs_to_jiffies(RS_MNG_AMSDU_FAIL_TIME_THRESHOLD)) ||
-        staInfo->lastEnableJiffies == staInfo->lastTrafficLoadStatJiffies) {
-        staInfo->failSafeCounter++;
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngAmsduFailSafe: Sta %d IOP counter incremented to %d",
-                   _rsMngStaInfoToStaId(staInfo), staInfo->failSafeCounter);
-    } else {
-        staInfo->failSafeCounter = 0;
-    }
+  // Since traffic load 1 sec window is not exactly one sec, check to see if we
+  // are within its window by comparing the update of the stat timestamp with
+  // the enable event.
+  // If the "1 sec" window has passed, then the timestamp was updated.
+  if (time_before(currentTime, staInfo->lastEnableJiffies +
+                                   usecs_to_jiffies(RS_MNG_AMSDU_FAIL_TIME_THRESHOLD)) ||
+      staInfo->lastEnableJiffies == staInfo->lastTrafficLoadStatJiffies) {
+    staInfo->failSafeCounter++;
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngAmsduFailSafe: Sta %d IOP counter incremented to %d",
+               _rsMngStaInfoToStaId(staInfo), staInfo->failSafeCounter);
+  } else {
+    staInfo->failSafeCounter = 0;
+  }
 
-    if (staInfo->failSafeCounter >= RS_MNG_AMSDU_FAIL_CONSEC_THRESHOLD) {
-        // disable AMSDU permanently for this AMSDU size
-        staInfo->amsduBlacklist |= BIT(staInfo->amsduEnabledSize);
-        staInfo->failSafeCounter = 0;
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngAmsduFailSafe: sta %d, blacklisted size %d",
-                   _rsMngStaInfoToStaId(staInfo), staInfo->amsduEnabledSize);
+  if (staInfo->failSafeCounter >= RS_MNG_AMSDU_FAIL_CONSEC_THRESHOLD) {
+    // disable AMSDU permanently for this AMSDU size
+    staInfo->amsduBlacklist |= BIT(staInfo->amsduEnabledSize);
+    staInfo->failSafeCounter = 0;
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngAmsduFailSafe: sta %d, blacklisted size %d",
+               _rsMngStaInfoToStaId(staInfo), staInfo->amsduEnabledSize);
 
-        // if all disabled - save some future processing
-        if (staInfo->amsduBlacklist == RS_MNG_AMSDU_SIZE_ALL) { staInfo->amsduSupport = FALSE; }
+    // if all disabled - save some future processing
+    if (staInfo->amsduBlacklist == RS_MNG_AMSDU_SIZE_ALL) {
+      staInfo->amsduSupport = FALSE;
     }
+  }
 }
 
 static void _rsMngAmsduChanged(RS_MNG_STA_INFO_S* staInfo, RS_MNG_TX_AMSDU_SIZE_E size,
                                unsigned long time, U32 successRatio, U32 trafficLoadPerSec) {
-    if (size < staInfo->amsduEnabledSize) { _rsMngAmsduFailSafe(staInfo, time); }
-    staInfo->amsduEnabledSize = size;
-    if (size != RS_MNG_AMSDU_INVALID) { staInfo->lastEnableJiffies = time; }
-    _rsMngNotifAmsdu(staInfo, successRatio, trafficLoadPerSec);
+  if (size < staInfo->amsduEnabledSize) {
+    _rsMngAmsduFailSafe(staInfo, time);
+  }
+  staInfo->amsduEnabledSize = size;
+  if (size != RS_MNG_AMSDU_INVALID) {
+    staInfo->lastEnableJiffies = time;
+  }
+  _rsMngNotifAmsdu(staInfo, successRatio, trafficLoadPerSec);
 }
 
 /*
@@ -2956,108 +3030,103 @@
  */
 static BOOLEAN _rsMngCollectAmsduTlcData(RS_MNG_STA_INFO_S* staInfo, U32 baTxed, U32 baAcked,
                                          U16 trafficLoad) {
-    RS_MNG_TX_AMSDU_SIZE_E size;
-    unsigned long elapsedTime, currentTime, trafficLoadPerSec, successRatio;
-    BOOLEAN ret = FALSE;
-    const U32 baTxedUnnormalized = baTxed;
-    RS_MNG_RATE_S* rsMngRate = &staInfo->rateTblInfo.rsMngRate;
+  RS_MNG_TX_AMSDU_SIZE_E size;
+  unsigned long elapsedTime, currentTime, trafficLoadPerSec, successRatio;
+  BOOLEAN ret = FALSE;
+  const U32 baTxedUnnormalized = baTxed;
+  RS_MNG_RATE_S* rsMngRate = &staInfo->rateTblInfo.rsMngRate;
 
-    currentTime = jiffies;
-    elapsedTime = jiffies_to_usecs(currentTime - staInfo->lastTrafficLoadStatJiffies) >> 10;
-    elapsedTime = elapsedTime ?: 1;
+  currentTime = jiffies;
+  elapsedTime = jiffies_to_usecs(currentTime - staInfo->lastTrafficLoadStatJiffies) >> 10;
+  elapsedTime = elapsedTime ?: 1;
 
-    staInfo->trafficLoad += trafficLoad;
-    size = _rsMngAmsduSize(staInfo, _rsMngRateGetMode(rsMngRate), _rsMngRateGetBw(rsMngRate),
-                           _rsMngRateGetIdx(rsMngRate), _rsMngRateGetGi(rsMngRate),
-                           _rsMngRateGetModulation(rsMngRate));
+  staInfo->trafficLoad += trafficLoad;
+  size = _rsMngAmsduSize(staInfo, _rsMngRateGetMode(rsMngRate), _rsMngRateGetBw(rsMngRate),
+                         _rsMngRateGetIdx(rsMngRate), _rsMngRateGetGi(rsMngRate),
+                         _rsMngRateGetModulation(rsMngRate));
 
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngCollectAmsduTlcData: baTxed %d, baAcked %d, new trafficLoad %d, time from "
-               "last traffic load check %d, trafficLoad %d potential size %d",
-               baTxed, baAcked, trafficLoad, elapsedTime, staInfo->trafficLoad, size);
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngCollectAmsduTlcData: baTxed %d, baAcked %d, new trafficLoad %d, time from "
+             "last traffic load check %d, trafficLoad %d potential size %d",
+             baTxed, baAcked, trafficLoad, elapsedTime, staInfo->trafficLoad, size);
 
-    // Normalize the baTxed to be able to divide by it without explicit zero check
-    // baAcked is zero as well in this case, so it has no functionality impact
-    baTxed = baTxed ?: 1;
+  // Normalize the baTxed to be able to divide by it without explicit zero check
+  // baAcked is zero as well in this case, so it has no functionality impact
+  baTxed = baTxed ?: 1;
 
-    if (staInfo->amsduEnabledSize == RS_MNG_AMSDU_INVALID) {
-        // AMSDU TLC traffic load works in a window of 1 second
-        if (elapsedTime < 1000) {
-            DBG_PRINTF(
-                UT, TLC_OFFLOAD_DBG, INFO,
-                "_rsMngCollectAmsduTlcData: amsdu disabled, but elapsedTime less than one second");
-            return FALSE;
-        }
-
-        // This div will only be done 1 time in a second, if AMSDU is supported - not that expensive
-        successRatio = (baAcked * 128) / baTxed;
-        // normalize the traffic load to a 1 second window
-        trafficLoadPerSec = (staInfo->trafficLoad << 10) / elapsedTime;
-
-        DBG_PRINTF(
-            UT, TLC_OFFLOAD_DBG, INFO,
-            "_rsMngCollectAmsduTlcData: amsdu disabled. successRatio %d, trafficLoadPerSec %d",
-            successRatio, trafficLoadPerSec);
-
-        if (successRatio > RS_MNG_AMSDU_SR_ENABLE_THRESHOLD &&
-            trafficLoadPerSec > RS_MNG_AMSDU_TL_ENABLE_THRESHOLD && size != RS_MNG_AMSDU_INVALID) {
-            // AMSDU is disabled and should be enabled
-            _rsMngAmsduChanged(staInfo, size, currentTime, successRatio, trafficLoadPerSec);
-            ret = TRUE;
-        }
-    } else {
-        successRatio = (baAcked * 128) / baTxed;
-        // normalize the traffic load to a 1 second window
-        trafficLoadPerSec = (staInfo->trafficLoad << 10) / elapsedTime;
-
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "_rsMngCollectAmsduTlcData: amsdu is enabled with size %d, successRatio %d, "
-                   "trafficLoadPerSec  %d",
-                   staInfo->amsduEnabledSize, successRatio, trafficLoadPerSec);
-
-        // AMSDU is enabled - check if we should disable or up/down-grade.
-        // Note that we upgrade here without waiting for a second to pass since the last change in
-        // order to recover as fast as possible from a momentary rate reduction (at the price of
-        // perhaps triggering the failsafe mechanism too early).
-        if (size != staInfo->amsduEnabledSize) {
-            DBG_PRINTF(
-                UT, TLC_OFFLOAD_DBG, INFO,
-                "_rsMngCollectAmsduTlcData: new size is different than current size. updating");
-            _rsMngAmsduChanged(staInfo, size, currentTime, successRatio, trafficLoadPerSec);
-            return size == RS_MNG_AMSDU_INVALID;
-        }
-
-        if (successRatio < RS_MNG_AMSDU_SR_DISABLE_THRESHOLD && baTxedUnnormalized) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngCollectAmsduTlcData: success ratio too low. Disabling amsdu");
-            _rsMngAmsduChanged(staInfo, RS_MNG_AMSDU_INVALID, currentTime, successRatio,
-                               trafficLoadPerSec);
-            return TRUE;
-        }
-
-        // AMSDU TLC traffic load works in a window of 1 second
-        if (elapsedTime < 1000) {
-            DBG_PRINTF(
-                UT, TLC_OFFLOAD_DBG, INFO,
-                "_rsMngCollectAmsduTlcData: elapsed time less than a second, skipping TL check");
-            return FALSE;
-        }
-
-        if (trafficLoadPerSec < RS_MNG_AMSDU_TL_DISABLE_THRESHOLD) {
-            staInfo->trafficLoad = trafficLoadPerSec;
-            _rsMngAmsduChanged(staInfo, RS_MNG_AMSDU_INVALID, currentTime, successRatio,
-                               trafficLoadPerSec);
-            ret = TRUE;
-        }
+  if (staInfo->amsduEnabledSize == RS_MNG_AMSDU_INVALID) {
+    // AMSDU TLC traffic load works in a window of 1 second
+    if (elapsedTime < 1000) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngCollectAmsduTlcData: amsdu disabled, but elapsedTime less than one second");
+      return FALSE;
     }
 
+    // This div will only be done 1 time in a second, if AMSDU is supported - not that expensive
+    successRatio = (baAcked * 128) / baTxed;
+    // normalize the traffic load to a 1 second window
+    trafficLoadPerSec = (staInfo->trafficLoad << 10) / elapsedTime;
+
     DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngCollectAmsduTlcData: resetting trafficLoad counter");
+               "_rsMngCollectAmsduTlcData: amsdu disabled. successRatio %d, trafficLoadPerSec %d",
+               successRatio, trafficLoadPerSec);
 
-    staInfo->trafficLoad = 0;
-    staInfo->lastTrafficLoadStatJiffies = currentTime;
+    if (successRatio > RS_MNG_AMSDU_SR_ENABLE_THRESHOLD &&
+        trafficLoadPerSec > RS_MNG_AMSDU_TL_ENABLE_THRESHOLD && size != RS_MNG_AMSDU_INVALID) {
+      // AMSDU is disabled and should be enabled
+      _rsMngAmsduChanged(staInfo, size, currentTime, successRatio, trafficLoadPerSec);
+      ret = TRUE;
+    }
+  } else {
+    successRatio = (baAcked * 128) / baTxed;
+    // normalize the traffic load to a 1 second window
+    trafficLoadPerSec = (staInfo->trafficLoad << 10) / elapsedTime;
 
-    return ret;
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "_rsMngCollectAmsduTlcData: amsdu is enabled with size %d, successRatio %d, "
+               "trafficLoadPerSec  %d",
+               staInfo->amsduEnabledSize, successRatio, trafficLoadPerSec);
+
+    // AMSDU is enabled - check if we should disable or up/down-grade.
+    // Note that we upgrade here without waiting for a second to pass since the last change in
+    // order to recover as fast as possible from a momentary rate reduction (at the price of
+    // perhaps triggering the failsafe mechanism too early).
+    if (size != staInfo->amsduEnabledSize) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngCollectAmsduTlcData: new size is different than current size. updating");
+      _rsMngAmsduChanged(staInfo, size, currentTime, successRatio, trafficLoadPerSec);
+      return size == RS_MNG_AMSDU_INVALID;
+    }
+
+    if (successRatio < RS_MNG_AMSDU_SR_DISABLE_THRESHOLD && baTxedUnnormalized) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngCollectAmsduTlcData: success ratio too low. Disabling amsdu");
+      _rsMngAmsduChanged(staInfo, RS_MNG_AMSDU_INVALID, currentTime, successRatio,
+                         trafficLoadPerSec);
+      return TRUE;
+    }
+
+    // AMSDU TLC traffic load works in a window of 1 second
+    if (elapsedTime < 1000) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngCollectAmsduTlcData: elapsed time less than a second, skipping TL check");
+      return FALSE;
+    }
+
+    if (trafficLoadPerSec < RS_MNG_AMSDU_TL_DISABLE_THRESHOLD) {
+      staInfo->trafficLoad = trafficLoadPerSec;
+      _rsMngAmsduChanged(staInfo, RS_MNG_AMSDU_INVALID, currentTime, successRatio,
+                         trafficLoadPerSec);
+      ret = TRUE;
+    }
+  }
+
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngCollectAmsduTlcData: resetting trafficLoad counter");
+
+  staInfo->trafficLoad = 0;
+  staInfo->lastTrafficLoadStatJiffies = currentTime;
+
+  return ret;
 }
 
 /**
@@ -3070,201 +3139,201 @@
 // TODO - handle the position of the acks/fails in the window. for now - we only care about the
 // counters (how many failed/succeeded regardless of when)
 static BOOLEAN _rsMngCollectTlcData(RS_MNG_STA_INFO_S* staInfo, int attempts, int successes) {
-    RS_MNG_WIN_STAT_S* window;
-    const RS_MNG_RATE_S* rsMngRate = &staInfo->rateTblInfo.rsMngRate;
-    U32 expectedTpt;
-    U32 tmpSumFrames = attempts;
-    U32 tmpExtraFrames;
-    U32 windowSize;
-    BOOLEAN ret = TRUE;
+  RS_MNG_WIN_STAT_S* window;
+  const RS_MNG_RATE_S* rsMngRate = &staInfo->rateTblInfo.rsMngRate;
+  U32 expectedTpt;
+  U32 tmpSumFrames = attempts;
+  U32 tmpExtraFrames;
+  U32 windowSize;
+  BOOLEAN ret = TRUE;
 
-    if (staInfo->searchBetterTbl) {
-        RS_MNG_SEARCH_COL_DATA* searchData = &staInfo->searchColData;
+  if (staInfo->searchBetterTbl) {
+    RS_MNG_SEARCH_COL_DATA* searchData = &staInfo->searchColData;
 
-        window = &searchData->win;
-        expectedTpt = searchData->expectedTpt;
-        rsMngRate = &searchData->rsMngRate;
-    } else if (_rsMngTpcIsActive(staInfo)) {
-        window = &staInfo->tpcTable.windows[staInfo->tpcTable.currStep];
-        // In TPC we don't care about expected/average tpt, only about success ratio. Initialize
-        // this var here anyways so the compiler won't complain.
-        expectedTpt = 0;
-    } else {
-        RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
-        U32 rateIdx = _rsMngRateGetIdx(&tblInfo->rsMngRate);
+    window = &searchData->win;
+    expectedTpt = searchData->expectedTpt;
+    rsMngRate = &searchData->rsMngRate;
+  } else if (_rsMngTpcIsActive(staInfo)) {
+    window = &staInfo->tpcTable.windows[staInfo->tpcTable.currStep];
+    // In TPC we don't care about expected/average tpt, only about success ratio. Initialize
+    // this var here anyway so the compiler won't complain.
+    expectedTpt = 0;
+  } else {
+    RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
+    U32 rateIdx = _rsMngRateGetIdx(&tblInfo->rsMngRate);
 
-        window = &tblInfo->win[rateIdx];
-        expectedTpt = _rsMngGetExpectedTpt(staInfo, &rsMngColumns[tblInfo->column],
-                                           _rsMngRateGetBw(&tblInfo->rsMngRate),
-                                           !!(staInfo->mvmsta->agg_tids), rateIdx);
+    window = &tblInfo->win[rateIdx];
+    expectedTpt = _rsMngGetExpectedTpt(staInfo, &rsMngColumns[tblInfo->column],
+                                       _rsMngRateGetBw(&tblInfo->rsMngRate),
+                                       !!(staInfo->mvmsta->agg_tids), rateIdx);
+  }
+
+  windowSize = _rsMngRateGetMode(rsMngRate) != TLC_MNG_MODE_LEGACY ? RS_MNG_MAX_WINDOW_SIZE
+                                                                   : RS_MNG_MAX_WINDOW_SIZE_NON_HT;
+
+  tmpSumFrames += window->framesCounter;
+
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngCollectTlcData: old data: frames: %d, success: %d",
+             window->framesCounter, window->successCounter);
+
+  if (attempts >= windowSize) {
+    window->successCounter = (windowSize * successes) / attempts;
+    window->framesCounter = windowSize;
+  } else if (tmpSumFrames > windowSize) {
+    /***** Calculate window->successCounter *****/
+    // TODO - replace with a good algorithm. for now this is a workaround
+    tmpExtraFrames = (tmpSumFrames - windowSize);
+
+    // 1. Calculate the ratio of window->successCounter to window->framesCounter and subtract
+    // the matching share of the removed frames
+    //    to avoid exceeding the MAX window size.
+    window->successCounter -=
+        (tmpExtraFrames * window->successCounter) / MAX(1, window->framesCounter);
+
+    // 2. add the successes to window->successCounter, as long as it doesn't exceed the max
+    // window size
+    window->successCounter = min_t(U32, (window->successCounter + successes), windowSize);
+
+    window->framesCounter = windowSize;
+  } else {
+    if (!WARN_ON(!((window->successCounter + successes) <= windowSize))) {
+      window->successCounter += successes;
+      window->framesCounter += attempts;
     }
+  }
 
-    windowSize = _rsMngRateGetMode(rsMngRate) != TLC_MNG_MODE_LEGACY
-                     ? RS_MNG_MAX_WINDOW_SIZE
-                     : RS_MNG_MAX_WINDOW_SIZE_NON_HT;
+  // Calculate current success ratio
+  if (window->framesCounter > 0) {
+    window->successRatio = (128 * window->successCounter) / window->framesCounter;
 
-    tmpSumFrames += window->framesCounter;
-
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngCollectTlcData: old data: frames: %d, success: %d",
-               window->framesCounter, window->successCounter);
-
-    if (attempts >= windowSize) {
-        window->successCounter = (windowSize * successes) / attempts;
-        window->framesCounter = windowSize;
-    } else if (tmpSumFrames > windowSize) {
-        /***** Calculate window->successCounter *****/
-        // TODO - replace with good algorithem. for now this is a workaround
-        tmpExtraFrames = (tmpSumFrames - windowSize);
-
-        // 1. calculated the ratio of window->successCounter in window->framesCounter and substruct
-        // the matching part of the frames substructed
-        //    to avoid exceeding the MAX window size.
-        window->successCounter -=
-            (tmpExtraFrames * window->successCounter) / MAX(1, window->framesCounter);
-
-        // 2. add the successes to window->successCounter, as long as it dosn't exceed the max
-        // window size
-        window->successCounter = min_t(U32, (window->successCounter + successes), windowSize);
-
-        window->framesCounter = windowSize;
+    // Calculate average throughput, if we have enough history.
+    if (_isAvgTptCalcPossible(window)) {
+      window->averageTpt = ((window->successRatio * expectedTpt) + 64) / 128;
     } else {
-        if (!WARN_ON(!((window->successCounter + successes) <= windowSize))) {
-            window->successCounter += successes;
-            window->framesCounter += attempts;
-        }
+      window->averageTpt = RS_MNG_INVALID_VAL;
+      ret = FALSE;
     }
+  } else {
+    window->successRatio = RS_MNG_INVALID_VAL;
+    window->averageTpt = RS_MNG_INVALID_VAL;
+    ret = FALSE;
+  }
 
-    // Calculate current success ratio
-    if (window->framesCounter > 0) {
-        window->successRatio = (128 * window->successCounter) / window->framesCounter;
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngCollectTlcData: new data: framesCounter: %d, successCounter: %d, "
+             "successRatio: %d/128, avg Tpt: %d, expected Tpt: %d",
+             window->framesCounter, window->successCounter, window->successRatio,
+             window->averageTpt, expectedTpt);
 
-        // Calculate average throughput, if we have enough history.
-        if (_isAvgTptCalcPossible(window)) {
-            window->averageTpt = ((window->successRatio * expectedTpt) + 64) / 128;
-        } else {
-            window->averageTpt = RS_MNG_INVALID_VAL;
-            ret = FALSE;
-        }
-    } else {
-        window->successRatio = RS_MNG_INVALID_VAL;
-        window->averageTpt = RS_MNG_INVALID_VAL;
-        ret = FALSE;
-    }
-
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngCollectTlcData: new data: framesCounter: %d, successCounter: %d, "
-               "successRatio: %d/128, avg Tpt: %d, expected Tpt: %d",
-               window->framesCounter, window->successCounter, window->successRatio,
-               window->averageTpt, expectedTpt);
-
-    return ret;
+  return ret;
 }
 
 static U16 _rsMngGetCurrentThreshold(const RS_MNG_STA_INFO_S* staInfo) {
-    return (U16)(_rsMngIsTestWindow(staInfo) ? RS_MNG_UPSCALE_AGG_FRAME_COUNT : RS_STAT_THOLD);
+  return (U16)(_rsMngIsTestWindow(staInfo) ? RS_MNG_UPSCALE_AGG_FRAME_COUNT : RS_STAT_THOLD);
 }
 
 static BOOLEAN _rsMngUpdateGlobalStats(RS_MNG_STA_INFO_S* staInfo, TLC_STAT_COMMON_API_S* stats) {
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "_rsMngUpdateGlobalStats: tpc stats: no-tpc %u, step1 %u, step2 %u, step3 %u, step4 "
-               "%u, step5 %u",
-               g_rsMngTpcStats.noTpc, g_rsMngTpcStats.step[0], g_rsMngTpcStats.step[1],
-               g_rsMngTpcStats.step[2], g_rsMngTpcStats.step[3], g_rsMngTpcStats.step[4]);
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "_rsMngUpdateGlobalStats: tpc stats: no-tpc %u, step1 %u, step2 %u, step3 %u, step4 "
+             "%u, step5 %u",
+             g_rsMngTpcStats.noTpc, g_rsMngTpcStats.step[0], g_rsMngTpcStats.step[1],
+             g_rsMngTpcStats.step[2], g_rsMngTpcStats.step[3], g_rsMngTpcStats.step[4]);
 
-    if (staInfo->rsMngState == RS_MNG_STATE_STAY_IN_COLUMN && !_rsMngIsTestWindow(staInfo)) {
-        BOOLEAN isNonHt = _rsMngRateGetMode(&staInfo->rateTblInfo.rsMngRate) == TLC_MNG_MODE_LEGACY;
+  if (staInfo->rsMngState == RS_MNG_STATE_STAY_IN_COLUMN && !_rsMngIsTestWindow(staInfo)) {
+    BOOLEAN isNonHt = _rsMngRateGetMode(&staInfo->rateTblInfo.rsMngRate) == TLC_MNG_MODE_LEGACY;
 
-        staInfo->totalFramesSuccess += stats->acked[0] + stats->acked[1];
-        staInfo->totalFramesFailed +=
-            stats->txed[0] - stats->acked[0] + stats->txed[1] - stats->acked[1];
-        staInfo->txedFrames += stats->txed[0] + stats->txed[1];
+    staInfo->totalFramesSuccess += stats->acked[0] + stats->acked[1];
+    staInfo->totalFramesFailed +=
+        stats->txed[0] - stats->acked[0] + stats->txed[1] - stats->acked[1];
+    staInfo->txedFrames += stats->txed[0] + stats->txed[1];
 
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "Total frames succeeded: %d, total frames failed: %d",
-                   staInfo->totalFramesSuccess, staInfo->totalFramesFailed);
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "Total frames succeeded: %d, total frames failed: %d",
+               staInfo->totalFramesSuccess, staInfo->totalFramesFailed);
 
-        // If we have been in this column long enough (regardless of the fail/success rate or time
-        // elapsed) - start the statistics calculation from scratch
-        if (staInfo->txedFrames >= g_rsMngStaModLimits[isNonHt].clearTblWindowsLimit) {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "_rsMngUpdateGlobalStats: Stayed in same column for a long time. Clear "
-                       "table windows. modulation counter was %d",
-                       staInfo->txedFrames);
+    // If we have been in this column long enough (regardless of the fail/success rate or time
+    // elapsed) - start the statistics calculation from scratch
+    if (staInfo->txedFrames >= g_rsMngStaModLimits[isNonHt].clearTblWindowsLimit) {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "_rsMngUpdateGlobalStats: Stayed in same column for a long time. Clear "
+                 "table windows. modulation counter was %d",
+                 staInfo->txedFrames);
 
-            staInfo->txedFrames = 0;
-            _rsMngClearTblWindows(staInfo);
-        }
+      staInfo->txedFrames = 0;
+      _rsMngClearTblWindows(staInfo);
     }
+  }
 
-    staInfo->framesSinceLastRun += stats->txed[0] + stats->txed[1];
-    if (staInfo->framesSinceLastRun >= _rsMngGetCurrentThreshold(staInfo)) {
-        staInfo->framesSinceLastRun = 0;
-        return TRUE;
-    }
+  staInfo->framesSinceLastRun += stats->txed[0] + stats->txed[1];
+  if (staInfo->framesSinceLastRun >= _rsMngGetCurrentThreshold(staInfo)) {
+    staInfo->framesSinceLastRun = 0;
+    return TRUE;
+  }
 
-    return FALSE;
+  return FALSE;
 }
 
 static void tlcStatUpdateHandler(RS_MNG_STA_INFO_S* staInfo, TLC_STAT_COMMON_API_S* stats,
                                  struct iwl_mvm* mvm, struct ieee80211_sta* sta, int tid,
                                  bool is_ndp) {
-    BOOLEAN forceLmacUpdate = FALSE;
-    int i;
+  BOOLEAN forceLmacUpdate = FALSE;
+  int i;
 
-    if (!staInfo->enabled) {
-        // Could happen if statistics from lmac were sent while umac was handling remove station.
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "tlcStatUpdateHandler: received stats for invalid station %d. Ignoring", i);
-        return;
-    }
-
-    if (!(stats->txed[0] || stats->txed[1])) {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "tlcStatUpdateHandler: no new info for station %u, skipping it", i);
-        return;
-    }
-
-    // TODO: optimize, no need for array
-    staInfo->amsduInAmpdu = 0;
-    for (i = 0; i < IWL_MAX_TID_COUNT; i++)
-        if (staInfo->mvmsta->tid_data[i].amsdu_in_ampdu_allowed) {
-            staInfo->amsduInAmpdu |= BIT(i);
-        }
-
-    if (!_rsMngIsTestWindow(staInfo) && staInfo->amsduSupport && staInfo->mvmsta->agg_tids &&
-        staInfo->amsduInAmpdu) {
-        forceLmacUpdate =
-            _rsMngCollectAmsduTlcData(staInfo, stats->baTxed, stats->baAcked, stats->trafficLoad);
-    }
-
-    if (_rsMngAreAggsSupported(staInfo->config.bestSuppMode)) {
-        // find all tids such that data was sent on them but aggregations weren't opened for them
-        // yet
-        if (tid < IWL_MAX_TID_COUNT && !is_ndp) { iwl_start_agg(mvm, sta, tid); }
-    }
-
+  if (!staInfo->enabled) {
+    // Could happen if statistics from lmac were sent while umac was handling remove station.
     DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-               "tlcStatUpdateHandler: Received statistics for sta %d, txed[0]: %d, acked[0]: %d, "
-               "tids: 0x%x",
-               i, stats->txed[0], stats->acked[0], stats->tids);
+               "tlcStatUpdateHandler: received stats for invalid station %d. Ignoring", i);
+    return;
+  }
 
-    if (!staInfo->ignoreNextTlcNotif && !staInfo->fixedRate) {
-        BOOLEAN doRateScale = _rsMngUpdateGlobalStats(staInfo, stats);
-        doRateScale &= _rsMngCollectTlcData(staInfo, stats->txed[0], stats->acked[0]);
+  if (!(stats->txed[0] || stats->txed[1])) {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "tlcStatUpdateHandler: no new info for station %u, skipping it", i);
+    return;
+  }
 
-        if (doRateScale) {
-            _rsMngRateScalePerform(staInfo, forceLmacUpdate);
-        } else {
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                       "tlcStatUpdateHandler: not performing rate scaling. #frames since last rate "
-                       "scale perform: %d",
-                       staInfo->framesSinceLastRun);
-        }
-    } else {
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
-                   "tlcStatUpdateHandler: ignoring notification. fixed rate: 0x%x",
-                   staInfo->fixedRate);
-        staInfo->ignoreNextTlcNotif = FALSE;
+  // TODO: optimize, no need for array
+  staInfo->amsduInAmpdu = 0;
+  for (i = 0; i < IWL_MAX_TID_COUNT; i++)
+    if (staInfo->mvmsta->tid_data[i].amsdu_in_ampdu_allowed) {
+      staInfo->amsduInAmpdu |= BIT(i);
     }
+
+  if (!_rsMngIsTestWindow(staInfo) && staInfo->amsduSupport && staInfo->mvmsta->agg_tids &&
+      staInfo->amsduInAmpdu) {
+    forceLmacUpdate =
+        _rsMngCollectAmsduTlcData(staInfo, stats->baTxed, stats->baAcked, stats->trafficLoad);
+  }
+
+  if (_rsMngAreAggsSupported(staInfo->config.bestSuppMode)) {
+    // find all tids such that data was sent on them but aggregations weren't opened for them
+    // yet
+    if (tid < IWL_MAX_TID_COUNT && !is_ndp) {
+      iwl_start_agg(mvm, sta, tid);
+    }
+  }
+
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+             "tlcStatUpdateHandler: Received statistics for sta %d, txed[0]: %d, acked[0]: %d, "
+             "tids: 0x%x",
+             i, stats->txed[0], stats->acked[0], stats->tids);
+
+  if (!staInfo->ignoreNextTlcNotif && !staInfo->fixedRate) {
+    BOOLEAN doRateScale = _rsMngUpdateGlobalStats(staInfo, stats);
+    doRateScale &= _rsMngCollectTlcData(staInfo, stats->txed[0], stats->acked[0]);
+
+    if (doRateScale) {
+      _rsMngRateScalePerform(staInfo, forceLmacUpdate);
+    } else {
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+                 "tlcStatUpdateHandler: not performing rate scaling. #frames since last rate "
+                 "scale perform: %d",
+                 staInfo->framesSinceLastRun);
+    }
+  } else {
+    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO,
+               "tlcStatUpdateHandler: ignoring notification. fixed rate: 0x%x", staInfo->fixedRate);
+    staInfo->ignoreNextTlcNotif = FALSE;
+  }
 }
 
 /*********************************************************************/
@@ -3272,246 +3341,254 @@
 /*********************************************************************/
 
 static RS_MNG_COLUMN_DESC_E _rsMngGetColByRate(RS_MNG_RATE_S* rsMngRate) {
-    RS_MNG_GI_E gi = _rsMngRateGetGi(rsMngRate);
-    U08 ant = _rsMngRateGetAnt(rsMngRate);
-    RS_MNG_COLUMN_DESC_E colId = RS_MNG_COL_INVALID;
+  RS_MNG_GI_E gi = _rsMngRateGetGi(rsMngRate);
+  U08 ant = _rsMngRateGetAnt(rsMngRate);
+  RS_MNG_COLUMN_DESC_E colId = RS_MNG_COL_INVALID;
 
-    switch (_rsMngRateGetModulation(rsMngRate)) {
+  switch (_rsMngRateGetModulation(rsMngRate)) {
     case RS_MNG_MODUL_LEGACY:
-        switch (ant) {
+      switch (ant) {
         case TLC_MNG_CHAIN_A_MSK:
-            colId = RS_MNG_COL_NON_HT_ANT_A;
-            break;
+          colId = RS_MNG_COL_NON_HT_ANT_A;
+          break;
         case TLC_MNG_CHAIN_B_MSK:
-            colId = RS_MNG_COL_NON_HT_ANT_B;
-            break;
+          colId = RS_MNG_COL_NON_HT_ANT_B;
+          break;
         default:
-            DBG_PRINTF(UT, TLC_OFFLOAD_DBG, ERROR, "invalid antenna Msk 0x%x for legacy rate",
-                       _rsMngRateGetAnt(rsMngRate));
-            break;
-        }
-        break;
+          DBG_PRINTF(UT, TLC_OFFLOAD_DBG, ERROR, "invalid antenna Msk 0x%x for legacy rate",
+                     _rsMngRateGetAnt(rsMngRate));
+          break;
+      }
+      break;
     case RS_MNG_MODUL_SISO:
-        switch (gi) {
+      switch (gi) {
         case HT_VHT_NGI:
-            colId = RS_MNG_COL_SISO_ANT_A;
-            break;
+          colId = RS_MNG_COL_SISO_ANT_A;
+          break;
         case HT_VHT_SGI:
-            colId = RS_MNG_COL_SISO_ANT_A_SGI;
-            break;
+          colId = RS_MNG_COL_SISO_ANT_A_SGI;
+          break;
         case HE_3_2_GI:
-            colId = RS_MNG_COL_HE_3_2_SISO_ANT_A;
-            break;
+          colId = RS_MNG_COL_HE_3_2_SISO_ANT_A;
+          break;
         case HE_1_6_GI:
-            colId = RS_MNG_COL_HE_1_6_SISO_ANT_A;
-            break;
+          colId = RS_MNG_COL_HE_1_6_SISO_ANT_A;
+          break;
         case HE_0_8_GI:
-            colId = RS_MNG_COL_HE_0_8_SISO_ANT_A;
-            break;
-        }
+          colId = RS_MNG_COL_HE_0_8_SISO_ANT_A;
+          break;
+      }
 
-        if (ant == TLC_MNG_CHAIN_B_MSK) {
-            // This works thanks to the compilation asserts near _rsMngSetVisitedColumn
-            colId ^= 1;
-        }
-        break;
+      if (ant == TLC_MNG_CHAIN_B_MSK) {
+        // This works thanks to the compilation asserts near _rsMngSetVisitedColumn
+        colId ^= 1;
+      }
+      break;
     case RS_MNG_MODUL_MIMO2:
-        switch (gi) {
+      switch (gi) {
         case HT_VHT_NGI:
-            colId = RS_MNG_COL_MIMO2;
-            break;
+          colId = RS_MNG_COL_MIMO2;
+          break;
         case HT_VHT_SGI:
-            colId = RS_MNG_COL_MIMO2_SGI;
-            break;
+          colId = RS_MNG_COL_MIMO2_SGI;
+          break;
         case HE_3_2_GI:
-            colId = RS_MNG_COL_HE_3_2_MIMO;
-            break;
+          colId = RS_MNG_COL_HE_3_2_MIMO;
+          break;
         case HE_1_6_GI:
-            colId = RS_MNG_COL_HE_1_6_MIMO;
-            break;
+          colId = RS_MNG_COL_HE_1_6_MIMO;
+          break;
         case HE_0_8_GI:
-            colId = RS_MNG_COL_HE_0_8_MIMO;
-            break;
-        }
-        break;
+          colId = RS_MNG_COL_HE_0_8_MIMO;
+          break;
+      }
+      break;
     default:
-        DBG_PRINTF(UT, TLC_OFFLOAD_DBG, ERROR, "invalid modulation %d",
-                   _rsMngRateGetModulation(rsMngRate));
-    }
+      DBG_PRINTF(UT, TLC_OFFLOAD_DBG, ERROR, "invalid modulation %d",
+                 _rsMngRateGetModulation(rsMngRate));
+  }
 
-    return colId;
+  return colId;
 }
 
 static void _rsMngSetInitRate(RS_MNG_STA_INFO_S* staInfo, RS_MNG_RATE_S* rsMngRate) {
-    U16 nonHtRates = staInfo->config.nonHt;
-    TLC_MNG_MODE_E mode = staInfo->config.bestSuppMode;
+  U16 nonHtRates = staInfo->config.nonHt;
+  TLC_MNG_MODE_E mode = staInfo->config.bestSuppMode;
 
-    _rsMngRateSetMode(rsMngRate, mode);
-    if (mode == TLC_MNG_MODE_LEGACY) {
-        _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_LEGACY);
-        _rsMngRateSetLdpc(rsMngRate, FALSE);
+  _rsMngRateSetMode(rsMngRate, mode);
+  if (mode == TLC_MNG_MODE_LEGACY) {
+    _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_LEGACY);
+    _rsMngRateSetLdpc(rsMngRate, FALSE);
+  } else {
+    _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_SISO);
+    _rsMngRateSetLdpc(rsMngRate, _rsMngIsLdpcAllowed(staInfo));
+  }
+
+  _rsMngRateSetBw(rsMngRate, CHANNEL_WIDTH20);
+
+  if (mode == TLC_MNG_MODE_HE) {
+    _rsMngRateSetGi(rsMngRate, HE_3_2_GI);
+  } else {
+    _rsMngRateSetGi(rsMngRate, HT_VHT_NGI);
+  }
+  _rsMngRateSetBfer(rsMngRate, FALSE);
+
+  if (mode > TLC_MNG_MODE_LEGACY && _rsMngIsStbcAllowed(staInfo, rsMngRate)) {
+    _rsMngRateSetStbc(rsMngRate, TRUE);
+    _rsMngRateSetAnt(rsMngRate, rsMngGetDualAntMsk());
+  } else {
+    _rsMngRateSetStbc(rsMngRate, FALSE);
+    _rsMngRateSetAnt(rsMngRate, rsMngGetSingleAntMsk(staInfo->config.chainsEnabled));
+  }
+
+  if (mode > TLC_MNG_MODE_LEGACY) {
+    U08 idx =
+        (U08)(_rsMngGetSuppRatesSameMode(staInfo, rsMngRate) & BIT(RS_MCS_3) ? RS_MCS_3 : RS_MCS_0);
+
+    _rsMngRateSetIdx(rsMngRate, idx);
+  } else {
+    if (LSB2ORD(nonHtRates) > RS_NON_HT_RATE_CCK_LAST) {
+      // 11a
+      _rsMngRateSetIdx(rsMngRate, RS_NON_HT_RATE_OFDM_24M);
+    } else if (MSB2ORD(nonHtRates) > RS_NON_HT_RATE_CCK_LAST) {
+      // 11g
+      _rsMngRateSetIdx(rsMngRate, RS_NON_HT_RATE_OFDM_18M);
     } else {
-        _rsMngRateSetModulation(rsMngRate, RS_MNG_MODUL_SISO);
-        _rsMngRateSetLdpc(rsMngRate, _rsMngIsLdpcAllowed(staInfo));
+      // 11b
+      _rsMngRateSetIdx(rsMngRate, RS_NON_HT_RATE_CCK_5_5M);
     }
 
-    _rsMngRateSetBw(rsMngRate, CHANNEL_WIDTH20);
-
-    if (mode == TLC_MNG_MODE_HE) {
-        _rsMngRateSetGi(rsMngRate, HE_3_2_GI);
-    } else {
-        _rsMngRateSetGi(rsMngRate, HT_VHT_NGI);
+    if (!(BIT(_rsMngRateGetIdx(rsMngRate)) & nonHtRates)) {
+      // we don't support the mean rate as defined in the SAS,
+      // so just start from the lowest supported rate.
+      _rsMngRateSetIdx(rsMngRate, (U08)LSB2ORD(nonHtRates));
     }
-    _rsMngRateSetBfer(rsMngRate, FALSE);
-
-    if (mode > TLC_MNG_MODE_LEGACY && _rsMngIsStbcAllowed(staInfo, rsMngRate)) {
-        _rsMngRateSetStbc(rsMngRate, TRUE);
-        _rsMngRateSetAnt(rsMngRate, rsMngGetDualAntMsk());
-    } else {
-        _rsMngRateSetStbc(rsMngRate, FALSE);
-        _rsMngRateSetAnt(rsMngRate, rsMngGetSingleAntMsk(staInfo->config.chainsEnabled));
-    }
-
-    if (mode > TLC_MNG_MODE_LEGACY) {
-        U08 idx = (U08)(_rsMngGetSuppRatesSameMode(staInfo, rsMngRate) & BIT(RS_MCS_3) ? RS_MCS_3
-                                                                                       : RS_MCS_0);
-
-        _rsMngRateSetIdx(rsMngRate, idx);
-    } else {
-        if (LSB2ORD(nonHtRates) > RS_NON_HT_RATE_CCK_LAST) {
-            // 11a
-            _rsMngRateSetIdx(rsMngRate, RS_NON_HT_RATE_OFDM_24M);
-        } else if (MSB2ORD(nonHtRates) > RS_NON_HT_RATE_CCK_LAST) {
-            // 11g
-            _rsMngRateSetIdx(rsMngRate, RS_NON_HT_RATE_OFDM_18M);
-        } else {
-            // 11b
-            _rsMngRateSetIdx(rsMngRate, RS_NON_HT_RATE_CCK_5_5M);
-        }
-
-        if (!(BIT(_rsMngRateGetIdx(rsMngRate)) & nonHtRates)) {
-            // we don't support the mean rate as defined in the SAS,
-            // so just start from the lowest supported rate.
-            _rsMngRateSetIdx(rsMngRate, (U08)LSB2ORD(nonHtRates));
-        }
-    }
+  }
 }
 
 static void rsMngInitTlcTbl(RS_MNG_STA_INFO_S* staInfo, RS_MNG_TBL_INFO_S* tblInfo) {
-    _rsMngSetInitRate(staInfo, &tblInfo->rsMngRate);
+  _rsMngSetInitRate(staInfo, &tblInfo->rsMngRate);
 
-    tblInfo->column = _rsMngGetColByRate(&(tblInfo->rsMngRate));
-    staInfo->stableColumn = tblInfo->column;
+  tblInfo->column = _rsMngGetColByRate(&(tblInfo->rsMngRate));
+  staInfo->stableColumn = tblInfo->column;
 
-    _rsMngUpdateRateTbl(staInfo, TRUE);
+  _rsMngUpdateRateTbl(staInfo, TRUE);
 }
 
 static void rsMngResetStaInfo(struct iwl_mvm* mvm, struct ieee80211_sta* sta,
                               struct iwl_mvm_sta* mvmsta, RS_MNG_STA_INFO_S* staInfo,
                               BOOLEAN reconfigure) {
-    U32 fixedRate = staInfo->fixedRate;
-    U16 aggDurationLimit = staInfo->aggDurationLimit;
-    U08 amsduInAmpdu = staInfo->amsduInAmpdu;
-    BOOLEAN longAggEnabled = staInfo->longAggEnabled;
+  U32 fixedRate = staInfo->fixedRate;
+  U16 aggDurationLimit = staInfo->aggDurationLimit;
+  U08 amsduInAmpdu = staInfo->amsduInAmpdu;
+  BOOLEAN longAggEnabled = staInfo->longAggEnabled;
 
-    _memclr(staInfo, sizeof(*staInfo));
+  _memclr(staInfo, sizeof(*staInfo));
 
-    staInfo->mvm = mvm;
-    staInfo->sta = sta;
-    staInfo->mvmsta = mvmsta;
-    staInfo->lastSearchCycleEndTimeJiffies = jiffies;
-    staInfo->lastRateUpscaleTimeJiffies = jiffies;
-    staInfo->lastEnableJiffies = jiffies;
+  staInfo->mvm = mvm;
+  staInfo->sta = sta;
+  staInfo->mvmsta = mvmsta;
+  staInfo->lastSearchCycleEndTimeJiffies = jiffies;
+  staInfo->lastRateUpscaleTimeJiffies = jiffies;
+  staInfo->lastEnableJiffies = jiffies;
 
-    if (reconfigure) {
-        staInfo->fixedRate = fixedRate;
-        staInfo->amsduInAmpdu = amsduInAmpdu;
-        staInfo->longAggEnabled = longAggEnabled;
-        staInfo->aggDurationLimit = aggDurationLimit;
-    } else {
-        staInfo->aggDurationLimit = RS_MNG_AGG_DURATION_LIMIT;
-    }
+  if (reconfigure) {
+    staInfo->fixedRate = fixedRate;
+    staInfo->amsduInAmpdu = amsduInAmpdu;
+    staInfo->longAggEnabled = longAggEnabled;
+    staInfo->aggDurationLimit = aggDurationLimit;
+  } else {
+    staInfo->aggDurationLimit = RS_MNG_AGG_DURATION_LIMIT;
+  }
 
-    // aggState IDLE is 0. so memclear sets it
-    staInfo->rsMngState = RS_MNG_STATE_STAY_IN_COLUMN;
+  // aggState IDLE is 0. so memclear sets it
+  staInfo->rsMngState = RS_MNG_STATE_STAY_IN_COLUMN;
 
-    _rsMngRateInvalidate(&staInfo->rateTblInfo.rsMngRate);
-    _rsMngClearTblWindows(staInfo);
-    _rsMngClearWinArr(staInfo->tpcTable.windows, RS_MNG_TPC_NUM_STEPS);
+  _rsMngRateInvalidate(&staInfo->rateTblInfo.rsMngRate);
+  _rsMngClearTblWindows(staInfo);
+  _rsMngClearWinArr(staInfo->tpcTable.windows, RS_MNG_TPC_NUM_STEPS);
 
-    staInfo->tpcTable.currStep = RS_MNG_TPC_DISABLED;
-    staInfo->staBuffSize = RS_MNG_AGG_FRAME_CNT_LIMIT;
-    staInfo->amsduEnabledSize = RS_MNG_AMSDU_INVALID;
-    staInfo->amsduSupport = FALSE;
-    staInfo->failSafeCounter = 0;
-    staInfo->amsduBlacklist = 0;
+  staInfo->tpcTable.currStep = RS_MNG_TPC_DISABLED;
+  staInfo->staBuffSize = RS_MNG_AGG_FRAME_CNT_LIMIT;
+  staInfo->amsduEnabledSize = RS_MNG_AMSDU_INVALID;
+  staInfo->amsduSupport = FALSE;
+  staInfo->failSafeCounter = 0;
+  staInfo->amsduBlacklist = 0;
 }
 
 static bool _tlcMngConfigValid(TLC_MNG_CONFIG_PARAMS_CMD_API_S* params) {
-    U08 chainsEnabled = params->chainsEnabled;
+  U08 chainsEnabled = params->chainsEnabled;
 
-    // no valid chain is selected
-    if (WARN_ON(!(chainsEnabled && (chainsEnabled & rsMngGetDualAntMsk()) == chainsEnabled))) {
+  // no valid chain is selected
+  if (WARN_ON(!(chainsEnabled && (chainsEnabled & rsMngGetDualAntMsk()) == chainsEnabled))) {
+    return false;
+  }
+
+  // at least one non-HT rate MUST be valid
+  if (WARN_ON(!params->nonHt)) {
+    return false;
+  }
+
+  if (params->bestSuppMode == TLC_MNG_MODE_LEGACY) {
+    // check that no MCS (HT/VHT/HE) rates are set
+    if (WARN_ON(params->mcs[TLC_MNG_NSS_1][0] || params->mcs[TLC_MNG_NSS_1][1] ||
+                params->mcs[TLC_MNG_NSS_2][0] || params->mcs[TLC_MNG_NSS_2][1])) {
+      return false;
+    }
+
+    // all config flags make sense only in HT/VHT/HE scenarios, and non-ht rates can only
+    // support 20MHz bandwidth
+    if (WARN_ON(!(!params->configFlags && params->maxChWidth == TLC_MNG_CH_WIDTH_20MHZ))) {
+      return false;
+    }
+  } else {  // HT/VHT/HE
+    // check that there are valid rates for the best supported mode
+    if (WARN_ON(!(params->mcs[TLC_MNG_NSS_1][0]))) {
+      return false;
+    }
+
+    if (chainsEnabled != rsMngGetDualAntMsk()) {
+      // the following flags all require 2 antennas to be used
+      if (WARN_ON(params->configFlags &
+                  (TLC_MNG_CONFIG_FLAGS_STBC_MSK | TLC_MNG_CONFIG_FLAGS_HE_STBC_160MHZ_MSK |
+                   TLC_MNG_CONFIG_FLAGS_HE_DCM_NSS_2_MSK))) {
         return false;
+      }
+
+      // only one chain => can't do mimo
+      if (WARN_ON(params->mcs[TLC_MNG_NSS_2][0] || params->mcs[TLC_MNG_NSS_2][1])) {
+        return false;
+      }
     }
 
-    // at least one non-HT rate MUST be valid
-    if (WARN_ON(!params->nonHt)) { return false; }
-
-    if (params->bestSuppMode == TLC_MNG_MODE_LEGACY) {
-        // check that the no non-HT rates are set
-        if (WARN_ON(params->mcs[TLC_MNG_NSS_1][0] || params->mcs[TLC_MNG_NSS_1][1] ||
-                    params->mcs[TLC_MNG_NSS_2][0] || params->mcs[TLC_MNG_NSS_2][1])) {
-            return false;
-        }
-
-        // all config flags make sense only in HT/VHT/HE scenarios, and non-ht rates can only
-        // support 20MHz bandwidth
-        if (WARN_ON(!(!params->configFlags && params->maxChWidth == TLC_MNG_CH_WIDTH_20MHZ))) {
-            return false;
-        }
-    } else {  // HT/VHT/HE
-        // check that there are valid rates for the best supported mode
-        if (WARN_ON(!(params->mcs[TLC_MNG_NSS_1][0]))) { return false; }
-
-        if (chainsEnabled != rsMngGetDualAntMsk()) {
-            // the following flags all require 2 antennas to be used
-            if (WARN_ON(params->configFlags &
-                        (TLC_MNG_CONFIG_FLAGS_STBC_MSK | TLC_MNG_CONFIG_FLAGS_HE_STBC_160MHZ_MSK |
-                         TLC_MNG_CONFIG_FLAGS_HE_DCM_NSS_2_MSK))) {
-                return false;
-            }
-
-            // only one chain => can't do mimo
-            if (WARN_ON(params->mcs[TLC_MNG_NSS_2][0] || params->mcs[TLC_MNG_NSS_2][1])) {
-                return false;
-            }
-        }
-
-        if (params->bestSuppMode == TLC_MNG_MODE_HT) {
-            if (WARN_ON(!((params->bestSuppMode == TLC_MNG_MODE_HT &&
-                           params->maxChWidth <= TLC_MNG_CH_WIDTH_40MHZ) ||
-                          params->maxChWidth < TLC_MNG_CH_WIDTH_MAX))) {
-                return false;
-            }
-        }
-
-        if (params->bestSuppMode < TLC_MNG_MODE_HE) {
-            // the following flags are for HE only
-            if (WARN_ON(params->configFlags & (TLC_MNG_CONFIG_FLAGS_HE_STBC_160MHZ_MSK |
-                                               TLC_MNG_CONFIG_FLAGS_HE_DCM_NSS_1_MSK |
-                                               TLC_MNG_CONFIG_FLAGS_HE_DCM_NSS_2_MSK |
-                                               TLC_MNG_CONFIG_FLAGS_HE_BLOCK_2X_LTF_MSK))) {
-                return false;
-            }
-        } else {
-            if (WARN_ON(params->sgiChWidthSupport)) { return false; }
-        }
+    if (params->bestSuppMode == TLC_MNG_MODE_HT) {
+      if (WARN_ON(!((params->bestSuppMode == TLC_MNG_MODE_HT &&
+                     params->maxChWidth <= TLC_MNG_CH_WIDTH_40MHZ) ||
+                    params->maxChWidth < TLC_MNG_CH_WIDTH_MAX))) {
+        return false;
+      }
     }
 
-    if (WARN_ON(!(params->amsduSupported <= TLC_AMSDU_SUPPORTED))) { return false; }
+    if (params->bestSuppMode < TLC_MNG_MODE_HE) {
+      // the following flags are for HE only
+      if (WARN_ON(params->configFlags &
+                  (TLC_MNG_CONFIG_FLAGS_HE_STBC_160MHZ_MSK | TLC_MNG_CONFIG_FLAGS_HE_DCM_NSS_1_MSK |
+                   TLC_MNG_CONFIG_FLAGS_HE_DCM_NSS_2_MSK |
+                   TLC_MNG_CONFIG_FLAGS_HE_BLOCK_2X_LTF_MSK))) {
+        return false;
+      }
+    } else {
+      if (WARN_ON(params->sgiChWidthSupport)) {
+        return false;
+      }
+    }
+  }
 
-    return true;
+  if (WARN_ON(!(params->amsduSupported <= TLC_AMSDU_SUPPORTED))) {
+    return false;
+  }
+
+  return true;
 }
 
 // rs_initialize_lq
@@ -3522,15 +3599,15 @@
 //
 // NOTE: This sets up a default set of values.  These will be replaced later
 static void _rsMngTlcInit(RS_MNG_STA_INFO_S* staInfo) {
-    RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
+  RS_MNG_TBL_INFO_S* tblInfo = &staInfo->rateTblInfo;
 
-    rsMngInitTlcTbl(staInfo, tblInfo);
+  rsMngInitTlcTbl(staInfo, tblInfo);
 
-    // Start in search cycle state in order allow quick convergence on the optimal rate.
-    staInfo->rsMngState = RS_MNG_STATE_SEARCH_CYCLE_STARTED;
-    _rsMngSetVisitedColumn(staInfo, tblInfo->column);
-    _rsMngPrepareForBwChangeAttempt(staInfo, &staInfo->rateTblInfo.rsMngRate);
-    DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngTlcInit: starting at column %d", tblInfo->column);
+  // Start in search cycle state in order allow quick convergence on the optimal rate.
+  staInfo->rsMngState = RS_MNG_STATE_SEARCH_CYCLE_STARTED;
+  _rsMngSetVisitedColumn(staInfo, tblInfo->column);
+  _rsMngPrepareForBwChangeAttempt(staInfo, &staInfo->rateTblInfo.rsMngRate);
+  DBG_PRINTF(UT, TLC_OFFLOAD_DBG, INFO, "_rsMngTlcInit: starting at column %d", tblInfo->column);
 }
 
 /**********************************************************************/
@@ -3540,27 +3617,31 @@
 static void cmdHandlerTlcMngConfig(struct iwl_mvm* mvm, struct ieee80211_sta* sta,
                                    struct iwl_mvm_sta* mvmsta, RS_MNG_STA_INFO_S* staInfo,
                                    TLC_MNG_CONFIG_PARAMS_CMD_API_S* config, BOOLEAN reconfigure) {
-    if (!_tlcMngConfigValid(config)) { return; }
+  if (!_tlcMngConfigValid(config)) {
+    return;
+  }
 
-    // Check if this a reconfiguration of an existing station
-    if (staInfo->enabled && reconfigure) {
-        // Switching between non-HT/HT/VHT/HE requires completely de-associating before
-        // reassociating with the new mode. That being said, windows driver always sends a tlc
-        // config command with only non-HT mode when first adding a station, and then updates it to
-        // the correct mode after association. Since the switch from non-HT to HT/VHT/HE doesn't
-        // require any extra processing here (no aggregation state to possibly change etc.), this is
-        // allowed even though it's weird.
-        if (WARN_ON(!(config->bestSuppMode >= staInfo->config.bestSuppMode))) { return; }
+  // Check if this a reconfiguration of an existing station
+  if (staInfo->enabled && reconfigure) {
+    // Switching between non-HT/HT/VHT/HE requires completely de-associating before
+    // reassociating with the new mode. That being said, windows driver always sends a tlc
+    // config command with only non-HT mode when first adding a station, and then updates it to
+    // the correct mode after association. Since the switch from non-HT to HT/VHT/HE doesn't
+    // require any extra processing here (no aggregation state to possibly change etc.), this is
+    // allowed even though it's weird.
+    if (WARN_ON(!(config->bestSuppMode >= staInfo->config.bestSuppMode))) {
+      return;
     }
+  }
 
-    rsMngResetStaInfo(mvm, sta, mvmsta, staInfo, staInfo->enabled && reconfigure);
-    BUILD_BUG_ON(sizeof(staInfo->config) != sizeof(*config));
-    memcpy(&staInfo->config, config, sizeof(staInfo->config));
+  rsMngResetStaInfo(mvm, sta, mvmsta, staInfo, staInfo->enabled && reconfigure);
+  BUILD_BUG_ON(sizeof(staInfo->config) != sizeof(*config));
+  memcpy(&staInfo->config, config, sizeof(staInfo->config));
 
-    rsMngInitAmsdu(staInfo);
+  rsMngInitAmsdu(staInfo);
 
-    // send LQ command with basic rates table
-    _rsMngTlcInit(staInfo);
+  // send LQ command with basic rates table
+  _rsMngTlcInit(staInfo);
 
-    staInfo->enabled = TRUE;
+  staInfo->enabled = TRUE;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-fw.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-fw.c
index 87fedbf..16e3bf7 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-fw.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-fw.c
@@ -38,299 +38,323 @@
 #include "sta.h"
 
 static uint8_t rs_fw_bw_from_sta_bw(struct ieee80211_sta* sta) {
-    switch (sta->bandwidth) {
+  switch (sta->bandwidth) {
     case IEEE80211_STA_RX_BW_160:
-        return IWL_TLC_MNG_CH_WIDTH_160MHZ;
+      return IWL_TLC_MNG_CH_WIDTH_160MHZ;
     case IEEE80211_STA_RX_BW_80:
-        return IWL_TLC_MNG_CH_WIDTH_80MHZ;
+      return IWL_TLC_MNG_CH_WIDTH_80MHZ;
     case IEEE80211_STA_RX_BW_40:
-        return IWL_TLC_MNG_CH_WIDTH_40MHZ;
+      return IWL_TLC_MNG_CH_WIDTH_40MHZ;
     case IEEE80211_STA_RX_BW_20:
     default:
-        return IWL_TLC_MNG_CH_WIDTH_20MHZ;
-    }
+      return IWL_TLC_MNG_CH_WIDTH_20MHZ;
+  }
 }
 
 static uint8_t rs_fw_set_active_chains(uint8_t chains) {
-    uint8_t fw_chains = 0;
+  uint8_t fw_chains = 0;
 
-    if (chains & ANT_A) { fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK; }
-    if (chains & ANT_B) { fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK; }
-    if (chains & ANT_C) {
-        WARN(false, "tlc offload doesn't support antenna C. chains: 0x%x\n", chains);
-    }
+  if (chains & ANT_A) {
+    fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+  }
+  if (chains & ANT_B) {
+    fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+  }
+  if (chains & ANT_C) {
+    WARN(false, "tlc offload doesn't support antenna C. chains: 0x%x\n", chains);
+  }
 
-    return fw_chains;
+  return fw_chains;
 }
 
 static uint8_t rs_fw_sgi_cw_support(struct ieee80211_sta* sta) {
-    struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
-    struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
-    struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
-    uint8_t supp = 0;
+  struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
+  struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
+  struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
+  uint8_t supp = 0;
 
-    if (he_cap && he_cap->has_he) { return 0; }
+  if (he_cap && he_cap->has_he) {
+    return 0;
+  }
 
-    if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) { supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ); }
-    if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) { supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ); }
-    if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80) { supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ); }
-    if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160) { supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ); }
+  if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) {
+    supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
+  }
+  if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) {
+    supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
+  }
+  if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80) {
+    supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
+  }
+  if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160) {
+    supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);
+  }
 
-    return supp;
+  return supp;
 }
 
 static uint16_t rs_fw_set_config_flags(struct iwl_mvm* mvm, struct ieee80211_sta* sta) {
-    struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
-    struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
-    struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
-    bool vht_ena = vht_cap && vht_cap->vht_supported;
-    uint16_t flags = 0;
+  struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
+  struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
+  struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
+  bool vht_ena = vht_cap && vht_cap->vht_supported;
+  uint16_t flags = 0;
 
-    if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
-        if (he_cap && he_cap->has_he) {
-            if (he_cap->he_cap_elem.phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) {
-                flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
-            }
+  if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
+    if (he_cap && he_cap->has_he) {
+      if (he_cap->he_cap_elem.phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) {
+        flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+      }
 
-            if (he_cap->he_cap_elem.phy_cap_info[7] & IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ) {
-                flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
-            }
-        } else if ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
-                   (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))) {
-            flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
-        }
+      if (he_cap->he_cap_elem.phy_cap_info[7] & IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ) {
+        flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
+      }
+    } else if ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+               (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))) {
+      flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
     }
+  }
 
-    if (mvm->cfg->ht_params->ldpc && ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
-                                      (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) {
-        flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
-    }
+  if (mvm->cfg->ht_params->ldpc && ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
+                                    (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) {
+    flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+  }
 
-    if (he_cap && he_cap->has_he &&
-        (he_cap->he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK)) {
-        flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
-    }
+  if (he_cap && he_cap->has_he &&
+      (he_cap->he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK)) {
+    flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
+  }
 
-    return flags;
+  return flags;
 }
 
 static int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap* vht_cap, int nss) {
-    uint16_t rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & (0x3 << (2 * (nss - 1)));
-    rx_mcs >>= (2 * (nss - 1));
+  uint16_t rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & (0x3 << (2 * (nss - 1)));
+  rx_mcs >>= (2 * (nss - 1));
 
-    switch (rx_mcs) {
+  switch (rx_mcs) {
     case IEEE80211_VHT_MCS_SUPPORT_0_7:
-        return IWL_TLC_MNG_HT_RATE_MCS7;
+      return IWL_TLC_MNG_HT_RATE_MCS7;
     case IEEE80211_VHT_MCS_SUPPORT_0_8:
-        return IWL_TLC_MNG_HT_RATE_MCS8;
+      return IWL_TLC_MNG_HT_RATE_MCS8;
     case IEEE80211_VHT_MCS_SUPPORT_0_9:
-        return IWL_TLC_MNG_HT_RATE_MCS9;
+      return IWL_TLC_MNG_HT_RATE_MCS9;
     default:
-        WARN_ON_ONCE(1);
-        break;
-    }
+      WARN_ON_ONCE(1);
+      break;
+  }
 
-    return 0;
+  return 0;
 }
 
 static void rs_fw_vht_set_enabled_rates(const struct ieee80211_sta* sta,
                                         const struct ieee80211_sta_vht_cap* vht_cap,
                                         struct iwl_tlc_config_cmd* cmd) {
-    uint16_t supp;
-    int i, highest_mcs;
+  uint16_t supp;
+  int i, highest_mcs;
 
-    for (i = 0; i < sta->rx_nss; i++) {
-        if (i == MAX_NSS) { break; }
-
-        highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
-        if (!highest_mcs) { continue; }
-
-        supp = BIT(highest_mcs + 1) - 1;
-        if (sta->bandwidth == IEEE80211_STA_RX_BW_20) { supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); }
-
-        cmd->ht_rates[i][0] = cpu_to_le16(supp);
-        if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
-            cmd->ht_rates[i][1] = cmd->ht_rates[i][0];
-        }
+  for (i = 0; i < sta->rx_nss; i++) {
+    if (i == MAX_NSS) {
+      break;
     }
+
+    highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
+    if (!highest_mcs) {
+      continue;
+    }
+
+    supp = BIT(highest_mcs + 1) - 1;
+    if (sta->bandwidth == IEEE80211_STA_RX_BW_20) {
+      supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+    }
+
+    cmd->ht_rates[i][0] = cpu_to_le16(supp);
+    if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+      cmd->ht_rates[i][1] = cmd->ht_rates[i][0];
+    }
+  }
 }
 
 static uint16_t rs_fw_he_ieee80211_mcs_to_rs_mcs(uint16_t mcs) {
-    switch (mcs) {
+  switch (mcs) {
     case IEEE80211_HE_MCS_SUPPORT_0_7:
-        return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+      return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
     case IEEE80211_HE_MCS_SUPPORT_0_9:
-        return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+      return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
     case IEEE80211_HE_MCS_SUPPORT_0_11:
-        return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+      return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
     case IEEE80211_HE_MCS_NOT_SUPPORTED:
-        return 0;
-    }
+      return 0;
+  }
 
-    WARN(1, "invalid HE MCS %d\n", mcs);
-    return 0;
+  WARN(1, "invalid HE MCS %d\n", mcs);
+  return 0;
 }
 
 static void rs_fw_he_set_enabled_rates(const struct ieee80211_sta* sta,
                                        const struct ieee80211_sta_he_cap* he_cap,
                                        struct iwl_tlc_config_cmd* cmd) {
-    uint16_t mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
-    uint16_t mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
-    int i;
+  uint16_t mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+  uint16_t mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+  int i;
 
-    for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
-        uint16_t _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
-        uint16_t _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+  for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
+    uint16_t _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+    uint16_t _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
 
-        cmd->ht_rates[i][0] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
-        cmd->ht_rates[i][1] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
-    }
+    cmd->ht_rates[i][0] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+    cmd->ht_rates[i][1] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
+  }
 }
 
 static void rs_fw_set_supp_rates(struct ieee80211_sta* sta, struct ieee80211_supported_band* sband,
                                  struct iwl_tlc_config_cmd* cmd) {
-    int i;
-    unsigned long tmp;
-    unsigned long supp; /* must be unsigned long for for_each_set_bit */
-    const struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
-    const struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
-    const struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
+  int i;
+  unsigned long tmp;
+  unsigned long supp; /* must be unsigned long for for_each_set_bit */
+  const struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
+  const struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
+  const struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
 
-    /* non HT rates */
-    supp = 0;
-    tmp = sta->supp_rates[sband->band];
-    for_each_set_bit(i, &tmp, BITS_PER_LONG) supp |= BIT(sband->bitrates[i].hw_value);
+  /* non HT rates */
+  supp = 0;
+  tmp = sta->supp_rates[sband->band];
+  for_each_set_bit(i, &tmp, BITS_PER_LONG) supp |= BIT(sband->bitrates[i].hw_value);
 
-    cmd->non_ht_rates = cpu_to_le16(supp);
-    cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+  cmd->non_ht_rates = cpu_to_le16(supp);
+  cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
 
-    /* HT/VHT rates */
-    if (he_cap && he_cap->has_he) {
-        cmd->mode = IWL_TLC_MNG_MODE_HE;
-        rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
-    } else if (vht_cap && vht_cap->vht_supported) {
-        cmd->mode = IWL_TLC_MNG_MODE_VHT;
-        rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
-    } else if (ht_cap && ht_cap->ht_supported) {
-        cmd->mode = IWL_TLC_MNG_MODE_HT;
-        cmd->ht_rates[0][0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
-        cmd->ht_rates[1][0] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
-    }
+  /* HT/VHT rates */
+  if (he_cap && he_cap->has_he) {
+    cmd->mode = IWL_TLC_MNG_MODE_HE;
+    rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
+  } else if (vht_cap && vht_cap->vht_supported) {
+    cmd->mode = IWL_TLC_MNG_MODE_VHT;
+    rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
+  } else if (ht_cap && ht_cap->ht_supported) {
+    cmd->mode = IWL_TLC_MNG_MODE_HT;
+    cmd->ht_rates[0][0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+    cmd->ht_rates[1][0] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+  }
 }
 
 void iwl_mvm_tlc_update_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_tlc_update_notif* notif;
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvmsta;
-    struct iwl_lq_sta_rs_fw* lq_sta;
-    uint32_t flags;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_tlc_update_notif* notif;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvmsta;
+  struct iwl_lq_sta_rs_fw* lq_sta;
+  uint32_t flags;
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    notif = (void*)pkt->data;
-    sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
-    if (IS_ERR_OR_NULL(sta)) {
-        IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", notif->sta_id);
-        goto out;
+  notif = (void*)pkt->data;
+  sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
+  if (IS_ERR_OR_NULL(sta)) {
+    IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", notif->sta_id);
+    goto out;
+  }
+
+  mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+  if (!mvmsta) {
+    IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", notif->sta_id);
+    goto out;
+  }
+
+  flags = le32_to_cpu(notif->flags);
+
+  lq_sta = &mvmsta->lq_sta.rs_fw;
+
+  if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
+    lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
+    IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n", lq_sta->last_rate_n_flags);
+  }
+
+  if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) {
+    uint16_t size = le32_to_cpu(notif->amsdu_size);
+    int i;
+
+    if (WARN_ON(sta->max_amsdu_len < size)) {
+      goto out;
     }
 
-    mvmsta = iwl_mvm_sta_from_mac80211(sta);
+    mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
+    mvmsta->max_amsdu_len = size;
+    sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
 
-    if (!mvmsta) {
-        IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", notif->sta_id);
-        goto out;
+    for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+      if (mvmsta->amsdu_enabled & BIT(i)) {
+        sta->max_tid_amsdu_len[i] = iwl_mvm_max_amsdu_size(mvm, sta, i);
+      } else
+      /*
+       * Not so elegant, but this will effectively
+       * prevent AMSDU on this TID
+       */
+      {
+        sta->max_tid_amsdu_len[i] = 1;
+      }
     }
 
-    flags = le32_to_cpu(notif->flags);
-
-    lq_sta = &mvmsta->lq_sta.rs_fw;
-
-    if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
-        lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
-        IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n", lq_sta->last_rate_n_flags);
-    }
-
-    if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) {
-        uint16_t size = le32_to_cpu(notif->amsdu_size);
-        int i;
-
-        if (WARN_ON(sta->max_amsdu_len < size)) { goto out; }
-
-        mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
-        mvmsta->max_amsdu_len = size;
-        sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
-
-        for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-            if (mvmsta->amsdu_enabled & BIT(i)) {
-                sta->max_tid_amsdu_len[i] = iwl_mvm_max_amsdu_size(mvm, sta, i);
-            } else
-            /*
-             * Not so elegant, but this will effectively
-             * prevent AMSDU on this TID
-             */
-            {
-                sta->max_tid_amsdu_len[i] = 1;
-            }
-        }
-
-        IWL_DEBUG_RATE(
-            mvm, "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
-            le32_to_cpu(notif->amsdu_size), size, mvmsta->amsdu_enabled);
-    }
+    IWL_DEBUG_RATE(mvm,
+                   "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
+                   le32_to_cpu(notif->amsdu_size), size, mvmsta->amsdu_enabled);
+  }
 out:
-    rcu_read_unlock();
+  rcu_read_unlock();
 }
 
 void rs_fw_rate_init(struct iwl_mvm* mvm, struct ieee80211_sta* sta, enum nl80211_band band,
                      bool update) {
-    struct ieee80211_hw* hw = mvm->hw;
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_lq_sta_rs_fw* lq_sta = &mvmsta->lq_sta.rs_fw;
-    uint32_t cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
-    struct ieee80211_supported_band* sband;
-    struct iwl_tlc_config_cmd cfg_cmd = {
-        .sta_id = mvmsta->sta_id,
-        .max_ch_width = update ? rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
-        .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
-        .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
-        .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
-        .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
-        .amsdu = iwl_mvm_is_csum_supported(mvm),
-    };
-    int ret;
+  struct ieee80211_hw* hw = mvm->hw;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_lq_sta_rs_fw* lq_sta = &mvmsta->lq_sta.rs_fw;
+  uint32_t cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+  struct ieee80211_supported_band* sband;
+  struct iwl_tlc_config_cmd cfg_cmd = {
+      .sta_id = mvmsta->sta_id,
+      .max_ch_width = update ? rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
+      .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
+      .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
+      .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
+      .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
+      .amsdu = iwl_mvm_is_csum_supported(mvm),
+  };
+  int ret;
 
-    memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
+  memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    iwl_mvm_reset_frame_stats(mvm);
+  iwl_mvm_reset_frame_stats(mvm);
 #endif
-    sband = hw->wiphy->bands[band];
-    rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
+  sband = hw->wiphy->bands[band];
+  rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
-    if (ret) { IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret); }
+  ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
+  }
 }
 
 void iwl_mvm_rs_add_sta(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta) {
-    struct iwl_lq_sta_rs_fw* lq_sta = &mvmsta->lq_sta.rs_fw;
+  struct iwl_lq_sta_rs_fw* lq_sta = &mvmsta->lq_sta.rs_fw;
 
-    IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+  IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
 
-    lq_sta->pers.drv = mvm;
-    lq_sta->pers.sta_id = mvmsta->sta_id;
-    lq_sta->pers.chains = 0;
-    memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
-    lq_sta->pers.last_rssi = S8_MIN;
-    lq_sta->last_rate_n_flags = 0;
+  lq_sta->pers.drv = mvm;
+  lq_sta->pers.sta_id = mvmsta->sta_id;
+  lq_sta->pers.chains = 0;
+  memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+  lq_sta->pers.last_rssi = S8_MIN;
+  lq_sta->last_rate_n_flags = 0;
 
 #ifdef CPTCFG_MAC80211_DEBUGFS
-    lq_sta->pers.dbg_fixed_rate = 0;
+  lq_sta->pers.dbg_fixed_rate = 0;
 #endif
 }
 
 int rs_fw_tx_protection(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta, bool enable) {
-    /* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */
-    IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n");
-    return 0;
+  /* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */
+  IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n");
+  return 0;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-ng.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-ng.c
index 6a9f228..a643be6 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-ng.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-ng.c
@@ -37,195 +37,221 @@
 #include "rs.h"
 
 static void iwl_start_agg(struct iwl_mvm* mvm, struct ieee80211_sta* sta, int tid) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_tid_data* tid_data;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_tid_data* tid_data;
 
-    tid_data = &mvmsta->tid_data[tid];
-    if (tid_data->state == IWL_AGG_OFF && mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
-        int ret = ieee80211_start_tx_ba_session(sta, tid, 0);
-        if (ret == -EAGAIN) {
-            ieee80211_stop_tx_ba_session(sta, tid);
-            return;
-        }
-        if (ret == 0) { tid_data->state = IWL_AGG_QUEUED; }
+  tid_data = &mvmsta->tid_data[tid];
+  if (tid_data->state == IWL_AGG_OFF && mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
+    int ret = ieee80211_start_tx_ba_session(sta, tid, 0);
+    if (ret == -EAGAIN) {
+      ieee80211_stop_tx_ba_session(sta, tid);
+      return;
     }
+    if (ret == 0) {
+      tid_data->state = IWL_AGG_QUEUED;
+    }
+  }
 }
 
 static uint8_t rs_fw_bw_from_sta_bw(struct ieee80211_sta* sta) {
-    switch (sta->bandwidth) {
+  switch (sta->bandwidth) {
     case IEEE80211_STA_RX_BW_160:
-        return IWL_TLC_MNG_CH_WIDTH_160MHZ;
+      return IWL_TLC_MNG_CH_WIDTH_160MHZ;
     case IEEE80211_STA_RX_BW_80:
-        return IWL_TLC_MNG_CH_WIDTH_80MHZ;
+      return IWL_TLC_MNG_CH_WIDTH_80MHZ;
     case IEEE80211_STA_RX_BW_40:
-        return IWL_TLC_MNG_CH_WIDTH_40MHZ;
+      return IWL_TLC_MNG_CH_WIDTH_40MHZ;
     case IEEE80211_STA_RX_BW_20:
     default:
-        return IWL_TLC_MNG_CH_WIDTH_20MHZ;
-    }
+      return IWL_TLC_MNG_CH_WIDTH_20MHZ;
+  }
 }
 
 static uint8_t rs_fw_set_active_chains(uint8_t chains) {
-    uint8_t fw_chains = 0;
+  uint8_t fw_chains = 0;
 
-    if (chains & ANT_A) { fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK; }
-    if (chains & ANT_B) { fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK; }
-    if (chains & ANT_C) { WARN(false, "tlc doesn't support antenna C. chains: 0x%x\n", chains); }
+  if (chains & ANT_A) {
+    fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+  }
+  if (chains & ANT_B) {
+    fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+  }
+  if (chains & ANT_C) {
+    WARN(false, "tlc doesn't support antenna C. chains: 0x%x\n", chains);
+  }
 
-    return fw_chains;
+  return fw_chains;
 }
 
 static uint8_t rs_fw_sgi_cw_support(struct ieee80211_sta* sta) {
-    struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
-    struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
-    struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
-    uint8_t supp = 0;
+  struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
+  struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
+  struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
+  uint8_t supp = 0;
 
-    if (he_cap && he_cap->has_he) { return 0; }
+  if (he_cap && he_cap->has_he) {
+    return 0;
+  }
 
-    if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) { supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ); }
-    if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) { supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ); }
-    if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80) { supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ); }
-    if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160) { supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ); }
+  if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) {
+    supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
+  }
+  if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) {
+    supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
+  }
+  if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80) {
+    supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
+  }
+  if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160) {
+    supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);
+  }
 
-    return supp;
+  return supp;
 }
 
 static uint16_t rs_fw_set_config_flags(struct iwl_mvm* mvm, struct ieee80211_sta* sta) {
-    struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
-    struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
-    struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
-    bool vht_ena = vht_cap && vht_cap->vht_supported;
-    uint16_t flags = 0;
+  struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
+  struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
+  struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
+  bool vht_ena = vht_cap && vht_cap->vht_supported;
+  uint16_t flags = 0;
 
-    if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
-        if (he_cap && he_cap->has_he) {
-            if (he_cap->he_cap_elem.phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) {
-                flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
-            }
+  if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
+    if (he_cap && he_cap->has_he) {
+      if (he_cap->he_cap_elem.phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) {
+        flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+      }
 
-            if (he_cap->he_cap_elem.phy_cap_info[7] & IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ) {
-                flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
-            }
-        } else if ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
-                   (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))) {
-            flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
-        }
+      if (he_cap->he_cap_elem.phy_cap_info[7] & IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ) {
+        flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
+      }
+    } else if ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+               (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))) {
+      flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
     }
+  }
 
-    if (mvm->cfg->ht_params->ldpc && ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
-                                      (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) {
-        flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
-    }
+  if (mvm->cfg->ht_params->ldpc && ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
+                                    (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) {
+    flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+  }
 
-    if (he_cap && he_cap->has_he &&
-        (he_cap->he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK)) {
-        flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
-    }
+  if (he_cap && he_cap->has_he &&
+      (he_cap->he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK)) {
+    flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
+  }
 
-    return flags;
+  return flags;
 }
 
 static int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap* vht_cap, int nss) {
-    uint16_t rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & (0x3 << (2 * (nss - 1)));
-    rx_mcs >>= (2 * (nss - 1));
+  uint16_t rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & (0x3 << (2 * (nss - 1)));
+  rx_mcs >>= (2 * (nss - 1));
 
-    switch (rx_mcs) {
+  switch (rx_mcs) {
     case IEEE80211_VHT_MCS_SUPPORT_0_7:
-        return IWL_TLC_MNG_HT_RATE_MCS7;
+      return IWL_TLC_MNG_HT_RATE_MCS7;
     case IEEE80211_VHT_MCS_SUPPORT_0_8:
-        return IWL_TLC_MNG_HT_RATE_MCS8;
+      return IWL_TLC_MNG_HT_RATE_MCS8;
     case IEEE80211_VHT_MCS_SUPPORT_0_9:
-        return IWL_TLC_MNG_HT_RATE_MCS9;
+      return IWL_TLC_MNG_HT_RATE_MCS9;
     default:
-        WARN_ON_ONCE(1);
-        break;
-    }
+      WARN_ON_ONCE(1);
+      break;
+  }
 
-    return 0;
+  return 0;
 }
 
 static void rs_fw_vht_set_enabled_rates(const struct ieee80211_sta* sta,
                                         const struct ieee80211_sta_vht_cap* vht_cap,
                                         TLC_MNG_CONFIG_PARAMS_CMD_API_S* cmd) {
-    uint16_t supp;
-    int i, highest_mcs;
+  uint16_t supp;
+  int i, highest_mcs;
 
-    for (i = 0; i < sta->rx_nss; i++) {
-        if (i == MAX_NSS) { break; }
-
-        highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
-        if (!highest_mcs) { continue; }
-
-        supp = BIT(highest_mcs + 1) - 1;
-        if (sta->bandwidth == IEEE80211_STA_RX_BW_20) { supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); }
-
-        cmd->mcs[i][0] = (supp);
-        if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { cmd->mcs[i][1] = cmd->mcs[i][0]; }
+  for (i = 0; i < sta->rx_nss; i++) {
+    if (i == MAX_NSS) {
+      break;
     }
+
+    highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
+    if (!highest_mcs) {
+      continue;
+    }
+
+    supp = BIT(highest_mcs + 1) - 1;
+    if (sta->bandwidth == IEEE80211_STA_RX_BW_20) {
+      supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+    }
+
+    cmd->mcs[i][0] = (supp);
+    if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+      cmd->mcs[i][1] = cmd->mcs[i][0];
+    }
+  }
 }
 
 static uint16_t rs_fw_he_ieee80211_mcs_to_rs_mcs(uint16_t mcs) {
-    switch (mcs) {
+  switch (mcs) {
     case IEEE80211_HE_MCS_SUPPORT_0_7:
-        return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+      return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
     case IEEE80211_HE_MCS_SUPPORT_0_9:
-        return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+      return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
     case IEEE80211_HE_MCS_SUPPORT_0_11:
-        return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+      return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
     case IEEE80211_HE_MCS_NOT_SUPPORTED:
-        return 0;
-    }
+      return 0;
+  }
 
-    WARN(1, "invalid HE MCS %d\n", mcs);
-    return 0;
+  WARN(1, "invalid HE MCS %d\n", mcs);
+  return 0;
 }
 
 static void rs_fw_he_set_enabled_rates(const struct ieee80211_sta* sta,
                                        const struct ieee80211_sta_he_cap* he_cap,
                                        TLC_MNG_CONFIG_PARAMS_CMD_API_S* cmd) {
-    uint16_t mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
-    uint16_t mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
-    int i;
+  uint16_t mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+  uint16_t mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+  int i;
 
-    for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
-        uint16_t _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
-        uint16_t _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+  for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
+    uint16_t _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+    uint16_t _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
 
-        cmd->mcs[i][0] = (rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
-        cmd->mcs[i][1] = (rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
-    }
+    cmd->mcs[i][0] = (rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+    cmd->mcs[i][1] = (rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
+  }
 }
 
 static void rs_fw_set_supp_rates(struct ieee80211_sta* sta, struct ieee80211_supported_band* sband,
                                  TLC_MNG_CONFIG_PARAMS_CMD_API_S* cmd) {
-    int i;
-    unsigned long tmp;
-    unsigned long supp;
-    const struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
-    const struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
-    const struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
+  int i;
+  unsigned long tmp;
+  unsigned long supp;
+  const struct ieee80211_sta_ht_cap* ht_cap = &sta->ht_cap;
+  const struct ieee80211_sta_vht_cap* vht_cap = &sta->vht_cap;
+  const struct ieee80211_sta_he_cap* he_cap = &sta->he_cap;
 
-    /* non HT rates */
-    supp = 0;
-    tmp = sta->supp_rates[sband->band];
-    for_each_set_bit(i, &tmp, BITS_PER_LONG) supp |= BIT(sband->bitrates[i].hw_value);
+  /* non HT rates */
+  supp = 0;
+  tmp = sta->supp_rates[sband->band];
+  for_each_set_bit(i, &tmp, BITS_PER_LONG) supp |= BIT(sband->bitrates[i].hw_value);
 
-    cmd->nonHt = supp;
-    cmd->bestSuppMode = IWL_TLC_MNG_MODE_NON_HT;
+  cmd->nonHt = supp;
+  cmd->bestSuppMode = IWL_TLC_MNG_MODE_NON_HT;
 
-    /* HT/VHT rates */
-    if (he_cap && he_cap->has_he) {
-        cmd->bestSuppMode = IWL_TLC_MNG_MODE_HE;
-        rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
-    } else if (vht_cap && vht_cap->vht_supported) {
-        cmd->bestSuppMode = IWL_TLC_MNG_MODE_VHT;
-        rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
-    } else if (ht_cap && ht_cap->ht_supported) {
-        cmd->bestSuppMode = IWL_TLC_MNG_MODE_HT;
-        cmd->mcs[0][0] = (ht_cap->mcs.rx_mask[0]);
-        cmd->mcs[1][0] = (ht_cap->mcs.rx_mask[1]);
-    }
+  /* HT/VHT rates */
+  if (he_cap && he_cap->has_he) {
+    cmd->bestSuppMode = IWL_TLC_MNG_MODE_HE;
+    rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
+  } else if (vht_cap && vht_cap->vht_supported) {
+    cmd->bestSuppMode = IWL_TLC_MNG_MODE_VHT;
+    rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
+  } else if (ht_cap && ht_cap->ht_supported) {
+    cmd->bestSuppMode = IWL_TLC_MNG_MODE_HT;
+    cmd->mcs[0][0] = (ht_cap->mcs.rx_mask[0]);
+    cmd->mcs[1][0] = (ht_cap->mcs.rx_mask[1]);
+  }
 }
 
 /// TODO: merge file?
@@ -233,74 +259,74 @@
 
 static void rs_drv_rate_init(struct iwl_mvm* mvm, struct ieee80211_sta* sta, enum nl80211_band band,
                              bool update) {
-    struct ieee80211_hw* hw = mvm->hw;
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_lq_sta* lq_sta = &mvmsta->lq_sta.rs_drv;
-    struct ieee80211_supported_band* sband;
-    RS_MNG_STA_INFO_S* staInfo = &lq_sta->pers;
-    TLC_MNG_CONFIG_PARAMS_CMD_API_S config = {};
+  struct ieee80211_hw* hw = mvm->hw;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_lq_sta* lq_sta = &mvmsta->lq_sta.rs_drv;
+  struct ieee80211_supported_band* sband;
+  RS_MNG_STA_INFO_S* staInfo = &lq_sta->pers;
+  TLC_MNG_CONFIG_PARAMS_CMD_API_S config = {};
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    iwl_mvm_reset_frame_stats(mvm);
+  iwl_mvm_reset_frame_stats(mvm);
 #endif
-    sband = hw->wiphy->bands[band];
+  sband = hw->wiphy->bands[band];
 
-    mvmsta->amsdu_enabled = 0;
-    mvmsta->max_amsdu_len = sta->max_amsdu_len;
+  mvmsta->amsdu_enabled = 0;
+  mvmsta->max_amsdu_len = sta->max_amsdu_len;
 
-    config.maxChWidth = update ? rs_fw_bw_from_sta_bw(sta) : IWL_TLC_MNG_CH_WIDTH_20MHZ;
-    config.configFlags = rs_fw_set_config_flags(mvm, sta);
-    config.chainsEnabled = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm));
-    config.maxMpduLen = sta->max_amsdu_len;
-    config.sgiChWidthSupport = rs_fw_sgi_cw_support(sta);
-    config.amsduSupported = iwl_mvm_is_csum_supported(mvm);
-    config.band = sband->band;
-    rs_fw_set_supp_rates(sta, sband, &config);
+  config.maxChWidth = update ? rs_fw_bw_from_sta_bw(sta) : IWL_TLC_MNG_CH_WIDTH_20MHZ;
+  config.configFlags = rs_fw_set_config_flags(mvm, sta);
+  config.chainsEnabled = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm));
+  config.maxMpduLen = sta->max_amsdu_len;
+  config.sgiChWidthSupport = rs_fw_sgi_cw_support(sta);
+  config.amsduSupported = iwl_mvm_is_csum_supported(mvm);
+  config.band = sband->band;
+  rs_fw_set_supp_rates(sta, sband, &config);
 
-    cmdHandlerTlcMngConfig(mvm, sta, mvmsta, staInfo, &config, update);
+  cmdHandlerTlcMngConfig(mvm, sta, mvmsta, staInfo, &config, update);
 }
 
 void iwl_mvm_rs_rate_init(struct iwl_mvm* mvm, struct ieee80211_sta* sta, enum nl80211_band band,
                           bool update) {
-    if (iwl_mvm_has_tlc_offload(mvm)) {
-        rs_fw_rate_init(mvm, sta, band, update);
-    } else {
-        rs_drv_rate_init(mvm, sta, band, update);
-    }
+  if (iwl_mvm_has_tlc_offload(mvm)) {
+    rs_fw_rate_init(mvm, sta, band, update);
+  } else {
+    rs_drv_rate_init(mvm, sta, band, update);
+  }
 }
 
 void iwl_mvm_rs_tx_status(struct iwl_mvm* mvm, struct ieee80211_sta* sta, int tid,
                           struct ieee80211_tx_info* info, bool is_ndp) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_lq_sta* lq_sta = &mvmsta->lq_sta.rs_drv;
-    RS_MNG_STA_INFO_S* staInfo = &lq_sta->pers;
-    TLC_STAT_COMMON_API_S stats;
-    int failures = info->status.rates[0].count - 1;
-    bool acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_lq_sta* lq_sta = &mvmsta->lq_sta.rs_drv;
+  RS_MNG_STA_INFO_S* staInfo = &lq_sta->pers;
+  TLC_STAT_COMMON_API_S stats;
+  int failures = info->status.rates[0].count - 1;
+  bool acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
 
-    if ((info->flags & IEEE80211_TX_CTL_AMPDU) && !(info->flags & IEEE80211_TX_STAT_AMPDU)) {
-        return;
-    }
+  if ((info->flags & IEEE80211_TX_CTL_AMPDU) && !(info->flags & IEEE80211_TX_STAT_AMPDU)) {
+    return;
+  }
 
-    if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-        stats.baTxed = info->status.ampdu_len;
-        stats.baAcked = info->status.ampdu_ack_len;
-        stats.trafficLoad = stats.baTxed;
-        stats.txed[0] = stats.baTxed;
-        stats.txed[1] = 0;
-        stats.acked[0] = stats.baAcked;
-        stats.acked[1] = 0;
-    } else {
-        stats.baTxed = 0;
-        stats.baAcked = 0;
-        stats.trafficLoad = 0;
-        stats.txed[0] = 1;
-        stats.txed[1] = !!failures;
-        stats.acked[0] = !failures && acked;
-        stats.acked[1] = !!failures && acked;
-    }
+  if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+    stats.baTxed = info->status.ampdu_len;
+    stats.baAcked = info->status.ampdu_ack_len;
+    stats.trafficLoad = stats.baTxed;
+    stats.txed[0] = stats.baTxed;
+    stats.txed[1] = 0;
+    stats.acked[0] = stats.baAcked;
+    stats.acked[1] = 0;
+  } else {
+    stats.baTxed = 0;
+    stats.baAcked = 0;
+    stats.trafficLoad = 0;
+    stats.txed[0] = 1;
+    stats.txed[1] = !!failures;
+    stats.acked[0] = !failures && acked;
+    stats.acked[1] = !!failures && acked;
+  }
 
-    tlcStatUpdateHandler(staInfo, &stats, mvm, sta, tid, is_ndp);
+  tlcStatUpdateHandler(staInfo, &stats, mvm, sta, tid, is_ndp);
 }
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
@@ -309,26 +335,24 @@
 void iwl_mvm_update_frame_stats(struct iwl_mvm* mvm, uint32_t rate, bool agg) {}
 
 /* TODO */
-int rs_pretty_print_rate(char* buf, int bufsz, const uint32_t rate) {
-    return 0;
-}
+int rs_pretty_print_rate(char* buf, int bufsz, const uint32_t rate) { return 0; }
 #endif
 
 static void* rs_alloc(struct ieee80211_hw* hw, struct dentry* debugfsdir) {
-    return IWL_MAC80211_GET_MVM(hw);
+  return IWL_MAC80211_GET_MVM(hw);
 }
 
 static void rs_free(void* priv) {}
 
 static void* rs_alloc_sta(void* priv, struct ieee80211_sta* sta, gfp_t gfp) {
-    struct iwl_mvm* mvm = priv;
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_lq_sta* lq_sta = &mvmsta->lq_sta.rs_drv;
-    RS_MNG_STA_INFO_S* staInfo = &lq_sta->pers;
-    void* priv_sta = lq_sta;
+  struct iwl_mvm* mvm = priv;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_lq_sta* lq_sta = &mvmsta->lq_sta.rs_drv;
+  RS_MNG_STA_INFO_S* staInfo = &lq_sta->pers;
+  void* priv_sta = lq_sta;
 
-    rsMngResetStaInfo(mvm, sta, mvmsta, staInfo, false);
-    return priv_sta;
+  rsMngResetStaInfo(mvm, sta, mvmsta, staInfo, false);
+  return priv_sta;
 }
 
 static void rs_rate_init(void* priv, struct ieee80211_supported_band* sband,
@@ -338,67 +362,77 @@
 static void rs_rate_update(void* priv, struct ieee80211_supported_band* sband,
                            struct cfg80211_chan_def* chandef, struct ieee80211_sta* sta,
                            void* priv_sta, uint32_t changed) {
-    struct iwl_mvm* mvm = priv;
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    int tid;
+  struct iwl_mvm* mvm = priv;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  int tid;
 
-    if (!mvmsta->vif) { return; }
+  if (!mvmsta->vif) {
+    return;
+  }
 
-    for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
-        ieee80211_stop_tx_ba_session(sta, tid);
-    }
+  for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+    ieee80211_stop_tx_ba_session(sta, tid);
+  }
 
-    iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
+  iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
 }
 
 static void rs_free_sta(void* priv, struct ieee80211_sta* sta, void* priv_sta) {}
 
 static inline uint8_t rs_get_tid(struct ieee80211_hdr* hdr) {
-    int tid = IWL_MAX_TID_COUNT;
+  int tid = IWL_MAX_TID_COUNT;
 
-    if (ieee80211_is_data_qos(hdr->frame_control)) { tid = ieee80211_get_tid(hdr); }
+  if (ieee80211_is_data_qos(hdr->frame_control)) {
+    tid = ieee80211_get_tid(hdr);
+  }
 
-    return tid;
+  return tid;
 }
 
 static void rs_tx_status(void* priv, struct ieee80211_supported_band* sband,
                          struct ieee80211_sta* sta, void* priv_sta, struct sk_buff* skb) {
-    struct iwl_mvm* mvm = priv;
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct ieee80211_tx_info* info = IEEE80211_SKB_CB(skb);
-    struct ieee80211_hdr* hdr = (void*)skb->data;
+  struct iwl_mvm* mvm = priv;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct ieee80211_tx_info* info = IEEE80211_SKB_CB(skb);
+  struct ieee80211_hdr* hdr = (void*)skb->data;
 
-    if (!mvmsta->vif) { return; }
+  if (!mvmsta->vif) {
+    return;
+  }
 
-    if (!ieee80211_is_data(hdr->frame_control) || (info->flags & IEEE80211_TX_CTL_NO_ACK)) {
-        return;
-    }
+  if (!ieee80211_is_data(hdr->frame_control) || (info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+    return;
+  }
 
-    iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info,
-                         ieee80211_is_qos_nullfunc(hdr->frame_control));
+  iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info,
+                       ieee80211_is_qos_nullfunc(hdr->frame_control));
 }
 
 static void rs_get_rate(void* priv, struct ieee80211_sta* sta, void* priv_sta,
                         struct ieee80211_tx_rate_control* txrc) {
-    struct iwl_mvm_sta* mvmsta = NULL;
-    uint32_t hwrate;
-    struct ieee80211_tx_info* info = IEEE80211_SKB_CB(txrc->skb);
+  struct iwl_mvm_sta* mvmsta = NULL;
+  uint32_t hwrate;
+  struct ieee80211_tx_info* info = IEEE80211_SKB_CB(txrc->skb);
 
-    if (sta) {
-        mvmsta = iwl_mvm_sta_from_mac80211(sta);
-        if (!mvmsta->vif) {
-            sta = NULL;
-            mvmsta = NULL;
-        }
+  if (sta) {
+    mvmsta = iwl_mvm_sta_from_mac80211(sta);
+    if (!mvmsta->vif) {
+      sta = NULL;
+      mvmsta = NULL;
     }
+  }
 
-    if (rate_control_send_low(sta, mvmsta, txrc)) { return; }
+  if (rate_control_send_low(sta, mvmsta, txrc)) {
+    return;
+  }
 
-    if (!mvmsta) { return; }
+  if (!mvmsta) {
+    return;
+  }
 
-    hwrate = le32_to_cpu(mvmsta->lq_sta.rs_drv.lq.rs_table[0]);
-    iwl_mvm_hwrate_to_tx_rate(hwrate, info->band, &info->control.rates[0]);
-    info->control.rates[0].count = 1;
+  hwrate = le32_to_cpu(mvmsta->lq_sta.rs_drv.lq.rs_table[0]);
+  iwl_mvm_hwrate_to_tx_rate(hwrate, info->band, &info->control.rates[0]);
+  info->control.rates[0].count = 1;
 }
 
 static const struct rate_control_ops rs_ops = {
@@ -413,36 +447,32 @@
     .get_rate = rs_get_rate,
 };
 
-int iwl_mvm_rate_control_register(void) {
-    return ieee80211_rate_control_register(&rs_ops);
-}
+int iwl_mvm_rate_control_register(void) { return ieee80211_rate_control_register(&rs_ops); }
 
-void iwl_mvm_rate_control_unregister(void) {
-    ieee80211_rate_control_unregister(&rs_ops);
-}
+void iwl_mvm_rate_control_unregister(void) { ieee80211_rate_control_unregister(&rs_ops); }
 
 static int rs_drv_tx_protection(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta, bool enable) {
-    if (enable) {
-        mvmsta->tx_protection++;
-    } else {
-        mvmsta->tx_protection--;
-    }
+  if (enable) {
+    mvmsta->tx_protection++;
+  } else {
+    mvmsta->tx_protection--;
+  }
 
-    if (mvmsta->tx_protection) {
-        mvmsta->lq_sta.rs_drv.lq.flags |= LQ_FLAG_USE_RTS_MSK;
-    } else {
-        mvmsta->lq_sta.rs_drv.lq.flags &= ~LQ_FLAG_USE_RTS_MSK;
-    }
+  if (mvmsta->tx_protection) {
+    mvmsta->lq_sta.rs_drv.lq.flags |= LQ_FLAG_USE_RTS_MSK;
+  } else {
+    mvmsta->lq_sta.rs_drv.lq.flags &= ~LQ_FLAG_USE_RTS_MSK;
+  }
 
-    return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
+  return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
 }
 
 int iwl_mvm_tx_protection(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta, bool enable) {
-    if (iwl_mvm_has_tlc_offload(mvm)) {
-        return rs_fw_tx_protection(mvm, mvmsta, enable);
-    } else {
-        return rs_drv_tx_protection(mvm, mvmsta, enable);
-    }
+  if (iwl_mvm_has_tlc_offload(mvm)) {
+    return rs_fw_tx_protection(mvm, mvmsta, enable);
+  } else {
+    return rs_drv_tx_protection(mvm, mvmsta, enable);
+  }
 }
 
 void rs_update_last_rssi(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta,
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-ng.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-ng.h
index fe6f36f..3027f79 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-ng.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rs-ng.h
@@ -58,33 +58,33 @@
 
 #define RS_DRV_DATA_LQ_COLOR_POS (8)
 #define RS_DRV_DATA_PACK(lq_color, reduced_txp) \
-    ((void*)(((lq_color) << RS_DRV_DATA_LQ_COLOR_POS) | ((uintptr_t)reduced_txp)))
+  ((void*)(((lq_color) << RS_DRV_DATA_LQ_COLOR_POS) | ((uintptr_t)reduced_txp)))
 
 struct iwl_mvm;
 struct iwl_mvm_sta;
 
 struct iwl_lq_sta_rs_fw_pers {
-    struct iwl_mvm* drv;
-    uint32_t sta_id;
-    uint8_t chains;
-    int8_t chain_signal[IEEE80211_MAX_CHAINS];
-    int8_t last_rssi;
+  struct iwl_mvm* drv;
+  uint32_t sta_id;
+  uint8_t chains;
+  int8_t chain_signal[IEEE80211_MAX_CHAINS];
+  int8_t last_rssi;
 #ifdef CPTCFG_MAC80211_DEBUGFS
-    uint32_t dbg_fixed_rate;
-    uint32_t dbg_agg_frame_count_lim;
+  uint32_t dbg_fixed_rate;
+  uint32_t dbg_agg_frame_count_lim;
 #endif
 };
 
 struct iwl_lq_sta_rs_fw {
-    uint32_t last_rate_n_flags;
+  uint32_t last_rate_n_flags;
 
-    struct iwl_lq_sta_rs_fw_pers pers;
+  struct iwl_lq_sta_rs_fw_pers pers;
 };
 
 struct iwl_lq_sta {
-    struct iwl_lq_cmd lq;
+  struct iwl_lq_cmd lq;
 
-    RS_MNG_STA_INFO_S pers;
+  RS_MNG_STA_INFO_S pers;
 };
 
 int iwl_mvm_rate_control_register(void);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rx.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rx.c
index 75d118a..fc8179c 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rx.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rx.c
@@ -34,6 +34,7 @@
  *****************************************************************************/
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
+
 #include "fw-api.h"
 #include "iwl-trans.h"
 #include "mvm.h"
@@ -45,17 +46,17 @@
  * actual data will come from the fw in the next packet.
  */
 void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
 
-    memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
-    mvm->ampdu_ref++;
+  memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
+  mvm->ampdu_ref++;
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
-        spin_lock(&mvm->drv_stats_lock);
-        mvm->drv_rx_stats.ampdu_count++;
-        spin_unlock(&mvm->drv_stats_lock);
-    }
+  if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+    spin_lock(&mvm->drv_stats_lock);
+    mvm->drv_rx_stats.ampdu_count++;
+    spin_unlock(&mvm->drv_stats_lock);
+  }
 #endif
 }
 
@@ -68,45 +69,45 @@
                                             struct napi_struct* napi, struct sk_buff* skb,
                                             struct ieee80211_hdr* hdr, uint16_t len,
                                             uint8_t crypt_len, struct iwl_rx_cmd_buffer* rxb) {
-    unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
-    unsigned int fraglen;
+  unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+  unsigned int fraglen;
 
-    /*
-     * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len,
-     * but those are all multiples of 4 long) all goes away, but we
-     * want the *end* of it, which is going to be the start of the IP
-     * header, to be aligned when it gets pulled in.
-     * The beginning of the skb->data is aligned on at least a 4-byte
-     * boundary after allocation. Everything here is aligned at least
-     * on a 2-byte boundary so we can just take hdrlen & 3 and pad by
-     * the result.
-     */
-    skb_reserve(skb, hdrlen & 3);
+  /*
+   * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len,
+   * but those are all multiples of 4 long) all goes away, but we
+   * want the *end* of it, which is going to be the start of the IP
+   * header, to be aligned when it gets pulled in.
+   * The beginning of the skb->data is aligned on at least a 4-byte
+   * boundary after allocation. Everything here is aligned at least
+   * on a 2-byte boundary so we can just take hdrlen & 3 and pad by
+   * the result.
+   */
+  skb_reserve(skb, hdrlen & 3);
 
-    /* If frame is small enough to fit in skb->head, pull it completely.
-     * If not, only pull ieee80211_hdr (including crypto if present, and
-     * an additional 8 bytes for SNAP/ethertype, see below) so that
-     * splice() or TCP coalesce are more efficient.
-     *
-     * Since, in addition, ieee80211_data_to_8023() always pull in at
-     * least 8 bytes (possibly more for mesh) we can do the same here
-     * to save the cost of doing it later. That still doesn't pull in
-     * the actual IP header since the typical case has a SNAP header.
-     * If the latter changes (there are efforts in the standards group
-     * to do so) we should revisit this and ieee80211_data_to_8023().
-     */
-    hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
+  /* If frame is small enough to fit in skb->head, pull it completely.
+   * If not, only pull ieee80211_hdr (including crypto if present, and
+   * an additional 8 bytes for SNAP/ethertype, see below) so that
+   * splice() or TCP coalesce are more efficient.
+   *
+   * Since, in addition, ieee80211_data_to_8023() always pull in at
+   * least 8 bytes (possibly more for mesh) we can do the same here
+   * to save the cost of doing it later. That still doesn't pull in
+   * the actual IP header since the typical case has a SNAP header.
+   * If the latter changes (there are efforts in the standards group
+   * to do so) we should revisit this and ieee80211_data_to_8023().
+   */
+  hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
 
-    skb_put_data(skb, hdr, hdrlen);
-    fraglen = len - hdrlen;
+  skb_put_data(skb, hdr, hdrlen);
+  fraglen = len - hdrlen;
 
-    if (fraglen) {
-        int offset = (void*)hdr + hdrlen - rxb_addr(rxb) + rxb_offset(rxb);
+  if (fraglen) {
+    int offset = (void*)hdr + hdrlen - rxb_addr(rxb) + rxb_offset(rxb);
 
-        skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize);
-    }
+    skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize);
+  }
 
-    ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+  ieee80211_rx_napi(mvm->hw, sta, skb, napi);
 }
 
 /*
@@ -117,28 +118,28 @@
  */
 static void iwl_mvm_get_signal_strength(struct iwl_mvm* mvm, struct iwl_rx_phy_info* phy_info,
                                         struct ieee80211_rx_status* rx_status) {
-    int energy_a, energy_b, energy_c, max_energy;
-    uint32_t val;
+  int energy_a, energy_b, energy_c, max_energy;
+  uint32_t val;
 
-    val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
-    energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> IWL_RX_INFO_ENERGY_ANT_A_POS;
-    energy_a = energy_a ? -energy_a : S8_MIN;
-    energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> IWL_RX_INFO_ENERGY_ANT_B_POS;
-    energy_b = energy_b ? -energy_b : S8_MIN;
-    energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> IWL_RX_INFO_ENERGY_ANT_C_POS;
-    energy_c = energy_c ? -energy_c : S8_MIN;
-    max_energy = max(energy_a, energy_b);
-    max_energy = max(max_energy, energy_c);
+  val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
+  energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> IWL_RX_INFO_ENERGY_ANT_A_POS;
+  energy_a = energy_a ? -energy_a : S8_MIN;
+  energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> IWL_RX_INFO_ENERGY_ANT_B_POS;
+  energy_b = energy_b ? -energy_b : S8_MIN;
+  energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> IWL_RX_INFO_ENERGY_ANT_C_POS;
+  energy_c = energy_c ? -energy_c : S8_MIN;
+  max_energy = max(energy_a, energy_b);
+  max_energy = max(max_energy, energy_c);
 
-    IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", energy_a, energy_b, energy_c,
-                    max_energy);
+  IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", energy_a, energy_b, energy_c,
+                  max_energy);
 
-    rx_status->signal = max_energy;
-    rx_status->chains = (le16_to_cpu(phy_info->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA) >>
-                        RX_RES_PHY_FLAGS_ANTENNA_POS;
-    rx_status->chain_signal[0] = energy_a;
-    rx_status->chain_signal[1] = energy_b;
-    rx_status->chain_signal[2] = energy_c;
+  rx_status->signal = max_energy;
+  rx_status->chains =
+      (le16_to_cpu(phy_info->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA) >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+  rx_status->chain_signal[0] = energy_a;
+  rx_status->chain_signal[1] = energy_b;
+  rx_status->chain_signal[2] = energy_c;
 }
 
 /*
@@ -153,126 +154,136 @@
 static uint32_t iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm* mvm, struct ieee80211_hdr* hdr,
                                              struct ieee80211_rx_status* stats,
                                              uint32_t rx_pkt_status, uint8_t* crypt_len) {
-    if (!ieee80211_has_protected(hdr->frame_control) ||
-        (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_NO_ENC) {
-        return 0;
-    }
+  if (!ieee80211_has_protected(hdr->frame_control) ||
+      (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_NO_ENC) {
+    return 0;
+  }
 
-    /* packet was encrypted with unknown alg */
-    if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_ENC_ERR) {
-        return 0;
-    }
+  /* packet was encrypted with unknown alg */
+  if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_ENC_ERR) {
+    return 0;
+  }
 
-    switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
+  switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
     case RX_MPDU_RES_STATUS_SEC_CCM_ENC:
-        /* alg is CCM: check MIC only */
-        if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) { return -1; }
+      /* alg is CCM: check MIC only */
+      if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) {
+        return -1;
+      }
 
-        stats->flag |= RX_FLAG_DECRYPTED;
-        *crypt_len = IEEE80211_CCMP_HDR_LEN;
-        return 0;
+      stats->flag |= RX_FLAG_DECRYPTED;
+      *crypt_len = IEEE80211_CCMP_HDR_LEN;
+      return 0;
 
     case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
-        /* Don't drop the frame and decrypt it in SW */
-        if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
-            !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) {
-            return 0;
-        }
-        *crypt_len = IEEE80211_TKIP_IV_LEN;
-        /* fall through if TTAK OK */
+      /* Don't drop the frame and decrypt it in SW */
+      if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+          !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) {
+        return 0;
+      }
+      *crypt_len = IEEE80211_TKIP_IV_LEN;
+      /* fall through if TTAK OK */
 
     case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
-        if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK)) { return -1; }
+      if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK)) {
+        return -1;
+      }
 
-        stats->flag |= RX_FLAG_DECRYPTED;
-        if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_WEP_ENC) {
-            *crypt_len = IEEE80211_WEP_IV_LEN;
-        }
-        return 0;
+      stats->flag |= RX_FLAG_DECRYPTED;
+      if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_WEP_ENC) {
+        *crypt_len = IEEE80211_WEP_IV_LEN;
+      }
+      return 0;
 
     case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
-        if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) { return -1; }
-        stats->flag |= RX_FLAG_DECRYPTED;
-        return 0;
+      if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) {
+        return -1;
+      }
+      stats->flag |= RX_FLAG_DECRYPTED;
+      return 0;
 
     default:
-        /* Expected in monitor (not having the keys) */
-        if (!mvm->monitor_on) { IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); }
-    }
+      /* Expected in monitor (not having the keys) */
+      if (!mvm->monitor_on) {
+        IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+      }
+  }
 
-    return 0;
+  return 0;
 }
 
 static void iwl_mvm_rx_handle_tcm(struct iwl_mvm* mvm, struct ieee80211_sta* sta,
                                   struct ieee80211_hdr* hdr, uint32_t len,
                                   struct iwl_rx_phy_info* phy_info, uint32_t rate_n_flags) {
-    struct iwl_mvm_sta* mvmsta;
-    struct iwl_mvm_tcm_mac* mdata;
-    struct iwl_mvm_vif* mvmvif;
-    int mac;
-    int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */
-    /* expected throughput in 100Kbps, single stream, 20 MHz */
-    static const uint8_t thresh_tpt[] = {
-        9, 18, 30, 42, 60, 78, 90, 96, 120, 135,
-    };
-    uint16_t thr;
+  struct iwl_mvm_sta* mvmsta;
+  struct iwl_mvm_tcm_mac* mdata;
+  struct iwl_mvm_vif* mvmvif;
+  int mac;
+  int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */
+  /* expected throughput in 100Kbps, single stream, 20 MHz */
+  static const uint8_t thresh_tpt[] = {
+      9, 18, 30, 42, 60, 78, 90, 96, 120, 135,
+  };
+  uint16_t thr;
 
-    if (ieee80211_is_data_qos(hdr->frame_control)) {
-        ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)];
+  if (ieee80211_is_data_qos(hdr->frame_control)) {
+    ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)];
+  }
+
+  mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+
+  if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+    schedule_delayed_work(&mvm->tcm.work, 0);
+  }
+  mdata = &mvm->tcm.data[mac];
+  mdata->rx.pkts[ac]++;
+
+  /* count the airtime only once for each ampdu */
+  if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) {
+    mdata->rx.last_ampdu_ref = mvm->ampdu_ref;
+    mdata->rx.airtime += le16_to_cpu(phy_info->frame_time);
+  }
+  mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+  if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK))) {
+    return;
+  }
+
+  if (mdata->opened_rx_ba_sessions || mdata->uapsd_nonagg_detect.detected ||
+      (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
+       !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
+       !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
+       !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) ||
+      mvmsta->sta_id != mvmvif->ap_sta_id) {
+    return;
+  }
+
+  if (rate_n_flags & RATE_MCS_HT_MSK) {
+    thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK];
+    thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS);
+  } else {
+    if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >= ARRAY_SIZE(thresh_tpt))) {
+      return;
     }
+    thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK];
+    thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS);
+  }
 
-    mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+  thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) >> RATE_MCS_CHAN_WIDTH_POS);
 
-    if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) {
-        schedule_delayed_work(&mvm->tcm.work, 0);
-    }
-    mdata = &mvm->tcm.data[mac];
-    mdata->rx.pkts[ac]++;
-
-    /* count the airtime only once for each ampdu */
-    if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) {
-        mdata->rx.last_ampdu_ref = mvm->ampdu_ref;
-        mdata->rx.airtime += le16_to_cpu(phy_info->frame_time);
-    }
-    mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-
-    if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK))) { return; }
-
-    if (mdata->opened_rx_ba_sessions || mdata->uapsd_nonagg_detect.detected ||
-        (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
-         !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
-         !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
-         !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) ||
-        mvmsta->sta_id != mvmvif->ap_sta_id) {
-        return;
-    }
-
-    if (rate_n_flags & RATE_MCS_HT_MSK) {
-        thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK];
-        thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS);
-    } else {
-        if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >= ARRAY_SIZE(thresh_tpt))) {
-            return;
-        }
-        thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK];
-        thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS);
-    }
-
-    thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) >> RATE_MCS_CHAN_WIDTH_POS);
-
-    mdata->uapsd_nonagg_detect.rx_bytes += len;
-    ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr);
+  mdata->uapsd_nonagg_detect.rx_bytes += len;
+  ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr);
 }
 
 static void iwl_mvm_rx_csum(struct ieee80211_sta* sta, struct sk_buff* skb, uint32_t status) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
 
-    if (mvmvif->features & NETIF_F_RXCSUM && status & RX_MPDU_RES_STATUS_CSUM_DONE &&
-        status & RX_MPDU_RES_STATUS_CSUM_OK) {
-        skb->ip_summed = CHECKSUM_UNNECESSARY;
-    }
+  if (mvmvif->features & NETIF_F_RXCSUM && status & RX_MPDU_RES_STATUS_CSUM_DONE &&
+      status & RX_MPDU_RES_STATUS_CSUM_OK) {
+    skb->ip_summed = CHECKSUM_UNNECESSARY;
+  }
 }
 
 /*
@@ -282,480 +293,521 @@
  */
 void iwl_mvm_rx_rx_mpdu(struct iwl_mvm* mvm, struct napi_struct* napi,
                         struct iwl_rx_cmd_buffer* rxb) {
-    struct ieee80211_hdr* hdr;
-    struct ieee80211_rx_status* rx_status;
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_rx_phy_info* phy_info;
-    struct iwl_rx_mpdu_res_start* rx_res;
-    struct ieee80211_sta* sta = NULL;
-    struct sk_buff* skb;
-    uint32_t len;
-    uint32_t rate_n_flags;
-    uint32_t rx_pkt_status;
-    uint8_t crypt_len = 0;
-    bool take_ref;
+  struct ieee80211_hdr* hdr;
+  struct ieee80211_rx_status* rx_status;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_rx_phy_info* phy_info;
+  struct iwl_rx_mpdu_res_start* rx_res;
+  struct ieee80211_sta* sta = NULL;
+  struct sk_buff* skb;
+  uint32_t len;
+  uint32_t rate_n_flags;
+  uint32_t rx_pkt_status;
+  uint8_t crypt_len = 0;
+  bool take_ref;
 
-    phy_info = &mvm->last_phy_info;
-    rx_res = (struct iwl_rx_mpdu_res_start*)pkt->data;
-    hdr = (struct ieee80211_hdr*)(pkt->data + sizeof(*rx_res));
-    len = le16_to_cpu(rx_res->byte_count);
-    rx_pkt_status = le32_to_cpup((__le32*)(pkt->data + sizeof(*rx_res) + len));
+  phy_info = &mvm->last_phy_info;
+  rx_res = (struct iwl_rx_mpdu_res_start*)pkt->data;
+  hdr = (struct ieee80211_hdr*)(pkt->data + sizeof(*rx_res));
+  len = le16_to_cpu(rx_res->byte_count);
+  rx_pkt_status = le32_to_cpup((__le32*)(pkt->data + sizeof(*rx_res) + len));
 
-    /* Dont use dev_alloc_skb(), we'll have enough headroom once
-     * ieee80211_hdr pulled.
+  /* Dont use dev_alloc_skb(), we'll have enough headroom once
+   * ieee80211_hdr pulled.
+   */
+  skb = alloc_skb(128, GFP_ATOMIC);
+  if (!skb) {
+    IWL_ERR(mvm, "alloc_skb failed\n");
+    return;
+  }
+
+  rx_status = IEEE80211_SKB_RXCB(skb);
+
+  /*
+   * drop the packet if it has failed being decrypted by HW
+   */
+  if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, &crypt_len)) {
+    IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", rx_pkt_status);
+    kfree_skb(skb);
+    return;
+  }
+
+  /*
+   * Keep packets with CRC errors (and with overrun) for monitor mode
+   * (otherwise the firmware discards them) but mark them as bad.
+   */
+  if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
+      !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
+    IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
+    rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+  }
+
+  /* This will be used in several places later */
+  rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
+
+  /* rx_status carries information about the packet to mac80211 */
+  rx_status->mactime = le64_to_cpu(phy_info->timestamp);
+  rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
+  rx_status->band = (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24))
+                        ? NL80211_BAND_2GHZ
+                        : NL80211_BAND_5GHZ;
+  rx_status->freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), rx_status->band);
+
+  /* TSF as indicated by the firmware  is at INA time */
+  rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+
+  iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
+
+  IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
+                        (unsigned long long)rx_status->mactime);
+
+  rcu_read_lock();
+  if (rx_pkt_status & RX_MPDU_RES_STATUS_SRC_STA_FOUND) {
+    uint32_t id = rx_pkt_status & RX_MPDU_RES_STATUS_STA_ID_MSK;
+
+    id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
+
+    if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
+      sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
+      if (IS_ERR(sta)) {
+        sta = NULL;
+      }
+    }
+  } else if (!is_multicast_ether_addr(hdr->addr2)) {
+    /* This is fine since we prevent two stations with the same
+     * address from being added.
      */
-    skb = alloc_skb(128, GFP_ATOMIC);
-    if (!skb) {
-        IWL_ERR(mvm, "alloc_skb failed\n");
-        return;
-    }
+    sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
+  }
 
-    rx_status = IEEE80211_SKB_RXCB(skb);
+  if (sta) {
+    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+    struct ieee80211_vif* tx_blocked_vif = rcu_dereference(mvm->csa_tx_blocked_vif);
+    struct iwl_fw_dbg_trigger_tlv* trig;
+    struct ieee80211_vif* vif = mvmsta->vif;
 
-    /*
-     * drop the packet if it has failed being decrypted by HW
+    /* We have tx blocked stations (with CS bit). If we heard
+     * frames from a blocked station on a new channel we can
+     * TX to it again.
      */
-    if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, &crypt_len)) {
-        IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", rx_pkt_status);
-        kfree_skb(skb);
-        return;
+    if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) {
+      struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+      if (mvmvif->csa_target_freq == rx_status->freq) {
+        iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
+      }
     }
 
-    /*
-     * Keep packets with CRC errors (and with overrun) for monitor mode
-     * (otherwise the firmware discards them) but mark them as bad.
-     */
-    if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
-        !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
-        IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
-        rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+    rs_update_last_rssi(mvm, mvmsta, rx_status);
+
+    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_RSSI);
+
+    if (trig && ieee80211_is_beacon(hdr->frame_control)) {
+      struct iwl_fw_dbg_trigger_low_rssi* rssi_trig;
+      int32_t rssi;
+
+      rssi_trig = (void*)trig->data;
+      rssi = le32_to_cpu(rssi_trig->rssi);
+
+      if (rx_status->signal < rssi) {
+        iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
+      }
     }
 
-    /* This will be used in several places later */
-    rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
-
-    /* rx_status carries information about the packet to mac80211 */
-    rx_status->mactime = le64_to_cpu(phy_info->timestamp);
-    rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
-    rx_status->band = (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24))
-                          ? NL80211_BAND_2GHZ
-                          : NL80211_BAND_5GHZ;
-    rx_status->freq =
-        ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), rx_status->band);
-
-    /* TSF as indicated by the firmware  is at INA time */
-    rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
-
-    iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
-
-    IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
-                          (unsigned long long)rx_status->mactime);
-
-    rcu_read_lock();
-    if (rx_pkt_status & RX_MPDU_RES_STATUS_SRC_STA_FOUND) {
-        uint32_t id = rx_pkt_status & RX_MPDU_RES_STATUS_STA_ID_MSK;
-
-        id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
-
-        if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
-            sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
-            if (IS_ERR(sta)) { sta = NULL; }
-        }
-    } else if (!is_multicast_ether_addr(hdr->addr2)) {
-        /* This is fine since we prevent two stations with the same
-         * address from being added.
-         */
-        sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
+    if (!mvm->tcm.paused && len >= sizeof(*hdr) && !is_multicast_ether_addr(hdr->addr1) &&
+        ieee80211_is_data(hdr->frame_control)) {
+      iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info, rate_n_flags);
     }
-
-    if (sta) {
-        struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-        struct ieee80211_vif* tx_blocked_vif = rcu_dereference(mvm->csa_tx_blocked_vif);
-        struct iwl_fw_dbg_trigger_tlv* trig;
-        struct ieee80211_vif* vif = mvmsta->vif;
-
-        /* We have tx blocked stations (with CS bit). If we heard
-         * frames from a blocked station on a new channel we can
-         * TX to it again.
-         */
-        if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) {
-            struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
-
-            if (mvmvif->csa_target_freq == rx_status->freq) {
-                iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
-            }
-        }
-
-        rs_update_last_rssi(mvm, mvmsta, rx_status);
-
-        trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_RSSI);
-
-        if (trig && ieee80211_is_beacon(hdr->frame_control)) {
-            struct iwl_fw_dbg_trigger_low_rssi* rssi_trig;
-            int32_t rssi;
-
-            rssi_trig = (void*)trig->data;
-            rssi = le32_to_cpu(rssi_trig->rssi);
-
-            if (rx_status->signal < rssi) { iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); }
-        }
-
-        if (!mvm->tcm.paused && len >= sizeof(*hdr) && !is_multicast_ether_addr(hdr->addr1) &&
-            ieee80211_is_data(hdr->frame_control)) {
-            iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info, rate_n_flags);
-        }
 #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE
-        /*
-         * these packets are from the AP or the existing TDLS peer.
-         * In both cases an existing station.
-         */
-        iwl_mvm_tdls_peer_cache_pkt(mvm, hdr, len, 0);
+    /*
+     * these packets are from the AP or the existing TDLS peer.
+     * In both cases an existing station.
+     */
+    iwl_mvm_tdls_peer_cache_pkt(mvm, hdr, len, 0);
 #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */
 
-        if (ieee80211_is_data(hdr->frame_control)) { iwl_mvm_rx_csum(sta, skb, rx_pkt_status); }
+    if (ieee80211_is_data(hdr->frame_control)) {
+      iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
     }
-    rcu_read_unlock();
+  }
+  rcu_read_unlock();
 
-    /* set the preamble flag if appropriate */
-    if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) {
-        rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
-    }
+  /* set the preamble flag if appropriate */
+  if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) {
+    rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+  }
 
-    if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
-        /*
-         * We know which subframes of an A-MPDU belong
-         * together since we get a single PHY response
-         * from the firmware for all of them
-         */
-        rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
-        rx_status->ampdu_reference = mvm->ampdu_ref;
-    }
+  if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+    /*
+     * We know which subframes of an A-MPDU belong
+     * together since we get a single PHY response
+     * from the firmware for all of them
+     */
+    rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+    rx_status->ampdu_reference = mvm->ampdu_ref;
+  }
 
-    /* Set up the HT phy flags */
-    switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+  /* Set up the HT phy flags */
+  switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
     case RATE_MCS_CHAN_WIDTH_20:
-        break;
+      break;
     case RATE_MCS_CHAN_WIDTH_40:
-        rx_status->bw = RATE_INFO_BW_40;
-        break;
+      rx_status->bw = RATE_INFO_BW_40;
+      break;
     case RATE_MCS_CHAN_WIDTH_80:
-        rx_status->bw = RATE_INFO_BW_80;
-        break;
+      rx_status->bw = RATE_INFO_BW_80;
+      break;
     case RATE_MCS_CHAN_WIDTH_160:
-        rx_status->bw = RATE_INFO_BW_160;
-        break;
+      rx_status->bw = RATE_INFO_BW_160;
+      break;
+  }
+  if (!(rate_n_flags & RATE_MCS_CCK_MSK) && rate_n_flags & RATE_MCS_SGI_MSK) {
+    rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+  }
+  if (rate_n_flags & RATE_HT_MCS_GF_MSK) {
+    rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
+  }
+  if (rate_n_flags & RATE_MCS_LDPC_MSK) {
+    rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+  }
+  if (rate_n_flags & RATE_MCS_HT_MSK) {
+    uint8_t stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
+    rx_status->encoding = RX_ENC_HT;
+    rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+    rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+  } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+    uint8_t stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
+    rx_status->nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1;
+    rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+    rx_status->encoding = RX_ENC_VHT;
+    rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+    if (rate_n_flags & RATE_MCS_BF_MSK) {
+      rx_status->enc_flags |= RX_ENC_FLAG_BF;
     }
-    if (!(rate_n_flags & RATE_MCS_CCK_MSK) && rate_n_flags & RATE_MCS_SGI_MSK) {
-        rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
-    }
-    if (rate_n_flags & RATE_HT_MCS_GF_MSK) { rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; }
-    if (rate_n_flags & RATE_MCS_LDPC_MSK) { rx_status->enc_flags |= RX_ENC_FLAG_LDPC; }
-    if (rate_n_flags & RATE_MCS_HT_MSK) {
-        uint8_t stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
-        rx_status->encoding = RX_ENC_HT;
-        rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
-        rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
-    } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
-        uint8_t stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
-        rx_status->nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1;
-        rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
-        rx_status->encoding = RX_ENC_VHT;
-        rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
-        if (rate_n_flags & RATE_MCS_BF_MSK) { rx_status->enc_flags |= RX_ENC_FLAG_BF; }
-    } else {
-        int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band);
+  } else {
+    int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band);
 
-        if (WARN(rate < 0 || rate > 0xFF, "Invalid rate flags 0x%x, band %d,\n", rate_n_flags,
-                 rx_status->band)) {
-            kfree_skb(skb);
-            return;
-        }
-        rx_status->rate_idx = rate;
+    if (WARN(rate < 0 || rate > 0xFF, "Invalid rate flags 0x%x, band %d,\n", rate_n_flags,
+             rx_status->band)) {
+      kfree_skb(skb);
+      return;
     }
+    rx_status->rate_idx = rate;
+  }
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    iwl_mvm_update_frame_stats(mvm, rate_n_flags, rx_status->flag & RX_FLAG_AMPDU_DETAILS);
+  iwl_mvm_update_frame_stats(mvm, rate_n_flags, rx_status->flag & RX_FLAG_AMPDU_DETAILS);
 #endif
 
-    if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
-                  ieee80211_is_probe_resp(hdr->frame_control)) &&
-                 mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)) {
-        mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
-    }
+  if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
+                ieee80211_is_probe_resp(hdr->frame_control)) &&
+               mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)) {
+    mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
+  }
 
-    if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
-                 ieee80211_is_probe_resp(hdr->frame_control))) {
-        rx_status->boottime_ns = ktime_get_boot_ns();
-    }
+  if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+               ieee80211_is_probe_resp(hdr->frame_control))) {
+    rx_status->boottime_ns = ktime_get_boot_ns();
+  }
 
-    /* Take a reference briefly to kick off a d0i3 entry delay so
-     * we can handle bursts of RX packets without toggling the
-     * state too often.  But don't do this for beacons if we are
-     * going to idle because the beacon filtering changes we make
-     * cause the firmware to send us collateral beacons. */
-    take_ref = !(test_bit(STATUS_TRANS_GOING_IDLE, &mvm->trans->status) &&
-                 ieee80211_is_beacon(hdr->frame_control));
+  /* Take a reference briefly to kick off a d0i3 entry delay so
+   * we can handle bursts of RX packets without toggling the
+   * state too often.  But don't do this for beacons if we are
+   * going to idle because the beacon filtering changes we make
+   * cause the firmware to send us collateral beacons. */
+  take_ref = !(test_bit(STATUS_TRANS_GOING_IDLE, &mvm->trans->status) &&
+               ieee80211_is_beacon(hdr->frame_control));
 
-    if (take_ref) { iwl_mvm_ref(mvm, IWL_MVM_REF_RX); }
+  if (take_ref) {
+    iwl_mvm_ref(mvm, IWL_MVM_REF_RX);
+  }
 
-    iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len, crypt_len, rxb);
+  iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len, crypt_len, rxb);
 
-    if (take_ref) { iwl_mvm_unref(mvm, IWL_MVM_REF_RX); }
+  if (take_ref) {
+    iwl_mvm_unref(mvm, IWL_MVM_REF_RX);
+  }
 }
 
 struct iwl_mvm_stat_data {
-    struct iwl_mvm* mvm;
-    __le32 mac_id;
-    uint8_t beacon_filter_average_energy;
-    void* general;
+  struct iwl_mvm* mvm;
+  __le32 mac_id;
+  uint8_t beacon_filter_average_energy;
+  void* general;
 };
 
 static void iwl_mvm_stat_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_stat_data* data = _data;
-    struct iwl_mvm* mvm = data->mvm;
-    int sig = -data->beacon_filter_average_energy;
-    int last_event;
-    int thold = vif->bss_conf.cqm_rssi_thold;
-    int hyst = vif->bss_conf.cqm_rssi_hyst;
-    uint16_t id = le32_to_cpu(data->mac_id);
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    uint16_t vif_id = mvmvif->id;
+  struct iwl_mvm_stat_data* data = _data;
+  struct iwl_mvm* mvm = data->mvm;
+  int sig = -data->beacon_filter_average_energy;
+  int last_event;
+  int thold = vif->bss_conf.cqm_rssi_thold;
+  int hyst = vif->bss_conf.cqm_rssi_hyst;
+  uint16_t id = le32_to_cpu(data->mac_id);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  uint16_t vif_id = mvmvif->id;
 
-    /* This doesn't need the MAC ID check since it's not taking the
-     * data copied into the "data" struct, but rather the data from
-     * the notification directly.
-     */
-    if (iwl_mvm_has_new_rx_stats_api(mvm)) {
-        struct mvm_statistics_general* general = data->general;
+  /* This doesn't need the MAC ID check since it's not taking the
+   * data copied into the "data" struct, but rather the data from
+   * the notification directly.
+   */
+  if (iwl_mvm_has_new_rx_stats_api(mvm)) {
+    struct mvm_statistics_general* general = data->general;
 
-        mvmvif->beacon_stats.num_beacons = le32_to_cpu(general->beacon_counter[vif_id]);
-        mvmvif->beacon_stats.avg_signal = -general->beacon_average_energy[vif_id];
-    } else {
-        struct mvm_statistics_general_v8* general = data->general;
+    mvmvif->beacon_stats.num_beacons = le32_to_cpu(general->beacon_counter[vif_id]);
+    mvmvif->beacon_stats.avg_signal = -general->beacon_average_energy[vif_id];
+  } else {
+    struct mvm_statistics_general_v8* general = data->general;
 
-        mvmvif->beacon_stats.num_beacons = le32_to_cpu(general->beacon_counter[vif_id]);
-        mvmvif->beacon_stats.avg_signal = -general->beacon_average_energy[vif_id];
+    mvmvif->beacon_stats.num_beacons = le32_to_cpu(general->beacon_counter[vif_id]);
+    mvmvif->beacon_stats.avg_signal = -general->beacon_average_energy[vif_id];
+  }
+
+  if (mvmvif->id != id) {
+    return;
+  }
+
+  if (vif->type != NL80211_IFTYPE_STATION) {
+    return;
+  }
+
+  if (sig == 0) {
+    IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
+    return;
+  }
+
+  mvmvif->bf_data.ave_beacon_signal = sig;
+
+  /* BT Coex */
+  if (mvmvif->bf_data.bt_coex_min_thold != mvmvif->bf_data.bt_coex_max_thold) {
+    last_event = mvmvif->bf_data.last_bt_coex_event;
+    if (sig > mvmvif->bf_data.bt_coex_max_thold &&
+        (last_event <= mvmvif->bf_data.bt_coex_min_thold || last_event == 0)) {
+      mvmvif->bf_data.last_bt_coex_event = sig;
+      IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n", sig);
+      iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
+    } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
+               (last_event >= mvmvif->bf_data.bt_coex_max_thold || last_event == 0)) {
+      mvmvif->bf_data.last_bt_coex_event = sig;
+      IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n", sig);
+      iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
     }
+  }
 
-    if (mvmvif->id != id) { return; }
+  if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) {
+    return;
+  }
 
-    if (vif->type != NL80211_IFTYPE_STATION) { return; }
-
-    if (sig == 0) {
-        IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
-        return;
-    }
-
-    mvmvif->bf_data.ave_beacon_signal = sig;
-
-    /* BT Coex */
-    if (mvmvif->bf_data.bt_coex_min_thold != mvmvif->bf_data.bt_coex_max_thold) {
-        last_event = mvmvif->bf_data.last_bt_coex_event;
-        if (sig > mvmvif->bf_data.bt_coex_max_thold &&
-            (last_event <= mvmvif->bf_data.bt_coex_min_thold || last_event == 0)) {
-            mvmvif->bf_data.last_bt_coex_event = sig;
-            IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n", sig);
-            iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
-        } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
-                   (last_event >= mvmvif->bf_data.bt_coex_max_thold || last_event == 0)) {
-            mvmvif->bf_data.last_bt_coex_event = sig;
-            IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n", sig);
-            iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
-        }
-    }
-
-    if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) { return; }
-
-    /* CQM Notification */
-    last_event = mvmvif->bf_data.last_cqm_event;
-    if (thold && sig < thold && (last_event == 0 || sig < last_event - hyst)) {
-        mvmvif->bf_data.last_cqm_event = sig;
-        IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n", sig);
-        ieee80211_cqm_rssi_notify(vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, sig, GFP_KERNEL);
-    } else if (sig > thold && (last_event == 0 || sig > last_event + hyst)) {
-        mvmvif->bf_data.last_cqm_event = sig;
-        IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n", sig);
-        ieee80211_cqm_rssi_notify(vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, sig, GFP_KERNEL);
-    }
+  /* CQM Notification */
+  last_event = mvmvif->bf_data.last_cqm_event;
+  if (thold && sig < thold && (last_event == 0 || sig < last_event - hyst)) {
+    mvmvif->bf_data.last_cqm_event = sig;
+    IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n", sig);
+    ieee80211_cqm_rssi_notify(vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, sig, GFP_KERNEL);
+  } else if (sig > thold && (last_event == 0 || sig > last_event + hyst)) {
+    mvmvif->bf_data.last_cqm_event = sig;
+    IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n", sig);
+    ieee80211_cqm_rssi_notify(vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, sig, GFP_KERNEL);
+  }
 }
 
 static inline void iwl_mvm_rx_stats_check_trigger(struct iwl_mvm* mvm, struct iwl_rx_packet* pkt) {
-    struct iwl_fw_dbg_trigger_tlv* trig;
-    struct iwl_fw_dbg_trigger_stats* trig_stats;
-    uint32_t trig_offset, trig_thold;
+  struct iwl_fw_dbg_trigger_tlv* trig;
+  struct iwl_fw_dbg_trigger_stats* trig_stats;
+  uint32_t trig_offset, trig_thold;
 
-    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS);
-    if (!trig) { return; }
+  trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS);
+  if (!trig) {
+    return;
+  }
 
-    trig_stats = (void*)trig->data;
+  trig_stats = (void*)trig->data;
 
-    trig_offset = le32_to_cpu(trig_stats->stop_offset);
-    trig_thold = le32_to_cpu(trig_stats->stop_threshold);
+  trig_offset = le32_to_cpu(trig_stats->stop_offset);
+  trig_thold = le32_to_cpu(trig_stats->stop_threshold);
 
-    if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt))) { return; }
+  if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt))) {
+    return;
+  }
 
-    if (le32_to_cpup((__le32*)(pkt->data + trig_offset)) < trig_thold) { return; }
+  if (le32_to_cpup((__le32*)(pkt->data + trig_offset)) < trig_thold) {
+    return;
+  }
 
-    iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
+  iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
 }
 
 void iwl_mvm_handle_rx_statistics(struct iwl_mvm* mvm, struct iwl_rx_packet* pkt) {
-    struct iwl_mvm_stat_data data = {
-        .mvm = mvm,
-    };
-    int expected_size;
-    int i;
-    uint8_t* energy;
-    __le32 *bytes, *air_time;
-    __le32 flags;
+  struct iwl_mvm_stat_data data = {
+      .mvm = mvm,
+  };
+  int expected_size;
+  int i;
+  uint8_t* energy;
+  __le32 *bytes, *air_time;
+  __le32 flags;
 
-    if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
-        if (iwl_mvm_has_new_rx_api(mvm)) {
-            expected_size = sizeof(struct iwl_notif_statistics_v11);
-        } else {
-            expected_size = sizeof(struct iwl_notif_statistics_v10);
-        }
+  if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+    if (iwl_mvm_has_new_rx_api(mvm)) {
+      expected_size = sizeof(struct iwl_notif_statistics_v11);
     } else {
-        expected_size = sizeof(struct iwl_notif_statistics);
+      expected_size = sizeof(struct iwl_notif_statistics_v10);
+    }
+  } else {
+    expected_size = sizeof(struct iwl_notif_statistics);
+  }
+
+  if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,
+                "received invalid statistics size (%d)!\n", iwl_rx_packet_payload_len(pkt))) {
+    return;
+  }
+
+  if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+    struct iwl_notif_statistics_v11* stats = (void*)&pkt->data;
+
+    data.mac_id = stats->rx.general.mac_id;
+    data.beacon_filter_average_energy = stats->general.common.beacon_filter_average_energy;
+
+    mvm->rx_stats_v3 = stats->rx;
+
+    mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time);
+    mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time);
+    mvm->radio_stats.on_time_rf = le64_to_cpu(stats->general.common.on_time_rf);
+    mvm->radio_stats.on_time_scan = le64_to_cpu(stats->general.common.on_time_scan);
+
+    data.general = &stats->general;
+
+    flags = stats->flag;
+  } else {
+    struct iwl_notif_statistics* stats = (void*)&pkt->data;
+
+    data.mac_id = stats->rx.general.mac_id;
+    data.beacon_filter_average_energy = stats->general.common.beacon_filter_average_energy;
+
+    mvm->rx_stats = stats->rx;
+
+    mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time);
+    mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time);
+    mvm->radio_stats.on_time_rf = le64_to_cpu(stats->general.common.on_time_rf);
+    mvm->radio_stats.on_time_scan = le64_to_cpu(stats->general.common.on_time_scan);
+
+    data.general = &stats->general;
+
+    flags = stats->flag;
+  }
+
+  iwl_mvm_rx_stats_check_trigger(mvm, pkt);
+
+  ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator,
+                                      &data);
+
+  if (!iwl_mvm_has_new_rx_api(mvm)) {
+    return;
+  }
+
+  if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+    struct iwl_notif_statistics_v11* v11 = (void*)&pkt->data;
+
+    energy = (void*)&v11->load_stats.avg_energy;
+    bytes = (void*)&v11->load_stats.byte_count;
+    air_time = (void*)&v11->load_stats.air_time;
+  } else {
+    struct iwl_notif_statistics* stats = (void*)&pkt->data;
+
+    energy = (void*)&stats->load_stats.avg_energy;
+    bytes = (void*)&stats->load_stats.byte_count;
+    air_time = (void*)&stats->load_stats.air_time;
+  }
+
+  rcu_read_lock();
+  for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+    struct iwl_mvm_sta* sta;
+
+    if (!energy[i]) {
+      continue;
     }
 
-    if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,
-                  "received invalid statistics size (%d)!\n", iwl_rx_packet_payload_len(pkt))) {
-        return;
+    sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
+    if (!sta) {
+      continue;
     }
+    sta->avg_energy = energy[i];
+  }
+  rcu_read_unlock();
 
-    if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
-        struct iwl_notif_statistics_v11* stats = (void*)&pkt->data;
+  /*
+   * Don't update in case the statistics are not cleared, since
+   * we will end up counting twice the same airtime, once in TCM
+   * request and once in statistics notification.
+   */
+  if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)) {
+    return;
+  }
 
-        data.mac_id = stats->rx.general.mac_id;
-        data.beacon_filter_average_energy = stats->general.common.beacon_filter_average_energy;
+  spin_lock(&mvm->tcm.lock);
+  for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+    struct iwl_mvm_tcm_mac* mdata = &mvm->tcm.data[i];
+    uint32_t rx_bytes = le32_to_cpu(bytes[i]);
+    uint32_t airtime = le32_to_cpu(air_time[i]);
 
-        mvm->rx_stats_v3 = stats->rx;
-
-        mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time);
-        mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time);
-        mvm->radio_stats.on_time_rf = le64_to_cpu(stats->general.common.on_time_rf);
-        mvm->radio_stats.on_time_scan = le64_to_cpu(stats->general.common.on_time_scan);
-
-        data.general = &stats->general;
-
-        flags = stats->flag;
-    } else {
-        struct iwl_notif_statistics* stats = (void*)&pkt->data;
-
-        data.mac_id = stats->rx.general.mac_id;
-        data.beacon_filter_average_energy = stats->general.common.beacon_filter_average_energy;
-
-        mvm->rx_stats = stats->rx;
-
-        mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time);
-        mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time);
-        mvm->radio_stats.on_time_rf = le64_to_cpu(stats->general.common.on_time_rf);
-        mvm->radio_stats.on_time_scan = le64_to_cpu(stats->general.common.on_time_scan);
-
-        data.general = &stats->general;
-
-        flags = stats->flag;
+    mdata->rx.airtime += airtime;
+    mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
+    if (airtime) {
+      /* re-init every time to store rate from FW */
+      ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
+      ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, rx_bytes * 8 / airtime);
     }
-
-    iwl_mvm_rx_stats_check_trigger(mvm, pkt);
-
-    ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator,
-                                        &data);
-
-    if (!iwl_mvm_has_new_rx_api(mvm)) { return; }
-
-    if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
-        struct iwl_notif_statistics_v11* v11 = (void*)&pkt->data;
-
-        energy = (void*)&v11->load_stats.avg_energy;
-        bytes = (void*)&v11->load_stats.byte_count;
-        air_time = (void*)&v11->load_stats.air_time;
-    } else {
-        struct iwl_notif_statistics* stats = (void*)&pkt->data;
-
-        energy = (void*)&stats->load_stats.avg_energy;
-        bytes = (void*)&stats->load_stats.byte_count;
-        air_time = (void*)&stats->load_stats.air_time;
-    }
-
-    rcu_read_lock();
-    for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
-        struct iwl_mvm_sta* sta;
-
-        if (!energy[i]) { continue; }
-
-        sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
-        if (!sta) { continue; }
-        sta->avg_energy = energy[i];
-    }
-    rcu_read_unlock();
-
-    /*
-     * Don't update in case the statistics are not cleared, since
-     * we will end up counting twice the same airtime, once in TCM
-     * request and once in statistics notification.
-     */
-    if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)) { return; }
-
-    spin_lock(&mvm->tcm.lock);
-    for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
-        struct iwl_mvm_tcm_mac* mdata = &mvm->tcm.data[i];
-        uint32_t rx_bytes = le32_to_cpu(bytes[i]);
-        uint32_t airtime = le32_to_cpu(air_time[i]);
-
-        mdata->rx.airtime += airtime;
-        mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
-        if (airtime) {
-            /* re-init every time to store rate from FW */
-            ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
-            ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, rx_bytes * 8 / airtime);
-        }
-    }
-    spin_unlock(&mvm->tcm.lock);
+  }
+  spin_unlock(&mvm->tcm.lock);
 }
 
 void iwl_mvm_rx_statistics(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
+  iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
 }
 
 void iwl_mvm_window_status_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_ba_window_status_notif* notif = (void*)pkt->data;
-    int i;
-    uint32_t pkt_len = iwl_rx_packet_payload_len(pkt);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_ba_window_status_notif* notif = (void*)pkt->data;
+  int i;
+  uint32_t pkt_len = iwl_rx_packet_payload_len(pkt);
 
-    if (WARN_ONCE(pkt_len != sizeof(*notif),
-                  "Received window status notification of wrong size (%u)\n", pkt_len)) {
-        return;
+  if (WARN_ONCE(pkt_len != sizeof(*notif),
+                "Received window status notification of wrong size (%u)\n", pkt_len)) {
+    return;
+  }
+
+  rcu_read_lock();
+  for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
+    struct ieee80211_sta* sta;
+    uint8_t sta_id, tid;
+    uint64_t bitmap;
+    uint32_t ssn;
+    uint16_t ratid;
+    uint16_t received_mpdu;
+
+    ratid = le16_to_cpu(notif->ra_tid[i]);
+    /* check that this TID is valid */
+    if (!(ratid & BA_WINDOW_STATUS_VALID_MSK)) {
+      continue;
     }
 
-    rcu_read_lock();
-    for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
-        struct ieee80211_sta* sta;
-        uint8_t sta_id, tid;
-        uint64_t bitmap;
-        uint32_t ssn;
-        uint16_t ratid;
-        uint16_t received_mpdu;
-
-        ratid = le16_to_cpu(notif->ra_tid[i]);
-        /* check that this TID is valid */
-        if (!(ratid & BA_WINDOW_STATUS_VALID_MSK)) { continue; }
-
-        received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]);
-        if (received_mpdu == 0) { continue; }
-
-        tid = ratid & BA_WINDOW_STATUS_TID_MSK;
-        /* get the station */
-        sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK) >> BA_WINDOW_STATUS_STA_ID_POS;
-        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-        if (IS_ERR_OR_NULL(sta)) { continue; }
-        bitmap = le64_to_cpu(notif->bitmap[i]);
-        ssn = le32_to_cpu(notif->start_seq_num[i]);
-
-        /* update mac80211 with the bitmap for the reordering buffer */
-        ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap, received_mpdu);
+    received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]);
+    if (received_mpdu == 0) {
+      continue;
     }
-    rcu_read_unlock();
+
+    tid = ratid & BA_WINDOW_STATUS_TID_MSK;
+    /* get the station */
+    sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK) >> BA_WINDOW_STATUS_STA_ID_POS;
+    sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+    if (IS_ERR_OR_NULL(sta)) {
+      continue;
+    }
+    bitmap = le64_to_cpu(notif->bitmap[i]);
+    ssn = le32_to_cpu(notif->start_seq_num[i]);
+
+    /* update mac80211 with the bitmap for the reordering buffer */
+    ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap, received_mpdu);
+  }
+  rcu_read_unlock();
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rxmq.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rxmq.c
index d1b398c..c6147c1 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rxmq.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/rxmq.c
@@ -36,7 +36,7 @@
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/fw-api.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mvm.h"
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static inline int iwl_mvm_check_pn(struct iwl_mvm* mvm, struct sk_buff* skb, int queue,
                                    struct ieee80211_sta* sta) {
     struct iwl_mvm_sta* mvmsta;
@@ -541,7 +541,7 @@
 #endif  // NEEDS_PORTING
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 /*
  * Returns true if the MPDU was buffered\dropped, false if it should be passed
  * to upper layer.
@@ -859,9 +859,9 @@
     }
 
     if (he_mu) {
-#define CHECK_BW(bw)                                                           \
-    BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_##bw##MHZ != \
-                 RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
+#define CHECK_BW(bw)                                                         \
+  BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_##bw##MHZ != \
+               RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
         CHECK_BW(20);
         CHECK_BW(40);
         CHECK_BW(80);
@@ -1065,9 +1065,9 @@
 
     rx_status->he_dcm = !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
 
-#define CHECK_TYPE(F)                                      \
-    BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_##F != \
-                 (RATE_MCS_HE_TYPE_##F >> RATE_MCS_HE_TYPE_POS))
+#define CHECK_TYPE(F)                                    \
+  BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_##F != \
+               (RATE_MCS_HE_TYPE_##F >> RATE_MCS_HE_TYPE_POS))
 
     CHECK_TYPE(SU);
     CHECK_TYPE(EXT_SU);
@@ -1151,7 +1151,7 @@
 
 void iwl_mvm_rx_mpdu_mq(struct iwl_mvm* mvm, struct napi_struct* napi,
                         struct iwl_rx_cmd_buffer* rxb, int queue) {
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
     struct ieee80211_rx_status* rx_status;
     struct iwl_rx_packet* pkt = rxb_addr(rxb);
     struct iwl_rx_mpdu_desc* desc = (void*)pkt->data;
@@ -1362,7 +1362,7 @@
          * In both cases an existing station.
          */
         iwl_mvm_tdls_peer_cache_pkt(mvm, hdr, len, queue);
-#endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */
+#endif  /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */
 
         if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
             kfree_skb(skb);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/scan.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/scan.c
index 50e6a5a..9f3a3c1 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/scan.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/scan.c
@@ -35,6 +35,7 @@
  *****************************************************************************/
 
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/scan.h"
+
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mvm.h"
 
@@ -57,8 +58,8 @@
 #define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
 
 struct iwl_mvm_scan_timing_params {
-    uint32_t suspend_time;
-    uint32_t max_out_time;
+  uint32_t suspend_time;
+  uint32_t max_out_time;
 };
 
 #if 0   // NEEDS_PORTING
@@ -92,28 +93,28 @@
 #endif  // NEEDS_PORTING
 
 struct iwl_mvm_scan_params {
-    /* For CDB this is low band scan type, for non-CDB - type. */
-    enum iwl_mvm_scan_type type;
-    enum iwl_mvm_scan_type hb_type;
-    uint32_t n_channels;
-    uint16_t delay;
-    int n_ssids;
-    struct cfg80211_ssid* ssids;
-    struct ieee80211_channel** channels;
-    uint32_t flags;
-    uint8_t* mac_addr;
-    uint8_t* mac_addr_mask;
-    bool no_cck;
-    bool pass_all;
-    int n_match_sets;
-    struct iwl_scan_probe_req preq;
-    struct cfg80211_match_set* match_sets;
-    int n_scan_plans;
-    struct cfg80211_sched_scan_plan* scan_plans;
-    uint32_t measurement_dwell;
+  /* For CDB this is low band scan type, for non-CDB - type. */
+  enum iwl_mvm_scan_type type;
+  enum iwl_mvm_scan_type hb_type;
+  uint32_t n_channels;
+  uint16_t delay;
+  int n_ssids;
+  struct cfg80211_ssid* ssids;
+  struct ieee80211_channel** channels;
+  uint32_t flags;
+  uint8_t* mac_addr;
+  uint8_t* mac_addr_mask;
+  bool no_cck;
+  bool pass_all;
+  int n_match_sets;
+  struct iwl_scan_probe_req preq;
+  struct cfg80211_match_set* match_sets;
+  int n_scan_plans;
+  struct cfg80211_sched_scan_plan* scan_plans;
+  uint32_t measurement_dwell;
 };
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static inline void* iwl_mvm_get_scan_req_umac_data(struct iwl_mvm* mvm) {
     struct iwl_scan_req_umac* cmd = mvm->scan_cmd;
 
@@ -1714,24 +1715,24 @@
 #endif  // NEEDS_PORTING
 
 int iwl_mvm_scan_size(struct iwl_mvm* mvm) {
-    int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
+  int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
 
-    if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
-        base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
-    } else if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
-        base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
-    } else if (iwl_mvm_cdb_scan_api(mvm)) {
-        base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
-    }
+  if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
+    base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
+  } else if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+    base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
+  } else if (iwl_mvm_cdb_scan_api(mvm)) {
+    base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
+  }
 
-    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
-        return base_size +
-               sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels +
-               sizeof(struct iwl_scan_req_umac_tail);
+  if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+    return base_size +
+           sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels +
+           sizeof(struct iwl_scan_req_umac_tail);
 
-    return sizeof(struct iwl_scan_req_lmac) +
-           sizeof(struct iwl_scan_channel_cfg_lmac) * mvm->fw->ucode_capa.n_scan_channels +
-           sizeof(struct iwl_scan_probe_req);
+  return sizeof(struct iwl_scan_req_lmac) +
+         sizeof(struct iwl_scan_channel_cfg_lmac) * mvm->fw->ucode_capa.n_scan_channels +
+         sizeof(struct iwl_scan_probe_req);
 }
 
 #if 0   // NEEDS_PORTING
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sf.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sf.c
index c6cf269..ff5c456 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sf.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sf.c
@@ -36,10 +36,10 @@
 
 /* For counting bound interfaces */
 struct iwl_mvm_active_iface_iterator_data {
-    struct ieee80211_vif* ignore_vif;
-    uint8_t sta_vif_ap_sta_id;
-    enum iwl_sf_state sta_vif_state;
-    uint32_t num_active_macs;
+  struct ieee80211_vif* ignore_vif;
+  uint8_t sta_vif_ap_sta_id;
+  enum iwl_sf_state sta_vif_state;
+  uint32_t num_active_macs;
 };
 
 /*
@@ -47,24 +47,24 @@
  * data->station_vif will point to one bound vif of type station, if exists.
  */
 static void iwl_mvm_bound_iface_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
-    struct iwl_mvm_active_iface_iterator_data* data = _data;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_active_iface_iterator_data* data = _data;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (vif == data->ignore_vif || !mvmvif->phy_ctxt || vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-        vif->type == NL80211_IFTYPE_NAN) {
-        return;
+  if (vif == data->ignore_vif || !mvmvif->phy_ctxt || vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+      vif->type == NL80211_IFTYPE_NAN) {
+    return;
+  }
+
+  data->num_active_macs++;
+
+  if (vif->type == NL80211_IFTYPE_STATION) {
+    data->sta_vif_ap_sta_id = mvmvif->ap_sta_id;
+    if (vif->bss_conf.assoc) {
+      data->sta_vif_state = SF_FULL_ON;
+    } else {
+      data->sta_vif_state = SF_INIT_OFF;
     }
-
-    data->num_active_macs++;
-
-    if (vif->type == NL80211_IFTYPE_STATION) {
-        data->sta_vif_ap_sta_id = mvmvif->ap_sta_id;
-        if (vif->bss_conf.assoc) {
-            data->sta_vif_state = SF_FULL_ON;
-        } else {
-            data->sta_vif_state = SF_INIT_OFF;
-        }
-    }
+  }
 }
 
 /*
@@ -93,103 +93,107 @@
 
 static void iwl_mvm_fill_sf_command(struct iwl_mvm* mvm, struct iwl_sf_cfg_cmd* sf_cmd,
                                     struct ieee80211_sta* sta) {
-    int i, j, watermark;
+  int i, j, watermark;
 
-    sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN);
+  sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN);
 
-    /*
-     * If we are in association flow - check antenna configuration
-     * capabilities of the AP station, and choose the watermark accordingly.
-     */
-    if (sta) {
-        if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
-            switch (sta->rx_nss) {
-            case 1:
-                watermark = SF_W_MARK_SISO;
-                break;
-            case 2:
-                watermark = SF_W_MARK_MIMO2;
-                break;
-            default:
-                watermark = SF_W_MARK_MIMO3;
-                break;
-            }
-        } else {
-            watermark = SF_W_MARK_LEGACY;
-        }
-        /* default watermark value for unassociated mode. */
+  /*
+   * If we are in association flow - check antenna configuration
+   * capabilities of the AP station, and choose the watermark accordingly.
+   */
+  if (sta) {
+    if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+      switch (sta->rx_nss) {
+        case 1:
+          watermark = SF_W_MARK_SISO;
+          break;
+        case 2:
+          watermark = SF_W_MARK_MIMO2;
+          break;
+        default:
+          watermark = SF_W_MARK_MIMO3;
+          break;
+      }
     } else {
-        watermark = SF_W_MARK_MIMO2;
+      watermark = SF_W_MARK_LEGACY;
     }
-    sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark);
+    /* default watermark value for unassociated mode. */
+  } else {
+    watermark = SF_W_MARK_MIMO2;
+  }
+  sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark);
 
-    for (i = 0; i < SF_NUM_SCENARIO; i++) {
-        for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) {
-            sf_cmd->long_delay_timeouts[i][j] = cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
-        }
+  for (i = 0; i < SF_NUM_SCENARIO; i++) {
+    for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) {
+      sf_cmd->long_delay_timeouts[i][j] = cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
     }
+  }
 
-    if (sta) {
-        BUILD_BUG_ON(sizeof(sf_full_timeout) !=
-                     sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
+  if (sta) {
+    BUILD_BUG_ON(sizeof(sf_full_timeout) !=
+                 sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
 
-        memcpy(sf_cmd->full_on_timeouts, sf_full_timeout, sizeof(sf_full_timeout));
-    } else {
-        BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
-                     sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
+    memcpy(sf_cmd->full_on_timeouts, sf_full_timeout, sizeof(sf_full_timeout));
+  } else {
+    BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
+                 sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
 
-        memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def, sizeof(sf_full_timeout_def));
-    }
+    memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def, sizeof(sf_full_timeout_def));
+  }
 }
 
 static int iwl_mvm_sf_config(struct iwl_mvm* mvm, uint8_t sta_id, enum iwl_sf_state new_state) {
-    struct iwl_sf_cfg_cmd sf_cmd = {
-        .state = cpu_to_le32(new_state),
-    };
-    struct ieee80211_sta* sta;
-    int ret = 0;
+  struct iwl_sf_cfg_cmd sf_cmd = {
+      .state = cpu_to_le32(new_state),
+  };
+  struct ieee80211_sta* sta;
+  int ret = 0;
 
-    if (mvm->cfg->disable_dummy_notification) {
-        sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
-    }
+  if (mvm->cfg->disable_dummy_notification) {
+    sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
+  }
 
-    /*
-     * If an associated AP sta changed its antenna configuration, the state
-     * will remain FULL_ON but SF parameters need to be reconsidered.
-     */
-    if (new_state != SF_FULL_ON && mvm->sf_state == new_state) { return 0; }
+  /*
+   * If an associated AP sta changed its antenna configuration, the state
+   * will remain FULL_ON but SF parameters need to be reconsidered.
+   */
+  if (new_state != SF_FULL_ON && mvm->sf_state == new_state) {
+    return 0;
+  }
 
-    switch (new_state) {
+  switch (new_state) {
     case SF_UNINIT:
-        iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
-        break;
+      iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+      break;
     case SF_FULL_ON:
-        if (sta_id == IWL_MVM_INVALID_STA) {
-            IWL_ERR(mvm, "No station: Cannot switch SF to FULL_ON\n");
-            return -EINVAL;
-        }
-        rcu_read_lock();
-        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-        if (IS_ERR_OR_NULL(sta)) {
-            IWL_ERR(mvm, "Invalid station id\n");
-            rcu_read_unlock();
-            return -EINVAL;
-        }
-        iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
-        rcu_read_unlock();
-        break;
-    case SF_INIT_OFF:
-        iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
-        break;
-    default:
-        WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n", new_state);
+      if (sta_id == IWL_MVM_INVALID_STA) {
+        IWL_ERR(mvm, "No station: Cannot switch SF to FULL_ON\n");
         return -EINVAL;
-    }
+      }
+      rcu_read_lock();
+      sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+      if (IS_ERR_OR_NULL(sta)) {
+        IWL_ERR(mvm, "Invalid station id\n");
+        rcu_read_unlock();
+        return -EINVAL;
+      }
+      iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
+      rcu_read_unlock();
+      break;
+    case SF_INIT_OFF:
+      iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+      break;
+    default:
+      WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n", new_state);
+      return -EINVAL;
+  }
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC, sizeof(sf_cmd), &sf_cmd);
-    if (!ret) { mvm->sf_state = new_state; }
+  ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC, sizeof(sf_cmd), &sf_cmd);
+  if (!ret) {
+    mvm->sf_state = new_state;
+  }
 
-    return ret;
+  return ret;
 }
 
 /*
@@ -198,59 +202,63 @@
  * and set new state accordingly.
  */
 int iwl_mvm_sf_update(struct iwl_mvm* mvm, struct ieee80211_vif* changed_vif, bool remove_vif) {
-    enum iwl_sf_state new_state;
-    uint8_t sta_id = IWL_MVM_INVALID_STA;
-    struct iwl_mvm_vif* mvmvif = NULL;
-    struct iwl_mvm_active_iface_iterator_data data = {
-        .ignore_vif = changed_vif,
-        .sta_vif_state = SF_UNINIT,
-        .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA,
-    };
+  enum iwl_sf_state new_state;
+  uint8_t sta_id = IWL_MVM_INVALID_STA;
+  struct iwl_mvm_vif* mvmvif = NULL;
+  struct iwl_mvm_active_iface_iterator_data data = {
+      .ignore_vif = changed_vif,
+      .sta_vif_state = SF_UNINIT,
+      .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA,
+  };
 
-    /*
-     * Ignore the call if we are in HW Restart flow, or if the handled
-     * vif is a p2p device.
-     */
-    if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
-        (changed_vif && (changed_vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-                         changed_vif->type == NL80211_IFTYPE_NAN))) {
-        return 0;
-    }
+  /*
+   * Ignore the call if we are in HW Restart flow, or if the handled
+   * vif is a p2p device.
+   */
+  if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+      (changed_vif && (changed_vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+                       changed_vif->type == NL80211_IFTYPE_NAN))) {
+    return 0;
+  }
 
-    ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                               iwl_mvm_bound_iface_iterator, &data);
+  ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                             iwl_mvm_bound_iface_iterator, &data);
 
-    /* If changed_vif exists and is not to be removed, add to the count */
-    if (changed_vif && !remove_vif) { data.num_active_macs++; }
+  /* If changed_vif exists and is not to be removed, add to the count */
+  if (changed_vif && !remove_vif) {
+    data.num_active_macs++;
+  }
 
-    switch (data.num_active_macs) {
+  switch (data.num_active_macs) {
     case 0:
-        /* If there are no active macs - change state to SF_INIT_OFF */
-        new_state = SF_INIT_OFF;
-        break;
+      /* If there are no active macs - change state to SF_INIT_OFF */
+      new_state = SF_INIT_OFF;
+      break;
     case 1:
-        if (remove_vif) {
-            /* The one active mac left is of type station
-             * and we filled the relevant data during iteration
-             */
-            new_state = data.sta_vif_state;
-            sta_id = data.sta_vif_ap_sta_id;
-        } else {
-            if (WARN_ON(!changed_vif)) { return -EINVAL; }
-            if (changed_vif->type != NL80211_IFTYPE_STATION) {
-                new_state = SF_UNINIT;
-            } else if (changed_vif->bss_conf.assoc && changed_vif->bss_conf.dtim_period) {
-                mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
-                sta_id = mvmvif->ap_sta_id;
-                new_state = SF_FULL_ON;
-            } else {
-                new_state = SF_INIT_OFF;
-            }
+      if (remove_vif) {
+        /* The one active mac left is of type station
+         * and we filled the relevant data during iteration
+         */
+        new_state = data.sta_vif_state;
+        sta_id = data.sta_vif_ap_sta_id;
+      } else {
+        if (WARN_ON(!changed_vif)) {
+          return -EINVAL;
         }
-        break;
+        if (changed_vif->type != NL80211_IFTYPE_STATION) {
+          new_state = SF_UNINIT;
+        } else if (changed_vif->bss_conf.assoc && changed_vif->bss_conf.dtim_period) {
+          mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
+          sta_id = mvmvif->ap_sta_id;
+          new_state = SF_FULL_ON;
+        } else {
+          new_state = SF_INIT_OFF;
+        }
+      }
+      break;
     default:
-        /* If there are multiple active macs - change to SF_UNINIT */
-        new_state = SF_UNINIT;
-    }
-    return iwl_mvm_sf_config(mvm, sta_id, new_state);
+      /* If there are multiple active macs - change to SF_UNINIT */
+      new_state = SF_UNINIT;
+  }
+  return iwl_mvm_sf_config(mvm, sta_id, new_state);
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sta.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sta.c
index e955db9..463cbfc 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sta.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sta.c
@@ -33,11 +33,12 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include "sta.h"
+
 #include <net/mac80211.h>
 
 #include "mvm.h"
 #include "rs.h"
-#include "sta.h"
 
 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm* mvm);
 
@@ -52,339 +53,373 @@
  * support both API versions.
  */
 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm* mvm) {
-    if (iwl_mvm_has_new_rx_api(mvm) ||
-        fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
-        return sizeof(struct iwl_mvm_add_sta_cmd);
-    } else {
-        return sizeof(struct iwl_mvm_add_sta_cmd_v7);
-    }
+  if (iwl_mvm_has_new_rx_api(mvm) || fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+    return sizeof(struct iwl_mvm_add_sta_cmd);
+  } else {
+    return sizeof(struct iwl_mvm_add_sta_cmd_v7);
+  }
 }
 
 static int iwl_mvm_find_free_sta_id(struct iwl_mvm* mvm, enum nl80211_iftype iftype) {
-    int sta_id;
-    uint32_t reserved_ids = 0;
+  int sta_id;
+  uint32_t reserved_ids = 0;
 
-    BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
-    WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+  BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
+  WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
-    if (iftype != NL80211_IFTYPE_STATION) { reserved_ids = BIT(0); }
+  /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
+  if (iftype != NL80211_IFTYPE_STATION) {
+    reserved_ids = BIT(0);
+  }
 
-    /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
-    for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
-        if (BIT(sta_id) & reserved_ids) { continue; }
-
-        if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
-                                       lockdep_is_held(&mvm->mutex))) {
-            return sta_id;
-        }
+  /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
+  for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
+    if (BIT(sta_id) & reserved_ids) {
+      continue;
     }
-    return IWL_MVM_INVALID_STA;
+
+    if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex))) {
+      return sta_id;
+    }
+  }
+  return IWL_MVM_INVALID_STA;
 }
 
 /* send station add/update command to firmware */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm* mvm, struct ieee80211_sta* sta, bool update,
                            unsigned int flags) {
-    struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_add_sta_cmd add_sta_cmd = {
-        .sta_id = mvm_sta->sta_id,
-        .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
-        .add_modify = update ? 1 : 0,
-        .station_flags_msk =
-            cpu_to_le32(STA_FLG_FAT_EN_MSK | STA_FLG_MIMO_EN_MSK | STA_FLG_RTS_MIMO_PROT),
-        .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
-    };
-    int ret;
-    uint32_t status;
-    uint32_t agg_size = 0, mpdu_dens = 0;
+  struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_add_sta_cmd add_sta_cmd = {
+      .sta_id = mvm_sta->sta_id,
+      .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
+      .add_modify = update ? 1 : 0,
+      .station_flags_msk =
+          cpu_to_le32(STA_FLG_FAT_EN_MSK | STA_FLG_MIMO_EN_MSK | STA_FLG_RTS_MIMO_PROT),
+      .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
+  };
+  int ret;
+  uint32_t status;
+  uint32_t agg_size = 0, mpdu_dens = 0;
 
-    if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
-        add_sta_cmd.station_type = mvm_sta->sta_type;
+  if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+    add_sta_cmd.station_type = mvm_sta->sta_type;
+  }
+
+  if (!update || (flags & STA_MODIFY_QUEUES)) {
+    memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+    if (!iwl_mvm_has_new_tx_api(mvm)) {
+      add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+
+      if (flags & STA_MODIFY_QUEUES) {
+        add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+      }
+    } else {
+      WARN_ON(flags & STA_MODIFY_QUEUES);
     }
+  }
 
-    if (!update || (flags & STA_MODIFY_QUEUES)) {
-        memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
-
-        if (!iwl_mvm_has_new_tx_api(mvm)) {
-            add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
-
-            if (flags & STA_MODIFY_QUEUES) { add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; }
-        } else {
-            WARN_ON(flags & STA_MODIFY_QUEUES);
-        }
-    }
-
-    switch (sta->bandwidth) {
+  switch (sta->bandwidth) {
     case IEEE80211_STA_RX_BW_160:
-        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
+      add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
     /* fall through */
     case IEEE80211_STA_RX_BW_80:
-        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
+      add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
     /* fall through */
     case IEEE80211_STA_RX_BW_40:
-        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
+      add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
     /* fall through */
     case IEEE80211_STA_RX_BW_20:
-        if (sta->ht_cap.ht_supported) {
-            add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
-        }
-        break;
-    }
+      if (sta->ht_cap.ht_supported) {
+        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
+      }
+      break;
+  }
 
-    switch (sta->rx_nss) {
+  switch (sta->rx_nss) {
     case 1:
-        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
-        break;
+      add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+      break;
     case 2:
-        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
-        break;
+      add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
+      break;
     case 3 ... 8:
-        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
-        break;
-    }
+      add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
+      break;
+  }
 
-    switch (sta->smps_mode) {
+  switch (sta->smps_mode) {
     case IEEE80211_SMPS_AUTOMATIC:
     case IEEE80211_SMPS_NUM_MODES:
-        WARN_ON(1);
-        break;
+      WARN_ON(1);
+      break;
     case IEEE80211_SMPS_STATIC:
-        /* override NSS */
-        add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
-        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
-        break;
+      /* override NSS */
+      add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
+      add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+      break;
     case IEEE80211_SMPS_DYNAMIC:
-        add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
-        break;
+      add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
+      break;
     case IEEE80211_SMPS_OFF:
-        /* nothing */
-        break;
+      /* nothing */
+      break;
+  }
+
+  if (sta->ht_cap.ht_supported) {
+    add_sta_cmd.station_flags_msk |=
+        cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK);
+
+    mpdu_dens = sta->ht_cap.ampdu_density;
+  }
+
+  if (sta->vht_cap.vht_supported) {
+    agg_size = sta->vht_cap.cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+    agg_size >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+  } else if (sta->ht_cap.ht_supported) {
+    agg_size = sta->ht_cap.ampdu_factor;
+  }
+
+  add_sta_cmd.station_flags |= cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
+  add_sta_cmd.station_flags |= cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
+  if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC) {
+    add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
+  }
+
+  if (sta->wme) {
+    add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
+
+    if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
+      add_sta_cmd.uapsd_acs |= BIT(AC_BK);
     }
-
-    if (sta->ht_cap.ht_supported) {
-        add_sta_cmd.station_flags_msk |=
-            cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK);
-
-        mpdu_dens = sta->ht_cap.ampdu_density;
+    if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
+      add_sta_cmd.uapsd_acs |= BIT(AC_BE);
     }
-
-    if (sta->vht_cap.vht_supported) {
-        agg_size = sta->vht_cap.cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
-        agg_size >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
-    } else if (sta->ht_cap.ht_supported) {
-        agg_size = sta->ht_cap.ampdu_factor;
+    if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
+      add_sta_cmd.uapsd_acs |= BIT(AC_VI);
     }
-
-    add_sta_cmd.station_flags |= cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
-    add_sta_cmd.station_flags |= cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
-    if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC) { add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); }
-
-    if (sta->wme) {
-        add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
-
-        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
-            add_sta_cmd.uapsd_acs |= BIT(AC_BK);
-        }
-        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
-            add_sta_cmd.uapsd_acs |= BIT(AC_BE);
-        }
-        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
-            add_sta_cmd.uapsd_acs |= BIT(AC_VI);
-        }
-        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
-            add_sta_cmd.uapsd_acs |= BIT(AC_VO);
-        }
-        add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
-        add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
+    if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
+      add_sta_cmd.uapsd_acs |= BIT(AC_VO);
     }
+    add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
+    add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
+  }
 
-    status = ADD_STA_SUCCESS;
-    ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &add_sta_cmd,
-                                      &status);
-    if (ret) { return ret; }
-
-    switch (status & IWL_ADD_STA_STATUS_MASK) {
-    case ADD_STA_SUCCESS:
-        IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
-        break;
-    default:
-        ret = -EIO;
-        IWL_ERR(mvm, "ADD_STA failed\n");
-        break;
-    }
-
+  status = ADD_STA_SUCCESS;
+  ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &add_sta_cmd,
+                                    &status);
+  if (ret) {
     return ret;
+  }
+
+  switch (status & IWL_ADD_STA_STATUS_MASK) {
+    case ADD_STA_SUCCESS:
+      IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
+      break;
+    default:
+      ret = -EIO;
+      IWL_ERR(mvm, "ADD_STA failed\n");
+      break;
+  }
+
+  return ret;
 }
 
 static void iwl_mvm_rx_agg_session_expired(struct timer_list* t) {
-    struct iwl_mvm_baid_data* data = from_timer(data, t, session_timer);
-    struct iwl_mvm_baid_data __rcu** rcu_ptr = data->rcu_ptr;
-    struct iwl_mvm_baid_data* ba_data;
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvm_sta;
-    unsigned long timeout;
+  struct iwl_mvm_baid_data* data = from_timer(data, t, session_timer);
+  struct iwl_mvm_baid_data __rcu** rcu_ptr = data->rcu_ptr;
+  struct iwl_mvm_baid_data* ba_data;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvm_sta;
+  unsigned long timeout;
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    ba_data = rcu_dereference(*rcu_ptr);
+  ba_data = rcu_dereference(*rcu_ptr);
 
-    if (WARN_ON(!ba_data)) { goto unlock; }
+  if (WARN_ON(!ba_data)) {
+    goto unlock;
+  }
 
-    if (!ba_data->timeout) { goto unlock; }
+  if (!ba_data->timeout) {
+    goto unlock;
+  }
 
-    timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
-    if (time_is_after_jiffies(timeout)) {
-        mod_timer(&ba_data->session_timer, timeout);
-        goto unlock;
-    }
+  timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
+  if (time_is_after_jiffies(timeout)) {
+    mod_timer(&ba_data->session_timer, timeout);
+    goto unlock;
+  }
 
-    /* Timer expired */
-    sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
+  /* Timer expired */
+  sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
 
-    /*
-     * sta should be valid unless the following happens:
-     * The firmware asserts which triggers a reconfig flow, but
-     * the reconfig fails before we set the pointer to sta into
-     * the fw_id_to_mac_id pointer table. Mac80211 can't stop
-     * A-MDPU and hence the timer continues to run. Then, the
-     * timer expires and sta is NULL.
-     */
-    if (!sta) { goto unlock; }
+  /*
+   * sta should be valid unless the following happens:
+   * The firmware asserts which triggers a reconfig flow, but
+   * the reconfig fails before we set the pointer to sta into
+   * the fw_id_to_mac_id pointer table. Mac80211 can't stop
+   * A-MDPU and hence the timer continues to run. Then, the
+   * timer expires and sta is NULL.
+   */
+  if (!sta) {
+    goto unlock;
+  }
 
-    mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-    ieee80211_rx_ba_timer_expired(mvm_sta->vif, sta->addr, ba_data->tid);
+  mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+  ieee80211_rx_ba_timer_expired(mvm_sta->vif, sta->addr, ba_data->tid);
 unlock:
-    rcu_read_unlock();
+  rcu_read_unlock();
 }
 
 /* Disable aggregations for a bitmap of TIDs for a given station */
 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm* mvm, int queue,
                                         unsigned long disable_agg_tids, bool remove_queue) {
-    struct iwl_mvm_add_sta_cmd cmd = {};
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvmsta;
-    uint32_t status;
-    uint8_t sta_id;
-    int ret;
+  struct iwl_mvm_add_sta_cmd cmd = {};
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvmsta;
+  uint32_t status;
+  uint8_t sta_id;
+  int ret;
 
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return -EINVAL; }
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+    return -EINVAL;
+  }
 
-    sta_id = mvm->queue_info[queue].ra_sta_id;
+  sta_id = mvm->queue_info[queue].ra_sta_id;
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+  sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 
-    if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
-        rcu_read_unlock();
-        return -EINVAL;
-    }
-
-    mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-    mvmsta->tid_disable_agg |= disable_agg_tids;
-
-    cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
-    cmd.sta_id = mvmsta->sta_id;
-    cmd.add_modify = STA_MODE_MODIFY;
-    cmd.modify_mask = STA_MODIFY_QUEUES;
-    if (disable_agg_tids) { cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; }
-    if (remove_queue) { cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL; }
-    cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
-    cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
-
+  if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
     rcu_read_unlock();
+    return -EINVAL;
+  }
 
-    /* Notify FW of queue removal from the STA queues */
-    status = ADD_STA_SUCCESS;
-    ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status);
+  mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-    return ret;
+  mvmsta->tid_disable_agg |= disable_agg_tids;
+
+  cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+  cmd.sta_id = mvmsta->sta_id;
+  cmd.add_modify = STA_MODE_MODIFY;
+  cmd.modify_mask = STA_MODIFY_QUEUES;
+  if (disable_agg_tids) {
+    cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
+  }
+  if (remove_queue) {
+    cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
+  }
+  cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+  cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+  rcu_read_unlock();
+
+  /* Notify FW of queue removal from the STA queues */
+  status = ADD_STA_SUCCESS;
+  ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status);
+
+  return ret;
 }
 
 static int iwl_mvm_disable_txq(struct iwl_mvm* mvm, struct ieee80211_sta* sta, int queue,
                                uint8_t tid, uint8_t flags) {
-    struct iwl_scd_txq_cfg_cmd cmd = {
-        .scd_queue = queue,
-        .action = SCD_CFG_DISABLE_QUEUE,
-    };
-    int ret;
+  struct iwl_scd_txq_cfg_cmd cmd = {
+      .scd_queue = queue,
+      .action = SCD_CFG_DISABLE_QUEUE,
+  };
+  int ret;
 
-    if (iwl_mvm_has_new_tx_api(mvm)) {
-        iwl_trans_txq_free(mvm->trans, queue);
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    iwl_trans_txq_free(mvm->trans, queue);
 
-        return 0;
-    }
+    return 0;
+  }
 
-    if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) { return 0; }
+  if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
+    return 0;
+  }
 
-    mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+  mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
 
-    cmd.action = mvm->queue_info[queue].tid_bitmap ? SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
-    if (cmd.action == SCD_CFG_DISABLE_QUEUE) { mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; }
+  cmd.action = mvm->queue_info[queue].tid_bitmap ? SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+  if (cmd.action == SCD_CFG_DISABLE_QUEUE) {
+    mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
+  }
 
-    IWL_DEBUG_TX_QUEUES(mvm, "Disabling TXQ #%d tids=0x%x\n", queue,
-                        mvm->queue_info[queue].tid_bitmap);
+  IWL_DEBUG_TX_QUEUES(mvm, "Disabling TXQ #%d tids=0x%x\n", queue,
+                      mvm->queue_info[queue].tid_bitmap);
 
-    /* If the queue is still enabled - nothing left to do in this func */
-    if (cmd.action == SCD_CFG_ENABLE_QUEUE) { return 0; }
+  /* If the queue is still enabled - nothing left to do in this func */
+  if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
+    return 0;
+  }
 
-    cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
-    cmd.tid = mvm->queue_info[queue].txq_tid;
+  cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+  cmd.tid = mvm->queue_info[queue].txq_tid;
 
-    /* Make sure queue info is correct even though we overwrite it */
-    WARN(mvm->queue_info[queue].tid_bitmap, "TXQ #%d info out-of-sync - tids=0x%x\n", queue,
-         mvm->queue_info[queue].tid_bitmap);
+  /* Make sure queue info is correct even though we overwrite it */
+  WARN(mvm->queue_info[queue].tid_bitmap, "TXQ #%d info out-of-sync - tids=0x%x\n", queue,
+       mvm->queue_info[queue].tid_bitmap);
 
-    /* If we are here - the queue is freed and we can zero out these vals */
-    mvm->queue_info[queue].tid_bitmap = 0;
+  /* If we are here - the queue is freed and we can zero out these vals */
+  mvm->queue_info[queue].tid_bitmap = 0;
 
-    if (sta) {
-        struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_tid(sta, tid);
+  if (sta) {
+    struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_tid(sta, tid);
 
-        mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
-    }
+    mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+  }
 
-    /* Regardless if this is a reserved TXQ for a STA - mark it as false */
-    mvm->queue_info[queue].reserved = false;
+  /* Regardless if this is a reserved TXQ for a STA - mark it as false */
+  mvm->queue_info[queue].reserved = false;
 
-    iwl_trans_txq_disable(mvm->trans, queue, false);
-    ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
+  iwl_trans_txq_disable(mvm->trans, queue, false);
+  ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
 
-    if (ret) { IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", queue, ret); }
-    return ret;
+  if (ret) {
+    IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", queue, ret);
+  }
+  return ret;
 }
 
 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm* mvm, int queue) {
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvmsta;
-    unsigned long tid_bitmap;
-    unsigned long agg_tids = 0;
-    uint8_t sta_id;
-    int tid;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvmsta;
+  unsigned long tid_bitmap;
+  unsigned long agg_tids = 0;
+  uint8_t sta_id;
+  int tid;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return -EINVAL; }
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+    return -EINVAL;
+  }
 
-    sta_id = mvm->queue_info[queue].ra_sta_id;
-    tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+  sta_id = mvm->queue_info[queue].ra_sta_id;
+  tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 
-    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
+  sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
 
-    if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { return -EINVAL; }
+  if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+    return -EINVAL;
+  }
 
-    mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-    spin_lock_bh(&mvmsta->lock);
-    for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-        if (mvmsta->tid_data[tid].state == IWL_AGG_ON) { agg_tids |= BIT(tid); }
+  spin_lock_bh(&mvmsta->lock);
+  for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+    if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+      agg_tids |= BIT(tid);
     }
-    spin_unlock_bh(&mvmsta->lock);
+  }
+  spin_unlock_bh(&mvmsta->lock);
 
-    return agg_tids;
+  return agg_tids;
 }
 
 /*
@@ -393,164 +428,180 @@
  * doesn't disable the queue
  */
 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm* mvm, int queue) {
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvmsta;
-    unsigned long tid_bitmap;
-    unsigned long disable_agg_tids = 0;
-    uint8_t sta_id;
-    int tid;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvmsta;
+  unsigned long tid_bitmap;
+  unsigned long disable_agg_tids = 0;
+  uint8_t sta_id;
+  int tid;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return -EINVAL; }
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+    return -EINVAL;
+  }
 
-    sta_id = mvm->queue_info[queue].ra_sta_id;
-    tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+  sta_id = mvm->queue_info[queue].ra_sta_id;
+  tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+  sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 
-    if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
-        rcu_read_unlock();
-        return 0;
-    }
-
-    mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-    spin_lock_bh(&mvmsta->lock);
-    /* Unmap MAC queues and TIDs from this queue */
-    for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-        struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_tid(sta, tid);
-
-        if (mvmsta->tid_data[tid].state == IWL_AGG_ON) { disable_agg_tids |= BIT(tid); }
-        mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
-
-        mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
-    }
-
-    mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
-    spin_unlock_bh(&mvmsta->lock);
-
+  if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
     rcu_read_unlock();
+    return 0;
+  }
 
-    /*
-     * The TX path may have been using this TXQ_ID from the tid_data,
-     * so make sure it's no longer running so that we can safely reuse
-     * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
-     * above, but nothing guarantees we've stopped using them. Thus,
-     * without this, we could get to iwl_mvm_disable_txq() and remove
-     * the queue while still sending frames to it.
-     */
-    synchronize_net();
+  mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-    return disable_agg_tids;
+  spin_lock_bh(&mvmsta->lock);
+  /* Unmap MAC queues and TIDs from this queue */
+  for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+    struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_tid(sta, tid);
+
+    if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+      disable_agg_tids |= BIT(tid);
+    }
+    mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+
+    mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+  }
+
+  mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
+  spin_unlock_bh(&mvmsta->lock);
+
+  rcu_read_unlock();
+
+  /*
+   * The TX path may have been using this TXQ_ID from the tid_data,
+   * so make sure it's no longer running so that we can safely reuse
+   * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
+   * above, but nothing guarantees we've stopped using them. Thus,
+   * without this, we could get to iwl_mvm_disable_txq() and remove
+   * the queue while still sending frames to it.
+   */
+  synchronize_net();
+
+  return disable_agg_tids;
 }
 
 static int iwl_mvm_free_inactive_queue(struct iwl_mvm* mvm, int queue,
                                        struct ieee80211_sta* old_sta, uint8_t new_sta_id) {
-    struct iwl_mvm_sta* mvmsta;
-    uint8_t sta_id, tid;
-    unsigned long disable_agg_tids = 0;
-    bool same_sta;
-    int ret;
+  struct iwl_mvm_sta* mvmsta;
+  uint8_t sta_id, tid;
+  unsigned long disable_agg_tids = 0;
+  bool same_sta;
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return -EINVAL; }
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+    return -EINVAL;
+  }
 
-    sta_id = mvm->queue_info[queue].ra_sta_id;
-    tid = mvm->queue_info[queue].txq_tid;
+  sta_id = mvm->queue_info[queue].ra_sta_id;
+  tid = mvm->queue_info[queue].txq_tid;
 
-    same_sta = sta_id == new_sta_id;
+  same_sta = sta_id == new_sta_id;
 
-    same_sta = sta_id == new_sta_id;
+  same_sta = sta_id == new_sta_id;
 
-    mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
-    if (WARN_ON(!mvmsta)) { return -EINVAL; }
+  mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+  if (WARN_ON(!mvmsta)) {
+    return -EINVAL;
+  }
 
-    disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
-    /* Disable the queue */
-    if (disable_agg_tids) { iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); }
+  disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
+  /* Disable the queue */
+  if (disable_agg_tids) {
+    iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false);
+  }
 
-    ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
-    if (ret) {
-        IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", queue, ret);
+  ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", queue, ret);
 
-        return ret;
-    }
+    return ret;
+  }
 
-    /* If TXQ is allocated to another STA, update removal in FW */
-    if (!same_sta) { iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); }
+  /* If TXQ is allocated to another STA, update removal in FW */
+  if (!same_sta) {
+    iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
+  }
 
-    return 0;
+  return 0;
 }
 
 static int iwl_mvm_get_shared_queue(struct iwl_mvm* mvm, unsigned long tfd_queue_mask, uint8_t ac) {
-    int queue = 0;
-    uint8_t ac_to_queue[IEEE80211_NUM_ACS];
-    int i;
+  int queue = 0;
+  uint8_t ac_to_queue[IEEE80211_NUM_ACS];
+  int i;
 
-    /*
-     * This protects us against grabbing a queue that's being reconfigured
-     * by the inactivity checker.
-     */
-    lockdep_assert_held(&mvm->mutex);
+  /*
+   * This protects us against grabbing a queue that's being reconfigured
+   * by the inactivity checker.
+   */
+  lockdep_assert_held(&mvm->mutex);
 
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return -EINVAL; }
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+    return -EINVAL;
+  }
 
-    memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
+  memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
 
-    /* See what ACs the existing queues for this STA have */
-    for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
-        /* Only DATA queues can be shared */
-        if (i < IWL_MVM_DQA_MIN_DATA_QUEUE && i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) { continue; }
-
-        ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
+  /* See what ACs the existing queues for this STA have */
+  for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
+    /* Only DATA queues can be shared */
+    if (i < IWL_MVM_DQA_MIN_DATA_QUEUE && i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) {
+      continue;
     }
 
-    /*
-     * The queue to share is chosen only from DATA queues as follows (in
-     * descending priority):
-     * 1. An AC_BE queue
-     * 2. Same AC queue
-     * 3. Highest AC queue that is lower than new AC
-     * 4. Any existing AC (there always is at least 1 DATA queue)
-     */
+    ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
+  }
 
-    /* Priority 1: An AC_BE queue */
-    if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE) {
-        queue = ac_to_queue[IEEE80211_AC_BE];
-    }
-    /* Priority 2: Same AC queue */
-    else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE) {
-        queue = ac_to_queue[ac];
-    }
-    /* Priority 3a: If new AC is VO and VI exists - use VI */
-    else if (ac == IEEE80211_AC_VO && ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) {
-        queue = ac_to_queue[IEEE80211_AC_VI];
-    }
-    /* Priority 3b: No BE so only AC less than the new one is BK */
-    else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE) {
-        queue = ac_to_queue[IEEE80211_AC_BK];
-    }
-    /* Priority 4a: No BE nor BK - use VI if exists */
-    else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) {
-        queue = ac_to_queue[IEEE80211_AC_VI];
-    }
-    /* Priority 4b: No BE, BK nor VI - use VO if exists */
-    else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE) {
-        queue = ac_to_queue[IEEE80211_AC_VO];
-    }
+  /*
+   * The queue to share is chosen only from DATA queues as follows (in
+   * descending priority):
+   * 1. An AC_BE queue
+   * 2. Same AC queue
+   * 3. Highest AC queue that is lower than new AC
+   * 4. Any existing AC (there always is at least 1 DATA queue)
+   */
 
-    /* Make sure queue found (or not) is legal */
-    if (!iwl_mvm_is_dqa_data_queue(mvm, queue) && !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
-        (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
-        IWL_ERR(mvm, "No DATA queues available to share\n");
-        return -ENOSPC;
-    }
+  /* Priority 1: An AC_BE queue */
+  if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE) {
+    queue = ac_to_queue[IEEE80211_AC_BE];
+  }
+  /* Priority 2: Same AC queue */
+  else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE) {
+    queue = ac_to_queue[ac];
+  }
+  /* Priority 3a: If new AC is VO and VI exists - use VI */
+  else if (ac == IEEE80211_AC_VO && ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) {
+    queue = ac_to_queue[IEEE80211_AC_VI];
+  }
+  /* Priority 3b: No BE so only AC less than the new one is BK */
+  else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE) {
+    queue = ac_to_queue[IEEE80211_AC_BK];
+  }
+  /* Priority 4a: No BE nor BK - use VI if exists */
+  else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) {
+    queue = ac_to_queue[IEEE80211_AC_VI];
+  }
+  /* Priority 4b: No BE, BK nor VI - use VO if exists */
+  else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE) {
+    queue = ac_to_queue[IEEE80211_AC_VO];
+  }
 
-    return queue;
+  /* Make sure queue found (or not) is legal */
+  if (!iwl_mvm_is_dqa_data_queue(mvm, queue) && !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
+      (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
+    IWL_ERR(mvm, "No DATA queues available to share\n");
+    return -ENOSPC;
+  }
+
+  return queue;
 }
 
 /*
@@ -561,314 +612,340 @@
  */
 static int iwl_mvm_redirect_queue(struct iwl_mvm* mvm, int queue, int tid, int ac, int ssn,
                                   unsigned int wdg_timeout, bool force, struct iwl_mvm_txq* txq) {
-    struct iwl_scd_txq_cfg_cmd cmd = {
-        .scd_queue = queue,
-        .action = SCD_CFG_DISABLE_QUEUE,
-    };
-    bool shared_queue;
-    int ret;
+  struct iwl_scd_txq_cfg_cmd cmd = {
+      .scd_queue = queue,
+      .action = SCD_CFG_DISABLE_QUEUE,
+  };
+  bool shared_queue;
+  int ret;
 
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return -EINVAL; }
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+    return -EINVAL;
+  }
 
-    /*
-     * If the AC is lower than current one - FIFO needs to be redirected to
-     * the lowest one of the streams in the queue. Check if this is needed
-     * here.
-     * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
-     * value 3 and VO with value 0, so to check if ac X is lower than ac Y
-     * we need to check if the numerical value of X is LARGER than of Y.
-     */
-    if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
-        IWL_DEBUG_TX_QUEUES(mvm, "No redirection needed on TXQ #%d\n", queue);
-        return 0;
-    }
+  /*
+   * If the AC is lower than current one - FIFO needs to be redirected to
+   * the lowest one of the streams in the queue. Check if this is needed
+   * here.
+   * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
+   * value 3 and VO with value 0, so to check if ac X is lower than ac Y
+   * we need to check if the numerical value of X is LARGER than of Y.
+   */
+  if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
+    IWL_DEBUG_TX_QUEUES(mvm, "No redirection needed on TXQ #%d\n", queue);
+    return 0;
+  }
 
-    cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
-    cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
-    cmd.tid = mvm->queue_info[queue].txq_tid;
-    shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
+  cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+  cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
+  cmd.tid = mvm->queue_info[queue].txq_tid;
+  shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
 
-    IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", queue, iwl_mvm_ac_to_tx_fifo[ac]);
+  IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", queue, iwl_mvm_ac_to_tx_fifo[ac]);
 
-    /* Stop the queue and wait for it to empty */
-    txq->stopped = true;
+  /* Stop the queue and wait for it to empty */
+  txq->stopped = true;
 
-    ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
-    if (ret) {
-        IWL_ERR(mvm, "Error draining queue %d before reconfig\n", queue);
-        ret = -EIO;
-        goto out;
-    }
+  ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
+  if (ret) {
+    IWL_ERR(mvm, "Error draining queue %d before reconfig\n", queue);
+    ret = -EIO;
+    goto out;
+  }
 
-    /* Before redirecting the queue we need to de-activate it */
-    iwl_trans_txq_disable(mvm->trans, queue, false);
-    ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
-    if (ret) { IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue, ret); }
+  /* Before redirecting the queue we need to de-activate it */
+  iwl_trans_txq_disable(mvm->trans, queue, false);
+  ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue, ret);
+  }
 
-    /* Make sure the SCD wrptr is correctly set before reconfiguring */
-    iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
+  /* Make sure the SCD wrptr is correctly set before reconfiguring */
+  iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
 
-    /* Update the TID "owner" of the queue */
-    mvm->queue_info[queue].txq_tid = tid;
+  /* Update the TID "owner" of the queue */
+  mvm->queue_info[queue].txq_tid = tid;
 
-    /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
+  /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
 
-    /* Redirect to lower AC */
-    iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac], cmd.sta_id, tid, IWL_FRAME_LIMIT,
-                         ssn);
+  /* Redirect to lower AC */
+  iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac], cmd.sta_id, tid, IWL_FRAME_LIMIT,
+                       ssn);
 
-    /* Update AC marking of the queue */
-    mvm->queue_info[queue].mac80211_ac = ac;
+  /* Update AC marking of the queue */
+  mvm->queue_info[queue].mac80211_ac = ac;
 
-    /*
-     * Mark queue as shared in transport if shared
-     * Note this has to be done after queue enablement because enablement
-     * can also set this value, and there is no indication there to shared
-     * queues
-     */
-    if (shared_queue) { iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); }
+  /*
+   * Mark queue as shared in transport if shared
+   * Note this has to be done after queue enablement because enablement
+   * can also set this value, and there is no indication there to shared
+   * queues
+   */
+  if (shared_queue) {
+    iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+  }
 
 out:
-    /* Continue using the queue */
-    txq->stopped = false;
+  /* Continue using the queue */
+  txq->stopped = false;
 
-    return ret;
+  return ret;
 }
 
 static int iwl_mvm_find_free_queue(struct iwl_mvm* mvm, uint8_t sta_id, uint8_t minq,
                                    uint8_t maxq) {
-    int i;
+  int i;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* This should not be hit with new TX path */
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return -ENOSPC; }
-
-    /* Start by looking for a free queue */
-    for (i = minq; i <= maxq; i++)
-        if (mvm->queue_info[i].tid_bitmap == 0 && mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) {
-            return i;
-        }
-
+  /* This should not be hit with new TX path */
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
     return -ENOSPC;
+  }
+
+  /* Start by looking for a free queue */
+  for (i = minq; i <= maxq; i++)
+    if (mvm->queue_info[i].tid_bitmap == 0 && mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) {
+      return i;
+    }
+
+  return -ENOSPC;
 }
 
 static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm* mvm, uint8_t sta_id, uint8_t tid,
                                    unsigned int timeout) {
-    int queue, size = IWL_DEFAULT_QUEUE_SIZE;
+  int queue, size = IWL_DEFAULT_QUEUE_SIZE;
 
-    if (tid == IWL_MAX_TID_COUNT) {
-        tid = IWL_MGMT_TID;
-        size = IWL_MGMT_QUEUE_SIZE;
-    }
-    queue = iwl_trans_txq_alloc(mvm->trans, cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), sta_id, tid,
-                                SCD_QUEUE_CFG, size, timeout);
+  if (tid == IWL_MAX_TID_COUNT) {
+    tid = IWL_MGMT_TID;
+    size = IWL_MGMT_QUEUE_SIZE;
+  }
+  queue = iwl_trans_txq_alloc(mvm->trans, cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), sta_id, tid,
+                              SCD_QUEUE_CFG, size, timeout);
 
-    if (queue < 0) {
-        IWL_DEBUG_TX_QUEUES(mvm, "Failed allocating TXQ for sta %d tid %d, ret: %d\n", sta_id, tid,
-                            queue);
-        return queue;
-    }
-
-    IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", queue, sta_id, tid);
-
-    IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
-
+  if (queue < 0) {
+    IWL_DEBUG_TX_QUEUES(mvm, "Failed allocating TXQ for sta %d tid %d, ret: %d\n", sta_id, tid,
+                        queue);
     return queue;
+  }
+
+  IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", queue, sta_id, tid);
+
+  IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
+
+  return queue;
 }
 
 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm* mvm, struct ieee80211_sta* sta, uint8_t ac,
                                         int tid) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_tid(sta, tid);
-    unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
-    int queue = -1;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_tid(sta, tid);
+  unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+  int queue = -1;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue for sta %d on tid %d\n", mvmsta->sta_id, tid);
-    queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
-    if (queue < 0) { return queue; }
+  IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue for sta %d on tid %d\n", mvmsta->sta_id, tid);
+  queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
+  if (queue < 0) {
+    return queue;
+  }
 
-    if (sta) {
-        mvmtxq->txq_id = queue;
-        mvm->tvqm_info[queue].txq_tid = tid;
-    }
+  if (sta) {
+    mvmtxq->txq_id = queue;
+    mvm->tvqm_info[queue].txq_tid = tid;
+  }
 
-    IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
+  IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
 
-    spin_lock_bh(&mvmsta->lock);
-    mvmsta->tid_data[tid].txq_id = queue;
-    spin_unlock_bh(&mvmsta->lock);
+  spin_lock_bh(&mvmsta->lock);
+  mvmsta->tid_data[tid].txq_id = queue;
+  spin_unlock_bh(&mvmsta->lock);
 
-    return 0;
+  return 0;
 }
 
 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm* mvm, struct ieee80211_sta* sta, int queue,
                                        uint8_t sta_id, uint8_t tid) {
-    bool enable_queue = true;
+  bool enable_queue = true;
 
-    /* Make sure this TID isn't already enabled */
-    if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
-        IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", queue, tid);
-        return false;
+  /* Make sure this TID isn't already enabled */
+  if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
+    IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", queue, tid);
+    return false;
+  }
+
+  /* Update mappings and refcounts */
+  if (mvm->queue_info[queue].tid_bitmap) {
+    enable_queue = false;
+  }
+
+  mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+  mvm->queue_info[queue].ra_sta_id = sta_id;
+
+  if (enable_queue) {
+    if (tid != IWL_MAX_TID_COUNT) {
+      mvm->queue_info[queue].mac80211_ac = tid_to_mac80211_ac[tid];
+    } else {
+      mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
     }
 
-    /* Update mappings and refcounts */
-    if (mvm->queue_info[queue].tid_bitmap) { enable_queue = false; }
+    mvm->queue_info[queue].txq_tid = tid;
+  }
 
-    mvm->queue_info[queue].tid_bitmap |= BIT(tid);
-    mvm->queue_info[queue].ra_sta_id = sta_id;
+  if (sta) {
+    struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_tid(sta, tid);
 
-    if (enable_queue) {
-        if (tid != IWL_MAX_TID_COUNT) {
-            mvm->queue_info[queue].mac80211_ac = tid_to_mac80211_ac[tid];
-        } else {
-            mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
-        }
+    mvmtxq->txq_id = queue;
+  }
 
-        mvm->queue_info[queue].txq_tid = tid;
-    }
+  IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d tids=0x%x\n", queue,
+                      mvm->queue_info[queue].tid_bitmap);
 
-    if (sta) {
-        struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_tid(sta, tid);
-
-        mvmtxq->txq_id = queue;
-    }
-
-    IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d tids=0x%x\n", queue,
-                        mvm->queue_info[queue].tid_bitmap);
-
-    return enable_queue;
+  return enable_queue;
 }
 
 static bool iwl_mvm_enable_txq(struct iwl_mvm* mvm, struct ieee80211_sta* sta, int queue,
                                uint16_t ssn, const struct iwl_trans_txq_scd_cfg* cfg,
                                unsigned int wdg_timeout) {
-    struct iwl_scd_txq_cfg_cmd cmd = {
-        .scd_queue = queue,
-        .action = SCD_CFG_ENABLE_QUEUE,
-        .window = cfg->frame_limit,
-        .sta_id = cfg->sta_id,
-        .ssn = cpu_to_le16(ssn),
-        .tx_fifo = cfg->fifo,
-        .aggregate = cfg->aggregate,
-        .tid = cfg->tid,
-    };
-    bool inc_ssn;
+  struct iwl_scd_txq_cfg_cmd cmd = {
+      .scd_queue = queue,
+      .action = SCD_CFG_ENABLE_QUEUE,
+      .window = cfg->frame_limit,
+      .sta_id = cfg->sta_id,
+      .ssn = cpu_to_le16(ssn),
+      .tx_fifo = cfg->fifo,
+      .aggregate = cfg->aggregate,
+      .tid = cfg->tid,
+  };
+  bool inc_ssn;
 
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return false; }
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+    return false;
+  }
 
-    /* Send the enabling command if we need to */
-    if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid)) { return false; }
+  /* Send the enabling command if we need to */
+  if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid)) {
+    return false;
+  }
 
-    inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
-    if (inc_ssn) { le16_add_cpu(&cmd.ssn, 1); }
+  inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
+  if (inc_ssn) {
+    le16_add_cpu(&cmd.ssn, 1);
+  }
 
-    WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
-         "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+  WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+       "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
 
-    return inc_ssn;
+  return inc_ssn;
 }
 
 static void iwl_mvm_change_queue_tid(struct iwl_mvm* mvm, int queue) {
-    struct iwl_scd_txq_cfg_cmd cmd = {
-        .scd_queue = queue,
-        .action = SCD_CFG_UPDATE_QUEUE_TID,
-    };
-    int tid;
-    unsigned long tid_bitmap;
-    int ret;
+  struct iwl_scd_txq_cfg_cmd cmd = {
+      .scd_queue = queue,
+      .action = SCD_CFG_UPDATE_QUEUE_TID,
+  };
+  int tid;
+  unsigned long tid_bitmap;
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return; }
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+    return;
+  }
 
-    tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+  tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 
-    if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) { return; }
+  if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) {
+    return;
+  }
 
-    /* Find any TID for queue */
-    tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
-    cmd.tid = tid;
-    cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+  /* Find any TID for queue */
+  tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+  cmd.tid = tid;
+  cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
-    if (ret) {
-        IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", queue, ret);
-        return;
-    }
+  ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", queue, ret);
+    return;
+  }
 
-    mvm->queue_info[queue].txq_tid = tid;
-    IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", queue, tid);
+  mvm->queue_info[queue].txq_tid = tid;
+  IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", queue, tid);
 }
 
 static void iwl_mvm_unshare_queue(struct iwl_mvm* mvm, int queue) {
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvmsta;
-    uint8_t sta_id;
-    int tid = -1;
-    unsigned long tid_bitmap;
-    unsigned int wdg_timeout;
-    int ssn;
-    int ret = true;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvmsta;
+  uint8_t sta_id;
+  int tid = -1;
+  unsigned long tid_bitmap;
+  unsigned int wdg_timeout;
+  int ssn;
+  int ret = true;
 
-    /* queue sharing is disabled on new TX path */
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return; }
+  /* queue sharing is disabled on new TX path */
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+    return;
+  }
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    sta_id = mvm->queue_info[queue].ra_sta_id;
-    tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+  sta_id = mvm->queue_info[queue].ra_sta_id;
+  tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 
-    /* Find TID for queue, and make sure it is the only one on the queue */
-    tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
-    if (tid_bitmap != BIT(tid)) {
-        IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n", queue, tid_bitmap);
-        return;
+  /* Find TID for queue, and make sure it is the only one on the queue */
+  tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+  if (tid_bitmap != BIT(tid)) {
+    IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n", queue, tid_bitmap);
+    return;
+  }
+
+  IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue, tid);
+
+  sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
+
+  if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+    return;
+  }
+
+  mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+  ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+  ret = iwl_mvm_redirect_queue(mvm, queue, tid, tid_to_mac80211_ac[tid], ssn, wdg_timeout, true,
+                               iwl_mvm_txq_from_tid(sta, tid));
+  if (ret) {
+    IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+    return;
+  }
+
+  /* If aggs should be turned back on - do it */
+  if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+    struct iwl_mvm_add_sta_cmd cmd = {0};
+
+    mvmsta->tid_disable_agg &= ~BIT(tid);
+
+    cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+    cmd.sta_id = mvmsta->sta_id;
+    cmd.add_modify = STA_MODE_MODIFY;
+    cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+    cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+    cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+    ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+    if (!ret) {
+      IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d is now aggregated again\n", queue);
+
+      /* Mark queue internally as aggregating again */
+      iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
     }
+  }
 
-    IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue, tid);
-
-    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
-
-    if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { return; }
-
-    mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
-
-    ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
-
-    ret = iwl_mvm_redirect_queue(mvm, queue, tid, tid_to_mac80211_ac[tid], ssn, wdg_timeout, true,
-                                 iwl_mvm_txq_from_tid(sta, tid));
-    if (ret) {
-        IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
-        return;
-    }
-
-    /* If aggs should be turned back on - do it */
-    if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
-        struct iwl_mvm_add_sta_cmd cmd = {0};
-
-        mvmsta->tid_disable_agg &= ~BIT(tid);
-
-        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
-        cmd.sta_id = mvmsta->sta_id;
-        cmd.add_modify = STA_MODE_MODIFY;
-        cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
-        cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
-        cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
-
-        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd);
-        if (!ret) {
-            IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d is now aggregated again\n", queue);
-
-            /* Mark queue intenally as aggregating again */
-            iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
-        }
-    }
-
-    mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+  mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
 }
 
 /*
@@ -881,75 +958,81 @@
 static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta, int queue,
                                          unsigned long tid_bitmap, unsigned long* unshare_queues,
                                          unsigned long* changetid_queues) {
-    int tid;
+  int tid;
 
-    lockdep_assert_held(&mvmsta->lock);
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvmsta->lock);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return false; }
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+    return false;
+  }
 
-    /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
-    for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-        /* If some TFDs are still queued - don't mark TID as inactive */
-        if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) { tid_bitmap &= ~BIT(tid); }
-
-        /* Don't mark as inactive any TID that has an active BA */
-        if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) { tid_bitmap &= ~BIT(tid); }
+  /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
+  for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+    /* If some TFDs are still queued - don't mark TID as inactive */
+    if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) {
+      tid_bitmap &= ~BIT(tid);
     }
 
-    /* If all TIDs in the queue are inactive - return it can be reused */
-    if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
-        IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
-        return true;
+    /* Don't mark as inactive any TID that has an active BA */
+    if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+      tid_bitmap &= ~BIT(tid);
     }
+  }
 
-    /*
-     * If we are here, this is a shared queue and not all TIDs timed-out.
-     * Remove the ones that did.
-     */
-    for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-        uint16_t tid_bitmap;
+  /* If all TIDs in the queue are inactive - return it can be reused */
+  if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
+    IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
+    return true;
+  }
 
-        mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
-        mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+  /*
+   * If we are here, this is a shared queue and not all TIDs timed-out.
+   * Remove the ones that did.
+   */
+  for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+    uint16_t tid_bitmap;
 
-        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+    mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+    mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
 
-        /*
-         * We need to take into account a situation in which a TXQ was
-         * allocated to TID x, and then turned shared by adding TIDs y
-         * and z. If TID x becomes inactive and is removed from the TXQ,
-         * ownership must be given to one of the remaining TIDs.
-         * This is mainly because if TID x continues - a new queue can't
-         * be allocated for it as long as it is an owner of another TXQ.
-         *
-         * Mark this queue in the right bitmap, we'll send the command
-         * to the firmware later.
-         */
-        if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) {
-            set_bit(queue, changetid_queues);
-        }
-
-        IWL_DEBUG_TX_QUEUES(mvm, "Removing inactive TID %d from shared Q:%d\n", tid, queue);
-    }
-
-    IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d left with tid bitmap 0x%x\n", queue,
-                        mvm->queue_info[queue].tid_bitmap);
-
-    /*
-     * There may be different TIDs with the same mac queues, so make
-     * sure all TIDs have existing corresponding mac queues enabled
-     */
     tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 
-    /* If the queue is marked as shared - "unshare" it */
-    if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
-        mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
-        IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", queue);
-        set_bit(queue, unshare_queues);
+    /*
+     * We need to take into account a situation in which a TXQ was
+     * allocated to TID x, and then turned shared by adding TIDs y
+     * and z. If TID x becomes inactive and is removed from the TXQ,
+     * ownership must be given to one of the remaining TIDs.
+     * This is mainly because if TID x continues - a new queue can't
+     * be allocated for it as long as it is an owner of another TXQ.
+     *
+     * Mark this queue in the right bitmap, we'll send the command
+     * to the firmware later.
+     */
+    if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) {
+      set_bit(queue, changetid_queues);
     }
 
-    return false;
+    IWL_DEBUG_TX_QUEUES(mvm, "Removing inactive TID %d from shared Q:%d\n", tid, queue);
+  }
+
+  IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d left with tid bitmap 0x%x\n", queue,
+                      mvm->queue_info[queue].tid_bitmap);
+
+  /*
+   * There may be different TIDs with the same mac queues, so make
+   * sure all TIDs have existing corresponding mac queues enabled
+   */
+  tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+
+  /* If the queue is marked as shared - "unshare" it */
+  if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
+      mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+    IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", queue);
+    set_bit(queue, unshare_queues);
+  }
+
+  return false;
 }
 
 /*
@@ -962,311 +1045,338 @@
  * Returns the queue number, or -ENOSPC.
  */
 static int iwl_mvm_inactivity_check(struct iwl_mvm* mvm, uint8_t alloc_for_sta) {
-    unsigned long now = jiffies;
-    unsigned long unshare_queues = 0;
-    unsigned long changetid_queues = 0;
-    int i, ret, free_queue = -ENOSPC;
-    struct ieee80211_sta* queue_owner = NULL;
+  unsigned long now = jiffies;
+  unsigned long unshare_queues = 0;
+  unsigned long changetid_queues = 0;
+  int i, ret, free_queue = -ENOSPC;
+  struct ieee80211_sta* queue_owner = NULL;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (iwl_mvm_has_new_tx_api(mvm)) { return -ENOSPC; }
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    return -ENOSPC;
+  }
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    /* we skip the CMD queue below by starting at 1 */
-    BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
+  /* we skip the CMD queue below by starting at 1 */
+  BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
 
-    for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
-        struct ieee80211_sta* sta;
-        struct iwl_mvm_sta* mvmsta;
-        uint8_t sta_id;
-        int tid;
-        unsigned long inactive_tid_bitmap = 0;
-        unsigned long queue_tid_bitmap;
+  for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
+    struct ieee80211_sta* sta;
+    struct iwl_mvm_sta* mvmsta;
+    uint8_t sta_id;
+    int tid;
+    unsigned long inactive_tid_bitmap = 0;
+    unsigned long queue_tid_bitmap;
 
-        queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
-        if (!queue_tid_bitmap) { continue; }
-
-        /* If TXQ isn't in active use anyway - nothing to do here... */
-        if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
-            mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
-            continue;
-        }
-
-        /* Check to see if there are inactive TIDs on this queue */
-        for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-            if (time_after(mvm->queue_info[i].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT,
-                           now)) {
-                continue;
-            }
-
-            inactive_tid_bitmap |= BIT(tid);
-        }
-
-        /* If all TIDs are active - finish check on this queue */
-        if (!inactive_tid_bitmap) { continue; }
-
-        /*
-         * If we are here - the queue hadn't been served recently and is
-         * in use
-         */
-
-        sta_id = mvm->queue_info[i].ra_sta_id;
-        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-
-        /*
-         * If the STA doesn't exist anymore, it isn't an error. It could
-         * be that it was removed since getting the queues, and in this
-         * case it should've inactivated its queues anyway.
-         */
-        if (IS_ERR_OR_NULL(sta)) { continue; }
-
-        mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-        spin_lock_bh(&mvmsta->lock);
-        ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, inactive_tid_bitmap, &unshare_queues,
-                                           &changetid_queues);
-        if (ret >= 0 && free_queue < 0) {
-            queue_owner = sta;
-            free_queue = ret;
-        }
-        /* only unlock sta lock - we still need the queue info lock */
-        spin_unlock_bh(&mvmsta->lock);
+    queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
+    if (!queue_tid_bitmap) {
+      continue;
     }
 
-    /* Reconfigure queues requiring reconfiguation */
-    for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) iwl_mvm_unshare_queue(mvm, i);
-    for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) iwl_mvm_change_queue_tid(mvm, i);
-
-    if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
-        ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta);
-        if (ret) {
-            rcu_read_unlock();
-            return ret;
-        }
+    /* If TXQ isn't in active use anyway - nothing to do here... */
+    if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
+        mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
+      continue;
     }
 
-    rcu_read_unlock();
+    /* Check to see if there are inactive TIDs on this queue */
+    for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+      if (time_after(mvm->queue_info[i].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) {
+        continue;
+      }
 
-    return free_queue;
+      inactive_tid_bitmap |= BIT(tid);
+    }
+
+    /* If all TIDs are active - finish check on this queue */
+    if (!inactive_tid_bitmap) {
+      continue;
+    }
+
+    /*
+     * If we are here - the queue hadn't been served recently and is
+     * in use
+     */
+
+    sta_id = mvm->queue_info[i].ra_sta_id;
+    sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+    /*
+     * If the STA doesn't exist anymore, it isn't an error. It could
+     * be that it was removed since getting the queues, and in this
+     * case it should've inactivated its queues anyway.
+     */
+    if (IS_ERR_OR_NULL(sta)) {
+      continue;
+    }
+
+    mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+    spin_lock_bh(&mvmsta->lock);
+    ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, inactive_tid_bitmap, &unshare_queues,
+                                       &changetid_queues);
+    if (ret >= 0 && free_queue < 0) {
+      queue_owner = sta;
+      free_queue = ret;
+    }
+    /* only unlock sta lock - we still need the queue info lock */
+    spin_unlock_bh(&mvmsta->lock);
+  }
+
+  /* Reconfigure queues requiring reconfiguration */
+  for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) iwl_mvm_unshare_queue(mvm, i);
+  for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) iwl_mvm_change_queue_tid(mvm, i);
+
+  if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
+    ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta);
+    if (ret) {
+      rcu_read_unlock();
+      return ret;
+    }
+  }
+
+  rcu_read_unlock();
+
+  return free_queue;
 }
 
 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm* mvm, struct ieee80211_sta* sta, uint8_t ac,
                                    int tid) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_trans_txq_scd_cfg cfg = {
-        .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
-        .sta_id = mvmsta->sta_id,
-        .tid = tid,
-        .frame_limit = IWL_FRAME_LIMIT,
-    };
-    unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
-    int queue = -1;
-    unsigned long disable_agg_tids = 0;
-    enum iwl_mvm_agg_state queue_state;
-    bool shared_queue = false, inc_ssn;
-    int ssn;
-    unsigned long tfd_queue_mask;
-    int ret;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_trans_txq_scd_cfg cfg = {
+      .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
+      .sta_id = mvmsta->sta_id,
+      .tid = tid,
+      .frame_limit = IWL_FRAME_LIMIT,
+  };
+  unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+  int queue = -1;
+  unsigned long disable_agg_tids = 0;
+  enum iwl_mvm_agg_state queue_state;
+  bool shared_queue = false, inc_ssn;
+  int ssn;
+  unsigned long tfd_queue_mask;
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (iwl_mvm_has_new_tx_api(mvm)) { return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); }
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+  }
 
-    spin_lock_bh(&mvmsta->lock);
-    tfd_queue_mask = mvmsta->tfd_queue_msk;
-    ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
-    spin_unlock_bh(&mvmsta->lock);
+  spin_lock_bh(&mvmsta->lock);
+  tfd_queue_mask = mvmsta->tfd_queue_msk;
+  ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+  spin_unlock_bh(&mvmsta->lock);
 
-    if (tid == IWL_MAX_TID_COUNT) {
-        queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_MGMT_QUEUE,
-                                        IWL_MVM_DQA_MAX_MGMT_QUEUE);
-        if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) {
-            IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", queue);
-        }
-
-        /* If no such queue is found, we'll use a DATA queue instead */
+  if (tid == IWL_MAX_TID_COUNT) {
+    queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_MGMT_QUEUE,
+                                    IWL_MVM_DQA_MAX_MGMT_QUEUE);
+    if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) {
+      IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", queue);
     }
 
-    if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
-        (mvm->queue_info[mvmsta->reserved_queue].status == IWL_MVM_QUEUE_RESERVED)) {
-        queue = mvmsta->reserved_queue;
-        mvm->queue_info[queue].reserved = true;
-        IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+    /* If no such queue is found, we'll use a DATA queue instead */
+  }
+
+  if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
+      (mvm->queue_info[mvmsta->reserved_queue].status == IWL_MVM_QUEUE_RESERVED)) {
+    queue = mvmsta->reserved_queue;
+    mvm->queue_info[queue].reserved = true;
+    IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+  }
+
+  if (queue < 0)
+    queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE,
+                                    IWL_MVM_DQA_MAX_DATA_QUEUE);
+  if (queue < 0) {
+    /* try harder - perhaps kill an inactive queue */
+    queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
+  }
+
+  /* No free queue - we'll have to share */
+  if (queue <= 0) {
+    queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
+    if (queue > 0) {
+      shared_queue = true;
+      mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
+    }
+  }
+
+  /*
+   * Mark TXQ as ready, even though it hasn't been fully configured yet,
+   * to make sure no one else takes it.
+   * This will allow avoiding re-acquiring the lock at the end of the
+   * configuration. On error we'll mark it back as free.
+   */
+  if (queue > 0 && !shared_queue) {
+    mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+  }
+
+  /* This shouldn't happen - out of queues */
+  if (WARN_ON(queue <= 0)) {
+    IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", tid, cfg.sta_id);
+    return queue;
+  }
+
+  /*
+   * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+   * but for configuring the SCD to send A-MPDUs we need to mark the queue
+   * as aggregatable.
+   * Mark all DATA queues as allowing to be aggregated at some point
+   */
+  cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+  IWL_DEBUG_TX_QUEUES(mvm, "Allocating %squeue #%d to sta %d on tid %d\n",
+                      shared_queue ? "shared " : "", queue, mvmsta->sta_id, tid);
+
+  if (shared_queue) {
+    /* Disable any open aggs on this queue */
+    disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
+
+    if (disable_agg_tids) {
+      IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", queue);
+      iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false);
+    }
+  }
+
+  inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
+
+  /*
+   * Mark queue as shared in transport if shared
+   * Note this has to be done after queue enablement because enablement
+   * can also set this value, and there is no indication there to shared
+   * queues
+   */
+  if (shared_queue) {
+    iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+  }
+
+  spin_lock_bh(&mvmsta->lock);
+  /*
+   * This looks racy, but it is not. We have only one packet for
+   * this ra/tid in our Tx path since we stop the Qdisc when we
+   * need to allocate a new TFD queue.
+   */
+  if (inc_ssn) {
+    mvmsta->tid_data[tid].seq_number += 0x10;
+    ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
+  }
+  mvmsta->tid_data[tid].txq_id = queue;
+  mvmsta->tfd_queue_msk |= BIT(queue);
+  queue_state = mvmsta->tid_data[tid].state;
+
+  if (mvmsta->reserved_queue == queue) {
+    mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+  }
+  spin_unlock_bh(&mvmsta->lock);
+
+  if (!shared_queue) {
+    ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+    if (ret) {
+      goto out_err;
     }
 
-    if (queue < 0)
-        queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE,
-                                        IWL_MVM_DQA_MAX_DATA_QUEUE);
-    if (queue < 0) {
-        /* try harder - perhaps kill an inactive queue */
-        queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
+    /* If we need to re-enable aggregations... */
+    if (queue_state == IWL_AGG_ON) {
+      ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+      if (ret) {
+        goto out_err;
+      }
     }
-
-    /* No free queue - we'll have to share */
-    if (queue <= 0) {
-        queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
-        if (queue > 0) {
-            shared_queue = true;
-            mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
-        }
+  } else {
+    /* Redirect queue, if needed */
+    ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn, wdg_timeout, false,
+                                 iwl_mvm_txq_from_tid(sta, tid));
+    if (ret) {
+      goto out_err;
     }
+  }
 
-    /*
-     * Mark TXQ as ready, even though it hasn't been fully configured yet,
-     * to make sure no one else takes it.
-     * This will allow avoiding re-acquiring the lock at the end of the
-     * configuration. On error we'll mark it back as free.
-     */
-    if (queue > 0 && !shared_queue) { mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; }
-
-    /* This shouldn't happen - out of queues */
-    if (WARN_ON(queue <= 0)) {
-        IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", tid, cfg.sta_id);
-        return queue;
-    }
-
-    /*
-     * Actual en/disablement of aggregations is through the ADD_STA HCMD,
-     * but for configuring the SCD to send A-MPDUs we need to mark the queue
-     * as aggregatable.
-     * Mark all DATA queues as allowing to be aggregated at some point
-     */
-    cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
-
-    IWL_DEBUG_TX_QUEUES(mvm, "Allocating %squeue #%d to sta %d on tid %d\n",
-                        shared_queue ? "shared " : "", queue, mvmsta->sta_id, tid);
-
-    if (shared_queue) {
-        /* Disable any open aggs on this queue */
-        disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
-
-        if (disable_agg_tids) {
-            IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", queue);
-            iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false);
-        }
-    }
-
-    inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
-
-    /*
-     * Mark queue as shared in transport if shared
-     * Note this has to be done after queue enablement because enablement
-     * can also set this value, and there is no indication there to shared
-     * queues
-     */
-    if (shared_queue) { iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); }
-
-    spin_lock_bh(&mvmsta->lock);
-    /*
-     * This looks racy, but it is not. We have only one packet for
-     * this ra/tid in our Tx path since we stop the Qdisc when we
-     * need to allocate a new TFD queue.
-     */
-    if (inc_ssn) {
-        mvmsta->tid_data[tid].seq_number += 0x10;
-        ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
-    }
-    mvmsta->tid_data[tid].txq_id = queue;
-    mvmsta->tfd_queue_msk |= BIT(queue);
-    queue_state = mvmsta->tid_data[tid].state;
-
-    if (mvmsta->reserved_queue == queue) { mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; }
-    spin_unlock_bh(&mvmsta->lock);
-
-    if (!shared_queue) {
-        ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
-        if (ret) { goto out_err; }
-
-        /* If we need to re-enable aggregations... */
-        if (queue_state == IWL_AGG_ON) {
-            ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
-            if (ret) { goto out_err; }
-        }
-    } else {
-        /* Redirect queue, if needed */
-        ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn, wdg_timeout, false,
-                                     iwl_mvm_txq_from_tid(sta, tid));
-        if (ret) { goto out_err; }
-    }
-
-    return 0;
+  return 0;
 
 out_err:
-    iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
+  iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
 
-    return ret;
+  return ret;
 }
 
 static inline uint8_t iwl_mvm_tid_to_ac_queue(int tid) {
-    if (tid == IWL_MAX_TID_COUNT) { return IEEE80211_AC_VO; /* MGMT */ }
+  if (tid == IWL_MAX_TID_COUNT) {
+    return IEEE80211_AC_VO; /* MGMT */
+  }
 
-    return tid_to_mac80211_ac[tid];
+  return tid_to_mac80211_ac[tid];
 }
 
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct* wk) {
-    struct iwl_mvm* mvm = container_of(wk, struct iwl_mvm, add_stream_wk);
+  struct iwl_mvm* mvm = container_of(wk, struct iwl_mvm, add_stream_wk);
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
+  iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
 
-    while (!list_empty(&mvm->add_stream_txqs)) {
-        struct iwl_mvm_txq* mvmtxq;
-        struct ieee80211_txq* txq;
-        uint8_t tid;
+  while (!list_empty(&mvm->add_stream_txqs)) {
+    struct iwl_mvm_txq* mvmtxq;
+    struct ieee80211_txq* txq;
+    uint8_t tid;
 
-        mvmtxq = list_first_entry(&mvm->add_stream_txqs, struct iwl_mvm_txq, list);
+    mvmtxq = list_first_entry(&mvm->add_stream_txqs, struct iwl_mvm_txq, list);
 
-        txq = container_of((void*)mvmtxq, struct ieee80211_txq, drv_priv);
-        tid = txq->tid;
-        if (tid == IEEE80211_NUM_TIDS) { tid = IWL_MAX_TID_COUNT; }
-
-        iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
-        list_del_init(&mvmtxq->list);
-        local_bh_disable();
-        iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
-        local_bh_enable();
+    txq = container_of((void*)mvmtxq, struct ieee80211_txq, drv_priv);
+    tid = txq->tid;
+    if (tid == IEEE80211_NUM_TIDS) {
+      tid = IWL_MAX_TID_COUNT;
     }
 
-    mutex_unlock(&mvm->mutex);
+    iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
+    list_del_init(&mvmtxq->list);
+    local_bh_disable();
+    iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+    local_bh_enable();
+  }
+
+  mutex_unlock(&mvm->mutex);
 }
 
 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm* mvm, struct ieee80211_sta* sta,
                                       enum nl80211_iftype vif_type) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    int queue;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  int queue;
 
-    /* queue reserving is disabled on new TX path */
-    if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { return 0; }
-
-    /* run the general cleanup/unsharing of queues */
-    iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
-
-    /* Make sure we have free resources for this STA */
-    if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
-        !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
-        (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == IWL_MVM_QUEUE_FREE)) {
-        queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
-    } else
-        queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE,
-                                        IWL_MVM_DQA_MAX_DATA_QUEUE);
-    if (queue < 0) {
-        /* try again - this time kick out a queue if needed */
-        queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
-        if (queue < 0) {
-            IWL_ERR(mvm, "No available queues for new station\n");
-            return -ENOSPC;
-        }
-    }
-    mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
-
-    mvmsta->reserved_queue = queue;
-
-    IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", queue, mvmsta->sta_id);
-
+  /* queue reserving is disabled on new TX path */
+  if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
     return 0;
+  }
+
+  /* run the general cleanup/unsharing of queues */
+  iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
+
+  /* Make sure we have free resources for this STA */
+  if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
+      !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
+      (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == IWL_MVM_QUEUE_FREE)) {
+    queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+  } else
+    queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE,
+                                    IWL_MVM_DQA_MAX_DATA_QUEUE);
+  if (queue < 0) {
+    /* try again - this time kick out a queue if needed */
+    queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
+    if (queue < 0) {
+      IWL_ERR(mvm, "No available queues for new station\n");
+      return -ENOSPC;
+    }
+  }
+  mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+
+  mvmsta->reserved_queue = queue;
+
+  IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", queue, mvmsta->sta_id);
+
+  return 0;
 }
 
 /*
@@ -1277,253 +1387,275 @@
  * Note that re-enabling aggregations isn't done in this function.
  */
 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm* mvm, struct ieee80211_sta* sta) {
-    struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-    unsigned int wdg = iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
-    int i;
-    struct iwl_trans_txq_scd_cfg cfg = {
-        .sta_id = mvm_sta->sta_id,
-        .frame_limit = IWL_FRAME_LIMIT,
-    };
+  struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+  unsigned int wdg = iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+  int i;
+  struct iwl_trans_txq_scd_cfg cfg = {
+      .sta_id = mvm_sta->sta_id,
+      .frame_limit = IWL_FRAME_LIMIT,
+  };
 
-    /* Make sure reserved queue is still marked as such (if allocated) */
-    if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
-        mvm->queue_info[mvm_sta->reserved_queue].status = IWL_MVM_QUEUE_RESERVED;
+  /* Make sure reserved queue is still marked as such (if allocated) */
+  if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+    mvm->queue_info[mvm_sta->reserved_queue].status = IWL_MVM_QUEUE_RESERVED;
+  }
+
+  for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+    struct iwl_mvm_tid_data* tid_data = &mvm_sta->tid_data[i];
+    int txq_id = tid_data->txq_id;
+    int ac;
+
+    if (txq_id == IWL_MVM_INVALID_QUEUE) {
+      continue;
     }
 
-    for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
-        struct iwl_mvm_tid_data* tid_data = &mvm_sta->tid_data[i];
-        int txq_id = tid_data->txq_id;
-        int ac;
+    ac = tid_to_mac80211_ac[i];
 
-        if (txq_id == IWL_MVM_INVALID_QUEUE) { continue; }
+    if (iwl_mvm_has_new_tx_api(mvm)) {
+      IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d\n", mvm_sta->sta_id, i);
+      txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, i, wdg);
+      tid_data->txq_id = txq_id;
 
-        ac = tid_to_mac80211_ac[i];
+      /*
+       * Since we don't set the seq number after reset, and HW
+       * sets it now, FW reset will cause the seq num to start
+       * at 0 again, so driver will need to update it
+       * internally as well, so it keeps in sync with real val
+       */
+      tid_data->seq_number = 0;
+    } else {
+      uint16_t seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 
-        if (iwl_mvm_has_new_tx_api(mvm)) {
-            IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d\n", mvm_sta->sta_id, i);
-            txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, i, wdg);
-            tid_data->txq_id = txq_id;
+      cfg.tid = i;
+      cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
+      cfg.aggregate =
+          (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
 
-            /*
-             * Since we don't set the seq number after reset, and HW
-             * sets it now, FW reset will cause the seq num to start
-             * at 0 again, so driver will need to update it
-             * internally as well, so it keeps in sync with real val
-             */
-            tid_data->seq_number = 0;
-        } else {
-            uint16_t seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+      IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d to queue %d\n", mvm_sta->sta_id, i,
+                          txq_id);
 
-            cfg.tid = i;
-            cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
-            cfg.aggregate =
-                (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
-
-            IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d to queue %d\n", mvm_sta->sta_id, i,
-                                txq_id);
-
-            iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
-            mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
-        }
+      iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
+      mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
     }
+  }
 }
 
 static int iwl_mvm_add_int_sta_common(struct iwl_mvm* mvm, struct iwl_mvm_int_sta* sta,
                                       const uint8_t* addr, uint16_t mac_id, uint16_t color) {
-    struct iwl_mvm_add_sta_cmd cmd;
-    int ret;
-    uint32_t status = ADD_STA_SUCCESS;
+  struct iwl_mvm_add_sta_cmd cmd;
+  int ret;
+  uint32_t status = ADD_STA_SUCCESS;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    memset(&cmd, 0, sizeof(cmd));
-    cmd.sta_id = sta->sta_id;
-    cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color));
-    if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
-        cmd.station_type = sta->type;
-    }
+  memset(&cmd, 0, sizeof(cmd));
+  cmd.sta_id = sta->sta_id;
+  cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color));
+  if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+    cmd.station_type = sta->type;
+  }
 
-    if (!iwl_mvm_has_new_tx_api(mvm)) { cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); }
-    cmd.tid_disable_tx = cpu_to_le16(0xffff);
+  if (!iwl_mvm_has_new_tx_api(mvm)) {
+    cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
+  }
+  cmd.tid_disable_tx = cpu_to_le16(0xffff);
 
-    if (addr) { memcpy(cmd.addr, addr, ETH_ALEN); }
+  if (addr) {
+    memcpy(cmd.addr, addr, ETH_ALEN);
+  }
 
-    ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status);
-    if (ret) { return ret; }
-
-    switch (status & IWL_ADD_STA_STATUS_MASK) {
-    case ADD_STA_SUCCESS:
-        IWL_DEBUG_INFO(mvm, "Internal station added.\n");
-        return 0;
-    default:
-        ret = -EIO;
-        IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", status);
-        break;
-    }
+  ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status);
+  if (ret) {
     return ret;
+  }
+
+  switch (status & IWL_ADD_STA_STATUS_MASK) {
+    case ADD_STA_SUCCESS:
+      IWL_DEBUG_INFO(mvm, "Internal station added.\n");
+      return 0;
+    default:
+      ret = -EIO;
+      IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", status);
+      break;
+  }
+  return ret;
 }
 
 int iwl_mvm_add_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif, struct ieee80211_sta* sta) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_rxq_dup_data* dup_data;
-    int i, ret, sta_id;
-    bool sta_update = false;
-    unsigned int sta_flags = 0;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_rxq_dup_data* dup_data;
+  int i, ret, sta_id;
+  bool sta_update = false;
+  unsigned int sta_flags = 0;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
-        sta_id = iwl_mvm_find_free_sta_id(mvm, ieee80211_vif_type_p2p(vif));
-    } else {
-        sta_id = mvm_sta->sta_id;
-    }
+  if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+    sta_id = iwl_mvm_find_free_sta_id(mvm, ieee80211_vif_type_p2p(vif));
+  } else {
+    sta_id = mvm_sta->sta_id;
+  }
 
-    if (sta_id == IWL_MVM_INVALID_STA) { return -ENOSPC; }
+  if (sta_id == IWL_MVM_INVALID_STA) {
+    return -ENOSPC;
+  }
 
-    spin_lock_init(&mvm_sta->lock);
+  spin_lock_init(&mvm_sta->lock);
 
-    /* if this is a HW restart re-alloc existing queues */
-    if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
-        struct iwl_mvm_int_sta tmp_sta = {
-            .sta_id = sta_id,
-            .type = mvm_sta->sta_type,
-        };
-
-        /*
-         * First add an empty station since allocating
-         * a queue requires a valid station
-         */
-        ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, mvmvif->id, mvmvif->color);
-        if (ret) { goto err; }
-
-        iwl_mvm_realloc_queues_after_restart(mvm, sta);
-        sta_update = true;
-        sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
-        goto update_fw;
-    }
-
-    mvm_sta->sta_id = sta_id;
-    mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
-    mvm_sta->vif = vif;
-    if (!mvm->trans->cfg->gen2) {
-        mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
-    } else {
-        mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
-    }
-    mvm_sta->tx_protection = 0;
-    mvm_sta->tt_tx_protection = false;
-    mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
-
-    /* HW restart, don't assume the memory has been zeroed */
-    mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
-    mvm_sta->tfd_queue_msk = 0;
-
-    /* for HW restart - reset everything but the sequence number */
-    for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
-        uint16_t seq = mvm_sta->tid_data[i].seq_number;
-        memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
-        mvm_sta->tid_data[i].seq_number = seq;
-
-        /*
-         * Mark all queues for this STA as unallocated and defer TX
-         * frames until the queue is allocated
-         */
-        mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
-    }
-
-    for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
-        struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]);
-
-        mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
-        INIT_LIST_HEAD(&mvmtxq->list);
-        spin_lock_init(&mvmtxq->tx_path_lock);
-    }
-
-    mvm_sta->agg_tids = 0;
-
-    if (iwl_mvm_has_new_rx_api(mvm) && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
-        int q;
-
-        dup_data = kcalloc(mvm->trans->num_rx_queues, sizeof(*dup_data), GFP_KERNEL);
-        if (!dup_data) { return -ENOMEM; }
-        /*
-         * Initialize all the last_seq values to 0xffff which can never
-         * compare equal to the frame's seq_ctrl in the check in
-         * iwl_mvm_is_dup() since the lower 4 bits are the fragment
-         * number and fragmented packets don't reach that function.
-         *
-         * This thus allows receiving a packet with seqno 0 and the
-         * retry bit set as the very first packet on a new TID.
-         */
-        for (q = 0; q < mvm->trans->num_rx_queues; q++) {
-            memset(dup_data[q].last_seq, 0xff, sizeof(dup_data[q].last_seq));
-        }
-        mvm_sta->dup_data = dup_data;
-    }
-
-    if (!iwl_mvm_has_new_tx_api(mvm)) {
-        ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif));
-        if (ret) { goto err; }
-    }
+  /* if this is a HW restart re-alloc existing queues */
+  if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+    struct iwl_mvm_int_sta tmp_sta = {
+        .sta_id = sta_id,
+        .type = mvm_sta->sta_type,
+    };
 
     /*
-     * if rs is registered with mac80211, then "add station" will be handled
-     * via the corresponding ops, otherwise need to notify rate scaling here
+     * First add an empty station since allocating
+     * a queue requires a valid station
      */
-    if (iwl_mvm_has_tlc_offload(mvm)) { iwl_mvm_rs_add_sta(mvm, mvm_sta); }
-
-    iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
-
-update_fw:
-    ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
-    if (ret) { goto err; }
-
-    if (vif->type == NL80211_IFTYPE_STATION) {
-        if (!sta->tdls) {
-            WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
-            mvmvif->ap_sta_id = sta_id;
-        } else {
-            WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
-        }
+    ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, mvmvif->id, mvmvif->color);
+    if (ret) {
+      goto err;
     }
 
-    rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+    iwl_mvm_realloc_queues_after_restart(mvm, sta);
+    sta_update = true;
+    sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
+    goto update_fw;
+  }
 
-    return 0;
+  mvm_sta->sta_id = sta_id;
+  mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
+  mvm_sta->vif = vif;
+  if (!mvm->trans->cfg->gen2) {
+    mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+  } else {
+    mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
+  }
+  mvm_sta->tx_protection = 0;
+  mvm_sta->tt_tx_protection = false;
+  mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
+
+  /* HW restart, don't assume the memory has been zeroed */
+  mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
+  mvm_sta->tfd_queue_msk = 0;
+
+  /* for HW restart - reset everything but the sequence number */
+  for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+    uint16_t seq = mvm_sta->tid_data[i].seq_number;
+    memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
+    mvm_sta->tid_data[i].seq_number = seq;
+
+    /*
+     * Mark all queues for this STA as unallocated and defer TX
+     * frames until the queue is allocated
+     */
+    mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+  }
+
+  for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+    struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+    mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+    INIT_LIST_HEAD(&mvmtxq->list);
+    spin_lock_init(&mvmtxq->tx_path_lock);
+  }
+
+  mvm_sta->agg_tids = 0;
+
+  if (iwl_mvm_has_new_rx_api(mvm) && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+    int q;
+
+    dup_data = kcalloc(mvm->trans->num_rx_queues, sizeof(*dup_data), GFP_KERNEL);
+    if (!dup_data) {
+      return -ENOMEM;
+    }
+    /*
+     * Initialize all the last_seq values to 0xffff which can never
+     * compare equal to the frame's seq_ctrl in the check in
+     * iwl_mvm_is_dup() since the lower 4 bits are the fragment
+     * number and fragmented packets don't reach that function.
+     *
+     * This thus allows receiving a packet with seqno 0 and the
+     * retry bit set as the very first packet on a new TID.
+     */
+    for (q = 0; q < mvm->trans->num_rx_queues; q++) {
+      memset(dup_data[q].last_seq, 0xff, sizeof(dup_data[q].last_seq));
+    }
+    mvm_sta->dup_data = dup_data;
+  }
+
+  if (!iwl_mvm_has_new_tx_api(mvm)) {
+    ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif));
+    if (ret) {
+      goto err;
+    }
+  }
+
+  /*
+   * if rs is registered with mac80211, then "add station" will be handled
+   * via the corresponding ops, otherwise need to notify rate scaling here
+   */
+  if (iwl_mvm_has_tlc_offload(mvm)) {
+    iwl_mvm_rs_add_sta(mvm, mvm_sta);
+  }
+
+  iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
+
+update_fw:
+  ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
+  if (ret) {
+    goto err;
+  }
+
+  if (vif->type == NL80211_IFTYPE_STATION) {
+    if (!sta->tdls) {
+      WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
+      mvmvif->ap_sta_id = sta_id;
+    } else {
+      WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
+    }
+  }
+
+  rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+
+  return 0;
 
 err:
-    return ret;
+  return ret;
 }
 
 int iwl_mvm_drain_sta(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta, bool drain) {
-    struct iwl_mvm_add_sta_cmd cmd = {};
-    int ret;
-    uint32_t status;
+  struct iwl_mvm_add_sta_cmd cmd = {};
+  int ret;
+  uint32_t status;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
-    cmd.sta_id = mvmsta->sta_id;
-    cmd.add_modify = STA_MODE_MODIFY;
-    cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
-    cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
+  cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+  cmd.sta_id = mvmsta->sta_id;
+  cmd.add_modify = STA_MODE_MODIFY;
+  cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
+  cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
 
-    status = ADD_STA_SUCCESS;
-    ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status);
-    if (ret) { return ret; }
-
-    switch (status & IWL_ADD_STA_STATUS_MASK) {
-    case ADD_STA_SUCCESS:
-        IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", mvmsta->sta_id);
-        break;
-    default:
-        ret = -EIO;
-        IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", mvmsta->sta_id);
-        break;
-    }
-
+  status = ADD_STA_SUCCESS;
+  ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status);
+  if (ret) {
     return ret;
+  }
+
+  switch (status & IWL_ADD_STA_STATUS_MASK) {
+    case ADD_STA_SUCCESS:
+      IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", mvmsta->sta_id);
+      break;
+    default:
+      ret = -EIO;
+      IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", mvmsta->sta_id);
+      break;
+  }
+
+  return ret;
 }
 
 /*
@@ -1532,281 +1664,299 @@
  * only).
  */
 static int iwl_mvm_rm_sta_common(struct iwl_mvm* mvm, uint8_t sta_id) {
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
-        .sta_id = sta_id,
-    };
-    int ret;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
+      .sta_id = sta_id,
+  };
+  int ret;
 
-    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
+  sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
 
-    /* Note: internal stations are marked as error values */
-    if (!sta) {
-        IWL_ERR(mvm, "Invalid station id\n");
-        return -EINVAL;
-    }
+  /* Note: internal stations are marked as error values */
+  if (!sta) {
+    IWL_ERR(mvm, "Invalid station id\n");
+    return -EINVAL;
+  }
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, sizeof(rm_sta_cmd), &rm_sta_cmd);
-    if (ret) {
-        IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
-        return ret;
-    }
+  ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, sizeof(rm_sta_cmd), &rm_sta_cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
+    return ret;
+  }
 
-    return 0;
+  return 0;
 }
 
 static void iwl_mvm_disable_sta_queues(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                        struct ieee80211_sta* sta) {
-    struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-    int i;
+  struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+  int i;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
-        if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) { continue; }
-
-        iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i, 0);
-        mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+  for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+    if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) {
+      continue;
     }
 
-    for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
-        struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]);
+    iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i, 0);
+    mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+  }
 
-        mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
-    }
+  for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+    struct iwl_mvm_txq* mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+    mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+  }
 }
 
 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvm_sta) {
-    int i;
+  int i;
 
-    for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
-        uint16_t txq_id;
-        int ret;
+  for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+    uint16_t txq_id;
+    int ret;
 
-        spin_lock_bh(&mvm_sta->lock);
-        txq_id = mvm_sta->tid_data[i].txq_id;
-        spin_unlock_bh(&mvm_sta->lock);
+    spin_lock_bh(&mvm_sta->lock);
+    txq_id = mvm_sta->tid_data[i].txq_id;
+    spin_unlock_bh(&mvm_sta->lock);
 
-        if (txq_id == IWL_MVM_INVALID_QUEUE) { continue; }
-
-        ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
-        if (ret) { return ret; }
+    if (txq_id == IWL_MVM_INVALID_QUEUE) {
+      continue;
     }
 
-    return 0;
+    ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
+    if (ret) {
+      return ret;
+    }
+  }
+
+  return 0;
 }
 
 int iwl_mvm_rm_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif, struct ieee80211_sta* sta) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-    uint8_t sta_id = mvm_sta->sta_id;
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+  uint8_t sta_id = mvm_sta->sta_id;
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (iwl_mvm_has_new_rx_api(mvm)) { kfree(mvm_sta->dup_data); }
+  if (iwl_mvm_has_new_rx_api(mvm)) {
+    kfree(mvm_sta->dup_data);
+  }
 
-    ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
-    if (ret) { return ret; }
-
-    /* flush its queues here since we are freeing mvm_sta */
-    ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
-    if (ret) { return ret; }
-    if (iwl_mvm_has_new_tx_api(mvm)) {
-        ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
-    } else {
-        uint32_t q_mask = mvm_sta->tfd_queue_msk;
-
-        ret = iwl_trans_wait_tx_queues_empty(mvm->trans, q_mask);
-    }
-    if (ret) { return ret; }
-
-    ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
-
-    iwl_mvm_disable_sta_queues(mvm, vif, sta);
-
-    /* If there is a TXQ still marked as reserved - free it */
-    if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
-        uint8_t reserved_txq = mvm_sta->reserved_queue;
-        enum iwl_mvm_queue_status* status;
-
-        /*
-         * If no traffic has gone through the reserved TXQ - it
-         * is still marked as IWL_MVM_QUEUE_RESERVED, and
-         * should be manually marked as free again
-         */
-        status = &mvm->queue_info[reserved_txq].status;
-        if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && (*status != IWL_MVM_QUEUE_FREE),
-                 "sta_id %d reserved txq %d status %d", sta_id, reserved_txq, *status)) {
-            return -EINVAL;
-        }
-
-        *status = IWL_MVM_QUEUE_FREE;
-    }
-
-    if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id == sta_id) {
-        /* if associated - we can't remove the AP STA now */
-        if (vif->bss_conf.assoc) { return ret; }
-
-        /* unassoc - go ahead - remove the AP STA now */
-        mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
-
-        /* clear d0i3_ap_sta_id if no longer relevant */
-        if (mvm->d0i3_ap_sta_id == sta_id) { mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; }
-    }
-
-    /*
-     * This shouldn't happen - the TDLS channel switch should be canceled
-     * before the STA is removed.
-     */
-    if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
-        mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
-        cancel_delayed_work(&mvm->tdls_cs.dwork);
-    }
-
-    /*
-     * Make sure that the tx response code sees the station as -EBUSY and
-     * calls the drain worker.
-     */
-    spin_lock_bh(&mvm_sta->lock);
-    spin_unlock_bh(&mvm_sta->lock);
-
-    ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
-    RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
-
+  ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+  if (ret) {
     return ret;
+  }
+
+  /* flush its queues here since we are freeing mvm_sta */
+  ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
+  if (ret) {
+    return ret;
+  }
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
+  } else {
+    uint32_t q_mask = mvm_sta->tfd_queue_msk;
+
+    ret = iwl_trans_wait_tx_queues_empty(mvm->trans, q_mask);
+  }
+  if (ret) {
+    return ret;
+  }
+
+  ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
+
+  iwl_mvm_disable_sta_queues(mvm, vif, sta);
+
+  /* If there is a TXQ still marked as reserved - free it */
+  if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+    uint8_t reserved_txq = mvm_sta->reserved_queue;
+    enum iwl_mvm_queue_status* status;
+
+    /*
+     * If no traffic has gone through the reserved TXQ - it
+     * is still marked as IWL_MVM_QUEUE_RESERVED, and
+     * should be manually marked as free again
+     */
+    status = &mvm->queue_info[reserved_txq].status;
+    if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && (*status != IWL_MVM_QUEUE_FREE),
+             "sta_id %d reserved txq %d status %d", sta_id, reserved_txq, *status)) {
+      return -EINVAL;
+    }
+
+    *status = IWL_MVM_QUEUE_FREE;
+  }
+
+  if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id == sta_id) {
+    /* if associated - we can't remove the AP STA now */
+    if (vif->bss_conf.assoc) {
+      return ret;
+    }
+
+    /* unassoc - go ahead - remove the AP STA now */
+    mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+
+    /* clear d0i3_ap_sta_id if no longer relevant */
+    if (mvm->d0i3_ap_sta_id == sta_id) {
+      mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+    }
+  }
+
+  /*
+   * This shouldn't happen - the TDLS channel switch should be canceled
+   * before the STA is removed.
+   */
+  if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
+    mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
+    cancel_delayed_work(&mvm->tdls_cs.dwork);
+  }
+
+  /*
+   * Make sure that the tx response code sees the station as -EBUSY and
+   * calls the drain worker.
+   */
+  spin_lock_bh(&mvm_sta->lock);
+  spin_unlock_bh(&mvm_sta->lock);
+
+  ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
+  RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
+
+  return ret;
 }
 
 int iwl_mvm_rm_sta_id(struct iwl_mvm* mvm, struct ieee80211_vif* vif, uint8_t sta_id) {
-    int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
+  int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
-    return ret;
+  RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
+  return ret;
 }
 
 int iwl_mvm_allocate_int_sta(struct iwl_mvm* mvm, struct iwl_mvm_int_sta* sta, uint32_t qmask,
                              enum nl80211_iftype iftype, enum iwl_sta_type type) {
-    if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
-        sta->sta_id == IWL_MVM_INVALID_STA) {
-        sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
-        if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) { return -ENOSPC; }
+  if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || sta->sta_id == IWL_MVM_INVALID_STA) {
+    sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
+    if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) {
+      return -ENOSPC;
     }
+  }
 
-    sta->tfd_queue_msk = qmask;
-    sta->type = type;
+  sta->tfd_queue_msk = qmask;
+  sta->type = type;
 
-    /* put a non-NULL value so iterating over the stations won't stop */
-    rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
-    return 0;
+  /* put a non-NULL value so iterating over the stations won't stop */
+  rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
+  return 0;
 }
 
 void iwl_mvm_dealloc_int_sta(struct iwl_mvm* mvm, struct iwl_mvm_int_sta* sta) {
-    RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
-    memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
-    sta->sta_id = IWL_MVM_INVALID_STA;
+  RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+  memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
+  sta->sta_id = IWL_MVM_INVALID_STA;
 }
 
 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm* mvm, uint16_t* queue, uint8_t sta_id,
                                           uint8_t fifo) {
-    unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect
-                                   ? mvm->cfg->base_params->wd_timeout
-                                   : IWL_WATCHDOG_DISABLED;
+  unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? mvm->cfg->base_params->wd_timeout
+                                                                 : IWL_WATCHDOG_DISABLED;
 
-    if (iwl_mvm_has_new_tx_api(mvm)) {
-        int tvqm_queue = iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, wdg_timeout);
-        *queue = tvqm_queue;
-    } else {
-        struct iwl_trans_txq_scd_cfg cfg = {
-            .fifo = fifo,
-            .sta_id = sta_id,
-            .tid = IWL_MAX_TID_COUNT,
-            .aggregate = false,
-            .frame_limit = IWL_FRAME_LIMIT,
-        };
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    int tvqm_queue = iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, wdg_timeout);
+    *queue = tvqm_queue;
+  } else {
+    struct iwl_trans_txq_scd_cfg cfg = {
+        .fifo = fifo,
+        .sta_id = sta_id,
+        .tid = IWL_MAX_TID_COUNT,
+        .aggregate = false,
+        .frame_limit = IWL_FRAME_LIMIT,
+    };
 
-        iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
-    }
+    iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
+  }
 }
 
 int iwl_mvm_add_aux_sta(struct iwl_mvm* mvm) {
-    int ret;
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* Allocate aux station and assign to it the aux queue */
-    ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
-                                   NL80211_IFTYPE_UNSPECIFIED, IWL_STA_AUX_ACTIVITY);
-    if (ret) { return ret; }
+  /* Allocate aux station and assign to it the aux queue */
+  ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
+                                 NL80211_IFTYPE_UNSPECIFIED, IWL_STA_AUX_ACTIVITY);
+  if (ret) {
+    return ret;
+  }
 
-    /* Map Aux queue to fifo - needs to happen before adding Aux station */
-    if (!iwl_mvm_has_new_tx_api(mvm))
-        iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, mvm->aux_sta.sta_id,
-                                      IWL_MVM_TX_FIFO_MCAST);
+  /* Map Aux queue to fifo - needs to happen before adding Aux station */
+  if (!iwl_mvm_has_new_tx_api(mvm))
+    iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, mvm->aux_sta.sta_id, IWL_MVM_TX_FIFO_MCAST);
 
-    ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, MAC_INDEX_AUX, 0);
-    if (ret) {
-        iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
-        return ret;
-    }
+  ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, MAC_INDEX_AUX, 0);
+  if (ret) {
+    iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+    return ret;
+  }
 
-    /*
-     * For 22000 firmware and on we cannot add queue to a station unknown
-     * to firmware so enable queue here - after the station was added
-     */
-    if (iwl_mvm_has_new_tx_api(mvm))
-        iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, mvm->aux_sta.sta_id,
-                                      IWL_MVM_TX_FIFO_MCAST);
+  /*
+   * For 22000 firmware and on we cannot add queue to a station unknown
+   * to firmware so enable queue here - after the station was added
+   */
+  if (iwl_mvm_has_new_tx_api(mvm))
+    iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, mvm->aux_sta.sta_id, IWL_MVM_TX_FIFO_MCAST);
 
-    return 0;
+  return 0;
 }
 
 int iwl_mvm_add_snif_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* Map snif queue to fifo - must happen before adding snif station */
-    if (!iwl_mvm_has_new_tx_api(mvm))
-        iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, mvm->snif_sta.sta_id,
-                                      IWL_MVM_TX_FIFO_BE);
+  /* Map snif queue to fifo - must happen before adding snif station */
+  if (!iwl_mvm_has_new_tx_api(mvm))
+    iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, mvm->snif_sta.sta_id, IWL_MVM_TX_FIFO_BE);
 
-    ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, mvmvif->id, 0);
-    if (ret) { return ret; }
+  ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, mvmvif->id, 0);
+  if (ret) {
+    return ret;
+  }
 
-    /*
-     * For 22000 firmware and on we cannot add queue to a station unknown
-     * to firmware so enable queue here - after the station was added
-     */
-    if (iwl_mvm_has_new_tx_api(mvm))
-        iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, mvm->snif_sta.sta_id,
-                                      IWL_MVM_TX_FIFO_BE);
+  /*
+   * For 22000 firmware and on we cannot add queue to a station unknown
+   * to firmware so enable queue here - after the station was added
+   */
+  if (iwl_mvm_has_new_tx_api(mvm))
+    iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, mvm->snif_sta.sta_id, IWL_MVM_TX_FIFO_BE);
 
-    return 0;
+  return 0;
 }
 
 int iwl_mvm_rm_snif_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    int ret;
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
-    ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
-    if (ret) { IWL_WARN(mvm, "Failed sending remove station\n"); }
+  iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
+  ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
+  if (ret) {
+    IWL_WARN(mvm, "Failed sending remove station\n");
+  }
 
-    return ret;
+  return ret;
 }
 
-void iwl_mvm_dealloc_snif_sta(struct iwl_mvm* mvm) {
-    iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
-}
+void iwl_mvm_dealloc_snif_sta(struct iwl_mvm* mvm) { iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); }
 
 void iwl_mvm_del_aux_sta(struct iwl_mvm* mvm) {
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+  iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
 }
 
 /*
@@ -1818,111 +1968,121 @@
  * @bsta: the broadcast station to add.
  */
 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_int_sta* bsta = &mvmvif->bcast_sta;
-    static const uint8_t _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
-    const uint8_t* baddr = _baddr;
-    int queue;
-    int ret;
-    unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
-    struct iwl_trans_txq_scd_cfg cfg = {
-        .fifo = IWL_MVM_TX_FIFO_VO,
-        .sta_id = mvmvif->bcast_sta.sta_id,
-        .tid = IWL_MAX_TID_COUNT,
-        .aggregate = false,
-        .frame_limit = IWL_FRAME_LIMIT,
-    };
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_int_sta* bsta = &mvmvif->bcast_sta;
+  static const uint8_t _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+  const uint8_t* baddr = _baddr;
+  int queue;
+  int ret;
+  unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+  struct iwl_trans_txq_scd_cfg cfg = {
+      .fifo = IWL_MVM_TX_FIFO_VO,
+      .sta_id = mvmvif->bcast_sta.sta_id,
+      .tid = IWL_MAX_TID_COUNT,
+      .aggregate = false,
+      .frame_limit = IWL_FRAME_LIMIT,
+  };
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (!iwl_mvm_has_new_tx_api(mvm)) {
-        if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) {
-            queue = mvm->probe_queue;
-        } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-            queue = mvm->p2p_dev_queue;
-        } else if (WARN(1, "Missing required TXQ for adding bcast STA\n")) {
-            return -EINVAL;
-        }
-
-        bsta->tfd_queue_msk |= BIT(queue);
-
-        iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+  if (!iwl_mvm_has_new_tx_api(mvm)) {
+    if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) {
+      queue = mvm->probe_queue;
+    } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+      queue = mvm->p2p_dev_queue;
+    } else if (WARN(1, "Missing required TXQ for adding bcast STA\n")) {
+      return -EINVAL;
     }
 
-    if (vif->type == NL80211_IFTYPE_ADHOC) { baddr = vif->bss_conf.bssid; }
+    bsta->tfd_queue_msk |= BIT(queue);
 
-    if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) { return -ENOSPC; }
+    iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+  }
 
-    ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, mvmvif->id, mvmvif->color);
-    if (ret) { return ret; }
+  if (vif->type == NL80211_IFTYPE_ADHOC) {
+    baddr = vif->bss_conf.bssid;
+  }
 
-    /*
-     * For 22000 firmware and on we cannot add queue to a station unknown
-     * to firmware so enable queue here - after the station was added
-     */
-    if (iwl_mvm_has_new_tx_api(mvm)) {
-        queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, IWL_MAX_TID_COUNT, wdg_timeout);
+  if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) {
+    return -ENOSPC;
+  }
 
-        if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) {
-            mvm->probe_queue = queue;
-        } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-            mvm->p2p_dev_queue = queue;
-        }
+  ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, mvmvif->id, mvmvif->color);
+  if (ret) {
+    return ret;
+  }
+
+  /*
+   * For 22000 firmware and on we cannot add queue to a station unknown
+   * to firmware so enable queue here - after the station was added
+   */
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, IWL_MAX_TID_COUNT, wdg_timeout);
+
+    if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) {
+      mvm->probe_queue = queue;
+    } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+      mvm->p2p_dev_queue = queue;
     }
+  }
 
-    return 0;
+  return 0;
 }
 
 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int queue;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int queue;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
+  iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
 
-    switch (vif->type) {
+  switch (vif->type) {
     case NL80211_IFTYPE_AP:
     case NL80211_IFTYPE_ADHOC:
-        queue = mvm->probe_queue;
-        break;
+      queue = mvm->probe_queue;
+      break;
     case NL80211_IFTYPE_P2P_DEVICE:
-        queue = mvm->p2p_dev_queue;
-        break;
+      queue = mvm->p2p_dev_queue;
+      break;
     default:
-        WARN(1, "Can't free bcast queue on vif type %d\n", vif->type);
-        return;
-    }
+      WARN(1, "Can't free bcast queue on vif type %d\n", vif->type);
+      return;
+  }
 
-    iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
-    if (iwl_mvm_has_new_tx_api(mvm)) { return; }
+  iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    return;
+  }
 
-    WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
-    mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
+  WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
+  mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
 }
 
 /* Send the FW a request to remove the station from it's internal data
  * structures, but DO NOT remove the entry from the local data structures. */
 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    iwl_mvm_free_bcast_sta_queues(mvm, vif);
+  iwl_mvm_free_bcast_sta_queues(mvm, vif);
 
-    ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
-    if (ret) { IWL_WARN(mvm, "Failed sending remove station\n"); }
-    return ret;
+  ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
+  if (ret) {
+    IWL_WARN(mvm, "Failed sending remove station\n");
+  }
+  return ret;
 }
 
 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, ieee80211_vif_type_p2p(vif),
-                                    IWL_STA_GENERAL_PURPOSE);
+  return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, ieee80211_vif_type_p2p(vif),
+                                  IWL_STA_GENERAL_PURPOSE);
 }
 
 /* Allocate a new station entry for the broadcast station to the given vif,
@@ -1933,26 +2093,30 @@
  * @vif: the interface to which the broadcast station is added
  * @bsta: the broadcast station to add. */
 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_int_sta* bsta = &mvmvif->bcast_sta;
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_int_sta* bsta = &mvmvif->bcast_sta;
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
-    if (ret) { return ret; }
-
-    ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
-
-    if (ret) { iwl_mvm_dealloc_int_sta(mvm, bsta); }
-
+  ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
+  if (ret) {
     return ret;
+  }
+
+  ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+
+  if (ret) {
+    iwl_mvm_dealloc_int_sta(mvm, bsta);
+  }
+
+  return ret;
 }
 
 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
+  iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
 }
 
 /*
@@ -1960,15 +2124,15 @@
  * structures, and in addition remove it from the local data structure.
  */
 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    int ret;
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
+  ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
 
-    iwl_mvm_dealloc_bcast_sta(mvm, vif);
+  iwl_mvm_dealloc_bcast_sta(mvm, vif);
 
-    return ret;
+  return ret;
 }
 
 /*
@@ -1980,84 +2144,94 @@
  * @vif: the interface to which the multicast station is added
  */
 int iwl_mvm_add_mcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_int_sta* msta = &mvmvif->mcast_sta;
-    static const uint8_t _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
-    const uint8_t* maddr = _maddr;
-    struct iwl_trans_txq_scd_cfg cfg = {
-        .fifo = IWL_MVM_TX_FIFO_MCAST,
-        .sta_id = msta->sta_id,
-        .tid = 0,
-        .aggregate = false,
-        .frame_limit = IWL_FRAME_LIMIT,
-    };
-    unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_int_sta* msta = &mvmvif->mcast_sta;
+  static const uint8_t _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+  const uint8_t* maddr = _maddr;
+  struct iwl_trans_txq_scd_cfg cfg = {
+      .fifo = IWL_MVM_TX_FIFO_MCAST,
+      .sta_id = msta->sta_id,
+      .tid = 0,
+      .aggregate = false,
+      .frame_limit = IWL_FRAME_LIMIT,
+  };
+  unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC)) {
-        return -ENOTSUPP;
+  if (WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC)) {
+    return -ENOTSUPP;
+  }
+
+  /*
+   * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+   * invalid, so make sure we use the queue we want.
+   * Note that this is done here as we want to avoid making DQA
+   * changes in mac80211 layer.
+   */
+  if (vif->type == NL80211_IFTYPE_ADHOC) {
+    mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+  }
+
+  /*
+   * While in previous FWs we had to exclude cab queue from TFD queue
+   * mask, now it is needed as any other queue.
+   */
+  if (!iwl_mvm_has_new_tx_api(mvm) &&
+      fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+    iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout);
+    msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
+  }
+  ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, mvmvif->id, mvmvif->color);
+  if (ret) {
+    iwl_mvm_dealloc_int_sta(mvm, msta);
+    return ret;
+  }
+
+  /*
+   * Enable cab queue after the ADD_STA command is sent.
+   * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
+   * command with unknown station id, and for FW that doesn't support
+   * station API since the cab queue is not included in the
+   * tfd_queue_mask.
+   */
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, 0, timeout);
+    mvmvif->cab_queue = queue;
+  } else if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+    iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout);
+  }
+
+  if (mvmvif->ap_wep_key) {
+    uint8_t key_offset = iwl_mvm_set_fw_key_idx(mvm);
+
+    if (key_offset == STA_KEY_IDX_INVALID) {
+      return -ENOSPC;
     }
 
-    /*
-     * In IBSS, ieee80211_check_queues() sets the cab_queue to be
-     * invalid, so make sure we use the queue we want.
-     * Note that this is done here as we want to avoid making DQA
-     * changes in mac80211 layer.
-     */
-    if (vif->type == NL80211_IFTYPE_ADHOC) { mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; }
-
-    /*
-     * While in previous FWs we had to exclude cab queue from TFD queue
-     * mask, now it is needed as any other queue.
-     */
-    if (!iwl_mvm_has_new_tx_api(mvm) &&
-        fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
-        iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout);
-        msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
-    }
-    ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, mvmvif->id, mvmvif->color);
+    ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id, mvmvif->ap_wep_key, 1, 0, NULL, 0,
+                               key_offset, 0);
     if (ret) {
-        iwl_mvm_dealloc_int_sta(mvm, msta);
-        return ret;
+      return ret;
+    }
+  }
+
+  if (mvmvif->ap_wep_key) {
+    uint8_t key_offset = iwl_mvm_set_fw_key_idx(mvm);
+
+    if (key_offset == STA_KEY_IDX_INVALID) {
+      return -ENOSPC;
     }
 
-    /*
-     * Enable cab queue after the ADD_STA command is sent.
-     * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
-     * command with unknown station id, and for FW that doesn't support
-     * station API since the cab queue is not included in the
-     * tfd_queue_mask.
-     */
-    if (iwl_mvm_has_new_tx_api(mvm)) {
-        int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, 0, timeout);
-        mvmvif->cab_queue = queue;
-    } else if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
-        iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout);
+    ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id, mvmvif->ap_wep_key, 1, 0, NULL, 0,
+                               key_offset, 0);
+    if (ret) {
+      return ret;
     }
+  }
 
-    if (mvmvif->ap_wep_key) {
-        uint8_t key_offset = iwl_mvm_set_fw_key_idx(mvm);
-
-        if (key_offset == STA_KEY_IDX_INVALID) { return -ENOSPC; }
-
-        ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id, mvmvif->ap_wep_key, 1, 0, NULL, 0,
-                                   key_offset, 0);
-        if (ret) { return ret; }
-    }
-
-    if (mvmvif->ap_wep_key) {
-        uint8_t key_offset = iwl_mvm_set_fw_key_idx(mvm);
-
-        if (key_offset == STA_KEY_IDX_INVALID) { return -ENOSPC; }
-
-        ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id, mvmvif->ap_wep_key, 1, 0, NULL, 0,
-                                   key_offset, 0);
-        if (ret) { return ret; }
-    }
-
-    return 0;
+  return 0;
 }
 
 /*
@@ -2065,282 +2239,302 @@
  * structures, and in addition remove it from the local data structure.
  */
 int iwl_mvm_rm_mcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    int ret;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
+  iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
 
-    iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
+  iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
 
-    ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
-    if (ret) { IWL_WARN(mvm, "Failed sending remove station\n"); }
+  ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
+  if (ret) {
+    IWL_WARN(mvm, "Failed sending remove station\n");
+  }
 
-    return ret;
+  return ret;
 }
 
 #define IWL_MAX_RX_BA_SESSIONS 16
 
 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm* mvm, uint8_t baid) {
-    struct iwl_mvm_delba_notif notif = {
-        .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
-        .metadata.sync = 1,
-        .delba.baid = baid,
-    };
-    iwl_mvm_sync_rx_queues_internal(mvm, (void*)&notif, sizeof(notif));
+  struct iwl_mvm_delba_notif notif = {
+      .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
+      .metadata.sync = 1,
+      .delba.baid = baid,
+  };
+  iwl_mvm_sync_rx_queues_internal(mvm, (void*)&notif, sizeof(notif));
 };
 
 static void iwl_mvm_free_reorder(struct iwl_mvm* mvm, struct iwl_mvm_baid_data* data) {
-    int i;
+  int i;
 
-    iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
+  iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
 
-    for (i = 0; i < mvm->trans->num_rx_queues; i++) {
-        int j;
-        struct iwl_mvm_reorder_buffer* reorder_buf = &data->reorder_buf[i];
-        struct iwl_mvm_reorder_buf_entry* entries = &data->entries[i * data->entries_per_queue];
+  for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+    int j;
+    struct iwl_mvm_reorder_buffer* reorder_buf = &data->reorder_buf[i];
+    struct iwl_mvm_reorder_buf_entry* entries = &data->entries[i * data->entries_per_queue];
 
-        spin_lock_bh(&reorder_buf->lock);
-        if (likely(!reorder_buf->num_stored)) {
-            spin_unlock_bh(&reorder_buf->lock);
-            continue;
-        }
-
-        /*
-         * This shouldn't happen in regular DELBA since the internal
-         * delBA notification should trigger a release of all frames in
-         * the reorder buffer.
-         */
-        WARN_ON(1);
-
-        for (j = 0; j < reorder_buf->buf_size; j++) {
-            __skb_queue_purge(&entries[j].e.frames);
-        }
-        /*
-         * Prevent timer re-arm. This prevents a very far fetched case
-         * where we timed out on the notification. There may be prior
-         * RX frames pending in the RX queue before the notification
-         * that might get processed between now and the actual deletion
-         * and we would re-arm the timer although we are deleting the
-         * reorder buffer.
-         */
-        reorder_buf->removed = true;
-        spin_unlock_bh(&reorder_buf->lock);
-        del_timer_sync(&reorder_buf->reorder_timer);
+    spin_lock_bh(&reorder_buf->lock);
+    if (likely(!reorder_buf->num_stored)) {
+      spin_unlock_bh(&reorder_buf->lock);
+      continue;
     }
+
+    /*
+     * This shouldn't happen in regular DELBA since the internal
+     * delBA notification should trigger a release of all frames in
+     * the reorder buffer.
+     */
+    WARN_ON(1);
+
+    for (j = 0; j < reorder_buf->buf_size; j++) {
+      __skb_queue_purge(&entries[j].e.frames);
+    }
+    /*
+     * Prevent timer re-arm. This prevents a very far-fetched case
+     * where we timed out on the notification. There may be prior
+     * RX frames pending in the RX queue before the notification
+     * that might get processed between now and the actual deletion
+     * and we would re-arm the timer although we are deleting the
+     * reorder buffer.
+     */
+    reorder_buf->removed = true;
+    spin_unlock_bh(&reorder_buf->lock);
+    del_timer_sync(&reorder_buf->reorder_timer);
+  }
 }
 
 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm* mvm, struct iwl_mvm_baid_data* data,
                                         uint16_t ssn, uint16_t buf_size) {
-    int i;
+  int i;
 
-    for (i = 0; i < mvm->trans->num_rx_queues; i++) {
-        struct iwl_mvm_reorder_buffer* reorder_buf = &data->reorder_buf[i];
-        struct iwl_mvm_reorder_buf_entry* entries = &data->entries[i * data->entries_per_queue];
-        int j;
+  for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+    struct iwl_mvm_reorder_buffer* reorder_buf = &data->reorder_buf[i];
+    struct iwl_mvm_reorder_buf_entry* entries = &data->entries[i * data->entries_per_queue];
+    int j;
 
-        reorder_buf->num_stored = 0;
-        reorder_buf->head_sn = ssn;
-        reorder_buf->buf_size = buf_size;
-        /* rx reorder timer */
-        timer_setup(&reorder_buf->reorder_timer, iwl_mvm_reorder_timer_expired, 0);
-        spin_lock_init(&reorder_buf->lock);
-        reorder_buf->mvm = mvm;
-        reorder_buf->queue = i;
-        reorder_buf->valid = false;
-        for (j = 0; j < reorder_buf->buf_size; j++) {
-            __skb_queue_head_init(&entries[j].e.frames);
-        }
+    reorder_buf->num_stored = 0;
+    reorder_buf->head_sn = ssn;
+    reorder_buf->buf_size = buf_size;
+    /* rx reorder timer */
+    timer_setup(&reorder_buf->reorder_timer, iwl_mvm_reorder_timer_expired, 0);
+    spin_lock_init(&reorder_buf->lock);
+    reorder_buf->mvm = mvm;
+    reorder_buf->queue = i;
+    reorder_buf->valid = false;
+    for (j = 0; j < reorder_buf->buf_size; j++) {
+      __skb_queue_head_init(&entries[j].e.frames);
     }
+  }
 }
 
 int iwl_mvm_sta_rx_agg(struct iwl_mvm* mvm, struct ieee80211_sta* sta, int tid, uint16_t ssn,
                        bool start, uint16_t buf_size, uint16_t timeout) {
-    struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_add_sta_cmd cmd = {};
-    struct iwl_mvm_baid_data* baid_data = NULL;
-    int ret;
-    uint32_t status;
+  struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_add_sta_cmd cmd = {};
+  struct iwl_mvm_baid_data* baid_data = NULL;
+  int ret;
+  uint32_t status;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
-        IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
-        return -ENOSPC;
-    }
+  if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
+    IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
+    return -ENOSPC;
+  }
 
-    if (iwl_mvm_has_new_rx_api(mvm) && start) {
-        uint16_t reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
+  if (iwl_mvm_has_new_rx_api(mvm) && start) {
+    uint16_t reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
 
-        /* sparse doesn't like the __align() so don't check */
+    /* sparse doesn't like the __align() so don't check */
 #ifndef __CHECKER__
-        /*
-         * The division below will be OK if either the cache line size
-         * can be divided by the entry size (ALIGN will round up) or if
-         * if the entry size can be divided by the cache line size, in
-         * which case the ALIGN() will do nothing.
-         */
-        BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
-                     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
+    /*
+     * The division below will be OK if either the cache line size
+     * can be divided by the entry size (ALIGN will round up) or
+     * if the entry size can be divided by the cache line size, in
+     * which case the ALIGN() will do nothing.
+     */
+    BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
+                 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
 #endif
 
-        /*
-         * Upward align the reorder buffer size to fill an entire cache
-         * line for each queue, to avoid sharing cache lines between
-         * different queues.
-         */
-        reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
+    /*
+     * Upward align the reorder buffer size to fill an entire cache
+     * line for each queue, to avoid sharing cache lines between
+     * different queues.
+     */
+    reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
 
-        /*
-         * Allocate here so if allocation fails we can bail out early
-         * before starting the BA session in the firmware
-         */
-        baid_data =
-            kzalloc(sizeof(*baid_data) + mvm->trans->num_rx_queues * reorder_buf_size, GFP_KERNEL);
-        if (!baid_data) { return -ENOMEM; }
-
-        /*
-         * This division is why we need the above BUILD_BUG_ON(),
-         * if that doesn't hold then this will not be right.
-         */
-        baid_data->entries_per_queue = reorder_buf_size / sizeof(baid_data->entries[0]);
+    /*
+     * Allocate here so if allocation fails we can bail out early
+     * before starting the BA session in the firmware
+     */
+    baid_data =
+        kzalloc(sizeof(*baid_data) + mvm->trans->num_rx_queues * reorder_buf_size, GFP_KERNEL);
+    if (!baid_data) {
+      return -ENOMEM;
     }
 
-    cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
-    cmd.sta_id = mvm_sta->sta_id;
-    cmd.add_modify = STA_MODE_MODIFY;
-    if (start) {
-        cmd.add_immediate_ba_tid = (uint8_t)tid;
-        cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
-        cmd.rx_ba_window = cpu_to_le16(buf_size);
-    } else {
-        cmd.remove_immediate_ba_tid = (uint8_t)tid;
-    }
-    cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : STA_MODIFY_REMOVE_BA_TID;
+    /*
+     * This division is why we need the above BUILD_BUG_ON(),
+     * if that doesn't hold then this will not be right.
+     */
+    baid_data->entries_per_queue = reorder_buf_size / sizeof(baid_data->entries[0]);
+  }
 
-    status = ADD_STA_SUCCESS;
-    ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status);
-    if (ret) { goto out_free; }
+  cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+  cmd.sta_id = mvm_sta->sta_id;
+  cmd.add_modify = STA_MODE_MODIFY;
+  if (start) {
+    cmd.add_immediate_ba_tid = (uint8_t)tid;
+    cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+    cmd.rx_ba_window = cpu_to_le16(buf_size);
+  } else {
+    cmd.remove_immediate_ba_tid = (uint8_t)tid;
+  }
+  cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : STA_MODIFY_REMOVE_BA_TID;
 
-    switch (status & IWL_ADD_STA_STATUS_MASK) {
+  status = ADD_STA_SUCCESS;
+  ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status);
+  if (ret) {
+    goto out_free;
+  }
+
+  switch (status & IWL_ADD_STA_STATUS_MASK) {
     case ADD_STA_SUCCESS:
-        IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", start ? "start" : "stopp");
-        break;
+      IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", start ? "start" : "stopp");
+      break;
     case ADD_STA_IMMEDIATE_BA_FAILURE:
-        IWL_WARN(mvm, "RX BA Session refused by fw\n");
-        ret = -ENOSPC;
-        break;
+      IWL_WARN(mvm, "RX BA Session refused by fw\n");
+      ret = -ENOSPC;
+      break;
     default:
-        ret = -EIO;
-        IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", start ? "start" : "stopp",
-                status);
-        break;
+      ret = -EIO;
+      IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", start ? "start" : "stopp", status);
+      break;
+  }
+
+  if (ret) {
+    goto out_free;
+  }
+
+  if (start) {
+    uint8_t baid;
+
+    mvm->rx_ba_sessions++;
+
+    if (!iwl_mvm_has_new_rx_api(mvm)) {
+      return 0;
     }
 
-    if (ret) { goto out_free; }
-
-    if (start) {
-        uint8_t baid;
-
-        mvm->rx_ba_sessions++;
-
-        if (!iwl_mvm_has_new_rx_api(mvm)) { return 0; }
-
-        if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
-            ret = -EINVAL;
-            goto out_free;
-        }
-        baid = (uint8_t)((status & IWL_ADD_STA_BAID_MASK) >> IWL_ADD_STA_BAID_SHIFT);
-        baid_data->baid = baid;
-        baid_data->timeout = timeout;
-        baid_data->last_rx = jiffies;
-        baid_data->rcu_ptr = &mvm->baid_map[baid];
-        timer_setup(&baid_data->session_timer, iwl_mvm_rx_agg_session_expired, 0);
-        baid_data->mvm = mvm;
-        baid_data->tid = tid;
-        baid_data->sta_id = mvm_sta->sta_id;
-
-        mvm_sta->tid_to_baid[tid] = baid;
-        if (timeout) { mod_timer(&baid_data->session_timer, TU_TO_EXP_TIME(timeout * 2)); }
-
-        iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
-        /*
-         * protect the BA data with RCU to cover a case where our
-         * internal RX sync mechanism will timeout (not that it's
-         * supposed to happen) and we will free the session data while
-         * RX is being processed in parallel
-         */
-        IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", mvm_sta->sta_id, tid, baid);
-        WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
-        rcu_assign_pointer(mvm->baid_map[baid], baid_data);
-    } else {
-        uint8_t baid = mvm_sta->tid_to_baid[tid];
-
-        if (mvm->rx_ba_sessions > 0) { /* check that restart flow didn't zero the counter */
-            mvm->rx_ba_sessions--;
-        }
-        if (!iwl_mvm_has_new_rx_api(mvm)) { return 0; }
-
-        if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) { return -EINVAL; }
-
-        baid_data = rcu_access_pointer(mvm->baid_map[baid]);
-        if (WARN_ON(!baid_data)) { return -EINVAL; }
-
-        /* synchronize all rx queues so we can safely delete */
-        iwl_mvm_free_reorder(mvm, baid_data);
-        del_timer_sync(&baid_data->session_timer);
-        RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
-        kfree_rcu(baid_data, rcu_head);
-        IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
+    if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
+      ret = -EINVAL;
+      goto out_free;
     }
-    return 0;
+    baid = (uint8_t)((status & IWL_ADD_STA_BAID_MASK) >> IWL_ADD_STA_BAID_SHIFT);
+    baid_data->baid = baid;
+    baid_data->timeout = timeout;
+    baid_data->last_rx = jiffies;
+    baid_data->rcu_ptr = &mvm->baid_map[baid];
+    timer_setup(&baid_data->session_timer, iwl_mvm_rx_agg_session_expired, 0);
+    baid_data->mvm = mvm;
+    baid_data->tid = tid;
+    baid_data->sta_id = mvm_sta->sta_id;
+
+    mvm_sta->tid_to_baid[tid] = baid;
+    if (timeout) {
+      mod_timer(&baid_data->session_timer, TU_TO_EXP_TIME(timeout * 2));
+    }
+
+    iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
+    /*
+     * protect the BA data with RCU to cover a case where our
+     * internal RX sync mechanism will timeout (not that it's
+     * supposed to happen) and we will free the session data while
+     * RX is being processed in parallel
+     */
+    IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", mvm_sta->sta_id, tid, baid);
+    WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
+    rcu_assign_pointer(mvm->baid_map[baid], baid_data);
+  } else {
+    uint8_t baid = mvm_sta->tid_to_baid[tid];
+
+    if (mvm->rx_ba_sessions > 0) { /* check that restart flow didn't zero the counter */
+      mvm->rx_ba_sessions--;
+    }
+    if (!iwl_mvm_has_new_rx_api(mvm)) {
+      return 0;
+    }
+
+    if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) {
+      return -EINVAL;
+    }
+
+    baid_data = rcu_access_pointer(mvm->baid_map[baid]);
+    if (WARN_ON(!baid_data)) {
+      return -EINVAL;
+    }
+
+    /* synchronize all rx queues so we can safely delete */
+    iwl_mvm_free_reorder(mvm, baid_data);
+    del_timer_sync(&baid_data->session_timer);
+    RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
+    kfree_rcu(baid_data, rcu_head);
+    IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
+  }
+  return 0;
 
 out_free:
-    kfree(baid_data);
-    return ret;
+  kfree(baid_data);
+  return ret;
 }
 
 int iwl_mvm_sta_tx_agg(struct iwl_mvm* mvm, struct ieee80211_sta* sta, int tid, uint8_t queue,
                        bool start) {
-    struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_add_sta_cmd cmd = {};
-    int ret;
-    uint32_t status;
+  struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_add_sta_cmd cmd = {};
+  int ret;
+  uint32_t status;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (start) {
-        mvm_sta->tfd_queue_msk |= BIT(queue);
-        mvm_sta->tid_disable_agg &= ~BIT(tid);
-    } else {
-        /* In DQA-mode the queue isn't removed on agg termination */
-        mvm_sta->tid_disable_agg |= BIT(tid);
-    }
+  if (start) {
+    mvm_sta->tfd_queue_msk |= BIT(queue);
+    mvm_sta->tid_disable_agg &= ~BIT(tid);
+  } else {
+    /* In DQA-mode the queue isn't removed on agg termination */
+    mvm_sta->tid_disable_agg |= BIT(tid);
+  }
 
-    cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
-    cmd.sta_id = mvm_sta->sta_id;
-    cmd.add_modify = STA_MODE_MODIFY;
-    if (!iwl_mvm_has_new_tx_api(mvm)) { cmd.modify_mask = STA_MODIFY_QUEUES; }
-    cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
-    cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
-    cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
+  cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+  cmd.sta_id = mvm_sta->sta_id;
+  cmd.add_modify = STA_MODE_MODIFY;
+  if (!iwl_mvm_has_new_tx_api(mvm)) {
+    cmd.modify_mask = STA_MODIFY_QUEUES;
+  }
+  cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
+  cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+  cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
 
-    status = ADD_STA_SUCCESS;
-    ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status);
-    if (ret) { return ret; }
-
-    switch (status & IWL_ADD_STA_STATUS_MASK) {
-    case ADD_STA_SUCCESS:
-        break;
-    default:
-        ret = -EIO;
-        IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", start ? "start" : "stopp",
-                status);
-        break;
-    }
-
+  status = ADD_STA_SUCCESS;
+  ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status);
+  if (ret) {
     return ret;
+  }
+
+  switch (status & IWL_ADD_STA_STATUS_MASK) {
+    case ADD_STA_SUCCESS:
+      break;
+    default:
+      ret = -EIO;
+      IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", start ? "start" : "stopp", status);
+      break;
+  }
+
+  return ret;
 }
 
 const uint8_t tid_to_mac80211_ac[] = {
@@ -2355,1042 +2549,1121 @@
 
 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                              struct ieee80211_sta* sta, uint16_t tid, uint16_t* ssn) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_tid_data* tid_data;
-    uint16_t normalized_ssn;
-    uint16_t txq_id;
-    int ret;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_tid_data* tid_data;
+  uint16_t normalized_ssn;
+  uint16_t txq_id;
+  int ret;
 
-    if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) { return -EINVAL; }
+  if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) {
+    return -EINVAL;
+  }
 
-    if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
-        mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
-        IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
-                mvmsta->tid_data[tid].state);
-        return -ENXIO;
+  if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+    IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
+            mvmsta->tid_data[tid].state);
+    return -ENXIO;
+  }
+
+  lockdep_assert_held(&mvm->mutex);
+
+  if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && iwl_mvm_has_new_tx_api(mvm)) {
+    uint8_t ac = tid_to_mac80211_ac[tid];
+
+    ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+    if (ret) {
+      return ret;
+    }
+  }
+
+  spin_lock_bh(&mvmsta->lock);
+
+  /* possible race condition - we entered D0i3 while starting agg */
+  if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
+    spin_unlock_bh(&mvmsta->lock);
+    IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
+    return -EIO;
+  }
+
+  /*
+   * Note the possible cases:
+   *  1. An enabled TXQ - TXQ needs to become agg'ed
+   *  2. The TXQ hasn't yet been enabled, so find a free one and mark
+   *  it as reserved
+   */
+  txq_id = mvmsta->tid_data[tid].txq_id;
+  if (txq_id == IWL_MVM_INVALID_QUEUE) {
+    ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE,
+                                  IWL_MVM_DQA_MAX_DATA_QUEUE);
+    if (ret < 0) {
+      IWL_ERR(mvm, "Failed to allocate agg queue\n");
+      goto out;
     }
 
-    lockdep_assert_held(&mvm->mutex);
+    txq_id = ret;
 
-    if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && iwl_mvm_has_new_tx_api(mvm)) {
-        uint8_t ac = tid_to_mac80211_ac[tid];
+    /* TXQ hasn't yet been enabled, so mark it only as reserved */
+    mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
+  } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
+    ret = -ENXIO;
+    IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n", tid, IWL_MAX_HW_QUEUES - 1);
+    goto out;
 
-        ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
-        if (ret) { return ret; }
-    }
+  } else if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
+    ret = -ENXIO;
+    IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", tid);
+    goto out;
+  }
 
-    spin_lock_bh(&mvmsta->lock);
+  IWL_DEBUG_TX_QUEUES(mvm, "AGG for tid %d will be on queue #%d\n", tid, txq_id);
 
-    /* possible race condition - we entered D0i3 while starting agg */
-    if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
-        spin_unlock_bh(&mvmsta->lock);
-        IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
-        return -EIO;
-    }
+  tid_data = &mvmsta->tid_data[tid];
+  tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+  tid_data->txq_id = txq_id;
+  *ssn = tid_data->ssn;
 
-    /*
-     * Note the possible cases:
-     *  1. An enabled TXQ - TXQ needs to become agg'ed
-     *  2. The TXQ hasn't yet been enabled, so find a free one and mark
-     *  it as reserved
-     */
-    txq_id = mvmsta->tid_data[tid].txq_id;
-    if (txq_id == IWL_MVM_INVALID_QUEUE) {
-        ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE,
-                                      IWL_MVM_DQA_MAX_DATA_QUEUE);
-        if (ret < 0) {
-            IWL_ERR(mvm, "Failed to allocate agg queue\n");
-            goto out;
-        }
+  IWL_DEBUG_TX_QUEUES(mvm, "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
+                      mvmsta->sta_id, tid, txq_id, tid_data->ssn, tid_data->next_reclaimed);
 
-        txq_id = ret;
+  /*
+   * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
+   * to align the wrap around of ssn so we compare relevant values.
+   */
+  normalized_ssn = tid_data->ssn;
+  if (mvm->trans->cfg->gen2) {
+    normalized_ssn &= 0xff;
+  }
 
-        /* TXQ hasn't yet been enabled, so mark it only as reserved */
-        mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
-    } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
-        ret = -ENXIO;
-        IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n", tid, IWL_MAX_HW_QUEUES - 1);
-        goto out;
+  if (normalized_ssn == tid_data->next_reclaimed) {
+    tid_data->state = IWL_AGG_STARTING;
+    ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+  } else {
+    tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+  }
 
-    } else if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
-        ret = -ENXIO;
-        IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", tid);
-        goto out;
-    }
-
-    IWL_DEBUG_TX_QUEUES(mvm, "AGG for tid %d will be on queue #%d\n", tid, txq_id);
-
-    tid_data = &mvmsta->tid_data[tid];
-    tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
-    tid_data->txq_id = txq_id;
-    *ssn = tid_data->ssn;
-
-    IWL_DEBUG_TX_QUEUES(mvm, "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
-                        mvmsta->sta_id, tid, txq_id, tid_data->ssn, tid_data->next_reclaimed);
-
-    /*
-     * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
-     * to align the wrap around of ssn so we compare relevant values.
-     */
-    normalized_ssn = tid_data->ssn;
-    if (mvm->trans->cfg->gen2) { normalized_ssn &= 0xff; }
-
-    if (normalized_ssn == tid_data->next_reclaimed) {
-        tid_data->state = IWL_AGG_STARTING;
-        ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-    } else {
-        tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
-    }
-
-    ret = 0;
+  ret = 0;
 
 out:
-    spin_unlock_bh(&mvmsta->lock);
+  spin_unlock_bh(&mvmsta->lock);
 
-    return ret;
+  return ret;
 }
 
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                             struct ieee80211_sta* sta, uint16_t tid, uint16_t buf_size,
                             bool amsdu) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_tid_data* tid_data = &mvmsta->tid_data[tid];
-    unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
-    int queue, ret;
-    bool alloc_queue = true;
-    enum iwl_mvm_queue_status queue_status;
-    uint16_t ssn;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_tid_data* tid_data = &mvmsta->tid_data[tid];
+  unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
+  int queue, ret;
+  bool alloc_queue = true;
+  enum iwl_mvm_queue_status queue_status;
+  uint16_t ssn;
 
-    struct iwl_trans_txq_scd_cfg cfg = {
-        .sta_id = mvmsta->sta_id,
-        .tid = tid,
-        .frame_limit = buf_size,
-        .aggregate = true,
-    };
+  struct iwl_trans_txq_scd_cfg cfg = {
+      .sta_id = mvmsta->sta_id,
+      .tid = tid,
+      .frame_limit = buf_size,
+      .aggregate = true,
+  };
 
+  /*
+   * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
+   * manager, so this function should never be called in this case.
+   */
+  if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm))) {
+    return -EINVAL;
+  }
+
+  BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) != IWL_MAX_TID_COUNT);
+
+  spin_lock_bh(&mvmsta->lock);
+  ssn = tid_data->ssn;
+  queue = tid_data->txq_id;
+  tid_data->state = IWL_AGG_ON;
+  mvmsta->agg_tids |= BIT(tid);
+  tid_data->ssn = 0xffff;
+  tid_data->amsdu_in_ampdu_allowed = amsdu;
+  spin_unlock_bh(&mvmsta->lock);
+
+  if (iwl_mvm_has_new_tx_api(mvm)) {
     /*
-     * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
-     * manager, so this function should never be called in this case.
+     * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
+     * would have failed, so if we are here there is no need to
+     * allocate a queue.
+     * However, if aggregation size is different than the default
+     * size, the scheduler should be reconfigured.
+     * We cannot do this with the new TX API, so return unsupported
+     * for now, until it is offloaded to firmware.
+     * Note that if SCD default value changes - this condition
+     * should be updated as well.
      */
-    if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm))) { return -EINVAL; }
-
-    BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) != IWL_MAX_TID_COUNT);
-
-    spin_lock_bh(&mvmsta->lock);
-    ssn = tid_data->ssn;
-    queue = tid_data->txq_id;
-    tid_data->state = IWL_AGG_ON;
-    mvmsta->agg_tids |= BIT(tid);
-    tid_data->ssn = 0xffff;
-    tid_data->amsdu_in_ampdu_allowed = amsdu;
-    spin_unlock_bh(&mvmsta->lock);
-
-    if (iwl_mvm_has_new_tx_api(mvm)) {
-        /*
-         * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
-         * would have failed, so if we are here there is no need to
-         * allocate a queue.
-         * However, if aggregation size is different than the default
-         * size, the scheduler should be reconfigured.
-         * We cannot do this with the new TX API, so return unsupported
-         * for now, until it will be offloaded to firmware..
-         * Note that if SCD default value changes - this condition
-         * should be updated as well.
-         */
-        if (buf_size < IWL_FRAME_LIMIT) { return -ENOTSUPP; }
-
-        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
-        if (ret) { return -EIO; }
-        goto out;
+    if (buf_size < IWL_FRAME_LIMIT) {
+      return -ENOTSUPP;
     }
 
-    cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+    ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+    if (ret) {
+      return -EIO;
+    }
+    goto out;
+  }
 
-    queue_status = mvm->queue_info[queue].status;
+  cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
-    /* Maybe there is no need to even alloc a queue... */
-    if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) { alloc_queue = false; }
+  queue_status = mvm->queue_info[queue].status;
 
+  /* Maybe there is no need to even alloc a queue... */
+  if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) {
+    alloc_queue = false;
+  }
+
+  /*
+   * Only reconfig the SCD for the queue if the window size has
+   * changed from current (become smaller)
+   */
+  if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
     /*
-     * Only reconfig the SCD for the queue if the window size has
-     * changed from current (become smaller)
+     * If reconfiguring an existing queue, it first must be
+     * drained
      */
-    if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
-        /*
-         * If reconfiguring an existing queue, it first must be
-         * drained
-         */
-        ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
-        if (ret) {
-            IWL_ERR(mvm, "Error draining queue before reconfig\n");
-            return ret;
-        }
-
-        ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, mvmsta->sta_id, tid, buf_size, ssn);
-        if (ret) {
-            IWL_ERR(mvm, "Error reconfiguring TXQ #%d\n", queue);
-            return ret;
-        }
+    ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
+    if (ret) {
+      IWL_ERR(mvm, "Error draining queue before reconfig\n");
+      return ret;
     }
 
-    if (alloc_queue) { iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); }
-
-    /* Send ADD_STA command to enable aggs only if the queue isn't shared */
-    if (queue_status != IWL_MVM_QUEUE_SHARED) {
-        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
-        if (ret) { return -EIO; }
+    ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, mvmsta->sta_id, tid, buf_size, ssn);
+    if (ret) {
+      IWL_ERR(mvm, "Error reconfiguring TXQ #%d\n", queue);
+      return ret;
     }
+  }
 
-    /* No need to mark as reserved */
-    mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+  if (alloc_queue) {
+    iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
+  }
+
+  /* Send ADD_STA command to enable aggs only if the queue isn't shared */
+  if (queue_status != IWL_MVM_QUEUE_SHARED) {
+    ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+    if (ret) {
+      return -EIO;
+    }
+  }
+
+  /* No need to mark as reserved */
+  mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
 
 out:
-    /*
-     * Even though in theory the peer could have different
-     * aggregation reorder buffer sizes for different sessions,
-     * our ucode doesn't allow for that and has a global limit
-     * for each station. Therefore, use the minimum of all the
-     * aggregation sessions and our default value.
-     */
-    mvmsta->max_agg_bufsize = min(mvmsta->max_agg_bufsize, buf_size);
-    mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+  /*
+   * Even though in theory the peer could have different
+   * aggregation reorder buffer sizes for different sessions,
+   * our ucode doesn't allow for that and has a global limit
+   * for each station. Therefore, use the minimum of all the
+   * aggregation sessions and our default value.
+   */
+  mvmsta->max_agg_bufsize = min(mvmsta->max_agg_bufsize, buf_size);
+  mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
 
-    IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", sta->addr, tid);
+  IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", sta->addr, tid);
 
-    return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
+  return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
 }
 
 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta,
                                         struct iwl_mvm_tid_data* tid_data) {
-    uint16_t txq_id = tid_data->txq_id;
+  uint16_t txq_id = tid_data->txq_id;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (iwl_mvm_has_new_tx_api(mvm)) { return; }
+  if (iwl_mvm_has_new_tx_api(mvm)) {
+    return;
+  }
 
-    /*
-     * The TXQ is marked as reserved only if no traffic came through yet
-     * This means no traffic has been sent on this TID (agg'd or not), so
-     * we no longer have use for the queue. Since it hasn't even been
-     * allocated through iwl_mvm_enable_txq, so we can just mark it back as
-     * free.
-     */
-    if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
-        mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
-        tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
-    }
+  /*
+   * The TXQ is marked as reserved only if no traffic came through yet
+   * This means no traffic has been sent on this TID (agg'd or not), so
+   * we no longer have use for the queue. Since it hasn't even been
+   * allocated through iwl_mvm_enable_txq, we can just mark it back as
+   * free.
+   */
+  if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
+    mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+    tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
+  }
 }
 
 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                             struct ieee80211_sta* sta, uint16_t tid) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_tid_data* tid_data = &mvmsta->tid_data[tid];
-    uint16_t txq_id;
-    int err;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_tid_data* tid_data = &mvmsta->tid_data[tid];
+  uint16_t txq_id;
+  int err;
 
-    /*
-     * If mac80211 is cleaning its state, then say that we finished since
-     * our state has been cleared anyway.
-     */
-    if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
-        ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-        return 0;
-    }
+  /*
+   * If mac80211 is cleaning its state, then say that we finished since
+   * our state has been cleared anyway.
+   */
+  if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+    ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+    return 0;
+  }
 
-    spin_lock_bh(&mvmsta->lock);
+  spin_lock_bh(&mvmsta->lock);
 
-    txq_id = tid_data->txq_id;
+  txq_id = tid_data->txq_id;
 
-    IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", mvmsta->sta_id, tid, txq_id,
-                        tid_data->state);
+  IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", mvmsta->sta_id, tid, txq_id,
+                      tid_data->state);
 
-    mvmsta->agg_tids &= ~BIT(tid);
+  mvmsta->agg_tids &= ~BIT(tid);
 
-    iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
+  iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
 
-    switch (tid_data->state) {
+  switch (tid_data->state) {
     case IWL_AGG_ON:
-        tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+      tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 
-        IWL_DEBUG_TX_QUEUES(mvm, "ssn = %d, next_recl = %d\n", tid_data->ssn,
-                            tid_data->next_reclaimed);
+      IWL_DEBUG_TX_QUEUES(mvm, "ssn = %d, next_recl = %d\n", tid_data->ssn,
+                          tid_data->next_reclaimed);
 
-        tid_data->ssn = 0xffff;
-        tid_data->state = IWL_AGG_OFF;
-        spin_unlock_bh(&mvmsta->lock);
+      tid_data->ssn = 0xffff;
+      tid_data->state = IWL_AGG_OFF;
+      spin_unlock_bh(&mvmsta->lock);
 
-        ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+      ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 
-        iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
-        return 0;
+      iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
+      return 0;
     case IWL_AGG_STARTING:
     case IWL_EMPTYING_HW_QUEUE_ADDBA:
-        /*
-         * The agg session has been stopped before it was set up. This
-         * can happen when the AddBA timer times out for example.
-         */
+      /*
+       * The agg session has been stopped before it was set up. This
+       * can happen when the AddBA timer times out for example.
+       */
 
-        /* No barriers since we are under mutex */
-        lockdep_assert_held(&mvm->mutex);
+      /* No barriers since we are under mutex */
+      lockdep_assert_held(&mvm->mutex);
 
-        ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-        tid_data->state = IWL_AGG_OFF;
-        err = 0;
-        break;
+      ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+      tid_data->state = IWL_AGG_OFF;
+      err = 0;
+      break;
     default:
-        IWL_ERR(mvm, "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
-                mvmsta->sta_id, tid, tid_data->state);
-        IWL_ERR(mvm, "\ttid_data->txq_id = %d\n", tid_data->txq_id);
-        err = -EINVAL;
-    }
+      IWL_ERR(mvm, "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+              mvmsta->sta_id, tid, tid_data->state);
+      IWL_ERR(mvm, "\ttid_data->txq_id = %d\n", tid_data->txq_id);
+      err = -EINVAL;
+  }
 
-    spin_unlock_bh(&mvmsta->lock);
+  spin_unlock_bh(&mvmsta->lock);
 
-    return err;
+  return err;
 }
 
 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                              struct ieee80211_sta* sta, uint16_t tid) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_tid_data* tid_data = &mvmsta->tid_data[tid];
-    uint16_t txq_id;
-    enum iwl_mvm_agg_state old_state;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_tid_data* tid_data = &mvmsta->tid_data[tid];
+  uint16_t txq_id;
+  enum iwl_mvm_agg_state old_state;
 
-    /*
-     * First set the agg state to OFF to avoid calling
-     * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
-     */
-    spin_lock_bh(&mvmsta->lock);
-    txq_id = tid_data->txq_id;
-    IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", mvmsta->sta_id, tid,
-                        txq_id, tid_data->state);
-    old_state = tid_data->state;
-    tid_data->state = IWL_AGG_OFF;
-    mvmsta->agg_tids &= ~BIT(tid);
-    spin_unlock_bh(&mvmsta->lock);
+  /*
+   * First set the agg state to OFF to avoid calling
+   * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
+   */
+  spin_lock_bh(&mvmsta->lock);
+  txq_id = tid_data->txq_id;
+  IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", mvmsta->sta_id, tid, txq_id,
+                      tid_data->state);
+  old_state = tid_data->state;
+  tid_data->state = IWL_AGG_OFF;
+  mvmsta->agg_tids &= ~BIT(tid);
+  spin_unlock_bh(&mvmsta->lock);
 
-    iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
+  iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
 
-    if (old_state >= IWL_AGG_ON) {
-        iwl_mvm_drain_sta(mvm, mvmsta, true);
+  if (old_state >= IWL_AGG_ON) {
+    iwl_mvm_drain_sta(mvm, mvmsta, true);
 
-        if (iwl_mvm_has_new_tx_api(mvm)) {
-            if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id, BIT(tid), 0)) {
-                IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
-            }
-            iwl_trans_wait_txq_empty(mvm->trans, txq_id);
-        } else {
-            if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0)) {
-                IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
-            }
-            iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
-        }
-
-        iwl_mvm_drain_sta(mvm, mvmsta, false);
-
-        iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
+    if (iwl_mvm_has_new_tx_api(mvm)) {
+      if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id, BIT(tid), 0)) {
+        IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+      }
+      iwl_trans_wait_txq_empty(mvm->trans, txq_id);
+    } else {
+      if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0)) {
+        IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+      }
+      iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
     }
 
-    return 0;
+    iwl_mvm_drain_sta(mvm, mvmsta, false);
+
+    iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
+  }
+
+  return 0;
 }
 
 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm* mvm) {
-    int i, max = -1, max_offs = -1;
+  int i, max = -1, max_offs = -1;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* Pick the unused key offset with the highest 'deleted'
-     * counter. Every time a key is deleted, all the counters
-     * are incremented and the one that was just deleted is
-     * reset to zero. Thus, the highest counter is the one
-     * that was deleted longest ago. Pick that one.
-     */
-    for (i = 0; i < STA_KEY_MAX_NUM; i++) {
-        if (test_bit(i, mvm->fw_key_table)) { continue; }
-        if (mvm->fw_key_deleted[i] > max) {
-            max = mvm->fw_key_deleted[i];
-            max_offs = i;
-        }
+  /* Pick the unused key offset with the highest 'deleted'
+   * counter. Every time a key is deleted, all the counters
+   * are incremented and the one that was just deleted is
+   * reset to zero. Thus, the highest counter is the one
+   * that was deleted longest ago. Pick that one.
+   */
+  for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+    if (test_bit(i, mvm->fw_key_table)) {
+      continue;
     }
+    if (mvm->fw_key_deleted[i] > max) {
+      max = mvm->fw_key_deleted[i];
+      max_offs = i;
+    }
+  }
 
-    if (max_offs < 0) { return STA_KEY_IDX_INVALID; }
+  if (max_offs < 0) {
+    return STA_KEY_IDX_INVALID;
+  }
 
-    return max_offs;
+  return max_offs;
 }
 
 static struct iwl_mvm_sta* iwl_mvm_get_key_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                                struct ieee80211_sta* sta) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (sta) { return iwl_mvm_sta_from_mac80211(sta); }
+  if (sta) {
+    return iwl_mvm_sta_from_mac80211(sta);
+  }
+
+  /*
+   * The device expects GTKs for station interfaces to be
+   * installed as GTKs for the AP station. If we have no
+   * station ID, then use AP's station ID.
+   */
+  if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
+    uint8_t sta_id = mvmvif->ap_sta_id;
+
+    sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
 
     /*
-     * The device expects GTKs for station interfaces to be
-     * installed as GTKs for the AP station. If we have no
-     * station ID, then use AP's station ID.
+     * It is possible that the 'sta' parameter is NULL,
+     * for example when a GTK is removed - the sta_id will then
+     * be the AP ID, and no station was passed by mac80211.
      */
-    if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
-        uint8_t sta_id = mvmvif->ap_sta_id;
-
-        sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
-
-        /*
-         * It is possible that the 'sta' parameter is NULL,
-         * for example when a GTK is removed - the sta_id will then
-         * be the AP ID, and no station was passed by mac80211.
-         */
-        if (IS_ERR_OR_NULL(sta)) { return NULL; }
-
-        return iwl_mvm_sta_from_mac80211(sta);
+    if (IS_ERR_OR_NULL(sta)) {
+      return NULL;
     }
 
-    return NULL;
+    return iwl_mvm_sta_from_mac80211(sta);
+  }
+
+  return NULL;
 }
 
 static int iwl_mvm_send_sta_key(struct iwl_mvm* mvm, uint32_t sta_id,
                                 struct ieee80211_key_conf* key, bool mcast, uint32_t tkip_iv32,
                                 uint16_t* tkip_p1k, uint32_t cmd_flags, uint8_t key_offset,
                                 bool mfp) {
-    union {
-        struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
-        struct iwl_mvm_add_sta_key_cmd cmd;
-    } u = {};
-    __le16 key_flags;
-    int ret;
-    uint32_t status;
-    uint16_t keyidx;
-    uint64_t pn = 0;
-    int i, size;
-    bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
+  union {
+    struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+    struct iwl_mvm_add_sta_key_cmd cmd;
+  } u = {};
+  __le16 key_flags;
+  int ret;
+  uint32_t status;
+  uint16_t keyidx;
+  uint64_t pn = 0;
+  int i, size;
+  bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
 
-    if (sta_id == IWL_MVM_INVALID_STA) { return -EINVAL; }
+  if (sta_id == IWL_MVM_INVALID_STA) {
+    return -EINVAL;
+  }
 
-    keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK;
-    key_flags = cpu_to_le16(keyidx);
-    key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+  keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK;
+  key_flags = cpu_to_le16(keyidx);
+  key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
 
-    switch (key->cipher) {
+  switch (key->cipher) {
     case WLAN_CIPHER_SUITE_TKIP:
-        key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
-        if (new_api) {
-            memcpy((void*)&u.cmd.tx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
-                   IWL_MIC_KEY_SIZE);
+      key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
+      if (new_api) {
+        memcpy((void*)&u.cmd.tx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+               IWL_MIC_KEY_SIZE);
 
-            memcpy((void*)&u.cmd.rx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
-                   IWL_MIC_KEY_SIZE);
-            pn = atomic64_read(&key->tx_pn);
+        memcpy((void*)&u.cmd.rx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+               IWL_MIC_KEY_SIZE);
+        pn = atomic64_read(&key->tx_pn);
 
-        } else {
-            u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
-            for (i = 0; i < 5; i++) {
-                u.cmd_v1.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
-            }
+      } else {
+        u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
+        for (i = 0; i < 5; i++) {
+          u.cmd_v1.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
         }
-        memcpy(u.cmd.common.key, key->key, key->keylen);
-        break;
+      }
+      memcpy(u.cmd.common.key, key->key, key->keylen);
+      break;
     case WLAN_CIPHER_SUITE_CCMP:
-        key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
-        memcpy(u.cmd.common.key, key->key, key->keylen);
-        if (new_api) { pn = atomic64_read(&key->tx_pn); }
-        break;
+      key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
+      memcpy(u.cmd.common.key, key->key, key->keylen);
+      if (new_api) {
+        pn = atomic64_read(&key->tx_pn);
+      }
+      break;
     case WLAN_CIPHER_SUITE_WEP104:
-        key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
+      key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
     /* fall through */
     case WLAN_CIPHER_SUITE_WEP40:
-        key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
-        memcpy(u.cmd.common.key + 3, key->key, key->keylen);
-        break;
+      key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
+      memcpy(u.cmd.common.key + 3, key->key, key->keylen);
+      break;
     case WLAN_CIPHER_SUITE_GCMP_256:
-        key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
+      key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
     /* fall through */
     case WLAN_CIPHER_SUITE_GCMP:
-        key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
-        memcpy(u.cmd.common.key, key->key, key->keylen);
-        if (new_api) { pn = atomic64_read(&key->tx_pn); }
-        break;
+      key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
+      memcpy(u.cmd.common.key, key->key, key->keylen);
+      if (new_api) {
+        pn = atomic64_read(&key->tx_pn);
+      }
+      break;
     default:
-        key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
-        memcpy(u.cmd.common.key, key->key, key->keylen);
-    }
+      key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
+      memcpy(u.cmd.common.key, key->key, key->keylen);
+  }
 
-    if (mcast) { key_flags |= cpu_to_le16(STA_KEY_MULTICAST); }
-    if (mfp) { key_flags |= cpu_to_le16(STA_KEY_MFP); }
+  if (mcast) {
+    key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+  }
+  if (mfp) {
+    key_flags |= cpu_to_le16(STA_KEY_MFP);
+  }
 
-    u.cmd.common.key_offset = key_offset;
-    u.cmd.common.key_flags = key_flags;
-    u.cmd.common.sta_id = sta_id;
+  u.cmd.common.key_offset = key_offset;
+  u.cmd.common.key_flags = key_flags;
+  u.cmd.common.sta_id = sta_id;
 
-    if (new_api) {
-        u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
-        size = sizeof(u.cmd);
-    } else {
-        size = sizeof(u.cmd_v1);
-    }
+  if (new_api) {
+    u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
+    size = sizeof(u.cmd);
+  } else {
+    size = sizeof(u.cmd_v1);
+  }
 
-    status = ADD_STA_SUCCESS;
-    if (cmd_flags & CMD_ASYNC) {
-        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, &u.cmd);
-    } else {
-        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status);
-    }
+  status = ADD_STA_SUCCESS;
+  if (cmd_flags & CMD_ASYNC) {
+    ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, &u.cmd);
+  } else {
+    ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status);
+  }
 
-    switch (status) {
+  switch (status) {
     case ADD_STA_SUCCESS:
-        IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
-        break;
+      IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
+      break;
     default:
-        ret = -EIO;
-        IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
-        break;
-    }
+      ret = -EIO;
+      IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
+      break;
+  }
 
-    return ret;
+  return ret;
 }
 
 static int iwl_mvm_send_sta_igtk(struct iwl_mvm* mvm, struct ieee80211_key_conf* keyconf,
                                  uint8_t sta_id, bool remove_key) {
-    struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
+  struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
 
-    /* verify the key details match the required command's expectations */
-    if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
-                (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
-                (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
-                 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
-                 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) {
+  /* verify the key details match the required command's expectations */
+  if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
+              (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
+              (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
+               keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
+               keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) {
+    return -EINVAL;
+  }
+
+  if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) {
+    return -EINVAL;
+  }
+
+  igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
+  igtk_cmd.sta_id = cpu_to_le32(sta_id);
+
+  if (remove_key) {
+    igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
+  } else {
+    struct ieee80211_key_seq seq;
+    const uint8_t* pn;
+
+    switch (keyconf->cipher) {
+      case WLAN_CIPHER_SUITE_AES_CMAC:
+        igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+        break;
+      case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+      case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+        igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
+        break;
+      default:
         return -EINVAL;
     }
 
-    if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) {
-        return -EINVAL;
+    memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
+    if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
+      igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
     }
+    ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+    pn = seq.aes_cmac.pn;
+    igtk_cmd.receive_seq_cnt =
+        cpu_to_le64(((uint64_t)pn[5] << 0) | ((uint64_t)pn[4] << 8) | ((uint64_t)pn[3] << 16) |
+                    ((uint64_t)pn[2] << 24) | ((uint64_t)pn[1] << 32) | ((uint64_t)pn[0] << 40));
+  }
 
-    igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
-    igtk_cmd.sta_id = cpu_to_le32(sta_id);
+  IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n", remove_key ? "removing" : "installing",
+                 igtk_cmd.sta_id);
 
-    if (remove_key) {
-        igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
-    } else {
-        struct ieee80211_key_seq seq;
-        const uint8_t* pn;
+  if (!iwl_mvm_has_new_rx_api(mvm)) {
+    struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
+        .ctrl_flags = igtk_cmd.ctrl_flags,
+        .key_id = igtk_cmd.key_id,
+        .sta_id = igtk_cmd.sta_id,
+        .receive_seq_cnt = igtk_cmd.receive_seq_cnt};
 
-        switch (keyconf->cipher) {
-        case WLAN_CIPHER_SUITE_AES_CMAC:
-            igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
-            break;
-        case WLAN_CIPHER_SUITE_BIP_GMAC_128:
-        case WLAN_CIPHER_SUITE_BIP_GMAC_256:
-            igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
-            break;
-        default:
-            return -EINVAL;
-        }
-
-        memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
-        if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
-            igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
-        }
-        ieee80211_get_key_rx_seq(keyconf, 0, &seq);
-        pn = seq.aes_cmac.pn;
-        igtk_cmd.receive_seq_cnt = cpu_to_le64(((uint64_t)pn[5] << 0) | ((uint64_t)pn[4] << 8) |
-                                               ((uint64_t)pn[3] << 16) | ((uint64_t)pn[2] << 24) |
-                                               ((uint64_t)pn[1] << 32) | ((uint64_t)pn[0] << 40));
-    }
-
-    IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n", remove_key ? "removing" : "installing",
-                   igtk_cmd.sta_id);
-
-    if (!iwl_mvm_has_new_rx_api(mvm)) {
-        struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
-            .ctrl_flags = igtk_cmd.ctrl_flags,
-            .key_id = igtk_cmd.key_id,
-            .sta_id = igtk_cmd.sta_id,
-            .receive_seq_cnt = igtk_cmd.receive_seq_cnt};
-
-        memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, ARRAY_SIZE(igtk_cmd_v1.igtk));
-        return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd_v1), &igtk_cmd_v1);
-    }
-    return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd), &igtk_cmd);
+    memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, ARRAY_SIZE(igtk_cmd_v1.igtk));
+    return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd_v1), &igtk_cmd_v1);
+  }
+  return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd), &igtk_cmd);
 }
 
 static inline uint8_t* iwl_mvm_get_mac_addr(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                             struct ieee80211_sta* sta) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (sta) { return sta->addr; }
+  if (sta) {
+    return sta->addr;
+  }
 
-    if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
-        uint8_t sta_id = mvmvif->ap_sta_id;
-        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
-        return sta->addr;
-    }
+  if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
+    uint8_t sta_id = mvmvif->ap_sta_id;
+    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
+    return sta->addr;
+  }
 
-    return NULL;
+  return NULL;
 }
 
 static int __iwl_mvm_set_sta_key(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                  struct ieee80211_sta* sta, struct ieee80211_key_conf* keyconf,
                                  uint8_t key_offset, bool mcast) {
-    int ret;
-    const uint8_t* addr;
-    struct ieee80211_key_seq seq;
-    uint16_t p1k[5];
-    uint32_t sta_id;
-    bool mfp = false;
+  int ret;
+  const uint8_t* addr;
+  struct ieee80211_key_seq seq;
+  uint16_t p1k[5];
+  uint32_t sta_id;
+  bool mfp = false;
 
-    if (sta) {
-        struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+  if (sta) {
+    struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 
-        sta_id = mvm_sta->sta_id;
-        mfp = sta->mfp;
-    } else if (vif->type == NL80211_IFTYPE_AP && !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+    sta_id = mvm_sta->sta_id;
+    mfp = sta->mfp;
+  } else if (vif->type == NL80211_IFTYPE_AP && !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-        sta_id = mvmvif->mcast_sta.sta_id;
-    } else {
-        IWL_ERR(mvm, "Failed to find station id\n");
-        return -EINVAL;
-    }
+    sta_id = mvmvif->mcast_sta.sta_id;
+  } else {
+    IWL_ERR(mvm, "Failed to find station id\n");
+    return -EINVAL;
+  }
 
-    switch (keyconf->cipher) {
+  switch (keyconf->cipher) {
     case WLAN_CIPHER_SUITE_TKIP:
-        addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
-        /* get phase 1 key from mac80211 */
-        ieee80211_get_key_rx_seq(keyconf, 0, &seq);
-        ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
-        ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, seq.tkip.iv32, p1k, 0, key_offset,
-                                   mfp);
-        break;
+      addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+      /* get phase 1 key from mac80211 */
+      ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+      ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+      ret =
+          iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, seq.tkip.iv32, p1k, 0, key_offset, mfp);
+      break;
     case WLAN_CIPHER_SUITE_CCMP:
     case WLAN_CIPHER_SUITE_WEP40:
     case WLAN_CIPHER_SUITE_WEP104:
     case WLAN_CIPHER_SUITE_GCMP:
     case WLAN_CIPHER_SUITE_GCMP_256:
-        ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 0, NULL, 0, key_offset, mfp);
-        break;
+      ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 0, NULL, 0, key_offset, mfp);
+      break;
     default:
-        ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 0, NULL, 0, key_offset, mfp);
-    }
+      ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 0, NULL, 0, key_offset, mfp);
+  }
 
-    return ret;
+  return ret;
 }
 
 static int __iwl_mvm_remove_sta_key(struct iwl_mvm* mvm, uint8_t sta_id,
                                     struct ieee80211_key_conf* keyconf, bool mcast) {
-    union {
-        struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
-        struct iwl_mvm_add_sta_key_cmd cmd;
-    } u = {};
-    bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
-    __le16 key_flags;
-    int ret, size;
-    uint32_t status;
+  union {
+    struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+    struct iwl_mvm_add_sta_key_cmd cmd;
+  } u = {};
+  bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
+  __le16 key_flags;
+  int ret, size;
+  uint32_t status;
 
-    /* This is a valid situation for GTK removal */
-    if (sta_id == IWL_MVM_INVALID_STA) { return 0; }
+  /* This is a valid situation for GTK removal */
+  if (sta_id == IWL_MVM_INVALID_STA) {
+    return 0;
+  }
 
-    key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK);
-    key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
-    key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
+  key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK);
+  key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
+  key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
 
-    if (mcast) { key_flags |= cpu_to_le16(STA_KEY_MULTICAST); }
+  if (mcast) {
+    key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+  }
 
-    /*
-     * The fields assigned here are in the same location at the start
-     * of the command, so we can do this union trick.
-     */
-    u.cmd.common.key_flags = key_flags;
-    u.cmd.common.key_offset = keyconf->hw_key_idx;
-    u.cmd.common.sta_id = sta_id;
+  /*
+   * The fields assigned here are in the same location at the start
+   * of the command, so we can do this union trick.
+   */
+  u.cmd.common.key_flags = key_flags;
+  u.cmd.common.key_offset = keyconf->hw_key_idx;
+  u.cmd.common.sta_id = sta_id;
 
-    size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
+  size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
 
-    status = ADD_STA_SUCCESS;
-    ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status);
+  status = ADD_STA_SUCCESS;
+  ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status);
 
-    switch (status) {
+  switch (status) {
     case ADD_STA_SUCCESS:
-        IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
-        break;
+      IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
+      break;
     default:
-        ret = -EIO;
-        IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
-        break;
-    }
+      ret = -EIO;
+      IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
+      break;
+  }
 
-    return ret;
+  return ret;
 }
 
 int iwl_mvm_set_sta_key(struct iwl_mvm* mvm, struct ieee80211_vif* vif, struct ieee80211_sta* sta,
                         struct ieee80211_key_conf* keyconf, uint8_t key_offset) {
-    bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
-    struct iwl_mvm_sta* mvm_sta;
-    uint8_t sta_id = IWL_MVM_INVALID_STA;
-    int ret;
-    static const uint8_t __maybe_unused zero_addr[ETH_ALEN] = {0};
+  bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+  struct iwl_mvm_sta* mvm_sta;
+  uint8_t sta_id = IWL_MVM_INVALID_STA;
+  int ret;
+  static const uint8_t __maybe_unused zero_addr[ETH_ALEN] = {0};
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (vif->type != NL80211_IFTYPE_AP || keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
-        /* Get the station id from the mvm local station table */
-        mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
-        if (!mvm_sta) {
-            IWL_ERR(mvm, "Failed to find station\n");
-            return -EINVAL;
-        }
-        sta_id = mvm_sta->sta_id;
-
-        /*
-         * It is possible that the 'sta' parameter is NULL, and thus
-         * there is a need to retrieve the sta from the local station
-         * table.
-         */
-        if (!sta) {
-            sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
-                                            lockdep_is_held(&mvm->mutex));
-            if (IS_ERR_OR_NULL(sta)) {
-                IWL_ERR(mvm, "Invalid station id\n");
-                return -EINVAL;
-            }
-        }
-
-        if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) { return -EINVAL; }
-    } else {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
-        sta_id = mvmvif->mcast_sta.sta_id;
+  if (vif->type != NL80211_IFTYPE_AP || keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+    /* Get the station id from the mvm local station table */
+    mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+    if (!mvm_sta) {
+      IWL_ERR(mvm, "Failed to find station\n");
+      return -EINVAL;
     }
-
-    if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-        keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-        keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
-        ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
-        goto end;
-    }
-
-    /* If the key_offset is not pre-assigned, we need to find a
-     * new offset to use.  In normal cases, the offset is not
-     * pre-assigned, but during HW_RESTART we want to reuse the
-     * same indices, so we pass them when this function is called.
-     *
-     * In D3 entry, we need to hardcoded the indices (because the
-     * firmware hardcodes the PTK offset to 0).  In this case, we
-     * need to make sure we don't overwrite the hw_key_idx in the
-     * keyconf structure, because otherwise we cannot configure
-     * the original ones back when resuming.
-     */
-    if (key_offset == STA_KEY_IDX_INVALID) {
-        key_offset = iwl_mvm_set_fw_key_idx(mvm);
-        if (key_offset == STA_KEY_IDX_INVALID) { return -ENOSPC; }
-        keyconf->hw_key_idx = key_offset;
-    }
-
-    ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
-    if (ret) { goto end; }
+    sta_id = mvm_sta->sta_id;
 
     /*
-     * For WEP, the same key is used for multicast and unicast. Upload it
-     * again, using the same key offset, and now pointing the other one
-     * to the same key slot (offset).
-     * If this fails, remove the original as well.
+     * It is possible that the 'sta' parameter is NULL, and thus
+     * there is a need to retrieve the sta from the local station
+     * table.
      */
-    if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-         keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
-        sta) {
-        ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, !mcast);
-        if (ret) {
-            __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
-            goto end;
-        }
+    if (!sta) {
+      sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
+      if (IS_ERR_OR_NULL(sta)) {
+        IWL_ERR(mvm, "Invalid station id\n");
+        return -EINVAL;
+      }
     }
 
-    __set_bit(key_offset, mvm->fw_key_table);
+    if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) {
+      return -EINVAL;
+    }
+  } else {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+    sta_id = mvmvif->mcast_sta.sta_id;
+  }
+
+  if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+      keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+      keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
+    ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
+    goto end;
+  }
+
+  /* If the key_offset is not pre-assigned, we need to find a
+   * new offset to use.  In normal cases, the offset is not
+   * pre-assigned, but during HW_RESTART we want to reuse the
+   * same indices, so we pass them when this function is called.
+   *
+   * In D3 entry, we need to hardcoded the indices (because the
+   * firmware hardcodes the PTK offset to 0).  In this case, we
+   * need to make sure we don't overwrite the hw_key_idx in the
+   * keyconf structure, because otherwise we cannot configure
+   * the original ones back when resuming.
+   */
+  if (key_offset == STA_KEY_IDX_INVALID) {
+    key_offset = iwl_mvm_set_fw_key_idx(mvm);
+    if (key_offset == STA_KEY_IDX_INVALID) {
+      return -ENOSPC;
+    }
+    keyconf->hw_key_idx = key_offset;
+  }
+
+  ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
+  if (ret) {
+    goto end;
+  }
+
+  /*
+   * For WEP, the same key is used for multicast and unicast. Upload it
+   * again, using the same key offset, and now pointing the other one
+   * to the same key slot (offset).
+   * If this fails, remove the original as well.
+   */
+  if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+      sta) {
+    ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, !mcast);
+    if (ret) {
+      __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+      goto end;
+    }
+  }
+
+  __set_bit(key_offset, mvm->fw_key_table);
 
 end:
-    IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", keyconf->cipher,
-                  keyconf->keylen, keyconf->keyidx, sta ? sta->addr : zero_addr, ret);
-    return ret;
+  IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", keyconf->cipher,
+                keyconf->keylen, keyconf->keyidx, sta ? sta->addr : zero_addr, ret);
+  return ret;
 }
 
 int iwl_mvm_remove_sta_key(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                            struct ieee80211_sta* sta, struct ieee80211_key_conf* keyconf) {
-    bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
-    struct iwl_mvm_sta* mvm_sta;
-    uint8_t sta_id = IWL_MVM_INVALID_STA;
-    int ret, i;
+  bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+  struct iwl_mvm_sta* mvm_sta;
+  uint8_t sta_id = IWL_MVM_INVALID_STA;
+  int ret, i;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* Get the station from the mvm local station table */
-    mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
-    if (mvm_sta) {
-        sta_id = mvm_sta->sta_id;
-    } else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast) {
-        sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
+  /* Get the station from the mvm local station table */
+  mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+  if (mvm_sta) {
+    sta_id = mvm_sta->sta_id;
+  } else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast) {
+    sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
+  }
+
+  IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);
+
+  if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+                  keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+                  keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) {
+    return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
+  }
+
+  if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
+    IWL_ERR(mvm, "offset %d not used in fw key table.\n", keyconf->hw_key_idx);
+    return -ENOENT;
+  }
+
+  /* track which key was deleted last */
+  for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+    if (mvm->fw_key_deleted[i] < U8_MAX) {
+      mvm->fw_key_deleted[i]++;
     }
+  }
+  mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
 
-    IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);
+  if (sta && !mvm_sta) {
+    IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
+    return 0;
+  }
 
-    if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-                    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-                    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) {
-        return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
-    }
-
-    if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
-        IWL_ERR(mvm, "offset %d not used in fw key table.\n", keyconf->hw_key_idx);
-        return -ENOENT;
-    }
-
-    /* track which key was deleted last */
-    for (i = 0; i < STA_KEY_MAX_NUM; i++) {
-        if (mvm->fw_key_deleted[i] < U8_MAX) { mvm->fw_key_deleted[i]++; }
-    }
-    mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
-
-    if (sta && !mvm_sta) {
-        IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
-        return 0;
-    }
-
-    ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
-    if (ret) { return ret; }
-
-    /* delete WEP key twice to get rid of (now useless) offset */
-    if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
-        ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
-    }
-
+  ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+  if (ret) {
     return ret;
+  }
+
+  /* delete WEP key twice to get rid of (now useless) offset */
+  if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
+    ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
+  }
+
+  return ret;
 }
 
 void iwl_mvm_update_tkip_key(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                              struct ieee80211_key_conf* keyconf, struct ieee80211_sta* sta,
                              uint32_t iv32, uint16_t* phase1key) {
-    struct iwl_mvm_sta* mvm_sta;
-    bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
-    bool mfp = sta ? sta->mfp : false;
+  struct iwl_mvm_sta* mvm_sta;
+  bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+  bool mfp = sta ? sta->mfp : false;
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
-    if (WARN_ON_ONCE(!mvm_sta)) { goto unlock; }
-    iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, iv32, phase1key, CMD_ASYNC,
-                         keyconf->hw_key_idx, mfp);
+  mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+  if (WARN_ON_ONCE(!mvm_sta)) {
+    goto unlock;
+  }
+  iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, iv32, phase1key, CMD_ASYNC,
+                       keyconf->hw_key_idx, mfp);
 
 unlock:
-    rcu_read_unlock();
+  rcu_read_unlock();
 }
 
 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm* mvm, struct ieee80211_sta* sta) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_add_sta_cmd cmd = {
-        .add_modify = STA_MODE_MODIFY,
-        .sta_id = mvmsta->sta_id,
-        .station_flags_msk = cpu_to_le32(STA_FLG_PS),
-        .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
-    };
-    int ret;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_add_sta_cmd cmd = {
+      .add_modify = STA_MODE_MODIFY,
+      .sta_id = mvmsta->sta_id,
+      .station_flags_msk = cpu_to_le32(STA_FLG_PS),
+      .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+  };
+  int ret;
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd);
-    if (ret) { IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); }
+  ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+  }
 }
 
 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm* mvm, struct ieee80211_sta* sta,
                                        enum ieee80211_frame_release_type reason, uint16_t cnt,
                                        uint16_t tids, bool more_data, bool single_sta_queue) {
-    struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    struct iwl_mvm_add_sta_cmd cmd = {
-        .add_modify = STA_MODE_MODIFY,
-        .sta_id = mvmsta->sta_id,
-        .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
-        .sleep_tx_count = cpu_to_le16(cnt),
-        .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
-    };
-    int tid, ret;
-    unsigned long _tids = tids;
+  struct iwl_mvm_sta* mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_add_sta_cmd cmd = {
+      .add_modify = STA_MODE_MODIFY,
+      .sta_id = mvmsta->sta_id,
+      .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
+      .sleep_tx_count = cpu_to_le16(cnt),
+      .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+  };
+  int tid, ret;
+  unsigned long _tids = tids;
 
-    /* convert TIDs to ACs - we don't support TSPEC so that's OK
-     * Note that this field is reserved and unused by firmware not
-     * supporting GO uAPSD, so it's safe to always do this.
-     */
-    for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
+  /* convert TIDs to ACs - we don't support TSPEC so that's OK
+   * Note that this field is reserved and unused by firmware not
+   * supporting GO uAPSD, so it's safe to always do this.
+   */
+  for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
 
-    /* If we're releasing frames from aggregation or dqa queues then check
-     * if all the queues that we're releasing frames from, combined, have:
-     *  - more frames than the service period, in which case more_data
-     *    needs to be set
-     *  - fewer than 'cnt' frames, in which case we need to adjust the
-     *    firmware command (but do that unconditionally)
-     */
-    if (single_sta_queue) {
-        int remaining = cnt;
-        int sleep_tx_count;
+  /* If we're releasing frames from aggregation or dqa queues then check
+   * if all the queues that we're releasing frames from, combined, have:
+   *  - more frames than the service period, in which case more_data
+   *    needs to be set
+   *  - fewer than 'cnt' frames, in which case we need to adjust the
+   *    firmware command (but do that unconditionally)
+   */
+  if (single_sta_queue) {
+    int remaining = cnt;
+    int sleep_tx_count;
 
-        spin_lock_bh(&mvmsta->lock);
-        for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
-            struct iwl_mvm_tid_data* tid_data;
-            uint16_t n_queued;
+    spin_lock_bh(&mvmsta->lock);
+    for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
+      struct iwl_mvm_tid_data* tid_data;
+      uint16_t n_queued;
 
-            tid_data = &mvmsta->tid_data[tid];
+      tid_data = &mvmsta->tid_data[tid];
 
-            n_queued = iwl_mvm_tid_queued(mvm, tid_data);
-            if (n_queued > remaining) {
-                more_data = true;
-                remaining = 0;
-                break;
-            }
-            remaining -= n_queued;
-        }
-        sleep_tx_count = cnt - remaining;
-        if (reason == IEEE80211_FRAME_RELEASE_UAPSD) { mvmsta->sleep_tx_count = sleep_tx_count; }
-        spin_unlock_bh(&mvmsta->lock);
-
-        cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
-        if (WARN_ON(cnt - remaining == 0)) {
-            ieee80211_sta_eosp(sta);
-            return;
-        }
+      n_queued = iwl_mvm_tid_queued(mvm, tid_data);
+      if (n_queued > remaining) {
+        more_data = true;
+        remaining = 0;
+        break;
+      }
+      remaining -= n_queued;
     }
-
-    /* Note: this is ignored by firmware not supporting GO uAPSD */
-    if (more_data) { cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA; }
-
-    if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
-        mvmsta->next_status_eosp = true;
-        cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
-    } else {
-        cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
+    sleep_tx_count = cnt - remaining;
+    if (reason == IEEE80211_FRAME_RELEASE_UAPSD) {
+      mvmsta->sleep_tx_count = sleep_tx_count;
     }
+    spin_unlock_bh(&mvmsta->lock);
 
-    /* block the Tx queues until the FW updated the sleep Tx count */
-    iwl_trans_block_txq_ptrs(mvm->trans, true);
+    cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
+    if (WARN_ON(cnt - remaining == 0)) {
+      ieee80211_sta_eosp(sta);
+      return;
+    }
+  }
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
-                               iwl_mvm_add_sta_cmd_size(mvm), &cmd);
-    if (ret) { IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); }
+  /* Note: this is ignored by firmware not supporting GO uAPSD */
+  if (more_data) {
+    cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
+  }
+
+  if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
+    mvmsta->next_status_eosp = true;
+    cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
+  } else {
+    cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
+  }
+
+  /* block the Tx queues until the FW updated the sleep Tx count */
+  iwl_trans_block_txq_ptrs(mvm->trans, true);
+
+  ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
+                             iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+  }
 }
 
 void iwl_mvm_rx_eosp_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_mvm_eosp_notification* notif = (void*)pkt->data;
-    struct ieee80211_sta* sta;
-    uint32_t sta_id = le32_to_cpu(notif->sta_id);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_mvm_eosp_notification* notif = (void*)pkt->data;
+  struct ieee80211_sta* sta;
+  uint32_t sta_id = le32_to_cpu(notif->sta_id);
 
-    if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT)) { return; }
+  if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT)) {
+    return;
+  }
 
-    rcu_read_lock();
-    sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-    if (!IS_ERR_OR_NULL(sta)) { ieee80211_sta_eosp(sta); }
-    rcu_read_unlock();
+  rcu_read_lock();
+  sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+  if (!IS_ERR_OR_NULL(sta)) {
+    ieee80211_sta_eosp(sta);
+  }
+  rcu_read_unlock();
 }
 
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta, bool disable) {
-    struct iwl_mvm_add_sta_cmd cmd = {
-        .add_modify = STA_MODE_MODIFY,
-        .sta_id = mvmsta->sta_id,
-        .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
-        .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
-        .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
-    };
-    int ret;
+  struct iwl_mvm_add_sta_cmd cmd = {
+      .add_modify = STA_MODE_MODIFY,
+      .sta_id = mvmsta->sta_id,
+      .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
+      .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
+      .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+  };
+  int ret;
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd);
-    if (ret) { IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); }
+  ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+  }
 }
 
 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm* mvm, struct ieee80211_sta* sta,
                                       bool disable) {
-    struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+  struct iwl_mvm_sta* mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 
-    spin_lock_bh(&mvm_sta->lock);
+  spin_lock_bh(&mvm_sta->lock);
 
-    if (mvm_sta->disable_tx == disable) {
-        spin_unlock_bh(&mvm_sta->lock);
-        return;
-    }
-
-    mvm_sta->disable_tx = disable;
-
-    /* Tell mac80211 to start/stop queuing tx for this station */
-    ieee80211_sta_block_awake(mvm->hw, sta, disable);
-
-    iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
-
+  if (mvm_sta->disable_tx == disable) {
     spin_unlock_bh(&mvm_sta->lock);
+    return;
+  }
+
+  mvm_sta->disable_tx = disable;
+
+  /* Tell mac80211 to start/stop queuing tx for this station */
+  ieee80211_sta_block_awake(mvm->hw, sta, disable);
+
+  iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
+
+  spin_unlock_bh(&mvm_sta->lock);
 }
 
 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm* mvm, struct iwl_mvm_vif* mvmvif,
                                               struct iwl_mvm_int_sta* sta, bool disable) {
-    uint32_t id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
-    struct iwl_mvm_add_sta_cmd cmd = {
-        .add_modify = STA_MODE_MODIFY,
-        .sta_id = sta->sta_id,
-        .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
-        .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
-        .mac_id_n_color = cpu_to_le32(id),
-    };
-    int ret;
+  uint32_t id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
+  struct iwl_mvm_add_sta_cmd cmd = {
+      .add_modify = STA_MODE_MODIFY,
+      .sta_id = sta->sta_id,
+      .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
+      .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
+      .mac_id_n_color = cpu_to_le32(id),
+  };
+  int ret;
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0, iwl_mvm_add_sta_cmd_size(mvm), &cmd);
-    if (ret) { IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); }
+  ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0, iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+  }
 }
 
 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm* mvm, struct iwl_mvm_vif* mvmvif,
                                        bool disable) {
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvm_sta;
-    int i;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvm_sta;
+  int i;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* Block/unblock all the stations of the given mvmvif */
-    for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
-        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex));
-        if (IS_ERR_OR_NULL(sta)) { continue; }
-
-        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-        if (mvm_sta->mac_id_n_color != FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) { continue; }
-
-        iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
+  /* Block/unblock all the stations of the given mvmvif */
+  for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex));
+    if (IS_ERR_OR_NULL(sta)) {
+      continue;
     }
 
-    if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { return; }
-
-    /* Need to block/unblock also multicast station */
-    if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) {
-        iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->mcast_sta, disable);
+    mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+    if (mvm_sta->mac_id_n_color != FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) {
+      continue;
     }
 
-    /*
-     * Only unblock the broadcast station (FW blocks it for immediate
-     * quiet, not the driver)
-     */
-    if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) {
-        iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->bcast_sta, disable);
-    }
+    iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
+  }
+
+  if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+    return;
+  }
+
+  /* Need to block/unblock also multicast station */
+  if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) {
+    iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->mcast_sta, disable);
+  }
+
+  /*
+   * Only unblock the broadcast station (FW blocks it for immediate
+   * quiet, not the driver)
+   */
+  if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) {
+    iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->bcast_sta, disable);
+  }
 }
 
 void iwl_mvm_csa_client_absent(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_sta* mvmsta;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_sta* mvmsta;
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
+  mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
 
-    if (!WARN_ON(!mvmsta)) { iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); }
+  if (!WARN_ON(!mvmsta)) {
+    iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
+  }
 
-    rcu_read_unlock();
+  rcu_read_unlock();
 }
 
 uint16_t iwl_mvm_tid_queued(struct iwl_mvm* mvm, struct iwl_mvm_tid_data* tid_data) {
-    uint16_t sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+  uint16_t sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 
-    /*
-     * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
-     * to align the wrap around of ssn so we compare relevant values.
-     */
-    if (mvm->trans->cfg->gen2) { sn &= 0xff; }
+  /*
+   * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
+   * to align the wrap around of ssn so we compare relevant values.
+   */
+  if (mvm->trans->cfg->gen2) {
+    sn &= 0xff;
+  }
 
-    return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
+  return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sta.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sta.h
index 655c68c..2943621 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sta.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sta.h
@@ -411,11 +411,9 @@
   uint8_t tx_ant;
 };
 
-uint16_t iwl_mvm_tid_queued(struct iwl_mvm* mvm,
-                            struct iwl_mvm_tid_data* tid_data);
+uint16_t iwl_mvm_tid_queued(struct iwl_mvm* mvm, struct iwl_mvm_tid_data* tid_data);
 
-static inline struct iwl_mvm_sta* iwl_mvm_sta_from_mac80211(
-    struct ieee80211_sta* sta) {
+static inline struct iwl_mvm_sta* iwl_mvm_sta_from_mac80211(struct ieee80211_sta* sta) {
   return (struct iwl_mvm_sta*)sta->drv_priv;
 }
 
@@ -442,54 +440,43 @@
  * @flags: if update==true, this marks what is being changed via ORs of values
  *  from enum iwl_sta_modify_flag. Otherwise, this is ignored.
  */
-int iwl_mvm_sta_send_to_fw(struct iwl_mvm* mvm, struct ieee80211_sta* sta,
-                           bool update, unsigned int flags);
-int iwl_mvm_add_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                    struct ieee80211_sta* sta);
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm* mvm, struct ieee80211_sta* sta, bool update,
+                           unsigned int flags);
+int iwl_mvm_add_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif, struct ieee80211_sta* sta);
 
-static inline int iwl_mvm_update_sta(struct iwl_mvm* mvm,
-                                     struct ieee80211_vif* vif,
+static inline int iwl_mvm_update_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                      struct ieee80211_sta* sta) {
   return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
 }
 
-int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm* mvm,
-                                  struct iwl_mvm_sta* mvm_sta);
-int iwl_mvm_rm_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                   struct ieee80211_sta* sta);
-int iwl_mvm_rm_sta_id(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                      uint8_t sta_id);
-int iwl_mvm_set_sta_key(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                        struct ieee80211_sta* sta,
+int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvm_sta);
+int iwl_mvm_rm_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif, struct ieee80211_sta* sta);
+int iwl_mvm_rm_sta_id(struct iwl_mvm* mvm, struct ieee80211_vif* vif, uint8_t sta_id);
+int iwl_mvm_set_sta_key(struct iwl_mvm* mvm, struct ieee80211_vif* vif, struct ieee80211_sta* sta,
                         struct ieee80211_key_conf* keyconf, uint8_t key_offset);
 int iwl_mvm_remove_sta_key(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                           struct ieee80211_sta* sta,
-                           struct ieee80211_key_conf* keyconf);
+                           struct ieee80211_sta* sta, struct ieee80211_key_conf* keyconf);
 
 void iwl_mvm_update_tkip_key(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                             struct ieee80211_key_conf* keyconf,
-                             struct ieee80211_sta* sta, uint32_t iv32,
-                             uint16_t* phase1key);
+                             struct ieee80211_key_conf* keyconf, struct ieee80211_sta* sta,
+                             uint32_t iv32, uint16_t* phase1key);
 
 void iwl_mvm_rx_eosp_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb);
 
 /* AMPDU */
-int iwl_mvm_sta_rx_agg(struct iwl_mvm* mvm, struct ieee80211_sta* sta, int tid,
-                       uint16_t ssn, bool start, uint16_t buf_size,
-                       uint16_t timeout);
+int iwl_mvm_sta_rx_agg(struct iwl_mvm* mvm, struct ieee80211_sta* sta, int tid, uint16_t ssn,
+                       bool start, uint16_t buf_size, uint16_t timeout);
 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                             struct ieee80211_sta* sta, uint16_t tid,
-                             uint16_t* ssn);
+                             struct ieee80211_sta* sta, uint16_t tid, uint16_t* ssn);
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
-                            struct ieee80211_sta* sta, uint16_t tid,
-                            uint16_t buf_size, bool amsdu);
+                            struct ieee80211_sta* sta, uint16_t tid, uint16_t buf_size, bool amsdu);
 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                             struct ieee80211_sta* sta, uint16_t tid);
 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                              struct ieee80211_sta* sta, uint16_t tid);
 
-int iwl_mvm_sta_tx_agg(struct iwl_mvm* mvm, struct ieee80211_sta* sta, int tid,
-                       uint8_t queue, bool start);
+int iwl_mvm_sta_tx_agg(struct iwl_mvm* mvm, struct ieee80211_sta* sta, int tid, uint8_t queue,
+                       bool start);
 
 int iwl_mvm_add_aux_sta(struct iwl_mvm* mvm);
 void iwl_mvm_del_aux_sta(struct iwl_mvm* mvm);
@@ -501,9 +488,8 @@
 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
 int iwl_mvm_add_mcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
 int iwl_mvm_rm_mcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
-int iwl_mvm_allocate_int_sta(struct iwl_mvm* mvm, struct iwl_mvm_int_sta* sta,
-                             uint32_t qmask, enum nl80211_iftype iftype,
-                             enum iwl_sta_type type);
+int iwl_mvm_allocate_int_sta(struct iwl_mvm* mvm, struct iwl_mvm_int_sta* sta, uint32_t qmask,
+                             enum nl80211_iftype iftype, enum iwl_sta_type type);
 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
 void iwl_mvm_dealloc_int_sta(struct iwl_mvm* mvm, struct iwl_mvm_int_sta* sta);
 int iwl_mvm_add_snif_sta(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
@@ -511,19 +497,13 @@
 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm* mvm);
 
 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm* mvm, struct ieee80211_sta* sta);
-void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm* mvm,
-                                       struct ieee80211_sta* sta,
-                                       enum ieee80211_frame_release_type reason,
-                                       uint16_t cnt, uint16_t tids,
-                                       bool more_data, bool single_sta_queue);
-int iwl_mvm_drain_sta(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta,
-                      bool drain);
-void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm* mvm,
-                                   struct iwl_mvm_sta* mvmsta, bool disable);
-void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm* mvm,
-                                      struct ieee80211_sta* sta, bool disable);
-void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm* mvm,
-                                       struct iwl_mvm_vif* mvmvif,
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm* mvm, struct ieee80211_sta* sta,
+                                       enum ieee80211_frame_release_type reason, uint16_t cnt,
+                                       uint16_t tids, bool more_data, bool single_sta_queue);
+int iwl_mvm_drain_sta(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta, bool drain);
+void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm* mvm, struct iwl_mvm_sta* mvmsta, bool disable);
+void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm* mvm, struct ieee80211_sta* sta, bool disable);
+void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm* mvm, struct iwl_mvm_vif* mvmvif,
                                        bool disable);
 void iwl_mvm_csa_client_absent(struct iwl_mvm* mvm, struct ieee80211_vif* vif);
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct* wk);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tdls.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tdls.c
index 822ad7d..33b2868 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tdls.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tdls.c
@@ -34,6 +34,7 @@
  *****************************************************************************/
 
 #include <linux/etherdevice.h>
+
 #include "iwl-io.h"
 #include "iwl-prph.h"
 #include "mvm.h"
@@ -43,283 +44,313 @@
 #define TU_TO_MS(x) (TU_TO_US(x) / 1000)
 
 void iwl_mvm_teardown_tdls_peers(struct iwl_mvm* mvm) {
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvmsta;
-    int i;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvmsta;
+  int i;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
-        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex));
-        if (!sta || IS_ERR(sta) || !sta->tdls) { continue; }
-
-        mvmsta = iwl_mvm_sta_from_mac80211(sta);
-        ieee80211_tdls_oper_request(mvmsta->vif, sta->addr, NL80211_TDLS_TEARDOWN,
-                                    WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED, GFP_KERNEL);
+  for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex));
+    if (!sta || IS_ERR(sta) || !sta->tdls) {
+      continue;
     }
+
+    mvmsta = iwl_mvm_sta_from_mac80211(sta);
+    ieee80211_tdls_oper_request(mvmsta->vif, sta->addr, NL80211_TDLS_TEARDOWN,
+                                WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED, GFP_KERNEL);
+  }
 }
 
 int iwl_mvm_tdls_sta_count(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvmsta;
-    int count = 0;
-    int i;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvmsta;
+  int count = 0;
+  int i;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
-        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex));
-        if (!sta || IS_ERR(sta) || !sta->tdls) { continue; }
-
-        if (vif) {
-            mvmsta = iwl_mvm_sta_from_mac80211(sta);
-            if (mvmsta->vif != vif) { continue; }
-        }
-
-        count++;
+  for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex));
+    if (!sta || IS_ERR(sta) || !sta->tdls) {
+      continue;
     }
 
-    return count;
+    if (vif) {
+      mvmsta = iwl_mvm_sta_from_mac80211(sta);
+      if (mvmsta->vif != vif) {
+        continue;
+      }
+    }
+
+    count++;
+  }
+
+  return count;
 }
 
 static void iwl_mvm_tdls_config(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_rx_packet* pkt;
-    struct iwl_tdls_config_res* resp;
-    struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
-    struct iwl_host_cmd cmd = {
-        .id = TDLS_CONFIG_CMD,
-        .flags = CMD_WANT_SKB,
-        .data =
-            {
-                &tdls_cfg_cmd,
-            },
-        .len =
-            {
-                sizeof(struct iwl_tdls_config_cmd),
-            },
-    };
-    struct ieee80211_sta* sta;
-    int ret, i, cnt;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_rx_packet* pkt;
+  struct iwl_tdls_config_res* resp;
+  struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
+  struct iwl_host_cmd cmd = {
+      .id = TDLS_CONFIG_CMD,
+      .flags = CMD_WANT_SKB,
+      .data =
+          {
+              &tdls_cfg_cmd,
+          },
+      .len =
+          {
+              sizeof(struct iwl_tdls_config_cmd),
+          },
+  };
+  struct ieee80211_sta* sta;
+  int ret, i, cnt;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    tdls_cfg_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
-    tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
-    tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
+  tdls_cfg_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+  tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
+  tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
 
-    /* for now the Tx cmd is empty and unused */
+  /* for now the Tx cmd is empty and unused */
 
-    /* populate TDLS peer data */
-    cnt = 0;
-    for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
-        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex));
-        if (IS_ERR_OR_NULL(sta) || !sta->tdls) { continue; }
-
-        tdls_cfg_cmd.sta_info[cnt].sta_id = i;
-        tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid = IWL_MVM_TDLS_FW_TID;
-        tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
-        tdls_cfg_cmd.sta_info[cnt].is_initiator = cpu_to_le32(sta->tdls_initiator ? 1 : 0);
-
-        cnt++;
+  /* populate TDLS peer data */
+  cnt = 0;
+  for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex));
+    if (IS_ERR_OR_NULL(sta) || !sta->tdls) {
+      continue;
     }
 
-    tdls_cfg_cmd.tdls_peer_count = cnt;
-    IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
+    tdls_cfg_cmd.sta_info[cnt].sta_id = i;
+    tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid = IWL_MVM_TDLS_FW_TID;
+    tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
+    tdls_cfg_cmd.sta_info[cnt].is_initiator = cpu_to_le32(sta->tdls_initiator ? 1 : 0);
 
-    ret = iwl_mvm_send_cmd(mvm, &cmd);
-    if (WARN_ON_ONCE(ret)) { return; }
+    cnt++;
+  }
 
-    pkt = cmd.resp_pkt;
+  tdls_cfg_cmd.tdls_peer_count = cnt;
+  IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
 
-    WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
+  ret = iwl_mvm_send_cmd(mvm, &cmd);
+  if (WARN_ON_ONCE(ret)) {
+    return;
+  }
 
-    /* we don't really care about the response at this point */
+  pkt = cmd.resp_pkt;
 
-    iwl_free_resp(&cmd);
+  WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
+
+  /* we don't really care about the response at this point */
+
+  iwl_free_resp(&cmd);
 }
 
 void iwl_mvm_recalc_tdls_state(struct iwl_mvm* mvm, struct ieee80211_vif* vif, bool sta_added) {
-    int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
+  int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
 
-    /* when the first peer joins, send a power update first */
-    if (tdls_sta_cnt == 1 && sta_added) { iwl_mvm_power_update_mac(mvm); }
+  /* when the first peer joins, send a power update first */
+  if (tdls_sta_cnt == 1 && sta_added) {
+    iwl_mvm_power_update_mac(mvm);
+  }
 
-    /* Configure the FW with TDLS peer info only if TDLS channel switch
-     * capability is set.
-     * TDLS config data is used currently only in TDLS channel switch code.
-     * Supposed to serve also TDLS buffer station which is not implemneted
-     * yet in FW*/
-    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
-        iwl_mvm_tdls_config(mvm, vif);
-    }
+  /* Configure the FW with TDLS peer info only if TDLS channel switch
+   * capability is set.
+   * TDLS config data is used currently only in TDLS channel switch code.
+   * Supposed to serve also TDLS buffer station which is not implemneted
+   * yet in FW*/
+  if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
+    iwl_mvm_tdls_config(mvm, vif);
+  }
 
-    /* when the last peer leaves, send a power update last */
-    if (tdls_sta_cnt == 0 && !sta_added) { iwl_mvm_power_update_mac(mvm); }
+  /* when the last peer leaves, send a power update last */
+  if (tdls_sta_cnt == 0 && !sta_added) {
+    iwl_mvm_power_update_mac(mvm);
+  }
 }
 
 void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw* hw, struct ieee80211_vif* vif) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    uint32_t duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  uint32_t duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
 
-    /*
-     * iwl_mvm_protect_session() reads directly from the device
-     * (the system time), so make sure it is available.
-     */
-    if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS)) { return; }
+  /*
+   * iwl_mvm_protect_session() reads directly from the device
+   * (the system time), so make sure it is available.
+   */
+  if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS)) {
+    return;
+  }
 
-    mutex_lock(&mvm->mutex);
-    /* Protect the session to hear the TDLS setup response on the channel */
-    iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  /* Protect the session to hear the TDLS setup response on the channel */
+  iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
+  mutex_unlock(&mvm->mutex);
 
-    iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
+  iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
 }
 
 static const char* iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state) {
-    switch (state) {
+  switch (state) {
     case IWL_MVM_TDLS_SW_IDLE:
-        return "IDLE";
+      return "IDLE";
     case IWL_MVM_TDLS_SW_REQ_SENT:
-        return "REQ SENT";
+      return "REQ SENT";
     case IWL_MVM_TDLS_SW_RESP_RCVD:
-        return "RESP RECEIVED";
+      return "RESP RECEIVED";
     case IWL_MVM_TDLS_SW_REQ_RCVD:
-        return "REQ RECEIVED";
+      return "REQ RECEIVED";
     case IWL_MVM_TDLS_SW_ACTIVE:
-        return "ACTIVE";
-    }
+      return "ACTIVE";
+  }
 
-    return NULL;
+  return NULL;
 }
 
 static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm* mvm, enum iwl_mvm_tdls_cs_state state) {
-    if (mvm->tdls_cs.state == state) { return; }
+  if (mvm->tdls_cs.state == state) {
+    return;
+  }
 
-    IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
-                   iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state), iwl_mvm_tdls_cs_state_str(state));
-    mvm->tdls_cs.state = state;
+  IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
+                 iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state), iwl_mvm_tdls_cs_state_str(state));
+  mvm->tdls_cs.state = state;
 
-    /* we only send requests to our switching peer - update sent time */
-    if (state == IWL_MVM_TDLS_SW_REQ_SENT) {
-        mvm->tdls_cs.peer.sent_timestamp = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
-    }
+  /* we only send requests to our switching peer - update sent time */
+  if (state == IWL_MVM_TDLS_SW_REQ_SENT) {
+    mvm->tdls_cs.peer.sent_timestamp = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+  }
 
-    if (state == IWL_MVM_TDLS_SW_IDLE) { mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA; }
+  if (state == IWL_MVM_TDLS_SW_IDLE) {
+    mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
+  }
 }
 
 void iwl_mvm_rx_tdls_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_tdls_channel_switch_notif* notif = (void*)pkt->data;
-    struct ieee80211_sta* sta;
-    unsigned int delay;
-    struct iwl_mvm_sta* mvmsta;
-    struct ieee80211_vif* vif;
-    uint32_t sta_id = le32_to_cpu(notif->sta_id);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_tdls_channel_switch_notif* notif = (void*)pkt->data;
+  struct ieee80211_sta* sta;
+  unsigned int delay;
+  struct iwl_mvm_sta* mvmsta;
+  struct ieee80211_vif* vif;
+  uint32_t sta_id = le32_to_cpu(notif->sta_id);
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    /* can fail sometimes */
-    if (!le32_to_cpu(notif->status)) {
-        iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
-        return;
-    }
+  /* can fail sometimes */
+  if (!le32_to_cpu(notif->status)) {
+    iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+    return;
+  }
 
-    if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT)) { return; }
+  if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT)) {
+    return;
+  }
 
-    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
-    /* the station may not be here, but if it is, it must be a TDLS peer */
-    if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls)) { return; }
+  sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex));
+  /* the station may not be here, but if it is, it must be a TDLS peer */
+  if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls)) {
+    return;
+  }
 
-    mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    vif = mvmsta->vif;
+  mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  vif = mvmsta->vif;
 
-    /*
-     * Update state and possibly switch again after this is over (DTIM).
-     * Also convert TU to msec.
-     */
-    delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
-    mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
+  /*
+   * Update state and possibly switch again after this is over (DTIM).
+   * Also convert TU to msec.
+   */
+  delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+  mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
 
-    iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
+  iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
 }
 
 static int iwl_mvm_tdls_check_action(struct iwl_mvm* mvm, enum iwl_tdls_channel_switch_type type,
                                      const uint8_t* peer, bool peer_initiator, uint32_t timestamp) {
-    bool same_peer = false;
-    int ret = 0;
+  bool same_peer = false;
+  int ret = 0;
 
-    /* get the existing peer if it's there */
-    if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
-        mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
-        struct ieee80211_sta* sta = rcu_dereference_protected(
-            mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], lockdep_is_held(&mvm->mutex));
-        if (!IS_ERR_OR_NULL(sta)) { same_peer = ether_addr_equal(peer, sta->addr); }
+  /* get the existing peer if it's there */
+  if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
+      mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
+    struct ieee80211_sta* sta = rcu_dereference_protected(
+        mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], lockdep_is_held(&mvm->mutex));
+    if (!IS_ERR_OR_NULL(sta)) {
+      same_peer = ether_addr_equal(peer, sta->addr);
     }
+  }
 
-    switch (mvm->tdls_cs.state) {
+  switch (mvm->tdls_cs.state) {
     case IWL_MVM_TDLS_SW_IDLE:
-        /*
-         * might be spurious packet from the peer after the switch is
-         * already done
-         */
-        if (type == TDLS_MOVE_CH) { ret = -EINVAL; }
-        break;
+      /*
+       * might be spurious packet from the peer after the switch is
+       * already done
+       */
+      if (type == TDLS_MOVE_CH) {
+        ret = -EINVAL;
+      }
+      break;
     case IWL_MVM_TDLS_SW_REQ_SENT:
-        /* only allow requests from the same peer */
-        if (!same_peer) {
-            ret = -EBUSY;
-        } else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH && !peer_initiator)
-        /*
-         * We received a ch-switch request while an outgoing
-         * one is pending. Allow it if the peer is the link
-         * initiator.
-         */
-        {
-            ret = -EBUSY;
-        } else if (type == TDLS_SEND_CHAN_SW_REQ)
-        /* wait for idle before sending another request */
-        {
-            ret = -EBUSY;
-        } else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
-        /* we got a stale response - ignore it */
-        {
-            ret = -EINVAL;
-        }
-        break;
-    case IWL_MVM_TDLS_SW_RESP_RCVD:
-        /*
-         * we are waiting for the FW to give an "active" notification,
-         * so ignore requests in the meantime
-         */
+      /* only allow requests from the same peer */
+      if (!same_peer) {
         ret = -EBUSY;
-        break;
+      } else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH && !peer_initiator)
+      /*
+       * We received a ch-switch request while an outgoing
+       * one is pending. Allow it if the peer is the link
+       * initiator.
+       */
+      {
+        ret = -EBUSY;
+      } else if (type == TDLS_SEND_CHAN_SW_REQ)
+      /* wait for idle before sending another request */
+      {
+        ret = -EBUSY;
+      } else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
+      /* we got a stale response - ignore it */
+      {
+        ret = -EINVAL;
+      }
+      break;
+    case IWL_MVM_TDLS_SW_RESP_RCVD:
+      /*
+       * we are waiting for the FW to give an "active" notification,
+       * so ignore requests in the meantime
+       */
+      ret = -EBUSY;
+      break;
     case IWL_MVM_TDLS_SW_REQ_RCVD:
-        /* as above, allow the link initiator to proceed */
-        if (type == TDLS_SEND_CHAN_SW_REQ) {
-            if (!same_peer) {
-                ret = -EBUSY;
-            } else if (peer_initiator) { /* they are the initiator */
-                ret = -EBUSY;
-            }
-        } else if (type == TDLS_MOVE_CH) {
-            ret = -EINVAL;
+      /* as above, allow the link initiator to proceed */
+      if (type == TDLS_SEND_CHAN_SW_REQ) {
+        if (!same_peer) {
+          ret = -EBUSY;
+        } else if (peer_initiator) { /* they are the initiator */
+          ret = -EBUSY;
         }
-        break;
+      } else if (type == TDLS_MOVE_CH) {
+        ret = -EINVAL;
+      }
+      break;
     case IWL_MVM_TDLS_SW_ACTIVE:
-        /*
-         * the only valid request when active is a request to return
-         * to the base channel by the current off-channel peer
-         */
-        if (type != TDLS_MOVE_CH || !same_peer) { ret = -EBUSY; }
-        break;
-    }
+      /*
+       * the only valid request when active is a request to return
+       * to the base channel by the current off-channel peer
+       */
+      if (type != TDLS_MOVE_CH || !same_peer) {
+        ret = -EBUSY;
+      }
+      break;
+  }
 
-    if (ret)
-        IWL_DEBUG_TDLS(mvm, "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
-                       type, mvm->tdls_cs.state, peer, same_peer, peer_initiator);
+  if (ret)
+    IWL_DEBUG_TDLS(mvm, "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
+                   type, mvm->tdls_cs.state, peer, same_peer, peer_initiator);
 
-    return ret;
+  return ret;
 }
 
 static int iwl_mvm_tdls_config_channel_switch(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
@@ -329,359 +360,381 @@
                                               uint32_t timestamp, uint16_t switch_time,
                                               uint16_t switch_timeout, struct sk_buff* skb,
                                               uint32_t ch_sw_tm_ie) {
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvmsta;
-    struct ieee80211_tx_info* info;
-    struct ieee80211_hdr* hdr;
-    struct iwl_tdls_channel_switch_cmd cmd = {0};
-    int ret;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvmsta;
+  struct ieee80211_tx_info* info;
+  struct ieee80211_hdr* hdr;
+  struct iwl_tdls_channel_switch_cmd cmd = {0};
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator, timestamp);
-    if (ret) { return ret; }
+  ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator, timestamp);
+  if (ret) {
+    return ret;
+  }
 
-    if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
-        ret = -EINVAL;
-        goto out;
-    }
+  if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
+    ret = -EINVAL;
+    goto out;
+  }
 
-    cmd.switch_type = type;
-    cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
-    cmd.timing.switch_time = cpu_to_le32(switch_time);
-    cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
+  cmd.switch_type = type;
+  cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
+  cmd.timing.switch_time = cpu_to_le32(switch_time);
+  cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
 
-    rcu_read_lock();
-    sta = ieee80211_find_sta(vif, peer);
-    if (!sta) {
-        rcu_read_unlock();
-        ret = -ENOENT;
-        goto out;
-    }
-    mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
-
-    if (!chandef) {
-        if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && mvm->tdls_cs.peer.chandef.chan) {
-            /* actually moving to the channel */
-            chandef = &mvm->tdls_cs.peer.chandef;
-        } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE && type == TDLS_MOVE_CH) {
-            /* we need to return to base channel */
-            struct ieee80211_chanctx_conf* chanctx = rcu_dereference(vif->chanctx_conf);
-
-            if (WARN_ON_ONCE(!chanctx)) {
-                rcu_read_unlock();
-                goto out;
-            }
-
-            chandef = &chanctx->def;
-        }
-    }
-
-    if (chandef) {
-        cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? PHY_BAND_24 : PHY_BAND_5);
-        cmd.ci.channel = chandef->chan->hw_value;
-        cmd.ci.width = iwl_mvm_get_channel_width(chandef);
-        cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
-    }
-
-    /* keep quota calculation simple for now - 50% of DTIM for TDLS */
-    cmd.timing.max_offchan_duration =
-        cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int) / 2);
-
-    /* Switch time is the first element in the switch-timing IE. */
-    cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
-
-    info = IEEE80211_SKB_CB(skb);
-    hdr = (void*)skb->data;
-    if (info->control.hw_key) {
-        if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
-            rcu_read_unlock();
-            ret = -EINVAL;
-            goto out;
-        }
-        iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
-    }
-
-    iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info, mvmsta->sta_id);
-
-    iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta, hdr->frame_control);
+  rcu_read_lock();
+  sta = ieee80211_find_sta(vif, peer);
+  if (!sta) {
     rcu_read_unlock();
+    ret = -ENOENT;
+    goto out;
+  }
+  mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
 
-    memcpy(cmd.frame.data, skb->data, skb->len);
+  if (!chandef) {
+    if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && mvm->tdls_cs.peer.chandef.chan) {
+      /* actually moving to the channel */
+      chandef = &mvm->tdls_cs.peer.chandef;
+    } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE && type == TDLS_MOVE_CH) {
+      /* we need to return to base channel */
+      struct ieee80211_chanctx_conf* chanctx = rcu_dereference(vif->chanctx_conf);
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, sizeof(cmd), &cmd);
-    if (ret) {
-        IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n", ret);
+      if (WARN_ON_ONCE(!chanctx)) {
+        rcu_read_unlock();
         goto out;
-    }
+      }
 
-    /* channel switch has started, update state */
-    if (type != TDLS_MOVE_CH) {
-        mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
-        iwl_mvm_tdls_update_cs_state(mvm, type == TDLS_SEND_CHAN_SW_REQ ? IWL_MVM_TDLS_SW_REQ_SENT
-                                                                        : IWL_MVM_TDLS_SW_REQ_RCVD);
-    } else {
-        iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
+      chandef = &chanctx->def;
     }
+  }
+
+  if (chandef) {
+    cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? PHY_BAND_24 : PHY_BAND_5);
+    cmd.ci.channel = chandef->chan->hw_value;
+    cmd.ci.width = iwl_mvm_get_channel_width(chandef);
+    cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+  }
+
+  /* keep quota calculation simple for now - 50% of DTIM for TDLS */
+  cmd.timing.max_offchan_duration =
+      cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int) / 2);
+
+  /* Switch time is the first element in the switch-timing IE. */
+  cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
+
+  info = IEEE80211_SKB_CB(skb);
+  hdr = (void*)skb->data;
+  if (info->control.hw_key) {
+    if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
+      rcu_read_unlock();
+      ret = -EINVAL;
+      goto out;
+    }
+    iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+  }
+
+  iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info, mvmsta->sta_id);
+
+  iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta, hdr->frame_control);
+  rcu_read_unlock();
+
+  memcpy(cmd.frame.data, skb->data, skb->len);
+
+  ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, sizeof(cmd), &cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n", ret);
+    goto out;
+  }
+
+  /* channel switch has started, update state */
+  if (type != TDLS_MOVE_CH) {
+    mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
+    iwl_mvm_tdls_update_cs_state(
+        mvm, type == TDLS_SEND_CHAN_SW_REQ ? IWL_MVM_TDLS_SW_REQ_SENT : IWL_MVM_TDLS_SW_REQ_RCVD);
+  } else {
+    iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
+  }
 
 out:
 
-    /* channel switch failed - we are idle */
-    if (ret) { iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); }
+  /* channel switch failed - we are idle */
+  if (ret) {
+    iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+  }
 
-    return ret;
+  return ret;
 }
 
 void iwl_mvm_tdls_ch_switch_work(struct work_struct* work) {
-    struct iwl_mvm* mvm;
-    struct ieee80211_sta* sta;
-    struct iwl_mvm_sta* mvmsta;
-    struct ieee80211_vif* vif;
-    unsigned int delay;
-    int ret;
+  struct iwl_mvm* mvm;
+  struct ieee80211_sta* sta;
+  struct iwl_mvm_sta* mvmsta;
+  struct ieee80211_vif* vif;
+  unsigned int delay;
+  int ret;
 
-    mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
-    mutex_lock(&mvm->mutex);
+  mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
+  mutex_lock(&mvm->mutex);
 
-    /* called after an active channel switch has finished or timed-out */
-    iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+  /* called after an active channel switch has finished or timed-out */
+  iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
 
-    /* station might be gone, in that case do nothing */
-    if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) { goto out; }
+  /* station might be gone, in that case do nothing */
+  if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
+    goto out;
+  }
 
-    sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
-                                    lockdep_is_held(&mvm->mutex));
-    /* the station may not be here, but if it is, it must be a TDLS peer */
-    if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls)) { goto out; }
+  sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+                                  lockdep_is_held(&mvm->mutex));
+  /* the station may not be here, but if it is, it must be a TDLS peer */
+  if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls)) {
+    goto out;
+  }
 
-    mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    vif = mvmsta->vif;
-    ret = iwl_mvm_tdls_config_channel_switch(
-        mvm, vif, TDLS_SEND_CHAN_SW_REQ, sta->addr, mvm->tdls_cs.peer.initiator,
-        mvm->tdls_cs.peer.op_class, &mvm->tdls_cs.peer.chandef, 0, 0, 0, mvm->tdls_cs.peer.skb,
-        mvm->tdls_cs.peer.ch_sw_tm_ie);
-    if (ret) { IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret); }
+  mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  vif = mvmsta->vif;
+  ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, TDLS_SEND_CHAN_SW_REQ, sta->addr,
+                                           mvm->tdls_cs.peer.initiator, mvm->tdls_cs.peer.op_class,
+                                           &mvm->tdls_cs.peer.chandef, 0, 0, 0,
+                                           mvm->tdls_cs.peer.skb, mvm->tdls_cs.peer.ch_sw_tm_ie);
+  if (ret) {
+    IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);
+  }
 
-    /* retry after a DTIM if we failed sending now */
-    delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
-    schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
+  /* retry after a DTIM if we failed sending now */
+  delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+  schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
 out:
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 }
 
 int iwl_mvm_tdls_channel_switch(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                                 struct ieee80211_sta* sta, uint8_t oper_class,
                                 struct cfg80211_chan_def* chandef, struct sk_buff* tmpl_skb,
                                 uint32_t ch_sw_tm_ie) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct iwl_mvm_sta* mvmsta;
-    unsigned int delay;
-    int ret;
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_mvm_sta* mvmsta;
+  unsigned int delay;
+  int ret;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n", sta->addr,
-                   chandef->chan->center_freq, chandef->width);
+  IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n", sta->addr,
+                 chandef->chan->center_freq, chandef->width);
 
-    /* we only support a single peer for channel switching */
-    if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
-        IWL_DEBUG_TDLS(mvm, "Existing peer. Can't start switch with %pM\n", sta->addr);
-        ret = -EBUSY;
-        goto out;
-    }
+  /* we only support a single peer for channel switching */
+  if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
+    IWL_DEBUG_TDLS(mvm, "Existing peer. Can't start switch with %pM\n", sta->addr);
+    ret = -EBUSY;
+    goto out;
+  }
 
-    ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, TDLS_SEND_CHAN_SW_REQ, sta->addr,
-                                             sta->tdls_initiator, oper_class, chandef, 0, 0, 0,
-                                             tmpl_skb, ch_sw_tm_ie);
-    if (ret) { goto out; }
+  ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, TDLS_SEND_CHAN_SW_REQ, sta->addr,
+                                           sta->tdls_initiator, oper_class, chandef, 0, 0, 0,
+                                           tmpl_skb, ch_sw_tm_ie);
+  if (ret) {
+    goto out;
+  }
 
-    /*
-     * Mark the peer as "in tdls switch" for this vif. We only allow a
-     * single such peer per vif.
-     */
-    mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
-    if (!mvm->tdls_cs.peer.skb) {
-        ret = -ENOMEM;
-        goto out;
-    }
+  /*
+   * Mark the peer as "in tdls switch" for this vif. We only allow a
+   * single such peer per vif.
+   */
+  mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
+  if (!mvm->tdls_cs.peer.skb) {
+    ret = -ENOMEM;
+    goto out;
+  }
 
-    mvmsta = iwl_mvm_sta_from_mac80211(sta);
-    mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
-    mvm->tdls_cs.peer.chandef = *chandef;
-    mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
-    mvm->tdls_cs.peer.op_class = oper_class;
-    mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
+  mvmsta = iwl_mvm_sta_from_mac80211(sta);
+  mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
+  mvm->tdls_cs.peer.chandef = *chandef;
+  mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
+  mvm->tdls_cs.peer.op_class = oper_class;
+  mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
 
-    /*
-     * Wait for 2 DTIM periods before attempting the next switch. The next
-     * switch will be made sooner if the current one completes before that.
-     */
-    delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
-    mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
+  /*
+   * Wait for 2 DTIM periods before attempting the next switch. The next
+   * switch will be made sooner if the current one completes before that.
+   */
+  delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+  mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
 
 out:
-    mutex_unlock(&mvm->mutex);
-    return ret;
+  mutex_unlock(&mvm->mutex);
+  return ret;
 }
 
 void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                                         struct ieee80211_sta* sta) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct ieee80211_sta* cur_sta;
-    bool wait_for_phy = false;
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct ieee80211_sta* cur_sta;
+  bool wait_for_phy = false;
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
+  IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
 
-    /* we only support a single peer for channel switching */
-    if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
-        IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
-        goto out;
-    }
+  /* we only support a single peer for channel switching */
+  if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
+    IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
+    goto out;
+  }
 
-    cur_sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
-                                        lockdep_is_held(&mvm->mutex));
-    /* make sure it's the same peer */
-    if (cur_sta != sta) { goto out; }
+  cur_sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+                                      lockdep_is_held(&mvm->mutex));
+  /* make sure it's the same peer */
+  if (cur_sta != sta) {
+    goto out;
+  }
 
-    /*
-     * If we're currently in a switch because of the now canceled peer,
-     * wait a DTIM here to make sure the phy is back on the base channel.
-     * We can't otherwise force it.
-     */
-    if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
-        mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) {
-        wait_for_phy = true;
-    }
+  /*
+   * If we're currently in a switch because of the now canceled peer,
+   * wait a DTIM here to make sure the phy is back on the base channel.
+   * We can't otherwise force it.
+   */
+  if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
+      mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) {
+    wait_for_phy = true;
+  }
 
-    mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
-    dev_kfree_skb(mvm->tdls_cs.peer.skb);
-    mvm->tdls_cs.peer.skb = NULL;
+  mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
+  dev_kfree_skb(mvm->tdls_cs.peer.skb);
+  mvm->tdls_cs.peer.skb = NULL;
 
 out:
-    mutex_unlock(&mvm->mutex);
+  mutex_unlock(&mvm->mutex);
 
-    /* make sure the phy is on the base channel */
-    if (wait_for_phy) { msleep(TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int)); }
+  /* make sure the phy is on the base channel */
+  if (wait_for_phy) {
+    msleep(TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int));
+  }
 
-    /* flush the channel switch state */
-    flush_delayed_work(&mvm->tdls_cs.dwork);
+  /* flush the channel switch state */
+  flush_delayed_work(&mvm->tdls_cs.dwork);
 
-    IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
+  IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
 }
 
 void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw* hw, struct ieee80211_vif* vif,
                                       struct ieee80211_tdls_ch_sw_params* params) {
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    enum iwl_tdls_channel_switch_type type;
-    unsigned int delay;
-    const char* action_str =
-        params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ? "REQ" : "RESP";
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  enum iwl_tdls_channel_switch_type type;
+  unsigned int delay;
+  const char* action_str = params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ? "REQ" : "RESP";
 
-    mutex_lock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
 
-    IWL_DEBUG_TDLS(mvm, "Received TDLS ch switch action %s from %pM status %d\n", action_str,
-                   params->sta->addr, params->status);
+  IWL_DEBUG_TDLS(mvm, "Received TDLS ch switch action %s from %pM status %d\n", action_str,
+                 params->sta->addr, params->status);
 
-    /*
-     * we got a non-zero status from a peer we were switching to - move to
-     * the idle state and retry again later
-     */
-    if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && params->status != 0 &&
-        mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
-        mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
-        struct ieee80211_sta* cur_sta;
+  /*
+   * we got a non-zero status from a peer we were switching to - move to
+   * the idle state and retry again later
+   */
+  if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && params->status != 0 &&
+      mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
+      mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
+    struct ieee80211_sta* cur_sta;
 
-        /* make sure it's the same peer */
-        cur_sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
-                                            lockdep_is_held(&mvm->mutex));
-        if (cur_sta == params->sta) {
-            iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
-            goto retry;
-        }
+    /* make sure it's the same peer */
+    cur_sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
+                                        lockdep_is_held(&mvm->mutex));
+    if (cur_sta == params->sta) {
+      iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+      goto retry;
     }
+  }
 
-    type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST)
-               ? TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH
-               : TDLS_MOVE_CH;
+  type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST)
+             ? TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH
+             : TDLS_MOVE_CH;
 
-    iwl_mvm_tdls_config_channel_switch(
-        mvm, vif, type, params->sta->addr, params->sta->tdls_initiator, 0, params->chandef,
-        params->timestamp, params->switch_time, params->switch_timeout, params->tmpl_skb,
-        params->ch_sw_tm_ie);
+  iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr, params->sta->tdls_initiator,
+                                     0, params->chandef, params->timestamp, params->switch_time,
+                                     params->switch_timeout, params->tmpl_skb, params->ch_sw_tm_ie);
 
 retry:
-    /* register a timeout in case we don't succeed in switching */
-    delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int * 1024 / 1000;
-    mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
-    mutex_unlock(&mvm->mutex);
+  /* register a timeout in case we don't succeed in switching */
+  delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int * 1024 / 1000;
+  mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
+  mutex_unlock(&mvm->mutex);
 }
 
 #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE
 void iwl_mvm_tdls_peer_cache_pkt(struct iwl_mvm* mvm, struct ieee80211_hdr* hdr, uint32_t len,
                                  int rxq) {
-    struct iwl_mvm_tdls_peer_counter* cnt;
-    uint8_t* addr;
+  struct iwl_mvm_tdls_peer_counter* cnt;
+  uint8_t* addr;
 
-    /*
-     * To reduce code runtime and complexity, we don't check the packet
-     * arrived on the correct vif, or even if the current vif is a station.
-     * While it is theoretically possible for a TDLS peer to also be
-     * connected to us in the capacity of a AP/GO, this will not happen
-     * in practice.
-     */
+  /*
+   * To reduce code runtime and complexity, we don't check the packet
+   * arrived on the correct vif, or even if the current vif is a station.
+   * While it is theoretically possible for a TDLS peer to also be
+   * connected to us in the capacity of an AP/GO, this will not happen
+   * in practice.
+   */
 
-    if (list_empty(&mvm->tdls_peer_cache_list)) { return; }
+  if (list_empty(&mvm->tdls_peer_cache_list)) {
+    return;
+  }
 
-    if (len < sizeof(*hdr) || !ieee80211_is_data(hdr->frame_control)) { return; }
+  if (len < sizeof(*hdr) || !ieee80211_is_data(hdr->frame_control)) {
+    return;
+  }
 
-    addr = rxq < 0 ? ieee80211_get_DA(hdr) : ieee80211_get_SA(hdr);
+  addr = rxq < 0 ? ieee80211_get_DA(hdr) : ieee80211_get_SA(hdr);
 
-    /* we rely on the Rx and Tx path mutual atomicity for the counters */
-    rcu_read_lock();
-    list_for_each_entry_rcu(cnt, &mvm->tdls_peer_cache_list,
-                            list) if (ether_addr_equal(cnt->mac.addr, addr)) {
-        if (rxq < 0) {
-            cnt->tx_bytes += len;
-        } else {
-            cnt->rx[rxq].bytes += len;
-        }
-
-        break;
+  /* we rely on the Rx and Tx path mutual atomicity for the counters */
+  rcu_read_lock();
+  list_for_each_entry_rcu(cnt, &mvm->tdls_peer_cache_list,
+                          list) if (ether_addr_equal(cnt->mac.addr, addr)) {
+    if (rxq < 0) {
+      cnt->tx_bytes += len;
+    } else {
+      cnt->rx[rxq].bytes += len;
     }
-    rcu_read_unlock();
+
+    break;
+  }
+  rcu_read_unlock();
 }
 
 void iwl_mvm_tdls_peer_cache_clear(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_tdls_peer_counter *cnt, *tmp;
+  struct iwl_mvm_tdls_peer_counter *cnt, *tmp;
 
-    /*
-     * mvm->mutex is held or the HW is already unregistered, barring
-     * vendor commands that can change the list.
-     */
-    list_for_each_entry_safe(cnt, tmp, &mvm->tdls_peer_cache_list, list) {
-        if (vif && cnt->vif != vif) { continue; }
-
-        mvm->tdls_peer_cache_cnt--;
-        list_del_rcu(&cnt->list);
-        kfree_rcu(cnt, rcu_head);
+  /*
+   * mvm->mutex is held or the HW is already unregistered, barring
+   * vendor commands that can change the list.
+   */
+  list_for_each_entry_safe(cnt, tmp, &mvm->tdls_peer_cache_list, list) {
+    if (vif && cnt->vif != vif) {
+      continue;
     }
+
+    mvm->tdls_peer_cache_cnt--;
+    list_del_rcu(&cnt->list);
+    kfree_rcu(cnt, rcu_head);
+  }
 }
 
 /* requires RCU read side lock taken */
 struct iwl_mvm_tdls_peer_counter* iwl_mvm_tdls_peer_cache_find(struct iwl_mvm* mvm,
                                                                const uint8_t* addr) {
-    struct iwl_mvm_tdls_peer_counter* cnt;
+  struct iwl_mvm_tdls_peer_counter* cnt;
 
-    list_for_each_entry_rcu(cnt, &mvm->tdls_peer_cache_list,
-                            list) if (memcmp(addr, cnt->mac.addr, ETH_ALEN) == 0) {
-        break;
-    }
+  list_for_each_entry_rcu(cnt, &mvm->tdls_peer_cache_list,
+                          list) if (memcmp(addr, cnt->mac.addr, ETH_ALEN) == 0) {
+    break;
+  }
 
-    if (&cnt->list == &mvm->tdls_peer_cache_list) { return NULL; }
+  if (&cnt->list == &mvm->tdls_peer_cache_list) {
+    return NULL;
+  }
 
-    return cnt;
+  return cnt;
 }
 #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/time-event.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/time-event.c
index 08fb7119..df07207 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/time-event.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/time-event.c
@@ -34,6 +34,8 @@
  *
  *****************************************************************************/
 
+#include "time-event.h"
+
 #include <linux/jiffies.h>
 #include <net/mac80211.h>
 
@@ -43,7 +45,6 @@
 #include "iwl-prph.h"
 #include "iwl-trans.h"
 #include "mvm.h"
-#include "time-event.h"
 
 /*
  * For the high priority TE use a time event type that has similar priority to
@@ -53,183 +54,196 @@
 #define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
 
 void iwl_mvm_te_clear_data(struct iwl_mvm* mvm, struct iwl_mvm_time_event_data* te_data) {
-    lockdep_assert_held(&mvm->time_event_lock);
+  lockdep_assert_held(&mvm->time_event_lock);
 
-    if (!te_data->vif) { return; }
+  if (!te_data->vif) {
+    return;
+  }
 
-    list_del(&te_data->list);
-    te_data->running = false;
-    te_data->uid = 0;
-    te_data->id = TE_MAX;
-    te_data->vif = NULL;
+  list_del(&te_data->list);
+  te_data->running = false;
+  te_data->uid = 0;
+  te_data->id = TE_MAX;
+  te_data->vif = NULL;
 }
 
 void iwl_mvm_roc_done_wk(struct work_struct* wk) {
-    struct iwl_mvm* mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+  struct iwl_mvm* mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+  /*
+   * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
+   * This will cause the TX path to drop offchannel transmissions.
+   * That would also be done by mac80211, but it is racy, in particular
+   * in the case that the time event actually completed in the firmware
+   * (which is handled in iwl_mvm_te_handle_notif).
+   */
+  if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
+    iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+  }
+  if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
+    iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
+  }
+
+  synchronize_net();
+
+  /*
+   * Flush the offchannel queue -- this is called when the time
+   * event finishes or is canceled, so that frames queued for it
+   * won't get stuck on the queue and be transmitted in the next
+   * time event.
+   * We have to send the command asynchronously since this cannot
+   * be under the mutex for locking reasons, but that's not an
+   * issue as it will have to complete before the next command is
+   * executed, and a new time event means a new command.
+   */
+  iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
+
+  /* Do the same for the P2P device queue (STA) */
+  if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
+    struct iwl_mvm_vif* mvmvif;
 
     /*
-     * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
-     * This will cause the TX path to drop offchannel transmissions.
-     * That would also be done by mac80211, but it is racy, in particular
-     * in the case that the time event actually completed in the firmware
-     * (which is handled in iwl_mvm_te_handle_notif).
+     * NB: access to this pointer would be racy, but the flush bit
+     * can only be set when we had a P2P-Device VIF, and we have a
+     * flush of this work in iwl_mvm_prepare_mac_removal() so it's
+     * not really racy.
      */
-    if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
-        iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+
+    if (!WARN_ON(!mvm->p2p_device_vif)) {
+      mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
+      iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, CMD_ASYNC);
     }
-    if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
-        iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
-    }
-
-    synchronize_net();
-
-    /*
-     * Flush the offchannel queue -- this is called when the time
-     * event finishes or is canceled, so that frames queued for it
-     * won't get stuck on the queue and be transmitted in the next
-     * time event.
-     * We have to send the command asynchronously since this cannot
-     * be under the mutex for locking reasons, but that's not an
-     * issue as it will have to complete before the next command is
-     * executed, and a new time event means a new command.
-     */
-    iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
-
-    /* Do the same for the P2P device queue (STA) */
-    if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
-        struct iwl_mvm_vif* mvmvif;
-
-        /*
-         * NB: access to this pointer would be racy, but the flush bit
-         * can only be set when we had a P2P-Device VIF, and we have a
-         * flush of this work in iwl_mvm_prepare_mac_removal() so it's
-         * not really racy.
-         */
-
-        if (!WARN_ON(!mvm->p2p_device_vif)) {
-            mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
-            iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, CMD_ASYNC);
-        }
-    }
+  }
 }
 
 static void iwl_mvm_roc_finished(struct iwl_mvm* mvm) {
-    /*
-     * Of course, our status bit is just as racy as mac80211, so in
-     * addition, fire off the work struct which will drop all frames
-     * from the hardware queues that made it through the race. First
-     * it will of course synchronize the TX path to make sure that
-     * any *new* TX will be rejected.
-     */
-    schedule_work(&mvm->roc_done_wk);
+  /*
+   * Of course, our status bit is just as racy as mac80211, so in
+   * addition, fire off the work struct which will drop all frames
+   * from the hardware queues that made it through the race. First
+   * it will of course synchronize the TX path to make sure that
+   * any *new* TX will be rejected.
+   */
+  schedule_work(&mvm->roc_done_wk);
 }
 
 static void iwl_mvm_csa_noa_start(struct iwl_mvm* mvm) {
-    struct ieee80211_vif* csa_vif;
+  struct ieee80211_vif* csa_vif;
 
-    rcu_read_lock();
+  rcu_read_lock();
 
-    csa_vif = rcu_dereference(mvm->csa_vif);
-    if (!csa_vif || !csa_vif->csa_active) { goto out_unlock; }
+  csa_vif = rcu_dereference(mvm->csa_vif);
+  if (!csa_vif || !csa_vif->csa_active) {
+    goto out_unlock;
+  }
 
-    IWL_DEBUG_TE(mvm, "CSA NOA started\n");
+  IWL_DEBUG_TE(mvm, "CSA NOA started\n");
 
-    /*
-     * CSA NoA is started but we still have beacons to
-     * transmit on the current channel.
-     * So we just do nothing here and the switch
-     * will be performed on the last TBTT.
-     */
-    if (!ieee80211_csa_is_complete(csa_vif)) {
-        IWL_WARN(mvm, "CSA NOA started too early\n");
-        goto out_unlock;
-    }
+  /*
+   * CSA NoA is started but we still have beacons to
+   * transmit on the current channel.
+   * So we just do nothing here and the switch
+   * will be performed on the last TBTT.
+   */
+  if (!ieee80211_csa_is_complete(csa_vif)) {
+    IWL_WARN(mvm, "CSA NOA started too early\n");
+    goto out_unlock;
+  }
 
-    ieee80211_csa_finish(csa_vif);
+  ieee80211_csa_finish(csa_vif);
 
-    rcu_read_unlock();
+  rcu_read_unlock();
 
-    RCU_INIT_POINTER(mvm->csa_vif, NULL);
+  RCU_INIT_POINTER(mvm->csa_vif, NULL);
 
-    return;
+  return;
 
 out_unlock:
-    rcu_read_unlock();
+  rcu_read_unlock();
 }
 
 static bool iwl_mvm_te_check_disconnect(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                         const char* errmsg) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (vif->type != NL80211_IFTYPE_STATION) { return false; }
+  if (vif->type != NL80211_IFTYPE_STATION) {
+    return false;
+  }
 
-    if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc && vif->bss_conf.dtim_period) {
-        return false;
-    }
-    if (errmsg) { IWL_ERR(mvm, "%s\n", errmsg); }
+  if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc && vif->bss_conf.dtim_period) {
+    return false;
+  }
+  if (errmsg) {
+    IWL_ERR(mvm, "%s\n", errmsg);
+  }
 
-    iwl_mvm_connection_loss(mvm, vif, errmsg);
-    return true;
+  iwl_mvm_connection_loss(mvm, vif, errmsg);
+  return true;
 }
 
 static void iwl_mvm_te_handle_notify_csa(struct iwl_mvm* mvm,
                                          struct iwl_mvm_time_event_data* te_data,
                                          struct iwl_time_event_notif* notif) {
-    struct ieee80211_vif* vif = te_data->vif;
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct ieee80211_vif* vif = te_data->vif;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    if (!notif->status) { IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); }
+  if (!notif->status) {
+    IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
+  }
 
-    switch (te_data->vif->type) {
+  switch (te_data->vif->type) {
     case NL80211_IFTYPE_AP:
-        if (!notif->status) { mvmvif->csa_failed = true; }
-        iwl_mvm_csa_noa_start(mvm);
-        break;
+      if (!notif->status) {
+        mvmvif->csa_failed = true;
+      }
+      iwl_mvm_csa_noa_start(mvm);
+      break;
     case NL80211_IFTYPE_STATION:
-        if (!notif->status) {
-            iwl_mvm_connection_loss(mvm, vif, "CSA TE failed to start");
-            break;
-        }
-        iwl_mvm_csa_client_absent(mvm, te_data->vif);
-        ieee80211_chswitch_done(te_data->vif, true);
+      if (!notif->status) {
+        iwl_mvm_connection_loss(mvm, vif, "CSA TE failed to start");
         break;
+      }
+      iwl_mvm_csa_client_absent(mvm, te_data->vif);
+      ieee80211_chswitch_done(te_data->vif, true);
+      break;
     default:
-        /* should never happen */
-        WARN_ON_ONCE(1);
-        break;
-    }
+      /* should never happen */
+      WARN_ON_ONCE(1);
+      break;
+  }
 
-    /* we don't need it anymore */
-    iwl_mvm_te_clear_data(mvm, te_data);
+  /* we don't need it anymore */
+  iwl_mvm_te_clear_data(mvm, te_data);
 }
 
 static void iwl_mvm_te_check_trigger(struct iwl_mvm* mvm, struct iwl_time_event_notif* notif,
                                      struct iwl_mvm_time_event_data* te_data) {
-    struct iwl_fw_dbg_trigger_tlv* trig;
-    struct iwl_fw_dbg_trigger_time_event* te_trig;
-    int i;
+  struct iwl_fw_dbg_trigger_tlv* trig;
+  struct iwl_fw_dbg_trigger_time_event* te_trig;
+  int i;
 
-    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(te_data->vif),
-                                 FW_DBG_TRIGGER_TIME_EVENT);
-    if (!trig) { return; }
+  trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(te_data->vif),
+                               FW_DBG_TRIGGER_TIME_EVENT);
+  if (!trig) {
+    return;
+  }
 
-    te_trig = (void*)trig->data;
+  te_trig = (void*)trig->data;
 
-    for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
-        uint32_t trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
-        uint32_t trig_action_bitmap = le32_to_cpu(te_trig->time_events[i].action_bitmap);
-        uint32_t trig_status_bitmap = le32_to_cpu(te_trig->time_events[i].status_bitmap);
+  for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
+    uint32_t trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
+    uint32_t trig_action_bitmap = le32_to_cpu(te_trig->time_events[i].action_bitmap);
+    uint32_t trig_status_bitmap = le32_to_cpu(te_trig->time_events[i].status_bitmap);
 
-        if (trig_te_id != te_data->id || !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
-            !(trig_status_bitmap & BIT(le32_to_cpu(notif->status)))) {
-            continue;
-        }
-
-        iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Time event %d Action 0x%x received status: %d",
-                                te_data->id, le32_to_cpu(notif->action),
-                                le32_to_cpu(notif->status));
-        break;
+    if (trig_te_id != te_data->id || !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
+        !(trig_status_bitmap & BIT(le32_to_cpu(notif->status)))) {
+      continue;
     }
+
+    iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Time event %d Action 0x%x received status: %d",
+                            te_data->id, le32_to_cpu(notif->action), le32_to_cpu(notif->status));
+    break;
+  }
 }
 
 /*
@@ -241,75 +255,75 @@
  */
 static void iwl_mvm_te_handle_notif(struct iwl_mvm* mvm, struct iwl_mvm_time_event_data* te_data,
                                     struct iwl_time_event_notif* notif) {
-    lockdep_assert_held(&mvm->time_event_lock);
+  lockdep_assert_held(&mvm->time_event_lock);
 
-    IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
-                 le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action));
+  IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
+               le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action));
 
-    iwl_mvm_te_check_trigger(mvm, notif, te_data);
+  iwl_mvm_te_check_trigger(mvm, notif, te_data);
 
-    /*
-     * The FW sends the start/end time event notifications even for events
-     * that it fails to schedule. This is indicated in the status field of
-     * the notification. This happens in cases that the scheduler cannot
-     * find a schedule that can handle the event (for example requesting a
-     * P2P Device discoveribility, while there are other higher priority
-     * events in the system).
-     */
-    if (!le32_to_cpu(notif->status)) {
-        const char* msg;
+  /*
+   * The FW sends the start/end time event notifications even for events
+   * that it fails to schedule. This is indicated in the status field of
+   * the notification. This happens in cases that the scheduler cannot
+   * find a schedule that can handle the event (for example requesting a
+   * P2P Device discoverability, while there are other higher priority
+   * events in the system).
+   */
+  if (!le32_to_cpu(notif->status)) {
+    const char* msg;
 
-        if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START)) {
-            msg = "Time Event start notification failure";
-        } else {
-            msg = "Time Event end notification failure";
-        }
-
-        IWL_DEBUG_TE(mvm, "%s\n", msg);
-
-        if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
-            iwl_mvm_te_clear_data(mvm, te_data);
-            return;
-        }
-    }
-
-    if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
-        IWL_DEBUG_TE(mvm, "TE ended - current time %lu, estimated end %lu\n", jiffies,
-                     te_data->end_jiffies);
-
-        switch (te_data->vif->type) {
-        case NL80211_IFTYPE_P2P_DEVICE:
-            ieee80211_remain_on_channel_expired(mvm->hw);
-            set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
-            iwl_mvm_roc_finished(mvm);
-            break;
-        case NL80211_IFTYPE_STATION:
-            /*
-             * By now, we should have finished association
-             * and know the dtim period.
-             */
-            iwl_mvm_te_check_disconnect(mvm, te_data->vif,
-                                        "No beacon heard and the time event is over already...");
-            break;
-        default:
-            break;
-        }
-
-        iwl_mvm_te_clear_data(mvm, te_data);
-    } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
-        te_data->running = true;
-        te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
-
-        if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-            set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
-            iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
-            ieee80211_ready_on_channel(mvm->hw);
-        } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
-            iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
-        }
+    if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START)) {
+      msg = "Time Event start notification failure";
     } else {
-        IWL_WARN(mvm, "Got TE with unknown action\n");
+      msg = "Time Event end notification failure";
     }
+
+    IWL_DEBUG_TE(mvm, "%s\n", msg);
+
+    if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
+      iwl_mvm_te_clear_data(mvm, te_data);
+      return;
+    }
+  }
+
+  if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
+    IWL_DEBUG_TE(mvm, "TE ended - current time %lu, estimated end %lu\n", jiffies,
+                 te_data->end_jiffies);
+
+    switch (te_data->vif->type) {
+      case NL80211_IFTYPE_P2P_DEVICE:
+        ieee80211_remain_on_channel_expired(mvm->hw);
+        set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+        iwl_mvm_roc_finished(mvm);
+        break;
+      case NL80211_IFTYPE_STATION:
+        /*
+         * By now, we should have finished association
+         * and know the dtim period.
+         */
+        iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+                                    "No beacon heard and the time event is over already...");
+        break;
+      default:
+        break;
+    }
+
+    iwl_mvm_te_clear_data(mvm, te_data);
+  } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
+    te_data->running = true;
+    te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
+
+    if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+      set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+      iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
+      ieee80211_ready_on_channel(mvm->hw);
+    } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
+      iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
+    }
+  } else {
+    IWL_WARN(mvm, "Got TE with unknown action\n");
+  }
 }
 
 /*
@@ -317,274 +331,285 @@
  */
 static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm* mvm,
                                            struct iwl_time_event_notif* notif) {
-    struct iwl_mvm_time_event_data *te_data, *tmp;
-    bool aux_roc_te = false;
+  struct iwl_mvm_time_event_data *te_data, *tmp;
+  bool aux_roc_te = false;
 
-    list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
-        if (le32_to_cpu(notif->unique_id) == te_data->uid) {
-            aux_roc_te = true;
-            break;
-        }
+  list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
+    if (le32_to_cpu(notif->unique_id) == te_data->uid) {
+      aux_roc_te = true;
+      break;
     }
-    if (!aux_roc_te) { /* Not a Aux ROC time event */
-        return -EINVAL;
-    }
+  }
+  if (!aux_roc_te) { /* Not an Aux ROC time event */
+    return -EINVAL;
+  }
 
-    iwl_mvm_te_check_trigger(mvm, notif, te_data);
+  iwl_mvm_te_check_trigger(mvm, notif, te_data);
 
-    IWL_DEBUG_TE(mvm, "Aux ROC time event notification  - UID = 0x%x action %d (error = %d)\n",
-                 le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action),
-                 le32_to_cpu(notif->status));
+  IWL_DEBUG_TE(mvm, "Aux ROC time event notification  - UID = 0x%x action %d (error = %d)\n",
+               le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action),
+               le32_to_cpu(notif->status));
 
-    if (!le32_to_cpu(notif->status) || le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
-        /* End TE, notify mac80211 */
-        ieee80211_remain_on_channel_expired(mvm->hw);
-        iwl_mvm_roc_finished(mvm); /* flush aux queue */
-        list_del(&te_data->list);  /* remove from list */
-        te_data->running = false;
-        te_data->vif = NULL;
-        te_data->uid = 0;
-        te_data->id = TE_MAX;
-    } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
-        set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
-        te_data->running = true;
-        iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
-        ieee80211_ready_on_channel(mvm->hw); /* Start TE */
-    } else {
-        IWL_DEBUG_TE(mvm, "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
-                     le32_to_cpu(notif->action));
-        return -EINVAL;
-    }
+  if (!le32_to_cpu(notif->status) || le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
+    /* End TE, notify mac80211 */
+    ieee80211_remain_on_channel_expired(mvm->hw);
+    iwl_mvm_roc_finished(mvm); /* flush aux queue */
+    list_del(&te_data->list);  /* remove from list */
+    te_data->running = false;
+    te_data->vif = NULL;
+    te_data->uid = 0;
+    te_data->id = TE_MAX;
+  } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
+    set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+    te_data->running = true;
+    iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
+    ieee80211_ready_on_channel(mvm->hw); /* Start TE */
+  } else {
+    IWL_DEBUG_TE(mvm, "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
+                 le32_to_cpu(notif->action));
+    return -EINVAL;
+  }
 
-    return 0;
+  return 0;
 }
 
 /*
  * The Rx handler for time event notifications
  */
 void iwl_mvm_rx_time_event_notif(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_time_event_notif* notif = (void*)pkt->data;
-    struct iwl_mvm_time_event_data *te_data, *tmp;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_time_event_notif* notif = (void*)pkt->data;
+  struct iwl_mvm_time_event_data *te_data, *tmp;
 
-    IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
-                 le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action));
+  IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
+               le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action));
 
-    spin_lock_bh(&mvm->time_event_lock);
-    /* This time event is triggered for Aux ROC request */
-    if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif)) { goto unlock; }
+  spin_lock_bh(&mvm->time_event_lock);
+  /* This time event is triggered for Aux ROC request */
+  if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif)) {
+    goto unlock;
+  }
 
-    list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
-        if (le32_to_cpu(notif->unique_id) == te_data->uid) {
-            iwl_mvm_te_handle_notif(mvm, te_data, notif);
-        }
+  list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
+    if (le32_to_cpu(notif->unique_id) == te_data->uid) {
+      iwl_mvm_te_handle_notif(mvm, te_data, notif);
     }
+  }
 unlock:
-    spin_unlock_bh(&mvm->time_event_lock);
+  spin_unlock_bh(&mvm->time_event_lock);
 }
 
 static bool iwl_mvm_te_notif(struct iwl_notif_wait_data* notif_wait, struct iwl_rx_packet* pkt,
                              void* data) {
-    struct iwl_mvm* mvm = container_of(notif_wait, struct iwl_mvm, notif_wait);
-    struct iwl_mvm_time_event_data* te_data = data;
-    struct iwl_time_event_notif* resp;
-    int resp_len = iwl_rx_packet_payload_len(pkt);
+  struct iwl_mvm* mvm = container_of(notif_wait, struct iwl_mvm, notif_wait);
+  struct iwl_mvm_time_event_data* te_data = data;
+  struct iwl_time_event_notif* resp;
+  int resp_len = iwl_rx_packet_payload_len(pkt);
 
-    if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION)) { return true; }
-
-    if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
-        IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
-        return true;
-    }
-
-    resp = (void*)pkt->data;
-
-    /* te_data->uid is already set in the TIME_EVENT_CMD response */
-    if (le32_to_cpu(resp->unique_id) != te_data->uid) { return false; }
-
-    IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n", te_data->uid);
-    if (!resp->status) { IWL_ERR(mvm, "TIME_EVENT_NOTIFICATION received but not executed\n"); }
-
+  if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION)) {
     return true;
+  }
+
+  if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+    IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
+    return true;
+  }
+
+  resp = (void*)pkt->data;
+
+  /* te_data->uid is already set in the TIME_EVENT_CMD response */
+  if (le32_to_cpu(resp->unique_id) != te_data->uid) {
+    return false;
+  }
+
+  IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n", te_data->uid);
+  if (!resp->status) {
+    IWL_ERR(mvm, "TIME_EVENT_NOTIFICATION received but not executed\n");
+  }
+
+  return true;
 }
 
 static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data* notif_wait,
                                         struct iwl_rx_packet* pkt, void* data) {
-    struct iwl_mvm* mvm = container_of(notif_wait, struct iwl_mvm, notif_wait);
-    struct iwl_mvm_time_event_data* te_data = data;
-    struct iwl_time_event_resp* resp;
-    int resp_len = iwl_rx_packet_payload_len(pkt);
+  struct iwl_mvm* mvm = container_of(notif_wait, struct iwl_mvm, notif_wait);
+  struct iwl_mvm_time_event_data* te_data = data;
+  struct iwl_time_event_resp* resp;
+  int resp_len = iwl_rx_packet_payload_len(pkt);
 
-    if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD)) { return true; }
-
-    if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
-        IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
-        return true;
-    }
-
-    resp = (void*)pkt->data;
-
-    /* we should never get a response to another TIME_EVENT_CMD here */
-    if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id)) { return false; }
-
-    te_data->uid = le32_to_cpu(resp->unique_id);
-    IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", te_data->uid);
+  if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD)) {
     return true;
+  }
+
+  if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+    IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
+    return true;
+  }
+
+  resp = (void*)pkt->data;
+
+  /* we should never get a response to another TIME_EVENT_CMD here */
+  if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id)) {
+    return false;
+  }
+
+  te_data->uid = le32_to_cpu(resp->unique_id);
+  IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", te_data->uid);
+  return true;
 }
 
 static int iwl_mvm_time_event_send_add(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                        struct iwl_mvm_time_event_data* te_data,
                                        struct iwl_time_event_cmd* te_cmd) {
-    static const uint16_t time_event_response[] = {TIME_EVENT_CMD};
-    struct iwl_notification_wait wait_time_event;
-    int ret;
+  static const uint16_t time_event_response[] = {TIME_EVENT_CMD};
+  struct iwl_notification_wait wait_time_event;
+  int ret;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n", le32_to_cpu(te_cmd->duration));
+  IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n", le32_to_cpu(te_cmd->duration));
 
-    spin_lock_bh(&mvm->time_event_lock);
-    if (WARN_ON(te_data->id != TE_MAX)) {
-        spin_unlock_bh(&mvm->time_event_lock);
-        return -EIO;
-    }
-    te_data->vif = vif;
-    te_data->duration = le32_to_cpu(te_cmd->duration);
-    te_data->id = le32_to_cpu(te_cmd->id);
-    list_add_tail(&te_data->list, &mvm->time_event_list);
+  spin_lock_bh(&mvm->time_event_lock);
+  if (WARN_ON(te_data->id != TE_MAX)) {
     spin_unlock_bh(&mvm->time_event_lock);
+    return -EIO;
+  }
+  te_data->vif = vif;
+  te_data->duration = le32_to_cpu(te_cmd->duration);
+  te_data->id = le32_to_cpu(te_cmd->id);
+  list_add_tail(&te_data->list, &mvm->time_event_list);
+  spin_unlock_bh(&mvm->time_event_lock);
 
-    /*
-     * Use a notification wait, which really just processes the
-     * command response and doesn't wait for anything, in order
-     * to be able to process the response and get the UID inside
-     * the RX path. Using CMD_WANT_SKB doesn't work because it
-     * stores the buffer and then wakes up this thread, by which
-     * time another notification (that the time event started)
-     * might already be processed unsuccessfully.
-     */
-    iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, time_event_response,
-                               ARRAY_SIZE(time_event_response), iwl_mvm_time_event_response,
-                               te_data);
+  /*
+   * Use a notification wait, which really just processes the
+   * command response and doesn't wait for anything, in order
+   * to be able to process the response and get the UID inside
+   * the RX path. Using CMD_WANT_SKB doesn't work because it
+   * stores the buffer and then wakes up this thread, by which
+   * time another notification (that the time event started)
+   * might already be processed unsuccessfully.
+   */
+  iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, time_event_response,
+                             ARRAY_SIZE(time_event_response), iwl_mvm_time_event_response, te_data);
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, sizeof(*te_cmd), te_cmd);
-    if (ret) {
-        IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
-        iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
-        goto out_clear_te;
-    }
+  ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, sizeof(*te_cmd), te_cmd);
+  if (ret) {
+    IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
+    iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+    goto out_clear_te;
+  }
 
-    /* No need to wait for anything, so just pass 1 (0 isn't valid) */
-    ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
-    /* should never fail */
-    WARN_ON_ONCE(ret);
+  /* No need to wait for anything, so just pass 1 (0 isn't valid) */
+  ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+  /* should never fail */
+  WARN_ON_ONCE(ret);
 
-    if (ret) {
-    out_clear_te:
-        spin_lock_bh(&mvm->time_event_lock);
-        iwl_mvm_te_clear_data(mvm, te_data);
-        spin_unlock_bh(&mvm->time_event_lock);
-    }
-    return ret;
+  if (ret) {
+  out_clear_te:
+    spin_lock_bh(&mvm->time_event_lock);
+    iwl_mvm_te_clear_data(mvm, te_data);
+    spin_unlock_bh(&mvm->time_event_lock);
+  }
+  return ret;
 }
 
 void iwl_mvm_protect_session(struct iwl_mvm* mvm, struct ieee80211_vif* vif, uint32_t duration,
                              uint32_t min_duration, uint32_t max_delay, bool wait_for_notif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_time_event_data* te_data = &mvmvif->time_event_data;
-    const uint16_t te_notif_response[] = {TIME_EVENT_NOTIFICATION};
-    struct iwl_notification_wait wait_te_notif;
-    struct iwl_time_event_cmd time_cmd = {};
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_time_event_data* te_data = &mvmvif->time_event_data;
+  const uint16_t te_notif_response[] = {TIME_EVENT_NOTIFICATION};
+  struct iwl_notification_wait wait_te_notif;
+  struct iwl_time_event_cmd time_cmd = {};
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (te_data->running && time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
-        IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
-                     jiffies_to_msecs(te_data->end_jiffies - jiffies));
-        return;
-    }
+  if (te_data->running && time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
+    IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
+                 jiffies_to_msecs(te_data->end_jiffies - jiffies));
+    return;
+  }
 
-    if (te_data->running) {
-        IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n", te_data->uid,
-                     jiffies_to_msecs(te_data->end_jiffies - jiffies));
-        /*
-         * we don't have enough time
-         * cancel the current TE and issue a new one
-         * Of course it would be better to remove the old one only
-         * when the new one is added, but we don't care if we are off
-         * channel for a bit. All we need to do, is not to return
-         * before we actually begin to be on the channel.
-         */
-        iwl_mvm_stop_session_protection(mvm, vif);
-    }
-
-    time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
-    time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
-    time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
-
-    time_cmd.apply_time = cpu_to_le32(0);
-
-    time_cmd.max_frags = TE_V2_FRAG_NONE;
-    time_cmd.max_delay = cpu_to_le32(max_delay);
-    /* TODO: why do we need to interval = bi if it is not periodic? */
-    time_cmd.interval = cpu_to_le32(1);
-    time_cmd.duration = cpu_to_le32(duration);
-    time_cmd.repeat = 1;
-    time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END |
-                                  TE_V2_START_IMMEDIATELY);
-
-    if (!wait_for_notif) {
-        iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
-        return;
-    }
-
+  if (te_data->running) {
+    IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n", te_data->uid,
+                 jiffies_to_msecs(te_data->end_jiffies - jiffies));
     /*
-     * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
-     * right after we send the time event
+     * we don't have enough time
+     * cancel the current TE and issue a new one
+     * Of course it would be better to remove the old one only
+     * when the new one is added, but we don't care if we are off
+     * channel for a bit. All we need to do, is not to return
+     * before we actually begin to be on the channel.
      */
-    iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif, te_notif_response,
-                               ARRAY_SIZE(te_notif_response), iwl_mvm_te_notif, te_data);
+    iwl_mvm_stop_session_protection(mvm, vif);
+  }
 
-    /* If TE was sent OK - wait for the notification that started */
-    if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
-        IWL_ERR(mvm, "Failed to add TE to protect session\n");
-        iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
-    } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif, TU_TO_JIFFIES(max_delay))) {
-        IWL_ERR(mvm, "Failed to protect session until TE\n");
-    }
+  time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+  time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+  time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
+
+  time_cmd.apply_time = cpu_to_le32(0);
+
+  time_cmd.max_frags = TE_V2_FRAG_NONE;
+  time_cmd.max_delay = cpu_to_le32(max_delay);
+  /* TODO: why do we need to interval = bi if it is not periodic? */
+  time_cmd.interval = cpu_to_le32(1);
+  time_cmd.duration = cpu_to_le32(duration);
+  time_cmd.repeat = 1;
+  time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END |
+                                TE_V2_START_IMMEDIATELY);
+
+  if (!wait_for_notif) {
+    iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+    return;
+  }
+
+  /*
+   * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
+   * right after we send the time event
+   */
+  iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif, te_notif_response,
+                             ARRAY_SIZE(te_notif_response), iwl_mvm_te_notif, te_data);
+
+  /* If TE was sent OK - wait for the notification that started */
+  if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
+    IWL_ERR(mvm, "Failed to add TE to protect session\n");
+    iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
+  } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif, TU_TO_JIFFIES(max_delay))) {
+    IWL_ERR(mvm, "Failed to protect session until TE\n");
+  }
 }
 
 static bool __iwl_mvm_remove_time_event(struct iwl_mvm* mvm,
                                         struct iwl_mvm_time_event_data* te_data, uint32_t* uid) {
-    uint32_t id;
+  uint32_t id;
 
-    /*
-     * It is possible that by the time we got to this point the time
-     * event was already removed.
-     */
-    spin_lock_bh(&mvm->time_event_lock);
+  /*
+   * It is possible that by the time we got to this point the time
+   * event was already removed.
+   */
+  spin_lock_bh(&mvm->time_event_lock);
 
-    /* Save time event uid before clearing its data */
-    *uid = te_data->uid;
-    id = te_data->id;
+  /* Save time event uid before clearing its data */
+  *uid = te_data->uid;
+  id = te_data->id;
 
-    /*
-     * The clear_data function handles time events that were already removed
-     */
-    iwl_mvm_te_clear_data(mvm, te_data);
-    spin_unlock_bh(&mvm->time_event_lock);
+  /*
+   * The clear_data function handles time events that were already removed
+   */
+  iwl_mvm_te_clear_data(mvm, te_data);
+  spin_unlock_bh(&mvm->time_event_lock);
 
-    /*
-     * It is possible that by the time we try to remove it, the time event
-     * has already ended and removed. In such a case there is no need to
-     * send a removal command.
-     */
-    if (id == TE_MAX) {
-        IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
-        return false;
-    }
+  /*
+   * It is possible that by the time we try to remove it, the time event
+   * has already ended and removed. In such a case there is no need to
+   * send a removal command.
+   */
+  if (id == TE_MAX) {
+    IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
+    return false;
+  }
 
-    return true;
+  return true;
 }
 
 /*
@@ -595,19 +620,23 @@
  */
 static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm* mvm, struct iwl_mvm_vif* mvmvif,
                                       struct iwl_mvm_time_event_data* te_data) {
-    struct iwl_hs20_roc_req aux_cmd = {};
-    uint32_t uid;
-    int ret;
+  struct iwl_hs20_roc_req aux_cmd = {};
+  uint32_t uid;
+  int ret;
 
-    if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) { return; }
+  if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) {
+    return;
+  }
 
-    aux_cmd.event_unique_id = cpu_to_le32(uid);
-    aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
-    aux_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
-    IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n", le32_to_cpu(aux_cmd.event_unique_id));
-    ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_cmd), &aux_cmd);
+  aux_cmd.event_unique_id = cpu_to_le32(uid);
+  aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+  aux_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+  IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n", le32_to_cpu(aux_cmd.event_unique_id));
+  ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_cmd), &aux_cmd);
 
-    if (WARN_ON(ret)) { return; }
+  if (WARN_ON(ret)) {
+    return;
+  }
 }
 
 /*
@@ -617,182 +646,192 @@
  */
 void iwl_mvm_remove_time_event(struct iwl_mvm* mvm, struct iwl_mvm_vif* mvmvif,
                                struct iwl_mvm_time_event_data* te_data) {
-    struct iwl_time_event_cmd time_cmd = {};
-    uint32_t uid;
-    int ret;
+  struct iwl_time_event_cmd time_cmd = {};
+  uint32_t uid;
+  int ret;
 
-    if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) { return; }
+  if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) {
+    return;
+  }
 
-    /* When we remove a TE, the UID is to be set in the id field */
-    time_cmd.id = cpu_to_le32(uid);
-    time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
-    time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+  /* When we remove a TE, the UID is to be set in the id field */
+  time_cmd.id = cpu_to_le32(uid);
+  time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+  time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
 
-    IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
-    ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, sizeof(time_cmd), &time_cmd);
-    if (WARN_ON(ret)) { return; }
+  IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
+  ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, sizeof(time_cmd), &time_cmd);
+  if (WARN_ON(ret)) {
+    return;
+  }
 }
 
 void iwl_mvm_stop_session_protection(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_time_event_data* te_data = &mvmvif->time_event_data;
-    uint32_t id;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_time_event_data* te_data = &mvmvif->time_event_data;
+  uint32_t id;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
+
+  spin_lock_bh(&mvm->time_event_lock);
+  id = te_data->id;
+  spin_unlock_bh(&mvm->time_event_lock);
+
+  if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
+    IWL_DEBUG_TE(mvm, "don't remove TE with id=%u (not session protection)\n", id);
+    return;
+  }
+
+  iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+}
+
+int iwl_mvm_start_p2p_roc(struct iwl_mvm* mvm, struct ieee80211_vif* vif, int duration,
+                          enum ieee80211_roc_type type) {
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_time_event_data* te_data = &mvmvif->time_event_data;
+  struct iwl_time_event_cmd time_cmd = {};
+
+  lockdep_assert_held(&mvm->mutex);
+  if (te_data->running) {
+    IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
+    return -EBUSY;
+  }
+
+  time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+  time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+
+  switch (type) {
+    case IEEE80211_ROC_TYPE_NORMAL:
+      time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
+      break;
+    case IEEE80211_ROC_TYPE_MGMT_TX:
+      time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
+      break;
+    default:
+      WARN_ONCE(1, "Got an invalid ROC type\n");
+      return -EINVAL;
+  }
+
+  time_cmd.apply_time = cpu_to_le32(0);
+  time_cmd.interval = cpu_to_le32(1);
+
+  /*
+   * The P2P Device TEs can have lower priority than other events
+   * that are being scheduled by the driver/fw, and thus it might not be
+   * scheduled. To improve the chances of it being scheduled, allow them
+   * to be fragmented, and in addition allow them to be delayed.
+   */
+  time_cmd.max_frags = min(MSEC_TO_TU(duration) / 50, TE_V2_FRAG_ENDLESS);
+  time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration / 2));
+  time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
+  time_cmd.repeat = 1;
+  time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END |
+                                TE_V2_START_IMMEDIATELY);
+
+  return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
+
+static struct iwl_mvm_time_event_data* iwl_mvm_get_roc_te(struct iwl_mvm* mvm) {
+  struct iwl_mvm_time_event_data* te_data;
+
+  lockdep_assert_held(&mvm->mutex);
+
+  spin_lock_bh(&mvm->time_event_lock);
+
+  /*
+   * Iterate over the list of time events and find the time event that is
+   * associated with a P2P_DEVICE interface.
+   * This assumes that a P2P_DEVICE interface can have only a single time
+   * event at any given time and this time event corresponds to a ROC
+   * request
+   */
+  list_for_each_entry(te_data, &mvm->time_event_list, list) {
+    if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+      goto out;
+    }
+  }
+
+  /* There can only be at most one AUX ROC time event, we just use the
+   * list to simplify/unify code. Remove it if it exists.
+   */
+  te_data = list_first_entry_or_null(&mvm->aux_roc_te_list, struct iwl_mvm_time_event_data, list);
+out:
+  spin_unlock_bh(&mvm->time_event_lock);
+  return te_data;
+}
+
+void iwl_mvm_cleanup_roc_te(struct iwl_mvm* mvm) {
+  struct iwl_mvm_time_event_data* te_data;
+  uint32_t uid;
+
+  te_data = iwl_mvm_get_roc_te(mvm);
+  if (te_data) {
+    __iwl_mvm_remove_time_event(mvm, te_data, &uid);
+  }
+}
+
+void iwl_mvm_stop_roc(struct iwl_mvm* mvm) {
+  struct iwl_mvm_vif* mvmvif;
+  struct iwl_mvm_time_event_data* te_data;
+
+  te_data = iwl_mvm_get_roc_te(mvm);
+  if (!te_data) {
+    IWL_WARN(mvm, "No remain on channel event\n");
+    return;
+  }
+
+  mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+
+  if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+    iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+    set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+  } else {
+    iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
+  }
+
+  iwl_mvm_roc_finished(mvm);
+}
+
+int iwl_mvm_schedule_csa_period(struct iwl_mvm* mvm, struct ieee80211_vif* vif, uint32_t duration,
+                                uint32_t apply_time) {
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct iwl_mvm_time_event_data* te_data = &mvmvif->time_event_data;
+  struct iwl_time_event_cmd time_cmd = {};
+
+  lockdep_assert_held(&mvm->mutex);
+
+  if (te_data->running) {
+    uint32_t id;
 
     spin_lock_bh(&mvm->time_event_lock);
     id = te_data->id;
     spin_unlock_bh(&mvm->time_event_lock);
 
-    if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
-        IWL_DEBUG_TE(mvm, "don't remove TE with id=%u (not session protection)\n", id);
-        return;
+    if (id == TE_CHANNEL_SWITCH_PERIOD) {
+      IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
+      return -EBUSY;
     }
 
+    /*
+     * Remove the session protection time event to allow the
+     * channel switch. If we got here, we just heard a beacon so
+     * the session protection is not needed anymore anyway.
+     */
     iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
-}
+  }
 
-int iwl_mvm_start_p2p_roc(struct iwl_mvm* mvm, struct ieee80211_vif* vif, int duration,
-                          enum ieee80211_roc_type type) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_time_event_data* te_data = &mvmvif->time_event_data;
-    struct iwl_time_event_cmd time_cmd = {};
+  time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+  time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+  time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
+  time_cmd.apply_time = cpu_to_le32(apply_time);
+  time_cmd.max_frags = TE_V2_FRAG_NONE;
+  time_cmd.duration = cpu_to_le32(duration);
+  time_cmd.repeat = 1;
+  time_cmd.interval = cpu_to_le32(1);
+  time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_ABSENCE);
+  if (!apply_time) {
+    time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
+  }
 
-    lockdep_assert_held(&mvm->mutex);
-    if (te_data->running) {
-        IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
-        return -EBUSY;
-    }
-
-    time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
-    time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
-
-    switch (type) {
-    case IEEE80211_ROC_TYPE_NORMAL:
-        time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
-        break;
-    case IEEE80211_ROC_TYPE_MGMT_TX:
-        time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
-        break;
-    default:
-        WARN_ONCE(1, "Got an invalid ROC type\n");
-        return -EINVAL;
-    }
-
-    time_cmd.apply_time = cpu_to_le32(0);
-    time_cmd.interval = cpu_to_le32(1);
-
-    /*
-     * The P2P Device TEs can have lower priority than other events
-     * that are being scheduled by the driver/fw, and thus it might not be
-     * scheduled. To improve the chances of it being scheduled, allow them
-     * to be fragmented, and in addition allow them to be delayed.
-     */
-    time_cmd.max_frags = min(MSEC_TO_TU(duration) / 50, TE_V2_FRAG_ENDLESS);
-    time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration / 2));
-    time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
-    time_cmd.repeat = 1;
-    time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END |
-                                  TE_V2_START_IMMEDIATELY);
-
-    return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
-}
-
-static struct iwl_mvm_time_event_data* iwl_mvm_get_roc_te(struct iwl_mvm* mvm) {
-    struct iwl_mvm_time_event_data* te_data;
-
-    lockdep_assert_held(&mvm->mutex);
-
-    spin_lock_bh(&mvm->time_event_lock);
-
-    /*
-     * Iterate over the list of time events and find the time event that is
-     * associated with a P2P_DEVICE interface.
-     * This assumes that a P2P_DEVICE interface can have only a single time
-     * event at any given time and this time event coresponds to a ROC
-     * request
-     */
-    list_for_each_entry(te_data, &mvm->time_event_list, list) {
-        if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { goto out; }
-    }
-
-    /* There can only be at most one AUX ROC time event, we just use the
-     * list to simplify/unify code. Remove it if it exists.
-     */
-    te_data = list_first_entry_or_null(&mvm->aux_roc_te_list, struct iwl_mvm_time_event_data, list);
-out:
-    spin_unlock_bh(&mvm->time_event_lock);
-    return te_data;
-}
-
-void iwl_mvm_cleanup_roc_te(struct iwl_mvm* mvm) {
-    struct iwl_mvm_time_event_data* te_data;
-    uint32_t uid;
-
-    te_data = iwl_mvm_get_roc_te(mvm);
-    if (te_data) { __iwl_mvm_remove_time_event(mvm, te_data, &uid); }
-}
-
-void iwl_mvm_stop_roc(struct iwl_mvm* mvm) {
-    struct iwl_mvm_vif* mvmvif;
-    struct iwl_mvm_time_event_data* te_data;
-
-    te_data = iwl_mvm_get_roc_te(mvm);
-    if (!te_data) {
-        IWL_WARN(mvm, "No remain on channel event\n");
-        return;
-    }
-
-    mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-
-    if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-        iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
-        set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
-    } else {
-        iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
-    }
-
-    iwl_mvm_roc_finished(mvm);
-}
-
-int iwl_mvm_schedule_csa_period(struct iwl_mvm* mvm, struct ieee80211_vif* vif, uint32_t duration,
-                                uint32_t apply_time) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct iwl_mvm_time_event_data* te_data = &mvmvif->time_event_data;
-    struct iwl_time_event_cmd time_cmd = {};
-
-    lockdep_assert_held(&mvm->mutex);
-
-    if (te_data->running) {
-        uint32_t id;
-
-        spin_lock_bh(&mvm->time_event_lock);
-        id = te_data->id;
-        spin_unlock_bh(&mvm->time_event_lock);
-
-        if (id == TE_CHANNEL_SWITCH_PERIOD) {
-            IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
-            return -EBUSY;
-        }
-
-        /*
-         * Remove the session protection time event to allow the
-         * channel switch. If we got here, we just heard a beacon so
-         * the session protection is not needed anymore anyway.
-         */
-        iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
-    }
-
-    time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
-    time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
-    time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
-    time_cmd.apply_time = cpu_to_le32(apply_time);
-    time_cmd.max_frags = TE_V2_FRAG_NONE;
-    time_cmd.duration = cpu_to_le32(duration);
-    time_cmd.repeat = 1;
-    time_cmd.interval = cpu_to_le32(1);
-    time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_ABSENCE);
-    if (!apply_time) { time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY); }
-
-    return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+  return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/time-event.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/time-event.h
index 5b6dd7e..cdb2703 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/time-event.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/time-event.h
@@ -36,7 +36,6 @@
 #define SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_MVM_TIME_EVENT_H_
 
 #include "fw-api.h"
-
 #include "mvm.h"
 
 /**
@@ -201,9 +200,11 @@
  * This function returns true iff this TE is added to the fw.
  */
 static inline bool iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data* te_data) {
-    if (!te_data) { return false; }
+  if (!te_data) {
+    return false;
+  }
 
-    return !!te_data->uid;
+  return !!te_data->uid;
 }
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_MVM_TIME_EVENT_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tof.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tof.c
index 0642ae6..211f47d 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tof.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tof.c
@@ -31,12 +31,13 @@
  *
  *****************************************************************************/
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tof.h"
+
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mvm.h"
 
 #define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
 
 void iwl_mvm_tof_init(struct iwl_mvm* mvm) {
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
     struct iwl_mvm_tof_data* tof_data = &mvm->tof_data;
 
     if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) { return; }
@@ -65,19 +66,19 @@
 }
 
 void iwl_mvm_tof_clean(struct iwl_mvm* mvm) {
-    struct iwl_mvm_tof_data* tof_data = &mvm->tof_data;
+  struct iwl_mvm_tof_data* tof_data = &mvm->tof_data;
 
-    if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) ||
-        !(mvm->init_status & IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE)) {
-        return;
-    }
+  if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) ||
+      !(mvm->init_status & IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE)) {
+    return;
+  }
 
-    memset(tof_data, 0, sizeof(*tof_data));
-    mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
-    mvm->init_status &= ~IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
+  memset(tof_data, 0, sizeof(*tof_data));
+  mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+  mvm->init_status &= ~IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static void iwl_tof_iterator(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
     bool* enabled = _data;
 
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tof.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tof.h
index 3ebe94c..d39aa97 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tof.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tof.h
@@ -34,19 +34,18 @@
 #define SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_MVM_TOF_H_
 
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h"
-
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tof.h"
 
 struct iwl_mvm_tof_data {
-    struct iwl_tof_config_cmd tof_cfg;
-    struct iwl_tof_range_req_cmd range_req;
-    struct iwl_tof_range_req_ext_cmd range_req_ext;
+  struct iwl_tof_config_cmd tof_cfg;
+  struct iwl_tof_range_req_cmd range_req;
+  struct iwl_tof_range_req_ext_cmd range_req_ext;
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    struct iwl_tof_responder_config_cmd responder_cfg;
+  struct iwl_tof_responder_config_cmd responder_cfg;
 #endif
-    struct iwl_tof_range_rsp_ntfy range_resp;
-    uint8_t last_abort_id;
-    uint16_t active_range_request;
+  struct iwl_tof_range_rsp_ntfy range_resp;
+  uint8_t last_abort_id;
+  uint16_t active_range_request;
 };
 
 void iwl_mvm_tof_init(struct iwl_mvm* mvm);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tt.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tt.c
index 6a7534a..6fa535a 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tt.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tt.c
@@ -34,10 +34,9 @@
  *****************************************************************************/
 
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h"
-
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mvm.h"
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 #define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ
 
 void iwl_mvm_enter_ctkill(struct iwl_mvm* mvm) {
@@ -744,11 +743,11 @@
         mvm->cooling_dev.cdev = NULL;
     }
 }
-#endif /* CONFIG_THERMAL */
+#endif  /* CONFIG_THERMAL */
 #endif  // NEEDS_PORTING
 
 void iwl_mvm_thermal_initialize(struct iwl_mvm* mvm, uint32_t min_backoff) {
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
     struct iwl_mvm_tt_mgmt* tt = &mvm->thermal_throttle;
 
     IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
@@ -773,7 +772,9 @@
 }
 
 void iwl_mvm_thermal_exit(struct iwl_mvm* mvm) {
-    if (!(mvm->init_status & IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE)) { return; }
+  if (!(mvm->init_status & IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE)) {
+    return;
+  }
 
 #if 0   // NEEDS_PORTING
     cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
@@ -781,8 +782,8 @@
 #endif  // NEEDS_PORTING
 
 #ifdef CONFIG_THERMAL
-    iwl_mvm_cooling_device_unregister(mvm);
-    iwl_mvm_thermal_zone_unregister(mvm);
+  iwl_mvm_cooling_device_unregister(mvm);
+  iwl_mvm_thermal_zone_unregister(mvm);
 #endif
-    mvm->init_status &= ~IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
+  mvm->init_status &= ~IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tx.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tx.c
index 575d975..eed1c81 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tx.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/tx.c
@@ -39,7 +39,7 @@
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/mvm.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/sta.h"
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static void iwl_mvm_bar_check_trigger(struct iwl_mvm* mvm, const uint8_t* addr, uint16_t tid,
                                       uint16_t ssn) {
     struct iwl_fw_dbg_trigger_tlv* trig;
@@ -1153,12 +1153,12 @@
 
 #ifdef CPTCFG_IWLWIFI_DEBUG
 const char* iwl_mvm_get_tx_fail_reason(uint32_t status) {
-#define TX_STATUS_FAIL(x)    \
-    case TX_STATUS_FAIL_##x: \
-        return #x
-#define TX_STATUS_POSTPONE(x)    \
-    case TX_STATUS_POSTPONE_##x: \
-        return #x
+#define TX_STATUS_FAIL(x)  \
+  case TX_STATUS_FAIL_##x: \
+    return #x
+#define TX_STATUS_POSTPONE(x)  \
+  case TX_STATUS_POSTPONE_##x: \
+    return #x
 
     switch (status & TX_STATUS_MSK) {
     case TX_STATUS_SUCCESS:
@@ -1289,7 +1289,7 @@
 #endif  // NEEDS_PORTING
 
 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm* mvm, struct iwl_rx_packet* pkt) {
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
     struct ieee80211_sta* sta;
     uint16_t sequence = le16_to_cpu(pkt->hdr.sequence);
     int txq_id = SEQ_TO_QUEUE(sequence);
@@ -1413,7 +1413,7 @@
         if (info->flags & IEEE80211_TX_STAT_ACK) {
             iwl_mvm_tdls_peer_cache_pkt(mvm, (void*)skb->data, skb->len, -1);
         }
-#endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */
+#endif  /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */
 
         ieee80211_tx_status(mvm->hw, skb);
     }
@@ -1516,11 +1516,11 @@
 #endif  // NEEDS_PORTING
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 #ifdef CPTCFG_IWLWIFI_DEBUG
-#define AGG_TX_STATE_(x)   \
-    case AGG_TX_STATE_##x: \
-        return #x
+#define AGG_TX_STATE_(x) \
+  case AGG_TX_STATE_##x: \
+    return #x
 static const char* iwl_get_agg_tx_status(uint16_t status) {
     switch (status & AGG_TX_STATE_STATUS_MSK) {
         AGG_TX_STATE_(TRANSMITTED);
@@ -1557,7 +1557,7 @@
 }
 #else
 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm* mvm, struct iwl_rx_packet* pkt) {}
-#endif /* CPTCFG_IWLWIFI_DEBUG */
+#endif  /* CPTCFG_IWLWIFI_DEBUG */
 #endif  // NEEDS_PORTING
 
 static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm* mvm, struct iwl_rx_packet* pkt) {
@@ -1599,17 +1599,17 @@
 }
 
 void iwl_mvm_rx_tx_cmd(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_mvm_tx_resp* tx_resp = (void*)pkt->data;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_mvm_tx_resp* tx_resp = (void*)pkt->data;
 
-    if (tx_resp->frame_count == 1) {
-        iwl_mvm_rx_tx_cmd_single(mvm, pkt);
-    } else {
-        iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
-    }
+  if (tx_resp->frame_count == 1) {
+    iwl_mvm_rx_tx_cmd_single(mvm, pkt);
+  } else {
+    iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
+  }
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static void iwl_mvm_tx_reclaim(struct iwl_mvm* mvm, int sta_id, int tid, int txq, int index,
                                struct ieee80211_tx_info* ba_info, uint32_t rate) {
     struct sk_buff_head reclaimed_skbs;
@@ -1689,7 +1689,7 @@
 
 #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE
         iwl_mvm_tdls_peer_cache_pkt(mvm, hdr, skb->len, -1);
-#endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */
+#endif  /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */
 
         /* this is the first skb we deliver in this batch */
         /* put the rate scaling data there */
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/utils.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/utils.c
index 772d999..f1ce63b 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/utils.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/utils.c
@@ -34,7 +34,6 @@
  *
  *****************************************************************************/
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h"
-
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/rs.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-csr.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-debug.h"
@@ -47,56 +46,64 @@
  * CMD_WANT_SKB is set in cmd->flags.
  */
 zx_status_t iwl_mvm_send_cmd(struct iwl_mvm* mvm, struct iwl_host_cmd* cmd) {
-    zx_status_t ret;
+  zx_status_t ret;
 
 #if defined(CPTCFG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
-    if (WARN_ON(mvm->d3_test_active)) { return -EIO; }
+  if (WARN_ON(mvm->d3_test_active)) {
+    return -EIO;
+  }
 #endif
 
-    /*
-     * Synchronous commands from this op-mode must hold
-     * the mutex, this ensures we don't try to send two
-     * (or more) synchronous commands at a time.
-     */
-    if (!(cmd->flags & CMD_ASYNC)) {
-        lockdep_assert_held(&mvm->mutex);
-        if (!(cmd->flags & CMD_SEND_IN_IDLE)) { iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD); }
+  /*
+   * Synchronous commands from this op-mode must hold
+   * the mutex, this ensures we don't try to send two
+   * (or more) synchronous commands at a time.
+   */
+  if (!(cmd->flags & CMD_ASYNC)) {
+    lockdep_assert_held(&mvm->mutex);
+    if (!(cmd->flags & CMD_SEND_IN_IDLE)) {
+      iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
     }
+  }
 
-    ret = iwl_trans_send_cmd(mvm->trans, cmd);
+  ret = iwl_trans_send_cmd(mvm->trans, cmd);
 
-    if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE))) {
-        iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);
-    }
+  if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE))) {
+    iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);
+  }
 
-    /*
-     * If the caller wants the SKB, then don't hide any problems, the
-     * caller might access the response buffer which will be NULL if
-     * the command failed.
-     */
-    if (cmd->flags & CMD_WANT_SKB) { return ret; }
-
-    /* Silently ignore failures if ZX_ERR_BAD_STATE is asserted */
-    if (!ret || ret == ZX_ERR_BAD_STATE) { return ZX_OK; }
+  /*
+   * If the caller wants the SKB, then don't hide any problems, the
+   * caller might access the response buffer which will be NULL if
+   * the command failed.
+   */
+  if (cmd->flags & CMD_WANT_SKB) {
     return ret;
+  }
+
+  /* Silently ignore failures if ZX_ERR_BAD_STATE is asserted */
+  if (!ret || ret == ZX_ERR_BAD_STATE) {
+    return ZX_OK;
+  }
+  return ret;
 }
 
 int iwl_mvm_send_cmd_pdu(struct iwl_mvm* mvm, uint32_t id, uint32_t flags, uint16_t len,
                          const void* data) {
-    struct iwl_host_cmd cmd = {
-        .id = id,
-        .len =
-            {
-                len,
-            },
-        .data =
-            {
-                data,
-            },
-        .flags = flags,
-    };
+  struct iwl_host_cmd cmd = {
+      .id = id,
+      .len =
+          {
+              len,
+          },
+      .data =
+          {
+              data,
+          },
+      .flags = flags,
+  };
 
-    return iwl_mvm_send_cmd(mvm, &cmd);
+  return iwl_mvm_send_cmd(mvm, &cmd);
 }
 
 /*
@@ -104,52 +111,54 @@
  */
 zx_status_t iwl_mvm_send_cmd_status(struct iwl_mvm* mvm, struct iwl_host_cmd* cmd,
                                     uint32_t* status) {
-    struct iwl_rx_packet* pkt;
-    struct iwl_cmd_response* resp;
-    int ret, resp_len;
+  struct iwl_rx_packet* pkt;
+  struct iwl_cmd_response* resp;
+  int ret, resp_len;
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
 #if defined(CPTCFG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
-    if (WARN_ON(mvm->d3_test_active)) { return -EIO; }
+  if (WARN_ON(mvm->d3_test_active)) {
+    return -EIO;
+  }
 #endif
 
+  /*
+   * Only synchronous commands can wait for status, we use WANT_SKB so the caller can't.
+   */
+  if (cmd->flags & (CMD_ASYNC | CMD_WANT_SKB)) {
+    IWL_WARN(mvm, "cmd flags 0x%x\n", cmd->flags);
+    return ZX_ERR_INVALID_ARGS;
+  }
+
+  cmd->flags |= CMD_WANT_SKB;
+
+  ret = iwl_trans_send_cmd(mvm->trans, cmd);
+  if (ret == ZX_ERR_BAD_STATE) {
     /*
-     * Only synchronous commands can wait for status, we use WANT_SKB so the caller can't.
+     * The command failed because of ZX_ERR_BAD_STATE(RFKILL), don't update
+     * the status, leave it as success and return 0.
      */
-    if (cmd->flags & (CMD_ASYNC | CMD_WANT_SKB)) {
-        IWL_WARN(mvm, "cmd flags 0x%x\n", cmd->flags);
-        return ZX_ERR_INVALID_ARGS;
-    }
-
-    cmd->flags |= CMD_WANT_SKB;
-
-    ret = iwl_trans_send_cmd(mvm->trans, cmd);
-    if (ret == ZX_ERR_BAD_STATE) {
-        /*
-         * The command failed because of ZX_ERR_BAD_STATE(RFKILL), don't update
-         * the status, leave it as success and return 0.
-         */
-        return ZX_OK;
-    } else if (ret) {
-        return ret;
-    }
-
-    pkt = cmd->resp_pkt;
-
-    resp_len = iwl_rx_packet_payload_len(pkt);
-    if (resp_len != sizeof(*resp)) {
-        IWL_WARN(mvm, "Rx packet payload length is not expected. expected: %lu, actual: %d\n",
-                      sizeof(*resp), resp_len);
-        ret = ZX_ERR_IO;
-        goto out_free_resp;
-    }
-
-    resp = (void*)pkt->data;
-    *status = le32_to_cpu(resp->status);
-out_free_resp:
-    iwl_free_resp(cmd);
+    return ZX_OK;
+  } else if (ret) {
     return ret;
+  }
+
+  pkt = cmd->resp_pkt;
+
+  resp_len = iwl_rx_packet_payload_len(pkt);
+  if (resp_len != sizeof(*resp)) {
+    IWL_WARN(mvm, "Rx packet payload length is not expected. expected: %lu, actual: %d\n",
+             sizeof(*resp), resp_len);
+    ret = ZX_ERR_IO;
+    goto out_free_resp;
+  }
+
+  resp = (void*)pkt->data;
+  *status = le32_to_cpu(resp->status);
+out_free_resp:
+  iwl_free_resp(cmd);
+  return ret;
 }
 
 /*
@@ -157,19 +166,19 @@
  */
 int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm* mvm, uint32_t id, uint16_t len, const void* data,
                                 uint32_t* status) {
-    struct iwl_host_cmd cmd = {
-        .id = id,
-        .len =
-            {
-                len,
-            },
-        .data =
-            {
-                data,
-            },
-    };
+  struct iwl_host_cmd cmd = {
+      .id = id,
+      .len =
+          {
+              len,
+          },
+      .data =
+          {
+              data,
+          },
+  };
 
-    return iwl_mvm_send_cmd_status(mvm, &cmd, status);
+  return iwl_mvm_send_cmd_status(mvm, &cmd, status);
 }
 
 #define IWL_DECLARE_RATE_INFO(r) [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
@@ -178,16 +187,13 @@
  * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
  */
 static const uint8_t fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
-    IWL_DECLARE_RATE_INFO(1),  IWL_DECLARE_RATE_INFO(2),
-    IWL_DECLARE_RATE_INFO(5),  IWL_DECLARE_RATE_INFO(11),
-    IWL_DECLARE_RATE_INFO(6),  IWL_DECLARE_RATE_INFO(9),
-    IWL_DECLARE_RATE_INFO(12), IWL_DECLARE_RATE_INFO(18),
-    IWL_DECLARE_RATE_INFO(24), IWL_DECLARE_RATE_INFO(36),
-    IWL_DECLARE_RATE_INFO(48), IWL_DECLARE_RATE_INFO(54),
+    IWL_DECLARE_RATE_INFO(1),  IWL_DECLARE_RATE_INFO(2),  IWL_DECLARE_RATE_INFO(5),
+    IWL_DECLARE_RATE_INFO(11), IWL_DECLARE_RATE_INFO(6),  IWL_DECLARE_RATE_INFO(9),
+    IWL_DECLARE_RATE_INFO(12), IWL_DECLARE_RATE_INFO(18), IWL_DECLARE_RATE_INFO(24),
+    IWL_DECLARE_RATE_INFO(36), IWL_DECLARE_RATE_INFO(48), IWL_DECLARE_RATE_INFO(54),
 };
 
-zx_status_t iwl_mvm_legacy_rate_to_mac80211_idx(uint32_t rate_n_flags,
-                                                enum nl80211_band band,
+zx_status_t iwl_mvm_legacy_rate_to_mac80211_idx(uint32_t rate_n_flags, enum nl80211_band band,
                                                 int* ptr_idx) {
   int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
   int idx;
@@ -224,14 +230,14 @@
 }
 
 void iwl_mvm_rx_fw_error(struct iwl_mvm* mvm, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    struct iwl_error_resp* err_resp = (void*)pkt->data;
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_error_resp* err_resp = (void*)pkt->data;
 
-    IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
-            le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
-    IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
-            le16_to_cpu(err_resp->bad_cmd_seq_num), le32_to_cpu(err_resp->error_service));
-    IWL_ERR(mvm, "FW Error notification: timestamp 0x%016lX\n", le64_to_cpu(err_resp->timestamp));
+  IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
+          le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
+  IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
+          le16_to_cpu(err_resp->bad_cmd_seq_num), le32_to_cpu(err_resp->error_service));
+  IWL_ERR(mvm, "FW Error notification: timestamp 0x%016lX\n", le64_to_cpu(err_resp->timestamp));
 }
 
 /*
@@ -239,11 +245,11 @@
  * The parameter should also be a combination of ANT_[ABC].
  */
 uint8_t first_antenna(uint8_t mask) {
-    BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
-    if (WARN_ON_ONCE(!mask)) {     /* ffs will return 0 if mask is zeroed */
-        return BIT(0);
-    }
-    return BIT(ffs(mask) - 1);
+  BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
+  if (WARN_ON_ONCE(!mask)) {     /* ffs will return 0 if mask is zeroed */
+    return BIT(0);
+  }
+  return BIT(ffs(mask) - 1);
 }
 
 /*
@@ -253,19 +259,21 @@
  * In order to set it in the tx_cmd, must do BIT(idx).
  */
 uint8_t iwl_mvm_next_antenna(struct iwl_mvm* mvm, uint8_t valid, uint8_t last_idx) {
-    uint8_t ind = last_idx;
-    int i;
+  uint8_t ind = last_idx;
+  int i;
 
-    for (i = 0; i < MAX_ANT_NUM; i++) {
-        ind = (ind + 1) % MAX_ANT_NUM;
-        if (valid & BIT(ind)) { return ind; }
+  for (i = 0; i < MAX_ANT_NUM; i++) {
+    ind = (ind + 1) % MAX_ANT_NUM;
+    if (valid & BIT(ind)) {
+      return ind;
     }
+  }
 
-    IWL_WARN(mvm, "Failed to toggle between antennas 0x%x\n", valid);
-    return last_idx;
+  IWL_WARN(mvm, "Failed to toggle between antennas 0x%x\n", valid);
+  return last_idx;
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 #define FW_SYSASSERT_CPU_MASK 0xf0000000
 static const struct {
     const char* name;
@@ -312,98 +320,98 @@
  * need to be ordered correctly though!
  */
 struct iwl_error_event_table_v1 {
-    uint32_t valid;          /* (nonzero) valid, (0) log is empty */
-    uint32_t error_id;       /* type of error */
-    uint32_t pc;             /* program counter */
-    uint32_t blink1;         /* branch link */
-    uint32_t blink2;         /* branch link */
-    uint32_t ilink1;         /* interrupt link */
-    uint32_t ilink2;         /* interrupt link */
-    uint32_t data1;          /* error-specific data */
-    uint32_t data2;          /* error-specific data */
-    uint32_t data3;          /* error-specific data */
-    uint32_t bcon_time;      /* beacon timer */
-    uint32_t tsf_low;        /* network timestamp function timer */
-    uint32_t tsf_hi;         /* network timestamp function timer */
-    uint32_t gp1;            /* GP1 timer register */
-    uint32_t gp2;            /* GP2 timer register */
-    uint32_t gp3;            /* GP3 timer register */
-    uint32_t ucode_ver;      /* uCode version */
-    uint32_t hw_ver;         /* HW Silicon version */
-    uint32_t brd_ver;        /* HW board version */
-    uint32_t log_pc;         /* log program counter */
-    uint32_t frame_ptr;      /* frame pointer */
-    uint32_t stack_ptr;      /* stack pointer */
-    uint32_t hcmd;           /* last host command header */
-    uint32_t isr0;           /* isr status register LMPM_NIC_ISR0:
-                              * rxtx_flag */
-    uint32_t isr1;           /* isr status register LMPM_NIC_ISR1:
-                              * host_flag */
-    uint32_t isr2;           /* isr status register LMPM_NIC_ISR2:
-                              * enc_flag */
-    uint32_t isr3;           /* isr status register LMPM_NIC_ISR3:
-                              * time_flag */
-    uint32_t isr4;           /* isr status register LMPM_NIC_ISR4:
-                              * wico interrupt */
-    uint32_t isr_pref;       /* isr status register LMPM_NIC_PREF_STAT */
-    uint32_t wait_event;     /* wait event() caller address */
-    uint32_t l2p_control;    /* L2pControlField */
-    uint32_t l2p_duration;   /* L2pDurationField */
-    uint32_t l2p_mhvalid;    /* L2pMhValidBits */
-    uint32_t l2p_addr_match; /* L2pAddrMatchStat */
-    uint32_t lmpm_pmg_sel;   /* indicate which clocks are turned on
-                              * (LMPM_PMG_SEL) */
-    uint32_t u_timestamp;    /* indicate when the date and time of the
-                              * compilation */
-    uint32_t flow_handler;   /* FH read/write pointers, RX credit */
+  uint32_t valid;          /* (nonzero) valid, (0) log is empty */
+  uint32_t error_id;       /* type of error */
+  uint32_t pc;             /* program counter */
+  uint32_t blink1;         /* branch link */
+  uint32_t blink2;         /* branch link */
+  uint32_t ilink1;         /* interrupt link */
+  uint32_t ilink2;         /* interrupt link */
+  uint32_t data1;          /* error-specific data */
+  uint32_t data2;          /* error-specific data */
+  uint32_t data3;          /* error-specific data */
+  uint32_t bcon_time;      /* beacon timer */
+  uint32_t tsf_low;        /* network timestamp function timer */
+  uint32_t tsf_hi;         /* network timestamp function timer */
+  uint32_t gp1;            /* GP1 timer register */
+  uint32_t gp2;            /* GP2 timer register */
+  uint32_t gp3;            /* GP3 timer register */
+  uint32_t ucode_ver;      /* uCode version */
+  uint32_t hw_ver;         /* HW Silicon version */
+  uint32_t brd_ver;        /* HW board version */
+  uint32_t log_pc;         /* log program counter */
+  uint32_t frame_ptr;      /* frame pointer */
+  uint32_t stack_ptr;      /* stack pointer */
+  uint32_t hcmd;           /* last host command header */
+  uint32_t isr0;           /* isr status register LMPM_NIC_ISR0:
+                            * rxtx_flag */
+  uint32_t isr1;           /* isr status register LMPM_NIC_ISR1:
+                            * host_flag */
+  uint32_t isr2;           /* isr status register LMPM_NIC_ISR2:
+                            * enc_flag */
+  uint32_t isr3;           /* isr status register LMPM_NIC_ISR3:
+                            * time_flag */
+  uint32_t isr4;           /* isr status register LMPM_NIC_ISR4:
+                            * wico interrupt */
+  uint32_t isr_pref;       /* isr status register LMPM_NIC_PREF_STAT */
+  uint32_t wait_event;     /* wait event() caller address */
+  uint32_t l2p_control;    /* L2pControlField */
+  uint32_t l2p_duration;   /* L2pDurationField */
+  uint32_t l2p_mhvalid;    /* L2pMhValidBits */
+  uint32_t l2p_addr_match; /* L2pAddrMatchStat */
+  uint32_t lmpm_pmg_sel;   /* indicate which clocks are turned on
+                            * (LMPM_PMG_SEL) */
+  uint32_t u_timestamp;    /* indicate when the date and time of the
+                            * compilation */
+  uint32_t flow_handler;   /* FH read/write pointers, RX credit */
 } __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
 
 struct iwl_error_event_table {
-    uint32_t valid;          /* (nonzero) valid, (0) log is empty */
-    uint32_t error_id;       /* type of error */
-    uint32_t trm_hw_status0; /* TRM HW status */
-    uint32_t trm_hw_status1; /* TRM HW status */
-    uint32_t blink2;         /* branch link */
-    uint32_t ilink1;         /* interrupt link */
-    uint32_t ilink2;         /* interrupt link */
-    uint32_t data1;          /* error-specific data */
-    uint32_t data2;          /* error-specific data */
-    uint32_t data3;          /* error-specific data */
-    uint32_t bcon_time;      /* beacon timer */
-    uint32_t tsf_low;        /* network timestamp function timer */
-    uint32_t tsf_hi;         /* network timestamp function timer */
-    uint32_t gp1;            /* GP1 timer register */
-    uint32_t gp2;            /* GP2 timer register */
-    uint32_t fw_rev_type;    /* firmware revision type */
-    uint32_t major;          /* uCode version major */
-    uint32_t minor;          /* uCode version minor */
-    uint32_t hw_ver;         /* HW Silicon version */
-    uint32_t brd_ver;        /* HW board version */
-    uint32_t log_pc;         /* log program counter */
-    uint32_t frame_ptr;      /* frame pointer */
-    uint32_t stack_ptr;      /* stack pointer */
-    uint32_t hcmd;           /* last host command header */
-    uint32_t isr0;           /* isr status register LMPM_NIC_ISR0:
-                              * rxtx_flag */
-    uint32_t isr1;           /* isr status register LMPM_NIC_ISR1:
-                              * host_flag */
-    uint32_t isr2;           /* isr status register LMPM_NIC_ISR2:
-                              * enc_flag */
-    uint32_t isr3;           /* isr status register LMPM_NIC_ISR3:
-                              * time_flag */
-    uint32_t isr4;           /* isr status register LMPM_NIC_ISR4:
-                              * wico interrupt */
-    uint32_t last_cmd_id;    /* last HCMD id handled by the firmware */
-    uint32_t wait_event;     /* wait event() caller address */
-    uint32_t l2p_control;    /* L2pControlField */
-    uint32_t l2p_duration;   /* L2pDurationField */
-    uint32_t l2p_mhvalid;    /* L2pMhValidBits */
-    uint32_t l2p_addr_match; /* L2pAddrMatchStat */
-    uint32_t lmpm_pmg_sel;   /* indicate which clocks are turned on
-                              * (LMPM_PMG_SEL) */
-    uint32_t u_timestamp;    /* indicate when the date and time of the
-                              * compilation */
-    uint32_t flow_handler;   /* FH read/write pointers, RX credit */
+  uint32_t valid;          /* (nonzero) valid, (0) log is empty */
+  uint32_t error_id;       /* type of error */
+  uint32_t trm_hw_status0; /* TRM HW status */
+  uint32_t trm_hw_status1; /* TRM HW status */
+  uint32_t blink2;         /* branch link */
+  uint32_t ilink1;         /* interrupt link */
+  uint32_t ilink2;         /* interrupt link */
+  uint32_t data1;          /* error-specific data */
+  uint32_t data2;          /* error-specific data */
+  uint32_t data3;          /* error-specific data */
+  uint32_t bcon_time;      /* beacon timer */
+  uint32_t tsf_low;        /* network timestamp function timer */
+  uint32_t tsf_hi;         /* network timestamp function timer */
+  uint32_t gp1;            /* GP1 timer register */
+  uint32_t gp2;            /* GP2 timer register */
+  uint32_t fw_rev_type;    /* firmware revision type */
+  uint32_t major;          /* uCode version major */
+  uint32_t minor;          /* uCode version minor */
+  uint32_t hw_ver;         /* HW Silicon version */
+  uint32_t brd_ver;        /* HW board version */
+  uint32_t log_pc;         /* log program counter */
+  uint32_t frame_ptr;      /* frame pointer */
+  uint32_t stack_ptr;      /* stack pointer */
+  uint32_t hcmd;           /* last host command header */
+  uint32_t isr0;           /* isr status register LMPM_NIC_ISR0:
+                            * rxtx_flag */
+  uint32_t isr1;           /* isr status register LMPM_NIC_ISR1:
+                            * host_flag */
+  uint32_t isr2;           /* isr status register LMPM_NIC_ISR2:
+                            * enc_flag */
+  uint32_t isr3;           /* isr status register LMPM_NIC_ISR3:
+                            * time_flag */
+  uint32_t isr4;           /* isr status register LMPM_NIC_ISR4:
+                            * wico interrupt */
+  uint32_t last_cmd_id;    /* last HCMD id handled by the firmware */
+  uint32_t wait_event;     /* wait event() caller address */
+  uint32_t l2p_control;    /* L2pControlField */
+  uint32_t l2p_duration;   /* L2pDurationField */
+  uint32_t l2p_mhvalid;    /* L2pMhValidBits */
+  uint32_t l2p_addr_match; /* L2pAddrMatchStat */
+  uint32_t lmpm_pmg_sel;   /* indicate which clocks are turned on
+                            * (LMPM_PMG_SEL) */
+  uint32_t u_timestamp;    /* indicate when the date and time of the
+                            * compilation */
+  uint32_t flow_handler;   /* FH read/write pointers, RX credit */
 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
 
 /*
@@ -414,21 +422,21 @@
  * need to be ordered correctly though!
  */
 struct iwl_umac_error_event_table {
-    uint32_t valid;    /* (nonzero) valid, (0) log is empty */
-    uint32_t error_id; /* type of error */
-    uint32_t blink1;   /* branch link */
-    uint32_t blink2;   /* branch link */
-    uint32_t ilink1;   /* interrupt link */
-    uint32_t ilink2;   /* interrupt link */
-    uint32_t data1;    /* error-specific data */
-    uint32_t data2;    /* error-specific data */
-    uint32_t data3;    /* error-specific data */
-    uint32_t umac_major;
-    uint32_t umac_minor;
-    uint32_t frame_pointer; /* core register 27*/
-    uint32_t stack_pointer; /* core register 28 */
-    uint32_t cmd_header;    /* latest host cmd sent to UMAC */
-    uint32_t nic_isr_pref;  /* ISR status register */
+  uint32_t valid;    /* (nonzero) valid, (0) log is empty */
+  uint32_t error_id; /* type of error */
+  uint32_t blink1;   /* branch link */
+  uint32_t blink2;   /* branch link */
+  uint32_t ilink1;   /* interrupt link */
+  uint32_t ilink2;   /* interrupt link */
+  uint32_t data1;    /* error-specific data */
+  uint32_t data2;    /* error-specific data */
+  uint32_t data3;    /* error-specific data */
+  uint32_t umac_major;
+  uint32_t umac_minor;
+  uint32_t frame_pointer; /* core register 27*/
+  uint32_t stack_pointer; /* core register 28 */
+  uint32_t cmd_header;    /* latest host cmd sent to UMAC */
+  uint32_t nic_isr_pref;  /* ISR status register */
 } __packed;
 
 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
@@ -560,50 +568,52 @@
 }
 
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm* mvm) {
-    if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
-        IWL_ERR(mvm, "DEVICE_ENABLED bit is not set. Aborting dump.\n");
-        return;
-    }
+  if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
+    IWL_ERR(mvm, "DEVICE_ENABLED bit is not set. Aborting dump.\n");
+    return;
+  }
 
-    iwl_mvm_dump_lmac_error_log(mvm, 0);
+  iwl_mvm_dump_lmac_error_log(mvm, 0);
 
-    if (mvm->error_event_table[1]) { iwl_mvm_dump_lmac_error_log(mvm, 1); }
+  if (mvm->error_event_table[1]) {
+    iwl_mvm_dump_lmac_error_log(mvm, 1);
+  }
 
-    iwl_mvm_dump_umac_error_log(mvm);
+  iwl_mvm_dump_umac_error_log(mvm);
 }
 
 zx_status_t iwl_mvm_reconfig_scd(struct iwl_mvm* mvm, int queue, int fifo, int sta_id, int tid,
                                  int frame_limit, uint16_t ssn) {
-    struct iwl_scd_txq_cfg_cmd cmd = {
-        .scd_queue = queue,
-        .action = SCD_CFG_ENABLE_QUEUE,
-        .window = frame_limit,
-        .sta_id = sta_id,
-        .ssn = cpu_to_le16(ssn),
-        .tx_fifo = fifo,
-        .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
-        .tid = tid,
-    };
-    zx_status_t ret;
+  struct iwl_scd_txq_cfg_cmd cmd = {
+      .scd_queue = queue,
+      .action = SCD_CFG_ENABLE_QUEUE,
+      .window = frame_limit,
+      .sta_id = sta_id,
+      .ssn = cpu_to_le16(ssn),
+      .tx_fifo = fifo,
+      .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
+      .tid = tid,
+  };
+  zx_status_t ret;
 
-    if (iwl_mvm_has_new_tx_api(mvm) != ZX_OK) {
-        IWL_WARN(mvm, "iwl_mvm_has_new_tx_api() returns true%s\n", "");
-        return ZX_ERR_INVALID_ARGS;
-    }
+  if (iwl_mvm_has_new_tx_api(mvm) != ZX_OK) {
+    IWL_WARN(mvm, "iwl_mvm_has_new_tx_api() returns true%s\n", "");
+    return ZX_ERR_INVALID_ARGS;
+  }
 
-    if (mvm->queue_info[queue].tid_bitmap == 0) {
-        IWL_WARN(mvm, "Trying to reconfig unallocated queue %d\n", queue);
-        return ZX_ERR_BAD_HANDLE;
-    }
+  if (mvm->queue_info[queue].tid_bitmap == 0) {
+    IWL_WARN(mvm, "Trying to reconfig unallocated queue %d\n", queue);
+    return ZX_ERR_BAD_HANDLE;
+  }
 
-    IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
+  IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
 
-    ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
-    if (ret != ZX_OK) {
-        IWL_WARN(mvm, "Failed to re-configure queue %d on FIFO %d, ret=%d\n", queue, fifo, ret);
-    }
+  ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+  if (ret != ZX_OK) {
+    IWL_WARN(mvm, "Failed to re-configure queue %d on FIFO %d, ret=%d\n", queue, fifo, ret);
+  }
 
-    return ret;
+  return ret;
 }
 
 /**
@@ -616,7 +626,7 @@
  * progress.
  */
 int iwl_mvm_send_lq_cmd(struct iwl_mvm* mvm, struct iwl_lq_cmd* lq, bool sync) {
-    return ZX_ERR_NOT_SUPPORTED;
+  return ZX_ERR_NOT_SUPPORTED;
 #if 0   // NEEDS_PORTING
     struct iwl_host_cmd cmd = {
         .id = LQ_CMD,
@@ -683,7 +693,7 @@
 }
 
 int iwl_mvm_request_statistics(struct iwl_mvm* mvm, bool clear) {
-    return ZX_ERR_NOT_SUPPORTED;
+  return ZX_ERR_NOT_SUPPORTED;
 #if 0   // NEEDS_PORTING
     struct iwl_statistics_cmd scmd = {
         .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
@@ -709,13 +719,13 @@
 }
 
 void iwl_mvm_accu_radio_stats(struct iwl_mvm* mvm) {
-    mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
-    mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
-    mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
-    mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
+  mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
+  mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
+  mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
+  mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static void iwl_mvm_diversity_iter(void* _data, uint8_t* mac, struct ieee80211_vif* vif) {
     struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
     bool* result = _data;
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/vendor-cmd.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/vendor-cmd.c
index 5fda383..6a6c2fb 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/vendor-cmd.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/mvm/vendor-cmd.c
@@ -37,6 +37,7 @@
 #include <linux/etherdevice.h>
 #include <net/mac80211.h>
 #include <net/netlink.h>
+
 #include "iwl-vendor-cmd.h"
 #include "mvm.h"
 
@@ -80,938 +81,1056 @@
 };
 
 static int iwl_mvm_parse_vendor_data(struct nlattr** tb, const void* data, int data_len) {
-    if (!data) { return -EINVAL; }
+  if (!data) {
+    return -EINVAL;
+  }
 
-    return nla_parse(tb, MAX_IWL_MVM_VENDOR_ATTR, data, data_len, iwl_mvm_vendor_attr_policy, NULL);
+  return nla_parse(tb, MAX_IWL_MVM_VENDOR_ATTR, data, data_len, iwl_mvm_vendor_attr_policy, NULL);
 }
 
 static int iwl_mvm_set_low_latency(struct wiphy* wiphy, struct wireless_dev* wdev, const void* data,
                                    int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    int err;
-    struct ieee80211_vif* vif = wdev_to_ieee80211_vif(wdev);
-    bool low_latency;
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  int err;
+  struct ieee80211_vif* vif = wdev_to_ieee80211_vif(wdev);
+  bool low_latency;
 
-    if (!vif) { return -ENODEV; }
+  if (!vif) {
+    return -ENODEV;
+  }
 
-    if (data) {
-        err = iwl_mvm_parse_vendor_data(tb, data, data_len);
-        if (err) { return err; }
-        low_latency = tb[IWL_MVM_VENDOR_ATTR_LOW_LATENCY];
-    } else {
-        low_latency = false;
+  if (data) {
+    err = iwl_mvm_parse_vendor_data(tb, data, data_len);
+    if (err) {
+      return err;
     }
+    low_latency = tb[IWL_MVM_VENDOR_ATTR_LOW_LATENCY];
+  } else {
+    low_latency = false;
+  }
 
-    mutex_lock(&mvm->mutex);
-    err = iwl_mvm_update_low_latency(mvm, vif, low_latency, LOW_LATENCY_VCMD);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  err = iwl_mvm_update_low_latency(mvm, vif, low_latency, LOW_LATENCY_VCMD);
+  mutex_unlock(&mvm->mutex);
 
-    return err;
+  return err;
 }
 
 static int iwl_mvm_get_low_latency(struct wiphy* wiphy, struct wireless_dev* wdev, const void* data,
                                    int data_len) {
-    struct ieee80211_vif* vif = wdev_to_ieee80211_vif(wdev);
-    struct iwl_mvm_vif* mvmvif;
-    struct sk_buff* skb;
+  struct ieee80211_vif* vif = wdev_to_ieee80211_vif(wdev);
+  struct iwl_mvm_vif* mvmvif;
+  struct sk_buff* skb;
 
-    if (!vif) { return -ENODEV; }
-    mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  if (!vif) {
+    return -ENODEV;
+  }
+  mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-    skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
-    if (!skb) { return -ENOMEM; }
-    if (iwl_mvm_vif_low_latency(mvmvif) && nla_put_flag(skb, IWL_MVM_VENDOR_ATTR_LOW_LATENCY)) {
-        kfree_skb(skb);
-        return -ENOBUFS;
-    }
+  skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+  if (!skb) {
+    return -ENOMEM;
+  }
+  if (iwl_mvm_vif_low_latency(mvmvif) && nla_put_flag(skb, IWL_MVM_VENDOR_ATTR_LOW_LATENCY)) {
+    kfree_skb(skb);
+    return -ENOBUFS;
+  }
 
-    return cfg80211_vendor_cmd_reply(skb);
+  return cfg80211_vendor_cmd_reply(skb);
 }
 
 static int iwl_mvm_set_country(struct wiphy* wiphy, struct wireless_dev* wdev, const void* data,
                                int data_len) {
-    struct ieee80211_regdomain* regd;
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    int retval;
+  struct ieee80211_regdomain* regd;
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  int retval;
 
-    if (!iwl_mvm_is_lar_supported(mvm)) { return -EOPNOTSUPP; }
+  if (!iwl_mvm_is_lar_supported(mvm)) {
+    return -EOPNOTSUPP;
+  }
 
-    retval = iwl_mvm_parse_vendor_data(tb, data, data_len);
-    if (retval) { return retval; }
-
-    if (!tb[IWL_MVM_VENDOR_ATTR_COUNTRY]) { return -EINVAL; }
-
-    mutex_lock(&mvm->mutex);
-
-    /* set regdomain information to FW */
-    regd = iwl_mvm_get_regdomain(
-        wiphy, nla_data(tb[IWL_MVM_VENDOR_ATTR_COUNTRY]),
-        iwl_mvm_is_wifi_mcc_supported(mvm) ? MCC_SOURCE_3G_LTE_HOST : MCC_SOURCE_OLD_FW, NULL);
-    if (IS_ERR_OR_NULL(regd)) {
-        retval = -EIO;
-        goto unlock;
-    }
-
-    retval = regulatory_set_wiphy_regd(wiphy, regd);
-    kfree(regd);
-unlock:
-    mutex_unlock(&mvm->mutex);
+  retval = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  if (retval) {
     return retval;
+  }
+
+  if (!tb[IWL_MVM_VENDOR_ATTR_COUNTRY]) {
+    return -EINVAL;
+  }
+
+  mutex_lock(&mvm->mutex);
+
+  /* set regdomain information to FW */
+  regd = iwl_mvm_get_regdomain(
+      wiphy, nla_data(tb[IWL_MVM_VENDOR_ATTR_COUNTRY]),
+      iwl_mvm_is_wifi_mcc_supported(mvm) ? MCC_SOURCE_3G_LTE_HOST : MCC_SOURCE_OLD_FW, NULL);
+  if (IS_ERR_OR_NULL(regd)) {
+    retval = -EIO;
+    goto unlock;
+  }
+
+  retval = regulatory_set_wiphy_regd(wiphy, regd);
+  kfree(regd);
+unlock:
+  mutex_unlock(&mvm->mutex);
+  return retval;
 }
 
 #ifdef CPTCFG_IWLWIFI_LTE_COEX
 static int iwl_vendor_lte_coex_state_cmd(struct wiphy* wiphy, struct wireless_dev* wdev,
                                          const void* data, int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    const struct lte_coex_state_cmd* cmd = data;
-    struct sk_buff* skb;
-    int err = LTE_OK;
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  const struct lte_coex_state_cmd* cmd = data;
+  struct sk_buff* skb;
+  int err = LTE_OK;
 
-    skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
-    if (!skb) { return -ENOMEM; }
+  skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+  if (!skb) {
+    return -ENOMEM;
+  }
 
-    if (data_len != sizeof(*cmd)) {
-        err = LTE_INVALID_DATA;
-        goto out;
-    }
+  if (data_len != sizeof(*cmd)) {
+    err = LTE_INVALID_DATA;
+    goto out;
+  }
 
-    IWL_DEBUG_COEX(mvm, "LTE-COEX: state cmd:\n\tstate: %d\n", cmd->lte_state);
+  IWL_DEBUG_COEX(mvm, "LTE-COEX: state cmd:\n\tstate: %d\n", cmd->lte_state);
 
-    switch (cmd->lte_state) {
+  switch (cmd->lte_state) {
     case LTE_OFF:
-        if (mvm->lte_state.has_config && mvm->lte_state.state != LTE_CONNECTED) {
-            err = LTE_STATE_ERR;
-            goto out;
-        }
-        mvm->lte_state.state = LTE_OFF;
-        mvm->lte_state.has_config = 0;
-        mvm->lte_state.has_rprtd_chan = 0;
-        mvm->lte_state.has_sps = 0;
-        mvm->lte_state.has_ft = 0;
-        break;
-    case LTE_IDLE:
-        if (!mvm->lte_state.has_static ||
-            (mvm->lte_state.has_config && mvm->lte_state.state != LTE_CONNECTED)) {
-            err = LTE_STATE_ERR;
-            goto out;
-        }
-        mvm->lte_state.has_config = 0;
-        mvm->lte_state.has_sps = 0;
-        mvm->lte_state.state = LTE_IDLE;
-        break;
-    case LTE_CONNECTED:
-        if (!(mvm->lte_state.has_config)) {
-            err = LTE_STATE_ERR;
-            goto out;
-        }
-        mvm->lte_state.state = LTE_CONNECTED;
-        break;
-    default:
-        err = LTE_ILLEGAL_PARAMS;
+      if (mvm->lte_state.has_config && mvm->lte_state.state != LTE_CONNECTED) {
+        err = LTE_STATE_ERR;
         goto out;
-    }
+      }
+      mvm->lte_state.state = LTE_OFF;
+      mvm->lte_state.has_config = 0;
+      mvm->lte_state.has_rprtd_chan = 0;
+      mvm->lte_state.has_sps = 0;
+      mvm->lte_state.has_ft = 0;
+      break;
+    case LTE_IDLE:
+      if (!mvm->lte_state.has_static ||
+          (mvm->lte_state.has_config && mvm->lte_state.state != LTE_CONNECTED)) {
+        err = LTE_STATE_ERR;
+        goto out;
+      }
+      mvm->lte_state.has_config = 0;
+      mvm->lte_state.has_sps = 0;
+      mvm->lte_state.state = LTE_IDLE;
+      break;
+    case LTE_CONNECTED:
+      if (!(mvm->lte_state.has_config)) {
+        err = LTE_STATE_ERR;
+        goto out;
+      }
+      mvm->lte_state.state = LTE_CONNECTED;
+      break;
+    default:
+      err = LTE_ILLEGAL_PARAMS;
+      goto out;
+  }
 
-    mvm->lte_state.config.lte_state = cpu_to_le32(mvm->lte_state.state);
+  mvm->lte_state.config.lte_state = cpu_to_le32(mvm->lte_state.state);
 
-    mutex_lock(&mvm->mutex);
-    if (iwl_mvm_send_lte_coex_config_cmd(mvm)) { err = LTE_OTHER_ERR; }
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  if (iwl_mvm_send_lte_coex_config_cmd(mvm)) {
+    err = LTE_OTHER_ERR;
+  }
+  mutex_unlock(&mvm->mutex);
 
 out:
-    if (err) { iwl_mvm_reset_lte_state(mvm); }
+  if (err) {
+    iwl_mvm_reset_lte_state(mvm);
+  }
 
-    if (nla_put_u8(skb, NLA_BINARY, err)) {
-        kfree_skb(skb);
-        return -ENOBUFS;
-    }
+  if (nla_put_u8(skb, NLA_BINARY, err)) {
+    kfree_skb(skb);
+    return -ENOBUFS;
+  }
 
-    return cfg80211_vendor_cmd_reply(skb);
+  return cfg80211_vendor_cmd_reply(skb);
 }
 
 static int iwl_vendor_lte_coex_config_cmd(struct wiphy* wiphy, struct wireless_dev* wdev,
                                           const void* data, int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    const struct lte_coex_config_info_cmd* cmd = data;
-    struct iwl_lte_coex_static_params_cmd* stat = &mvm->lte_state.stat;
-    struct sk_buff* skb;
-    int err = LTE_OK;
-    int i, j;
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  const struct lte_coex_config_info_cmd* cmd = data;
+  struct iwl_lte_coex_static_params_cmd* stat = &mvm->lte_state.stat;
+  struct sk_buff* skb;
+  int err = LTE_OK;
+  int i, j;
 
-    skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
-    if (!skb) { return -ENOMEM; }
+  skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+  if (!skb) {
+    return -ENOMEM;
+  }
 
-    if (data_len != sizeof(*cmd)) {
-        err = LTE_INVALID_DATA;
-        goto out;
+  if (data_len != sizeof(*cmd)) {
+    err = LTE_INVALID_DATA;
+    goto out;
+  }
+
+  IWL_DEBUG_COEX(mvm, "LTE-COEX: config cmd:\n");
+
+  /* send static config only once in the FW life */
+  if (mvm->lte_state.has_static) {
+    goto out;
+  }
+
+  for (i = 0; i < LTE_MWS_CONF_LENGTH; i++) {
+    IWL_DEBUG_COEX(mvm, "\tmws config data[%d]: %d\n", i, cmd->mws_conf_data[i]);
+    stat->mfu_config[i] = cpu_to_le32(cmd->mws_conf_data[i]);
+  }
+
+  if (cmd->safe_power_table[0] != LTE_SAFE_PT_FIRST ||
+      cmd->safe_power_table[LTE_SAFE_PT_LENGTH - 1] != LTE_SAFE_PT_LAST) {
+    err = LTE_ILLEGAL_PARAMS;
+    goto out;
+  }
+
+  /* power table must be ascending ordered */
+  j = LTE_SAFE_PT_FIRST;
+  for (i = 0; i < LTE_SAFE_PT_LENGTH; i++) {
+    IWL_DEBUG_COEX(mvm, "\tsafe power table[%d]: %d\n", i, cmd->safe_power_table[i]);
+    if (cmd->safe_power_table[i] < j) {
+      err = LTE_ILLEGAL_PARAMS;
+      goto out;
     }
+    j = cmd->safe_power_table[i];
+    stat->tx_power_in_dbm[i] = cmd->safe_power_table[i];
+  }
 
-    IWL_DEBUG_COEX(mvm, "LTE-COEX: config cmd:\n");
-
-    /* send static config only once in the FW life */
-    if (mvm->lte_state.has_static) { goto out; }
-
-    for (i = 0; i < LTE_MWS_CONF_LENGTH; i++) {
-        IWL_DEBUG_COEX(mvm, "\tmws config data[%d]: %d\n", i, cmd->mws_conf_data[i]);
-        stat->mfu_config[i] = cpu_to_le32(cmd->mws_conf_data[i]);
-    }
-
-    if (cmd->safe_power_table[0] != LTE_SAFE_PT_FIRST ||
-        cmd->safe_power_table[LTE_SAFE_PT_LENGTH - 1] != LTE_SAFE_PT_LAST) {
-        err = LTE_ILLEGAL_PARAMS;
-        goto out;
-    }
-
-    /* power table must be ascending ordered */
-    j = LTE_SAFE_PT_FIRST;
-    for (i = 0; i < LTE_SAFE_PT_LENGTH; i++) {
-        IWL_DEBUG_COEX(mvm, "\tsafe power table[%d]: %d\n", i, cmd->safe_power_table[i]);
-        if (cmd->safe_power_table[i] < j) {
-            err = LTE_ILLEGAL_PARAMS;
-            goto out;
-        }
-        j = cmd->safe_power_table[i];
-        stat->tx_power_in_dbm[i] = cmd->safe_power_table[i];
-    }
-
-    mutex_lock(&mvm->mutex);
-    if (iwl_mvm_send_lte_coex_static_params_cmd(mvm)) {
-        err = LTE_OTHER_ERR;
-    } else {
-        mvm->lte_state.has_static = 1;
-    }
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  if (iwl_mvm_send_lte_coex_static_params_cmd(mvm)) {
+    err = LTE_OTHER_ERR;
+  } else {
+    mvm->lte_state.has_static = 1;
+  }
+  mutex_unlock(&mvm->mutex);
 
 out:
-    if (err) { iwl_mvm_reset_lte_state(mvm); }
+  if (err) {
+    iwl_mvm_reset_lte_state(mvm);
+  }
 
-    if (nla_put_u8(skb, NLA_BINARY, err)) {
-        kfree_skb(skb);
-        return -ENOBUFS;
-    }
+  if (nla_put_u8(skb, NLA_BINARY, err)) {
+    kfree_skb(skb);
+    return -ENOBUFS;
+  }
 
-    return cfg80211_vendor_cmd_reply(skb);
+  return cfg80211_vendor_cmd_reply(skb);
 }
 
-static int in_range(int val, int min, int max) {
-    return (val >= min) && (val <= max);
-}
+static int in_range(int val, int min, int max) { return (val >= min) && (val <= max); }
 
 static bool is_valid_lte_range(uint16_t min, uint16_t max) {
-    return (min == 0 && max == 0) || (max >= min && in_range(min, LTE_FRQ_MIN, LTE_FRQ_MAX) &&
-                                      in_range(max, LTE_FRQ_MIN, LTE_FRQ_MAX));
+  return (min == 0 && max == 0) || (max >= min && in_range(min, LTE_FRQ_MIN, LTE_FRQ_MAX) &&
+                                    in_range(max, LTE_FRQ_MIN, LTE_FRQ_MAX));
 }
 
 static int iwl_vendor_lte_coex_dynamic_info_cmd(struct wiphy* wiphy, struct wireless_dev* wdev,
                                                 const void* data, int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    const struct lte_coex_dynamic_info_cmd* cmd = data;
-    struct iwl_lte_coex_config_cmd* config = &mvm->lte_state.config;
-    struct sk_buff* skb;
-    int err = LTE_OK;
-    int i;
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  const struct lte_coex_dynamic_info_cmd* cmd = data;
+  struct iwl_lte_coex_config_cmd* config = &mvm->lte_state.config;
+  struct sk_buff* skb;
+  int err = LTE_OK;
+  int i;
 
-    skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
-    if (!skb) { return -ENOMEM; }
+  skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+  if (!skb) {
+    return -ENOMEM;
+  }
 
-    if (data_len != sizeof(*cmd)) {
-        err = LTE_INVALID_DATA;
-        goto out;
+  if (data_len != sizeof(*cmd)) {
+    err = LTE_INVALID_DATA;
+    goto out;
+  }
+
+  if (!mvm->lte_state.has_static ||
+      (mvm->lte_state.has_config && mvm->lte_state.state != LTE_CONNECTED)) {
+    err = LTE_STATE_ERR;
+    goto out;
+  }
+
+  IWL_DEBUG_COEX(mvm,
+                 "LTE-COEX: dynamic cmd:\n"
+                 "\tlte band[0]: %d, chan[0]: %d\n\ttx range: %d - %d\n"
+                 "\trx range: %d - %d\n",
+                 cmd->lte_connected_bands[0], cmd->lte_connected_bands[1],
+                 cmd->wifi_tx_safe_freq_min, cmd->wifi_tx_safe_freq_max, cmd->wifi_rx_safe_freq_min,
+                 cmd->wifi_rx_safe_freq_max);
+
+  /* TODO: validate lte connected bands and channel, and frame struct */
+  config->lte_band = cpu_to_le32(cmd->lte_connected_bands[0]);
+  config->lte_chan = cpu_to_le32(cmd->lte_connected_bands[1]);
+  for (i = 0; i < LTE_FRAME_STRUCT_LENGTH; i++) {
+    IWL_DEBUG_COEX(mvm, "\tframe structure[%d]: %d\n", i, cmd->lte_frame_structure[i]);
+    config->lte_frame_structure[i] = cpu_to_le32(cmd->lte_frame_structure[i]);
+  }
+  if (!is_valid_lte_range(cmd->wifi_tx_safe_freq_min, cmd->wifi_tx_safe_freq_max) ||
+      !is_valid_lte_range(cmd->wifi_rx_safe_freq_min, cmd->wifi_rx_safe_freq_max)) {
+    err = LTE_ILLEGAL_PARAMS;
+    goto out;
+  }
+  config->tx_safe_freq_min = cpu_to_le32(cmd->wifi_tx_safe_freq_min);
+  config->tx_safe_freq_max = cpu_to_le32(cmd->wifi_tx_safe_freq_max);
+  config->rx_safe_freq_min = cpu_to_le32(cmd->wifi_rx_safe_freq_min);
+  config->rx_safe_freq_max = cpu_to_le32(cmd->wifi_rx_safe_freq_max);
+  for (i = 0; i < LTE_TX_POWER_LENGTH; i++) {
+    IWL_DEBUG_COEX(mvm, "\twifi max tx power[%d]: %d\n", i, cmd->wifi_max_tx_power[i]);
+    if (!in_range(cmd->wifi_max_tx_power[i], LTE_MAX_TX_MIN, LTE_MAX_TX_MAX)) {
+      err = LTE_ILLEGAL_PARAMS;
+      goto out;
     }
+    config->max_tx_power[i] = cmd->wifi_max_tx_power[i];
+  }
 
-    if (!mvm->lte_state.has_static ||
-        (mvm->lte_state.has_config && mvm->lte_state.state != LTE_CONNECTED)) {
-        err = LTE_STATE_ERR;
-        goto out;
-    }
+  mvm->lte_state.has_config = 1;
 
-    IWL_DEBUG_COEX(mvm,
-                   "LTE-COEX: dynamic cmd:\n"
-                   "\tlte band[0]: %d, chan[0]: %d\n\ttx range: %d - %d\n"
-                   "\trx range: %d - %d\n",
-                   cmd->lte_connected_bands[0], cmd->lte_connected_bands[1],
-                   cmd->wifi_tx_safe_freq_min, cmd->wifi_tx_safe_freq_max,
-                   cmd->wifi_rx_safe_freq_min, cmd->wifi_rx_safe_freq_max);
-
-    /* TODO: validate lte connected bands and channel, and frame struct */
-    config->lte_band = cpu_to_le32(cmd->lte_connected_bands[0]);
-    config->lte_chan = cpu_to_le32(cmd->lte_connected_bands[1]);
-    for (i = 0; i < LTE_FRAME_STRUCT_LENGTH; i++) {
-        IWL_DEBUG_COEX(mvm, "\tframe structure[%d]: %d\n", i, cmd->lte_frame_structure[i]);
-        config->lte_frame_structure[i] = cpu_to_le32(cmd->lte_frame_structure[i]);
+  if (mvm->lte_state.state == LTE_CONNECTED) {
+    mutex_lock(&mvm->mutex);
+    if (iwl_mvm_send_lte_coex_config_cmd(mvm)) {
+      err = LTE_OTHER_ERR;
     }
-    if (!is_valid_lte_range(cmd->wifi_tx_safe_freq_min, cmd->wifi_tx_safe_freq_max) ||
-        !is_valid_lte_range(cmd->wifi_rx_safe_freq_min, cmd->wifi_rx_safe_freq_max)) {
-        err = LTE_ILLEGAL_PARAMS;
-        goto out;
-    }
-    config->tx_safe_freq_min = cpu_to_le32(cmd->wifi_tx_safe_freq_min);
-    config->tx_safe_freq_max = cpu_to_le32(cmd->wifi_tx_safe_freq_max);
-    config->rx_safe_freq_min = cpu_to_le32(cmd->wifi_rx_safe_freq_min);
-    config->rx_safe_freq_max = cpu_to_le32(cmd->wifi_rx_safe_freq_max);
-    for (i = 0; i < LTE_TX_POWER_LENGTH; i++) {
-        IWL_DEBUG_COEX(mvm, "\twifi max tx power[%d]: %d\n", i, cmd->wifi_max_tx_power[i]);
-        if (!in_range(cmd->wifi_max_tx_power[i], LTE_MAX_TX_MIN, LTE_MAX_TX_MAX)) {
-            err = LTE_ILLEGAL_PARAMS;
-            goto out;
-        }
-        config->max_tx_power[i] = cmd->wifi_max_tx_power[i];
-    }
-
-    mvm->lte_state.has_config = 1;
-
-    if (mvm->lte_state.state == LTE_CONNECTED) {
-        mutex_lock(&mvm->mutex);
-        if (iwl_mvm_send_lte_coex_config_cmd(mvm)) { err = LTE_OTHER_ERR; }
-        mutex_unlock(&mvm->mutex);
-    }
+    mutex_unlock(&mvm->mutex);
+  }
 out:
-    if (err) { iwl_mvm_reset_lte_state(mvm); }
+  if (err) {
+    iwl_mvm_reset_lte_state(mvm);
+  }
 
-    if (nla_put_u8(skb, NLA_BINARY, err)) {
-        kfree_skb(skb);
-        return -ENOBUFS;
-    }
+  if (nla_put_u8(skb, NLA_BINARY, err)) {
+    kfree_skb(skb);
+    return -ENOBUFS;
+  }
 
-    return cfg80211_vendor_cmd_reply(skb);
+  return cfg80211_vendor_cmd_reply(skb);
 }
 
 static int iwl_vendor_lte_sps_cmd(struct wiphy* wiphy, struct wireless_dev* wdev, const void* data,
                                   int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    const struct lte_coex_sps_info_cmd* cmd = data;
-    struct iwl_lte_coex_sps_cmd* sps = &mvm->lte_state.sps;
-    struct sk_buff* skb;
-    int err = LTE_OK;
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  const struct lte_coex_sps_info_cmd* cmd = data;
+  struct iwl_lte_coex_sps_cmd* sps = &mvm->lte_state.sps;
+  struct sk_buff* skb;
+  int err = LTE_OK;
 
-    skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
-    if (!skb) { return -ENOMEM; }
+  skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+  if (!skb) {
+    return -ENOMEM;
+  }
 
-    if (data_len != sizeof(*cmd)) {
-        err = LTE_INVALID_DATA;
-        goto out;
-    }
+  if (data_len != sizeof(*cmd)) {
+    err = LTE_INVALID_DATA;
+    goto out;
+  }
 
-    IWL_DEBUG_COEX(mvm, "LTE-COEX: sps cmd:\n\tsps info: %d\n", cmd->sps_info);
+  IWL_DEBUG_COEX(mvm, "LTE-COEX: sps cmd:\n\tsps info: %d\n", cmd->sps_info);
 
-    if (mvm->lte_state.state != LTE_CONNECTED) {
-        err = LTE_STATE_ERR;
-        goto out;
-    }
+  if (mvm->lte_state.state != LTE_CONNECTED) {
+    err = LTE_STATE_ERR;
+    goto out;
+  }
 
-    /* TODO: validate SPS */
-    sps->lte_semi_persistent_info = cpu_to_le32(cmd->sps_info);
+  /* TODO: validate SPS */
+  sps->lte_semi_persistent_info = cpu_to_le32(cmd->sps_info);
 
-    mutex_lock(&mvm->mutex);
-    if (iwl_mvm_send_lte_sps_cmd(mvm)) {
-        err = LTE_OTHER_ERR;
-    } else {
-        mvm->lte_state.has_sps = 1;
-    }
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  if (iwl_mvm_send_lte_sps_cmd(mvm)) {
+    err = LTE_OTHER_ERR;
+  } else {
+    mvm->lte_state.has_sps = 1;
+  }
+  mutex_unlock(&mvm->mutex);
 
 out:
-    if (err) { iwl_mvm_reset_lte_state(mvm); }
+  if (err) {
+    iwl_mvm_reset_lte_state(mvm);
+  }
 
-    if (nla_put_u8(skb, NLA_BINARY, err)) {
-        kfree_skb(skb);
-        return -ENOBUFS;
-    }
+  if (nla_put_u8(skb, NLA_BINARY, err)) {
+    kfree_skb(skb);
+    return -ENOBUFS;
+  }
 
-    return cfg80211_vendor_cmd_reply(skb);
+  return cfg80211_vendor_cmd_reply(skb);
 }
 
 static int iwl_vendor_lte_coex_wifi_reported_channel_cmd(struct wiphy* wiphy,
                                                          struct wireless_dev* wdev,
                                                          const void* data, int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    const struct lte_coex_wifi_reported_chan_cmd* cmd = data;
-    struct iwl_lte_coex_wifi_reported_channel_cmd* rprtd_chan = &mvm->lte_state.rprtd_chan;
-    struct sk_buff* skb;
-    int err = LTE_OK;
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  const struct lte_coex_wifi_reported_chan_cmd* cmd = data;
+  struct iwl_lte_coex_wifi_reported_channel_cmd* rprtd_chan = &mvm->lte_state.rprtd_chan;
+  struct sk_buff* skb;
+  int err = LTE_OK;
 
-    skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
-    if (!skb) { return -ENOMEM; }
+  skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+  if (!skb) {
+    return -ENOMEM;
+  }
 
-    if (data_len != sizeof(*cmd)) {
-        err = LTE_INVALID_DATA;
-        goto out;
-    }
+  if (data_len != sizeof(*cmd)) {
+    err = LTE_INVALID_DATA;
+    goto out;
+  }
 
-    IWL_DEBUG_COEX(mvm,
-                   "LTE-COEX: wifi reported channel cmd:\n"
-                   "\tchannel: %d, bandwidth: %d\n",
-                   cmd->chan, cmd->bandwidth);
+  IWL_DEBUG_COEX(mvm,
+                 "LTE-COEX: wifi reported channel cmd:\n"
+                 "\tchannel: %d, bandwidth: %d\n",
+                 cmd->chan, cmd->bandwidth);
 
-    if (!in_range(cmd->chan, LTE_RC_CHAN_MIN, LTE_RC_CHAN_MAX) ||
-        !in_range(cmd->bandwidth, LTE_RC_BW_MIN, LTE_RC_BW_MAX)) {
-        err = LTE_ILLEGAL_PARAMS;
-        goto out;
-    }
+  if (!in_range(cmd->chan, LTE_RC_CHAN_MIN, LTE_RC_CHAN_MAX) ||
+      !in_range(cmd->bandwidth, LTE_RC_BW_MIN, LTE_RC_BW_MAX)) {
+    err = LTE_ILLEGAL_PARAMS;
+    goto out;
+  }
 
-    rprtd_chan->channel = cpu_to_le32(cmd->chan);
-    rprtd_chan->bandwidth = cpu_to_le32(cmd->bandwidth);
+  rprtd_chan->channel = cpu_to_le32(cmd->chan);
+  rprtd_chan->bandwidth = cpu_to_le32(cmd->bandwidth);
 
-    mutex_lock(&mvm->mutex);
-    if (iwl_mvm_send_lte_coex_wifi_reported_channel_cmd(mvm)) {
-        err = LTE_OTHER_ERR;
-    } else {
-        mvm->lte_state.has_rprtd_chan = 1;
-    }
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  if (iwl_mvm_send_lte_coex_wifi_reported_channel_cmd(mvm)) {
+    err = LTE_OTHER_ERR;
+  } else {
+    mvm->lte_state.has_rprtd_chan = 1;
+  }
+  mutex_unlock(&mvm->mutex);
 
 out:
-    if (err) { iwl_mvm_reset_lte_state(mvm); }
+  if (err) {
+    iwl_mvm_reset_lte_state(mvm);
+  }
 
-    if (nla_put_u8(skb, NLA_BINARY, err)) {
-        kfree_skb(skb);
-        return -ENOBUFS;
-    }
+  if (nla_put_u8(skb, NLA_BINARY, err)) {
+    kfree_skb(skb);
+    return -ENOBUFS;
+  }
 
-    return cfg80211_vendor_cmd_reply(skb);
+  return cfg80211_vendor_cmd_reply(skb);
 }
 #endif /* CPTCFG_IWLWIFI_LTE_COEX */
 
 static int iwl_vendor_frame_filter_cmd(struct wiphy* wiphy, struct wireless_dev* wdev,
                                        const void* data, int data_len) {
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    struct ieee80211_vif* vif = wdev_to_ieee80211_vif(wdev);
-    int err = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  struct ieee80211_vif* vif = wdev_to_ieee80211_vif(wdev);
+  int err = iwl_mvm_parse_vendor_data(tb, data, data_len);
 
-    if (err) { return err; }
-    if (!vif) { return -EINVAL; }
-    vif->filter_grat_arp_unsol_na = tb[IWL_MVM_VENDOR_ATTR_FILTER_ARP_NA];
-    vif->filter_gtk = tb[IWL_MVM_VENDOR_ATTR_FILTER_GTK];
+  if (err) {
+    return err;
+  }
+  if (!vif) {
+    return -EINVAL;
+  }
+  vif->filter_grat_arp_unsol_na = tb[IWL_MVM_VENDOR_ATTR_FILTER_ARP_NA];
+  vif->filter_gtk = tb[IWL_MVM_VENDOR_ATTR_FILTER_GTK];
 
-    return 0;
+  return 0;
 }
 
 #ifdef CPTCFG_IWLMVM_TDLS_PEER_CACHE
 static int iwl_vendor_tdls_peer_cache_add(struct wiphy* wiphy, struct wireless_dev* wdev,
                                           const void* data, int data_len) {
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct iwl_mvm_tdls_peer_counter* cnt;
-    uint8_t* addr;
-    struct ieee80211_vif* vif = wdev_to_ieee80211_vif(wdev);
-    int err = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_mvm_tdls_peer_counter* cnt;
+  uint8_t* addr;
+  struct ieee80211_vif* vif = wdev_to_ieee80211_vif(wdev);
+  int err = iwl_mvm_parse_vendor_data(tb, data, data_len);
 
-    if (err) { return err; }
+  if (err) {
+    return err;
+  }
 
-    if (!vif) { return -ENODEV; }
+  if (!vif) {
+    return -ENODEV;
+  }
 
-    if (vif->type != NL80211_IFTYPE_STATION || !tb[IWL_MVM_VENDOR_ATTR_ADDR]) { return -EINVAL; }
+  if (vif->type != NL80211_IFTYPE_STATION || !tb[IWL_MVM_VENDOR_ATTR_ADDR]) {
+    return -EINVAL;
+  }
 
-    mutex_lock(&mvm->mutex);
-    if (mvm->tdls_peer_cache_cnt >= IWL_MVM_TDLS_CNT_MAX_PEERS) {
-        err = -ENOSPC;
-        goto out_unlock;
-    }
+  mutex_lock(&mvm->mutex);
+  if (mvm->tdls_peer_cache_cnt >= IWL_MVM_TDLS_CNT_MAX_PEERS) {
+    err = -ENOSPC;
+    goto out_unlock;
+  }
 
-    addr = nla_data(tb[IWL_MVM_VENDOR_ATTR_ADDR]);
+  addr = nla_data(tb[IWL_MVM_VENDOR_ATTR_ADDR]);
 
-    rcu_read_lock();
-    cnt = iwl_mvm_tdls_peer_cache_find(mvm, addr);
-    rcu_read_unlock();
-    if (cnt) {
-        err = -EEXIST;
-        goto out_unlock;
-    }
+  rcu_read_lock();
+  cnt = iwl_mvm_tdls_peer_cache_find(mvm, addr);
+  rcu_read_unlock();
+  if (cnt) {
+    err = -EEXIST;
+    goto out_unlock;
+  }
 
-    cnt = kzalloc(sizeof(*cnt) + sizeof(cnt->rx[0]) * mvm->trans->num_rx_queues, GFP_KERNEL);
-    if (!cnt) {
-        err = -ENOMEM;
-        goto out_unlock;
-    }
+  cnt = kzalloc(sizeof(*cnt) + sizeof(cnt->rx[0]) * mvm->trans->num_rx_queues, GFP_KERNEL);
+  if (!cnt) {
+    err = -ENOMEM;
+    goto out_unlock;
+  }
 
-    IWL_DEBUG_TDLS(mvm, "Adding %pM to TDLS peer cache\n", addr);
-    ether_addr_copy(cnt->mac.addr, addr);
-    cnt->vif = vif;
-    list_add_tail_rcu(&cnt->list, &mvm->tdls_peer_cache_list);
-    mvm->tdls_peer_cache_cnt++;
+  IWL_DEBUG_TDLS(mvm, "Adding %pM to TDLS peer cache\n", addr);
+  ether_addr_copy(cnt->mac.addr, addr);
+  cnt->vif = vif;
+  list_add_tail_rcu(&cnt->list, &mvm->tdls_peer_cache_list);
+  mvm->tdls_peer_cache_cnt++;
 
 out_unlock:
-    mutex_unlock(&mvm->mutex);
-    return err;
+  mutex_unlock(&mvm->mutex);
+  return err;
 }
 
 static int iwl_vendor_tdls_peer_cache_del(struct wiphy* wiphy, struct wireless_dev* wdev,
                                           const void* data, int data_len) {
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct iwl_mvm_tdls_peer_counter* cnt;
-    uint8_t* addr;
-    int err = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_mvm_tdls_peer_counter* cnt;
+  uint8_t* addr;
+  int err = iwl_mvm_parse_vendor_data(tb, data, data_len);
 
-    if (err) { return err; }
+  if (err) {
+    return err;
+  }
 
-    if (!tb[IWL_MVM_VENDOR_ATTR_ADDR]) { return -EINVAL; }
+  if (!tb[IWL_MVM_VENDOR_ATTR_ADDR]) {
+    return -EINVAL;
+  }
 
-    addr = nla_data(tb[IWL_MVM_VENDOR_ATTR_ADDR]);
+  addr = nla_data(tb[IWL_MVM_VENDOR_ATTR_ADDR]);
 
-    mutex_lock(&mvm->mutex);
-    rcu_read_lock();
-    cnt = iwl_mvm_tdls_peer_cache_find(mvm, addr);
-    if (!cnt) {
-        IWL_DEBUG_TDLS(mvm, "%pM not found in TDLS peer cache\n", addr);
-        err = -ENOENT;
-        goto out_unlock;
-    }
+  mutex_lock(&mvm->mutex);
+  rcu_read_lock();
+  cnt = iwl_mvm_tdls_peer_cache_find(mvm, addr);
+  if (!cnt) {
+    IWL_DEBUG_TDLS(mvm, "%pM not found in TDLS peer cache\n", addr);
+    err = -ENOENT;
+    goto out_unlock;
+  }
 
-    IWL_DEBUG_TDLS(mvm, "Removing %pM from TDLS peer cache\n", addr);
-    mvm->tdls_peer_cache_cnt--;
-    list_del_rcu(&cnt->list);
-    kfree_rcu(cnt, rcu_head);
+  IWL_DEBUG_TDLS(mvm, "Removing %pM from TDLS peer cache\n", addr);
+  mvm->tdls_peer_cache_cnt--;
+  list_del_rcu(&cnt->list);
+  kfree_rcu(cnt, rcu_head);
 
 out_unlock:
-    rcu_read_unlock();
-    mutex_unlock(&mvm->mutex);
-    return err;
+  rcu_read_unlock();
+  mutex_unlock(&mvm->mutex);
+  return err;
 }
 
 static int iwl_vendor_tdls_peer_cache_query(struct wiphy* wiphy, struct wireless_dev* wdev,
                                             const void* data, int data_len) {
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct iwl_mvm_tdls_peer_counter* cnt;
-    struct sk_buff* skb;
-    uint32_t rx_bytes, tx_bytes;
-    uint8_t* addr;
-    int err = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_mvm_tdls_peer_counter* cnt;
+  struct sk_buff* skb;
+  uint32_t rx_bytes, tx_bytes;
+  uint8_t* addr;
+  int err = iwl_mvm_parse_vendor_data(tb, data, data_len);
 
-    if (err) { return err; }
+  if (err) {
+    return err;
+  }
 
-    if (!tb[IWL_MVM_VENDOR_ATTR_ADDR]) { return -EINVAL; }
+  if (!tb[IWL_MVM_VENDOR_ATTR_ADDR]) {
+    return -EINVAL;
+  }
 
-    addr = nla_data(tb[IWL_MVM_VENDOR_ATTR_ADDR]);
+  addr = nla_data(tb[IWL_MVM_VENDOR_ATTR_ADDR]);
 
-    rcu_read_lock();
-    cnt = iwl_mvm_tdls_peer_cache_find(mvm, addr);
-    if (!cnt) {
-        IWL_DEBUG_TDLS(mvm, "%pM not found in TDLS peer cache\n", addr);
-        err = -ENOENT;
-    } else {
-        int q;
+  rcu_read_lock();
+  cnt = iwl_mvm_tdls_peer_cache_find(mvm, addr);
+  if (!cnt) {
+    IWL_DEBUG_TDLS(mvm, "%pM not found in TDLS peer cache\n", addr);
+    err = -ENOENT;
+  } else {
+    int q;
 
-        tx_bytes = cnt->tx_bytes;
-        rx_bytes = 0;
-        for (q = 0; q < mvm->trans->num_rx_queues; q++) {
-            rx_bytes += cnt->rx[q].bytes;
-        }
+    tx_bytes = cnt->tx_bytes;
+    rx_bytes = 0;
+    for (q = 0; q < mvm->trans->num_rx_queues; q++) {
+      rx_bytes += cnt->rx[q].bytes;
     }
-    rcu_read_unlock();
-    if (err) { return err; }
+  }
+  rcu_read_unlock();
+  if (err) {
+    return err;
+  }
 
-    skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
-    if (!skb) { return -ENOMEM; }
-    if (nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_TX_BYTES, tx_bytes) ||
-        nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_RX_BYTES, rx_bytes)) {
-        kfree_skb(skb);
-        return -ENOBUFS;
-    }
+  skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+  if (!skb) {
+    return -ENOMEM;
+  }
+  if (nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_TX_BYTES, tx_bytes) ||
+      nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_RX_BYTES, rx_bytes)) {
+    kfree_skb(skb);
+    return -ENOBUFS;
+  }
 
-    return cfg80211_vendor_cmd_reply(skb);
+  return cfg80211_vendor_cmd_reply(skb);
 }
 #endif /* CPTCFG_IWLMVM_TDLS_PEER_CACHE */
 
 static int iwl_vendor_set_nic_txpower_limit(struct wiphy* wiphy, struct wireless_dev* wdev,
                                             const void* data, int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    union {
-        struct iwl_dev_tx_power_cmd_v4 v4;
-        struct iwl_dev_tx_power_cmd v5;
-    } cmd = {
-        .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_DEVICE),
-        .v5.v3.dev_24 = cpu_to_le16(IWL_DEV_MAX_TX_POWER),
-        .v5.v3.dev_52_low = cpu_to_le16(IWL_DEV_MAX_TX_POWER),
-        .v5.v3.dev_52_high = cpu_to_le16(IWL_DEV_MAX_TX_POWER),
-    };
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    int len = sizeof(cmd);
-    int err;
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  union {
+    struct iwl_dev_tx_power_cmd_v4 v4;
+    struct iwl_dev_tx_power_cmd v5;
+  } cmd = {
+      .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_DEVICE),
+      .v5.v3.dev_24 = cpu_to_le16(IWL_DEV_MAX_TX_POWER),
+      .v5.v3.dev_52_low = cpu_to_le16(IWL_DEV_MAX_TX_POWER),
+      .v5.v3.dev_52_high = cpu_to_le16(IWL_DEV_MAX_TX_POWER),
+  };
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  int len = sizeof(cmd);
+  int err;
 
-    err = iwl_mvm_parse_vendor_data(tb, data, data_len);
-    if (err) { return err; }
+  err = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  if (err) {
+    return err;
+  }
 
-    if (tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_24]) {
-        int32_t txp = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_24]);
+  if (tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_24]) {
+    int32_t txp = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_24]);
 
-        if (txp < 0 || txp > IWL_DEV_MAX_TX_POWER) { return -EINVAL; }
-        cmd.v5.v3.dev_24 = cpu_to_le16(txp);
+    if (txp < 0 || txp > IWL_DEV_MAX_TX_POWER) {
+      return -EINVAL;
     }
+    cmd.v5.v3.dev_24 = cpu_to_le16(txp);
+  }
 
-    if (tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52L]) {
-        int32_t txp = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52L]);
+  if (tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52L]) {
+    int32_t txp = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52L]);
 
-        if (txp < 0 || txp > IWL_DEV_MAX_TX_POWER) { return -EINVAL; }
-        cmd.v5.v3.dev_52_low = cpu_to_le16(txp);
+    if (txp < 0 || txp > IWL_DEV_MAX_TX_POWER) {
+      return -EINVAL;
     }
+    cmd.v5.v3.dev_52_low = cpu_to_le16(txp);
+  }
 
-    if (tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52H]) {
-        int32_t txp = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52H]);
+  if (tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52H]) {
+    int32_t txp = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_TXP_LIMIT_52H]);
 
-        if (txp < 0 || txp > IWL_DEV_MAX_TX_POWER) { return -EINVAL; }
-        cmd.v5.v3.dev_52_high = cpu_to_le16(txp);
+    if (txp < 0 || txp > IWL_DEV_MAX_TX_POWER) {
+      return -EINVAL;
     }
+    cmd.v5.v3.dev_52_high = cpu_to_le16(txp);
+  }
 
-    mvm->txp_cmd.v5 = cmd.v5;
+  mvm->txp_cmd.v5 = cmd.v5;
 
-    if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
-        len = sizeof(mvm->txp_cmd.v5);
-    } else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
-        len = sizeof(mvm->txp_cmd.v4);
-    } else {
-        len = sizeof(mvm->txp_cmd.v4.v3);
-    }
+  if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
+    len = sizeof(mvm->txp_cmd.v5);
+  } else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
+    len = sizeof(mvm->txp_cmd.v4);
+  } else {
+    len = sizeof(mvm->txp_cmd.v4.v3);
+  }
 
-    mutex_lock(&mvm->mutex);
-    err = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  err = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+  mutex_unlock(&mvm->mutex);
 
-    if (err) { IWL_ERR(mvm, "failed to update device TX power: %d\n", err); }
-    return 0;
+  if (err) {
+    IWL_ERR(mvm, "failed to update device TX power: %d\n", err);
+  }
+  return 0;
 }
 
 #ifdef CPTCFG_IWLMVM_P2P_OPPPS_TEST_WA
 static int iwl_mvm_oppps_wa_update_quota(struct iwl_mvm* mvm, struct ieee80211_vif* vif,
                                          bool enable) {
-    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-    struct ieee80211_p2p_noa_attr* noa = &vif->bss_conf.p2p_noa_attr;
-    bool force_update = true;
+  struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+  struct ieee80211_p2p_noa_attr* noa = &vif->bss_conf.p2p_noa_attr;
+  bool force_update = true;
 
-    if (enable && noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) {
-        mvm->p2p_opps_test_wa_vif = mvmvif;
-    } else {
-        mvm->p2p_opps_test_wa_vif = NULL;
-    }
+  if (enable && noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) {
+    mvm->p2p_opps_test_wa_vif = mvmvif;
+  } else {
+    mvm->p2p_opps_test_wa_vif = NULL;
+  }
 
-    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
+  if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
 #ifdef CPTCFG_IWLWIFI_DEBUG_HOST_CMD_ENABLED
-        return iwl_mvm_dhc_quota_enforce(mvm, mvm->p2p_opps_test_wa_vif, 0);
+    return iwl_mvm_dhc_quota_enforce(mvm, mvm->p2p_opps_test_wa_vif, 0);
 #else
-        return -EOPNOTSUPP;
+    return -EOPNOTSUPP;
 #endif
-    }
+  }
 
-    return iwl_mvm_update_quotas(mvm, force_update, NULL);
+  return iwl_mvm_update_quotas(mvm, force_update, NULL);
 }
 
 static int iwl_mvm_oppps_wa(struct wiphy* wiphy, struct wireless_dev* wdev, const void* data,
                             int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    int err = iwl_mvm_parse_vendor_data(tb, data, data_len);
-    struct ieee80211_vif* vif = wdev_to_ieee80211_vif(wdev);
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  int err = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  struct ieee80211_vif* vif = wdev_to_ieee80211_vif(wdev);
 
-    if (err) { return err; }
-
-    if (!vif) { return -ENODEV; }
-
-    mutex_lock(&mvm->mutex);
-    if (vif->type == NL80211_IFTYPE_STATION && vif->p2p) {
-        bool enable = !!tb[IWL_MVM_VENDOR_ATTR_OPPPS_WA];
-
-        err = iwl_mvm_oppps_wa_update_quota(mvm, vif, enable);
-    }
-    mutex_unlock(&mvm->mutex);
-
+  if (err) {
     return err;
+  }
+
+  if (!vif) {
+    return -ENODEV;
+  }
+
+  mutex_lock(&mvm->mutex);
+  if (vif->type == NL80211_IFTYPE_STATION && vif->p2p) {
+    bool enable = !!tb[IWL_MVM_VENDOR_ATTR_OPPPS_WA];
+
+    err = iwl_mvm_oppps_wa_update_quota(mvm, vif, enable);
+  }
+  mutex_unlock(&mvm->mutex);
+
+  return err;
 }
 #endif
 
 void iwl_mvm_active_rx_filters(struct iwl_mvm* mvm) {
-    int i, len, total = 0;
-    struct iwl_mcast_filter_cmd* cmd;
-    static const uint8_t ipv4mc[] = {0x01, 0x00, 0x5e};
-    static const uint8_t ipv6mc[] = {0x33, 0x33};
-    static const uint8_t ipv4_mdns[] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb};
-    static const uint8_t ipv6_mdns[] = {0x33, 0x33, 0x00, 0x00, 0x00, 0xfb};
+  int i, len, total = 0;
+  struct iwl_mcast_filter_cmd* cmd;
+  static const uint8_t ipv4mc[] = {0x01, 0x00, 0x5e};
+  static const uint8_t ipv6mc[] = {0x33, 0x33};
+  static const uint8_t ipv4_mdns[] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb};
+  static const uint8_t ipv6_mdns[] = {0x33, 0x33, 0x00, 0x00, 0x00, 0xfb};
 
-    lockdep_assert_held(&mvm->mutex);
+  lockdep_assert_held(&mvm->mutex);
 
-    if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_EINVAL) { return; }
+  if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_EINVAL) {
+    return;
+  }
 
-    for (i = 0; i < mvm->mcast_filter_cmd->count; i++) {
-        if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST4 &&
-            memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4mc, sizeof(ipv4mc)) == 0) {
-            total++;
-        } else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4_mdns,
-                          sizeof(ipv4_mdns)) == 0) {
-            total++;
-        } else if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST6 &&
-                   memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6mc,
-                          sizeof(ipv6mc)) == 0) {
-            total++;
-        } else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6_mdns,
-                          sizeof(ipv6_mdns)) == 0) {
-            total++;
-        }
+  for (i = 0; i < mvm->mcast_filter_cmd->count; i++) {
+    if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST4 &&
+        memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4mc, sizeof(ipv4mc)) == 0) {
+      total++;
+    } else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4_mdns,
+                      sizeof(ipv4_mdns)) == 0) {
+      total++;
+    } else if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST6 &&
+               memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6mc, sizeof(ipv6mc)) ==
+                   0) {
+      total++;
+    } else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6_mdns,
+                      sizeof(ipv6_mdns)) == 0) {
+      total++;
+    }
+  }
+
+  /* FW expects full words */
+  len = roundup(sizeof(*cmd) + total * ETH_ALEN, 4);
+  cmd = kzalloc(len, GFP_KERNEL);
+  if (!cmd) {
+    return;
+  }
+
+  memcpy(cmd, mvm->mcast_filter_cmd, sizeof(*cmd));
+  cmd->count = 0;
+
+  for (i = 0; i < mvm->mcast_filter_cmd->count; i++) {
+    bool copy_filter = false;
+
+    if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST4 &&
+        memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4mc, sizeof(ipv4mc)) == 0) {
+      copy_filter = true;
+    } else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4_mdns,
+                      sizeof(ipv4_mdns)) == 0) {
+      copy_filter = true;
+    } else if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST6 &&
+               memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6mc, sizeof(ipv6mc)) ==
+                   0) {
+      copy_filter = true;
+    } else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6_mdns,
+                      sizeof(ipv6_mdns)) == 0) {
+      copy_filter = true;
     }
 
-    /* FW expects full words */
-    len = roundup(sizeof(*cmd) + total * ETH_ALEN, 4);
-    cmd = kzalloc(len, GFP_KERNEL);
-    if (!cmd) { return; }
-
-    memcpy(cmd, mvm->mcast_filter_cmd, sizeof(*cmd));
-    cmd->count = 0;
-
-    for (i = 0; i < mvm->mcast_filter_cmd->count; i++) {
-        bool copy_filter = false;
-
-        if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST4 &&
-            memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4mc, sizeof(ipv4mc)) == 0) {
-            copy_filter = true;
-        } else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv4_mdns,
-                          sizeof(ipv4_mdns)) == 0) {
-            copy_filter = true;
-        } else if (mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_MCAST6 &&
-                   memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6mc,
-                          sizeof(ipv6mc)) == 0) {
-            copy_filter = true;
-        } else if (memcmp(&mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN], ipv6_mdns,
-                          sizeof(ipv6_mdns)) == 0) {
-            copy_filter = true;
-        }
-
-        if (!copy_filter) { continue; }
-
-        ether_addr_copy(&cmd->addr_list[cmd->count * ETH_ALEN],
-                        &mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN]);
-        cmd->count++;
+    if (!copy_filter) {
+      continue;
     }
 
-    kfree(mvm->mcast_active_filter_cmd);
-    mvm->mcast_active_filter_cmd = cmd;
+    ether_addr_copy(&cmd->addr_list[cmd->count * ETH_ALEN],
+                    &mvm->mcast_filter_cmd->addr_list[i * ETH_ALEN]);
+    cmd->count++;
+  }
+
+  kfree(mvm->mcast_active_filter_cmd);
+  mvm->mcast_active_filter_cmd = cmd;
 }
 
 static int iwl_mvm_vendor_rxfilter(struct wiphy* wiphy, struct wireless_dev* wdev, const void* data,
                                    int data_len) {
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    enum iwl_mvm_vendor_rxfilter_flags filter, rx_filters, old_rx_filters;
-    enum iwl_mvm_vendor_rxfilter_op op;
-    bool first_set;
-    uint32_t mask;
-    int retval;
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  enum iwl_mvm_vendor_rxfilter_flags filter, rx_filters, old_rx_filters;
+  enum iwl_mvm_vendor_rxfilter_op op;
+  bool first_set;
+  uint32_t mask;
+  int retval;
 
-    retval = iwl_mvm_parse_vendor_data(tb, data, data_len);
-    if (retval) { return retval; }
+  retval = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  if (retval) {
+    return retval;
+  }
 
-    if (!tb[IWL_MVM_VENDOR_ATTR_RXFILTER]) { return -EINVAL; }
+  if (!tb[IWL_MVM_VENDOR_ATTR_RXFILTER]) {
+    return -EINVAL;
+  }
 
-    if (!tb[IWL_MVM_VENDOR_ATTR_RXFILTER_OP]) { return -EINVAL; }
+  if (!tb[IWL_MVM_VENDOR_ATTR_RXFILTER_OP]) {
+    return -EINVAL;
+  }
 
-    filter = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_RXFILTER]);
-    op = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_RXFILTER_OP]);
+  filter = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_RXFILTER]);
+  op = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_RXFILTER_OP]);
 
-    if (filter != IWL_MVM_VENDOR_RXFILTER_UNICAST && filter != IWL_MVM_VENDOR_RXFILTER_BCAST &&
-        filter != IWL_MVM_VENDOR_RXFILTER_MCAST4 && filter != IWL_MVM_VENDOR_RXFILTER_MCAST6) {
-        return -EINVAL;
-    }
+  if (filter != IWL_MVM_VENDOR_RXFILTER_UNICAST && filter != IWL_MVM_VENDOR_RXFILTER_BCAST &&
+      filter != IWL_MVM_VENDOR_RXFILTER_MCAST4 && filter != IWL_MVM_VENDOR_RXFILTER_MCAST6) {
+    return -EINVAL;
+  }
 
-    rx_filters = mvm->rx_filters & ~IWL_MVM_VENDOR_RXFILTER_EINVAL;
-    switch (op) {
+  rx_filters = mvm->rx_filters & ~IWL_MVM_VENDOR_RXFILTER_EINVAL;
+  switch (op) {
     case IWL_MVM_VENDOR_RXFILTER_OP_DROP:
-        rx_filters &= ~filter;
-        break;
+      rx_filters &= ~filter;
+      break;
     case IWL_MVM_VENDOR_RXFILTER_OP_PASS:
-        rx_filters |= filter;
-        break;
+      rx_filters |= filter;
+      break;
     default:
-        return -EINVAL;
-    }
+      return -EINVAL;
+  }
 
-    first_set = mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_EINVAL;
+  first_set = mvm->rx_filters & IWL_MVM_VENDOR_RXFILTER_EINVAL;
 
-    /* If first time set - clear EINVAL value */
-    mvm->rx_filters &= ~IWL_MVM_VENDOR_RXFILTER_EINVAL;
+  /* If first time set - clear EINVAL value */
+  mvm->rx_filters &= ~IWL_MVM_VENDOR_RXFILTER_EINVAL;
 
-    if (rx_filters == mvm->rx_filters && !first_set) { return 0; }
-
-    mutex_lock(&mvm->mutex);
-
-    old_rx_filters = mvm->rx_filters;
-    mvm->rx_filters = rx_filters;
-
-    mask = IWL_MVM_VENDOR_RXFILTER_MCAST4 | IWL_MVM_VENDOR_RXFILTER_MCAST6;
-    if ((old_rx_filters & mask) != (rx_filters & mask) || first_set) {
-        iwl_mvm_active_rx_filters(mvm);
-        iwl_mvm_recalc_multicast(mvm);
-    }
-
-    mask = IWL_MVM_VENDOR_RXFILTER_BCAST;
-    if ((old_rx_filters & mask) != (rx_filters & mask) || first_set) {
-        iwl_mvm_configure_bcast_filter(mvm);
-    }
-
-    mutex_unlock(&mvm->mutex);
-
+  if (rx_filters == mvm->rx_filters && !first_set) {
     return 0;
+  }
+
+  mutex_lock(&mvm->mutex);
+
+  old_rx_filters = mvm->rx_filters;
+  mvm->rx_filters = rx_filters;
+
+  mask = IWL_MVM_VENDOR_RXFILTER_MCAST4 | IWL_MVM_VENDOR_RXFILTER_MCAST6;
+  if ((old_rx_filters & mask) != (rx_filters & mask) || first_set) {
+    iwl_mvm_active_rx_filters(mvm);
+    iwl_mvm_recalc_multicast(mvm);
+  }
+
+  mask = IWL_MVM_VENDOR_RXFILTER_BCAST;
+  if ((old_rx_filters & mask) != (rx_filters & mask) || first_set) {
+    iwl_mvm_configure_bcast_filter(mvm);
+  }
+
+  mutex_unlock(&mvm->mutex);
+
+  return 0;
 }
 
 static int iwl_mvm_vendor_dbg_collect(struct wiphy* wiphy, struct wireless_dev* wdev,
                                       const void* data, int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    int err, len = 0;
-    const char* trigger_desc;
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  int err, len = 0;
+  const char* trigger_desc;
 
-    err = iwl_mvm_parse_vendor_data(tb, data, data_len);
-    if (err) { return err; }
+  err = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  if (err) {
+    return err;
+  }
 
-    if (!tb[IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER]) { return -EINVAL; }
+  if (!tb[IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER]) {
+    return -EINVAL;
+  }
 
-    trigger_desc = nla_data(tb[IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER]);
-    len = nla_len(tb[IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER]);
+  trigger_desc = nla_data(tb[IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER]);
+  len = nla_len(tb[IWL_MVM_VENDOR_ATTR_DBG_COLLECT_TRIGGER]);
 
-    iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER_EXTENDED, trigger_desc, len);
+  iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER_EXTENDED, trigger_desc, len);
 
-    return 0;
+  return 0;
 }
 
 static int iwl_mvm_vendor_nan_faw_conf(struct wiphy* wiphy, struct wireless_dev* wdev,
                                        const void* data, int data_len) {
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct cfg80211_chan_def def = {};
-    struct ieee80211_channel* chan;
-    uint32_t freq;
-    uint8_t slots;
-    int retval;
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct cfg80211_chan_def def = {};
+  struct ieee80211_channel* chan;
+  uint32_t freq;
+  uint8_t slots;
+  int retval;
 
-    retval = iwl_mvm_parse_vendor_data(tb, data, data_len);
-    if (retval) { return retval; }
+  retval = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  if (retval) {
+    return retval;
+  }
 
-    if (!tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_SLOTS]) { return -EINVAL; }
+  if (!tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_SLOTS]) {
+    return -EINVAL;
+  }
 
-    if (!tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_FREQ]) { return -EINVAL; }
+  if (!tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_FREQ]) {
+    return -EINVAL;
+  }
 
-    freq = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_FREQ]);
-    slots = nla_get_u8(tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_SLOTS]);
+  freq = nla_get_u32(tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_FREQ]);
+  slots = nla_get_u8(tb[IWL_MVM_VENDOR_ATTR_NAN_FAW_SLOTS]);
 
-    chan = ieee80211_get_channel(wiphy, freq);
-    if (!chan) { return -EINVAL; }
+  chan = ieee80211_get_channel(wiphy, freq);
+  if (!chan) {
+    return -EINVAL;
+  }
 
-    cfg80211_chandef_create(&def, chan, NL80211_CHAN_NO_HT);
+  cfg80211_chandef_create(&def, chan, NL80211_CHAN_NO_HT);
 
-    if (!cfg80211_chandef_usable(wiphy, &def, IEEE80211_CHAN_DISABLED)) { return -EINVAL; }
+  if (!cfg80211_chandef_usable(wiphy, &def, IEEE80211_CHAN_DISABLED)) {
+    return -EINVAL;
+  }
 
-    return iwl_mvm_nan_config_nan_faw_cmd(mvm, &def, slots);
+  return iwl_mvm_nan_config_nan_faw_cmd(mvm, &def, slots);
 }
 
 #ifdef CONFIG_ACPI
 static int iwl_mvm_vendor_set_dynamic_txp_profile(struct wiphy* wiphy, struct wireless_dev* wdev,
                                                   const void* data, int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    int ret;
-    uint8_t chain_a, chain_b;
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  int ret;
+  uint8_t chain_a, chain_b;
 
-    ret = iwl_mvm_parse_vendor_data(tb, data, data_len);
-    if (ret) { return ret; }
+  ret = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  if (ret) {
+    return ret;
+  }
 
-    if (!tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE] ||
-        !tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE]) {
-        return -EINVAL;
-    }
+  if (!tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE] ||
+      !tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE]) {
+    return -EINVAL;
+  }
 
-    chain_a = nla_get_u8(tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE]);
-    chain_b = nla_get_u8(tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE]);
+  chain_a = nla_get_u8(tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE]);
+  chain_b = nla_get_u8(tb[IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE]);
 
-    if (mvm->sar_chain_a_profile == chain_a && mvm->sar_chain_b_profile == chain_b) { return 0; }
+  if (mvm->sar_chain_a_profile == chain_a && mvm->sar_chain_b_profile == chain_b) {
+    return 0;
+  }
 
-    mvm->sar_chain_a_profile = chain_a;
-    mvm->sar_chain_b_profile = chain_b;
+  mvm->sar_chain_a_profile = chain_a;
+  mvm->sar_chain_b_profile = chain_b;
 
-    return iwl_mvm_sar_select_profile(mvm, chain_a, chain_b);
+  return iwl_mvm_sar_select_profile(mvm, chain_a, chain_b);
 }
 
 static int iwl_mvm_vendor_get_sar_profile_info(struct wiphy* wiphy, struct wireless_dev* wdev,
                                                const void* data, int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct sk_buff* skb;
-    int i;
-    uint32_t n_profiles = 0;
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct sk_buff* skb;
+  int i;
+  uint32_t n_profiles = 0;
 
-    for (i = 0; i < ACPI_SAR_PROFILE_NUM; i++) {
-        if (mvm->sar_profiles[i].enabled) { n_profiles++; }
+  for (i = 0; i < ACPI_SAR_PROFILE_NUM; i++) {
+    if (mvm->sar_profiles[i].enabled) {
+      n_profiles++;
     }
+  }
 
-    skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
-    if (!skb) { return -ENOMEM; }
-    if (nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_SAR_ENABLED_PROFILE_NUM, n_profiles) ||
-        nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE, mvm->sar_chain_a_profile) ||
-        nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE, mvm->sar_chain_b_profile)) {
-        kfree_skb(skb);
-        return -ENOBUFS;
-    }
+  skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+  if (!skb) {
+    return -ENOMEM;
+  }
+  if (nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_SAR_ENABLED_PROFILE_NUM, n_profiles) ||
+      nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_SAR_CHAIN_A_PROFILE, mvm->sar_chain_a_profile) ||
+      nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_SAR_CHAIN_B_PROFILE, mvm->sar_chain_b_profile)) {
+    kfree_skb(skb);
+    return -ENOBUFS;
+  }
 
-    return cfg80211_vendor_cmd_reply(skb);
+  return cfg80211_vendor_cmd_reply(skb);
 }
 
 #define IWL_MVM_SAR_GEO_NUM_BANDS 2
 
 static int iwl_mvm_vendor_get_geo_profile_info(struct wiphy* wiphy, struct wireless_dev* wdev,
                                                const void* data, int data_len) {
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct sk_buff* skb;
-    struct nlattr* nl_profile;
-    int i, tbl_idx;
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct sk_buff* skb;
+  struct nlattr* nl_profile;
+  int i, tbl_idx;
 
-    tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
-    if (tbl_idx < 0) { return tbl_idx; }
+  tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
+  if (tbl_idx < 0) {
+    return tbl_idx;
+  }
 
-    skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
-    if (!skb) { return -ENOMEM; }
+  skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+  if (!skb) {
+    return -ENOMEM;
+  }
 
-    nl_profile = nla_nest_start(skb, IWL_MVM_VENDOR_ATTR_SAR_GEO_PROFILE);
-    if (!nl_profile) {
-        kfree_skb(skb);
-        return -ENOBUFS;
+  nl_profile = nla_nest_start(skb, IWL_MVM_VENDOR_ATTR_SAR_GEO_PROFILE);
+  if (!nl_profile) {
+    kfree_skb(skb);
+    return -ENOBUFS;
+  }
+  if (!tbl_idx) {
+    goto out;
+  }
+
+  for (i = 0; i < IWL_MVM_SAR_GEO_NUM_BANDS; i++) {
+    uint8_t* value;
+    struct nlattr* nl_chain = nla_nest_start(skb, i + 1);
+    int idx = i * ACPI_GEO_PER_CHAIN_SIZE;
+
+    if (!nl_chain) {
+      kfree_skb(skb);
+      return -ENOBUFS;
     }
-    if (!tbl_idx) { goto out; }
 
-    for (i = 0; i < IWL_MVM_SAR_GEO_NUM_BANDS; i++) {
-        uint8_t* value;
-        struct nlattr* nl_chain = nla_nest_start(skb, i + 1);
-        int idx = i * ACPI_GEO_PER_CHAIN_SIZE;
+    value = &mvm->geo_profiles[tbl_idx - 1].values[idx];
 
-        if (!nl_chain) {
-            kfree_skb(skb);
-            return -ENOBUFS;
-        }
-
-        value = &mvm->geo_profiles[tbl_idx - 1].values[idx];
-
-        nla_put_u8(skb, IWL_VENDOR_SAR_GEO_MAX_TXP, value[0]);
-        nla_put_u8(skb, IWL_VENDOR_SAR_GEO_CHAIN_A_OFFSET, value[1]);
-        nla_put_u8(skb, IWL_VENDOR_SAR_GEO_CHAIN_B_OFFSET, value[2]);
-        nla_nest_end(skb, nl_chain);
-    }
+    nla_put_u8(skb, IWL_VENDOR_SAR_GEO_MAX_TXP, value[0]);
+    nla_put_u8(skb, IWL_VENDOR_SAR_GEO_CHAIN_A_OFFSET, value[1]);
+    nla_put_u8(skb, IWL_VENDOR_SAR_GEO_CHAIN_B_OFFSET, value[2]);
+    nla_nest_end(skb, nl_chain);
+  }
 out:
-    nla_nest_end(skb, nl_profile);
+  nla_nest_end(skb, nl_profile);
 
-    return cfg80211_vendor_cmd_reply(skb);
+  return cfg80211_vendor_cmd_reply(skb);
 }
 #endif
 
@@ -1024,36 +1143,36 @@
 };
 
 static int iwl_mvm_vendor_validate_ccm_vector(struct nlattr** tb) {
-    if (!tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY] || !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE] ||
-        !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD] ||
-        nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_128 ||
-        nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]) != FIPS_CCM_NONCE_LEN) {
-        return -EINVAL;
-    }
+  if (!tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY] || !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE] ||
+      !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD] ||
+      nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_128 ||
+      nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]) != FIPS_CCM_NONCE_LEN) {
+    return -EINVAL;
+  }
 
-    return 0;
+  return 0;
 }
 
 static int iwl_mvm_vendor_validate_gcm_vector(struct nlattr** tb) {
-    if (!tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY] || !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE] ||
-        !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD] ||
-        (nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_128 &&
-         nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_256) ||
-        nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]) != FIPS_GCM_NONCE_LEN) {
-        return -EINVAL;
-    }
+  if (!tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY] || !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE] ||
+      !tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD] ||
+      (nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_128 &&
+       nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_256) ||
+      nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]) != FIPS_GCM_NONCE_LEN) {
+    return -EINVAL;
+  }
 
-    return 0;
+  return 0;
 }
 
 static int iwl_mvm_vendor_validate_aes_vector(struct nlattr** tb) {
-    if (!tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY] ||
-        (nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_128 &&
-         nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_256)) {
-        return -EINVAL;
-    }
+  if (!tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY] ||
+      (nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_128 &&
+       nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) != FIPS_KEY_LEN_256)) {
+    return -EINVAL;
+  }
 
-    return 0;
+  return 0;
 }
 
 /**
@@ -1069,153 +1188,167 @@
  * success, or a negative error code on failure.
  */
 static int iwl_mvm_vendor_build_vector(uint8_t** cmd_buf, struct nlattr* vector, uint8_t flags) {
-    struct nlattr* tb[NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HW];
-    struct iwl_fips_test_cmd* cmd;
-    int err;
-    int payload_len = 0;
-    uint8_t* buf;
+  struct nlattr* tb[NUM_IWL_VENDOR_FIPS_TEST_VECTOR_HW];
+  struct iwl_fips_test_cmd* cmd;
+  int err;
+  int payload_len = 0;
+  uint8_t* buf;
 
-    err = nla_parse_nested(tb, MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HW, vector,
-                           iwl_mvm_vendor_fips_hw_policy, NULL);
-    if (err) { return err; }
+  err = nla_parse_nested(tb, MAX_IWL_VENDOR_FIPS_TEST_VECTOR_HW, vector,
+                         iwl_mvm_vendor_fips_hw_policy, NULL);
+  if (err) {
+    return err;
+  }
 
-    switch (flags) {
+  switch (flags) {
     case IWL_FIPS_TEST_VECTOR_FLAGS_CCM:
-        err = iwl_mvm_vendor_validate_ccm_vector(tb);
-        break;
+      err = iwl_mvm_vendor_validate_ccm_vector(tb);
+      break;
     case IWL_FIPS_TEST_VECTOR_FLAGS_GCM:
-        err = iwl_mvm_vendor_validate_gcm_vector(tb);
-        break;
+      err = iwl_mvm_vendor_validate_gcm_vector(tb);
+      break;
     case IWL_FIPS_TEST_VECTOR_FLAGS_AES:
-        err = iwl_mvm_vendor_validate_aes_vector(tb);
-        break;
+      err = iwl_mvm_vendor_validate_aes_vector(tb);
+      break;
     default:
-        return -EINVAL;
+      return -EINVAL;
+  }
+
+  if (err) {
+    return err;
+  }
+
+  if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD] &&
+      nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]) > FIPS_MAX_AAD_LEN) {
+    return -EINVAL;
+  }
+
+  if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD]) {
+    payload_len = nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD]);
+  }
+
+  buf = kzalloc(sizeof(*cmd) + payload_len, GFP_KERNEL);
+  if (!buf) {
+    return -ENOMEM;
+  }
+
+  cmd = (void*)buf;
+
+  cmd->flags = cpu_to_le32(flags);
+
+  memcpy(cmd->key, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]),
+         nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]));
+
+  if (nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) == FIPS_KEY_LEN_256) {
+    cmd->flags |= cpu_to_le32(IWL_FIPS_TEST_VECTOR_FLAGS_KEY_256);
+  }
+
+  if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE])
+    memcpy(cmd->nonce, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]),
+           nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]));
+
+  if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]) {
+    memcpy(cmd->aad, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]),
+           nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]));
+    cmd->aad_len = cpu_to_le32(nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]));
+  }
+
+  if (payload_len) {
+    memcpy(cmd->payload, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD]), payload_len);
+    cmd->payload_len = cpu_to_le32(payload_len);
+  }
+
+  if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS]) {
+    uint8_t hw_flags = nla_get_u8(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS]);
+
+    if (hw_flags & IWL_VENDOR_FIPS_TEST_VECTOR_FLAGS_ENCRYPT) {
+      cmd->flags |= cpu_to_le32(IWL_FIPS_TEST_VECTOR_FLAGS_ENC);
     }
+  }
 
-    if (err) { return err; }
-
-    if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD] &&
-        nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]) > FIPS_MAX_AAD_LEN) {
-        return -EINVAL;
-    }
-
-    if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD]) {
-        payload_len = nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD]);
-    }
-
-    buf = kzalloc(sizeof(*cmd) + payload_len, GFP_KERNEL);
-    if (!buf) { return -ENOMEM; }
-
-    cmd = (void*)buf;
-
-    cmd->flags = cpu_to_le32(flags);
-
-    memcpy(cmd->key, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]),
-           nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]));
-
-    if (nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_KEY]) == FIPS_KEY_LEN_256) {
-        cmd->flags |= cpu_to_le32(IWL_FIPS_TEST_VECTOR_FLAGS_KEY_256);
-    }
-
-    if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE])
-        memcpy(cmd->nonce, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]),
-               nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_NONCE]));
-
-    if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]) {
-        memcpy(cmd->aad, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]),
-               nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]));
-        cmd->aad_len = cpu_to_le32(nla_len(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_AAD]));
-    }
-
-    if (payload_len) {
-        memcpy(cmd->payload, nla_data(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_PAYLOAD]), payload_len);
-        cmd->payload_len = cpu_to_le32(payload_len);
-    }
-
-    if (tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS]) {
-        uint8_t hw_flags = nla_get_u8(tb[IWL_VENDOR_FIPS_TEST_VECTOR_HW_FLAGS]);
-
-        if (hw_flags & IWL_VENDOR_FIPS_TEST_VECTOR_FLAGS_ENCRYPT) {
-            cmd->flags |= cpu_to_le32(IWL_FIPS_TEST_VECTOR_FLAGS_ENC);
-        }
-    }
-
-    *cmd_buf = buf;
-    return sizeof(*cmd) + payload_len;
+  *cmd_buf = buf;
+  return sizeof(*cmd) + payload_len;
 }
 
 static int iwl_mvm_vendor_test_fips_send_resp(struct wiphy* wiphy,
                                               struct iwl_fips_test_resp* resp) {
-    struct sk_buff* skb;
-    uint32_t resp_len = le32_to_cpu(resp->len);
-    uint32_t* status = (void*)(resp->payload + resp_len);
+  struct sk_buff* skb;
+  uint32_t resp_len = le32_to_cpu(resp->len);
+  uint32_t* status = (void*)(resp->payload + resp_len);
 
-    skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(*resp));
-    if (!skb) { return -ENOMEM; }
+  skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(*resp));
+  if (!skb) {
+    return -ENOMEM;
+  }
 
-    if ((*status) == IWL_FIPS_TEST_STATUS_SUCCESS &&
-        nla_put(skb, IWL_MVM_VENDOR_ATTR_FIPS_TEST_RESULT, resp_len, resp->payload)) {
-        kfree_skb(skb);
-        return -ENOBUFS;
-    }
+  if ((*status) == IWL_FIPS_TEST_STATUS_SUCCESS &&
+      nla_put(skb, IWL_MVM_VENDOR_ATTR_FIPS_TEST_RESULT, resp_len, resp->payload)) {
+    kfree_skb(skb);
+    return -ENOBUFS;
+  }
 
-    return cfg80211_vendor_cmd_reply(skb);
+  return cfg80211_vendor_cmd_reply(skb);
 }
 
 static int iwl_mvm_vendor_test_fips(struct wiphy* wiphy, struct wireless_dev* wdev,
                                     const void* data, int data_len) {
-    struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
-    struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
-    struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
-    struct iwl_host_cmd hcmd = {
-        .id = iwl_cmd_id(FIPS_TEST_VECTOR_CMD, LEGACY_GROUP, 0),
-        .flags = CMD_WANT_SKB,
-        .dataflags = {IWL_HCMD_DFL_NOCOPY},
-    };
-    struct iwl_rx_packet* pkt;
-    struct iwl_fips_test_resp* resp;
-    struct nlattr* vector;
-    uint8_t flags;
-    uint8_t* buf = NULL;
-    int ret;
+  struct nlattr* tb[NUM_IWL_MVM_VENDOR_ATTR];
+  struct ieee80211_hw* hw = wiphy_to_ieee80211_hw(wiphy);
+  struct iwl_mvm* mvm = IWL_MAC80211_GET_MVM(hw);
+  struct iwl_host_cmd hcmd = {
+      .id = iwl_cmd_id(FIPS_TEST_VECTOR_CMD, LEGACY_GROUP, 0),
+      .flags = CMD_WANT_SKB,
+      .dataflags = {IWL_HCMD_DFL_NOCOPY},
+  };
+  struct iwl_rx_packet* pkt;
+  struct iwl_fips_test_resp* resp;
+  struct nlattr* vector;
+  uint8_t flags;
+  uint8_t* buf = NULL;
+  int ret;
 
-    ret = iwl_mvm_parse_vendor_data(tb, data, data_len);
-    if (ret) { return ret; }
+  ret = iwl_mvm_parse_vendor_data(tb, data, data_len);
+  if (ret) {
+    return ret;
+  }
 
-    if (tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_CCM]) {
-        vector = tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_CCM];
-        flags = IWL_FIPS_TEST_VECTOR_FLAGS_CCM;
-    } else if (tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_GCM]) {
-        vector = tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_GCM];
-        flags = IWL_FIPS_TEST_VECTOR_FLAGS_GCM;
-    } else if (tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_AES]) {
-        vector = tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_AES];
-        flags = IWL_FIPS_TEST_VECTOR_FLAGS_AES;
-    } else {
-        return -EINVAL;
-    }
+  if (tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_CCM]) {
+    vector = tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_CCM];
+    flags = IWL_FIPS_TEST_VECTOR_FLAGS_CCM;
+  } else if (tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_GCM]) {
+    vector = tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_GCM];
+    flags = IWL_FIPS_TEST_VECTOR_FLAGS_GCM;
+  } else if (tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_AES]) {
+    vector = tb[IWL_MVM_VENDOR_ATTR_FIPS_TEST_VECTOR_HW_AES];
+    flags = IWL_FIPS_TEST_VECTOR_FLAGS_AES;
+  } else {
+    return -EINVAL;
+  }
 
-    ret = iwl_mvm_vendor_build_vector(&buf, vector, flags);
-    if (ret <= 0) { return ret; }
+  ret = iwl_mvm_vendor_build_vector(&buf, vector, flags);
+  if (ret <= 0) {
+    return ret;
+  }
 
-    hcmd.data[0] = buf;
-    hcmd.len[0] = ret;
+  hcmd.data[0] = buf;
+  hcmd.len[0] = ret;
 
-    mutex_lock(&mvm->mutex);
-    ret = iwl_mvm_send_cmd(mvm, &hcmd);
-    mutex_unlock(&mvm->mutex);
+  mutex_lock(&mvm->mutex);
+  ret = iwl_mvm_send_cmd(mvm, &hcmd);
+  mutex_unlock(&mvm->mutex);
 
-    if (ret) { return ret; }
+  if (ret) {
+    return ret;
+  }
 
-    pkt = hcmd.resp_pkt;
-    resp = (void*)pkt->data;
+  pkt = hcmd.resp_pkt;
+  resp = (void*)pkt->data;
 
-    iwl_mvm_vendor_test_fips_send_resp(wiphy, resp);
-    iwl_free_resp(&hcmd);
+  iwl_mvm_vendor_test_fips_send_resp(wiphy, resp);
+  iwl_free_resp(&hcmd);
 
-    kfree(buf);
-    return 0;
+  kfree(buf);
+  return 0;
 }
 
 static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = {
@@ -1430,52 +1563,54 @@
 };
 
 void iwl_mvm_set_wiphy_vendor_commands(struct wiphy* wiphy) {
-    wiphy->vendor_commands = iwl_mvm_vendor_commands;
-    wiphy->n_vendor_commands = ARRAY_SIZE(iwl_mvm_vendor_commands);
-    wiphy->vendor_events = iwl_mvm_vendor_events;
-    wiphy->n_vendor_events = ARRAY_SIZE(iwl_mvm_vendor_events);
+  wiphy->vendor_commands = iwl_mvm_vendor_commands;
+  wiphy->n_vendor_commands = ARRAY_SIZE(iwl_mvm_vendor_commands);
+  wiphy->vendor_events = iwl_mvm_vendor_events;
+  wiphy->n_vendor_events = ARRAY_SIZE(iwl_mvm_vendor_events);
 }
 
 static enum iwl_mvm_vendor_load iwl_mvm_get_vendor_load(enum iwl_mvm_traffic_load load) {
-    switch (load) {
+  switch (load) {
     case IWL_MVM_TRAFFIC_HIGH:
-        return IWL_MVM_VENDOR_LOAD_HIGH;
+      return IWL_MVM_VENDOR_LOAD_HIGH;
     case IWL_MVM_TRAFFIC_MEDIUM:
-        return IWL_MVM_VENDOR_LOAD_MEDIUM;
+      return IWL_MVM_VENDOR_LOAD_MEDIUM;
     case IWL_MVM_TRAFFIC_LOW:
-        return IWL_MVM_VENDOR_LOAD_LOW;
+      return IWL_MVM_VENDOR_LOAD_LOW;
     default:
-        break;
-    }
+      break;
+  }
 
-    return IWL_MVM_VENDOR_LOAD_LOW;
+  return IWL_MVM_VENDOR_LOAD_LOW;
 }
 
 void iwl_mvm_send_tcm_event(struct iwl_mvm* mvm, struct ieee80211_vif* vif) {
-    struct sk_buff* msg = cfg80211_vendor_event_alloc(
-        mvm->hw->wiphy, ieee80211_vif_to_wdev(vif), 200, IWL_MVM_VENDOR_EVENT_IDX_TCM, GFP_ATOMIC);
+  struct sk_buff* msg = cfg80211_vendor_event_alloc(mvm->hw->wiphy, ieee80211_vif_to_wdev(vif), 200,
+                                                    IWL_MVM_VENDOR_EVENT_IDX_TCM, GFP_ATOMIC);
 
-    if (!msg) { return; }
-
-    if (vif) {
-        struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
-        if (nla_put(msg, IWL_MVM_VENDOR_ATTR_VIF_ADDR, ETH_ALEN, vif->addr) ||
-            nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_VIF_LL, iwl_mvm_vif_low_latency(mvmvif)) ||
-            nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_VIF_LOAD, mvm->tcm.result.load[mvmvif->id])) {
-            goto nla_put_failure;
-        }
-    }
-
-    if (nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_LL, iwl_mvm_low_latency(mvm)) ||
-        nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_LOAD,
-                   iwl_mvm_get_vendor_load(mvm->tcm.result.global_load))) {
-        goto nla_put_failure;
-    }
-
-    cfg80211_vendor_event(msg, GFP_ATOMIC);
+  if (!msg) {
     return;
+  }
+
+  if (vif) {
+    struct iwl_mvm_vif* mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+    if (nla_put(msg, IWL_MVM_VENDOR_ATTR_VIF_ADDR, ETH_ALEN, vif->addr) ||
+        nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_VIF_LL, iwl_mvm_vif_low_latency(mvmvif)) ||
+        nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_VIF_LOAD, mvm->tcm.result.load[mvmvif->id])) {
+      goto nla_put_failure;
+    }
+  }
+
+  if (nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_LL, iwl_mvm_low_latency(mvm)) ||
+      nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_LOAD,
+                 iwl_mvm_get_vendor_load(mvm->tcm.result.global_load))) {
+    goto nla_put_failure;
+  }
+
+  cfg80211_vendor_event(msg, GFP_ATOMIC);
+  return;
 
 nla_put_failure:
-    kfree_skb(msg);
+  kfree_skb(msg);
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/ctxt-info-gen3.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 4195865..032331b 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -38,115 +38,127 @@
 #include "iwl-trans.h"
 
 int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans* trans, const struct fw_img* fw) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_context_info_gen3* ctxt_info_gen3;
-    struct iwl_prph_scratch* prph_scratch;
-    struct iwl_prph_scratch_ctrl_cfg* prph_sc_ctrl;
-    struct iwl_prph_info* prph_info;
-    void* iml_img;
-    uint32_t control_flags = 0;
-    int ret;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_context_info_gen3* ctxt_info_gen3;
+  struct iwl_prph_scratch* prph_scratch;
+  struct iwl_prph_scratch_ctrl_cfg* prph_sc_ctrl;
+  struct iwl_prph_info* prph_info;
+  void* iml_img;
+  uint32_t control_flags = 0;
+  int ret;
 
-    /* Allocate prph scratch */
-    prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
-                                      &trans_pcie->prph_scratch_dma_addr, GFP_KERNEL);
-    if (!prph_scratch) { return -ENOMEM; }
+  /* Allocate prph scratch */
+  prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
+                                    &trans_pcie->prph_scratch_dma_addr, GFP_KERNEL);
+  if (!prph_scratch) {
+    return -ENOMEM;
+  }
 
-    prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+  prph_sc_ctrl = &prph_scratch->ctrl_cfg;
 
-    prph_sc_ctrl->version.version = 0;
-    prph_sc_ctrl->version.mac_id = cpu_to_le16((uint16_t)iwl_read32(trans, CSR_HW_REV));
-    prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
+  prph_sc_ctrl->version.version = 0;
+  prph_sc_ctrl->version.mac_id = cpu_to_le16((uint16_t)iwl_read32(trans, CSR_HW_REV));
+  prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
 
-    control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K | IWL_PRPH_SCRATCH_MTR_MODE |
-                    (IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT) |
-                    IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
-    prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+  control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K | IWL_PRPH_SCRATCH_MTR_MODE |
+                  (IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT) |
+                  IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+  prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
 
-    /* initialize RX default queue */
-    prph_sc_ctrl->rbd_cfg.free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
+  /* initialize RX default queue */
+  prph_sc_ctrl->rbd_cfg.free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
 
-    /* Configure debug, for integration */
-    if (!trans->ini_valid) { iwl_pcie_alloc_fw_monitor(trans, 0); }
-    if (trans->num_blocks) {
-        prph_sc_ctrl->hwm_cfg.hwm_base_addr = cpu_to_le64(trans->fw_mon[0].physical);
-        prph_sc_ctrl->hwm_cfg.hwm_size = cpu_to_le32(trans->fw_mon[0].size);
-    }
+  /* Configure debug, for integration */
+  if (!trans->ini_valid) {
+    iwl_pcie_alloc_fw_monitor(trans, 0);
+  }
+  if (trans->num_blocks) {
+    prph_sc_ctrl->hwm_cfg.hwm_base_addr = cpu_to_le64(trans->fw_mon[0].physical);
+    prph_sc_ctrl->hwm_cfg.hwm_size = cpu_to_le32(trans->fw_mon[0].size);
+  }
 
-    /* allocate ucode sections in dram and set addresses */
-    ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
-    if (ret) {
-        dma_free_coherent(trans->dev, sizeof(*prph_scratch), prph_scratch,
-                          trans_pcie->prph_scratch_dma_addr);
-        return ret;
-    }
+  /* allocate ucode sections in dram and set addresses */
+  ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
+  if (ret) {
+    dma_free_coherent(trans->dev, sizeof(*prph_scratch), prph_scratch,
+                      trans_pcie->prph_scratch_dma_addr);
+    return ret;
+  }
 
-    /* Allocate prph information
-     * currently we don't assign to the prph info anything, but it would get
-     * assigned later */
-    prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info), &trans_pcie->prph_info_dma_addr,
-                                   GFP_KERNEL);
-    if (!prph_info) { return -ENOMEM; }
+  /* Allocate prph information
+   * currently we don't assign to the prph info anything, but it would get
+   * assigned later */
+  prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info), &trans_pcie->prph_info_dma_addr,
+                                 GFP_KERNEL);
+  if (!prph_info) {
+    return -ENOMEM;
+  }
 
-    /* Allocate context info */
-    ctxt_info_gen3 = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info_gen3),
-                                        &trans_pcie->ctxt_info_dma_addr, GFP_KERNEL);
-    if (!ctxt_info_gen3) { return -ENOMEM; }
+  /* Allocate context info */
+  ctxt_info_gen3 = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info_gen3),
+                                      &trans_pcie->ctxt_info_dma_addr, GFP_KERNEL);
+  if (!ctxt_info_gen3) {
+    return -ENOMEM;
+  }
 
-    ctxt_info_gen3->prph_info_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr);
-    ctxt_info_gen3->prph_scratch_base_addr = cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
-    ctxt_info_gen3->prph_scratch_size = cpu_to_le32(sizeof(*prph_scratch));
-    ctxt_info_gen3->cr_head_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
-    ctxt_info_gen3->tr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
-    ctxt_info_gen3->cr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
-    ctxt_info_gen3->cr_idx_arr_size = cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
-    ctxt_info_gen3->tr_idx_arr_size = cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
-    ctxt_info_gen3->mtr_base_addr = cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
-    ctxt_info_gen3->mcr_base_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
-    ctxt_info_gen3->mtr_size = cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
-    ctxt_info_gen3->mcr_size = cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+  ctxt_info_gen3->prph_info_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr);
+  ctxt_info_gen3->prph_scratch_base_addr = cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
+  ctxt_info_gen3->prph_scratch_size = cpu_to_le32(sizeof(*prph_scratch));
+  ctxt_info_gen3->cr_head_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+  ctxt_info_gen3->tr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
+  ctxt_info_gen3->cr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
+  ctxt_info_gen3->cr_idx_arr_size = cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
+  ctxt_info_gen3->tr_idx_arr_size = cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
+  ctxt_info_gen3->mtr_base_addr = cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+  ctxt_info_gen3->mcr_base_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+  ctxt_info_gen3->mtr_size = cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
+  ctxt_info_gen3->mcr_size = cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
 
-    trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
-    trans_pcie->prph_info = prph_info;
-    trans_pcie->prph_scratch = prph_scratch;
+  trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
+  trans_pcie->prph_info = prph_info;
+  trans_pcie->prph_scratch = prph_scratch;
 
-    /* Allocate IML */
-    iml_img = dma_alloc_coherent(trans->dev, trans->iml_len, &trans_pcie->iml_dma_addr, GFP_KERNEL);
-    if (!iml_img) { return -ENOMEM; }
+  /* Allocate IML */
+  iml_img = dma_alloc_coherent(trans->dev, trans->iml_len, &trans_pcie->iml_dma_addr, GFP_KERNEL);
+  if (!iml_img) {
+    return -ENOMEM;
+  }
 
-    memcpy(iml_img, trans->iml, trans->iml_len);
+  memcpy(iml_img, trans->iml, trans->iml_len);
 
-    iwl_enable_interrupts(trans);
+  iwl_enable_interrupts(trans);
 
-    /* kick FW self load */
-    iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr);
-    iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr);
-    iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
-    iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
-    iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
+  /* kick FW self load */
+  iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr);
+  iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr);
+  iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
+  iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
+  iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
 
-    return 0;
+  return 0;
 }
 
 void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    if (!trans_pcie->ctxt_info_gen3) { return; }
+  if (!trans_pcie->ctxt_info_gen3) {
+    return;
+  }
 
-    dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), trans_pcie->ctxt_info_gen3,
-                      trans_pcie->ctxt_info_dma_addr);
-    trans_pcie->ctxt_info_dma_addr = 0;
-    trans_pcie->ctxt_info_gen3 = NULL;
+  dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), trans_pcie->ctxt_info_gen3,
+                    trans_pcie->ctxt_info_dma_addr);
+  trans_pcie->ctxt_info_dma_addr = 0;
+  trans_pcie->ctxt_info_gen3 = NULL;
 
-    iwl_pcie_ctxt_info_free_fw_img(trans);
+  iwl_pcie_ctxt_info_free_fw_img(trans);
 
-    dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch), trans_pcie->prph_scratch,
-                      trans_pcie->prph_scratch_dma_addr);
-    trans_pcie->prph_scratch_dma_addr = 0;
-    trans_pcie->prph_scratch = NULL;
+  dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch), trans_pcie->prph_scratch,
+                    trans_pcie->prph_scratch_dma_addr);
+  trans_pcie->prph_scratch_dma_addr = 0;
+  trans_pcie->prph_scratch = NULL;
 
-    dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info), trans_pcie->prph_info,
-                      trans_pcie->prph_info_dma_addr);
-    trans_pcie->prph_info_dma_addr = 0;
-    trans_pcie->prph_info = NULL;
+  dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info), trans_pcie->prph_info,
+                    trans_pcie->prph_info_dma_addr);
+  trans_pcie->prph_info_dma_addr = 0;
+  trans_pcie->prph_info = NULL;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/ctxt-info.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/ctxt-info.c
index d88c381..5d84dfe 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/ctxt-info.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/ctxt-info.c
@@ -39,173 +39,188 @@
 #include "iwl-trans.h"
 
 void iwl_pcie_ctxt_info_free_paging(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_self_init_dram* dram = &trans_pcie->init_dram;
-    int i;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_self_init_dram* dram = &trans_pcie->init_dram;
+  int i;
 
-    if (!dram->paging) {
-        WARN_ON(dram->paging_cnt);
-        return;
-    }
+  if (!dram->paging) {
+    WARN_ON(dram->paging_cnt);
+    return;
+  }
 
-    /* free paging*/
-    for (i = 0; i < dram->paging_cnt; i++)
-        dma_free_coherent(trans->dev, dram->paging[i].size, dram->paging[i].block,
-                          dram->paging[i].physical);
+  /* free paging*/
+  for (i = 0; i < dram->paging_cnt; i++)
+    dma_free_coherent(trans->dev, dram->paging[i].size, dram->paging[i].block,
+                      dram->paging[i].physical);
 
-    kfree(dram->paging);
-    dram->paging_cnt = 0;
-    dram->paging = NULL;
+  kfree(dram->paging);
+  dram->paging_cnt = 0;
+  dram->paging = NULL;
 }
 
 int iwl_pcie_init_fw_sec(struct iwl_trans* trans, const struct fw_img* fw,
                          struct iwl_context_info_dram* ctxt_dram) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_self_init_dram* dram = &trans_pcie->init_dram;
-    int i, ret, lmac_cnt, umac_cnt, paging_cnt;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_self_init_dram* dram = &trans_pcie->init_dram;
+  int i, ret, lmac_cnt, umac_cnt, paging_cnt;
 
-    if (WARN(dram->paging, "paging shouldn't already be initialized (%d pages)\n",
-             dram->paging_cnt)) {
-        iwl_pcie_ctxt_info_free_paging(trans);
+  if (WARN(dram->paging, "paging shouldn't already be initialized (%d pages)\n",
+           dram->paging_cnt)) {
+    iwl_pcie_ctxt_info_free_paging(trans);
+  }
+
+  lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
+  /* add 1 due to separator */
+  umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
+  /* add 2 due to separators */
+  paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);
+
+  dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
+  if (!dram->fw) {
+    return -ENOMEM;
+  }
+  dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
+  if (!dram->paging) {
+    return -ENOMEM;
+  }
+
+  /* initialize lmac sections */
+  for (i = 0; i < lmac_cnt; i++) {
+    ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i], &dram->fw[dram->fw_cnt]);
+    if (ret) {
+      return ret;
+    }
+    ctxt_dram->lmac_img[i] = cpu_to_le64(dram->fw[dram->fw_cnt].physical);
+    dram->fw_cnt++;
+  }
+
+  /* initialize umac sections */
+  for (i = 0; i < umac_cnt; i++) {
+    /* access FW with +1 to make up for lmac separator */
+    ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[dram->fw_cnt + 1], &dram->fw[dram->fw_cnt]);
+    if (ret) {
+      return ret;
+    }
+    ctxt_dram->umac_img[i] = cpu_to_le64(dram->fw[dram->fw_cnt].physical);
+    dram->fw_cnt++;
+  }
+
+  /*
+   * Initialize paging.
+   * Paging memory isn't stored in dram->fw as the umac and lmac - it is
+   * stored separately.
+   * This is since the timing of its release is different -
+   * while fw memory can be released on alive, the paging memory can be
+   * freed only when the device goes down.
+   * Given that, the logic here in accessing the fw image is a bit
+   * different - fw_cnt isn't changing so loop counter is added to it.
+   */
+  for (i = 0; i < paging_cnt; i++) {
+    /* access FW with +2 to make up for lmac & umac separators */
+    int fw_idx = dram->fw_cnt + i + 2;
+
+    ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx], &dram->paging[i]);
+    if (ret) {
+      return ret;
     }
 
-    lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
-    /* add 1 due to separator */
-    umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
-    /* add 2 due to separators */
-    paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);
+    ctxt_dram->virtual_img[i] = cpu_to_le64(dram->paging[i].physical);
+    dram->paging_cnt++;
+  }
 
-    dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
-    if (!dram->fw) { return -ENOMEM; }
-    dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
-    if (!dram->paging) { return -ENOMEM; }
-
-    /* initialize lmac sections */
-    for (i = 0; i < lmac_cnt; i++) {
-        ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i], &dram->fw[dram->fw_cnt]);
-        if (ret) { return ret; }
-        ctxt_dram->lmac_img[i] = cpu_to_le64(dram->fw[dram->fw_cnt].physical);
-        dram->fw_cnt++;
-    }
-
-    /* initialize umac sections */
-    for (i = 0; i < umac_cnt; i++) {
-        /* access FW with +1 to make up for lmac separator */
-        ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[dram->fw_cnt + 1],
-                                           &dram->fw[dram->fw_cnt]);
-        if (ret) { return ret; }
-        ctxt_dram->umac_img[i] = cpu_to_le64(dram->fw[dram->fw_cnt].physical);
-        dram->fw_cnt++;
-    }
-
-    /*
-     * Initialize paging.
-     * Paging memory isn't stored in dram->fw as the umac and lmac - it is
-     * stored separately.
-     * This is since the timing of its release is different -
-     * while fw memory can be released on alive, the paging memory can be
-     * freed only when the device goes down.
-     * Given that, the logic here in accessing the fw image is a bit
-     * different - fw_cnt isn't changing so loop counter is added to it.
-     */
-    for (i = 0; i < paging_cnt; i++) {
-        /* access FW with +2 to make up for lmac & umac separators */
-        int fw_idx = dram->fw_cnt + i + 2;
-
-        ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx], &dram->paging[i]);
-        if (ret) { return ret; }
-
-        ctxt_dram->virtual_img[i] = cpu_to_le64(dram->paging[i].physical);
-        dram->paging_cnt++;
-    }
-
-    return 0;
+  return 0;
 }
 
 int iwl_pcie_ctxt_info_init(struct iwl_trans* trans, const struct fw_img* fw) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_context_info* ctxt_info;
-    struct iwl_context_info_rbd_cfg* rx_cfg;
-    uint32_t control_flags = 0, rb_size;
-    int ret;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_context_info* ctxt_info;
+  struct iwl_context_info_rbd_cfg* rx_cfg;
+  uint32_t control_flags = 0, rb_size;
+  int ret;
 
-    ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info), &trans_pcie->ctxt_info_dma_addr,
-                                   GFP_KERNEL);
-    if (!ctxt_info) { return -ENOMEM; }
+  ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info), &trans_pcie->ctxt_info_dma_addr,
+                                 GFP_KERNEL);
+  if (!ctxt_info) {
+    return -ENOMEM;
+  }
 
-    ctxt_info->version.version = 0;
-    ctxt_info->version.mac_id = cpu_to_le16((uint16_t)iwl_read32(trans, CSR_HW_REV));
-    /* size is in DWs */
-    ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
+  ctxt_info->version.version = 0;
+  ctxt_info->version.mac_id = cpu_to_le16((uint16_t)iwl_read32(trans, CSR_HW_REV));
+  /* size is in DWs */
+  ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
 
-    switch (trans_pcie->rx_buf_size) {
+  switch (trans_pcie->rx_buf_size) {
     case IWL_AMSDU_2K:
-        rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
-        break;
+      rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
+      break;
     case IWL_AMSDU_4K:
-        rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
-        break;
+      rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
+      break;
     case IWL_AMSDU_8K:
-        rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
-        break;
+      rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
+      break;
     case IWL_AMSDU_12K:
-        rb_size = IWL_CTXT_INFO_RB_SIZE_12K;
-        break;
+      rb_size = IWL_CTXT_INFO_RB_SIZE_12K;
+      break;
     default:
-        WARN_ON(1);
-        rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
-    }
+      WARN_ON(1);
+      rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
+  }
 
-    BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
-    control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
-                    (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) << IWL_CTXT_INFO_RB_CB_SIZE_POS) |
-                    (rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
-    ctxt_info->control.control_flags = cpu_to_le32(control_flags);
+  BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
+  control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
+                  (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) << IWL_CTXT_INFO_RB_CB_SIZE_POS) |
+                  (rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
+  ctxt_info->control.control_flags = cpu_to_le32(control_flags);
 
-    /* initialize RX default queue */
-    rx_cfg = &ctxt_info->rbd_cfg;
-    rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
-    rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
-    rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+  /* initialize RX default queue */
+  rx_cfg = &ctxt_info->rbd_cfg;
+  rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
+  rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+  rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
 
-    /* initialize TX command queue */
-    ctxt_info->hcmd_cfg.cmd_queue_addr =
-        cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
-    ctxt_info->hcmd_cfg.cmd_queue_size = TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
+  /* initialize TX command queue */
+  ctxt_info->hcmd_cfg.cmd_queue_addr =
+      cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+  ctxt_info->hcmd_cfg.cmd_queue_size = TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
 
-    /* allocate ucode sections in dram and set addresses */
-    ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
-    if (ret) {
-        dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), ctxt_info,
-                          trans_pcie->ctxt_info_dma_addr);
-        return ret;
-    }
+  /* allocate ucode sections in dram and set addresses */
+  ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
+  if (ret) {
+    dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), ctxt_info,
+                      trans_pcie->ctxt_info_dma_addr);
+    return ret;
+  }
 
-    trans_pcie->ctxt_info = ctxt_info;
+  trans_pcie->ctxt_info = ctxt_info;
 
-    iwl_enable_interrupts(trans);
+  iwl_enable_interrupts(trans);
 
-    /* Configure debug, if exists */
-    if (iwl_pcie_dbg_on(trans)) { iwl_pcie_apply_destination(trans); }
+  /* Configure debug, if exists */
+  if (iwl_pcie_dbg_on(trans)) {
+    iwl_pcie_apply_destination(trans);
+  }
 
-    /* kick FW self load */
-    iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
-    iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+  /* kick FW self load */
+  iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
+  iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
 
-    /* Context info will be released upon alive or failure to get one */
+  /* Context info will be released upon alive or failure to get one */
 
-    return 0;
+  return 0;
 }
 
 void iwl_pcie_ctxt_info_free(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    if (!trans_pcie->ctxt_info) { return; }
+  if (!trans_pcie->ctxt_info) {
+    return;
+  }
 
-    dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), trans_pcie->ctxt_info,
-                      trans_pcie->ctxt_info_dma_addr);
-    trans_pcie->ctxt_info_dma_addr = 0;
-    trans_pcie->ctxt_info = NULL;
+  dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), trans_pcie->ctxt_info,
+                    trans_pcie->ctxt_info_dma_addr);
+  trans_pcie->ctxt_info_dma_addr = 0;
+  trans_pcie->ctxt_info = NULL;
 
-    iwl_pcie_ctxt_info_free_fw_img(trans);
+  iwl_pcie_ctxt_info_free_fw_img(trans);
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/drv.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/drv.c
index 98df4cf..1a58c34 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/drv.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/drv.c
@@ -34,15 +34,14 @@
  *
  *****************************************************************************/
 
-#include <stdlib.h>
-
 #include <ddk/binding.h>
 #include <ddk/debug.h>
 #include <ddk/device.h>
 #include <ddk/driver.h>
-#include <lib/device-protocol/pci.h>
 #include <ddk/protocol/pci.h>
 #include <ddk/protocol/wlanphyimpl.h>
+#include <lib/device-protocol/pci.h>
+#include <stdlib.h>
 #include <wlan/protocol/mac.h>
 #include <zircon/status.h>
 
@@ -54,13 +53,13 @@
 #endif  // NEEDS_PORTING
 
 struct iwl_pci_device {
-    uint16_t device_id;
-    uint16_t subsystem_device_id;
-    const struct iwl_cfg* config;
+  uint16_t device_id;
+  uint16_t subsystem_device_id;
+  const struct iwl_cfg* config;
 };
 
 #define IWL_PCI_DEVICE(dev, subdev, cfg) \
-    .device_id = (dev), .subsystem_device_id = (subdev), .config = &(cfg)
+  .device_id = (dev), .subsystem_device_id = (subdev), .config = &(cfg)
 
 /* Hardware specific file defines the PCI IDs table for that hardware module */
 static const struct iwl_pci_device iwl_devices[] = {
@@ -949,25 +948,25 @@
 
 static zx_status_t iwl_pci_config(uint16_t device_id, uint16_t subsystem_device_id,
                                   const struct iwl_cfg** out_cfg) {
-    const struct iwl_pci_device* device = iwl_devices;
-    for (size_t i = 0; i != ARRAY_SIZE(iwl_devices); ++i) {
-        if (iwl_devices[i].device_id == device_id &&
-            iwl_devices[i].subsystem_device_id == subsystem_device_id) {
-            *out_cfg = iwl_devices[i].config;
-            return ZX_OK;
-        }
-        device++;
+  const struct iwl_pci_device* device = iwl_devices;
+  for (size_t i = 0; i != ARRAY_SIZE(iwl_devices); ++i) {
+    if (iwl_devices[i].device_id == device_id &&
+        iwl_devices[i].subsystem_device_id == subsystem_device_id) {
+      *out_cfg = iwl_devices[i].config;
+      return ZX_OK;
     }
-    return ZX_ERR_NOT_FOUND;
+    device++;
+  }
+  return ZX_ERR_NOT_FOUND;
 }
 
 static void iwl_pci_unbind(void* ctx) {
-    struct iwl_trans* trans = (struct iwl_trans*)ctx;
-    device_remove(trans->zxdev);
+  struct iwl_trans* trans = (struct iwl_trans*)ctx;
+  device_remove(trans->zxdev);
 }
 
 static void iwl_pci_release(void* ctx) {
-    struct iwl_trans* trans = (struct iwl_trans*)ctx;
+  struct iwl_trans* trans = (struct iwl_trans*)ctx;
 
 #if 0   // NEEDS_PORTING
     /* if RTPM was in use, restore it to the state before probe */
@@ -980,12 +979,12 @@
     }
 #endif  // NEEDS_PORTING
 
-    iwl_drv_stop(trans->drv);
+  iwl_drv_stop(trans->drv);
 
 #if 0   // NEEDS_PORTING
     iwl_trans_pcie_free(trans);
 #endif  // NEEDS_PORTING
-    free(trans);
+  free(trans);
 }
 
 static zx_protocol_device_t device_ops = {
@@ -1001,68 +1000,68 @@
 };
 
 static zx_status_t iwl_pci_bind(void* ctx, zx_device_t* dev) {
-    struct iwl_trans* iwl_trans;
-    zx_status_t status;
+  struct iwl_trans* iwl_trans;
+  zx_status_t status;
 
-    pci_protocol_t pci;
-    status = device_get_protocol(dev, ZX_PROTOCOL_PCI, &pci);
-    if (status != ZX_OK) {
-        return status;
-    }
+  pci_protocol_t pci;
+  status = device_get_protocol(dev, ZX_PROTOCOL_PCI, &pci);
+  if (status != ZX_OK) {
+    return status;
+  }
 
-    zx_pcie_device_info_t pci_info;
-    status = pci_get_device_info(&pci, &pci_info);
-    if (status != ZX_OK) {
-        return status;
-    }
+  zx_pcie_device_info_t pci_info;
+  status = pci_get_device_info(&pci, &pci_info);
+  if (status != ZX_OK) {
+    return status;
+  }
 
-    uint16_t subsystem_device_id;
-    status = pci_config_read16(&pci, PCI_CFG_SUBSYSTEM_ID, &subsystem_device_id);
-    if (status != ZX_OK) {
-        IWL_ERR(iwl_trans, "Failed to read PCI subsystem device ID: %s\n",
-                zx_status_get_string(status));
-        return status;
-    }
+  uint16_t subsystem_device_id;
+  status = pci_config_read16(&pci, PCI_CFG_SUBSYSTEM_ID, &subsystem_device_id);
+  if (status != ZX_OK) {
+    IWL_ERR(iwl_trans, "Failed to read PCI subsystem device ID: %s\n",
+            zx_status_get_string(status));
+    return status;
+  }
 
-    IWL_INFO(iwl_trans, "Device ID: %04x Subsystem Device ID: %04x\n", pci_info.device_id,
-             subsystem_device_id);
+  IWL_INFO(iwl_trans, "Device ID: %04x Subsystem Device ID: %04x\n", pci_info.device_id,
+           subsystem_device_id);
 
-    const struct iwl_cfg* cfg;
-    status = iwl_pci_config(pci_info.device_id, subsystem_device_id, &cfg);
-    if (status != ZX_OK) {
-        IWL_ERR(iwl_trans, "Failed to find PCI config: %s\n", zx_status_get_string(status));
-        return ZX_ERR_NOT_SUPPORTED;
-    }
+  const struct iwl_cfg* cfg;
+  status = iwl_pci_config(pci_info.device_id, subsystem_device_id, &cfg);
+  if (status != ZX_OK) {
+    IWL_ERR(iwl_trans, "Failed to find PCI config: %s\n", zx_status_get_string(status));
+    return ZX_ERR_NOT_SUPPORTED;
+  }
 
-    iwl_trans = iwl_trans_pcie_alloc(&pci, cfg);
-    if (!iwl_trans) {
-        IWL_ERR(iwl_trans, "Failed to allocate PCIE transport: %s\n", zx_status_get_string(status));
-        return ZX_ERR_NO_MEMORY;
-    }
+  iwl_trans = iwl_trans_pcie_alloc(&pci, cfg);
+  if (!iwl_trans) {
+    IWL_ERR(iwl_trans, "Failed to allocate PCIE transport: %s\n", zx_status_get_string(status));
+    return ZX_ERR_NO_MEMORY;
+  }
 
-    if (!iwl_trans->cfg->csr) {
-        IWL_ERR(iwl_trans, "CSR addresses aren't configured\n");
-        return ZX_ERR_BAD_STATE;
-    }
+  if (!iwl_trans->cfg->csr) {
+    IWL_ERR(iwl_trans, "CSR addresses aren't configured\n");
+    return ZX_ERR_BAD_STATE;
+  }
 
-    /*
-     * special-case 7265D, it has the same PCI IDs.
-     *
-     * Note that because we already pass the cfg to the transport above,
-     * all the parameters that the transport uses must, until that is
-     * changed, be identical to the ones in the 7265D configuration.
-     */
-    const struct iwl_cfg* cfg_7265d = NULL;
-    if (iwl_trans->cfg == &iwl7265_2ac_cfg) {
-        cfg_7265d = &iwl7265d_2ac_cfg;
-    } else if (iwl_trans->cfg == &iwl7265_2n_cfg) {
-        cfg_7265d = &iwl7265d_2n_cfg;
-    } else if (iwl_trans->cfg == &iwl7265_n_cfg) {
-        cfg_7265d = &iwl7265d_n_cfg;
-    }
-    if (cfg_7265d && (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
-        iwl_trans->cfg = cfg_7265d;
-    }
+  /*
+   * special-case 7265D, it has the same PCI IDs.
+   *
+   * Note that because we already pass the cfg to the transport above,
+   * all the parameters that the transport uses must, until that is
+   * changed, be identical to the ones in the 7265D configuration.
+   */
+  const struct iwl_cfg* cfg_7265d = NULL;
+  if (iwl_trans->cfg == &iwl7265_2ac_cfg) {
+    cfg_7265d = &iwl7265d_2ac_cfg;
+  } else if (iwl_trans->cfg == &iwl7265_2n_cfg) {
+    cfg_7265d = &iwl7265d_2n_cfg;
+  } else if (iwl_trans->cfg == &iwl7265_n_cfg) {
+    cfg_7265d = &iwl7265d_n_cfg;
+  }
+  if (cfg_7265d && (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
+    iwl_trans->cfg = cfg_7265d;
+  }
 
 #if 0  // NEEDS_PORTING
 #if CPTCFG_IWLMVM || CPTCFG_IWLFMAC
@@ -1093,35 +1092,35 @@
 #endif  // CPTCFG_IWLMVM || CPTCFG_IWLFMAC
 #endif  // NEEDS_PORTING
 
-    device_add_args_t args = {
-        .version = DEVICE_ADD_ARGS_VERSION,
-        .name = "iwlwifi-wlanphy",
-        .ctx = iwl_trans,
-        .ops = &device_ops,
-        .proto_id = ZX_PROTOCOL_WLANPHY_IMPL,
-        .proto_ops = &wlanphy_ops,
-        .flags = DEVICE_ADD_INVISIBLE,
-    };
+  device_add_args_t args = {
+      .version = DEVICE_ADD_ARGS_VERSION,
+      .name = "iwlwifi-wlanphy",
+      .ctx = iwl_trans,
+      .ops = &device_ops,
+      .proto_id = ZX_PROTOCOL_WLANPHY_IMPL,
+      .proto_ops = &wlanphy_ops,
+      .flags = DEVICE_ADD_INVISIBLE,
+  };
 
-    status = device_add(dev, &args, &iwl_trans->zxdev);
-    if (status != ZX_OK) {
-        IWL_ERR(iwl_trans, "Failed to create device: %s\n", zx_status_get_string(status));
-        free(iwl_trans);
-        return status;
-    }
+  status = device_add(dev, &args, &iwl_trans->zxdev);
+  if (status != ZX_OK) {
+    IWL_ERR(iwl_trans, "Failed to create device: %s\n", zx_status_get_string(status));
+    free(iwl_trans);
+    return status;
+  }
 
-    status = iwl_drv_start(iwl_trans);
-    if (status != ZX_OK) {
-        IWL_ERR(iwl_trans, "Failed to start driver: %s\n", zx_status_get_string(status));
-        goto fail_remove_device;
-    }
+  status = iwl_drv_start(iwl_trans);
+  if (status != ZX_OK) {
+    IWL_ERR(iwl_trans, "Failed to start driver: %s\n", zx_status_get_string(status));
+    goto fail_remove_device;
+  }
 
-    /* register transport layer debugfs here */
-    status = iwl_trans_pcie_dbgfs_register(iwl_trans);
-    if (status != ZX_OK) {
-        IWL_ERR(iwl_trans, "Failed to register debugfs: %s\n", zx_status_get_string(status));
-        goto fail_stop_device;
-    }
+  /* register transport layer debugfs here */
+  status = iwl_trans_pcie_dbgfs_register(iwl_trans);
+  if (status != ZX_OK) {
+    IWL_ERR(iwl_trans, "Failed to register debugfs: %s\n", zx_status_get_string(status));
+    goto fail_stop_device;
+  }
 
 #if 0   // NEEDS_PORTING
     /* if RTPM is in use, enable it in our device */
@@ -1144,13 +1143,13 @@
     }
 #endif  // NEEDS_PORTING
 
-    return ZX_OK;
+  return ZX_OK;
 
 fail_stop_device:
-    iwl_drv_stop(iwl_trans->drv);
+  iwl_drv_stop(iwl_trans->drv);
 fail_remove_device:
-    device_remove(iwl_trans->zxdev);
-    return status;
+  device_remove(iwl_trans->zxdev);
+  return status;
 }
 
 static zx_driver_ops_t iwlwifi_pci_driver_ops = {
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/internal.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/internal.h
index c4cd2e1..5f64d78 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/internal.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/internal.h
@@ -82,12 +82,12 @@
  * @size: size used from the buffer
  */
 struct iwl_rx_mem_buffer {
-    dma_addr_t page_dma;
-    struct page* page;
-    uint16_t vid;
-    bool invalid;
-    list_node_t list;
-    uint32_t size;
+  dma_addr_t page_dma;
+  struct page* page;
+  uint16_t vid;
+  bool invalid;
+  list_node_t list;
+  uint32_t size;
 };
 
 /**
@@ -95,17 +95,17 @@
  *
  */
 struct isr_statistics {
-    uint32_t hw;
-    uint32_t sw;
-    uint32_t err_code;
-    uint32_t sch;
-    uint32_t alive;
-    uint32_t rfkill;
-    uint32_t ctkill;
-    uint32_t wakeup;
-    uint32_t rx;
-    uint32_t tx;
-    uint32_t unhandled;
+  uint32_t hw;
+  uint32_t sw;
+  uint32_t err_code;
+  uint32_t sch;
+  uint32_t alive;
+  uint32_t rfkill;
+  uint32_t ctkill;
+  uint32_t wakeup;
+  uint32_t rx;
+  uint32_t tx;
+  uint32_t unhandled;
 };
 
 #define IWL_RX_TD_TYPE_MSK 0xff000000
@@ -123,10 +123,10 @@
  * @reserved: reserved
  */
 struct iwl_rx_transfer_desc {
-    __le32 type_n_size;
-    __le64 addr;
-    __le16 rbid;
-    __le16 reserved;
+  __le32 type_n_size;
+  __le64 addr;
+  __le16 rbid;
+  __le16 reserved;
 } __packed;
 
 #define IWL_RX_CD_SIZE 0xffffff00
@@ -142,12 +142,12 @@
  * @reserved2: reserved
  */
 struct iwl_rx_completion_desc {
-    uint8_t type;
-    uint8_t status;
-    __le16 reserved1;
-    __le16 rbid;
-    __le32 size;
-    uint8_t reserved2[22];
+  uint8_t type;
+  uint8_t status;
+  __le16 reserved1;
+  __le16 rbid;
+  __le32 size;
+  uint8_t reserved2[22];
 } __packed;
 
 /**
@@ -179,33 +179,33 @@
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
-    int id;
-    void* bd;
-    dma_addr_t bd_dma;
-    union {
-        void* used_bd;
-        __le32* bd_32;
-        struct iwl_rx_completion_desc* cd;
-    };
-    dma_addr_t used_bd_dma;
-    __le16* tr_tail;
-    dma_addr_t tr_tail_dma;
-    __le16* cr_tail;
-    dma_addr_t cr_tail_dma;
-    uint32_t read;
-    uint32_t write;
-    uint32_t free_count;
-    uint32_t used_count;
-    uint32_t write_actual;
-    uint32_t queue_size;
-    list_node_t rx_free;
-    list_node_t rx_used;
-    bool need_update;
-    void* rb_stts;
-    dma_addr_t rb_stts_dma;
-    mtx_t lock;
-    struct napi_struct napi;
-    struct iwl_rx_mem_buffer* queue[RX_QUEUE_SIZE];
+  int id;
+  void* bd;
+  dma_addr_t bd_dma;
+  union {
+    void* used_bd;
+    __le32* bd_32;
+    struct iwl_rx_completion_desc* cd;
+  };
+  dma_addr_t used_bd_dma;
+  __le16* tr_tail;
+  dma_addr_t tr_tail_dma;
+  __le16* cr_tail;
+  dma_addr_t cr_tail_dma;
+  uint32_t read;
+  uint32_t write;
+  uint32_t free_count;
+  uint32_t used_count;
+  uint32_t write_actual;
+  uint32_t queue_size;
+  list_node_t rx_free;
+  list_node_t rx_used;
+  bool need_update;
+  void* rb_stts;
+  dma_addr_t rb_stts_dma;
+  mtx_t lock;
+  struct napi_struct napi;
+  struct iwl_rx_mem_buffer* queue[RX_QUEUE_SIZE];
 };
 
 /**
@@ -221,11 +221,11 @@
  * @rx_alloc: work struct for background calls
  */
 struct iwl_rb_allocator {
-    atomic_int req_pending;
-    atomic_int req_ready;
-    list_node_t rbd_allocated;
-    list_node_t rbd_empty;
-    mtx_t lock;
+  atomic_int req_pending;
+  atomic_int req_ready;
+  list_node_t rbd_allocated;
+  list_node_t rbd_empty;
+  mtx_t lock;
 #if 0   // NEEDS_PORTING
     struct workqueue_struct* alloc_wq;
     struct work_struct rx_alloc;
@@ -233,9 +233,9 @@
 };
 
 struct iwl_dma_ptr {
-    dma_addr_t dma;
-    void* addr;
-    size_t size;
+  dma_addr_t dma;
+  void* addr;
+  size_t size;
 };
 
 /**
@@ -243,7 +243,7 @@
  * @index -- current index
  */
 static inline int iwl_queue_inc_wrap(struct iwl_trans* trans, int index) {
-    return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+  return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
 }
 
 /**
@@ -269,14 +269,14 @@
  * @index -- current index
  */
 static inline int iwl_queue_dec_wrap(struct iwl_trans* trans, int index) {
-    return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+  return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
 }
 
 struct iwl_cmd_meta {
-    /* only for SYNC commands, iff the reply skb is wanted */
-    struct iwl_host_cmd* source;
-    uint32_t flags;
-    uint32_t tbs;
+  /* only for SYNC commands, iff the reply skb is wanted */
+  struct iwl_host_cmd* source;
+  uint32_t flags;
+  uint32_t tbs;
 };
 
 #define TFD_TX_CMD_SLOTS 256
@@ -295,15 +295,15 @@
 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
 
 struct iwl_pcie_txq_entry {
-    struct iwl_device_cmd* cmd;
-    struct sk_buff* skb;
-    /* buffer to free after command completes */
-    const void* free_buf;
-    struct iwl_cmd_meta meta;
+  struct iwl_device_cmd* cmd;
+  struct sk_buff* skb;
+  /* buffer to free after command completes */
+  const void* free_buf;
+  struct iwl_cmd_meta meta;
 };
 
 struct iwl_pcie_first_tb_buf {
-    uint8_t buf[IWL_FIRST_TB_SIZE_ALIGN];
+  uint8_t buf[IWL_FIRST_TB_SIZE_ALIGN];
 };
 
 /**
@@ -349,40 +349,40 @@
  * data is a window overlayed over the HW queue.
  */
 struct iwl_txq {
-    void* tfds;
-    struct iwl_pcie_first_tb_buf* first_tb_bufs;
-    dma_addr_t first_tb_dma;
-    struct iwl_pcie_txq_entry* entries;
-    mtx_t lock;
-    unsigned long frozen_expiry_remainder;
+  void* tfds;
+  struct iwl_pcie_first_tb_buf* first_tb_bufs;
+  dma_addr_t first_tb_dma;
+  struct iwl_pcie_txq_entry* entries;
+  mtx_t lock;
+  unsigned long frozen_expiry_remainder;
 #if 0   // NEEDS_PORTING
     struct timer_list stuck_timer;
 #endif  // NEEDS_PORTING
-    struct iwl_trans_pcie* trans_pcie;
-    bool need_update;
-    bool frozen;
-    bool ampdu;
-    int block;
-    unsigned long wd_timeout;
-    struct sk_buff_head overflow_q;
-    struct iwl_dma_ptr bc_tbl;
+  struct iwl_trans_pcie* trans_pcie;
+  bool need_update;
+  bool frozen;
+  bool ampdu;
+  int block;
+  unsigned long wd_timeout;
+  struct sk_buff_head overflow_q;
+  struct iwl_dma_ptr bc_tbl;
 
-    int write_ptr;
-    int read_ptr;
-    dma_addr_t dma_addr;
-    int n_window;
-    uint32_t id;
-    int low_mark;
-    int high_mark;
+  int write_ptr;
+  int read_ptr;
+  dma_addr_t dma_addr;
+  int n_window;
+  uint32_t id;
+  int low_mark;
+  int high_mark;
 };
 
 static inline dma_addr_t iwl_pcie_get_first_tb_dma(struct iwl_txq* txq, int idx) {
-    return txq->first_tb_dma + sizeof(struct iwl_pcie_first_tb_buf) * idx;
+  return txq->first_tb_dma + sizeof(struct iwl_pcie_first_tb_buf) * idx;
 }
 
 struct iwl_tso_hdr_page {
-    struct page* page;
-    uint8_t* pos;
+  struct page* page;
+  uint8_t* pos;
 };
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
@@ -396,9 +396,9 @@
  *  set the file can no longer be used.
  */
 enum iwl_fw_mon_dbgfs_state {
-    IWL_FW_MON_DBGFS_STATE_CLOSED,
-    IWL_FW_MON_DBGFS_STATE_OPEN,
-    IWL_FW_MON_DBGFS_STATE_DISABLED,
+  IWL_FW_MON_DBGFS_STATE_CLOSED,
+  IWL_FW_MON_DBGFS_STATE_OPEN,
+  IWL_FW_MON_DBGFS_STATE_DISABLED,
 };
 #endif
 
@@ -408,8 +408,8 @@
  * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
  */
 enum iwl_shared_irq_flags {
-    IWL_SHARED_IRQ_NON_RX = BIT(0),
-    IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
+  IWL_SHARED_IRQ_NON_RX = BIT(0),
+  IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
 };
 
 /**
@@ -419,9 +419,9 @@
  * @IWL_IMAGE_RESP_FAIL: iml reading failed
  */
 enum iwl_image_response_code {
-    IWL_IMAGE_RESP_DEF = 0,
-    IWL_IMAGE_RESP_SUCCESS = 1,
-    IWL_IMAGE_RESP_FAIL = 2,
+  IWL_IMAGE_RESP_DEF = 0,
+  IWL_IMAGE_RESP_SUCCESS = 1,
+  IWL_IMAGE_RESP_FAIL = 2,
 };
 
 /**
@@ -432,10 +432,10 @@
  * @paging_cnt: total number of items in array
  */
 struct iwl_self_init_dram {
-    struct iwl_dram_data* fw;
-    int fw_cnt;
-    struct iwl_dram_data* paging;
-    int paging_cnt;
+  struct iwl_dram_data* fw;
+  int fw_cnt;
+  struct iwl_dram_data* paging;
+  int paging_cnt;
 };
 
 /**
@@ -450,11 +450,11 @@
  */
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
 struct cont_rec {
-    uint32_t prev_wr_ptr;
-    uint32_t prev_wrap_cnt;
-    uint8_t state;
-    /* Used to sync monitor_data debugfs file with driver unload flow */
-    struct mutex mutex;
+  uint32_t prev_wr_ptr;
+  uint32_t prev_wrap_cnt;
+  uint8_t state;
+  /* Used to sync monitor_data debugfs file with driver unload flow */
+  struct mutex mutex;
 };
 #endif
 
@@ -512,22 +512,22 @@
  * @in_rescan: true if we have triggered a device rescan
  */
 struct iwl_trans_pcie {
-    struct iwl_rxq* rxq;
-    struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
-    struct iwl_rx_mem_buffer* global_table[RX_POOL_SIZE];
-    struct iwl_rb_allocator rba;
-    union {
-        struct iwl_context_info* ctxt_info;
-        struct iwl_context_info_gen3* ctxt_info_gen3;
-    };
-    struct iwl_prph_info* prph_info;
-    struct iwl_prph_scratch* prph_scratch;
-    dma_addr_t ctxt_info_dma_addr;
-    dma_addr_t prph_info_dma_addr;
-    dma_addr_t prph_scratch_dma_addr;
-    dma_addr_t iml_dma_addr;
-    struct iwl_self_init_dram init_dram;
-    struct iwl_trans* trans;
+  struct iwl_rxq* rxq;
+  struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
+  struct iwl_rx_mem_buffer* global_table[RX_POOL_SIZE];
+  struct iwl_rb_allocator rba;
+  union {
+    struct iwl_context_info* ctxt_info;
+    struct iwl_context_info_gen3* ctxt_info_gen3;
+  };
+  struct iwl_prph_info* prph_info;
+  struct iwl_prph_scratch* prph_scratch;
+  dma_addr_t ctxt_info_dma_addr;
+  dma_addr_t prph_info_dma_addr;
+  dma_addr_t prph_scratch_dma_addr;
+  dma_addr_t iml_dma_addr;
+  struct iwl_self_init_dram init_dram;
+  struct iwl_trans* trans;
 
 #if 0   // NEEDS_PORTING
     struct net_device napi_dev;
@@ -535,87 +535,87 @@
     struct __percpu iwl_tso_hdr_page* tso_hdr_page;
 #endif  // NEEDS_PORTING
 
-    /* INT ICT Table */
-    __le32* ict_tbl;
-    dma_addr_t ict_tbl_dma;
-    int ict_index;
-    bool use_ict;
-    bool is_down, opmode_down;
-    bool debug_rfkill;
-    struct isr_statistics isr_stats;
+  /* INT ICT Table */
+  __le32* ict_tbl;
+  dma_addr_t ict_tbl_dma;
+  int ict_index;
+  bool use_ict;
+  bool is_down, opmode_down;
+  bool debug_rfkill;
+  struct isr_statistics isr_stats;
 
 #if 0   // NEEDS_PORTING
     spinlock_t irq_lock;
 #endif  // NEEDS_PORTING
-    mtx_t mutex;
-    uint32_t inta_mask;
-    uint32_t scd_base_addr;
-    struct iwl_dma_ptr scd_bc_tbls;
-    struct iwl_dma_ptr kw;
+  mtx_t mutex;
+  uint32_t inta_mask;
+  uint32_t scd_base_addr;
+  struct iwl_dma_ptr scd_bc_tbls;
+  struct iwl_dma_ptr kw;
 
-    struct iwl_txq* txq_memory;
-    struct iwl_txq* txq[IWL_MAX_TVQM_QUEUES];
-    unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
-    unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+  struct iwl_txq* txq_memory;
+  struct iwl_txq* txq[IWL_MAX_TVQM_QUEUES];
+  unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+  unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
 
-    /* PCI bus related data */
-    struct pci_dev* pci_dev;
-    pci_protocol_t* pci;
-    mmio_buffer_t mmio;
+  /* PCI bus related data */
+  struct pci_dev* pci_dev;
+  pci_protocol_t* pci;
+  mmio_buffer_t mmio;
 
-    bool ucode_write_complete;
+  bool ucode_write_complete;
 #if 0   // NEEDS_PORTING
     wait_queue_head_t ucode_write_waitq;
     wait_queue_head_t wait_command_queue;
     wait_queue_head_t d0i3_waitq;
 #endif  // NEEDS_PORTING
 
-    uint8_t page_offs, dev_cmd_offs;
+  uint8_t page_offs, dev_cmd_offs;
 
-    uint8_t cmd_queue;
-    uint8_t def_rx_queue;
-    uint8_t cmd_fifo;
-    unsigned int cmd_q_wdg_timeout;
-    uint8_t n_no_reclaim_cmds;
-    uint8_t no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
-    uint8_t max_tbs;
-    uint16_t tfd_size;
+  uint8_t cmd_queue;
+  uint8_t def_rx_queue;
+  uint8_t cmd_fifo;
+  unsigned int cmd_q_wdg_timeout;
+  uint8_t n_no_reclaim_cmds;
+  uint8_t no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
+  uint8_t max_tbs;
+  uint16_t tfd_size;
 
-    enum iwl_amsdu_size rx_buf_size;
-    bool bc_table_dword;
-    bool scd_set_active;
-    bool sw_csum_tx;
-    bool pcie_dbg_dumped_once;
-    uint32_t rx_page_order;
+  enum iwl_amsdu_size rx_buf_size;
+  bool bc_table_dword;
+  bool scd_set_active;
+  bool sw_csum_tx;
+  bool pcie_dbg_dumped_once;
+  uint32_t rx_page_order;
 
-    /*protect hw register */
-    mtx_t reg_lock;
-    bool cmd_hold_nic_awake;
-    bool ref_cmd_in_flight;
+  /* protect hw register */
+  mtx_t reg_lock;
+  bool cmd_hold_nic_awake;
+  bool ref_cmd_in_flight;
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
-    struct cont_rec fw_mon_data;
+  struct cont_rec fw_mon_data;
 #endif
 
 #if 0   // NEEDS_PORTING
     struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
 #endif  // NEEDS_PORTING
-    bool msix_enabled;
-    uint8_t shared_vec_mask;
-    uint32_t alloc_vecs;
-    uint32_t def_irq;
-    uint32_t fh_init_mask;
-    uint32_t hw_init_mask;
-    uint32_t fh_mask;
-    uint32_t hw_mask;
+  bool msix_enabled;
+  uint8_t shared_vec_mask;
+  uint32_t alloc_vecs;
+  uint32_t def_irq;
+  uint32_t fh_init_mask;
+  uint32_t hw_init_mask;
+  uint32_t fh_mask;
+  uint32_t hw_mask;
 #if 0   // NEEDS_PORTING
     cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
 #endif  // NEEDS_PORTING
-    bool in_rescan;
+  bool in_rescan;
 };
 
 static inline struct iwl_trans_pcie* IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans* trans) {
-    return (struct iwl_trans_pcie*)trans->trans_specific;
+  return (struct iwl_trans_pcie*)trans->trans_specific;
 }
 
 #if 0   // NEEDS_PORTING
@@ -633,7 +633,7 @@
 #endif  // NEEDS_PORTING
 
 static inline struct iwl_trans* iwl_trans_pcie_get_trans(struct iwl_trans_pcie* trans_pcie) {
-    return containerof(trans_pcie, struct iwl_trans, trans_specific);
+  return containerof(trans_pcie, struct iwl_trans, trans_specific);
 }
 
 /*
@@ -702,7 +702,7 @@
 void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie* trans_pcie, struct iwl_txq* txq,
                                    uint16_t byte_cnt, int num_tbs);
 
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
 static inline uint16_t iwl_pcie_tfd_tb_get_len(struct iwl_trans* trans, void* _tfd, uint8_t idx) {
     if (trans->cfg->use_tfh) {
         struct iwl_tfh_tfd* tfd = _tfd;
@@ -725,25 +725,25 @@
 /*****************************************************
  * Helpers
  ******************************************************/
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 static inline void _iwl_disable_interrupts(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    clear_bit(STATUS_INT_ENABLED, &trans->status);
-    if (!trans_pcie->msix_enabled) {
-        /* disable interrupts from uCode/NIC to host */
-        iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+  clear_bit(STATUS_INT_ENABLED, &trans->status);
+  if (!trans_pcie->msix_enabled) {
+    /* disable interrupts from uCode/NIC to host */
+    iwl_write32(trans, CSR_INT_MASK, 0x00000000);
 
-        /* acknowledge/clear/reset any interrupts still pending
-         * from uCode or flow handler (Rx/Tx DMA) */
-        iwl_write32(trans, CSR_INT, 0xffffffff);
-        iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
-    } else {
-        /* disable all the interrupt we might use */
-        iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, trans_pcie->fh_init_mask);
-        iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, trans_pcie->hw_init_mask);
-    }
-    IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
+    /* acknowledge/clear/reset any interrupts still pending
+     * from uCode or flow handler (Rx/Tx DMA) */
+    iwl_write32(trans, CSR_INT, 0xffffffff);
+    iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
+  } else {
+    /* disable all the interrupts we might use */
+    iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, trans_pcie->fh_init_mask);
+    iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, trans_pcie->hw_init_mask);
+  }
+  IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
 }
 #if 0  // NEEDS_PORTING
 
@@ -794,18 +794,18 @@
     dram->fw = NULL;
 }
 
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 static inline void iwl_disable_interrupts(struct iwl_trans* trans) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
     spin_lock(&trans_pcie->irq_lock);
     _iwl_disable_interrupts(trans);
     spin_unlock(&trans_pcie->irq_lock);
-#endif // NEEDS_PORTING
-    _iwl_disable_interrupts(trans);
+#endif  // NEEDS_PORTING
+  _iwl_disable_interrupts(trans);
 }
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
 
 static inline void _iwl_enable_interrupts(struct iwl_trans* trans) {
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -863,17 +863,17 @@
 #endif  // NEEDS_PORTING
 
 static inline uint16_t iwl_pcie_get_cmd_index(const struct iwl_txq* q, uint32_t index) {
-    return index & (q->n_window - 1);
+  return index & (q->n_window - 1);
 }
 
 static inline void* iwl_pcie_get_tfd(struct iwl_trans* trans, struct iwl_txq* txq, int idx) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    if (trans->cfg->use_tfh) {
-        idx = iwl_pcie_get_cmd_index(txq, idx);
-    }
+  if (trans->cfg->use_tfh) {
+    idx = iwl_pcie_get_cmd_index(txq, idx);
+  }
 
-    return (char*)txq->tfds + trans_pcie->tfd_size * idx;
+  return (char*)txq->tfds + trans_pcie->tfd_size * idx;
 }
 
 #if 0   // NEEDS_PORTING
@@ -965,29 +965,29 @@
 
 static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans* trans, uint32_t reg,
                                                   uint32_t mask, uint32_t value) {
-    uint32_t v;
+  uint32_t v;
 
 #ifdef CPTCFG_IWLWIFI_DEBUG
-    WARN_ON_ONCE(value & ~mask);
+  WARN_ON_ONCE(value & ~mask);
 #endif
 
-    v = iwl_read32(trans, reg);
-    v &= ~mask;
-    v |= value;
-    iwl_write32(trans, reg, v);
+  v = iwl_read32(trans, reg);
+  v &= ~mask;
+  v |= value;
+  iwl_write32(trans, reg, v);
 }
 
 static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans* trans, uint32_t reg,
                                               uint32_t mask) {
-    __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+  __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
 }
 
 static inline void __iwl_trans_pcie_set_bit(struct iwl_trans* trans, uint32_t reg, uint32_t mask) {
-    __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
+  __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
 }
 
 static inline bool iwl_pcie_dbg_on(struct iwl_trans* trans) {
-    return (trans->dbg_dest_tlv || trans->ini_valid);
+  return (trans->dbg_dest_tlv || trans->ini_valid);
 }
 
 void iwl_trans_pcie_rf_kill(struct iwl_trans* trans, bool state);
@@ -996,9 +996,7 @@
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
 int iwl_trans_pcie_dbgfs_register(struct iwl_trans* trans);
 #else
-static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans* trans) {
-    return 0;
-}
+static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans* trans) { return 0; }
 #endif
 #if 0   // NEEDS_PORTING
 
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/rx.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/rx.c
index eb89db3..cc3ac8d 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/rx.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/rx.c
@@ -146,198 +146,202 @@
  * iwl_rxq_space - Return number of free slots available in queue.
  */
 static int iwl_rxq_space(const struct iwl_rxq* rxq) {
-    /* Make sure rx queue size is a power of 2 */
-    WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
+  /* Make sure rx queue size is a power of 2 */
+  WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
 
-    /*
-     * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
-     * between empty and completely full queues.
-     * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
-     * defined for negative dividends.
-     */
-    return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
+  /*
+   * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
+   * between empty and completely full queues.
+   * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
+   * defined for negative dividends.
+   */
+  return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
 }
 
 /*
  * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
  */
 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) {
-    return cpu_to_le32((uint32_t)(dma_addr >> 8));
+  return cpu_to_le32((uint32_t)(dma_addr >> 8));
 }
 
 /*
  * iwl_pcie_rx_stop - stops the Rx DMA
  */
 int iwl_pcie_rx_stop(struct iwl_trans* trans) {
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-        /* TODO: remove this for 22560 once fw does it */
-        iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
-        return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
-    } else if (trans->cfg->mq_rx_supported) {
-        iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
-        return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
-    } else {
-        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-        return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
-                                   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-    }
+  if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+    /* TODO: remove this for 22560 once fw does it */
+    iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
+    return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+  } else if (trans->cfg->mq_rx_supported) {
+    iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+    return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+  } else {
+    iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+    return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+                               1000);
+  }
 }
 
 /*
  * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
  */
 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans* trans, struct iwl_rxq* rxq) {
-    uint32_t reg;
+  uint32_t reg;
 
-    lockdep_assert_held(&rxq->lock);
+  lockdep_assert_held(&rxq->lock);
 
-    /*
-     * explicitly wake up the NIC if:
-     * 1. shadow registers aren't enabled
-     * 2. there is a chance that the NIC is asleep
-     */
-    if (!trans->cfg->base_params->shadow_reg_enable &&
-        test_bit(STATUS_TPOWER_PMI, &trans->status)) {
-        reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+  /*
+   * explicitly wake up the NIC if:
+   * 1. shadow registers aren't enabled
+   * 2. there is a chance that the NIC is asleep
+   */
+  if (!trans->cfg->base_params->shadow_reg_enable && test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+    reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
 
-        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-            IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", reg);
-            iwl_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req));
-            rxq->need_update = true;
-            return;
-        }
+    if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+      IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", reg);
+      iwl_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req));
+      rxq->need_update = true;
+      return;
     }
+  }
 
-    rxq->write_actual = round_down(rxq->write, 8);
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-        iwl_write32(trans, HBUS_TARG_WRPTR,
-                    (rxq->write_actual | ((FIRST_RX_QUEUE + rxq->id) << 16)));
-    else if (trans->cfg->mq_rx_supported) {
-        iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), rxq->write_actual);
-    } else {
-        iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
-    }
+  rxq->write_actual = round_down(rxq->write, 8);
+  if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+    iwl_write32(trans, HBUS_TARG_WRPTR, (rxq->write_actual | ((FIRST_RX_QUEUE + rxq->id) << 16)));
+  else if (trans->cfg->mq_rx_supported) {
+    iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), rxq->write_actual);
+  } else {
+    iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+  }
 }
 
 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    int i;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  int i;
 
-    for (i = 0; i < trans->num_rx_queues; i++) {
-        struct iwl_rxq* rxq = &trans_pcie->rxq[i];
+  for (i = 0; i < trans->num_rx_queues; i++) {
+    struct iwl_rxq* rxq = &trans_pcie->rxq[i];
 
-        if (!rxq->need_update) { continue; }
-        spin_lock(&rxq->lock);
-        iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
-        rxq->need_update = false;
-        spin_unlock(&rxq->lock);
+    if (!rxq->need_update) {
+      continue;
     }
+    spin_lock(&rxq->lock);
+    iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+    rxq->need_update = false;
+    spin_unlock(&rxq->lock);
+  }
 }
 
 static void iwl_pcie_restock_bd(struct iwl_trans* trans, struct iwl_rxq* rxq,
                                 struct iwl_rx_mem_buffer* rxb) {
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-        struct iwl_rx_transfer_desc* bd = rxq->bd;
+  if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+    struct iwl_rx_transfer_desc* bd = rxq->bd;
 
-        bd[rxq->write].type_n_size = cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
-                                                 ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
-        bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
-        bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
-    } else {
-        __le64* bd = rxq->bd;
+    bd[rxq->write].type_n_size = cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
+                                             ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+    bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
+    bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
+  } else {
+    __le64* bd = rxq->bd;
 
-        bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
-    }
+    bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+  }
 
-    IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n", (uint32_t)rxb->vid,
-                 rxq->id, rxq->write);
+  IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n", (uint32_t)rxb->vid,
+               rxq->id, rxq->write);
 }
 
 /*
  * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
  */
 static void iwl_pcie_rxmq_restock(struct iwl_trans* trans, struct iwl_rxq* rxq) {
-    struct iwl_rx_mem_buffer* rxb;
+  struct iwl_rx_mem_buffer* rxb;
 
-    /*
-     * If the device isn't enabled - no need to try to add buffers...
-     * This can happen when we stop the device and still have an interrupt
-     * pending. We stop the APM before we sync the interrupts because we
-     * have to (see comment there). On the other hand, since the APM is
-     * stopped, we cannot access the HW (in particular not prph).
-     * So don't try to restock if the APM has been already stopped.
-     */
-    if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { return; }
+  /*
+   * If the device isn't enabled - no need to try to add buffers...
+   * This can happen when we stop the device and still have an interrupt
+   * pending. We stop the APM before we sync the interrupts because we
+   * have to (see comment there). On the other hand, since the APM is
+   * stopped, we cannot access the HW (in particular not prph).
+   * So don't try to restock if the APM has been already stopped.
+   */
+  if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+    return;
+  }
 
+  spin_lock(&rxq->lock);
+  while (rxq->free_count) {
+    /* Get next free Rx buffer, remove from free list */
+    rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list);
+    list_del(&rxb->list);
+    rxb->invalid = false;
+    /* 12 first bits are expected to be empty */
+    WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+    /* Point to Rx buffer via next RBD in circular buffer */
+    iwl_pcie_restock_bd(trans, rxq, rxb);
+    rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
+    rxq->free_count--;
+  }
+  spin_unlock(&rxq->lock);
+
+  /*
+   * If we've added more space for the firmware to place data, tell it.
+   * Increment device's write pointer in multiples of 8.
+   */
+  if (rxq->write_actual != (rxq->write & ~0x7)) {
     spin_lock(&rxq->lock);
-    while (rxq->free_count) {
-        /* Get next free Rx buffer, remove from free list */
-        rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list);
-        list_del(&rxb->list);
-        rxb->invalid = false;
-        /* 12 first bits are expected to be empty */
-        WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
-        /* Point to Rx buffer via next RBD in circular buffer */
-        iwl_pcie_restock_bd(trans, rxq, rxb);
-        rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
-        rxq->free_count--;
-    }
+    iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
     spin_unlock(&rxq->lock);
-
-    /*
-     * If we've added more space for the firmware to place data, tell it.
-     * Increment device's write pointer in multiples of 8.
-     */
-    if (rxq->write_actual != (rxq->write & ~0x7)) {
-        spin_lock(&rxq->lock);
-        iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
-        spin_unlock(&rxq->lock);
-    }
+  }
 }
 
 /*
  * iwl_pcie_rxsq_restock - restock implementation for single queue rx
  */
 static void iwl_pcie_rxsq_restock(struct iwl_trans* trans, struct iwl_rxq* rxq) {
-    struct iwl_rx_mem_buffer* rxb;
+  struct iwl_rx_mem_buffer* rxb;
 
-    /*
-     * If the device isn't enabled - not need to try to add buffers...
-     * This can happen when we stop the device and still have an interrupt
-     * pending. We stop the APM before we sync the interrupts because we
-     * have to (see comment there). On the other hand, since the APM is
-     * stopped, we cannot access the HW (in particular not prph).
-     * So don't try to restock if the APM has been already stopped.
-     */
-    if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { return; }
+  /*
+   * If the device isn't enabled - no need to try to add buffers...
+   * This can happen when we stop the device and still have an interrupt
+   * pending. We stop the APM before we sync the interrupts because we
+   * have to (see comment there). On the other hand, since the APM is
+   * stopped, we cannot access the HW (in particular not prph).
+   * So don't try to restock if the APM has been already stopped.
+   */
+  if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+    return;
+  }
 
+  spin_lock(&rxq->lock);
+  while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
+    __le32* bd = (__le32*)rxq->bd;
+    /* The overwritten rxb must be a used one */
+    rxb = rxq->queue[rxq->write];
+    BUG_ON(rxb && rxb->page);
+
+    /* Get next free Rx buffer, remove from free list */
+    rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list);
+    list_del(&rxb->list);
+    rxb->invalid = false;
+
+    /* Point to Rx buffer via next RBD in circular buffer */
+    bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
+    rxq->queue[rxq->write] = rxb;
+    rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+    rxq->free_count--;
+  }
+  spin_unlock(&rxq->lock);
+
+  /* If we've added more space for the firmware to place data, tell it.
+   * Increment device's write pointer in multiples of 8. */
+  if (rxq->write_actual != (rxq->write & ~0x7)) {
     spin_lock(&rxq->lock);
-    while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
-        __le32* bd = (__le32*)rxq->bd;
-        /* The overwritten rxb must be a used one */
-        rxb = rxq->queue[rxq->write];
-        BUG_ON(rxb && rxb->page);
-
-        /* Get next free Rx buffer, remove from free list */
-        rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list);
-        list_del(&rxb->list);
-        rxb->invalid = false;
-
-        /* Point to Rx buffer via next RBD in circular buffer */
-        bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
-        rxq->queue[rxq->write] = rxb;
-        rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
-        rxq->free_count--;
-    }
+    iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
     spin_unlock(&rxq->lock);
-
-    /* If we've added more space for the firmware to place data, tell it.
-     * Increment device's write pointer in multiples of 8. */
-    if (rxq->write_actual != (rxq->write & ~0x7)) {
-        spin_lock(&rxq->lock);
-        iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
-        spin_unlock(&rxq->lock);
-    }
+  }
 }
 
 /*
@@ -352,11 +356,11 @@
  * target buffer.
  */
 static void iwl_pcie_rxq_restock(struct iwl_trans* trans, struct iwl_rxq* rxq) {
-    if (trans->cfg->mq_rx_supported) {
-        iwl_pcie_rxmq_restock(trans, rxq);
-    } else {
-        iwl_pcie_rxsq_restock(trans, rxq);
-    }
+  if (trans->cfg->mq_rx_supported) {
+    iwl_pcie_rxmq_restock(trans, rxq);
+  } else {
+    iwl_pcie_rxsq_restock(trans, rxq);
+  }
 }
 
 /*
@@ -364,28 +368,30 @@
  *
  */
 static struct page* iwl_pcie_rx_alloc_page(struct iwl_trans* trans, gfp_t priority) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct page* page;
-    gfp_t gfp_mask = priority;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct page* page;
+  gfp_t gfp_mask = priority;
 
-    if (trans_pcie->rx_page_order > 0) { gfp_mask |= __GFP_COMP; }
+  if (trans_pcie->rx_page_order > 0) {
+    gfp_mask |= __GFP_COMP;
+  }
 
-    /* Alloc a new receive buffer */
-    page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-    if (!page) {
-        if (net_ratelimit()) {
-            IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", trans_pcie->rx_page_order);
-        }
-        /*
-         * Issue an error if we don't have enough pre-allocated
-          * buffers.
-        `        */
-        if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) {
-            IWL_CRIT(trans, "Failed to alloc_pages\n");
-        }
-        return NULL;
+  /* Alloc a new receive buffer */
+  page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+  if (!page) {
+    if (net_ratelimit()) {
+      IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", trans_pcie->rx_page_order);
     }
-    return page;
+    /*
+     * Issue an error if we don't have enough pre-allocated
+     * buffers.
+     */
+    if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) {
+      IWL_CRIT(trans, "Failed to alloc_pages\n");
+    }
+    return NULL;
+  }
+  return page;
 }
 
 /*
@@ -398,67 +404,71 @@
  * allocated buffers.
  */
 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans* trans, gfp_t priority, struct iwl_rxq* rxq) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_rx_mem_buffer* rxb;
-    struct page* page;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_rx_mem_buffer* rxb;
+  struct page* page;
 
-    while (1) {
-        spin_lock(&rxq->lock);
-        if (list_empty(&rxq->rx_used)) {
-            spin_unlock(&rxq->lock);
-            return;
-        }
-        spin_unlock(&rxq->lock);
-
-        /* Alloc a new receive buffer */
-        page = iwl_pcie_rx_alloc_page(trans, priority);
-        if (!page) { return; }
-
-        spin_lock(&rxq->lock);
-
-        if (list_empty(&rxq->rx_used)) {
-            spin_unlock(&rxq->lock);
-            __free_pages(page, trans_pcie->rx_page_order);
-            return;
-        }
-        rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, list);
-        list_del(&rxb->list);
-        spin_unlock(&rxq->lock);
-
-        BUG_ON(rxb->page);
-        rxb->page = page;
-        /* Get physical address of the RB */
-        rxb->page_dma = dma_map_page(trans->dev, page, 0, PAGE_SIZE << trans_pcie->rx_page_order,
-                                     DMA_FROM_DEVICE);
-        if (dma_mapping_error(trans->dev, rxb->page_dma)) {
-            rxb->page = NULL;
-            spin_lock(&rxq->lock);
-            list_add(&rxb->list, &rxq->rx_used);
-            spin_unlock(&rxq->lock);
-            __free_pages(page, trans_pcie->rx_page_order);
-            return;
-        }
-
-        spin_lock(&rxq->lock);
-
-        list_add_tail(&rxb->list, &rxq->rx_free);
-        rxq->free_count++;
-
-        spin_unlock(&rxq->lock);
+  while (1) {
+    spin_lock(&rxq->lock);
+    if (list_empty(&rxq->rx_used)) {
+      spin_unlock(&rxq->lock);
+      return;
     }
+    spin_unlock(&rxq->lock);
+
+    /* Alloc a new receive buffer */
+    page = iwl_pcie_rx_alloc_page(trans, priority);
+    if (!page) {
+      return;
+    }
+
+    spin_lock(&rxq->lock);
+
+    if (list_empty(&rxq->rx_used)) {
+      spin_unlock(&rxq->lock);
+      __free_pages(page, trans_pcie->rx_page_order);
+      return;
+    }
+    rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, list);
+    list_del(&rxb->list);
+    spin_unlock(&rxq->lock);
+
+    BUG_ON(rxb->page);
+    rxb->page = page;
+    /* Get physical address of the RB */
+    rxb->page_dma =
+        dma_map_page(trans->dev, page, 0, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE);
+    if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+      rxb->page = NULL;
+      spin_lock(&rxq->lock);
+      list_add(&rxb->list, &rxq->rx_used);
+      spin_unlock(&rxq->lock);
+      __free_pages(page, trans_pcie->rx_page_order);
+      return;
+    }
+
+    spin_lock(&rxq->lock);
+
+    list_add_tail(&rxb->list, &rxq->rx_free);
+    rxq->free_count++;
+
+    spin_unlock(&rxq->lock);
+  }
 }
 
 void iwl_pcie_free_rbs_pool(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    int i;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  int i;
 
-    for (i = 0; i < RX_POOL_SIZE; i++) {
-        if (!trans_pcie->rx_pool[i].page) { continue; }
-        dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
-                       PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE);
-        __free_pages(trans_pcie->rx_pool[i].page, trans_pcie->rx_page_order);
-        trans_pcie->rx_pool[i].page = NULL;
+  for (i = 0; i < RX_POOL_SIZE; i++) {
+    if (!trans_pcie->rx_pool[i].page) {
+      continue;
     }
+    dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
+                   PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE);
+    __free_pages(trans_pcie->rx_pool[i].page, trans_pcie->rx_page_order);
+    trans_pcie->rx_pool[i].page = NULL;
+  }
 }
 
 /*
@@ -468,80 +478,84 @@
  * Called as a scheduled work item.
  */
 static void iwl_pcie_rx_allocator(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_rb_allocator* rba = &trans_pcie->rba;
-    struct list_head local_empty;
-    int pending = atomic_xchg(&rba->req_pending, 0);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_rb_allocator* rba = &trans_pcie->rba;
+  struct list_head local_empty;
+  int pending = atomic_xchg(&rba->req_pending, 0);
 
-    IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+  IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
 
-    /* If we were scheduled - there is at least one request */
-    spin_lock(&rba->lock);
-    /* swap out the rba->rbd_empty to a local list */
-    list_replace_init(&rba->rbd_empty, &local_empty);
-    spin_unlock(&rba->lock);
+  /* If we were scheduled - there is at least one request */
+  spin_lock(&rba->lock);
+  /* swap out the rba->rbd_empty to a local list */
+  list_replace_init(&rba->rbd_empty, &local_empty);
+  spin_unlock(&rba->lock);
 
-    while (pending) {
-        int i;
-        LIST_HEAD(local_allocated);
-        gfp_t gfp_mask = GFP_KERNEL;
+  while (pending) {
+    int i;
+    LIST_HEAD(local_allocated);
+    gfp_t gfp_mask = GFP_KERNEL;
 
-        /* Do not post a warning if there are only a few requests */
-        if (pending < RX_PENDING_WATERMARK) { gfp_mask |= __GFP_NOWARN; }
+    /* Do not post a warning if there are only a few requests */
+    if (pending < RX_PENDING_WATERMARK) {
+      gfp_mask |= __GFP_NOWARN;
+    }
 
-        for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
-            struct iwl_rx_mem_buffer* rxb;
-            struct page* page;
+    for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+      struct iwl_rx_mem_buffer* rxb;
+      struct page* page;
 
-            /* List should never be empty - each reused RBD is
-             * returned to the list, and initial pool covers any
-             * possible gap between the time the page is allocated
-             * to the time the RBD is added.
-             */
-            BUG_ON(list_empty(&local_empty));
-            /* Get the first rxb from the rbd list */
-            rxb = list_first_entry(&local_empty, struct iwl_rx_mem_buffer, list);
-            BUG_ON(rxb->page);
+      /* List should never be empty - each reused RBD is
+       * returned to the list, and initial pool covers any
+       * possible gap between the time the page is allocated
+       * to the time the RBD is added.
+       */
+      BUG_ON(list_empty(&local_empty));
+      /* Get the first rxb from the rbd list */
+      rxb = list_first_entry(&local_empty, struct iwl_rx_mem_buffer, list);
+      BUG_ON(rxb->page);
 
-            /* Alloc a new receive buffer */
-            page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
-            if (!page) { continue; }
-            rxb->page = page;
+      /* Alloc a new receive buffer */
+      page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
+      if (!page) {
+        continue;
+      }
+      rxb->page = page;
 
-            /* Get physical address of the RB */
-            rxb->page_dma = dma_map_page(trans->dev, page, 0,
-                                         PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE);
-            if (dma_mapping_error(trans->dev, rxb->page_dma)) {
-                rxb->page = NULL;
-                __free_pages(page, trans_pcie->rx_page_order);
-                continue;
-            }
+      /* Get physical address of the RB */
+      rxb->page_dma = dma_map_page(trans->dev, page, 0, PAGE_SIZE << trans_pcie->rx_page_order,
+                                   DMA_FROM_DEVICE);
+      if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+        rxb->page = NULL;
+        __free_pages(page, trans_pcie->rx_page_order);
+        continue;
+      }
 
-            /* move the allocated entry to the out list */
-            list_move(&rxb->list, &local_allocated);
-            i++;
-        }
+      /* move the allocated entry to the out list */
+      list_move(&rxb->list, &local_allocated);
+      i++;
+    }
 
-        pending--;
-        if (!pending) {
-            pending = atomic_xchg(&rba->req_pending, 0);
-            IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
-        }
-
-        spin_lock(&rba->lock);
-        /* add the allocated rbds to the allocator allocated list */
-        list_splice_tail(&local_allocated, &rba->rbd_allocated);
-        /* get more empty RBDs for current pending requests */
-        list_splice_tail_init(&rba->rbd_empty, &local_empty);
-        spin_unlock(&rba->lock);
-
-        atomic_inc(&rba->req_ready);
+    pending--;
+    if (!pending) {
+      pending = atomic_xchg(&rba->req_pending, 0);
+      IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
     }
 
     spin_lock(&rba->lock);
-    /* return unused rbds to the allocator empty list */
-    list_splice_tail(&local_empty, &rba->rbd_empty);
+    /* add the allocated rbds to the allocator allocated list */
+    list_splice_tail(&local_allocated, &rba->rbd_allocated);
+    /* get more empty RBDs for current pending requests */
+    list_splice_tail_init(&rba->rbd_empty, &local_empty);
     spin_unlock(&rba->lock);
+
+    atomic_inc(&rba->req_ready);
+  }
+
+  spin_lock(&rba->lock);
+  /* return unused rbds to the allocator empty list */
+  list_splice_tail(&local_empty, &rba->rbd_empty);
+  spin_unlock(&rba->lock);
 }
 
 /*
@@ -553,472 +567,504 @@
  * and updates the relevant counters.
  */
 static void iwl_pcie_rx_allocator_get(struct iwl_trans* trans, struct iwl_rxq* rxq) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_rb_allocator* rba = &trans_pcie->rba;
-    int i;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_rb_allocator* rba = &trans_pcie->rba;
+  int i;
 
-    lockdep_assert_held(&rxq->lock);
+  lockdep_assert_held(&rxq->lock);
 
-    /*
-     * atomic_dec_if_positive returns req_ready - 1 for any scenario.
-     * If req_ready is 0 atomic_dec_if_positive will return -1 and this
-     * function will return early, as there are no ready requests.
-     * atomic_dec_if_positive will perofrm the *actual* decrement only if
-     * req_ready > 0, i.e. - there are ready requests and the function
-     * hands one request to the caller.
-     */
-    if (atomic_dec_if_positive(&rba->req_ready) < 0) { return; }
+  /*
+   * atomic_dec_if_positive returns req_ready - 1 for any scenario.
+   * If req_ready is 0 atomic_dec_if_positive will return -1 and this
+   * function will return early, as there are no ready requests.
+   * atomic_dec_if_positive will perform the *actual* decrement only if
+   * req_ready > 0, i.e. - there are ready requests and the function
+   * hands one request to the caller.
+   */
+  if (atomic_dec_if_positive(&rba->req_ready) < 0) {
+    return;
+  }
 
-    spin_lock(&rba->lock);
-    for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
-        /* Get next free Rx buffer, remove it from free list */
-        struct iwl_rx_mem_buffer* rxb =
-            list_first_entry(&rba->rbd_allocated, struct iwl_rx_mem_buffer, list);
+  spin_lock(&rba->lock);
+  for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+    /* Get next free Rx buffer, remove it from free list */
+    struct iwl_rx_mem_buffer* rxb =
+        list_first_entry(&rba->rbd_allocated, struct iwl_rx_mem_buffer, list);
 
-        list_move(&rxb->list, &rxq->rx_free);
-    }
-    spin_unlock(&rba->lock);
+    list_move(&rxb->list, &rxq->rx_free);
+  }
+  spin_unlock(&rba->lock);
 
-    rxq->used_count -= RX_CLAIM_REQ_ALLOC;
-    rxq->free_count += RX_CLAIM_REQ_ALLOC;
+  rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+  rxq->free_count += RX_CLAIM_REQ_ALLOC;
 }
 
 void iwl_pcie_rx_allocator_work(struct work_struct* data) {
-    struct iwl_rb_allocator* rba_p = container_of(data, struct iwl_rb_allocator, rx_alloc);
-    struct iwl_trans_pcie* trans_pcie = container_of(rba_p, struct iwl_trans_pcie, rba);
+  struct iwl_rb_allocator* rba_p = container_of(data, struct iwl_rb_allocator, rx_alloc);
+  struct iwl_trans_pcie* trans_pcie = container_of(rba_p, struct iwl_trans_pcie, rba);
 
-    iwl_pcie_rx_allocator(trans_pcie->trans);
+  iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
 static int iwl_pcie_free_bd_size(struct iwl_trans* trans, bool use_rx_td) {
-    struct iwl_rx_transfer_desc* rx_td;
+  struct iwl_rx_transfer_desc* rx_td;
 
-    if (use_rx_td) {
-        return sizeof(*rx_td);
-    } else {
-        return trans->cfg->mq_rx_supported ? sizeof(__le64) : sizeof(__le32);
-    }
+  if (use_rx_td) {
+    return sizeof(*rx_td);
+  } else {
+    return trans->cfg->mq_rx_supported ? sizeof(__le64) : sizeof(__le32);
+  }
 }
 
 static void iwl_pcie_free_rxq_dma(struct iwl_trans* trans, struct iwl_rxq* rxq) {
-    struct device* dev = trans->dev;
-    bool use_rx_td = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560);
-    int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+  struct device* dev = trans->dev;
+  bool use_rx_td = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560);
+  int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
 
-    if (rxq->bd) {
-        dma_free_coherent(trans->dev, free_size * rxq->queue_size, rxq->bd, rxq->bd_dma);
-    }
-    rxq->bd_dma = 0;
-    rxq->bd = NULL;
+  if (rxq->bd) {
+    dma_free_coherent(trans->dev, free_size * rxq->queue_size, rxq->bd, rxq->bd_dma);
+  }
+  rxq->bd_dma = 0;
+  rxq->bd = NULL;
 
-    if (rxq->rb_stts)
-        dma_free_coherent(trans->dev, use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status),
-                          rxq->rb_stts, rxq->rb_stts_dma);
-    rxq->rb_stts_dma = 0;
-    rxq->rb_stts = NULL;
+  if (rxq->rb_stts)
+    dma_free_coherent(trans->dev, use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status),
+                      rxq->rb_stts, rxq->rb_stts_dma);
+  rxq->rb_stts_dma = 0;
+  rxq->rb_stts = NULL;
 
-    if (rxq->used_bd)
-        dma_free_coherent(trans->dev,
-                          (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
-                          rxq->used_bd, rxq->used_bd_dma);
-    rxq->used_bd_dma = 0;
-    rxq->used_bd = NULL;
+  if (rxq->used_bd)
+    dma_free_coherent(trans->dev, (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+                      rxq->used_bd, rxq->used_bd_dma);
+  rxq->used_bd_dma = 0;
+  rxq->used_bd = NULL;
 
-    if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) { return; }
+  if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) {
+    return;
+  }
 
-    if (rxq->tr_tail) { dma_free_coherent(dev, sizeof(__le16), rxq->tr_tail, rxq->tr_tail_dma); }
-    rxq->tr_tail_dma = 0;
-    rxq->tr_tail = NULL;
+  if (rxq->tr_tail) {
+    dma_free_coherent(dev, sizeof(__le16), rxq->tr_tail, rxq->tr_tail_dma);
+  }
+  rxq->tr_tail_dma = 0;
+  rxq->tr_tail = NULL;
 
-    if (rxq->cr_tail) { dma_free_coherent(dev, sizeof(__le16), rxq->cr_tail, rxq->cr_tail_dma); }
-    rxq->cr_tail_dma = 0;
-    rxq->cr_tail = NULL;
+  if (rxq->cr_tail) {
+    dma_free_coherent(dev, sizeof(__le16), rxq->cr_tail, rxq->cr_tail_dma);
+  }
+  rxq->cr_tail_dma = 0;
+  rxq->cr_tail = NULL;
 }
 
 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans* trans, struct iwl_rxq* rxq) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct device* dev = trans->dev;
-    int i;
-    int free_size;
-    bool use_rx_td = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct device* dev = trans->dev;
+  int i;
+  int free_size;
+  bool use_rx_td = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560);
 
-    spin_lock_init(&rxq->lock);
-    if (trans->cfg->mq_rx_supported) {
-        rxq->queue_size = MQ_RX_TABLE_SIZE;
-    } else {
-        rxq->queue_size = RX_QUEUE_SIZE;
+  spin_lock_init(&rxq->lock);
+  if (trans->cfg->mq_rx_supported) {
+    rxq->queue_size = MQ_RX_TABLE_SIZE;
+  } else {
+    rxq->queue_size = RX_QUEUE_SIZE;
+  }
+
+  free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+
+  /*
+   * Allocate the circular buffer of Read Buffer Descriptors
+   * (RBDs)
+   */
+  rxq->bd = dma_zalloc_coherent(dev, free_size * rxq->queue_size, &rxq->bd_dma, GFP_KERNEL);
+  if (!rxq->bd) {
+    goto err;
+  }
+
+  if (trans->cfg->mq_rx_supported) {
+    rxq->used_bd =
+        dma_zalloc_coherent(dev, (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+                            &rxq->used_bd_dma, GFP_KERNEL);
+    if (!rxq->used_bd) {
+      goto err;
     }
+  }
 
-    free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+  /* Allocate the driver's pointer to receive buffer status */
+  rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status),
+                                     &rxq->rb_stts_dma, GFP_KERNEL);
+  if (!rxq->rb_stts) {
+    goto err;
+  }
 
-    /*
-     * Allocate the circular buffer of Read Buffer Descriptors
-     * (RBDs)
-     */
-    rxq->bd = dma_zalloc_coherent(dev, free_size * rxq->queue_size, &rxq->bd_dma, GFP_KERNEL);
-    if (!rxq->bd) { goto err; }
-
-    if (trans->cfg->mq_rx_supported) {
-        rxq->used_bd = dma_zalloc_coherent(
-            dev, (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
-            &rxq->used_bd_dma, GFP_KERNEL);
-        if (!rxq->used_bd) { goto err; }
-    }
-
-    /* Allocate the driver's pointer to receive buffer status */
-    rxq->rb_stts =
-        dma_zalloc_coherent(dev, use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status),
-                            &rxq->rb_stts_dma, GFP_KERNEL);
-    if (!rxq->rb_stts) { goto err; }
-
-    if (!use_rx_td) { return 0; }
-
-    /* Allocate the driver's pointer to TR tail */
-    rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16), &rxq->tr_tail_dma, GFP_KERNEL);
-    if (!rxq->tr_tail) { goto err; }
-
-    /* Allocate the driver's pointer to CR tail */
-    rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16), &rxq->cr_tail_dma, GFP_KERNEL);
-    if (!rxq->cr_tail) { goto err; }
-    /*
-     * W/A 22560 device step Z0 must be non zero bug
-     * TODO: remove this when stop supporting Z0
-     */
-    *rxq->cr_tail = cpu_to_le16(500);
-
+  if (!use_rx_td) {
     return 0;
+  }
+
+  /* Allocate the driver's pointer to TR tail */
+  rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16), &rxq->tr_tail_dma, GFP_KERNEL);
+  if (!rxq->tr_tail) {
+    goto err;
+  }
+
+  /* Allocate the driver's pointer to CR tail */
+  rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16), &rxq->cr_tail_dma, GFP_KERNEL);
+  if (!rxq->cr_tail) {
+    goto err;
+  }
+  /*
+   * W/A: on 22560 device step Z0 this value must be non-zero (HW bug)
+   * TODO: remove this when we stop supporting Z0
+   */
+  *rxq->cr_tail = cpu_to_le16(500);
+
+  return 0;
 
 err:
-    for (i = 0; i < trans->num_rx_queues; i++) {
-        struct iwl_rxq* rxq = &trans_pcie->rxq[i];
+  for (i = 0; i < trans->num_rx_queues; i++) {
+    struct iwl_rxq* rxq = &trans_pcie->rxq[i];
 
-        iwl_pcie_free_rxq_dma(trans, rxq);
-    }
-    kfree(trans_pcie->rxq);
+    iwl_pcie_free_rxq_dma(trans, rxq);
+  }
+  kfree(trans_pcie->rxq);
 
-    return -ENOMEM;
+  return -ENOMEM;
 }
 
 int iwl_pcie_rx_alloc(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_rb_allocator* rba = &trans_pcie->rba;
-    int i, ret;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_rb_allocator* rba = &trans_pcie->rba;
+  int i, ret;
 
-    if (WARN_ON(trans_pcie->rxq)) { return -EINVAL; }
+  if (WARN_ON(trans_pcie->rxq)) {
+    return -EINVAL;
+  }
 
-    trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), GFP_KERNEL);
-    if (!trans_pcie->rxq) { return -EINVAL; }
+  trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), GFP_KERNEL);
+  if (!trans_pcie->rxq) {
+    return -EINVAL;
+  }
 
-    spin_lock_init(&rba->lock);
+  spin_lock_init(&rba->lock);
 
-    for (i = 0; i < trans->num_rx_queues; i++) {
-        struct iwl_rxq* rxq = &trans_pcie->rxq[i];
+  for (i = 0; i < trans->num_rx_queues; i++) {
+    struct iwl_rxq* rxq = &trans_pcie->rxq[i];
 
-        ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
-        if (ret) { return ret; }
+    ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
+    if (ret) {
+      return ret;
     }
-    return 0;
+  }
+  return 0;
 }
 
 static void iwl_pcie_rx_hw_init(struct iwl_trans* trans, struct iwl_rxq* rxq) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    uint32_t rb_size;
-    unsigned long flags;
-    const uint32_t rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  uint32_t rb_size;
+  unsigned long flags;
+  const uint32_t rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
 
-    switch (trans_pcie->rx_buf_size) {
+  switch (trans_pcie->rx_buf_size) {
     case IWL_AMSDU_4K:
-        rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-        break;
+      rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+      break;
     case IWL_AMSDU_8K:
-        rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
-        break;
+      rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+      break;
     case IWL_AMSDU_12K:
-        rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
-        break;
+      rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
+      break;
     default:
-        WARN_ON(1);
-        rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-    }
+      WARN_ON(1);
+      rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+  }
 
-    if (!iwl_trans_grab_nic_access(trans, &flags)) { return; }
+  if (!iwl_trans_grab_nic_access(trans, &flags)) {
+    return;
+  }
 
-    /* Stop Rx DMA */
-    iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-    /* reset and flush pointers */
-    iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
-    iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
-    iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
+  /* Stop Rx DMA */
+  iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+  /* reset and flush pointers */
+  iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+  iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+  iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
 
-    /* Reset driver's Rx queue write index */
-    iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+  /* Reset driver's Rx queue write index */
+  iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
 
-    /* Tell device where to find RBD circular buffer in DRAM */
-    iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, (uint32_t)(rxq->bd_dma >> 8));
+  /* Tell device where to find RBD circular buffer in DRAM */
+  iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, (uint32_t)(rxq->bd_dma >> 8));
 
-    /* Tell device where in DRAM to update its Rx status */
-    iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
+  /* Tell device where in DRAM to update its Rx status */
+  iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
 
-    /* Enable Rx DMA
-     * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
-     *      the credit mechanism in 5000 HW RX FIFO
-     * Direct rx interrupts to hosts
-     * Rx buffer size 4 or 8k or 12k
-     * RB timeout 0x10
-     * 256 RBDs
-     */
-    iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
-                FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
-                    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | rb_size |
-                    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
-                    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+  /* Enable Rx DMA
+   * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+   *      the credit mechanism in 5000 HW RX FIFO
+   * Direct rx interrupts to hosts
+   * Rx buffer size 4 or 8k or 12k
+   * RB timeout 0x10
+   * 256 RBDs
+   */
+  iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+              FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+                  FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | rb_size |
+                  (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+                  (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
 
-    iwl_trans_release_nic_access(trans, &flags);
+  iwl_trans_release_nic_access(trans, &flags);
 
-    /* Set interrupt coalescing timer to default (2048 usecs) */
-    iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+  /* Set interrupt coalescing timer to default (2048 usecs) */
+  iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
 
-    /* W/A for interrupt coalescing bug in 7260 and 3160 */
-    if (trans->cfg->host_interrupt_operation_mode) {
-        iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
-    }
+  /* W/A for interrupt coalescing bug in 7260 and 3160 */
+  if (trans->cfg->host_interrupt_operation_mode) {
+    iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
+  }
 }
 
 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    uint32_t rb_size, enabled = 0;
-    unsigned long flags;
-    int i;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  uint32_t rb_size, enabled = 0;
+  unsigned long flags;
+  int i;
 
-    switch (trans_pcie->rx_buf_size) {
+  switch (trans_pcie->rx_buf_size) {
     case IWL_AMSDU_2K:
-        rb_size = RFH_RXF_DMA_RB_SIZE_2K;
-        break;
+      rb_size = RFH_RXF_DMA_RB_SIZE_2K;
+      break;
     case IWL_AMSDU_4K:
-        rb_size = RFH_RXF_DMA_RB_SIZE_4K;
-        break;
+      rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+      break;
     case IWL_AMSDU_8K:
-        rb_size = RFH_RXF_DMA_RB_SIZE_8K;
-        break;
+      rb_size = RFH_RXF_DMA_RB_SIZE_8K;
+      break;
     case IWL_AMSDU_12K:
-        rb_size = RFH_RXF_DMA_RB_SIZE_12K;
-        break;
+      rb_size = RFH_RXF_DMA_RB_SIZE_12K;
+      break;
     default:
-        WARN_ON(1);
-        rb_size = RFH_RXF_DMA_RB_SIZE_4K;
-    }
+      WARN_ON(1);
+      rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+  }
 
-    if (!iwl_trans_grab_nic_access(trans, &flags)) { return; }
+  if (!iwl_trans_grab_nic_access(trans, &flags)) {
+    return;
+  }
 
-    /* Stop Rx DMA */
-    iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
-    /* disable free amd used rx queue operation */
-    iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
+  /* Stop Rx DMA */
+  iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
+  /* disable free and used rx queue operation */
+  iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
 
-    for (i = 0; i < trans->num_rx_queues; i++) {
-        /* Tell device where to find RBD free table in DRAM */
-        iwl_write_prph64_no_grab(trans, RFH_Q_FRBDCB_BA_LSB(i), trans_pcie->rxq[i].bd_dma);
-        /* Tell device where to find RBD used table in DRAM */
-        iwl_write_prph64_no_grab(trans, RFH_Q_URBDCB_BA_LSB(i), trans_pcie->rxq[i].used_bd_dma);
-        /* Tell device where in DRAM to update its Rx status */
-        iwl_write_prph64_no_grab(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
-                                 trans_pcie->rxq[i].rb_stts_dma);
-        /* Reset device indice tables */
-        iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
-        iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
-        iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
+  for (i = 0; i < trans->num_rx_queues; i++) {
+    /* Tell device where to find RBD free table in DRAM */
+    iwl_write_prph64_no_grab(trans, RFH_Q_FRBDCB_BA_LSB(i), trans_pcie->rxq[i].bd_dma);
+    /* Tell device where to find RBD used table in DRAM */
+    iwl_write_prph64_no_grab(trans, RFH_Q_URBDCB_BA_LSB(i), trans_pcie->rxq[i].used_bd_dma);
+    /* Tell device where in DRAM to update its Rx status */
+    iwl_write_prph64_no_grab(trans, RFH_Q_URBD_STTS_WPTR_LSB(i), trans_pcie->rxq[i].rb_stts_dma);
+    /* Reset device index tables */
+    iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
+    iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
+    iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
 
-        enabled |= BIT(i) | BIT(i + 16);
-    }
+    enabled |= BIT(i) | BIT(i + 16);
+  }
 
-    /*
-     * Enable Rx DMA
-     * Rx buffer size 4 or 8k or 12k
-     * Min RB size 4 or 8
-     * Drop frames that exceed RB size
-     * 512 RBDs
-     */
-    iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
-                           RFH_DMA_EN_ENABLE_VAL | rb_size | RFH_RXF_DMA_MIN_RB_4_8 |
-                               RFH_RXF_DMA_DROP_TOO_LARGE_MASK | RFH_RXF_DMA_RBDCB_SIZE_512);
+  /*
+   * Enable Rx DMA
+   * Rx buffer size 4 or 8k or 12k
+   * Min RB size 4 or 8
+   * Drop frames that exceed RB size
+   * 512 RBDs
+   */
+  iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
+                         RFH_DMA_EN_ENABLE_VAL | rb_size | RFH_RXF_DMA_MIN_RB_4_8 |
+                             RFH_RXF_DMA_DROP_TOO_LARGE_MASK | RFH_RXF_DMA_RBDCB_SIZE_512);
 
-    /*
-     * Activate DMA snooping.
-     * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
-     * Default queue is 0
-     */
-    iwl_write_prph_no_grab(
-        trans, RFH_GEN_CFG,
-        RFH_GEN_CFG_RFH_DMA_SNOOP | RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
-            RFH_GEN_CFG_SERVICE_DMA_SNOOP |
-            RFH_GEN_CFG_VAL(RB_CHUNK_SIZE, trans->cfg->integrated ? RFH_GEN_CFG_RB_CHUNK_SIZE_64
-                                                                  : RFH_GEN_CFG_RB_CHUNK_SIZE_128));
-    /* Enable the relevant rx queues */
-    iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+  /*
+   * Activate DMA snooping.
+   * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
+   * Default queue is 0
+   */
+  iwl_write_prph_no_grab(
+      trans, RFH_GEN_CFG,
+      RFH_GEN_CFG_RFH_DMA_SNOOP | RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
+          RFH_GEN_CFG_SERVICE_DMA_SNOOP |
+          RFH_GEN_CFG_VAL(RB_CHUNK_SIZE, trans->cfg->integrated ? RFH_GEN_CFG_RB_CHUNK_SIZE_64
+                                                                : RFH_GEN_CFG_RB_CHUNK_SIZE_128));
+  /* Enable the relevant rx queues */
+  iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
 
-    iwl_trans_release_nic_access(trans, &flags);
+  iwl_trans_release_nic_access(trans, &flags);
 
-    /* Set interrupt coalescing timer to default (2048 usecs) */
-    iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+  /* Set interrupt coalescing timer to default (2048 usecs) */
+  iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
 }
 
 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq* rxq) {
-    lockdep_assert_held(&rxq->lock);
+  lockdep_assert_held(&rxq->lock);
 
-    INIT_LIST_HEAD(&rxq->rx_free);
-    INIT_LIST_HEAD(&rxq->rx_used);
-    rxq->free_count = 0;
-    rxq->used_count = 0;
+  INIT_LIST_HEAD(&rxq->rx_free);
+  INIT_LIST_HEAD(&rxq->rx_used);
+  rxq->free_count = 0;
+  rxq->used_count = 0;
 }
 
 int iwl_pcie_dummy_napi_poll(struct napi_struct* napi, int budget) {
-    WARN_ON(1);
-    return 0;
+  WARN_ON(1);
+  return 0;
 }
 
 int _iwl_pcie_rx_init(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_rxq* def_rxq;
-    struct iwl_rb_allocator* rba = &trans_pcie->rba;
-    int i, err, queue_size, allocator_pool_size, num_alloc;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_rxq* def_rxq;
+  struct iwl_rb_allocator* rba = &trans_pcie->rba;
+  int i, err, queue_size, allocator_pool_size, num_alloc;
 
-    if (!trans_pcie->rxq) {
-        err = iwl_pcie_rx_alloc(trans);
-        if (err) { return err; }
+  if (!trans_pcie->rxq) {
+    err = iwl_pcie_rx_alloc(trans);
+    if (err) {
+      return err;
     }
-    def_rxq = trans_pcie->rxq;
+  }
+  def_rxq = trans_pcie->rxq;
 
-    cancel_work_sync(&rba->rx_alloc);
+  cancel_work_sync(&rba->rx_alloc);
 
-    spin_lock(&rba->lock);
-    atomic_set(&rba->req_pending, 0);
-    atomic_set(&rba->req_ready, 0);
-    INIT_LIST_HEAD(&rba->rbd_allocated);
-    INIT_LIST_HEAD(&rba->rbd_empty);
-    spin_unlock(&rba->lock);
+  spin_lock(&rba->lock);
+  atomic_set(&rba->req_pending, 0);
+  atomic_set(&rba->req_ready, 0);
+  INIT_LIST_HEAD(&rba->rbd_allocated);
+  INIT_LIST_HEAD(&rba->rbd_empty);
+  spin_unlock(&rba->lock);
 
-    /* free all first - we might be reconfigured for a different size */
-    iwl_pcie_free_rbs_pool(trans);
+  /* free all first - we might be reconfigured for a different size */
+  iwl_pcie_free_rbs_pool(trans);
 
-    for (i = 0; i < RX_QUEUE_SIZE; i++) {
-        def_rxq->queue[i] = NULL;
+  for (i = 0; i < RX_QUEUE_SIZE; i++) {
+    def_rxq->queue[i] = NULL;
+  }
+
+  for (i = 0; i < trans->num_rx_queues; i++) {
+    struct iwl_rxq* rxq = &trans_pcie->rxq[i];
+
+    rxq->id = i;
+
+    spin_lock(&rxq->lock);
+    /*
+     * Set read write pointer to reflect that we have processed
+     * and used all buffers, but have not restocked the Rx queue
+     * with fresh buffers
+     */
+    rxq->read = 0;
+    rxq->write = 0;
+    rxq->write_actual = 0;
+    memset(rxq->rb_stts, 0,
+           (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? sizeof(__le16)
+                                                                  : sizeof(struct iwl_rb_status));
+
+    iwl_pcie_rx_init_rxb_lists(rxq);
+
+    if (!rxq->napi.poll) {
+      netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, iwl_pcie_dummy_napi_poll, 64);
     }
 
-    for (i = 0; i < trans->num_rx_queues; i++) {
-        struct iwl_rxq* rxq = &trans_pcie->rxq[i];
+    spin_unlock(&rxq->lock);
+  }
 
-        rxq->id = i;
+  /* move the pool to the default queue and allocator ownerships */
+  queue_size = trans->cfg->mq_rx_supported ? MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
+  allocator_pool_size = trans->num_rx_queues * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
+  num_alloc = queue_size + allocator_pool_size;
+  BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) != ARRAY_SIZE(trans_pcie->rx_pool));
+  for (i = 0; i < num_alloc; i++) {
+    struct iwl_rx_mem_buffer* rxb = &trans_pcie->rx_pool[i];
 
-        spin_lock(&rxq->lock);
-        /*
-         * Set read write pointer to reflect that we have processed
-         * and used all buffers, but have not restocked the Rx queue
-         * with fresh buffers
-         */
-        rxq->read = 0;
-        rxq->write = 0;
-        rxq->write_actual = 0;
-        memset(rxq->rb_stts, 0,
-               (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-                   ? sizeof(__le16)
-                   : sizeof(struct iwl_rb_status));
-
-        iwl_pcie_rx_init_rxb_lists(rxq);
-
-        if (!rxq->napi.poll) {
-            netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, iwl_pcie_dummy_napi_poll, 64);
-        }
-
-        spin_unlock(&rxq->lock);
+    if (i < allocator_pool_size) {
+      list_add(&rxb->list, &rba->rbd_empty);
+    } else {
+      list_add(&rxb->list, &def_rxq->rx_used);
     }
+    trans_pcie->global_table[i] = rxb;
+    rxb->vid = (uint16_t)(i + 1);
+    rxb->invalid = true;
+  }
 
-    /* move the pool to the default queue and allocator ownerships */
-    queue_size = trans->cfg->mq_rx_supported ? MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
-    allocator_pool_size = trans->num_rx_queues * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
-    num_alloc = queue_size + allocator_pool_size;
-    BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) != ARRAY_SIZE(trans_pcie->rx_pool));
-    for (i = 0; i < num_alloc; i++) {
-        struct iwl_rx_mem_buffer* rxb = &trans_pcie->rx_pool[i];
+  iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
 
-        if (i < allocator_pool_size) {
-            list_add(&rxb->list, &rba->rbd_empty);
-        } else {
-            list_add(&rxb->list, &def_rxq->rx_used);
-        }
-        trans_pcie->global_table[i] = rxb;
-        rxb->vid = (uint16_t)(i + 1);
-        rxb->invalid = true;
-    }
-
-    iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
-
-    return 0;
+  return 0;
 }
 
 int iwl_pcie_rx_init(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    int ret = _iwl_pcie_rx_init(trans);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  int ret = _iwl_pcie_rx_init(trans);
 
-    if (ret) { return ret; }
+  if (ret) {
+    return ret;
+  }
 
-    if (trans->cfg->mq_rx_supported) {
-        iwl_pcie_rx_mq_hw_init(trans);
-    } else {
-        iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
-    }
+  if (trans->cfg->mq_rx_supported) {
+    iwl_pcie_rx_mq_hw_init(trans);
+  } else {
+    iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
+  }
 
-    iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
+  iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
 
-    spin_lock(&trans_pcie->rxq->lock);
-    iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
-    spin_unlock(&trans_pcie->rxq->lock);
+  spin_lock(&trans_pcie->rxq->lock);
+  iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
+  spin_unlock(&trans_pcie->rxq->lock);
 
-    return 0;
+  return 0;
 }
 
 int iwl_pcie_gen2_rx_init(struct iwl_trans* trans) {
-    /* Set interrupt coalescing timer to default (2048 usecs) */
-    iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+  /* Set interrupt coalescing timer to default (2048 usecs) */
+  iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
 
-    /*
-     * We don't configure the RFH.
-     * Restock will be done at alive, after firmware configured the RFH.
-     */
-    return _iwl_pcie_rx_init(trans);
+  /*
+   * We don't configure the RFH.
+   * Restock will be done at alive, after firmware configured the RFH.
+   */
+  return _iwl_pcie_rx_init(trans);
 }
 
 void iwl_pcie_rx_free(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_rb_allocator* rba = &trans_pcie->rba;
-    int i;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_rb_allocator* rba = &trans_pcie->rba;
+  int i;
 
-    /*
-     * if rxq is NULL, it means that nothing has been allocated,
-     * exit now
-     */
-    if (!trans_pcie->rxq) {
-        IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
-        return;
+  /*
+   * if rxq is NULL, it means that nothing has been allocated,
+   * exit now
+   */
+  if (!trans_pcie->rxq) {
+    IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+    return;
+  }
+
+  cancel_work_sync(&rba->rx_alloc);
+
+  iwl_pcie_free_rbs_pool(trans);
+
+  for (i = 0; i < trans->num_rx_queues; i++) {
+    struct iwl_rxq* rxq = &trans_pcie->rxq[i];
+
+    iwl_pcie_free_rxq_dma(trans, rxq);
+
+    if (rxq->napi.poll) {
+      netif_napi_del(&rxq->napi);
     }
-
-    cancel_work_sync(&rba->rx_alloc);
-
-    iwl_pcie_free_rbs_pool(trans);
-
-    for (i = 0; i < trans->num_rx_queues; i++) {
-        struct iwl_rxq* rxq = &trans_pcie->rxq[i];
-
-        iwl_pcie_free_rxq_dma(trans, rxq);
-
-        if (rxq->napi.poll) { netif_napi_del(&rxq->napi); }
-    }
-    kfree(trans_pcie->rxq);
+  }
+  kfree(trans_pcie->rxq);
 }
 
 static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq* rxq, struct iwl_rb_allocator* rba) {
-    spin_lock(&rba->lock);
-    list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-    spin_unlock(&rba->lock);
+  spin_lock(&rba->lock);
+  list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+  spin_unlock(&rba->lock);
 }
 
 /*
@@ -1029,306 +1075,330 @@
  */
 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans* trans, struct iwl_rx_mem_buffer* rxb,
                                   struct iwl_rxq* rxq, bool emergency) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_rb_allocator* rba = &trans_pcie->rba;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_rb_allocator* rba = &trans_pcie->rba;
 
-    /* Move the RBD to the used list, will be moved to allocator in batches
-     * before claiming or posting a request*/
-    list_add_tail(&rxb->list, &rxq->rx_used);
+  /* Move the RBD to the used list, will be moved to allocator in batches
+   * before claiming or posting a request*/
+  list_add_tail(&rxb->list, &rxq->rx_used);
 
-    if (unlikely(emergency)) { return; }
+  if (unlikely(emergency)) {
+    return;
+  }
 
-    /* Count the allocator owned RBDs */
-    rxq->used_count++;
+  /* Count the allocator owned RBDs */
+  rxq->used_count++;
 
-    /* If we have RX_POST_REQ_ALLOC new released rx buffers -
-     * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
-     * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
-     * after but we still need to post another request.
-     */
-    if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
-        /* Move the 2 RBDs to the allocator ownership.
-         Allocator has another 6 from pool for the request completion*/
-        iwl_pcie_rx_move_to_allocator(rxq, rba);
+  /* If we have RX_POST_REQ_ALLOC new released rx buffers -
+   * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
+   * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
+   * after but we still need to post another request.
+   */
+  if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+    /* Move the 2 RBDs to the allocator ownership.
+     Allocator has another 6 from pool for the request completion*/
+    iwl_pcie_rx_move_to_allocator(rxq, rba);
 
-        atomic_inc(&rba->req_pending);
-        queue_work(rba->alloc_wq, &rba->rx_alloc);
-    }
+    atomic_inc(&rba->req_pending);
+    queue_work(rba->alloc_wq, &rba->rx_alloc);
+  }
 }
 
 static void iwl_pcie_rx_handle_rb(struct iwl_trans* trans, struct iwl_rxq* rxq,
                                   struct iwl_rx_mem_buffer* rxb, bool emergency, int i) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_txq* txq = trans_pcie->txq[trans_pcie->cmd_queue];
-    bool page_stolen = false;
-    int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
-    uint32_t offset = 0;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_txq* txq = trans_pcie->txq[trans_pcie->cmd_queue];
+  bool page_stolen = false;
+  int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+  uint32_t offset = 0;
 
-    if (WARN_ON(!rxb)) { return; }
+  if (WARN_ON(!rxb)) {
+    return;
+  }
 
-    dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
+  dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
 
-    while (offset + sizeof(uint32_t) + sizeof(struct iwl_cmd_header) < max_len) {
-        struct iwl_rx_packet* pkt;
-        uint16_t sequence;
-        bool reclaim;
-        int index, cmd_index, len;
-        struct iwl_rx_cmd_buffer rxcb = {
-            ._offset = offset,
-            ._rx_page_order = trans_pcie->rx_page_order,
-            ._page = rxb->page,
-            ._page_stolen = false,
-            .truesize = max_len,
-        };
+  while (offset + sizeof(uint32_t) + sizeof(struct iwl_cmd_header) < max_len) {
+    struct iwl_rx_packet* pkt;
+    uint16_t sequence;
+    bool reclaim;
+    int index, cmd_index, len;
+    struct iwl_rx_cmd_buffer rxcb = {
+        ._offset = offset,
+        ._rx_page_order = trans_pcie->rx_page_order,
+        ._page = rxb->page,
+        ._page_stolen = false,
+        .truesize = max_len,
+    };
 
-        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-            rxcb.status = rxq->cd[i].status;
-        }
-
-        pkt = rxb_addr(&rxcb);
-
-        if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
-            IWL_DEBUG_RX(trans, "Q %d: RB end marker at offset %d\n", rxq->id, offset);
-            break;
-        }
-
-        WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS != rxq->id,
-             "frame on invalid queue - is on %d and indicates %d\n", rxq->id,
-             (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS);
-
-        IWL_DEBUG_RX(trans, "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", rxq->id, offset,
-                     iwl_get_cmd_string(trans, iwl_cmd_id(pkt->hdr.cmd, pkt->hdr.group_id, 0)),
-                     pkt->hdr.group_id, pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
-
-        len = iwl_rx_packet_len(pkt);
-        len += sizeof(uint32_t); /* account for status word */
-        trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
-        trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
-
-        /* Reclaim a command buffer only if this packet is a response
-         *   to a (driver-originated) command.
-         * If the packet (e.g. Rx frame) originated from uCode,
-         *   there is no command buffer to reclaim.
-         * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
-         *   but apparently a few don't get set; catch them here. */
-        reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
-        if (reclaim && !pkt->hdr.group_id) {
-            int i;
-
-            for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
-                if (trans_pcie->no_reclaim_cmds[i] == pkt->hdr.cmd) {
-                    reclaim = false;
-                    break;
-                }
-            }
-        }
-
-        sequence = le16_to_cpu(pkt->hdr.sequence);
-        index = SEQ_TO_INDEX(sequence);
-        cmd_index = iwl_pcie_get_cmd_index(txq, index);
-
-        if (rxq->id == trans_pcie->def_rx_queue) {
-            iwl_op_mode_rx(trans->op_mode, &rxq->napi, &rxcb);
-        } else {
-            iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, &rxcb, rxq->id);
-        }
-
-        if (reclaim) {
-            kzfree(txq->entries[cmd_index].free_buf);
-            txq->entries[cmd_index].free_buf = NULL;
-        }
-
-        /*
-         * After here, we should always check rxcb._page_stolen,
-         * if it is true then one of the handlers took the page.
-         */
-
-        if (reclaim) {
-            /* Invoke any callbacks, transfer the buffer to caller,
-             * and fire off the (possibly) blocking
-             * iwl_trans_send_cmd()
-             * as we reclaim the driver command queue */
-            if (!rxcb._page_stolen) {
-                iwl_pcie_hcmd_complete(trans, &rxcb);
-            } else {
-                IWL_WARN(trans, "Claim null rxb?\n");
-            }
-        }
-
-        page_stolen |= rxcb._page_stolen;
-        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { break; }
-        offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
+    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+      rxcb.status = rxq->cd[i].status;
     }
 
-    /* page was stolen from us -- free our reference */
-    if (page_stolen) {
-        __free_pages(rxb->page, trans_pcie->rx_page_order);
-        rxb->page = NULL;
+    pkt = rxb_addr(&rxcb);
+
+    if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
+      IWL_DEBUG_RX(trans, "Q %d: RB end marker at offset %d\n", rxq->id, offset);
+      break;
     }
 
-    /* Reuse the page if possible. For notification packets and
-     * SKBs that fail to Rx correctly, add them back into the
-     * rx_free list for reuse later. */
-    if (rxb->page != NULL) {
-        rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
-                                     PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE);
-        if (dma_mapping_error(trans->dev, rxb->page_dma)) {
-            /*
-             * free the page(s) as well to not break
-             * the invariant that the items on the used
-             * list have no page(s)
-             */
-            __free_pages(rxb->page, trans_pcie->rx_page_order);
-            rxb->page = NULL;
-            iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
-        } else {
-            list_add_tail(&rxb->list, &rxq->rx_free);
-            rxq->free_count++;
+    WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS != rxq->id,
+         "frame on invalid queue - is on %d and indicates %d\n", rxq->id,
+         (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS);
+
+    IWL_DEBUG_RX(trans, "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", rxq->id, offset,
+                 iwl_get_cmd_string(trans, iwl_cmd_id(pkt->hdr.cmd, pkt->hdr.group_id, 0)),
+                 pkt->hdr.group_id, pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
+
+    len = iwl_rx_packet_len(pkt);
+    len += sizeof(uint32_t); /* account for status word */
+    trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
+    trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
+
+    /* Reclaim a command buffer only if this packet is a response
+     *   to a (driver-originated) command.
+     * If the packet (e.g. Rx frame) originated from uCode,
+     *   there is no command buffer to reclaim.
+     * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+     *   but apparently a few don't get set; catch them here. */
+    reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
+    if (reclaim && !pkt->hdr.group_id) {
+      int i;
+
+      for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
+        if (trans_pcie->no_reclaim_cmds[i] == pkt->hdr.cmd) {
+          reclaim = false;
+          break;
         }
+      }
+    }
+
+    sequence = le16_to_cpu(pkt->hdr.sequence);
+    index = SEQ_TO_INDEX(sequence);
+    cmd_index = iwl_pcie_get_cmd_index(txq, index);
+
+    if (rxq->id == trans_pcie->def_rx_queue) {
+      iwl_op_mode_rx(trans->op_mode, &rxq->napi, &rxcb);
     } else {
-        iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
+      iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, &rxcb, rxq->id);
     }
+
+    if (reclaim) {
+      kzfree(txq->entries[cmd_index].free_buf);
+      txq->entries[cmd_index].free_buf = NULL;
+    }
+
+    /*
+     * After here, we should always check rxcb._page_stolen,
+     * if it is true then one of the handlers took the page.
+     */
+
+    if (reclaim) {
+      /* Invoke any callbacks, transfer the buffer to caller,
+       * and fire off the (possibly) blocking
+       * iwl_trans_send_cmd()
+       * as we reclaim the driver command queue */
+      if (!rxcb._page_stolen) {
+        iwl_pcie_hcmd_complete(trans, &rxcb);
+      } else {
+        IWL_WARN(trans, "Claim null rxb?\n");
+      }
+    }
+
+    page_stolen |= rxcb._page_stolen;
+    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+      break;
+    }
+    offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
+  }
+
+  /* page was stolen from us -- free our reference */
+  if (page_stolen) {
+    __free_pages(rxb->page, trans_pcie->rx_page_order);
+    rxb->page = NULL;
+  }
+
+  /* Reuse the page if possible. For notification packets and
+   * SKBs that fail to Rx correctly, add them back into the
+   * rx_free list for reuse later. */
+  if (rxb->page != NULL) {
+    rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0, PAGE_SIZE << trans_pcie->rx_page_order,
+                                 DMA_FROM_DEVICE);
+    if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+      /*
+       * free the page(s) as well to not break
+       * the invariant that the items on the used
+       * list have no page(s)
+       */
+      __free_pages(rxb->page, trans_pcie->rx_page_order);
+      rxb->page = NULL;
+      iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
+    } else {
+      list_add_tail(&rxb->list, &rxq->rx_free);
+      rxq->free_count++;
+    }
+  } else {
+    iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
+  }
 }
 
 static struct iwl_rx_mem_buffer* iwl_pcie_get_rxb(struct iwl_trans* trans, struct iwl_rxq* rxq,
                                                   int i) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_rx_mem_buffer* rxb;
-    uint16_t vid;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_rx_mem_buffer* rxb;
+  uint16_t vid;
 
-    if (!trans->cfg->mq_rx_supported) {
-        rxb = rxq->queue[i];
-        rxq->queue[i] = NULL;
-        return rxb;
-    }
-
-    /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-        vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
-    } else {
-        vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
-    }
-
-    if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table)) { goto out_err; }
-
-    rxb = trans_pcie->global_table[vid - 1];
-    if (rxb->invalid) { goto out_err; }
-
-    IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (uint32_t)rxb->vid);
-
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-        rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
-    }
-
-    rxb->invalid = true;
-
+  if (!trans->cfg->mq_rx_supported) {
+    rxb = rxq->queue[i];
+    rxq->queue[i] = NULL;
     return rxb;
+  }
+
+  /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
+  if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+    vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+  } else {
+    vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+  }
+
+  if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table)) {
+    goto out_err;
+  }
+
+  rxb = trans_pcie->global_table[vid - 1];
+  if (rxb->invalid) {
+    goto out_err;
+  }
+
+  IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (uint32_t)rxb->vid);
+
+  if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+    rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
+  }
+
+  rxb->invalid = true;
+
+  return rxb;
 
 out_err:
-    WARN(1, "Invalid rxb from HW %u\n", (uint32_t)vid);
-    iwl_force_nmi(trans);
-    return NULL;
+  WARN(1, "Invalid rxb from HW %u\n", (uint32_t)vid);
+  iwl_force_nmi(trans);
+  return NULL;
 }
 
 /*
  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
  */
 static void iwl_pcie_rx_handle(struct iwl_trans* trans, int queue) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_rxq* rxq = &trans_pcie->rxq[queue];
-    uint32_t r, i, count = 0;
-    bool emergency = false;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_rxq* rxq = &trans_pcie->rxq[queue];
+  uint32_t r, i, count = 0;
+  bool emergency = false;
 
 restart:
-    spin_lock(&rxq->lock);
-    /* uCode's read index (stored in shared DRAM) indicates the last Rx
-     * buffer that the driver may process (last buffer filled by ucode). */
-    r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
-    i = rxq->read;
+  spin_lock(&rxq->lock);
+  /* uCode's read index (stored in shared DRAM) indicates the last Rx
+   * buffer that the driver may process (last buffer filled by ucode). */
+  r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
+  i = rxq->read;
 
-    /* W/A 9000 device step A0 wrap-around bug */
-    r &= (rxq->queue_size - 1);
+  /* W/A 9000 device step A0 wrap-around bug */
+  r &= (rxq->queue_size - 1);
 
-    /* Rx interrupt, but nothing sent from uCode */
-    if (i == r) { IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); }
+  /* Rx interrupt, but nothing sent from uCode */
+  if (i == r) {
+    IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
+  }
 
-    while (i != r) {
-        struct iwl_rb_allocator* rba = &trans_pcie->rba;
-        struct iwl_rx_mem_buffer* rxb;
-        /* number of RBDs still waiting for page allocation */
-        uint32_t rb_pending_alloc = atomic_read(&trans_pcie->rba.req_pending) * RX_CLAIM_REQ_ALLOC;
+  while (i != r) {
+    struct iwl_rb_allocator* rba = &trans_pcie->rba;
+    struct iwl_rx_mem_buffer* rxb;
+    /* number of RBDs still waiting for page allocation */
+    uint32_t rb_pending_alloc = atomic_read(&trans_pcie->rba.req_pending) * RX_CLAIM_REQ_ALLOC;
 
-        if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && !emergency)) {
-            iwl_pcie_rx_move_to_allocator(rxq, rba);
-            emergency = true;
-        }
-
-        IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
-
-        rxb = iwl_pcie_get_rxb(trans, rxq, i);
-        if (!rxb) { goto out; }
-
-        iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
-
-        i = (i + 1) & (rxq->queue_size - 1);
-
-        /*
-         * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
-         * try to claim the pre-allocated buffers from the allocator.
-         * If not ready - will try to reclaim next time.
-         * There is no need to reschedule work - allocator exits only
-         * on success
-         */
-        if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { iwl_pcie_rx_allocator_get(trans, rxq); }
-
-        if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
-            /* Add the remaining empty RBDs for allocator use */
-            iwl_pcie_rx_move_to_allocator(rxq, rba);
-        } else if (emergency) {
-            count++;
-            if (count == 8) {
-                count = 0;
-                if (rb_pending_alloc < rxq->queue_size / 3) { emergency = false; }
-
-                rxq->read = i;
-                spin_unlock(&rxq->lock);
-                iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
-                iwl_pcie_rxq_restock(trans, rxq);
-                goto restart;
-            }
-        }
+    if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && !emergency)) {
+      iwl_pcie_rx_move_to_allocator(rxq, rba);
+      emergency = true;
     }
-out:
-    /* Backtrack one entry */
-    rxq->read = i;
-    /* update cr tail with the rxq read pointer */
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { *rxq->cr_tail = cpu_to_le16(r); }
-    spin_unlock(&rxq->lock);
+
+    IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
+
+    rxb = iwl_pcie_get_rxb(trans, rxq, i);
+    if (!rxb) {
+      goto out;
+    }
+
+    iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
+
+    i = (i + 1) & (rxq->queue_size - 1);
 
     /*
-     * handle a case where in emergency there are some unallocated RBDs.
-     * those RBDs are in the used list, but are not tracked by the queue's
-     * used_count which counts allocator owned RBDs.
-     * unallocated emergency RBDs must be allocated on exit, otherwise
-     * when called again the function may not be in emergency mode and
-     * they will be handed to the allocator with no tracking in the RBD
-     * allocator counters, which will lead to them never being claimed back
-     * by the queue.
-     * by allocating them here, they are now in the queue free list, and
-     * will be restocked by the next call of iwl_pcie_rxq_restock.
+     * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+     * try to claim the pre-allocated buffers from the allocator.
+     * If not ready - will try to reclaim next time.
+     * There is no need to reschedule work - allocator exits only
+     * on success
      */
-    if (unlikely(emergency && count)) { iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); }
+    if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+      iwl_pcie_rx_allocator_get(trans, rxq);
+    }
 
-    if (rxq->napi.poll) { napi_gro_flush(&rxq->napi, false); }
+    if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
+      /* Add the remaining empty RBDs for allocator use */
+      iwl_pcie_rx_move_to_allocator(rxq, rba);
+    } else if (emergency) {
+      count++;
+      if (count == 8) {
+        count = 0;
+        if (rb_pending_alloc < rxq->queue_size / 3) {
+          emergency = false;
+        }
 
-    iwl_pcie_rxq_restock(trans, rxq);
+        rxq->read = i;
+        spin_unlock(&rxq->lock);
+        iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
+        iwl_pcie_rxq_restock(trans, rxq);
+        goto restart;
+      }
+    }
+  }
+out:
+  /* Backtrack one entry */
+  rxq->read = i;
+  /* update cr tail with the rxq read pointer */
+  if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+    *rxq->cr_tail = cpu_to_le16(r);
+  }
+  spin_unlock(&rxq->lock);
+
+  /*
+   * handle a case where in emergency there are some unallocated RBDs.
+   * those RBDs are in the used list, but are not tracked by the queue's
+   * used_count which counts allocator owned RBDs.
+   * unallocated emergency RBDs must be allocated on exit, otherwise
+   * when called again the function may not be in emergency mode and
+   * they will be handed to the allocator with no tracking in the RBD
+   * allocator counters, which will lead to them never being claimed back
+   * by the queue.
+   * by allocating them here, they are now in the queue free list, and
+   * will be restocked by the next call of iwl_pcie_rxq_restock.
+   */
+  if (unlikely(emergency && count)) {
+    iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
+  }
+
+  if (rxq->napi.poll) {
+    napi_gro_flush(&rxq->napi, false);
+  }
+
+  iwl_pcie_rxq_restock(trans, rxq);
 }
 
 static struct iwl_trans_pcie* iwl_pcie_get_trans_pcie(struct msix_entry* entry) {
-    uint8_t queue = entry->entry;
-    struct msix_entry* entries = entry - queue;
+  uint8_t queue = entry->entry;
+  struct msix_entry* entries = entry - queue;
 
-    return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
+  return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
 }
 
 /*
@@ -1336,69 +1406,73 @@
  * This interrupt handler should be used with RSS queue only.
  */
 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void* dev_id) {
-    struct msix_entry* entry = dev_id;
-    struct iwl_trans_pcie* trans_pcie = iwl_pcie_get_trans_pcie(entry);
-    struct iwl_trans* trans = trans_pcie->trans;
+  struct msix_entry* entry = dev_id;
+  struct iwl_trans_pcie* trans_pcie = iwl_pcie_get_trans_pcie(entry);
+  struct iwl_trans* trans = trans_pcie->trans;
 
-    trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
+  trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
 
-    if (WARN_ON(entry->entry >= trans->num_rx_queues)) { return IRQ_NONE; }
+  if (WARN_ON(entry->entry >= trans->num_rx_queues)) {
+    return IRQ_NONE;
+  }
 
-    lock_map_acquire(&trans->sync_cmd_lockdep_map);
+  lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
-    local_bh_disable();
-    iwl_pcie_rx_handle(trans, entry->entry);
-    local_bh_enable();
+  local_bh_disable();
+  iwl_pcie_rx_handle(trans, entry->entry);
+  local_bh_enable();
 
-    iwl_pcie_clear_irq(trans, entry);
+  iwl_pcie_clear_irq(trans, entry);
 
-    lock_map_release(&trans->sync_cmd_lockdep_map);
+  lock_map_release(&trans->sync_cmd_lockdep_map);
 
-    return IRQ_HANDLED;
+  return IRQ_HANDLED;
 }
 
 /*
  * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
  */
 static void iwl_pcie_irq_handle_error(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    int i;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  int i;
 
-    /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
-    if (trans->cfg->internal_wimax_coex && !trans->cfg->apmg_not_supported &&
-        (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & APMS_CLK_VAL_MRB_FUNC_MODE) ||
-         (iwl_read_prph(trans, APMG_PS_CTRL_REG) & APMG_PS_CTRL_VAL_RESET_REQ))) {
-        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
-        iwl_op_mode_wimax_active(trans->op_mode);
-        wake_up(&trans_pcie->wait_command_queue);
-        return;
-    }
-
-    for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
-        if (!trans_pcie->txq[i]) { continue; }
-        del_timer(&trans_pcie->txq[i]->stuck_timer);
-    }
-
-    /* The STATUS_FW_ERROR bit is set in this function. This must happen
-     * before we wake up the command caller, to ensure a proper cleanup. */
-    iwl_trans_fw_error(trans);
-
+  /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
+  if (trans->cfg->internal_wimax_coex && !trans->cfg->apmg_not_supported &&
+      (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & APMS_CLK_VAL_MRB_FUNC_MODE) ||
+       (iwl_read_prph(trans, APMG_PS_CTRL_REG) & APMG_PS_CTRL_VAL_RESET_REQ))) {
     clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+    iwl_op_mode_wimax_active(trans->op_mode);
     wake_up(&trans_pcie->wait_command_queue);
+    return;
+  }
+
+  for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+    if (!trans_pcie->txq[i]) {
+      continue;
+    }
+    del_timer(&trans_pcie->txq[i]->stuck_timer);
+  }
+
+  /* The STATUS_FW_ERROR bit is set in this function. This must happen
+   * before we wake up the command caller, to ensure a proper cleanup. */
+  iwl_trans_fw_error(trans);
+
+  clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+  wake_up(&trans_pcie->wait_command_queue);
 }
 
 static uint32_t iwl_pcie_int_cause_non_ict(struct iwl_trans* trans) {
-    uint32_t inta;
+  uint32_t inta;
 
-    lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
+  lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
 
-    trace_iwlwifi_dev_irq(trans->dev);
+  trace_iwlwifi_dev_irq(trans->dev);
 
-    /* Discover which interrupts are active/pending */
-    inta = iwl_read32(trans, CSR_INT);
+  /* Discover which interrupts are active/pending */
+  inta = iwl_read32(trans, CSR_INT);
 
-    /* the thread will service interrupts and re-enable them */
-    return inta;
+  /* the thread will service interrupts and re-enable them */
+  return inta;
 }
 
 /* a device (PCI-E) page is 4096 bytes long */
@@ -1415,320 +1489,330 @@
  * set index.
  */
 static uint32_t iwl_pcie_int_cause_ict(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    uint32_t inta;
-    uint32_t val = 0;
-    uint32_t read;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  uint32_t inta;
+  uint32_t val = 0;
+  uint32_t read;
 
-    trace_iwlwifi_dev_irq(trans->dev);
+  trace_iwlwifi_dev_irq(trans->dev);
 
-    /* Ignore interrupt if there's nothing in NIC to service.
-     * This may be due to IRQ shared with another device,
-     * or due to sporadic interrupts thrown from our NIC. */
+  /* Ignore interrupt if there's nothing in NIC to service.
+   * This may be due to IRQ shared with another device,
+   * or due to sporadic interrupts thrown from our NIC. */
+  read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+  trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+  if (!read) {
+    return 0;
+  }
+
+  /*
+   * Collect all entries up to the first 0, starting from ict_index;
+   * note we already read at ict_index.
+   */
+  do {
+    val |= read;
+    IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", trans_pcie->ict_index, read);
+    trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+    trans_pcie->ict_index = ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
+
     read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
     trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
-    if (!read) { return 0; }
+  } while (read);
 
-    /*
-     * Collect all entries up to the first 0, starting from ict_index;
-     * note we already read at ict_index.
-     */
-    do {
-        val |= read;
-        IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", trans_pcie->ict_index, read);
-        trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
-        trans_pcie->ict_index = ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
+  /* We should not get this value, just ignore it. */
+  if (val == 0xffffffff) {
+    val = 0;
+  }
 
-        read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
-        trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
-    } while (read);
+  /*
+   * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+   * (bit 15 before shifting it to 31) to clear when using interrupt
+   * coalescing. fortunately, bits 18 and 19 stay set when this happens
+   * so we use them to decide on the real state of the Rx bit.
+   * In other words, bit 15 is set if bit 18 or bit 19 are set.
+   */
+  if (val & 0xC0000) {
+    val |= 0x8000;
+  }
 
-    /* We should not get this value, just ignore it. */
-    if (val == 0xffffffff) { val = 0; }
-
-    /*
-     * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
-     * (bit 15 before shifting it to 31) to clear when using interrupt
-     * coalescing. fortunately, bits 18 and 19 stay set when this happens
-     * so we use them to decide on the real state of the Rx bit.
-     * In order words, bit 15 is set if bit 18 or bit 19 are set.
-     */
-    if (val & 0xC0000) { val |= 0x8000; }
-
-    inta = (0xff & val) | ((0xff00 & val) << 16);
-    return inta;
+  inta = (0xff & val) | ((0xff00 & val) << 16);
+  return inta;
 }
 
 void iwl_pcie_handle_rfkill_irq(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct isr_statistics* isr_stats = &trans_pcie->isr_stats;
-    bool hw_rfkill, prev, report;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct isr_statistics* isr_stats = &trans_pcie->isr_stats;
+  bool hw_rfkill, prev, report;
 
-    mutex_lock(&trans_pcie->mutex);
-    prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
-    hw_rfkill = iwl_is_rfkill_set(trans);
-    if (hw_rfkill) {
-        set_bit(STATUS_RFKILL_OPMODE, &trans->status);
-        set_bit(STATUS_RFKILL_HW, &trans->status);
+  mutex_lock(&trans_pcie->mutex);
+  prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+  hw_rfkill = iwl_is_rfkill_set(trans);
+  if (hw_rfkill) {
+    set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+    set_bit(STATUS_RFKILL_HW, &trans->status);
+  }
+  if (trans_pcie->opmode_down) {
+    report = hw_rfkill;
+  } else {
+    report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+  }
+
+  IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", hw_rfkill ? "disable radio" : "enable radio");
+
+  isr_stats->rfkill++;
+
+  if (prev != report) {
+    iwl_trans_pcie_rf_kill(trans, report);
+  }
+  mutex_unlock(&trans_pcie->mutex);
+
+  if (hw_rfkill) {
+    if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
+      IWL_DEBUG_RF_KILL(trans, "Rfkill while SYNC HCMD in flight\n");
     }
+    wake_up(&trans_pcie->wait_command_queue);
+  } else {
+    clear_bit(STATUS_RFKILL_HW, &trans->status);
     if (trans_pcie->opmode_down) {
-        report = hw_rfkill;
-    } else {
-        report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+      clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
     }
-
-    IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", hw_rfkill ? "disable radio" : "enable radio");
-
-    isr_stats->rfkill++;
-
-    if (prev != report) { iwl_trans_pcie_rf_kill(trans, report); }
-    mutex_unlock(&trans_pcie->mutex);
-
-    if (hw_rfkill) {
-        if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
-            IWL_DEBUG_RF_KILL(trans, "Rfkill while SYNC HCMD in flight\n");
-        }
-        wake_up(&trans_pcie->wait_command_queue);
-    } else {
-        clear_bit(STATUS_RFKILL_HW, &trans->status);
-        if (trans_pcie->opmode_down) { clear_bit(STATUS_RFKILL_OPMODE, &trans->status); }
-    }
+  }
 }
 
 irqreturn_t iwl_pcie_irq_handler(int irq, void* dev_id) {
-    struct iwl_trans* trans = dev_id;
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct isr_statistics* isr_stats = &trans_pcie->isr_stats;
-    uint32_t inta = 0;
-    uint32_t handled = 0;
+  struct iwl_trans* trans = dev_id;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct isr_statistics* isr_stats = &trans_pcie->isr_stats;
+  uint32_t inta = 0;
+  uint32_t handled = 0;
 
-    lock_map_acquire(&trans->sync_cmd_lockdep_map);
+  lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
-    spin_lock(&trans_pcie->irq_lock);
+  spin_lock(&trans_pcie->irq_lock);
 
-    /* dram interrupt table not set yet,
-     * use legacy interrupt.
+  /* dram interrupt table not set yet,
+   * use legacy interrupt.
+   */
+  if (likely(trans_pcie->use_ict)) {
+    inta = iwl_pcie_int_cause_ict(trans);
+  } else {
+    inta = iwl_pcie_int_cause_non_ict(trans);
+  }
+
+  if (iwl_have_debug_level(IWL_DL_ISR)) {
+    IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+                  inta, trans_pcie->inta_mask, iwl_read32(trans, CSR_INT_MASK),
+                  iwl_read32(trans, CSR_FH_INT_STATUS));
+    if (inta & (~trans_pcie->inta_mask))
+      IWL_DEBUG_ISR(trans, "We got a masked interrupt (0x%08x)\n", inta & (~trans_pcie->inta_mask));
+  }
+
+  inta &= trans_pcie->inta_mask;
+
+  /*
+   * Ignore interrupt if there's nothing in NIC to service.
+   * This may be due to IRQ shared with another device,
+   * or due to sporadic interrupts thrown from our NIC.
+   */
+  if (unlikely(!inta)) {
+    IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+    /*
+     * Re-enable interrupts here since we don't
+     * have anything to service
      */
-    if (likely(trans_pcie->use_ict)) {
-        inta = iwl_pcie_int_cause_ict(trans);
-    } else {
-        inta = iwl_pcie_int_cause_non_ict(trans);
+    if (test_bit(STATUS_INT_ENABLED, &trans->status)) {
+      _iwl_enable_interrupts(trans);
+    }
+    spin_unlock(&trans_pcie->irq_lock);
+    lock_map_release(&trans->sync_cmd_lockdep_map);
+    return IRQ_NONE;
+  }
+
+  if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+    /*
+     * Hardware disappeared. It might have
+     * already raised an interrupt.
+     */
+    IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+    spin_unlock(&trans_pcie->irq_lock);
+    goto out;
+  }
+
+  /* Ack/clear/reset pending uCode interrupts.
+   * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+   */
+  /* There is a hardware bug in the interrupt mask function that some
+   * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
+   * they are disabled in the CSR_INT_MASK register. Furthermore the
+   * ICT interrupt handling mechanism has another bug that might cause
+   * these unmasked interrupts fail to be detected. We workaround the
+   * hardware bugs here by ACKing all the possible interrupts so that
+   * interrupt coalescing can still be achieved.
+   */
+  iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
+
+  if (iwl_have_debug_level(IWL_DL_ISR))
+    IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", inta, iwl_read32(trans, CSR_INT_MASK));
+
+  spin_unlock(&trans_pcie->irq_lock);
+
+  /* Now service all interrupt bits discovered above. */
+  if (inta & CSR_INT_BIT_HW_ERR) {
+    IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
+
+    /* Tell the device to stop sending interrupts */
+    iwl_disable_interrupts(trans);
+
+    isr_stats->hw++;
+    iwl_pcie_irq_handle_error(trans);
+
+    handled |= CSR_INT_BIT_HW_ERR;
+
+    goto out;
+  }
+
+  if (iwl_have_debug_level(IWL_DL_ISR)) {
+    /* NIC fires this, but we don't use it, redundant with WAKEUP */
+    if (inta & CSR_INT_BIT_SCD) {
+      IWL_DEBUG_ISR(trans, "Scheduler finished to transmit the frame/frames.\n");
+      isr_stats->sch++;
     }
 
-    if (iwl_have_debug_level(IWL_DL_ISR)) {
-        IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
-                      inta, trans_pcie->inta_mask, iwl_read32(trans, CSR_INT_MASK),
-                      iwl_read32(trans, CSR_FH_INT_STATUS));
-        if (inta & (~trans_pcie->inta_mask))
-            IWL_DEBUG_ISR(trans, "We got a masked interrupt (0x%08x)\n",
-                          inta & (~trans_pcie->inta_mask));
+    /* Alive notification via Rx interrupt will do the real work */
+    if (inta & CSR_INT_BIT_ALIVE) {
+      IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+      isr_stats->alive++;
+      if (trans->cfg->gen2) {
+        /*
+         * We can restock, since firmware configured
+         * the RFH
+         */
+        iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+      }
     }
+  }
 
-    inta &= trans_pcie->inta_mask;
+  /* Safely ignore these bits for debug checks below */
+  inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+  /* HW RF KILL switch toggled */
+  if (inta & CSR_INT_BIT_RF_KILL) {
+    iwl_pcie_handle_rfkill_irq(trans);
+    handled |= CSR_INT_BIT_RF_KILL;
+  }
+
+  /* Chip got too hot and stopped itself */
+  if (inta & CSR_INT_BIT_CT_KILL) {
+    IWL_ERR(trans, "Microcode CT kill error detected.\n");
+    isr_stats->ctkill++;
+    handled |= CSR_INT_BIT_CT_KILL;
+  }
+
+  /* Error detected by uCode */
+  if (inta & CSR_INT_BIT_SW_ERR) {
+    IWL_ERR(trans,
+            "Microcode SW error detected. "
+            " Restarting 0x%X.\n",
+            inta);
+    isr_stats->sw++;
+    iwl_pcie_irq_handle_error(trans);
+    handled |= CSR_INT_BIT_SW_ERR;
+  }
+
+  /* uCode wakes up after power-down sleep */
+  if (inta & CSR_INT_BIT_WAKEUP) {
+    IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+    iwl_pcie_rxq_check_wrptr(trans);
+    iwl_pcie_txq_check_wrptrs(trans);
+
+    isr_stats->wakeup++;
+
+    handled |= CSR_INT_BIT_WAKEUP;
+  }
+
+  /* All uCode command responses, including Tx command responses,
+   * Rx "responses" (frame-received notification), and other
+   * notifications from uCode come through here. */
+  if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | CSR_INT_BIT_RX_PERIODIC)) {
+    IWL_DEBUG_ISR(trans, "Rx interrupt\n");
+    if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+      handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+      iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_RX_MASK);
+    }
+    if (inta & CSR_INT_BIT_RX_PERIODIC) {
+      handled |= CSR_INT_BIT_RX_PERIODIC;
+      iwl_write32(trans, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
+    }
+    /* Sending RX interrupt requires many steps to be done in
+     * the device:
+     * 1- write interrupt to current index in ICT table.
+     * 2- dma RX frame.
+     * 3- update RX shared data to indicate last write index.
+     * 4- send interrupt.
+     * This could lead to RX race, driver could receive RX interrupt
+     * but the shared data changes does not reflect this;
+     * periodic interrupt will detect any dangling Rx activity.
+     */
+
+    /* Disable periodic interrupt; we use it as just a one-shot. */
+    iwl_write8(trans, CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_DIS);
 
     /*
-     * Ignore interrupt if there's nothing in NIC to service.
-     * This may be due to IRQ shared with another device,
-     * or due to sporadic interrupts thrown from our NIC.
+     * Enable periodic interrupt in 8 msec only if we received
+     * real RX interrupt (instead of just periodic int), to catch
+     * any dangling Rx interrupt.  If it was just the periodic
+     * interrupt, there was no dangling Rx activity, and no need
+     * to extend the periodic interrupt; one-shot is enough.
      */
-    if (unlikely(!inta)) {
-        IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
-        /*
-         * Re-enable interrupts here since we don't
-         * have anything to service
-         */
-        if (test_bit(STATUS_INT_ENABLED, &trans->status)) { _iwl_enable_interrupts(trans); }
-        spin_unlock(&trans_pcie->irq_lock);
-        lock_map_release(&trans->sync_cmd_lockdep_map);
-        return IRQ_NONE;
+    if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+      iwl_write8(trans, CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_ENA);
     }
 
-    if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
-        /*
-         * Hardware disappeared. It might have
-         * already raised an interrupt.
-         */
-        IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
-        spin_unlock(&trans_pcie->irq_lock);
-        goto out;
-    }
+    isr_stats->rx++;
 
-    /* Ack/clear/reset pending uCode interrupts.
-     * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
-     */
-    /* There is a hardware bug in the interrupt mask function that some
-     * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
-     * they are disabled in the CSR_INT_MASK register. Furthermore the
-     * ICT interrupt handling mechanism has another bug that might cause
-     * these unmasked interrupts fail to be detected. We workaround the
-     * hardware bugs here by ACKing all the possible interrupts so that
-     * interrupt coalescing can still be achieved.
-     */
-    iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
+    local_bh_disable();
+    iwl_pcie_rx_handle(trans, 0);
+    local_bh_enable();
+  }
 
-    if (iwl_have_debug_level(IWL_DL_ISR))
-        IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", inta,
-                      iwl_read32(trans, CSR_INT_MASK));
+  /* This "Tx" DMA channel is used only for loading uCode */
+  if (inta & CSR_INT_BIT_FH_TX) {
+    iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
+    IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+    isr_stats->tx++;
+    handled |= CSR_INT_BIT_FH_TX;
+    /* Wake up uCode load routine, now that load is complete */
+    trans_pcie->ucode_write_complete = true;
+    wake_up(&trans_pcie->ucode_write_waitq);
+  }
 
-    spin_unlock(&trans_pcie->irq_lock);
+  if (inta & ~handled) {
+    IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+    isr_stats->unhandled++;
+  }
 
-    /* Now service all interrupt bits discovered above. */
-    if (inta & CSR_INT_BIT_HW_ERR) {
-        IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
+  if (inta & ~(trans_pcie->inta_mask)) {
+    IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", inta & ~trans_pcie->inta_mask);
+  }
 
-        /* Tell the device to stop sending interrupts */
-        iwl_disable_interrupts(trans);
-
-        isr_stats->hw++;
-        iwl_pcie_irq_handle_error(trans);
-
-        handled |= CSR_INT_BIT_HW_ERR;
-
-        goto out;
-    }
-
-    if (iwl_have_debug_level(IWL_DL_ISR)) {
-        /* NIC fires this, but we don't use it, redundant with WAKEUP */
-        if (inta & CSR_INT_BIT_SCD) {
-            IWL_DEBUG_ISR(trans, "Scheduler finished to transmit the frame/frames.\n");
-            isr_stats->sch++;
-        }
-
-        /* Alive notification via Rx interrupt will do the real work */
-        if (inta & CSR_INT_BIT_ALIVE) {
-            IWL_DEBUG_ISR(trans, "Alive interrupt\n");
-            isr_stats->alive++;
-            if (trans->cfg->gen2) {
-                /*
-                 * We can restock, since firmware configured
-                 * the RFH
-                 */
-                iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
-            }
-        }
-    }
-
-    /* Safely ignore these bits for debug checks below */
-    inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
-    /* HW RF KILL switch toggled */
-    if (inta & CSR_INT_BIT_RF_KILL) {
-        iwl_pcie_handle_rfkill_irq(trans);
-        handled |= CSR_INT_BIT_RF_KILL;
-    }
-
-    /* Chip got too hot and stopped itself */
-    if (inta & CSR_INT_BIT_CT_KILL) {
-        IWL_ERR(trans, "Microcode CT kill error detected.\n");
-        isr_stats->ctkill++;
-        handled |= CSR_INT_BIT_CT_KILL;
-    }
-
-    /* Error detected by uCode */
-    if (inta & CSR_INT_BIT_SW_ERR) {
-        IWL_ERR(trans,
-                "Microcode SW error detected. "
-                " Restarting 0x%X.\n",
-                inta);
-        isr_stats->sw++;
-        iwl_pcie_irq_handle_error(trans);
-        handled |= CSR_INT_BIT_SW_ERR;
-    }
-
-    /* uCode wakes up after power-down sleep */
-    if (inta & CSR_INT_BIT_WAKEUP) {
-        IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
-        iwl_pcie_rxq_check_wrptr(trans);
-        iwl_pcie_txq_check_wrptrs(trans);
-
-        isr_stats->wakeup++;
-
-        handled |= CSR_INT_BIT_WAKEUP;
-    }
-
-    /* All uCode command responses, including Tx command responses,
-     * Rx "responses" (frame-received notification), and other
-     * notifications from uCode come through here*/
-    if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | CSR_INT_BIT_RX_PERIODIC)) {
-        IWL_DEBUG_ISR(trans, "Rx interrupt\n");
-        if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
-            handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
-            iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_RX_MASK);
-        }
-        if (inta & CSR_INT_BIT_RX_PERIODIC) {
-            handled |= CSR_INT_BIT_RX_PERIODIC;
-            iwl_write32(trans, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
-        }
-        /* Sending RX interrupt require many steps to be done in the
-         * the device:
-         * 1- write interrupt to current index in ICT table.
-         * 2- dma RX frame.
-         * 3- update RX shared data to indicate last write index.
-         * 4- send interrupt.
-         * This could lead to RX race, driver could receive RX interrupt
-         * but the shared data changes does not reflect this;
-         * periodic interrupt will detect any dangling Rx activity.
-         */
-
-        /* Disable periodic interrupt; we use it as just a one-shot. */
-        iwl_write8(trans, CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_DIS);
-
-        /*
-         * Enable periodic interrupt in 8 msec only if we received
-         * real RX interrupt (instead of just periodic int), to catch
-         * any dangling Rx interrupt.  If it was just the periodic
-         * interrupt, there was no dangling Rx activity, and no need
-         * to extend the periodic interrupt; one-shot is enough.
-         */
-        if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
-            iwl_write8(trans, CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_ENA);
-        }
-
-        isr_stats->rx++;
-
-        local_bh_disable();
-        iwl_pcie_rx_handle(trans, 0);
-        local_bh_enable();
-    }
-
-    /* This "Tx" DMA channel is used only for loading uCode */
-    if (inta & CSR_INT_BIT_FH_TX) {
-        iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
-        IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
-        isr_stats->tx++;
-        handled |= CSR_INT_BIT_FH_TX;
-        /* Wake up uCode load routine, now that load is complete */
-        trans_pcie->ucode_write_complete = true;
-        wake_up(&trans_pcie->ucode_write_waitq);
-    }
-
-    if (inta & ~handled) {
-        IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
-        isr_stats->unhandled++;
-    }
-
-    if (inta & ~(trans_pcie->inta_mask)) {
-        IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", inta & ~trans_pcie->inta_mask);
-    }
-
-    spin_lock(&trans_pcie->irq_lock);
-    /* only Re-enable all interrupt if disabled by irq */
-    if (test_bit(STATUS_INT_ENABLED, &trans->status)) {
-        _iwl_enable_interrupts(trans);
-    }
-    /* we are loading the firmware, enable FH_TX interrupt only */
-    else if (handled & CSR_INT_BIT_FH_TX) {
-        iwl_enable_fw_load_int(trans);
-    }
-    /* Re-enable RF_KILL if it occurred */
-    else if (handled & CSR_INT_BIT_RF_KILL) {
-        iwl_enable_rfkill_int(trans);
-    }
-    spin_unlock(&trans_pcie->irq_lock);
+  spin_lock(&trans_pcie->irq_lock);
+  /* only Re-enable all interrupt if disabled by irq */
+  if (test_bit(STATUS_INT_ENABLED, &trans->status)) {
+    _iwl_enable_interrupts(trans);
+  }
+  /* we are loading the firmware, enable FH_TX interrupt only */
+  else if (handled & CSR_INT_BIT_FH_TX) {
+    iwl_enable_fw_load_int(trans);
+  }
+  /* Re-enable RF_KILL if it occurred */
+  else if (handled & CSR_INT_BIT_RF_KILL) {
+    iwl_enable_rfkill_int(trans);
+  }
+  spin_unlock(&trans_pcie->irq_lock);
 
 out:
-    lock_map_release(&trans->sync_cmd_lockdep_map);
-    return IRQ_HANDLED;
+  lock_map_release(&trans->sync_cmd_lockdep_map);
+  return IRQ_HANDLED;
 }
 
 /******************************************************************************
@@ -1739,13 +1823,13 @@
 
 /* Free dram table */
 void iwl_pcie_free_ict(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    if (trans_pcie->ict_tbl) {
-        dma_free_coherent(trans->dev, ICT_SIZE, trans_pcie->ict_tbl, trans_pcie->ict_tbl_dma);
-        trans_pcie->ict_tbl = NULL;
-        trans_pcie->ict_tbl_dma = 0;
-    }
+  if (trans_pcie->ict_tbl) {
+    dma_free_coherent(trans->dev, ICT_SIZE, trans_pcie->ict_tbl, trans_pcie->ict_tbl_dma);
+    trans_pcie->ict_tbl = NULL;
+    trans_pcie->ict_tbl_dma = 0;
+  }
 }
 
 /*
@@ -1754,194 +1838,199 @@
  * also reset all data related to ICT table interrupt.
  */
 int iwl_pcie_alloc_ict(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    trans_pcie->ict_tbl =
-        dma_zalloc_coherent(trans->dev, ICT_SIZE, &trans_pcie->ict_tbl_dma, GFP_KERNEL);
-    if (!trans_pcie->ict_tbl) { return -ENOMEM; }
+  trans_pcie->ict_tbl =
+      dma_zalloc_coherent(trans->dev, ICT_SIZE, &trans_pcie->ict_tbl_dma, GFP_KERNEL);
+  if (!trans_pcie->ict_tbl) {
+    return -ENOMEM;
+  }
 
-    /* just an API sanity check ... it is guaranteed to be aligned */
-    if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
-        iwl_pcie_free_ict(trans);
-        return -EINVAL;
-    }
+  /* just an API sanity check ... it is guaranteed to be aligned */
+  if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
+    iwl_pcie_free_ict(trans);
+    return -EINVAL;
+  }
 
-    return 0;
+  return 0;
 }
 
 /* Device is going up inform it about using ICT interrupt table,
  * also we need to tell the driver to start using ICT interrupt.
  */
 void iwl_pcie_reset_ict(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    uint32_t val;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  uint32_t val;
 
-    if (!trans_pcie->ict_tbl) { return; }
+  if (!trans_pcie->ict_tbl) {
+    return;
+  }
 
-    spin_lock(&trans_pcie->irq_lock);
-    _iwl_disable_interrupts(trans);
+  spin_lock(&trans_pcie->irq_lock);
+  _iwl_disable_interrupts(trans);
 
-    memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
+  memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
 
-    val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
+  val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
 
-    val |= CSR_DRAM_INT_TBL_ENABLE | CSR_DRAM_INIT_TBL_WRAP_CHECK | CSR_DRAM_INIT_TBL_WRITE_POINTER;
+  val |= CSR_DRAM_INT_TBL_ENABLE | CSR_DRAM_INIT_TBL_WRAP_CHECK | CSR_DRAM_INIT_TBL_WRITE_POINTER;
 
-    IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
+  IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
 
-    iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
-    trans_pcie->use_ict = true;
-    trans_pcie->ict_index = 0;
-    iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
-    _iwl_enable_interrupts(trans);
-    spin_unlock(&trans_pcie->irq_lock);
+  iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
+  trans_pcie->use_ict = true;
+  trans_pcie->ict_index = 0;
+  iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
+  _iwl_enable_interrupts(trans);
+  spin_unlock(&trans_pcie->irq_lock);
 }
 
 /* Device is going down disable ict interrupt usage */
 void iwl_pcie_disable_ict(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    spin_lock(&trans_pcie->irq_lock);
-    trans_pcie->use_ict = false;
-    spin_unlock(&trans_pcie->irq_lock);
+  spin_lock(&trans_pcie->irq_lock);
+  trans_pcie->use_ict = false;
+  spin_unlock(&trans_pcie->irq_lock);
 }
 
 irqreturn_t iwl_pcie_isr(int irq, void* data) {
-    struct iwl_trans* trans = data;
+  struct iwl_trans* trans = data;
 
-    if (!trans) { return IRQ_NONE; }
+  if (!trans) {
+    return IRQ_NONE;
+  }
 
-    /* Disable (but don't clear!) interrupts here to avoid
-     * back-to-back ISRs and sporadic interrupts from our NIC.
-     * If we have something to service, the tasklet will re-enable ints.
-     * If we *don't* have something, we'll re-enable before leaving here.
-     */
-    iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+  /* Disable (but don't clear!) interrupts here to avoid
+   * back-to-back ISRs and sporadic interrupts from our NIC.
+   * If we have something to service, the tasklet will re-enable ints.
+   * If we *don't* have something, we'll re-enable before leaving here.
+   */
+  iwl_write32(trans, CSR_INT_MASK, 0x00000000);
 
-    return IRQ_WAKE_THREAD;
+  return IRQ_WAKE_THREAD;
 }
 
-irqreturn_t iwl_pcie_msix_isr(int irq, void* data) {
-    return IRQ_WAKE_THREAD;
-}
+irqreturn_t iwl_pcie_msix_isr(int irq, void* data) { return IRQ_WAKE_THREAD; }
 
 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void* dev_id) {
-    struct msix_entry* entry = dev_id;
-    struct iwl_trans_pcie* trans_pcie = iwl_pcie_get_trans_pcie(entry);
-    struct iwl_trans* trans = trans_pcie->trans;
-    struct isr_statistics* isr_stats = &trans_pcie->isr_stats;
-    uint32_t inta_fh, inta_hw;
+  struct msix_entry* entry = dev_id;
+  struct iwl_trans_pcie* trans_pcie = iwl_pcie_get_trans_pcie(entry);
+  struct iwl_trans* trans = trans_pcie->trans;
+  struct isr_statistics* isr_stats = &trans_pcie->isr_stats;
+  uint32_t inta_fh, inta_hw;
 
-    lock_map_acquire(&trans->sync_cmd_lockdep_map);
+  lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
-    spin_lock(&trans_pcie->irq_lock);
-    inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
-    inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
-    /*
-     * Clear causes registers to avoid being handling the same cause.
-     */
-    iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
-    iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
-    spin_unlock(&trans_pcie->irq_lock);
+  spin_lock(&trans_pcie->irq_lock);
+  inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
+  inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+  /*
+   * Clear causes registers to avoid being handling the same cause.
+   */
+  iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+  iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+  spin_unlock(&trans_pcie->irq_lock);
 
-    trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
+  trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
 
-    if (unlikely(!(inta_fh | inta_hw))) {
-        IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
-        lock_map_release(&trans->sync_cmd_lockdep_map);
-        return IRQ_NONE;
-    }
-
-    if (iwl_have_debug_level(IWL_DL_ISR))
-        IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n", inta_fh,
-                      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
-
-    if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && inta_fh & MSIX_FH_INT_CAUSES_Q0) {
-        local_bh_disable();
-        iwl_pcie_rx_handle(trans, 0);
-        local_bh_enable();
-    }
-
-    if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
-        inta_fh & MSIX_FH_INT_CAUSES_Q1) {
-        local_bh_disable();
-        iwl_pcie_rx_handle(trans, 1);
-        local_bh_enable();
-    }
-
-    /* This "Tx" DMA channel is used only for loading uCode */
-    if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
-        IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
-        isr_stats->tx++;
-        /*
-         * Wake up uCode load routine,
-         * now that load is complete
-         */
-        trans_pcie->ucode_write_complete = true;
-        wake_up(&trans_pcie->ucode_write_waitq);
-    }
-
-    /* Error detected by uCode */
-    if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
-        (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
-        IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n", inta_fh);
-        isr_stats->sw++;
-        iwl_pcie_irq_handle_error(trans);
-    }
-
-    /* After checking FH register check HW register */
-    if (iwl_have_debug_level(IWL_DL_ISR))
-        IWL_DEBUG_ISR(trans, "ISR inta_hw 0x%08x, enabled 0x%08x\n", inta_hw,
-                      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
-
-    /* Alive notification via Rx interrupt will do the real work */
-    if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
-        IWL_DEBUG_ISR(trans, "Alive interrupt\n");
-        isr_stats->alive++;
-        if (trans->cfg->gen2) {
-            /* We can restock, since firmware configured the RFH */
-            iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
-        }
-    }
-
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
-        inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
-        /* Reflect IML transfer status */
-        int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
-
-        IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
-        if (res == IWL_IMAGE_RESP_FAIL) {
-            isr_stats->sw++;
-            iwl_pcie_irq_handle_error(trans);
-        }
-    } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
-        /* uCode wakes up after power-down sleep */
-        IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
-        iwl_pcie_rxq_check_wrptr(trans);
-        iwl_pcie_txq_check_wrptrs(trans);
-
-        isr_stats->wakeup++;
-    }
-
-    /* Chip got too hot and stopped itself */
-    if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
-        IWL_ERR(trans, "Microcode CT kill error detected.\n");
-        isr_stats->ctkill++;
-    }
-
-    /* HW RF KILL switch toggled */
-    if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) { iwl_pcie_handle_rfkill_irq(trans); }
-
-    if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
-        IWL_ERR(trans, "Hardware error detected. Restarting.\n");
-
-        isr_stats->hw++;
-        iwl_pcie_irq_handle_error(trans);
-    }
-
-    iwl_pcie_clear_irq(trans, entry);
-
+  if (unlikely(!(inta_fh | inta_hw))) {
+    IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
     lock_map_release(&trans->sync_cmd_lockdep_map);
+    return IRQ_NONE;
+  }
 
-    return IRQ_HANDLED;
+  if (iwl_have_debug_level(IWL_DL_ISR))
+    IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n", inta_fh,
+                  iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
+
+  if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && inta_fh & MSIX_FH_INT_CAUSES_Q0) {
+    local_bh_disable();
+    iwl_pcie_rx_handle(trans, 0);
+    local_bh_enable();
+  }
+
+  if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && inta_fh & MSIX_FH_INT_CAUSES_Q1) {
+    local_bh_disable();
+    iwl_pcie_rx_handle(trans, 1);
+    local_bh_enable();
+  }
+
+  /* This "Tx" DMA channel is used only for loading uCode */
+  if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+    IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+    isr_stats->tx++;
+    /*
+     * Wake up uCode load routine,
+     * now that load is complete
+     */
+    trans_pcie->ucode_write_complete = true;
+    wake_up(&trans_pcie->ucode_write_waitq);
+  }
+
+  /* Error detected by uCode */
+  if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
+      (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+    IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n", inta_fh);
+    isr_stats->sw++;
+    iwl_pcie_irq_handle_error(trans);
+  }
+
+  /* After checking FH register check HW register */
+  if (iwl_have_debug_level(IWL_DL_ISR))
+    IWL_DEBUG_ISR(trans, "ISR inta_hw 0x%08x, enabled 0x%08x\n", inta_hw,
+                  iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
+
+  /* Alive notification via Rx interrupt will do the real work */
+  if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
+    IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+    isr_stats->alive++;
+    if (trans->cfg->gen2) {
+      /* We can restock, since firmware configured the RFH */
+      iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+    }
+  }
+
+  if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
+      inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
+    /* Reflect IML transfer status */
+    int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
+
+    IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
+    if (res == IWL_IMAGE_RESP_FAIL) {
+      isr_stats->sw++;
+      iwl_pcie_irq_handle_error(trans);
+    }
+  } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+    /* uCode wakes up after power-down sleep */
+    IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+    iwl_pcie_rxq_check_wrptr(trans);
+    iwl_pcie_txq_check_wrptrs(trans);
+
+    isr_stats->wakeup++;
+  }
+
+  /* Chip got too hot and stopped itself */
+  if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
+    IWL_ERR(trans, "Microcode CT kill error detected.\n");
+    isr_stats->ctkill++;
+  }
+
+  /* HW RF KILL switch toggled */
+  if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
+    iwl_pcie_handle_rfkill_irq(trans);
+  }
+
+  if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
+    IWL_ERR(trans, "Hardware error detected. Restarting.\n");
+
+    isr_stats->hw++;
+    iwl_pcie_irq_handle_error(trans);
+  }
+
+  iwl_pcie_clear_irq(trans, entry);
+
+  lock_map_release(&trans->sync_cmd_lockdep_map);
+
+  return IRQ_HANDLED;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/trans-gen2.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/trans-gen2.c
index 12bc482..8702ee5 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/trans-gen2.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/trans-gen2.c
@@ -44,7 +44,7 @@
  * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
  * NOTE:  This does not load uCode nor start the embedded processor
  */
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
 int iwl_pcie_gen2_apm_init(struct iwl_trans* trans) {
     int ret = 0;
 
@@ -202,10 +202,10 @@
     /* re-take ownership to prevent other users from stealing the device */
     iwl_pcie_prepare_card_hw(trans);
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 void iwl_trans_pcie_gen2_stop_device(struct iwl_trans* trans, bool low_power) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     bool was_in_rfkill;
 
@@ -215,11 +215,11 @@
     _iwl_trans_pcie_gen2_stop_device(trans, low_power);
     iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
     mutex_unlock(&trans_pcie->mutex);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
 static int iwl_pcie_gen2_nic_init(struct iwl_trans* trans) {
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -242,10 +242,10 @@
 
     return 0;
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans* trans, uint32_t scd_addr) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
     iwl_pcie_reset_ict(trans);
@@ -258,13 +258,13 @@
      * paging memory cannot be freed included since FW will still use it
      */
     iwl_pcie_ctxt_info_free(trans);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans* trans, const struct fw_img* fw,
                                  bool run_in_rfkill) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     bool hw_rfkill;
     int ret;
@@ -333,7 +333,7 @@
 out:
     mutex_unlock(&trans_pcie->mutex);
     return ret;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/trans.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/trans.c
index 7e0d05c..9ca1022 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/trans.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/trans.c
@@ -39,7 +39,6 @@
 
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/dbg.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/error-dump.h"
-#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/internal.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-agn-hw.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-constants.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-csr.h"
@@ -48,6 +47,7 @@
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-prph.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-scd.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-trans.h"
+#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/internal.h"
 #ifdef CPTCFG_IWLWIFI_DEVICE_TESTMODE
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-dnt-cfg.h"
 #endif
@@ -132,17 +132,17 @@
 out:
     trans_pcie->pcie_dbg_dumped_once = 1;
     kfree(buf);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 static void iwl_trans_pcie_sw_reset(struct iwl_trans* trans) {
-    /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
-    iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset, BIT(trans->cfg->csr->flag_sw_reset));
-    zx_nanosleep(zx_deadline_after(ZX_MSEC(6)));
+  /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+  iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset, BIT(trans->cfg->csr->flag_sw_reset));
+  zx_nanosleep(zx_deadline_after(ZX_MSEC(6)));
 }
 
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
 static void iwl_pcie_free_fw_monitor(struct iwl_trans* trans) {
     int i;
 
@@ -155,11 +155,11 @@
         trans->num_blocks--;
     }
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans* trans, uint8_t max_power,
                                             uint8_t min_power) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     void* cpu_addr = NULL;
     dma_addr_t phys = 0;
     uint32_t size = 0;
@@ -185,34 +185,36 @@
     trans->fw_mon[trans->num_blocks].physical = phys;
     trans->fw_mon[trans->num_blocks].size = size;
     trans->num_blocks++;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 void iwl_pcie_alloc_fw_monitor(struct iwl_trans* trans, uint8_t max_power) {
-    if (!max_power) {
-        /* default max_power is maximum */
-        max_power = 26;
-    } else {
-        max_power += 11;
-    }
+  if (!max_power) {
+    /* default max_power is maximum */
+    max_power = 26;
+  } else {
+    max_power += 11;
+  }
 
-    if (max_power > 26) {
-        IWL_WARN(iwl_trans, "External buffer size for monitor is too big %d, check the FW TLV\n",
-                 max_power);
-        return;
-    }
+  if (max_power > 26) {
+    IWL_WARN(iwl_trans, "External buffer size for monitor is too big %d, check the FW TLV\n",
+             max_power);
+    return;
+  }
 
-    /*
-     * This function allocats the default fw monitor.
-     * The optional additional ones will be allocated in runtime
-     */
-    if (trans->num_blocks) { return; }
+  /*
+   * This function allocates the default fw monitor.
+   * The optional additional ones will be allocated at runtime.
+   */
+  if (trans->num_blocks) {
+    return;
+  }
 
-    iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
+  iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
 }
 
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
 static uint32_t iwl_trans_pcie_read_shr(struct iwl_trans* trans, uint32_t reg) {
     iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, ((reg & 0x0000ffff) | (2 << 28)));
     return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
@@ -233,13 +235,13 @@
         iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
                                ~APMG_PS_CTRL_MSK_PWR_SRC);
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT 0x041
 
 void iwl_pcie_apm_config(struct iwl_trans* trans) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     uint16_t lctl;
     uint16_t cap;
@@ -265,8 +267,8 @@
     IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
                     (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
                     trans->ltr_enabled ? "En" : "Dis");
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 /*
@@ -513,7 +515,7 @@
      */
     iwl_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_init_done));
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 #if 0   // NEEDS_PORTING
 static int iwl_pcie_nic_init(struct iwl_trans* trans) {
@@ -545,58 +547,64 @@
 
     return 0;
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 #define HW_READY_TIMEOUT (50)
 
 /* Note: returns poll_bit return value, which is >= 0 if success */
 static int iwl_pcie_set_hw_ready(struct iwl_trans* trans) {
-    int ret;
+  int ret;
 
-    iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+  iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
 
-    /* See if we got it */
-    ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
-                       CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT);
+  /* See if we got it */
+  ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+                     CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT);
 
-    if (ret >= 0) { iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); }
+  if (ret >= 0) {
+    iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
+  }
 
-    IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
-    return ret;
+  IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
+  return ret;
 }
 
 /* Note: returns standard 0/-ERROR code */
 int iwl_pcie_prepare_card_hw(struct iwl_trans* trans) {
-    int ret;
-    int t = 0;
-    int iter;
+  int ret;
+  int t = 0;
+  int iter;
 
-    IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+  IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
 
-    ret = iwl_pcie_set_hw_ready(trans);
-    /* If the card is ready, exit 0 */
-    if (ret >= 0) { return 0; }
+  ret = iwl_pcie_set_hw_ready(trans);
+  /* If the card is ready, exit 0 */
+  if (ret >= 0) {
+    return 0;
+  }
 
-    iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED);
-    zx_nanosleep(zx_deadline_after(ZX_MSEC(2)));
+  iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED);
+  zx_nanosleep(zx_deadline_after(ZX_MSEC(2)));
 
-    for (iter = 0; iter < 10; iter++) {
-        /* If HW is not ready, prepare the conditions to check again */
-        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
+  for (iter = 0; iter < 10; iter++) {
+    /* If HW is not ready, prepare the conditions to check again */
+    iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
 
-        do {
-            ret = iwl_pcie_set_hw_ready(trans);
-            if (ret >= 0) { return 0; }
+    do {
+      ret = iwl_pcie_set_hw_ready(trans);
+      if (ret >= 0) {
+        return 0;
+      }
 
-            zx_nanosleep(zx_deadline_after(ZX_MSEC(1)));
-            t += 200;
-        } while (t < 150000);
-        zx_nanosleep(zx_deadline_after(ZX_MSEC(25)));
-    }
+      zx_nanosleep(zx_deadline_after(ZX_MSEC(1)));
+      t += 200;
+    } while (t < 150000);
+    zx_nanosleep(zx_deadline_after(ZX_MSEC(25)));
+  }
 
-    IWL_ERR(trans, "Couldn't prepare the card\n");
+  IWL_ERR(trans, "Couldn't prepare the card\n");
 
-    return ret;
+  return ret;
 }
 
 #if 0  // NEEDS_PORTING
@@ -1215,11 +1223,11 @@
         synchronize_irq(trans_pcie->pci_dev->irq);
     }
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 static int iwl_trans_pcie_start_fw(struct iwl_trans* trans, const struct fw_img* fw,
                                    bool run_in_rfkill) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     bool hw_rfkill;
     int ret;
@@ -1301,21 +1309,21 @@
 out:
     mutex_unlock(&trans_pcie->mutex);
     return ret;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
 
 static void iwl_trans_pcie_fw_alive(struct iwl_trans* trans, uint32_t scd_addr) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     iwl_pcie_reset_ict(trans);
     iwl_pcie_tx_start(trans, scd_addr);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans* trans, bool was_in_rfkill) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     bool hw_rfkill;
 
     /*
@@ -1339,12 +1347,12 @@
         clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
     }
     if (hw_rfkill != was_in_rfkill) { iwl_trans_pcie_rf_kill(trans, hw_rfkill); }
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 static void iwl_trans_pcie_stop_device(struct iwl_trans* trans, bool low_power) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     bool was_in_rfkill;
 
@@ -1354,12 +1362,12 @@
     _iwl_trans_pcie_stop_device(trans, low_power);
     iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
     mutex_unlock(&trans_pcie->mutex);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 void iwl_trans_pcie_rf_kill(struct iwl_trans* trans, bool state) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie __maybe_unused* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
     lockdep_assert_held(&trans_pcie->mutex);
@@ -1372,12 +1380,12 @@
             _iwl_trans_pcie_stop_device(trans, true);
         }
     }
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans* trans, bool test, bool reset) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     if (!reset) {
         /* Enable persistence mode to avoid reset */
         iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
@@ -1408,13 +1416,13 @@
     }
 
     iwl_pcie_set_pwr(trans, true);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 static int iwl_trans_pcie_d3_resume(struct iwl_trans* trans, enum iwl_d3_status* status, bool test,
                                     bool reset) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     uint32_t val;
     int ret;
@@ -1474,12 +1482,12 @@
     }
 
     return 0;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
 
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
 static void iwl_pcie_set_interrupt_capa(struct pci_dev* pdev, struct iwl_trans* trans) {
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     int max_irqs, num_irqs, i, ret;
@@ -1583,20 +1591,20 @@
 
     return 0;
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 static int _iwl_trans_pcie_start_hw(struct iwl_trans* trans, bool low_power) {
-    //struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    //uint32_t hpm;
-    int err;
+  // struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  // uint32_t hpm;
+  int err;
 
-    err = iwl_pcie_prepare_card_hw(trans);
-    if (err) {
-        IWL_ERR(trans, "Error while preparing HW: %d\n", err);
-        return err;
-    }
+  err = iwl_pcie_prepare_card_hw(trans);
+  if (err) {
+    IWL_ERR(trans, "Error while preparing HW: %d\n", err);
+    return err;
+  }
 
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
 
     hpm = iwl_trans_read_prph(trans, HPM_DEBUG);
     if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
@@ -1629,23 +1637,23 @@
     if (low_power) { pm_runtime_resume(trans->dev); }
 
     return 0;
-#endif // NEEDS_PORTING
-    return -1;
+#endif  // NEEDS_PORTING
+  return -1;
 }
 
 static int iwl_trans_pcie_start_hw(struct iwl_trans* trans, bool low_power) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    int ret;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  int ret;
 
-    mtx_lock(&trans_pcie->mutex);
-    ret = _iwl_trans_pcie_start_hw(trans, low_power);
-    mtx_unlock(&trans_pcie->mutex);
+  mtx_lock(&trans_pcie->mutex);
+  ret = _iwl_trans_pcie_start_hw(trans, low_power);
+  mtx_unlock(&trans_pcie->mutex);
 
-    return ret;
+  return ret;
 }
 
 static void iwl_trans_pcie_op_mode_leave(struct iwl_trans* trans) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
     mutex_lock(&trans_pcie->mutex);
@@ -1662,79 +1670,79 @@
     mutex_unlock(&trans_pcie->mutex);
 
     iwl_pcie_synchronize_irqs(trans);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 static void iwl_trans_pcie_write8(struct iwl_trans* trans, uint32_t ofs, uint8_t val) {
-    uintptr_t addr = (uintptr_t)(IWL_TRANS_GET_PCIE_TRANS(trans)->mmio.vaddr) + ofs;
-    *(volatile uint8_t*)addr = val;
+  uintptr_t addr = (uintptr_t)(IWL_TRANS_GET_PCIE_TRANS(trans)->mmio.vaddr) + ofs;
+  *(volatile uint8_t*)addr = val;
 }
 
 static void iwl_trans_pcie_write32(struct iwl_trans* trans, uint32_t ofs, uint32_t val) {
-    uintptr_t addr = (uintptr_t)(IWL_TRANS_GET_PCIE_TRANS(trans)->mmio.vaddr) + ofs;
-    *(volatile uint32_t*)addr = val;
+  uintptr_t addr = (uintptr_t)(IWL_TRANS_GET_PCIE_TRANS(trans)->mmio.vaddr) + ofs;
+  *(volatile uint32_t*)addr = val;
 }
 
 static uint32_t iwl_trans_pcie_read32(struct iwl_trans* trans, uint32_t ofs) {
-    uintptr_t addr = (uintptr_t)(IWL_TRANS_GET_PCIE_TRANS(trans)->mmio.vaddr) + ofs;
-    return *(volatile uint32_t*)addr;
+  uintptr_t addr = (uintptr_t)(IWL_TRANS_GET_PCIE_TRANS(trans)->mmio.vaddr) + ofs;
+  return *(volatile uint32_t*)addr;
 }
 
 static uint32_t iwl_trans_pcie_prph_msk(struct iwl_trans* trans) {
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-        return 0x00FFFFFF;
-    } else {
-        return 0x000FFFFF;
-    }
+  if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+    return 0x00FFFFFF;
+  } else {
+    return 0x000FFFFF;
+  }
 }
 
 static uint32_t iwl_trans_pcie_read_prph(struct iwl_trans* trans, uint32_t reg) {
-    uint32_t mask = iwl_trans_pcie_prph_msk(trans);
+  uint32_t mask = iwl_trans_pcie_prph_msk(trans);
 
-    iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, ((reg & mask) | (3 << 24)));
-    return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
+  iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, ((reg & mask) | (3 << 24)));
+  return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
 }
 
 static void iwl_trans_pcie_write_prph(struct iwl_trans* trans, uint32_t addr, uint32_t val) {
-    uint32_t mask = iwl_trans_pcie_prph_msk(trans);
+  uint32_t mask = iwl_trans_pcie_prph_msk(trans);
 
-    iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
-    iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+  iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
+  iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
 }
 
 static void iwl_trans_pcie_configure(struct iwl_trans* trans,
                                      const struct iwl_trans_config* trans_cfg) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    trans_pcie->cmd_queue = trans_cfg->cmd_queue;
-    trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
-    trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
-    if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) {
-        trans_pcie->n_no_reclaim_cmds = 0;
-    } else {
-        trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
-    }
-    if (trans_pcie->n_no_reclaim_cmds)
-        memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
-               trans_pcie->n_no_reclaim_cmds * sizeof(uint8_t));
+  trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+  trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
+  trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+  if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) {
+    trans_pcie->n_no_reclaim_cmds = 0;
+  } else {
+    trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
+  }
+  if (trans_pcie->n_no_reclaim_cmds)
+    memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
+           trans_pcie->n_no_reclaim_cmds * sizeof(uint8_t));
 
-    trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
-#if 0  // NEEDS_PORTING
+  trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
+#if 0   // NEEDS_PORTING
     trans_pcie->rx_page_order = iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
-    trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
-    trans_pcie->scd_set_active = trans_cfg->scd_set_active;
-    trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
+  trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+  trans_pcie->scd_set_active = trans_cfg->scd_set_active;
+  trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
 
-    trans_pcie->page_offs = trans_cfg->cb_data_offs;
-    trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void*);
+  trans_pcie->page_offs = trans_cfg->cb_data_offs;
+  trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void*);
 
-    trans->command_groups = trans_cfg->command_groups;
-    trans->command_groups_size = trans_cfg->command_groups_size;
+  trans->command_groups = trans_cfg->command_groups;
+  trans->command_groups_size = trans_cfg->command_groups_size;
 
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     /* Initialize NAPI here - it should be before registering to mac80211
      * in the opmode but after the HW struct is allocated.
      * As this function may be called again in some corner cases don't
@@ -1743,11 +1751,11 @@
     if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) {
         init_dummy_netdev(&trans_pcie->napi_dev);
     }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 }
 
 void iwl_trans_pcie_free(struct iwl_trans* trans) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     int i;
 
@@ -1785,21 +1793,21 @@
 
     free_percpu(trans_pcie->tso_hdr_page);
     mutex_destroy(&trans_pcie->mutex);
-#endif // NEEDS_PORTING
-    iwl_trans_free(trans);
+#endif  // NEEDS_PORTING
+  iwl_trans_free(trans);
 }
 
 static void iwl_trans_pcie_set_pmi(struct iwl_trans* trans, bool state) {
-    if (state) {
-        set_bit(STATUS_TPOWER_PMI, &trans->status);
-    } else {
-        clear_bit(STATUS_TPOWER_PMI, &trans->status);
-    }
+  if (state) {
+    set_bit(STATUS_TPOWER_PMI, &trans->status);
+  } else {
+    clear_bit(STATUS_TPOWER_PMI, &trans->status);
+  }
 }
 
 struct iwl_trans_pcie_removal {
-    struct pci_dev* pdev;
-    struct work_struct work;
+  struct pci_dev* pdev;
+  struct work_struct work;
 };
 
 #if 0  // NEEDS_PORTING
@@ -1818,61 +1826,65 @@
     pci_dev_put(pdev);
     pci_stop_and_remove_bus_device(pdev);
     pci_unlock_rescan_remove();
-#endif /* LINUX_VERSION_IS_LESS(3,14,0) */
+#endif  /* LINUX_VERSION_IS_LESS(3,14,0) */
 
     kfree(removal);
     module_put(THIS_MODULE);
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans* trans, unsigned long* flags) {
-    int ret;
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  int ret;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    mtx_lock(&trans_pcie->reg_lock);
+  mtx_lock(&trans_pcie->reg_lock);
 
-    if (trans_pcie->cmd_hold_nic_awake) { goto out; }
+  if (trans_pcie->cmd_hold_nic_awake) {
+    goto out;
+  }
 
-    /* this bit wakes up the NIC */
-    __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req));
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
-        zx_nanosleep(zx_deadline_after(ZX_USEC(2)));
-    }
+  /* this bit wakes up the NIC */
+  __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req));
+  if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
+    zx_nanosleep(zx_deadline_after(ZX_USEC(2)));
+  }
 
-    /*
-     * These bits say the device is running, and should keep running for
-     * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
-     * but they do not indicate that embedded SRAM is restored yet;
-     * HW with volatile SRAM must save/restore contents to/from
-     * host DRAM when sleeping/waking for power-saving.
-     * Each direction takes approximately 1/4 millisecond; with this
-     * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
-     * series of register accesses are expected (e.g. reading Event Log),
-     * to keep device from sleeping.
-     *
-     * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
-     * SRAM is okay/restored.  We don't check that here because this call
-     * is just for hardware register access; but GP1 MAC_SLEEP
-     * check is a good idea before accessing the SRAM of HW with
-     * volatile SRAM (e.g. reading Event Log).
-     *
-     * 5000 series and later (including 1000 series) have non-volatile SRAM,
-     * and do not save/restore SRAM when power cycling.
-     */
-    ret = iwl_poll_bit(
-        trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_val_mac_access_en),
-        (BIT(trans->cfg->csr->flag_mac_clock_ready) | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
-    if (unlikely(ret < 0)) {
-        uint32_t cntrl = iwl_read32(trans, CSR_GP_CNTRL);
+  /*
+   * These bits say the device is running, and should keep running for
+   * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+   * but they do not indicate that embedded SRAM is restored yet;
+   * HW with volatile SRAM must save/restore contents to/from
+   * host DRAM when sleeping/waking for power-saving.
+   * Each direction takes approximately 1/4 millisecond; with this
+   * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+   * series of register accesses are expected (e.g. reading Event Log),
+   * to keep device from sleeping.
+   *
+   * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+   * SRAM is okay/restored.  We don't check that here because this call
+   * is just for hardware register access; but GP1 MAC_SLEEP
+   * check is a good idea before accessing the SRAM of HW with
+   * volatile SRAM (e.g. reading Event Log).
+   *
+   * 5000 series and later (including 1000 series) have non-volatile SRAM,
+   * and do not save/restore SRAM when power cycling.
+   */
+  ret = iwl_poll_bit(
+      trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_val_mac_access_en),
+      (BIT(trans->cfg->csr->flag_mac_clock_ready) | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+  if (unlikely(ret < 0)) {
+    uint32_t cntrl = iwl_read32(trans, CSR_GP_CNTRL);
 
-        IWL_WARN(trans, "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", cntrl);
+    IWL_WARN(trans, "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", cntrl);
 
-        iwl_trans_pcie_dump_regs(trans);
+    iwl_trans_pcie_dump_regs(trans);
 
-        if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
-            if (test_bit(STATUS_TRANS_DEAD, &trans->status)) { goto err; }
+    if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
+      if (test_bit(STATUS_TRANS_DEAD, &trans->status)) {
+        goto err;
+      }
 
-            IWL_ERR(trans, "Device gone - exit!\n");
+      IWL_ERR(trans, "Device gone - exit!\n");
 
 #if 0   // NEEDS_PORTING
             struct iwl_trans_pcie_removal* removal;
@@ -1906,36 +1918,38 @@
             pci_dev_get(removal->pdev);
             schedule_work(&removal->work);
 #endif  // NEEDS_PORTING
-        } else {
-            iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
-        }
-
-    err:
-        mtx_unlock(&trans_pcie->reg_lock);
-        return false;
+    } else {
+      iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
     }
 
-out:
-    /*
-     * Fool sparse by faking we release the lock - sparse will
-     * track nic_access anyway.
-     */
+  err:
     mtx_unlock(&trans_pcie->reg_lock);
-    return true;
+    return false;
+  }
+
+out:
+  /*
+   * Fool sparse by faking we release the lock - sparse will
+   * track nic_access anyway.
+   */
+  mtx_unlock(&trans_pcie->reg_lock);
+  return true;
 }
 
 static void iwl_trans_pcie_release_nic_access(struct iwl_trans* trans, unsigned long* flags) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    /*
-     * Fool sparse by faking we acquiring the lock - sparse will
-     * track nic_access anyway.
-     */
-    mtx_lock(&trans_pcie->reg_lock);
+  /*
+   * Fool sparse by faking we acquiring the lock - sparse will
+   * track nic_access anyway.
+   */
+  mtx_lock(&trans_pcie->reg_lock);
 
-    if (trans_pcie->cmd_hold_nic_awake) { goto out; }
+  if (trans_pcie->cmd_hold_nic_awake) {
+    goto out;
+  }
 
-    __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req));
+  __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req));
 #if 0   // NEEDS_PORTING
     /*
      * Above we read the CSR_GP_CNTRL register, which will flush
@@ -1946,11 +1960,11 @@
     mmiowb();
 #endif  // NEEDS_PORTING
 out:
-    mtx_unlock(&trans_pcie->reg_lock);
+  mtx_unlock(&trans_pcie->reg_lock);
 }
 
 static int iwl_trans_pcie_read_mem(struct iwl_trans* trans, uint32_t addr, void* buf, int dwords) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     unsigned long flags;
     int offs, ret = 0;
     uint32_t* vals = buf;
@@ -1965,14 +1979,14 @@
         ret = -EBUSY;
     }
     return ret;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
 
 static int iwl_trans_pcie_write_mem(struct iwl_trans* trans, uint32_t addr, const void* buf,
                                     int dwords) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     unsigned long flags;
     int offs, ret = 0;
     const uint32_t* vals = buf;
@@ -1987,14 +2001,14 @@
         ret = -EBUSY;
     }
     return ret;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
 
 static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans* trans, unsigned long txqs,
                                             bool freeze) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     int queue;
 
@@ -2037,12 +2051,12 @@
     next_queue:
         spin_unlock_bh(&txq->lock);
     }
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans* trans, bool block) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     int i;
 
@@ -2062,13 +2076,13 @@
 
         spin_unlock_bh(&txq->lock);
     }
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 #define IWL_FLUSH_WAIT_MS 2000
 
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
 void iwl_trans_pcie_log_scd_error(struct iwl_trans* trans, struct iwl_txq* txq) {
     uint32_t txq_id = txq->id;
     uint32_t status;
@@ -2096,11 +2110,11 @@
                 (trans->cfg->base_params->max_tfd_queue_size - 1),
             iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans* trans, int queue,
                                        struct iwl_trans_rxq_dma_data* data) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
     if (queue >= trans->num_rx_queues || !trans_pcie->rxq) { return -EINVAL; }
@@ -2111,13 +2125,13 @@
     data->fr_bd_wid = 0;
 
     return 0;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
 
 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans* trans, int txq_idx) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     struct iwl_txq* txq;
     unsigned long now = jiffies;
@@ -2152,13 +2166,13 @@
     IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
 
     return 0;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
 
 static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans* trans, uint32_t txq_bm) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     int cnt;
     int ret = 0;
@@ -2174,39 +2188,39 @@
     }
 
     return ret;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
 
 static void iwl_trans_pcie_set_bits_mask(struct iwl_trans* trans, uint32_t reg, uint32_t mask,
                                          uint32_t value) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    mtx_lock(&trans_pcie->reg_lock);
-    __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
-    mtx_unlock(&trans_pcie->reg_lock);
+  mtx_lock(&trans_pcie->reg_lock);
+  __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+  mtx_unlock(&trans_pcie->reg_lock);
 }
 
 static void iwl_trans_pcie_ref(struct iwl_trans* trans) {
-    // This function is to tell the power management core that we are going to use the device,
-    // please do not put the device into the power saving mode.
-    //
-    // TODO(WLAN-1118): supports power management in Fuchsia.
+  // This function is to tell the power management core that we are going to use the device,
+  // please do not put the device into the power saving mode.
+  //
+  // TODO(WLAN-1118): supports power management in Fuchsia.
 }
 
 static void iwl_trans_pcie_unref(struct iwl_trans* trans) {
-    // This function is to tell the power management core that we no longer use this device,
-    // feel free to put the device into the power saving mode.
-    //
-    // TODO(WLAN-1118): supports power management in Fuchsia.
+  // This function is to tell the power management core that we no longer use this device,
+  // feel free to put the device into the power saving mode.
+  //
+  // TODO(WLAN-1118): supports power management in Fuchsia.
 }
 
 #if 0  // NEEDS_PORTING
 static const char* get_csr_string(int cmd) {
 #define IWL_CMD(x) \
-    case x:        \
-        return #x
+  case x:          \
+    return #x
     switch (cmd) {
         IWL_CMD(CSR_HW_IF_CONFIG_REG);
         IWL_CMD(CSR_INT_COALESCING);
@@ -2276,33 +2290,34 @@
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
 /* create and remove of files */
-#define DEBUGFS_ADD_FILE(name, parent, mode)                                                     \
-    do {                                                                                         \
-        if (!debugfs_create_file(#name, mode, parent, trans, &iwl_dbgfs_##name##_ops)) goto err; \
-    } while (0)
+#define DEBUGFS_ADD_FILE(name, parent, mode)                                       \
+  do {                                                                             \
+    if (!debugfs_create_file(#name, mode, parent, trans, &iwl_dbgfs_##name##_ops)) \
+      goto err;                                                                    \
+  } while (0)
 
 /* file operation */
-#define DEBUGFS_READ_FILE_OPS(name)                                \
-    static const struct file_operations iwl_dbgfs_##name##_ops = { \
-        .read = iwl_dbgfs_##name##_read,                           \
-        .open = simple_open,                                       \
-        .llseek = generic_file_llseek,                             \
-    };
+#define DEBUGFS_READ_FILE_OPS(name)                              \
+  static const struct file_operations iwl_dbgfs_##name##_ops = { \
+      .read = iwl_dbgfs_##name##_read,                           \
+      .open = simple_open,                                       \
+      .llseek = generic_file_llseek,                             \
+  };
 
-#define DEBUGFS_WRITE_FILE_OPS(name)                               \
-    static const struct file_operations iwl_dbgfs_##name##_ops = { \
-        .write = iwl_dbgfs_##name##_write,                         \
-        .open = simple_open,                                       \
-        .llseek = generic_file_llseek,                             \
-    };
+#define DEBUGFS_WRITE_FILE_OPS(name)                             \
+  static const struct file_operations iwl_dbgfs_##name##_ops = { \
+      .write = iwl_dbgfs_##name##_write,                         \
+      .open = simple_open,                                       \
+      .llseek = generic_file_llseek,                             \
+  };
 
-#define DEBUGFS_READ_WRITE_FILE_OPS(name)                          \
-    static const struct file_operations iwl_dbgfs_##name##_ops = { \
-        .write = iwl_dbgfs_##name##_write,                         \
-        .read = iwl_dbgfs_##name##_read,                           \
-        .open = simple_open,                                       \
-        .llseek = generic_file_llseek,                             \
-    };
+#define DEBUGFS_READ_WRITE_FILE_OPS(name)                        \
+  static const struct file_operations iwl_dbgfs_##name##_ops = { \
+      .write = iwl_dbgfs_##name##_write,                         \
+      .read = iwl_dbgfs_##name##_read,                           \
+      .open = simple_open,                                       \
+      .llseek = generic_file_llseek,                             \
+  };
 
 static ssize_t iwl_dbgfs_tx_queue_read(struct file* file, char __user* user_buf, size_t count,
                                        loff_t* ppos) {
@@ -2859,11 +2874,11 @@
     }
     return 0;
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 static struct iwl_trans_dump_data* iwl_trans_pcie_dump_data(struct iwl_trans* trans,
                                                             uint32_t dump_mask) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     struct iwl_fw_error_dump_data* data;
     struct iwl_txq* cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
@@ -2993,40 +3008,40 @@
     dump_data->len = len;
 
     return dump_data;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return NULL;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return NULL;
 }
 
 #ifdef CONFIG_PM_SLEEP
 static int iwl_trans_pcie_suspend(struct iwl_trans* trans) {
-    if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
-        (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) {
-        return iwl_pci_fw_enter_d0i3(trans);
-    }
+  if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+      (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) {
+    return iwl_pci_fw_enter_d0i3(trans);
+  }
 
-    return 0;
+  return 0;
 }
 
 static void iwl_trans_pcie_resume(struct iwl_trans* trans) {
-    if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
-        (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) {
-        iwl_pci_fw_exit_d0i3(trans);
-    }
+  if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+      (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) {
+    iwl_pci_fw_exit_d0i3(trans);
+  }
 }
 #endif /* CONFIG_PM_SLEEP */
 
-#define IWL_TRANS_COMMON_OPS                                                                \
-    .op_mode_leave = iwl_trans_pcie_op_mode_leave, .write8 = iwl_trans_pcie_write8,         \
-    .write32 = iwl_trans_pcie_write32, .read32 = iwl_trans_pcie_read32,                     \
-    .read_prph = iwl_trans_pcie_read_prph, .write_prph = iwl_trans_pcie_write_prph,         \
-    .read_mem = iwl_trans_pcie_read_mem, .write_mem = iwl_trans_pcie_write_mem,             \
-    .configure = iwl_trans_pcie_configure, .set_pmi = iwl_trans_pcie_set_pmi,               \
-    .sw_reset = iwl_trans_pcie_sw_reset, .grab_nic_access = iwl_trans_pcie_grab_nic_access, \
-    .release_nic_access = iwl_trans_pcie_release_nic_access,                                \
-    .set_bits_mask = iwl_trans_pcie_set_bits_mask, .ref = iwl_trans_pcie_ref,               \
-    .unref = iwl_trans_pcie_unref, .dump_data = iwl_trans_pcie_dump_data,                   \
-    .d3_suspend = iwl_trans_pcie_d3_suspend, .d3_resume = iwl_trans_pcie_d3_resume
+#define IWL_TRANS_COMMON_OPS                                                              \
+  .op_mode_leave = iwl_trans_pcie_op_mode_leave, .write8 = iwl_trans_pcie_write8,         \
+  .write32 = iwl_trans_pcie_write32, .read32 = iwl_trans_pcie_read32,                     \
+  .read_prph = iwl_trans_pcie_read_prph, .write_prph = iwl_trans_pcie_write_prph,         \
+  .read_mem = iwl_trans_pcie_read_mem, .write_mem = iwl_trans_pcie_write_mem,             \
+  .configure = iwl_trans_pcie_configure, .set_pmi = iwl_trans_pcie_set_pmi,               \
+  .sw_reset = iwl_trans_pcie_sw_reset, .grab_nic_access = iwl_trans_pcie_grab_nic_access, \
+  .release_nic_access = iwl_trans_pcie_release_nic_access,                                \
+  .set_bits_mask = iwl_trans_pcie_set_bits_mask, .ref = iwl_trans_pcie_ref,               \
+  .unref = iwl_trans_pcie_unref, .dump_data = iwl_trans_pcie_dump_data,                   \
+  .d3_suspend = iwl_trans_pcie_d3_suspend, .d3_resume = iwl_trans_pcie_d3_resume
 
 #ifdef CONFIG_PM_SLEEP
 #define IWL_TRANS_PM_OPS .suspend = iwl_trans_pcie_suspend, .resume = iwl_trans_pcie_resume,
@@ -3082,32 +3097,34 @@
 };
 
 struct iwl_trans* iwl_trans_pcie_alloc(const pci_protocol_t* pci, const struct iwl_cfg* cfg) {
-    struct iwl_trans_pcie* trans_pcie;
-    struct iwl_trans* trans;
-    zx_status_t status;
-    int addr_size;
-#if 0  // NEEDS_PORTING
+  struct iwl_trans_pcie* trans_pcie;
+  struct iwl_trans* trans;
+  zx_status_t status;
+  int addr_size;
+#if 0   // NEEDS_PORTING
     int ret, addr_size;
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
-    if (cfg->gen2) {
-        trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), cfg, &trans_ops_pcie_gen2);
-    } else {
-        trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), cfg, &trans_ops_pcie);
-    }
+  if (cfg->gen2) {
+    trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), cfg, &trans_ops_pcie_gen2);
+  } else {
+    trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), cfg, &trans_ops_pcie);
+  }
 
-    if (!trans) { return NULL; }
+  if (!trans) {
+    return NULL;
+  }
 
-    trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-    trans_pcie->trans = trans;
-    trans_pcie->opmode_down = true;
-#if 0  // NEEDS_PORTING
+  trans_pcie->trans = trans;
+  trans_pcie->opmode_down = true;
+#if 0   // NEEDS_PORTING
     spin_lock_init(&trans_pcie->irq_lock);
-#endif // NEEDS_PORTING
-    mtx_init(&trans_pcie->reg_lock, mtx_plain);
-    mtx_init(&trans_pcie->mutex, mtx_plain);
-#if 0  // NEEDS_PORTING
+#endif  // NEEDS_PORTING
+  mtx_init(&trans_pcie->reg_lock, mtx_plain);
+  mtx_init(&trans_pcie->mutex, mtx_plain);
+#if 0   // NEEDS_PORTING
     init_waitqueue_head(&trans_pcie->ucode_write_waitq);
     trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
     if (!trans_pcie->tso_hdr_page) {
@@ -3124,29 +3141,29 @@
         pci_disable_link_state(pdev,
                                PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
     }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
-    trans_pcie->def_rx_queue = 0;
+  trans_pcie->def_rx_queue = 0;
 
-    if (cfg->use_tfh) {
-        addr_size = 64;
-        trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
-        trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
-    } else {
-        addr_size = 36;
-        trans_pcie->max_tbs = IWL_NUM_OF_TBS;
-        trans_pcie->tfd_size = sizeof(struct iwl_tfd);
-    }
-    trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
+  if (cfg->use_tfh) {
+    addr_size = 64;
+    trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
+    trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
+  } else {
+    addr_size = 36;
+    trans_pcie->max_tbs = IWL_NUM_OF_TBS;
+    trans_pcie->tfd_size = sizeof(struct iwl_tfd);
+  }
+  trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
 
-    memcpy(&trans_pcie->pci, &pci, sizeof(trans_pcie->pci));
-    status = pci_enable_bus_master(trans_pcie->pci, true);
-    if (status != ZX_OK) {
-        IWL_ERR(trans, "Failed to enabled bus mastering: %s\n", zx_status_get_string(status));
-        goto out_no_pci;
-    }
+  memcpy(&trans_pcie->pci, &pci, sizeof(trans_pcie->pci));
+  status = pci_enable_bus_master(trans_pcie->pci, true);
+  if (status != ZX_OK) {
+    IWL_ERR(trans, "Failed to enabled bus mastering: %s\n", zx_status_get_string(status));
+    goto out_no_pci;
+  }
 
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
     if (!ret) { ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(addr_size)); }
     if (ret) {
@@ -3158,37 +3175,36 @@
             goto out_no_pci;
         }
     }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
-    status = pci_map_bar_buffer(trans_pcie->pci, 0 /* bar_id */, ZX_CACHE_POLICY_UNCACHED_DEVICE,
-                                &trans_pcie->mmio);
-    if (status != ZX_OK) {
-        IWL_ERR(trans, "Failed to map resources for BAR 0: %s\n", zx_status_get_string(status));
-        goto out_no_pci;
-    }
+  status = pci_map_bar_buffer(trans_pcie->pci, 0 /* bar_id */, ZX_CACHE_POLICY_UNCACHED_DEVICE,
+                              &trans_pcie->mmio);
+  if (status != ZX_OK) {
+    IWL_ERR(trans, "Failed to map resources for BAR 0: %s\n", zx_status_get_string(status));
+    goto out_no_pci;
+  }
 
+  /* We disable the RETRY_TIMEOUT register (0x41) to keep
+   * PCI Tx retries from interfering with C3 CPU state */
+  pci_config_write8(trans_pcie->pci, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
-    /* We disable the RETRY_TIMEOUT register (0x41) to keep
-     * PCI Tx retries from interfering with C3 CPU state */
-    pci_config_write8(trans_pcie->pci, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
-    #if 0
+#if 0
     iwl_disable_interrupts(trans);
-    #endif
+#endif
 
-    trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
-    if (trans->hw_rev == 0xffffffff) {
-        IWL_ERR(trans, "HW_REV=0xFFFFFFFF, PCI issues?\n");
-        goto out_no_pci;
-    }
+  trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+  if (trans->hw_rev == 0xffffffff) {
+    IWL_ERR(trans, "HW_REV=0xFFFFFFFF, PCI issues?\n");
+    goto out_no_pci;
+  }
 
-    /*
-     * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
-     * changed, and now the revision step also includes bit 0-1 (no more
-     * "dash" value). To keep hw_rev backwards compatible - we'll store it
-     * in the old format.
-     */
-#if 0  // NEEDS_PORTING
+  /*
+   * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
+   * changed, and now the revision step also includes bit 0-1 (no more
+   * "dash" value). To keep hw_rev backwards compatible - we'll store it
+   * in the old format.
+   */
+#if 0   // NEEDS_PORTING
     if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
         unsigned long flags;
 
@@ -3228,81 +3244,80 @@
             iwl_trans_release_nic_access(trans, &flags);
         }
     }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
-    IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
+  IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
 
 #if IS_ENABLED(CPTCFG_IWLMVM) || IS_ENABLED(CPTCFG_IWLFMAC)
-    trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
+  trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
 
-    if (cfg == &iwl22560_2ax_cfg_hr) {
-        if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
-            CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
-            trans->cfg = &iwl22560_2ax_cfg_hr;
-        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
-                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
-            trans->cfg = &iwl22000_2ax_cfg_jf;
-        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
-                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
-            IWL_ERR(trans, "RF ID HRCDB is not supported\n");
-            ret = -EINVAL;
-            goto out_no_pci;
-        } else {
-            IWL_ERR(trans, "Unrecognized RF ID 0x%08x\n",
-                    CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id));
-            ret = -EINVAL;
-            goto out_no_pci;
-        }
+  if (cfg == &iwl22560_2ax_cfg_hr) {
+    if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+        CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
+      trans->cfg = &iwl22560_2ax_cfg_hr;
     } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
-                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-               (trans->cfg != &iwl22260_2ax_cfg || trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
-        uint32_t hw_status;
-
-        hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
-        if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP) {
-            if (hw_status & UMAG_GEN_HW_IS_FPGA) {
-                trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0_f0;
-            } else {
-                trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
-            }
-        } else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
-                   CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
-            trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
-        } else {
-            /*
-             * a step no FPGA
-             */
-            trans->cfg = &iwl22000_2ac_cfg_hr;
-        }
+               CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
+      trans->cfg = &iwl22000_2ax_cfg_jf;
+    } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+               CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
+      IWL_ERR(trans, "RF ID HRCDB is not supported\n");
+      ret = -EINVAL;
+      goto out_no_pci;
+    } else {
+      IWL_ERR(trans, "Unrecognized RF ID 0x%08x\n", CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id));
+      ret = -EINVAL;
+      goto out_no_pci;
     }
+  } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+                 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+             (trans->cfg != &iwl22260_2ax_cfg || trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
+    uint32_t hw_status;
 
-    /*
-     * The RF_ID is set to zero in blank OTP so read version
-     * to extract the RF_ID.
-     */
-    if (trans->cfg->rf_id && !CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
-        unsigned long flags;
-
-        if (iwl_trans_grab_nic_access(trans, &flags)) {
-            uint32_t val;
-
-            val = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
-            val |= ENABLE_WFPM;
-            iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, val);
-            val = iwl_read_prph_no_grab(trans, SD_REG_VER);
-
-            val &= 0xff00;
-            switch (val) {
-            case REG_VER_RF_ID_JF:
-                trans->hw_rf_id = CSR_HW_RF_ID_TYPE_JF;
-                break;
-            /* TODO: get value for REG_VER_RF_ID_HR */
-            default:
-                trans->hw_rf_id = CSR_HW_RF_ID_TYPE_HR;
-            }
-            iwl_trans_release_nic_access(trans, &flags);
-        }
+    hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
+    if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP) {
+      if (hw_status & UMAG_GEN_HW_IS_FPGA) {
+        trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0_f0;
+      } else {
+        trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+      }
+    } else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
+               CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
+      trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
+    } else {
+      /*
+       * a step no FPGA
+       */
+      trans->cfg = &iwl22000_2ac_cfg_hr;
     }
+  }
+
+  /*
+   * The RF_ID is set to zero in blank OTP so read version
+   * to extract the RF_ID.
+   */
+  if (trans->cfg->rf_id && !CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+    unsigned long flags;
+
+    if (iwl_trans_grab_nic_access(trans, &flags)) {
+      uint32_t val;
+
+      val = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
+      val |= ENABLE_WFPM;
+      iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, val);
+      val = iwl_read_prph_no_grab(trans, SD_REG_VER);
+
+      val &= 0xff00;
+      switch (val) {
+        case REG_VER_RF_ID_JF:
+          trans->hw_rf_id = CSR_HW_RF_ID_TYPE_JF;
+          break;
+        /* TODO: get value for REG_VER_RF_ID_HR */
+        default:
+          trans->hw_rf_id = CSR_HW_RF_ID_TYPE_HR;
+      }
+      iwl_trans_release_nic_access(trans, &flags);
+    }
+  }
 #endif
 #if 0  // NEEDS_PORTING
 
@@ -3346,17 +3361,17 @@
     mutex_init(&trans_pcie->fw_mon_data.mutex);
 #endif
 
-#endif // NEEDS_PORTING
-    return trans;
+#endif  // NEEDS_PORTING
+  return trans;
 
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
 out_free_ict:
     iwl_pcie_free_ict(trans);
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 out_no_pci:
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     free_percpu(trans_pcie->tso_hdr_page);
-#endif // NEEDS_PORTING
-    iwl_trans_free(trans);
-    return NULL;
+#endif  // NEEDS_PORTING
+  iwl_trans_free(trans);
+  return NULL;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/tx-gen2.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/tx-gen2.c
index 5e77019..b8ae176 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/tx-gen2.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/tx-gen2.c
@@ -32,7 +32,6 @@
  *
  *****************************************************************************/
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fuchsia_porting.h"
-
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tx.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-csr.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-debug.h"
@@ -43,22 +42,24 @@
  * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
  */
 void iwl_pcie_gen2_tx_stop(struct iwl_trans* trans) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    size_t txq_id;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  size_t txq_id;
 
-    /*
-     * This function can be called before the op_mode disabled the
-     * queues. This happens when we have an rfkill interrupt.
-     * Since we stop Tx altogether - mark the queues as stopped.
-     */
-    memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
-    memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+  /*
+   * This function can be called before the op_mode disabled the
+   * queues. This happens when we have an rfkill interrupt.
+   * Since we stop Tx altogether - mark the queues as stopped.
+   */
+  memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+  memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
 
-    /* Unmap DMA from host system and free skb's */
-    for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
-        if (!trans_pcie->txq[txq_id]) { continue; }
-        iwl_pcie_gen2_txq_unmap(trans, txq_id);
+  /* Unmap DMA from host system and free skb's */
+  for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
+    if (!trans_pcie->txq[txq_id]) {
+      continue;
     }
+    iwl_pcie_gen2_txq_unmap(trans, txq_id);
+  }
 }
 
 /*
@@ -66,71 +67,75 @@
  */
 void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie* trans_pcie, struct iwl_txq* txq,
                                    uint16_t byte_cnt, int num_tbs) {
-    struct iwlagn_scd_bc_tbl* scd_bc_tbl = txq->bc_tbl.addr;
-    struct iwl_trans* trans = iwl_trans_pcie_get_trans(trans_pcie);
-    struct iwl_gen3_bc_tbl* scd_bc_tbl_gen3 = txq->bc_tbl.addr;
-    int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-    uint8_t filled_tfd_size, num_fetch_chunks;
-    uint16_t len = byte_cnt;
-    __le16 bc_ent;
+  struct iwlagn_scd_bc_tbl* scd_bc_tbl = txq->bc_tbl.addr;
+  struct iwl_trans* trans = iwl_trans_pcie_get_trans(trans_pcie);
+  struct iwl_gen3_bc_tbl* scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+  int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+  uint8_t filled_tfd_size, num_fetch_chunks;
+  uint16_t len = byte_cnt;
+  __le16 bc_ent;
 
-    if (trans_pcie->bc_table_dword) { len = DIV_ROUND_UP(len, 4); }
+  if (trans_pcie->bc_table_dword) {
+    len = DIV_ROUND_UP(len, 4);
+  }
 
-    if (WARN_ON(len > 0xFFF || idx >= txq->n_window)) { return; }
+  if (WARN_ON(len > 0xFFF || idx >= txq->n_window)) {
+    return;
+  }
 
-    filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + num_tbs * sizeof(struct iwl_tfh_tb);
-    /*
-     * filled_tfd_size contains the number of filled bytes in the TFD.
-     * Dividing it by 64 will give the number of chunks to fetch
-     * to SRAM- 0 for one chunk, 1 for 2 and so on.
-     * If, for example, TFD contains only 3 TBs then 32 bytes
-     * of the TFD are used, and only one chunk of 64 bytes should
-     * be fetched
-     */
-    num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+  filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + num_tbs * sizeof(struct iwl_tfh_tb);
+  /*
+   * filled_tfd_size contains the number of filled bytes in the TFD.
+   * Dividing it by 64 will give the number of chunks to fetch
+   * to SRAM- 0 for one chunk, 1 for 2 and so on.
+   * If, for example, TFD contains only 3 TBs then 32 bytes
+   * of the TFD are used, and only one chunk of 64 bytes should
+   * be fetched
+   */
+  num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 
-    bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-        scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
-    } else {
-        scd_bc_tbl->tfd_offset[idx] = bc_ent;
-    }
+  bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+  if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+    scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+  } else {
+    scd_bc_tbl->tfd_offset[idx] = bc_ent;
+  }
 }
 
 /*
  * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
  */
 void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans* trans, struct iwl_txq* txq) {
-    lockdep_assert_held(&txq->lock);
+  lockdep_assert_held(&txq->lock);
 
-    IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+  IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
 
-    /*
-     * if not in power-save mode, uCode will never sleep when we're
-     * trying to tx (during RFKILL, we're not trying to tx).
-     */
-    iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
+  /*
+   * if not in power-save mode, uCode will never sleep when we're
+   * trying to tx (during RFKILL, we're not trying to tx).
+   */
+  iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
 }
 
 static uint8_t iwl_pcie_gen2_get_num_tbs(struct iwl_trans* trans, struct iwl_tfh_tfd* tfd) {
-    return le16_to_cpu(tfd->num_tbs) & 0x1f;
+  return le16_to_cpu(tfd->num_tbs) & 0x1f;
 }
 
 static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans* trans, struct iwl_cmd_meta* meta,
                                     struct iwl_tfh_tfd* tfd) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    int i, num_tbs;
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  int i, num_tbs;
 
-    /* Sanity check on number of chunks */
-    num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
+  /* Sanity check on number of chunks */
+  num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
 
-    if (num_tbs > trans_pcie->max_tbs) {
-        IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
-        return;
-    }
+  if (num_tbs > trans_pcie->max_tbs) {
+    IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+    return;
+  }
 
-    /* first TB is never freed - it's the bidirectional DMA data */
-    for (i = 1; i < num_tbs; i++) {
+  /* first TB is never freed - it's the bidirectional DMA data */
+  for (i = 1; i < num_tbs; i++) {
 #if 0   // NEEDS_PORTING
         if (meta->tbs & BIT(i))
             dma_unmap_page(trans->dev, le64_to_cpu(tfd->tbs[i].addr),
@@ -139,39 +144,39 @@
             dma_unmap_single(trans->dev, le64_to_cpu(tfd->tbs[i].addr),
                              le16_to_cpu(tfd->tbs[i].tb_len), DMA_TO_DEVICE);
 #endif  // NEEDS_PORTING
-    }
+  }
 
-    tfd->num_tbs = 0;
+  tfd->num_tbs = 0;
 }
 
 static void iwl_pcie_gen2_free_tfd(struct iwl_trans* trans, struct iwl_txq* txq) {
-    /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
-     * idx is bounded by n_window
+  /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+   * idx is bounded by n_window
+   */
+  int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+
+  lockdep_assert_held(&txq->lock);
+
+  iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, iwl_pcie_get_tfd(trans, txq, idx));
+
+  /* free SKB */
+  if (txq->entries) {
+    struct sk_buff* skb;
+
+    skb = txq->entries[idx].skb;
+
+    /* Can be called from irqs-disabled context
+     * If skb is not NULL, it means that the whole queue is being
+     * freed and that the queue is not empty - free the skb
      */
-    int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
-
-    lockdep_assert_held(&txq->lock);
-
-    iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, iwl_pcie_get_tfd(trans, txq, idx));
-
-    /* free SKB */
-    if (txq->entries) {
-        struct sk_buff* skb;
-
-        skb = txq->entries[idx].skb;
-
-        /* Can be called from irqs-disabled context
-         * If skb is not NULL, it means that the whole queue is being
-         * freed and that the queue is not empty - free the skb
-         */
-        if (skb) {
-            iwl_op_mode_free_skb(trans->op_mode, skb);
-            txq->entries[idx].skb = NULL;
-        }
+    if (skb) {
+      iwl_op_mode_free_skb(trans->op_mode, skb);
+      txq->entries[idx].skb = NULL;
     }
+  }
 }
 
-#if 0   // NEEDS_PORTING
+#if 0  // NEEDS_PORTING
 static int iwl_pcie_gen2_set_tb(struct iwl_trans* trans, struct iwl_tfh_tfd* tfd, dma_addr_t addr,
                                 uint16_t len) {
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -490,11 +495,11 @@
     }
     return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, hdr_len, len, !amsdu);
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 int iwl_trans_pcie_gen2_tx(struct iwl_trans* trans, struct sk_buff* skb,
                            struct iwl_device_cmd* dev_cmd, int txq_id) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     struct iwl_cmd_meta* out_meta;
     struct iwl_txq* txq = trans_pcie->txq[txq_id];
@@ -576,9 +581,9 @@
      */
     spin_unlock(&txq->lock);
     return 0;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
 
 #if 0  // NEEDS_PORTING
@@ -903,10 +908,10 @@
 
     return ret;
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans* trans, struct iwl_host_cmd* cmd) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
         IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", cmd->id);
         return -ERFKILL;
@@ -928,23 +933,23 @@
     }
 
     return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
 
 /*
  * iwl_pcie_gen2_txq_unmap -  Unmap any remaining DMA mappings and free skb's
  */
 void iwl_pcie_gen2_txq_unmap(struct iwl_trans* trans, int txq_id) {
-    struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-    struct iwl_txq* txq = trans_pcie->txq[txq_id];
+  struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+  struct iwl_txq* txq = trans_pcie->txq[txq_id];
 
-    mtx_lock(&txq->lock);
-    while (txq->write_ptr != txq->read_ptr) {
-        IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr);
+  mtx_lock(&txq->lock);
+  while (txq->write_ptr != txq->read_ptr) {
+    IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr);
 
-        if (txq_id != trans_pcie->cmd_queue) {
+    if (txq_id != trans_pcie->cmd_queue) {
 #if 0   // NEEDS_PORTING
             int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
             struct sk_buff* skb = txq->entries[idx].skb;
@@ -953,23 +958,23 @@
 
             iwl_pcie_free_tso_page(trans_pcie, skb);
 #endif  // NEEDS_PORTING
-        }
-        iwl_pcie_gen2_free_tfd(trans, txq);
-        txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
-
-        if (txq->read_ptr == txq->write_ptr) {
-            mtx_lock(&trans_pcie->reg_lock);
-            if (txq_id != trans_pcie->cmd_queue) {
-                IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", txq->id);
-                iwl_trans_unref(trans);
-            } else if (trans_pcie->ref_cmd_in_flight) {
-                trans_pcie->ref_cmd_in_flight = false;
-                IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight\n");
-                iwl_trans_unref(trans);
-            }
-            mtx_unlock(&trans_pcie->reg_lock);
-        }
     }
+    iwl_pcie_gen2_free_tfd(trans, txq);
+    txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
+
+    if (txq->read_ptr == txq->write_ptr) {
+      mtx_lock(&trans_pcie->reg_lock);
+      if (txq_id != trans_pcie->cmd_queue) {
+        IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", txq->id);
+        iwl_trans_unref(trans);
+      } else if (trans_pcie->ref_cmd_in_flight) {
+        trans_pcie->ref_cmd_in_flight = false;
+        IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight\n");
+        iwl_trans_unref(trans);
+      }
+      mtx_unlock(&trans_pcie->reg_lock);
+    }
+  }
 
 #if 0   // NEEDS_PORTING
     while (!skb_queue_empty(&txq->overflow_q)) {
@@ -979,7 +984,7 @@
     }
 #endif  // NEEDS_PORTING
 
-    mtx_unlock(&txq->lock);
+  mtx_unlock(&txq->lock);
 
 #if 0   // NEEDS_PORTING
     /* just in case - this queue may have been stopped */
@@ -1120,11 +1125,11 @@
     iwl_pcie_gen2_txq_free_memory(trans, txq);
     return ret;
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans* trans, __le16 flags, uint8_t sta_id, uint8_t tid,
                                  int cmd_id, int size, unsigned int timeout) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_txq* txq = NULL;
     struct iwl_tx_queue_cfg_cmd cmd = {
         .flags = flags,
@@ -1157,13 +1162,13 @@
 error:
     iwl_pcie_gen2_txq_free_memory(trans, txq);
     return ret;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
 
 void iwl_trans_pcie_dyn_txq_free(struct iwl_trans* trans, int queue) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
     /*
@@ -1180,11 +1185,11 @@
     iwl_pcie_gen2_txq_unmap(trans, queue);
 
     IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
 void iwl_pcie_gen2_tx_free(struct iwl_trans* trans) {
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     int i;
@@ -1235,4 +1240,4 @@
     iwl_pcie_gen2_tx_free(trans);
     return ret;
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/tx.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/tx.c
index d8e9f86..2b0f462 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/tx.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/tx.c
@@ -35,13 +35,14 @@
  *****************************************************************************/
 #if 0  // NEEDS_PORTING
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/fw/api/tx.h"
+
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-op-mode.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-prph.h"
-#endif // NEEDS_PORTING
-#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-scd.h"
+#endif  // NEEDS_PORTING
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-csr.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-debug.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-io.h"
+#include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/iwl-scd.h"
 #include "src/connectivity/wlan/drivers/third_party/intel/iwlwifi/pcie/internal.h"
 
 #if 0  // NEEDS_PORTING
@@ -947,12 +948,12 @@
         mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
     }
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 /* Frees buffers until index _not_ inclusive */
 void iwl_trans_pcie_reclaim(struct iwl_trans* trans, int txq_id, int ssn,
                             struct sk_buff_head* skbs) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     struct iwl_txq* txq = trans_pcie->txq[txq_id];
     int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
@@ -1048,8 +1049,8 @@
 
 out:
     spin_unlock_bh(&txq->lock);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 #if 0  // NEEDS_PORTING
@@ -1164,11 +1165,11 @@
 /* Receiver address (actually, Rx station's index into station table),
  * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
 #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 bool iwl_trans_pcie_txq_enable(struct iwl_trans* trans, int txq_id, uint16_t ssn,
                                const struct iwl_trans_txq_scd_cfg* cfg, unsigned int wdg_timeout) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     struct iwl_txq* txq = trans_pcie->txq[txq_id];
     int fifo = -1;
@@ -1267,24 +1268,24 @@
     }
 
     return scd_bug;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return false;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return false;
 }
 
 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans* trans, uint32_t txq_id,
                                         bool shared_mode) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     struct iwl_txq* txq = trans_pcie->txq[txq_id];
 
     txq->ampdu = !shared_mode;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 void iwl_trans_pcie_txq_disable(struct iwl_trans* trans, int txq_id, bool configure_scd) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     struct iwl_trans_pcie* trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
     uint32_t stts_addr = trans_pcie->scd_base_addr + SCD_TX_STTS_QUEUE_OFFSET(txq_id);
     static const uint32_t zero_val[4] = {};
@@ -1313,8 +1314,8 @@
     trans_pcie->txq[txq_id]->ampdu = false;
 
     IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
 }
 
 #if 0  // NEEDS_PORTING
@@ -1764,10 +1765,10 @@
 
     return ret;
 }
-#endif // NEEDS_PORTING
+#endif  // NEEDS_PORTING
 
 int iwl_trans_pcie_send_hcmd(struct iwl_trans* trans, struct iwl_host_cmd* cmd) {
-#if 0  // NEEDS_PORTING
+#if 0   // NEEDS_PORTING
     /* Make sure the NIC is still alive in the bus */
     if (test_bit(STATUS_TRANS_DEAD, &trans->status)) { return -ENODEV; }
 
@@ -1780,9 +1781,9 @@
 
     /* We still can fail on RFKILL that can be asserted while we wait */
     return iwl_pcie_send_hcmd_sync(trans, cmd);
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
 
 #if 0  // NEEDS_PORTING
@@ -2030,7 +2031,7 @@
 
     return 0;
 }
-#else  /* CONFIG_INET */
+#else   /* CONFIG_INET */
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans* trans, struct sk_buff* skb,
                                    struct iwl_txq* txq, uint8_t hdr_len,
                                    struct iwl_cmd_meta* out_meta, struct iwl_device_cmd* dev_cmd,
@@ -2040,8 +2041,8 @@
 
     return -1;
 }
-#endif /* CONFIG_INET */
-#endif // NEEDS_PORTING
+#endif  /* CONFIG_INET */
+#endif  // NEEDS_PORTING
 
 int iwl_trans_pcie_tx(struct iwl_trans* trans, struct sk_buff* skb, struct iwl_device_cmd* dev_cmd,
                       int txq_id) {
@@ -2240,7 +2241,7 @@
     iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
     spin_unlock(&txq->lock);
     return -1;
-#endif // NEEDS_PORTING
-    IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
-    return -1;
+#endif  // NEEDS_PORTING
+  IWL_ERR(trans, "%s needs porting\n", __FUNCTION__);
+  return -1;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/pcie_test.cc b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/pcie_test.cc
index 0460553..be1e206 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/pcie_test.cc
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/pcie_test.cc
@@ -34,7 +34,7 @@
 void write32_wrapper(struct iwl_trans* trans, uint32_t ofs, uint32_t val);
 
 class TransOps {
-public:
+ public:
   virtual void write32(uint32_t ofs, uint32_t val) = 0;
 };
 
@@ -48,9 +48,7 @@
     trans_pcie_ = &wrapper->trans_pcie;
   }
 
-  ~PcieTest() {
-    iwl_trans_free(trans_);
-  }
+  ~PcieTest() { iwl_trans_free(trans_); }
 
   MOCK_METHOD2(write32, void(uint32_t ofs, uint32_t val));
 
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/single-ap-test.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/single-ap-test.h
index 55652dd..8bfcfa4 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/single-ap-test.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/single-ap-test.h
@@ -17,14 +17,11 @@
 //
 class SingleApTest : public ::testing::Test {
  public:
-  SingleApTest() : ap_(kApAddr, kSsid, kSsidLen, kChannel), fw_(&env_) {
-    env_.AddAp(&ap_);
-  }
+  SingleApTest() : ap_(kApAddr, kSsid, kSsidLen, kChannel), fw_(&env_) { env_.AddAp(&ap_); }
   ~SingleApTest() {}
 
  protected:
-  static constexpr uint8_t kApAddr[ETH_ALEN] = {0x12, 0x34, 0x56,
-                                                0x78, 0x9a, 0xbc};
+  static constexpr uint8_t kApAddr[ETH_ALEN] = {0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc};
   static constexpr uint8_t kSsid[] = "MySSID";
   static constexpr size_t kSsidLen = 6;  // The length of 'ssid' above.
   static constexpr uint8_t kChannel = 11;
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/utils_test.cc b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/utils_test.cc
index 71f5e3a..aa984b3 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/utils_test.cc
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test/utils_test.cc
@@ -35,43 +35,31 @@
   int idx;  // 802.11 index
 
   // The band is out of range
-  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(0, NUM_NL80211_BANDS, &idx),
-            ZX_ERR_OUT_OF_RANGE);
+  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(0, NUM_NL80211_BANDS, &idx), ZX_ERR_OUT_OF_RANGE);
 
   // Invalid pointer
   EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(0, NL80211_BAND_5GHZ, nullptr),
             ZX_ERR_INVALID_ARGS);
 
   // Not supported 60GHz
-  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(0, NL80211_BAND_60GHZ, &idx),
-            ZX_ERR_NOT_SUPPORTED);
+  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(0, NL80211_BAND_60GHZ, &idx), ZX_ERR_NOT_SUPPORTED);
 
   // 2.4 GHz: data rate: 1 MHz ~ 54 MHz
-  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(10 /* 1 Mhz */,
-                                                NL80211_BAND_2GHZ, &idx),
-            ZX_OK);
+  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(10 /* 1 Mhz */, NL80211_BAND_2GHZ, &idx), ZX_OK);
   EXPECT_EQ(idx, 0);
-  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(3 /* 54 Mhz */,
-                                                NL80211_BAND_2GHZ, &idx),
-            ZX_OK);
+  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(3 /* 54 Mhz */, NL80211_BAND_2GHZ, &idx), ZX_OK);
   EXPECT_EQ(idx, 11);
 
   // 5 GHz: data rate: 6 MHz ~ 54 MHz
-  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(13 /* 6 Mhz */,
-                                                NL80211_BAND_5GHZ, &idx),
-            ZX_OK);
+  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(13 /* 6 Mhz */, NL80211_BAND_5GHZ, &idx), ZX_OK);
   EXPECT_EQ(idx, 0);
-  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(3 /* 54 Mhz */,
-                                                NL80211_BAND_5GHZ, &idx),
-            ZX_OK);
+  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(3 /* 54 Mhz */, NL80211_BAND_5GHZ, &idx), ZX_OK);
   EXPECT_EQ(idx, 7);
-  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(10 /* 1 Mhz */,
-                                                NL80211_BAND_5GHZ, &idx),
+  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(10 /* 1 Mhz */, NL80211_BAND_5GHZ, &idx),
             ZX_ERR_NOT_FOUND);
 
   // Not in the table
-  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(0 /* random number */,
-                                                NL80211_BAND_5GHZ, &idx),
+  EXPECT_EQ(iwl_mvm_legacy_rate_to_mac80211_idx(0 /* random number */, NL80211_BAND_5GHZ, &idx),
             ZX_ERR_NOT_FOUND);
 }
 
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/debugfs.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/debugfs.c
index 594f489..353e8f6 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/debugfs.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/debugfs.c
@@ -35,62 +35,70 @@
 #include "fw/dbg.h"
 #include "xvt.h"
 
-#define XVT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)                                     \
-    static ssize_t _iwl_dbgfs_##name##_write(struct file* file, const char __user* user_buf, \
-                                             size_t count, loff_t* ppos) {                   \
-        argtype* arg = file->private_data;                                                   \
-        char buf[buflen] = {};                                                               \
-        size_t buf_size = min(count, sizeof(buf) - 1);                                       \
-                                                                                             \
-        if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT;                         \
-                                                                                             \
-        return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos);                           \
-    }
+#define XVT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)                                   \
+  static ssize_t _iwl_dbgfs_##name##_write(struct file* file, const char __user* user_buf, \
+                                           size_t count, loff_t* ppos) {                   \
+    argtype* arg = file->private_data;                                                     \
+    char buf[buflen] = {};                                                                 \
+    size_t buf_size = min(count, sizeof(buf) - 1);                                         \
+                                                                                           \
+    if (copy_from_user(buf, user_buf, buf_size))                                           \
+      return -EFAULT;                                                                      \
+                                                                                           \
+    return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos);                             \
+  }
 
-#define _XVT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype)         \
-    XVT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)               \
-    static const struct file_operations iwl_dbgfs_##name##_ops = { \
-        .write = _iwl_dbgfs_##name##_write,                        \
-        .open = simple_open,                                       \
-        .llseek = generic_file_llseek,                             \
-    }
+#define _XVT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype)       \
+  XVT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)               \
+  static const struct file_operations iwl_dbgfs_##name##_ops = { \
+      .write = _iwl_dbgfs_##name##_write,                        \
+      .open = simple_open,                                       \
+      .llseek = generic_file_llseek,                             \
+  }
 
 #define XVT_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
-    _XVT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_xvt)
+  _XVT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_xvt)
 
-#define XVT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode)                                  \
-    do {                                                                                       \
-        if (!debugfs_create_file(alias, mode, parent, xvt, &iwl_dbgfs_##name##_ops)) goto err; \
-    } while (0)
+#define XVT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode)                    \
+  do {                                                                           \
+    if (!debugfs_create_file(alias, mode, parent, xvt, &iwl_dbgfs_##name##_ops)) \
+      goto err;                                                                  \
+  } while (0)
 
 #define XVT_DEBUGFS_ADD_FILE(name, parent, mode) \
-    XVT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+  XVT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
 
 static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_xvt* xvt, char* buf, size_t count,
                                               loff_t* ppos) {
-    if (!(xvt->state == IWL_XVT_STATE_OPERATIONAL && xvt->fw_running)) { return -EIO; }
+  if (!(xvt->state == IWL_XVT_STATE_OPERATIONAL && xvt->fw_running)) {
+    return -EIO;
+  }
 
-    if (count == 0) { return 0; }
+  if (count == 0) {
+    return 0;
+  }
 
-    iwl_fw_dbg_collect(&xvt->fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1));
+  iwl_fw_dbg_collect(&xvt->fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1));
 
-    return count;
+  return count;
 }
 
 static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_xvt* xvt, char* buf, size_t count,
                                           loff_t* ppos) {
-    int __maybe_unused ret;
+  int __maybe_unused ret;
 
-    if (!xvt->fw_running) { return -EIO; }
+  if (!xvt->fw_running) {
+    return -EIO;
+  }
 
-    mutex_lock(&xvt->mutex);
+  mutex_lock(&xvt->mutex);
 
-    /* Take the return value, though failure is expected, for compilation */
-    ret = iwl_xvt_send_cmd_pdu(xvt, REPLY_ERROR, 0, 0, NULL);
+  /* Take the return value, though failure is expected, for compilation */
+  ret = iwl_xvt_send_cmd_pdu(xvt, REPLY_ERROR, 0, 0, NULL);
 
-    mutex_unlock(&xvt->mutex);
+  mutex_unlock(&xvt->mutex);
 
-    return count;
+  return count;
 }
 
 /* Device wide debugfs entries */
@@ -99,14 +107,14 @@
 
 #ifdef CPTCFG_IWLWIFI_DEBUGFS
 int iwl_xvt_dbgfs_register(struct iwl_xvt* xvt, struct dentry* dbgfs_dir) {
-    xvt->debugfs_dir = dbgfs_dir;
+  xvt->debugfs_dir = dbgfs_dir;
 
-    XVT_DEBUGFS_ADD_FILE(fw_dbg_collect, xvt->debugfs_dir, S_IWUSR);
-    XVT_DEBUGFS_ADD_FILE(fw_restart, xvt->debugfs_dir, S_IWUSR);
+  XVT_DEBUGFS_ADD_FILE(fw_dbg_collect, xvt->debugfs_dir, S_IWUSR);
+  XVT_DEBUGFS_ADD_FILE(fw_restart, xvt->debugfs_dir, S_IWUSR);
 
-    return 0;
+  return 0;
 err:
-    IWL_ERR(xvt, "Can't create the xvt debugfs directory\n");
-    return -ENOMEM;
+  IWL_ERR(xvt, "Can't create the xvt debugfs directory\n");
+  return -ENOMEM;
 }
 #endif /* CPTCFG_IWLWIFI_DEBUGFS */
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/fw-api.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/fw-api.h
index 90cf8f7..2f5f20d 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/fw-api.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/fw-api.h
@@ -55,63 +55,63 @@
 #define XVT_LMAC_1_ID 1
 
 enum {
-    APMG_PD_SV_CMD = 0x43,
+  APMG_PD_SV_CMD = 0x43,
 
-    /* ToF */
-    LOCATION_GROUP_NOTIFICATION = 0x11,
+  /* ToF */
+  LOCATION_GROUP_NOTIFICATION = 0x11,
 
-    NVM_COMMIT_COMPLETE_NOTIFICATION = 0xad,
-    GET_SET_PHY_DB_CMD = 0x8f,
+  NVM_COMMIT_COMPLETE_NOTIFICATION = 0xad,
+  GET_SET_PHY_DB_CMD = 0x8f,
 
-    /* BFE */
-    REPLY_HD_PARAMS_CMD = 0xa6,
+  /* BFE */
+  REPLY_HD_PARAMS_CMD = 0xa6,
 
-    REPLY_RX_DSP_EXT_INFO = 0xc4,
+  REPLY_RX_DSP_EXT_INFO = 0xc4,
 
-    REPLY_DEBUG_XVT_CMD = 0xf3,
+  REPLY_DEBUG_XVT_CMD = 0xf3,
 };
 
 struct xvt_alive_resp_ver2 {
-    __le16 status;
-    __le16 flags;
-    uint8_t ucode_minor;
-    uint8_t ucode_major;
-    __le16 id;
-    uint8_t api_minor;
-    uint8_t api_major;
-    uint8_t ver_subtype;
-    uint8_t ver_type;
-    uint8_t mac;
-    uint8_t opt;
-    __le16 reserved2;
-    __le32 timestamp;
-    __le32 error_event_table_ptr; /* SRAM address for error log */
-    __le32 log_event_table_ptr;   /* SRAM address for LMAC event log */
-    __le32 cpu_register_ptr;
-    __le32 dbgm_config_ptr;
-    __le32 alive_counter_ptr;
-    __le32 scd_base_ptr; /* SRAM address for SCD */
-    __le32 st_fwrd_addr; /* pointer to Store and forward */
-    __le32 st_fwrd_size;
-    uint8_t umac_minor;     /* UMAC version: minor */
-    uint8_t umac_major;     /* UMAC version: major */
-    __le16 umac_id;         /* UMAC version: id */
-    __le32 error_info_addr; /* SRAM address for UMAC error log */
-    __le32 dbg_print_buff_addr;
+  __le16 status;
+  __le16 flags;
+  uint8_t ucode_minor;
+  uint8_t ucode_major;
+  __le16 id;
+  uint8_t api_minor;
+  uint8_t api_major;
+  uint8_t ver_subtype;
+  uint8_t ver_type;
+  uint8_t mac;
+  uint8_t opt;
+  __le16 reserved2;
+  __le32 timestamp;
+  __le32 error_event_table_ptr; /* SRAM address for error log */
+  __le32 log_event_table_ptr;   /* SRAM address for LMAC event log */
+  __le32 cpu_register_ptr;
+  __le32 dbgm_config_ptr;
+  __le32 alive_counter_ptr;
+  __le32 scd_base_ptr; /* SRAM address for SCD */
+  __le32 st_fwrd_addr; /* pointer to Store and forward */
+  __le32 st_fwrd_size;
+  uint8_t umac_minor;     /* UMAC version: minor */
+  uint8_t umac_major;     /* UMAC version: major */
+  __le16 umac_id;         /* UMAC version: id */
+  __le32 error_info_addr; /* SRAM address for UMAC error log */
+  __le32 dbg_print_buff_addr;
 } __packed; /* ALIVE_RES_API_S_VER_2 */
 
 enum {
-    XVT_DBG_GET_SVDROP_VER_OP = 0x01,
+  XVT_DBG_GET_SVDROP_VER_OP = 0x01,
 };
 
 struct xvt_debug_cmd {
-    __le32 opcode;
-    __le32 dw_num;
+  __le32 opcode;
+  __le32 dw_num;
 }; /* DEBUG_XVT_CMD_API_S_VER_1 */
 
 struct xvt_debug_res {
-    __le32 dw_num;
-    __le32 data[0];
+  __le32 dw_num;
+  __le32 data[0];
 }; /* DEBUG_XVT_RES_API_S_VER_1 */
 
 #endif  // SRC_CONNECTIVITY_WLAN_DRIVERS_THIRD_PARTY_INTEL_IWLWIFI_XVT_FW_API_H_
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/fw.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/fw.c
index f559d68..8a142ff 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/fw.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/fw.c
@@ -32,261 +32,275 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include "fw/dbg.h"
 #include "fw/img.h"
+#include "fw/testmode.h"
 #include "iwl-csr.h"
+#include "iwl-dnt-cfg.h"
 #include "iwl-op-mode.h"
 #include "iwl-trans.h"
-
-#include "fw/dbg.h"
-#include "fw/testmode.h"
-#include "iwl-dnt-cfg.h"
 #include "xvt.h"
 
 #define XVT_UCODE_ALIVE_TIMEOUT (HZ * CPTCFG_IWL_TIMEOUT_FACTOR)
 
 struct iwl_xvt_alive_data {
-    bool valid;
-    uint32_t scd_base_addr;
+  bool valid;
+  uint32_t scd_base_addr;
 };
 
 static int iwl_xvt_send_dqa_cmd(struct iwl_xvt* xvt) {
-    struct iwl_dqa_enable_cmd dqa_cmd = {
-        .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
-    };
-    uint32_t cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
-    int ret;
+  struct iwl_dqa_enable_cmd dqa_cmd = {
+      .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
+  };
+  uint32_t cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
+  int ret;
 
-    ret = iwl_xvt_send_cmd_pdu(xvt, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
-    if (ret) {
-        IWL_ERR(xvt, "Failed to send DQA enabling command: %d\n", ret);
-    } else {
-        IWL_DEBUG_FW(xvt, "Working in DQA mode\n");
-    }
+  ret = iwl_xvt_send_cmd_pdu(xvt, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
+  if (ret) {
+    IWL_ERR(xvt, "Failed to send DQA enabling command: %d\n", ret);
+  } else {
+    IWL_DEBUG_FW(xvt, "Working in DQA mode\n");
+  }
 
-    return ret;
+  return ret;
 }
 
 static bool iwl_alive_fn(struct iwl_notif_wait_data* notif_wait, struct iwl_rx_packet* pkt,
                          void* data) {
-    struct iwl_xvt* xvt = container_of(notif_wait, struct iwl_xvt, notif_wait);
-    struct iwl_xvt_alive_data* alive_data = data;
-    struct xvt_alive_resp_ver2* palive2;
-    struct mvm_alive_resp_v3* palive3;
-    struct mvm_alive_resp* palive4;
-    struct iwl_lmac_alive *lmac1, *lmac2;
-    struct iwl_umac_alive* umac;
-    uint32_t rx_packet_payload_size = iwl_rx_packet_payload_len(pkt);
-    uint16_t status, flags;
-    xvt->support_umac_log = false;
+  struct iwl_xvt* xvt = container_of(notif_wait, struct iwl_xvt, notif_wait);
+  struct iwl_xvt_alive_data* alive_data = data;
+  struct xvt_alive_resp_ver2* palive2;
+  struct mvm_alive_resp_v3* palive3;
+  struct mvm_alive_resp* palive4;
+  struct iwl_lmac_alive *lmac1, *lmac2;
+  struct iwl_umac_alive* umac;
+  uint32_t rx_packet_payload_size = iwl_rx_packet_payload_len(pkt);
+  uint16_t status, flags;
+  xvt->support_umac_log = false;
 
-    if (rx_packet_payload_size == sizeof(*palive2)) {
-        palive2 = (void*)pkt->data;
+  if (rx_packet_payload_size == sizeof(*palive2)) {
+    palive2 = (void*)pkt->data;
 
-        xvt->error_event_table[0] = le32_to_cpu(palive2->error_event_table_ptr);
-        alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
+    xvt->error_event_table[0] = le32_to_cpu(palive2->error_event_table_ptr);
+    alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
 
-        alive_data->valid = le16_to_cpu(palive2->status) == IWL_ALIVE_STATUS_OK;
-        iwl_tm_set_fw_ver(xvt->trans, palive2->ucode_major, palive2->ucode_minor);
-        xvt->umac_error_event_table = le32_to_cpu(palive2->error_info_addr);
-        if (xvt->umac_error_event_table) { xvt->support_umac_log = true; }
-
-        IWL_DEBUG_FW(xvt,
-                     "Alive VER2 ucode status 0x%04x revision 0x%01X "
-                     "0x%01X flags 0x%01X\n",
-                     le16_to_cpu(palive2->status), palive2->ver_type, palive2->ver_subtype,
-                     palive2->flags);
-
-        IWL_DEBUG_FW(xvt, "UMAC version: Major - 0x%x, Minor - 0x%x\n", palive2->umac_major,
-                     palive2->umac_minor);
-    } else {
-        if (rx_packet_payload_size == sizeof(*palive3)) {
-            palive3 = (void*)pkt->data;
-            status = le16_to_cpu(palive3->status);
-            flags = le16_to_cpu(palive3->flags);
-            lmac1 = &palive3->lmac_data;
-            umac = &palive3->umac_data;
-
-            IWL_DEBUG_FW(xvt, "Alive VER3\n");
-        } else if (rx_packet_payload_size == sizeof(*palive4)) {
-            palive4 = (void*)pkt->data;
-            status = le16_to_cpu(palive4->status);
-            flags = le16_to_cpu(palive4->flags);
-            lmac1 = &palive4->lmac_data[0];
-            lmac2 = &palive4->lmac_data[1];
-            umac = &palive4->umac_data;
-            xvt->error_event_table[1] = le32_to_cpu(lmac2->error_event_table_ptr);
-
-            IWL_DEBUG_FW(xvt, "Alive VER4 CDB\n");
-        } else {
-            IWL_ERR(xvt, "unrecognized alive notificatio\n");
-            return false;
-        }
-
-        alive_data->valid = status == IWL_ALIVE_STATUS_OK;
-        xvt->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
-        alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
-        iwl_tm_set_fw_ver(xvt->trans, le32_to_cpu(lmac1->ucode_major),
-                          le32_to_cpu(lmac1->ucode_minor));
-        xvt->umac_error_event_table = le32_to_cpu(umac->error_info_addr);
-        if (xvt->umac_error_event_table) { xvt->support_umac_log = true; }
-
-        IWL_DEBUG_FW(xvt, "status 0x%04x rev 0x%01X 0x%01X flags 0x%01X\n", status, lmac1->ver_type,
-                     lmac1->ver_subtype, flags);
-        IWL_DEBUG_FW(xvt, "UMAC version: Major - 0x%x, Minor - 0x%x\n", umac->umac_major,
-                     umac->umac_minor);
+    alive_data->valid = le16_to_cpu(palive2->status) == IWL_ALIVE_STATUS_OK;
+    iwl_tm_set_fw_ver(xvt->trans, palive2->ucode_major, palive2->ucode_minor);
+    xvt->umac_error_event_table = le32_to_cpu(palive2->error_info_addr);
+    if (xvt->umac_error_event_table) {
+      xvt->support_umac_log = true;
     }
 
-    return true;
+    IWL_DEBUG_FW(xvt,
+                 "Alive VER2 ucode status 0x%04x revision 0x%01X "
+                 "0x%01X flags 0x%01X\n",
+                 le16_to_cpu(palive2->status), palive2->ver_type, palive2->ver_subtype,
+                 palive2->flags);
+
+    IWL_DEBUG_FW(xvt, "UMAC version: Major - 0x%x, Minor - 0x%x\n", palive2->umac_major,
+                 palive2->umac_minor);
+  } else {
+    if (rx_packet_payload_size == sizeof(*palive3)) {
+      palive3 = (void*)pkt->data;
+      status = le16_to_cpu(palive3->status);
+      flags = le16_to_cpu(palive3->flags);
+      lmac1 = &palive3->lmac_data;
+      umac = &palive3->umac_data;
+
+      IWL_DEBUG_FW(xvt, "Alive VER3\n");
+    } else if (rx_packet_payload_size == sizeof(*palive4)) {
+      palive4 = (void*)pkt->data;
+      status = le16_to_cpu(palive4->status);
+      flags = le16_to_cpu(palive4->flags);
+      lmac1 = &palive4->lmac_data[0];
+      lmac2 = &palive4->lmac_data[1];
+      umac = &palive4->umac_data;
+      xvt->error_event_table[1] = le32_to_cpu(lmac2->error_event_table_ptr);
+
+      IWL_DEBUG_FW(xvt, "Alive VER4 CDB\n");
+    } else {
+      IWL_ERR(xvt, "unrecognized alive notificatio\n");
+      return false;
+    }
+
+    alive_data->valid = status == IWL_ALIVE_STATUS_OK;
+    xvt->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
+    alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
+    iwl_tm_set_fw_ver(xvt->trans, le32_to_cpu(lmac1->ucode_major), le32_to_cpu(lmac1->ucode_minor));
+    xvt->umac_error_event_table = le32_to_cpu(umac->error_info_addr);
+    if (xvt->umac_error_event_table) {
+      xvt->support_umac_log = true;
+    }
+
+    IWL_DEBUG_FW(xvt, "status 0x%04x rev 0x%01X 0x%01X flags 0x%01X\n", status, lmac1->ver_type,
+                 lmac1->ver_subtype, flags);
+    IWL_DEBUG_FW(xvt, "UMAC version: Major - 0x%x, Minor - 0x%x\n", umac->umac_major,
+                 umac->umac_minor);
+  }
+
+  return true;
 }
 
 static int iwl_xvt_load_ucode_wait_alive(struct iwl_xvt* xvt, enum iwl_ucode_type ucode_type) {
-    struct iwl_notification_wait alive_wait;
-    struct iwl_xvt_alive_data alive_data;
-    const struct fw_img* fw;
-    int ret;
-    enum iwl_ucode_type old_type = xvt->fwrt.cur_fw_img;
-    static const uint16_t alive_cmd[] = {MVM_ALIVE};
-    struct iwl_scd_txq_cfg_cmd cmd = {
-        .scd_queue = IWL_XVT_DEFAULT_TX_QUEUE,
-        .action = SCD_CFG_ENABLE_QUEUE,
-        .window = IWL_FRAME_LIMIT,
-        .sta_id = IWL_XVT_TX_STA_ID_DEFAULT,
-        .ssn = 0,
-        .tx_fifo = IWL_XVT_DEFAULT_TX_FIFO,
-        .aggregate = false,
-        .tid = IWL_MAX_TID_COUNT,
-    };
+  struct iwl_notification_wait alive_wait;
+  struct iwl_xvt_alive_data alive_data;
+  const struct fw_img* fw;
+  int ret;
+  enum iwl_ucode_type old_type = xvt->fwrt.cur_fw_img;
+  static const uint16_t alive_cmd[] = {MVM_ALIVE};
+  struct iwl_scd_txq_cfg_cmd cmd = {
+      .scd_queue = IWL_XVT_DEFAULT_TX_QUEUE,
+      .action = SCD_CFG_ENABLE_QUEUE,
+      .window = IWL_FRAME_LIMIT,
+      .sta_id = IWL_XVT_TX_STA_ID_DEFAULT,
+      .ssn = 0,
+      .tx_fifo = IWL_XVT_DEFAULT_TX_FIFO,
+      .aggregate = false,
+      .tid = IWL_MAX_TID_COUNT,
+  };
 
-    iwl_fw_set_current_image(&xvt->fwrt, ucode_type);
-    fw = iwl_get_ucode_image(xvt->fw, ucode_type);
+  iwl_fw_set_current_image(&xvt->fwrt, ucode_type);
+  fw = iwl_get_ucode_image(xvt->fw, ucode_type);
 
-    if (!fw) { return -EINVAL; }
+  if (!fw) {
+    return -EINVAL;
+  }
 
-    iwl_init_notification_wait(&xvt->notif_wait, &alive_wait, alive_cmd, ARRAY_SIZE(alive_cmd),
-                               iwl_alive_fn, &alive_data);
+  iwl_init_notification_wait(&xvt->notif_wait, &alive_wait, alive_cmd, ARRAY_SIZE(alive_cmd),
+                             iwl_alive_fn, &alive_data);
 
-    ret = iwl_trans_start_fw_dbg(
-        xvt->trans, fw, ucode_type == IWL_UCODE_INIT,
-        (xvt->sw_stack_cfg.fw_dbg_flags & ~IWL_XVT_DBG_FLAGS_NO_DEFAULT_TXQ));
+  ret =
+      iwl_trans_start_fw_dbg(xvt->trans, fw, ucode_type == IWL_UCODE_INIT,
+                             (xvt->sw_stack_cfg.fw_dbg_flags & ~IWL_XVT_DBG_FLAGS_NO_DEFAULT_TXQ));
+  if (ret) {
+    iwl_fw_set_current_image(&xvt->fwrt, old_type);
+    iwl_remove_notification(&xvt->notif_wait, &alive_wait);
+    return ret;
+  }
+
+  /*
+   * Some things may run in the background now, but we
+   * just wait for the ALIVE notification here.
+   */
+  ret = iwl_wait_notification(&xvt->notif_wait, &alive_wait, XVT_UCODE_ALIVE_TIMEOUT);
+  if (ret) {
+    iwl_fw_set_current_image(&xvt->fwrt, old_type);
+    return ret;
+  }
+
+  if (!alive_data.valid) {
+    IWL_ERR(xvt, "Loaded ucode is not valid!\n");
+    iwl_fw_set_current_image(&xvt->fwrt, old_type);
+    return -EIO;
+  }
+
+  /* fresh firmware was loaded */
+  xvt->fw_error = false;
+
+  iwl_trans_fw_alive(xvt->trans, alive_data.scd_base_addr);
+
+  ret = iwl_init_paging(&xvt->fwrt, ucode_type);
+  if (ret) {
+    return ret;
+  }
+
+  if (ucode_type == IWL_UCODE_REGULAR) {
+    ret = iwl_xvt_send_dqa_cmd(xvt);
     if (ret) {
-        iwl_fw_set_current_image(&xvt->fwrt, old_type);
-        iwl_remove_notification(&xvt->notif_wait, &alive_wait);
-        return ret;
+      return ret;
     }
+  }
+  /*
+   * Starting from 22000 tx queue allocation must be done after add
+   * station, so it is not part of the init flow.
+   */
+  if (!iwl_xvt_is_unified_fw(xvt) && iwl_xvt_has_default_txq(xvt)) {
+    iwl_trans_txq_enable_cfg(xvt->trans, IWL_XVT_DEFAULT_TX_QUEUE, 0, NULL, 0);
 
-    /*
-     * Some things may run in the background now, but we
-     * just wait for the ALIVE notification here.
-     */
-    ret = iwl_wait_notification(&xvt->notif_wait, &alive_wait, XVT_UCODE_ALIVE_TIMEOUT);
-    if (ret) {
-        iwl_fw_set_current_image(&xvt->fwrt, old_type);
-        return ret;
-    }
+    WARN(iwl_xvt_send_cmd_pdu(xvt, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+         "Failed to configure queue %d on FIFO %d\n", IWL_XVT_DEFAULT_TX_QUEUE,
+         IWL_XVT_DEFAULT_TX_FIFO);
+    xvt->tx_meta_data[XVT_LMAC_0_ID].queue = IWL_XVT_DEFAULT_TX_QUEUE;
+  }
 
-    if (!alive_data.valid) {
-        IWL_ERR(xvt, "Loaded ucode is not valid!\n");
-        iwl_fw_set_current_image(&xvt->fwrt, old_type);
-        return -EIO;
-    }
+  xvt->fw_running = true;
 
-    /* fresh firmware was loaded */
-    xvt->fw_error = false;
-
-    iwl_trans_fw_alive(xvt->trans, alive_data.scd_base_addr);
-
-    ret = iwl_init_paging(&xvt->fwrt, ucode_type);
-    if (ret) { return ret; }
-
-    if (ucode_type == IWL_UCODE_REGULAR) {
-        ret = iwl_xvt_send_dqa_cmd(xvt);
-        if (ret) { return ret; }
-    }
-    /*
-     * Starting from 22000 tx queue allocation must be done after add
-     * station, so it is not part of the init flow.
-     */
-    if (!iwl_xvt_is_unified_fw(xvt) && iwl_xvt_has_default_txq(xvt)) {
-        iwl_trans_txq_enable_cfg(xvt->trans, IWL_XVT_DEFAULT_TX_QUEUE, 0, NULL, 0);
-
-        WARN(iwl_xvt_send_cmd_pdu(xvt, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
-             "Failed to configure queue %d on FIFO %d\n", IWL_XVT_DEFAULT_TX_QUEUE,
-             IWL_XVT_DEFAULT_TX_FIFO);
-        xvt->tx_meta_data[XVT_LMAC_0_ID].queue = IWL_XVT_DEFAULT_TX_QUEUE;
-    }
-
-    xvt->fw_running = true;
-
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_send_extended_config(struct iwl_xvt* xvt) {
-    /*
-     * TODO: once WRT will be implemented in xVT, IWL_INIT_DEBUG_CFG
-     * flag will not always be set
-     */
-    struct iwl_init_extended_cfg_cmd ext_cfg = {
-        .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM) | BIT(IWL_INIT_DEBUG_CFG)),
+  /*
+   * TODO: once WRT will be implemented in xVT, IWL_INIT_DEBUG_CFG
+   * flag will not always be set
+   */
+  struct iwl_init_extended_cfg_cmd ext_cfg = {
+      .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM) | BIT(IWL_INIT_DEBUG_CFG)),
 
-    };
+  };
 
-    if (xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_RUNTIME) {
-        ext_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
-    }
+  if (xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_RUNTIME) {
+    ext_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
+  }
 
-    return iwl_xvt_send_cmd_pdu(xvt, WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD), 0,
-                                sizeof(ext_cfg), &ext_cfg);
+  return iwl_xvt_send_cmd_pdu(xvt, WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD), 0, sizeof(ext_cfg),
+                              &ext_cfg);
 }
 
 int iwl_xvt_run_fw(struct iwl_xvt* xvt, uint32_t ucode_type, bool cont_run) {
-    int ret;
+  int ret;
 
-    if (ucode_type >= IWL_UCODE_TYPE_MAX) { return -EINVAL; }
+  if (ucode_type >= IWL_UCODE_TYPE_MAX) {
+    return -EINVAL;
+  }
 
-    lockdep_assert_held(&xvt->mutex);
+  lockdep_assert_held(&xvt->mutex);
 
-    if (xvt->state != IWL_XVT_STATE_UNINITIALIZED) {
-        if (xvt->fw_running) {
-            xvt->fw_running = false;
-            if (xvt->fwrt.cur_fw_img == IWL_UCODE_REGULAR) { iwl_xvt_txq_disable(xvt); }
-        }
-        _iwl_trans_stop_device(xvt->trans, !cont_run);
+  if (xvt->state != IWL_XVT_STATE_UNINITIALIZED) {
+    if (xvt->fw_running) {
+      xvt->fw_running = false;
+      if (xvt->fwrt.cur_fw_img == IWL_UCODE_REGULAR) {
+        iwl_xvt_txq_disable(xvt);
+      }
     }
+    _iwl_trans_stop_device(xvt->trans, !cont_run);
+  }
 
-    if (cont_run) {
-        ret = _iwl_trans_start_hw(xvt->trans, false);
-    } else {
-        ret = iwl_trans_start_hw(xvt->trans);
-    }
-    if (ret) {
-        IWL_ERR(xvt, "Failed to start HW\n");
-        return ret;
-    }
-
-    iwl_trans_set_bits_mask(xvt->trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
-                            CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-
-    /* Will also start the device */
-    ret = iwl_xvt_load_ucode_wait_alive(xvt, ucode_type);
-    if (ret) {
-        IWL_ERR(xvt, "Failed to start ucode: %d\n", ret);
-        iwl_trans_stop_device(xvt->trans);
-    }
-
-    if (iwl_xvt_is_unified_fw(xvt)) {
-        ret = iwl_xvt_send_extended_config(xvt);
-        if (ret) {
-            IWL_ERR(xvt, "Failed to send extended_config: %d\n", ret);
-            iwl_trans_stop_device(xvt->trans);
-            return ret;
-        }
-    }
-    iwl_dnt_start(xvt->trans);
-
-    xvt->fwrt.dump.conf = FW_DBG_INVALID;
-    /* if we have a destination, assume EARLY START */
-    if (xvt->fw->dbg.dest_tlv) { xvt->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; }
-    iwl_fw_start_dbg_conf(&xvt->fwrt, FW_DBG_START_FROM_ALIVE);
-
+  if (cont_run) {
+    ret = _iwl_trans_start_hw(xvt->trans, false);
+  } else {
+    ret = iwl_trans_start_hw(xvt->trans);
+  }
+  if (ret) {
+    IWL_ERR(xvt, "Failed to start HW\n");
     return ret;
+  }
+
+  iwl_trans_set_bits_mask(xvt->trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
+                          CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+  /* Will also start the device */
+  ret = iwl_xvt_load_ucode_wait_alive(xvt, ucode_type);
+  if (ret) {
+    IWL_ERR(xvt, "Failed to start ucode: %d\n", ret);
+    iwl_trans_stop_device(xvt->trans);
+  }
+
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    ret = iwl_xvt_send_extended_config(xvt);
+    if (ret) {
+      IWL_ERR(xvt, "Failed to send extended_config: %d\n", ret);
+      iwl_trans_stop_device(xvt->trans);
+      return ret;
+    }
+  }
+  iwl_dnt_start(xvt->trans);
+
+  xvt->fwrt.dump.conf = FW_DBG_INVALID;
+  /* if we have a destination, assume EARLY START */
+  if (xvt->fw->dbg.dest_tlv) {
+    xvt->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
+  }
+  iwl_fw_start_dbg_conf(&xvt->fwrt, FW_DBG_START_FROM_ALIVE);
+
+  return ret;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/nvm.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/nvm.c
index 1a890c6..3bdd676 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/nvm.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/nvm.c
@@ -49,13 +49,13 @@
 #define NVM_READ_OPCODE 0
 
 enum wkp_nvm_offsets {
-    /* NVM HW-Section offset (in words) definitions */
-    HW_ADDR = 0x15,
+  /* NVM HW-Section offset (in words) definitions */
+  HW_ADDR = 0x15,
 };
 
 enum ext_nvm_offsets {
-    /* NVM HW-Section offset (in words) definitions */
-    MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,
+  /* NVM HW-Section offset (in words) definitions */
+  MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,
 };
 
 /*
@@ -64,42 +64,44 @@
  */
 static int iwl_nvm_write_chunk(struct iwl_xvt* xvt, uint16_t section, uint16_t offset,
                                uint16_t length, const uint8_t* data) {
-    struct iwl_nvm_access_cmd nvm_access_cmd = {
-        .offset = cpu_to_le16(offset),
-        .length = cpu_to_le16(length),
-        .type = cpu_to_le16(section),
-        .op_code = NVM_WRITE_OPCODE,
-    };
-    struct iwl_host_cmd cmd = {
-        .id = NVM_ACCESS_CMD,
-        .len = {sizeof(struct iwl_nvm_access_cmd), length},
-        .flags = CMD_SEND_IN_RFKILL,
-        .data = {&nvm_access_cmd, data},
-        /* data may come from vmalloc, so use _DUP */
-        .dataflags = {0, IWL_HCMD_DFL_DUP},
-    };
+  struct iwl_nvm_access_cmd nvm_access_cmd = {
+      .offset = cpu_to_le16(offset),
+      .length = cpu_to_le16(length),
+      .type = cpu_to_le16(section),
+      .op_code = NVM_WRITE_OPCODE,
+  };
+  struct iwl_host_cmd cmd = {
+      .id = NVM_ACCESS_CMD,
+      .len = {sizeof(struct iwl_nvm_access_cmd), length},
+      .flags = CMD_SEND_IN_RFKILL,
+      .data = {&nvm_access_cmd, data},
+      /* data may come from vmalloc, so use _DUP */
+      .dataflags = {0, IWL_HCMD_DFL_DUP},
+  };
 
-    return iwl_xvt_send_cmd(xvt, &cmd);
+  return iwl_xvt_send_cmd(xvt, &cmd);
 }
 
 static int iwl_nvm_write_section(struct iwl_xvt* xvt, uint16_t section, const uint8_t* data,
                                  uint16_t length) {
-    int offset = 0;
+  int offset = 0;
 
-    /* copy data in chunks of 2k (and remainder if any) */
+  /* copy data in chunks of 2k (and remainder if any) */
 
-    while (offset < length) {
-        int chunk_size, ret;
+  while (offset < length) {
+    int chunk_size, ret;
 
-        chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE, length - offset);
+    chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE, length - offset);
 
-        ret = iwl_nvm_write_chunk(xvt, section, offset, chunk_size, data + offset);
-        if (ret < 0) { return ret; }
-
-        offset += chunk_size;
+    ret = iwl_nvm_write_chunk(xvt, section, offset, chunk_size, data + offset);
+    if (ret < 0) {
+      return ret;
     }
 
-    return 0;
+    offset += chunk_size;
+  }
+
+  return 0;
 }
 
 #define MAX_NVM_FILE_LEN 16384
@@ -124,17 +126,17 @@
  * 4. save as "iNVM_xxx.bin" under /lib/firmware
  */
 static int iwl_xvt_load_external_nvm(struct iwl_xvt* xvt) {
-    int ret, section_size;
-    uint16_t section_id;
-    const struct firmware* fw_entry;
-    const struct {
-        __le16 word1;
-        __le16 word2;
-        uint8_t data[];
-    } * file_sec;
-    const uint8_t* eof;
-    const __le32* dword_buff;
-    const uint8_t* hw_addr;
+  int ret, section_size;
+  uint16_t section_id;
+  const struct firmware* fw_entry;
+  const struct {
+    __le16 word1;
+    __le16 word2;
+    uint8_t data[];
+  } * file_sec;
+  const uint8_t* eof;
+  const __le32* dword_buff;
+  const uint8_t* hw_addr;
 
 #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
 #define NVM_WORD2_ID(x) (x >> 12)
@@ -144,132 +146,133 @@
 #define NVM_HEADER_1 (0x4E564D2A)
 #define NVM_HEADER_SIZE (4 * sizeof(uint32_t))
 
-    /*
-     * Obtain NVM image via request_firmware. Since we already used
-     * request_firmware_nowait() for the firmware binary load and only
-     * get here after that we assume the NVM request can be satisfied
-     * synchronously.
-     */
-    ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file, xvt->trans->dev);
-    if (ret) {
-        IWL_WARN(xvt, "WARNING: %s isn't available %d\n", iwlwifi_mod_params.nvm_file, ret);
-        return 0;
+  /*
+   * Obtain NVM image via request_firmware. Since we already used
+   * request_firmware_nowait() for the firmware binary load and only
+   * get here after that we assume the NVM request can be satisfied
+   * synchronously.
+   */
+  ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file, xvt->trans->dev);
+  if (ret) {
+    IWL_WARN(xvt, "WARNING: %s isn't available %d\n", iwlwifi_mod_params.nvm_file, ret);
+    return 0;
+  }
+
+  IWL_INFO(xvt, "Loaded NVM file %s (%zu bytes)\n", iwlwifi_mod_params.nvm_file, fw_entry->size);
+
+  if (fw_entry->size > MAX_NVM_FILE_LEN) {
+    IWL_ERR(xvt, "NVM file too large\n");
+    ret = -EINVAL;
+    goto out;
+  }
+
+  eof = fw_entry->data + fw_entry->size;
+  dword_buff = (__le32*)fw_entry->data;
+
+  /* some NVM file will contain a header.
+   * The header is identified by 2 dwords header as follows:
+   * dword[0] = 0x2A504C54
+   * dword[1] = 0x4E564D2A
+   *
+   * This header must be skipped when providing the NVM data to the FW.
+   */
+  if (fw_entry->size > NVM_HEADER_SIZE && dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
+      dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
+    file_sec = (void*)(fw_entry->data + NVM_HEADER_SIZE);
+    IWL_INFO(xvt, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
+    IWL_INFO(xvt, "NVM Manufacturing date %08X\n", le32_to_cpu(dword_buff[3]));
+  } else {
+    file_sec = (void*)fw_entry->data;
+  }
+
+  while (true) {
+    if (file_sec->data > eof) {
+      IWL_ERR(xvt, "ERROR - NVM file too short for section header\n");
+      ret = -EINVAL;
+      break;
     }
 
-    IWL_INFO(xvt, "Loaded NVM file %s (%zu bytes)\n", iwlwifi_mod_params.nvm_file, fw_entry->size);
-
-    if (fw_entry->size > MAX_NVM_FILE_LEN) {
-        IWL_ERR(xvt, "NVM file too large\n");
-        ret = -EINVAL;
-        goto out;
+    /* check for EOF marker */
+    if (!file_sec->word1 && !file_sec->word2) {
+      ret = 0;
+      break;
     }
 
-    eof = fw_entry->data + fw_entry->size;
-    dword_buff = (__le32*)fw_entry->data;
-
-    /* some NVM file will contain a header.
-     * The header is identified by 2 dwords header as follows:
-     * dword[0] = 0x2A504C54
-     * dword[1] = 0x4E564D2A
-     *
-     * This header must be skipped when providing the NVM data to the FW.
-     */
-    if (fw_entry->size > NVM_HEADER_SIZE && dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
-        dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
-        file_sec = (void*)(fw_entry->data + NVM_HEADER_SIZE);
-        IWL_INFO(xvt, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
-        IWL_INFO(xvt, "NVM Manufacturing date %08X\n", le32_to_cpu(dword_buff[3]));
+    if (xvt->trans->cfg->nvm_type != IWL_NVM_EXT) {
+      section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
+      section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
     } else {
-        file_sec = (void*)fw_entry->data;
+      section_size = 2 * EXT_NVM_WORD2_LEN(le16_to_cpu(file_sec->word2));
+      section_id = EXT_NVM_WORD1_ID(le16_to_cpu(file_sec->word1));
     }
 
-    while (true) {
-        if (file_sec->data > eof) {
-            IWL_ERR(xvt, "ERROR - NVM file too short for section header\n");
-            ret = -EINVAL;
-            break;
-        }
-
-        /* check for EOF marker */
-        if (!file_sec->word1 && !file_sec->word2) {
-            ret = 0;
-            break;
-        }
-
-        if (xvt->trans->cfg->nvm_type != IWL_NVM_EXT) {
-            section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
-            section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
-        } else {
-            section_size = 2 * EXT_NVM_WORD2_LEN(le16_to_cpu(file_sec->word2));
-            section_id = EXT_NVM_WORD1_ID(le16_to_cpu(file_sec->word1));
-        }
-
-        if (section_size > IWL_MAX_NVM_SECTION_SIZE) {
-            IWL_ERR(xvt, "ERROR - section too large (%d)\n", section_size);
-            ret = -EINVAL;
-            break;
-        }
-
-        if (!section_size) {
-            IWL_ERR(xvt, "ERROR - section empty\n");
-            ret = -EINVAL;
-            break;
-        }
-
-        if (file_sec->data + section_size > eof) {
-            IWL_ERR(xvt, "ERROR - NVM file too short for section (%d bytes)\n", section_size);
-            ret = -EINVAL;
-            break;
-        }
-
-        if (section_id == xvt->cfg->nvm_hw_section_num) {
-            hw_addr = (const uint8_t*)((const __le16*)file_sec->data + HW_ADDR);
-
-            /* The byte order is little endian 16 bit, meaning 214365 */
-            xvt->nvm_hw_addr[0] = hw_addr[1];
-            xvt->nvm_hw_addr[1] = hw_addr[0];
-            xvt->nvm_hw_addr[2] = hw_addr[3];
-            xvt->nvm_hw_addr[3] = hw_addr[2];
-            xvt->nvm_hw_addr[4] = hw_addr[5];
-            xvt->nvm_hw_addr[5] = hw_addr[4];
-        }
-        if (section_id == NVM_SECTION_TYPE_MAC_OVERRIDE) {
-            xvt->is_nvm_mac_override = true;
-            hw_addr =
-                (const uint8_t*)((const __le16*)file_sec->data + MAC_ADDRESS_OVERRIDE_EXT_NVM);
-
-            /*
-             * Store the MAC address from MAO section.
-             * No byte swapping is required in MAO section.
-             */
-            memcpy(xvt->nvm_hw_addr, hw_addr, ETH_ALEN);
-        }
-
-        ret = iwl_nvm_write_section(xvt, section_id, file_sec->data, section_size);
-        if (ret < 0) {
-            IWL_ERR(xvt, "iwl_mvm_send_cmd failed: %d\n", ret);
-            break;
-        }
-
-        /* advance to the next section */
-        file_sec = (void*)(file_sec->data + section_size);
+    if (section_size > IWL_MAX_NVM_SECTION_SIZE) {
+      IWL_ERR(xvt, "ERROR - section too large (%d)\n", section_size);
+      ret = -EINVAL;
+      break;
     }
+
+    if (!section_size) {
+      IWL_ERR(xvt, "ERROR - section empty\n");
+      ret = -EINVAL;
+      break;
+    }
+
+    if (file_sec->data + section_size > eof) {
+      IWL_ERR(xvt, "ERROR - NVM file too short for section (%d bytes)\n", section_size);
+      ret = -EINVAL;
+      break;
+    }
+
+    if (section_id == xvt->cfg->nvm_hw_section_num) {
+      hw_addr = (const uint8_t*)((const __le16*)file_sec->data + HW_ADDR);
+
+      /* The byte order is little endian 16 bit, meaning 214365 */
+      xvt->nvm_hw_addr[0] = hw_addr[1];
+      xvt->nvm_hw_addr[1] = hw_addr[0];
+      xvt->nvm_hw_addr[2] = hw_addr[3];
+      xvt->nvm_hw_addr[3] = hw_addr[2];
+      xvt->nvm_hw_addr[4] = hw_addr[5];
+      xvt->nvm_hw_addr[5] = hw_addr[4];
+    }
+    if (section_id == NVM_SECTION_TYPE_MAC_OVERRIDE) {
+      xvt->is_nvm_mac_override = true;
+      hw_addr = (const uint8_t*)((const __le16*)file_sec->data + MAC_ADDRESS_OVERRIDE_EXT_NVM);
+
+      /*
+       * Store the MAC address from MAO section.
+       * No byte swapping is required in MAO section.
+       */
+      memcpy(xvt->nvm_hw_addr, hw_addr, ETH_ALEN);
+    }
+
+    ret = iwl_nvm_write_section(xvt, section_id, file_sec->data, section_size);
+    if (ret < 0) {
+      IWL_ERR(xvt, "iwl_mvm_send_cmd failed: %d\n", ret);
+      break;
+    }
+
+    /* advance to the next section */
+    file_sec = (void*)(file_sec->data + section_size);
+  }
 out:
-    release_firmware(fw_entry);
-    return ret;
+  release_firmware(fw_entry);
+  return ret;
 }
 
 int iwl_xvt_nvm_init(struct iwl_xvt* xvt) {
-    int ret;
+  int ret;
 
-    xvt->is_nvm_mac_override = false;
+  xvt->is_nvm_mac_override = false;
 
-    /* load external NVM if configured */
-    if (iwlwifi_mod_params.nvm_file) {
-        /* move to External NVM flow */
-        ret = iwl_xvt_load_external_nvm(xvt);
-        if (ret) { return ret; }
+  /* load external NVM if configured */
+  if (iwlwifi_mod_params.nvm_file) {
+    /* move to External NVM flow */
+    ret = iwl_xvt_load_external_nvm(xvt);
+    if (ret) {
+      return ret;
     }
+  }
 
-    return 0;
+  return 0;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/rx.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/rx.c
index fa6f6b2..bb75ac8a 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/rx.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/rx.c
@@ -31,10 +31,11 @@
  *
  *****************************************************************************/
 
+#include "fw/api/rx.h"
+
 #include <linux/module.h>
 #include <linux/types.h>
 
-#include "fw/api/rx.h"
 #include "fw/dbg.h"
 #include "xvt.h"
 
@@ -45,221 +46,240 @@
  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
  */
 static bool iwl_xvt_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size) {
-    return ieee80211_sn_less(sn1, sn2) && !ieee80211_sn_less(sn1, sn2 - buffer_size);
+  return ieee80211_sn_less(sn1, sn2) && !ieee80211_sn_less(sn1, sn2 - buffer_size);
 }
 
 static void iwl_xvt_release_frames(struct iwl_xvt* xvt, struct iwl_xvt_reorder_buffer* reorder_buf,
                                    uint16_t nssn) {
-    uint16_t ssn = reorder_buf->head_sn;
+  uint16_t ssn = reorder_buf->head_sn;
 
-    lockdep_assert_held(&reorder_buf->lock);
-    IWL_DEBUG_HT(xvt, "reorder: release nssn=%d\n", nssn);
+  lockdep_assert_held(&reorder_buf->lock);
+  IWL_DEBUG_HT(xvt, "reorder: release nssn=%d\n", nssn);
 
-    /* ignore nssn smaller than head sn - this can happen due to timeout */
-    if (iwl_xvt_is_sn_less(nssn, ssn, reorder_buf->buf_size)) { return; }
+  /* ignore nssn smaller than head sn - this can happen due to timeout */
+  if (iwl_xvt_is_sn_less(nssn, ssn, reorder_buf->buf_size)) {
+    return;
+  }
 
-    while (iwl_xvt_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
-        int index = ssn % reorder_buf->buf_size;
-        uint16_t frames_count = reorder_buf->entries[index];
+  while (iwl_xvt_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+    int index = ssn % reorder_buf->buf_size;
+    uint16_t frames_count = reorder_buf->entries[index];
 
-        ssn = ieee80211_sn_inc(ssn);
+    ssn = ieee80211_sn_inc(ssn);
 
-        /*
-         * Reset frame count. Will have more than one frame for A-MSDU.
-         * entries=0 is valid as well since nssn indicates frames were
-         * received.
-         */
-        IWL_DEBUG_HT(xvt, "reorder: deliver index=0x%x\n", index);
+    /*
+     * Reset frame count. Will have more than one frame for A-MSDU.
+     * entries=0 is valid as well since nssn indicates frames were
+     * received.
+     */
+    IWL_DEBUG_HT(xvt, "reorder: deliver index=0x%x\n", index);
 
-        reorder_buf->entries[index] = 0;
-        reorder_buf->num_stored -= frames_count;
-        reorder_buf->stats.released += frames_count;
-    }
-    reorder_buf->head_sn = nssn;
+    reorder_buf->entries[index] = 0;
+    reorder_buf->num_stored -= frames_count;
+    reorder_buf->stats.released += frames_count;
+  }
+  reorder_buf->head_sn = nssn;
 
-    /* don't mess with reorder timer for now */
+  /* don't mess with reorder timer for now */
 }
 
 void iwl_xvt_rx_frame_release(struct iwl_xvt* xvt, struct iwl_rx_packet* pkt) {
-    struct iwl_frame_release* release = (void*)pkt->data;
-    struct iwl_xvt_reorder_buffer* buffer;
-    int baid = release->baid;
+  struct iwl_frame_release* release = (void*)pkt->data;
+  struct iwl_xvt_reorder_buffer* buffer;
+  int baid = release->baid;
 
-    IWL_DEBUG_HT(xvt, "Frame release notification for BAID %u, NSSN %d\n", baid,
-                 le16_to_cpu(release->nssn));
+  IWL_DEBUG_HT(xvt, "Frame release notification for BAID %u, NSSN %d\n", baid,
+               le16_to_cpu(release->nssn));
 
-    if (WARN_ON_ONCE(baid >= IWL_MAX_BAID)) { return; }
+  if (WARN_ON_ONCE(baid >= IWL_MAX_BAID)) {
+    return;
+  }
 
-    buffer = &xvt->reorder_bufs[baid];
-    if (buffer->sta_id == IWL_XVT_INVALID_STA) { return; }
+  buffer = &xvt->reorder_bufs[baid];
+  if (buffer->sta_id == IWL_XVT_INVALID_STA) {
+    return;
+  }
 
-    spin_lock_bh(&buffer->lock);
-    iwl_xvt_release_frames(xvt, buffer, le16_to_cpu(release->nssn));
-    spin_unlock_bh(&buffer->lock);
+  spin_lock_bh(&buffer->lock);
+  iwl_xvt_release_frames(xvt, buffer, le16_to_cpu(release->nssn));
+  spin_unlock_bh(&buffer->lock);
 }
 
 void iwl_xvt_destroy_reorder_buffer(struct iwl_xvt* xvt, struct iwl_xvt_reorder_buffer* buf) {
-    if (buf->sta_id == IWL_XVT_INVALID_STA) { return; }
+  if (buf->sta_id == IWL_XVT_INVALID_STA) {
+    return;
+  }
 
-    spin_lock_bh(&buf->lock);
-    iwl_xvt_release_frames(xvt, buf, ieee80211_sn_add(buf->head_sn, buf->buf_size));
-    buf->sta_id = IWL_XVT_INVALID_STA;
-    spin_unlock_bh(&buf->lock);
+  spin_lock_bh(&buf->lock);
+  iwl_xvt_release_frames(xvt, buf, ieee80211_sn_add(buf->head_sn, buf->buf_size));
+  buf->sta_id = IWL_XVT_INVALID_STA;
+  spin_unlock_bh(&buf->lock);
 }
 
 static bool iwl_xvt_init_reorder_buffer(struct iwl_xvt_reorder_buffer* buf, uint8_t sta_id,
                                         uint8_t tid, uint16_t ssn, uint8_t buf_size) {
-    int j;
+  int j;
 
-    if (WARN_ON(buf_size > ARRAY_SIZE(buf->entries))) { return false; }
+  if (WARN_ON(buf_size > ARRAY_SIZE(buf->entries))) {
+    return false;
+  }
 
-    buf->num_stored = 0;
-    buf->head_sn = ssn;
-    buf->buf_size = buf_size;
-    spin_lock_init(&buf->lock);
-    buf->queue = 0;
-    buf->sta_id = sta_id;
-    buf->tid = tid;
-    for (j = 0; j < buf->buf_size; j++) {
-        buf->entries[j] = 0;
-    }
-    memset(&buf->stats, 0, sizeof(buf->stats));
+  buf->num_stored = 0;
+  buf->head_sn = ssn;
+  buf->buf_size = buf_size;
+  spin_lock_init(&buf->lock);
+  buf->queue = 0;
+  buf->sta_id = sta_id;
+  buf->tid = tid;
+  for (j = 0; j < buf->buf_size; j++) {
+    buf->entries[j] = 0;
+  }
+  memset(&buf->stats, 0, sizeof(buf->stats));
 
-    /* currently there's no need to mess with reorder timer */
-    return true;
+  /* currently there's no need to mess with reorder timer */
+  return true;
 }
 
 bool iwl_xvt_reorder(struct iwl_xvt* xvt, struct iwl_rx_packet* pkt) {
-    struct iwl_rx_mpdu_desc* desc = (void*)pkt->data;
-    struct ieee80211_hdr* hdr;
-    uint32_t reorder = le32_to_cpu(desc->reorder_data);
-    struct iwl_xvt_reorder_buffer* buffer;
-    uint16_t tail;
-    bool last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
-    uint8_t sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
-    bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
-    uint16_t nssn, sn, min_sn;
-    int index;
-    uint8_t baid;
-    uint8_t sta_id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
-    uint8_t tid;
+  struct iwl_rx_mpdu_desc* desc = (void*)pkt->data;
+  struct ieee80211_hdr* hdr;
+  uint32_t reorder = le32_to_cpu(desc->reorder_data);
+  struct iwl_xvt_reorder_buffer* buffer;
+  uint16_t tail;
+  bool last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
+  uint8_t sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+  bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+  uint16_t nssn, sn, min_sn;
+  int index;
+  uint8_t baid;
+  uint8_t sta_id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
+  uint8_t tid;
 
-    baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT;
+  baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT;
 
-    if (baid >= IWL_MAX_BAID) { return false; }
+  if (baid >= IWL_MAX_BAID) {
+    return false;
+  }
 
-    if (xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-        hdr = (void*)(pkt->data + sizeof(struct iwl_rx_mpdu_desc));
-    } else {
-        hdr = (void*)(pkt->data + IWL_RX_DESC_SIZE_V1);
+  if (xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+    hdr = (void*)(pkt->data + sizeof(struct iwl_rx_mpdu_desc));
+  } else {
+    hdr = (void*)(pkt->data + IWL_RX_DESC_SIZE_V1);
+  }
+
+  /* not a data packet */
+  if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) {
+    return false;
+  }
+
+  if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
+    return false;
+  }
+
+  nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
+  sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >> IWL_RX_MPDU_REORDER_SN_SHIFT;
+  min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
+
+  tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+
+  /* Check if buffer needs to be initialized */
+  buffer = &xvt->reorder_bufs[baid];
+  if (buffer->sta_id == IWL_XVT_INVALID_STA) {
+    /* don't initialize until first valid packet comes through */
+    if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
+      return false;
     }
-
-    /* not a data packet */
-    if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) {
-        return false;
+    if (!iwl_xvt_init_reorder_buffer(buffer, sta_id, tid, min_sn, IEEE80211_MAX_AMPDU_BUF_HT)) {
+      return false;
     }
+  }
 
-    if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) { return false; }
+  /* verify sta_id and tid match the reorder buffer params */
+  if (buffer->sta_id != sta_id || buffer->tid != tid) {
+    /* TODO: add add_ba/del_ba notifications */
+    WARN(1, "sta_id/tid doesn't match saved baid params\n");
+    return false;
+  }
 
-    nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
-    sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >> IWL_RX_MPDU_REORDER_SN_SHIFT;
-    min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
+  spin_lock_bh(&buffer->lock);
 
-    tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+  /*
+   * If there was a significant jump in the nssn - adjust.
+   * If the SN is smaller than the NSSN it might need to first go into
+   * the reorder buffer, in which case we just release up to it and the
+   * rest of the function will take care of storing it and releasing up to
+   * the nssn
+   */
+  if (!iwl_xvt_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, buffer->buf_size) ||
+      !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
+    iwl_xvt_release_frames(xvt, buffer, min_sn);
+  }
 
-    /* Check if buffer needs to be initialized */
-    buffer = &xvt->reorder_bufs[baid];
-    if (buffer->sta_id == IWL_XVT_INVALID_STA) {
-        /* don't initialize until first valid packet comes through */
-        if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) { return false; }
-        if (!iwl_xvt_init_reorder_buffer(buffer, sta_id, tid, min_sn, IEEE80211_MAX_AMPDU_BUF_HT)) {
-            return false;
-        }
+  /* drop any outdated packets */
+  if (ieee80211_sn_less(sn, buffer->head_sn)) {
+    goto drop;
+  }
+
+  /* release immediately if allowed by nssn and no stored frames */
+  if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
+    if (iwl_xvt_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) && (!amsdu || last_subframe)) {
+      buffer->head_sn = nssn;
     }
-
-    /* verify sta_id and tid match the reorder buffer params */
-    if (buffer->sta_id != sta_id || buffer->tid != tid) {
-        /* TODO: add add_ba/del_ba notifications */
-        WARN(1, "sta_id/tid doesn't match saved baid params\n");
-        return false;
-    }
-
-    spin_lock_bh(&buffer->lock);
-
-    /*
-     * If there was a significant jump in the nssn - adjust.
-     * If the SN is smaller than the NSSN it might need to first go into
-     * the reorder buffer, in which case we just release up to it and the
-     * rest of the function will take care of storing it and releasing up to
-     * the nssn
-     */
-    if (!iwl_xvt_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, buffer->buf_size) ||
-        !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
-        iwl_xvt_release_frames(xvt, buffer, min_sn);
-    }
-
-    /* drop any outdated packets */
-    if (ieee80211_sn_less(sn, buffer->head_sn)) { goto drop; }
-
-    /* release immediately if allowed by nssn and no stored frames */
-    if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
-        if (iwl_xvt_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
-            (!amsdu || last_subframe)) {
-            buffer->head_sn = nssn;
-        }
-        /* No need to update AMSDU last SN - we are moving the head */
-        spin_unlock_bh(&buffer->lock);
-        buffer->stats.released++;
-        buffer->stats.skipped++;
-        return false;
-    }
-
-    index = sn % buffer->buf_size;
-
-    /*
-     * Check if we already stored this frame
-     * As AMSDU is either received or not as whole, logic is simple:
-     * If we have frames in that position in the buffer and the last frame
-     * originated from AMSDU had a different SN then it is a retransmission.
-     * If it is the same SN then if the subframe index is incrementing it
-     * is the same AMSDU - otherwise it is a retransmission.
-     */
-    tail = buffer->entries[index];
-    if (tail && !amsdu) {
-        goto drop;
-    } else if (tail && (sn != buffer->last_amsdu || buffer->last_sub_index >= sub_frame_idx)) {
-        goto drop;
-    }
-
-    /* put in reorder buffer */
-    buffer->entries[index]++;
-    buffer->num_stored++;
-
-    if (amsdu) {
-        buffer->last_amsdu = sn;
-        buffer->last_sub_index = sub_frame_idx;
-    }
-    buffer->stats.reordered++;
-
-    /*
-     * We cannot trust NSSN for AMSDU sub-frames that are not the last.
-     * The reason is that NSSN advances on the first sub-frame, and may
-     * cause the reorder buffer to advance before all the sub-frames arrive.
-     * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
-     * SN 1. NSSN for first sub frame will be 3 with the result of driver
-     * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
-     * already ahead and it will be dropped.
-     * If the last sub-frame is not on this queue - we will get frame
-     * release notification with up to date NSSN.
-     */
-    if (!amsdu || last_subframe) { iwl_xvt_release_frames(xvt, buffer, nssn); }
-
+    /* No need to update AMSDU last SN - we are moving the head */
     spin_unlock_bh(&buffer->lock);
+    buffer->stats.released++;
+    buffer->stats.skipped++;
+    return false;
+  }
 
-    return true;
+  index = sn % buffer->buf_size;
+
+  /*
+   * Check if we already stored this frame
+   * As AMSDU is either received or not as whole, logic is simple:
+   * If we have frames in that position in the buffer and the last frame
+   * originated from AMSDU had a different SN then it is a retransmission.
+   * If it is the same SN then if the subframe index is incrementing it
+   * is the same AMSDU - otherwise it is a retransmission.
+   */
+  tail = buffer->entries[index];
+  if (tail && !amsdu) {
+    goto drop;
+  } else if (tail && (sn != buffer->last_amsdu || buffer->last_sub_index >= sub_frame_idx)) {
+    goto drop;
+  }
+
+  /* put in reorder buffer */
+  buffer->entries[index]++;
+  buffer->num_stored++;
+
+  if (amsdu) {
+    buffer->last_amsdu = sn;
+    buffer->last_sub_index = sub_frame_idx;
+  }
+  buffer->stats.reordered++;
+
+  /*
+   * We cannot trust NSSN for AMSDU sub-frames that are not the last.
+   * The reason is that NSSN advances on the first sub-frame, and may
+   * cause the reorder buffer to advance before all the sub-frames arrive.
+   * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
+   * SN 1. NSSN for first sub frame will be 3 with the result of driver
+   * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
+   * already ahead and it will be dropped.
+   * If the last sub-frame is not on this queue - we will get frame
+   * release notification with up to date NSSN.
+   */
+  if (!amsdu || last_subframe) {
+    iwl_xvt_release_frames(xvt, buffer, nssn);
+  }
+
+  spin_unlock_bh(&buffer->lock);
+
+  return true;
 
 drop:
-    buffer->stats.dropped++;
-    spin_unlock_bh(&buffer->lock);
-    return true;
+  buffer->stats.dropped++;
+  spin_unlock_bh(&buffer->lock);
+  return true;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/user-infc.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/user-infc.c
index 51bd4e3..2469022 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/user-infc.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/user-infc.c
@@ -34,6 +34,8 @@
  *
  *****************************************************************************/
 
+#include "user-infc.h"
+
 #include <linux/dma-mapping.h>
 #include <linux/etherdevice.h>
 #include <linux/if_ether.h>
@@ -52,7 +54,6 @@
 #include "iwl-phy-db.h"
 #include "iwl-prph.h"
 #include "iwl-trans.h"
-#include "user-infc.h"
 #include "xvt.h"
 
 #define XVT_UCODE_CALIB_TIMEOUT (CPTCFG_IWL_TIMEOUT_FACTOR * HZ)
@@ -66,87 +67,91 @@
 #define XVT_STOP_TX (IEEE80211_SCTL_FRAG + 1)
 
 void iwl_xvt_send_user_rx_notif(struct iwl_xvt* xvt, struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
-    void* data = pkt->data;
-    uint32_t size = iwl_rx_packet_payload_len(pkt);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  void* data = pkt->data;
+  uint32_t size = iwl_rx_packet_payload_len(pkt);
 
-    IWL_DEBUG_INFO(xvt, "rx notification: group=0x%x, id=0x%x\n", pkt->hdr.group_id, pkt->hdr.cmd);
+  IWL_DEBUG_INFO(xvt, "rx notification: group=0x%x, id=0x%x\n", pkt->hdr.group_id, pkt->hdr.cmd);
 
-    switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+  switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
     case WIDE_ID(LONG_GROUP, GET_SET_PHY_DB_CMD):
-        iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_PHY_DB, data, size, GFP_ATOMIC);
-        break;
+      iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_PHY_DB, data, size, GFP_ATOMIC);
+      break;
     case DTS_MEASUREMENT_NOTIFICATION:
     case WIDE_ID(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE):
-        iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_DTS_MEASUREMENTS, data, size,
-                                GFP_ATOMIC);
-        break;
+      iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_DTS_MEASUREMENTS, data, size, GFP_ATOMIC);
+      break;
     case REPLY_RX_DSP_EXT_INFO:
-        if (!xvt->rx_hdr_enabled) { break; }
-
-        iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_RX_HDR, data, size, GFP_ATOMIC);
+      if (!xvt->rx_hdr_enabled) {
         break;
+      }
+
+      iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_RX_HDR, data, size, GFP_ATOMIC);
+      break;
     case APMG_PD_SV_CMD:
-        if (!xvt->apmg_pd_en) { break; }
-
-        iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_APMG_PD, data, size, GFP_ATOMIC);
+      if (!xvt->apmg_pd_en) {
         break;
+      }
+
+      iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_APMG_PD, data, size, GFP_ATOMIC);
+      break;
     case REPLY_RX_MPDU_CMD:
-        if (!xvt->send_rx_mpdu) { break; }
+      if (!xvt->send_rx_mpdu) {
+        break;
+      }
 
-        iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_UCODE_RX_PKT, data, size, GFP_ATOMIC);
-        break;
+      iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_UCODE_RX_PKT, data, size, GFP_ATOMIC);
+      break;
     case NVM_COMMIT_COMPLETE_NOTIFICATION:
-        iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_COMMIT_STATISTICS, data, size,
-                                GFP_ATOMIC);
-        break;
+      iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_COMMIT_STATISTICS, data, size, GFP_ATOMIC);
+      break;
     case REPLY_HD_PARAMS_CMD:
-        iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_BFE, data, size, GFP_ATOMIC);
-        break;
+      iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_BFE, data, size, GFP_ATOMIC);
+      break;
     case DEBUG_LOG_MSG:
-        iwl_dnt_dispatch_collect_ucode_message(xvt->trans, rxb);
-        break;
+      iwl_dnt_dispatch_collect_ucode_message(xvt->trans, rxb);
+      break;
     case WIDE_ID(TOF_GROUP, TOF_MCSI_DEBUG_NOTIF):
-        iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_LOC_MCSI, data, size, GFP_ATOMIC);
-        break;
+      iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_LOC_MCSI, data, size, GFP_ATOMIC);
+      break;
     case WIDE_ID(TOF_GROUP, TOF_RANGE_RESPONSE_NOTIF):
-        iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_LOC_RANGE, data, size, GFP_ATOMIC);
-        break;
+      iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_LOC_RANGE, data, size, GFP_ATOMIC);
+      break;
     case WIDE_ID(XVT_GROUP, IQ_CALIB_CONFIG_NOTIF):
-        iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_IQ_CALIB, data, size, GFP_ATOMIC);
-        break;
+      iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_IQ_CALIB, data, size, GFP_ATOMIC);
+      break;
     case WIDE_ID(PHY_OPS_GROUP, CT_KILL_NOTIFICATION):
-        iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_CT_KILL, data, size, GFP_ATOMIC);
-        break;
+      iwl_xvt_user_send_notif(xvt, IWL_TM_USER_CMD_NOTIF_CT_KILL, data, size, GFP_ATOMIC);
+      break;
     case REPLY_RX_PHY_CMD:
-        IWL_DEBUG_INFO(xvt, "REPLY_RX_PHY_CMD received but not handled\n");
-        break;
+      IWL_DEBUG_INFO(xvt, "REPLY_RX_PHY_CMD received but not handled\n");
+      break;
     case INIT_COMPLETE_NOTIF:
-        IWL_DEBUG_INFO(xvt, "received INIT_COMPLETE_NOTIF\n");
-        break;
+      IWL_DEBUG_INFO(xvt, "received INIT_COMPLETE_NOTIF\n");
+      break;
     case TX_CMD:
-        if (xvt->send_tx_resp) {
-            iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_TX_CMD_RESP, data, size, GFP_ATOMIC);
-        }
-        break;
+      if (xvt->send_tx_resp) {
+        iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_TX_CMD_RESP, data, size, GFP_ATOMIC);
+      }
+      break;
     default:
-        IWL_DEBUG_INFO(xvt, "xVT mode RX command 0x%x not handled\n", pkt->hdr.cmd);
-    }
+      IWL_DEBUG_INFO(xvt, "xVT mode RX command 0x%x not handled\n", pkt->hdr.cmd);
+  }
 }
 
 static void iwl_xvt_led_enable(struct iwl_xvt* xvt) {
-    iwl_write32(xvt->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
+  iwl_write32(xvt->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
 }
 
 static void iwl_xvt_led_disable(struct iwl_xvt* xvt) {
-    iwl_write32(xvt->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF);
+  iwl_write32(xvt->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF);
 }
 
 static int iwl_xvt_sdio_io_toggle(struct iwl_xvt* xvt, struct iwl_tm_data* data_in,
                                   struct iwl_tm_data* data_out) {
-    struct iwl_tm_sdio_io_toggle* sdio_io_toggle = data_in->data;
+  struct iwl_tm_sdio_io_toggle* sdio_io_toggle = data_in->data;
 
-    return iwl_trans_test_mode_cmd(xvt->trans, sdio_io_toggle->enable);
+  return iwl_trans_test_mode_cmd(xvt->trans, sdio_io_toggle->enable);
 }
 
 /**
@@ -155,277 +160,302 @@
  * Return: the SV drop (>= 0) or a negative error number
  */
 static int iwl_xvt_read_sv_drop(struct iwl_xvt* xvt) {
-    struct xvt_debug_cmd debug_cmd = {
-        .opcode = cpu_to_le32(XVT_DBG_GET_SVDROP_VER_OP),
-        .dw_num = 0,
-    };
-    struct xvt_debug_res* debug_res;
-    struct iwl_rx_packet* pkt;
-    struct iwl_host_cmd host_cmd = {
-        .id = REPLY_DEBUG_XVT_CMD,
-        .data[0] = &debug_cmd,
-        .len[0] = sizeof(debug_cmd),
-        .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-        .flags = CMD_WANT_SKB,
-    };
-    int ret;
+  struct xvt_debug_cmd debug_cmd = {
+      .opcode = cpu_to_le32(XVT_DBG_GET_SVDROP_VER_OP),
+      .dw_num = 0,
+  };
+  struct xvt_debug_res* debug_res;
+  struct iwl_rx_packet* pkt;
+  struct iwl_host_cmd host_cmd = {
+      .id = REPLY_DEBUG_XVT_CMD,
+      .data[0] = &debug_cmd,
+      .len[0] = sizeof(debug_cmd),
+      .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+      .flags = CMD_WANT_SKB,
+  };
+  int ret;
 
-    if (xvt->state != IWL_XVT_STATE_OPERATIONAL) { return 0; }
+  if (xvt->state != IWL_XVT_STATE_OPERATIONAL) {
+    return 0;
+  }
 
-    ret = iwl_xvt_send_cmd(xvt, &host_cmd);
-    if (ret) { return ret; }
+  ret = iwl_xvt_send_cmd(xvt, &host_cmd);
+  if (ret) {
+    return ret;
+  }
 
-    /* Retrieve response packet */
-    pkt = host_cmd.resp_pkt;
+  /* Retrieve response packet */
+  pkt = host_cmd.resp_pkt;
 
-    /* Get response data */
-    debug_res = (struct xvt_debug_res*)pkt->data;
-    if (le32_to_cpu(debug_res->dw_num) < 1) {
-        ret = -ENODATA;
-        goto out;
-    }
-    ret = le32_to_cpu(debug_res->data[0]) & 0xFF;
+  /* Get response data */
+  debug_res = (struct xvt_debug_res*)pkt->data;
+  if (le32_to_cpu(debug_res->dw_num) < 1) {
+    ret = -ENODATA;
+    goto out;
+  }
+  ret = le32_to_cpu(debug_res->data[0]) & 0xFF;
 
 out:
-    iwl_free_resp(&host_cmd);
-    return ret;
+  iwl_free_resp(&host_cmd);
+  return ret;
 }
 
 static int iwl_xvt_get_dev_info(struct iwl_xvt* xvt, struct iwl_tm_data* data_in,
                                 struct iwl_tm_data* data_out) {
-    struct iwl_tm_dev_info_req* dev_info_req;
-    struct iwl_tm_dev_info* dev_info;
-    const uint8_t driver_ver[] = BACKPORTS_GIT_TRACKED;
-    int sv_step = 0x00;
-    int dev_info_size;
-    bool read_sv_drop = true;
+  struct iwl_tm_dev_info_req* dev_info_req;
+  struct iwl_tm_dev_info* dev_info;
+  const uint8_t driver_ver[] = BACKPORTS_GIT_TRACKED;
+  int sv_step = 0x00;
+  int dev_info_size;
+  bool read_sv_drop = true;
 
-    if (data_in) {
-        dev_info_req = (struct iwl_tm_dev_info_req*)data_in->data;
-        read_sv_drop = dev_info_req->read_sv ? true : false;
+  if (data_in) {
+    dev_info_req = (struct iwl_tm_dev_info_req*)data_in->data;
+    read_sv_drop = dev_info_req->read_sv ? true : false;
+  }
+
+  if (xvt->fwrt.cur_fw_img == IWL_UCODE_REGULAR && read_sv_drop) {
+    sv_step = iwl_xvt_read_sv_drop(xvt);
+    if (sv_step < 0) {
+      return sv_step;
     }
+  }
 
-    if (xvt->fwrt.cur_fw_img == IWL_UCODE_REGULAR && read_sv_drop) {
-        sv_step = iwl_xvt_read_sv_drop(xvt);
-        if (sv_step < 0) { return sv_step; }
-    }
+  dev_info_size = sizeof(struct iwl_tm_dev_info) + (strlen(driver_ver) + 1) * sizeof(uint8_t);
+  dev_info = kzalloc(dev_info_size, GFP_KERNEL);
+  if (!dev_info) {
+    return -ENOMEM;
+  }
 
-    dev_info_size = sizeof(struct iwl_tm_dev_info) + (strlen(driver_ver) + 1) * sizeof(uint8_t);
-    dev_info = kzalloc(dev_info_size, GFP_KERNEL);
-    if (!dev_info) { return -ENOMEM; }
+  dev_info->dev_id = xvt->trans->hw_id;
+  dev_info->fw_ver = xvt->fw->ucode_ver;
+  dev_info->vendor_id = PCI_VENDOR_ID_INTEL;
+  dev_info->build_ver = sv_step;
 
-    dev_info->dev_id = xvt->trans->hw_id;
-    dev_info->fw_ver = xvt->fw->ucode_ver;
-    dev_info->vendor_id = PCI_VENDOR_ID_INTEL;
-    dev_info->build_ver = sv_step;
+  /*
+   * TODO: Silicon step is retrieved by reading
+   * radio register 0x00. Simplifying implementation
+   * by reading it in user space.
+   */
+  dev_info->silicon_step = 0x00;
 
-    /*
-     * TODO: Silicon step is retrieved by reading
-     * radio register 0x00. Simplifying implementation
-     * by reading it in user space.
-     */
-    dev_info->silicon_step = 0x00;
+  strcpy(dev_info->driver_ver, driver_ver);
 
-    strcpy(dev_info->driver_ver, driver_ver);
+  data_out->data = dev_info;
+  data_out->len = dev_info_size;
 
-    data_out->data = dev_info;
-    data_out->len = dev_info_size;
-
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_set_sw_config(struct iwl_xvt* xvt, struct iwl_tm_data* data_in) {
-    struct iwl_xvt_sw_cfg_request* sw_cfg = (struct iwl_xvt_sw_cfg_request*)data_in->data;
-    struct iwl_phy_cfg_cmd* fw_calib_cmd_cfg = xvt->sw_stack_cfg.fw_calib_cmd_cfg;
-    __le32 cfg_mask = cpu_to_le32(sw_cfg->cfg_mask), fw_calib_event, fw_calib_flow, event_override,
-           flow_override;
-    int usr_idx, iwl_idx;
+  struct iwl_xvt_sw_cfg_request* sw_cfg = (struct iwl_xvt_sw_cfg_request*)data_in->data;
+  struct iwl_phy_cfg_cmd* fw_calib_cmd_cfg = xvt->sw_stack_cfg.fw_calib_cmd_cfg;
+  __le32 cfg_mask = cpu_to_le32(sw_cfg->cfg_mask), fw_calib_event, fw_calib_flow, event_override,
+         flow_override;
+  int usr_idx, iwl_idx;
 
-    if (data_in->len < sizeof(struct iwl_xvt_sw_cfg_request)) { return -EINVAL; }
+  if (data_in->len < sizeof(struct iwl_xvt_sw_cfg_request)) {
+    return -EINVAL;
+  }
 
-    xvt->sw_stack_cfg.fw_dbg_flags = sw_cfg->dbg_flags;
-    xvt->sw_stack_cfg.load_mask = sw_cfg->load_mask;
-    xvt->sw_stack_cfg.calib_override_mask = sw_cfg->cfg_mask;
+  xvt->sw_stack_cfg.fw_dbg_flags = sw_cfg->dbg_flags;
+  xvt->sw_stack_cfg.load_mask = sw_cfg->load_mask;
+  xvt->sw_stack_cfg.calib_override_mask = sw_cfg->cfg_mask;
 
-    for (usr_idx = 0; usr_idx < IWL_USER_FW_IMAGE_IDX_TYPE_MAX; usr_idx++) {
-        switch (usr_idx) {
-        case IWL_USER_FW_IMAGE_IDX_INIT:
-            iwl_idx = IWL_UCODE_INIT;
-            break;
-        case IWL_USER_FW_IMAGE_IDX_REGULAR:
-            iwl_idx = IWL_UCODE_REGULAR;
-            break;
-        case IWL_USER_FW_IMAGE_IDX_WOWLAN:
-            iwl_idx = IWL_UCODE_WOWLAN;
-            break;
-        }
-        /* TODO: Calculate PHY config according to device values */
-        fw_calib_cmd_cfg[iwl_idx].phy_cfg = cpu_to_le32(xvt->fw->phy_config);
-
-        /*
-         * If a cfg_mask bit is unset, take the default value
-         * from the FW. Otherwise, take the value from sw_cfg.
-         */
-        fw_calib_event = xvt->fw->default_calib[iwl_idx].event_trigger;
-        event_override = cpu_to_le32(sw_cfg->calib_ctrl[usr_idx].event_trigger);
-
-        fw_calib_cmd_cfg[iwl_idx].calib_control.event_trigger =
-            (~cfg_mask & fw_calib_event) | (cfg_mask & event_override);
-
-        fw_calib_flow = xvt->fw->default_calib[iwl_idx].flow_trigger;
-        flow_override = cpu_to_le32(sw_cfg->calib_ctrl[usr_idx].flow_trigger);
-
-        fw_calib_cmd_cfg[iwl_idx].calib_control.flow_trigger =
-            (~cfg_mask & fw_calib_flow) | (cfg_mask & flow_override);
+  for (usr_idx = 0; usr_idx < IWL_USER_FW_IMAGE_IDX_TYPE_MAX; usr_idx++) {
+    switch (usr_idx) {
+      case IWL_USER_FW_IMAGE_IDX_INIT:
+        iwl_idx = IWL_UCODE_INIT;
+        break;
+      case IWL_USER_FW_IMAGE_IDX_REGULAR:
+        iwl_idx = IWL_UCODE_REGULAR;
+        break;
+      case IWL_USER_FW_IMAGE_IDX_WOWLAN:
+        iwl_idx = IWL_UCODE_WOWLAN;
+        break;
     }
+    /* TODO: Calculate PHY config according to device values */
+    fw_calib_cmd_cfg[iwl_idx].phy_cfg = cpu_to_le32(xvt->fw->phy_config);
 
-    return 0;
+    /*
+     * If a cfg_mask bit is unset, take the default value
+     * from the FW. Otherwise, take the value from sw_cfg.
+     */
+    fw_calib_event = xvt->fw->default_calib[iwl_idx].event_trigger;
+    event_override = cpu_to_le32(sw_cfg->calib_ctrl[usr_idx].event_trigger);
+
+    fw_calib_cmd_cfg[iwl_idx].calib_control.event_trigger =
+        (~cfg_mask & fw_calib_event) | (cfg_mask & event_override);
+
+    fw_calib_flow = xvt->fw->default_calib[iwl_idx].flow_trigger;
+    flow_override = cpu_to_le32(sw_cfg->calib_ctrl[usr_idx].flow_trigger);
+
+    fw_calib_cmd_cfg[iwl_idx].calib_control.flow_trigger =
+        (~cfg_mask & fw_calib_flow) | (cfg_mask & flow_override);
+  }
+
+  return 0;
 }
 
 static int iwl_xvt_get_sw_config(struct iwl_xvt* xvt, struct iwl_tm_data* data_in,
                                  struct iwl_tm_data* data_out) {
-    struct iwl_xvt_sw_cfg_request* get_cfg_req;
-    struct iwl_xvt_sw_cfg_request* sw_cfg;
-    struct iwl_phy_cfg_cmd* fw_calib_cmd_cfg = xvt->sw_stack_cfg.fw_calib_cmd_cfg;
-    __le32 event_trigger, flow_trigger;
-    int i, u;
+  struct iwl_xvt_sw_cfg_request* get_cfg_req;
+  struct iwl_xvt_sw_cfg_request* sw_cfg;
+  struct iwl_phy_cfg_cmd* fw_calib_cmd_cfg = xvt->sw_stack_cfg.fw_calib_cmd_cfg;
+  __le32 event_trigger, flow_trigger;
+  int i, u;
 
-    if (data_in->len < sizeof(struct iwl_xvt_sw_cfg_request)) { return -EINVAL; }
+  if (data_in->len < sizeof(struct iwl_xvt_sw_cfg_request)) {
+    return -EINVAL;
+  }
 
-    get_cfg_req = data_in->data;
-    sw_cfg = kzalloc(sizeof(*sw_cfg), GFP_KERNEL);
-    if (!sw_cfg) { return -ENOMEM; }
+  get_cfg_req = data_in->data;
+  sw_cfg = kzalloc(sizeof(*sw_cfg), GFP_KERNEL);
+  if (!sw_cfg) {
+    return -ENOMEM;
+  }
 
-    sw_cfg->load_mask = xvt->sw_stack_cfg.load_mask;
-    sw_cfg->phy_config = xvt->fw->phy_config;
-    sw_cfg->cfg_mask = xvt->sw_stack_cfg.calib_override_mask;
-    sw_cfg->dbg_flags = xvt->sw_stack_cfg.fw_dbg_flags;
-    for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) {
-        switch (i) {
-        case IWL_UCODE_INIT:
-            u = IWL_USER_FW_IMAGE_IDX_INIT;
-            break;
-        case IWL_UCODE_REGULAR:
-            u = IWL_USER_FW_IMAGE_IDX_REGULAR;
-            break;
-        case IWL_UCODE_WOWLAN:
-            u = IWL_USER_FW_IMAGE_IDX_WOWLAN;
-            break;
-        case IWL_UCODE_REGULAR_USNIFFER:
-            continue;
-        }
-        if (get_cfg_req->get_calib_type == IWL_XVT_GET_CALIB_TYPE_DEF) {
-            event_trigger = xvt->fw->default_calib[i].event_trigger;
-            flow_trigger = xvt->fw->default_calib[i].flow_trigger;
-        } else {
-            event_trigger = fw_calib_cmd_cfg[i].calib_control.event_trigger;
-            flow_trigger = fw_calib_cmd_cfg[i].calib_control.flow_trigger;
-        }
-        sw_cfg->calib_ctrl[u].event_trigger = le32_to_cpu(event_trigger);
-        sw_cfg->calib_ctrl[u].flow_trigger = le32_to_cpu(flow_trigger);
+  sw_cfg->load_mask = xvt->sw_stack_cfg.load_mask;
+  sw_cfg->phy_config = xvt->fw->phy_config;
+  sw_cfg->cfg_mask = xvt->sw_stack_cfg.calib_override_mask;
+  sw_cfg->dbg_flags = xvt->sw_stack_cfg.fw_dbg_flags;
+  for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) {
+    switch (i) {
+      case IWL_UCODE_INIT:
+        u = IWL_USER_FW_IMAGE_IDX_INIT;
+        break;
+      case IWL_UCODE_REGULAR:
+        u = IWL_USER_FW_IMAGE_IDX_REGULAR;
+        break;
+      case IWL_UCODE_WOWLAN:
+        u = IWL_USER_FW_IMAGE_IDX_WOWLAN;
+        break;
+      case IWL_UCODE_REGULAR_USNIFFER:
+        continue;
     }
+    if (get_cfg_req->get_calib_type == IWL_XVT_GET_CALIB_TYPE_DEF) {
+      event_trigger = xvt->fw->default_calib[i].event_trigger;
+      flow_trigger = xvt->fw->default_calib[i].flow_trigger;
+    } else {
+      event_trigger = fw_calib_cmd_cfg[i].calib_control.event_trigger;
+      flow_trigger = fw_calib_cmd_cfg[i].calib_control.flow_trigger;
+    }
+    sw_cfg->calib_ctrl[u].event_trigger = le32_to_cpu(event_trigger);
+    sw_cfg->calib_ctrl[u].flow_trigger = le32_to_cpu(flow_trigger);
+  }
 
-    data_out->data = sw_cfg;
-    data_out->len = sizeof(*sw_cfg);
-    return 0;
+  data_out->data = sw_cfg;
+  data_out->len = sizeof(*sw_cfg);
+  return 0;
 }
 
 static int iwl_xvt_send_phy_cfg_cmd(struct iwl_xvt* xvt, uint32_t ucode_type) {
-    struct iwl_phy_cfg_cmd* calib_cmd_cfg = &xvt->sw_stack_cfg.fw_calib_cmd_cfg[ucode_type];
-    int err;
+  struct iwl_phy_cfg_cmd* calib_cmd_cfg = &xvt->sw_stack_cfg.fw_calib_cmd_cfg[ucode_type];
+  int err;
 
-    IWL_DEBUG_INFO(xvt, "Sending Phy CFG command: 0x%x\n", calib_cmd_cfg->phy_cfg);
+  IWL_DEBUG_INFO(xvt, "Sending Phy CFG command: 0x%x\n", calib_cmd_cfg->phy_cfg);
 
-    /* ESL workaround - calibration is not allowed */
-    if (CPTCFG_IWL_TIMEOUT_FACTOR > 20) {
-        calib_cmd_cfg->calib_control.event_trigger = 0;
-        calib_cmd_cfg->calib_control.flow_trigger = 0;
-    }
+  /* ESL workaround - calibration is not allowed */
+  if (CPTCFG_IWL_TIMEOUT_FACTOR > 20) {
+    calib_cmd_cfg->calib_control.event_trigger = 0;
+    calib_cmd_cfg->calib_control.flow_trigger = 0;
+  }
 
-    /* Sending calibration configuration control data */
-    err =
-        iwl_xvt_send_cmd_pdu(xvt, PHY_CONFIGURATION_CMD, 0, sizeof(*calib_cmd_cfg), calib_cmd_cfg);
-    if (err) { IWL_ERR(xvt, "Error (%d) running INIT calibrations control\n", err); }
+  /* Sending calibration configuration control data */
+  err = iwl_xvt_send_cmd_pdu(xvt, PHY_CONFIGURATION_CMD, 0, sizeof(*calib_cmd_cfg), calib_cmd_cfg);
+  if (err) {
+    IWL_ERR(xvt, "Error (%d) running INIT calibrations control\n", err);
+  }
 
-    return err;
+  return err;
 }
 
 static int iwl_xvt_continue_init_unified(struct iwl_xvt* xvt) {
-    struct iwl_nvm_access_complete_cmd nvm_complete = {};
-    struct iwl_notification_wait init_complete_wait;
-    static const uint16_t init_complete[] = {INIT_COMPLETE_NOTIF};
-    int err;
+  struct iwl_nvm_access_complete_cmd nvm_complete = {};
+  struct iwl_notification_wait init_complete_wait;
+  static const uint16_t init_complete[] = {INIT_COMPLETE_NOTIF};
+  int err;
 
-    err = iwl_xvt_send_cmd_pdu(xvt, WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_ACCESS_COMPLETE), 0,
-                               sizeof(nvm_complete), &nvm_complete);
-    if (err) { goto init_error; }
+  err = iwl_xvt_send_cmd_pdu(xvt, WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_ACCESS_COMPLETE), 0,
+                             sizeof(nvm_complete), &nvm_complete);
+  if (err) {
+    goto init_error;
+  }
 
-    xvt->state = IWL_XVT_STATE_OPERATIONAL;
+  xvt->state = IWL_XVT_STATE_OPERATIONAL;
 
-    iwl_init_notification_wait(&xvt->notif_wait, &init_complete_wait, init_complete,
-                               sizeof(init_complete), NULL, NULL);
+  iwl_init_notification_wait(&xvt->notif_wait, &init_complete_wait, init_complete,
+                             sizeof(init_complete), NULL, NULL);
 
-    err = iwl_xvt_send_phy_cfg_cmd(xvt, IWL_UCODE_REGULAR);
-    if (err) {
-        iwl_remove_notification(&xvt->notif_wait, &init_complete_wait);
-        goto init_error;
-    }
+  err = iwl_xvt_send_phy_cfg_cmd(xvt, IWL_UCODE_REGULAR);
+  if (err) {
+    iwl_remove_notification(&xvt->notif_wait, &init_complete_wait);
+    goto init_error;
+  }
 
-    err = iwl_wait_notification(&xvt->notif_wait, &init_complete_wait, XVT_UCODE_CALIB_TIMEOUT);
-    if (err) { goto init_error; }
-    return 0;
+  err = iwl_wait_notification(&xvt->notif_wait, &init_complete_wait, XVT_UCODE_CALIB_TIMEOUT);
+  if (err) {
+    goto init_error;
+  }
+  return 0;
 init_error:
-    xvt->state = IWL_XVT_STATE_UNINITIALIZED;
-    iwl_trans_stop_device(xvt->trans);
-    return err;
+  xvt->state = IWL_XVT_STATE_UNINITIALIZED;
+  iwl_trans_stop_device(xvt->trans);
+  return err;
 }
 static int iwl_xvt_run_runtime_fw(struct iwl_xvt* xvt, bool cont_run) {
-    int err;
+  int err;
 
-    err = iwl_xvt_run_fw(xvt, IWL_UCODE_REGULAR, cont_run);
-    if (err) { goto fw_error; }
+  err = iwl_xvt_run_fw(xvt, IWL_UCODE_REGULAR, cont_run);
+  if (err) {
+    goto fw_error;
+  }
 
-    xvt->state = IWL_XVT_STATE_OPERATIONAL;
+  xvt->state = IWL_XVT_STATE_OPERATIONAL;
 
-    if (iwl_xvt_is_unified_fw(xvt)) {
-        err = iwl_xvt_nvm_init(xvt);
-        if (err) {
-            IWL_ERR(xvt, "Failed to read NVM: %d\n", err);
-            return err;
-        }
-        return iwl_xvt_continue_init_unified(xvt);
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    err = iwl_xvt_nvm_init(xvt);
+    if (err) {
+      IWL_ERR(xvt, "Failed to read NVM: %d\n", err);
+      return err;
     }
+    return iwl_xvt_continue_init_unified(xvt);
+  }
 
-    /* Send phy db control command and then phy db calibration*/
-    err = iwl_send_phy_db_data(xvt->phy_db);
-    if (err) { goto phy_error; }
+  /* Send phy db control command and then phy db calibration */
+  err = iwl_send_phy_db_data(xvt->phy_db);
+  if (err) {
+    goto phy_error;
+  }
 
-    err = iwl_xvt_send_phy_cfg_cmd(xvt, IWL_UCODE_REGULAR);
-    if (err) { goto phy_error; }
+  err = iwl_xvt_send_phy_cfg_cmd(xvt, IWL_UCODE_REGULAR);
+  if (err) {
+    goto phy_error;
+  }
 
-    return 0;
+  return 0;
 
 phy_error:
-    iwl_trans_stop_device(xvt->trans);
+  iwl_trans_stop_device(xvt->trans);
 
 fw_error:
-    xvt->state = IWL_XVT_STATE_UNINITIALIZED;
+  xvt->state = IWL_XVT_STATE_UNINITIALIZED;
 
-    return err;
+  return err;
 }
 
 static bool iwl_xvt_wait_phy_db_entry(struct iwl_notif_wait_data* notif_wait,
                                       struct iwl_rx_packet* pkt, void* data) {
-    struct iwl_phy_db* phy_db = data;
+  struct iwl_phy_db* phy_db = data;
 
-    if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
-        WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
-        return true;
-    }
+  if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
+    WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+    return true;
+  }
 
-    WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
+  WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
 
-    return false;
+  return false;
 }
 
 /*
@@ -434,72 +464,80 @@
  * to user.
  */
 static int iwl_xvt_start_op_mode(struct iwl_xvt* xvt) {
-    int err = 0;
-    uint32_t ucode_type = IWL_UCODE_INIT;
+  int err = 0;
+  uint32_t ucode_type = IWL_UCODE_INIT;
 
-    /*
-     * If init FW and runtime FW are both enabled,
-     * Runtime FW will be executed after "continue
-     * initialization" is done.
-     * If init FW is disabled and runtime FW is
-     * enabled, run Runtime FW. If runtime fw is
-     * disabled, do nothing.
-     */
-    if (!(xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_INIT)) {
-        if (xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_RUNTIME) {
-            err = iwl_xvt_run_runtime_fw(xvt, false);
-        } else {
-            if (xvt->state != IWL_XVT_STATE_UNINITIALIZED) {
-                xvt->fw_running = false;
-                iwl_trans_stop_device(xvt->trans);
-            }
-            err = iwl_trans_start_hw(xvt->trans);
-            if (err) {
-                IWL_ERR(xvt, "Failed to start HW\n");
-            } else {
-                iwl_write32(xvt->trans, CSR_RESET, 0);
-                xvt->state = IWL_XVT_STATE_NO_FW;
-            }
-        }
-
-        return err;
+  /*
+   * If init FW and runtime FW are both enabled,
+   * Runtime FW will be executed after "continue
+   * initialization" is done.
+   * If init FW is disabled and runtime FW is
+   * enabled, run Runtime FW. If runtime fw is
+   * disabled, do nothing.
+   */
+  if (!(xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_INIT)) {
+    if (xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_RUNTIME) {
+      err = iwl_xvt_run_runtime_fw(xvt, false);
+    } else {
+      if (xvt->state != IWL_XVT_STATE_UNINITIALIZED) {
+        xvt->fw_running = false;
+        iwl_trans_stop_device(xvt->trans);
+      }
+      err = iwl_trans_start_hw(xvt->trans);
+      if (err) {
+        IWL_ERR(xvt, "Failed to start HW\n");
+      } else {
+        iwl_write32(xvt->trans, CSR_RESET, 0);
+        xvt->state = IWL_XVT_STATE_NO_FW;
+      }
     }
 
-    /* when fw image is unified, only regular ucode is loaded. */
-    if (iwl_xvt_is_unified_fw(xvt)) { ucode_type = IWL_UCODE_REGULAR; }
-    err = iwl_xvt_run_fw(xvt, ucode_type, false);
-    if (err) { return err; }
-
-    xvt->state = IWL_XVT_STATE_INIT_STARTED;
-
-    err = iwl_xvt_nvm_init(xvt);
-    if (err) { IWL_ERR(xvt, "Failed to read NVM: %d\n", err); }
-
-    /*
-     * The initialization flow is not yet complete.
-     * User need to execute "Continue initialization"
-     * flow in order to complete it.
-     *
-     * NOT sending ALIVE notification to user. User
-     * knows that FW is alive when "start op mode"
-     * returns without errors.
-     */
-
     return err;
+  }
+
+  /* when fw image is unified, only regular ucode is loaded. */
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    ucode_type = IWL_UCODE_REGULAR;
+  }
+  err = iwl_xvt_run_fw(xvt, ucode_type, false);
+  if (err) {
+    return err;
+  }
+
+  xvt->state = IWL_XVT_STATE_INIT_STARTED;
+
+  err = iwl_xvt_nvm_init(xvt);
+  if (err) {
+    IWL_ERR(xvt, "Failed to read NVM: %d\n", err);
+  }
+
+  /*
+   * The initialization flow is not yet complete.
+   * User need to execute "Continue initialization"
+   * flow in order to complete it.
+   *
+   * NOT sending ALIVE notification to user. User
+   * knows that FW is alive when "start op mode"
+   * returns without errors.
+   */
+
+  return err;
 }
 
 static void iwl_xvt_stop_op_mode(struct iwl_xvt* xvt) {
-    if (xvt->state == IWL_XVT_STATE_UNINITIALIZED) { return; }
+  if (xvt->state == IWL_XVT_STATE_UNINITIALIZED) {
+    return;
+  }
 
-    if (xvt->fw_running) {
-        iwl_xvt_txq_disable(xvt);
-        xvt->fw_running = false;
-    }
-    iwl_trans_stop_device(xvt->trans);
+  if (xvt->fw_running) {
+    iwl_xvt_txq_disable(xvt);
+    xvt->fw_running = false;
+  }
+  iwl_trans_stop_device(xvt->trans);
 
-    iwl_free_fw_paging(&xvt->fwrt);
+  iwl_free_fw_paging(&xvt->fwrt);
 
-    xvt->state = IWL_XVT_STATE_UNINITIALIZED;
+  xvt->state = IWL_XVT_STATE_UNINITIALIZED;
 }
 
 /*
@@ -508,185 +546,205 @@
  * is marked in the load mask.
  */
 static int iwl_xvt_continue_init(struct iwl_xvt* xvt) {
-    struct iwl_notification_wait calib_wait;
-    static const uint16_t init_complete[] = {INIT_COMPLETE_NOTIF, CALIB_RES_NOTIF_PHY_DB};
-    int err;
+  struct iwl_notification_wait calib_wait;
+  static const uint16_t init_complete[] = {INIT_COMPLETE_NOTIF, CALIB_RES_NOTIF_PHY_DB};
+  int err;
 
-    if (xvt->state != IWL_XVT_STATE_INIT_STARTED) { return -EINVAL; }
+  if (xvt->state != IWL_XVT_STATE_INIT_STARTED) {
+    return -EINVAL;
+  }
 
-    if (iwl_xvt_is_unified_fw(xvt)) { return iwl_xvt_continue_init_unified(xvt); }
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    return iwl_xvt_continue_init_unified(xvt);
+  }
 
-    iwl_init_notification_wait(&xvt->notif_wait, &calib_wait, init_complete,
-                               ARRAY_SIZE(init_complete), iwl_xvt_wait_phy_db_entry, xvt->phy_db);
+  iwl_init_notification_wait(&xvt->notif_wait, &calib_wait, init_complete,
+                             ARRAY_SIZE(init_complete), iwl_xvt_wait_phy_db_entry, xvt->phy_db);
 
-    err = iwl_xvt_send_phy_cfg_cmd(xvt, IWL_UCODE_INIT);
-    if (err) {
-        iwl_remove_notification(&xvt->notif_wait, &calib_wait);
-        goto error;
-    }
+  err = iwl_xvt_send_phy_cfg_cmd(xvt, IWL_UCODE_INIT);
+  if (err) {
+    iwl_remove_notification(&xvt->notif_wait, &calib_wait);
+    goto error;
+  }
 
-    /*
-     * Waiting for the calibration complete notification
-     * iwl_xvt_wait_phy_db_entry will store the calibrations
-     */
-    err = iwl_wait_notification(&xvt->notif_wait, &calib_wait, XVT_UCODE_CALIB_TIMEOUT);
-    if (err) { goto error; }
+  /*
+   * Waiting for the calibration complete notification
+   * iwl_xvt_wait_phy_db_entry will store the calibrations
+   */
+  err = iwl_wait_notification(&xvt->notif_wait, &calib_wait, XVT_UCODE_CALIB_TIMEOUT);
+  if (err) {
+    goto error;
+  }
 
-    xvt->state = IWL_XVT_STATE_OPERATIONAL;
+  xvt->state = IWL_XVT_STATE_OPERATIONAL;
 
-    if (xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_RUNTIME)
-    /* Run runtime FW stops the device by itself if error occurs */
-    {
-        err = iwl_xvt_run_runtime_fw(xvt, true);
-    }
+  if (xvt->sw_stack_cfg.load_mask & IWL_XVT_LOAD_MASK_RUNTIME)
+  /* iwl_xvt_run_runtime_fw stops the device by itself if an error occurs */
+  {
+    err = iwl_xvt_run_runtime_fw(xvt, true);
+  }
 
-    goto cont_init_end;
+  goto cont_init_end;
 
 error:
-    xvt->state = IWL_XVT_STATE_UNINITIALIZED;
-    iwl_xvt_txq_disable(xvt);
-    iwl_trans_stop_device(xvt->trans);
+  xvt->state = IWL_XVT_STATE_UNINITIALIZED;
+  iwl_xvt_txq_disable(xvt);
+  iwl_trans_stop_device(xvt->trans);
 
 cont_init_end:
 
-    return err;
+  return err;
 }
 
 static int iwl_xvt_get_phy_db(struct iwl_xvt* xvt, struct iwl_tm_data* data_in,
                               struct iwl_tm_data* data_out) {
-    struct iwl_xvt_phy_db_request* phy_db_req = (struct iwl_xvt_phy_db_request*)data_in->data;
-    struct iwl_xvt_phy_db_request* phy_db_resp;
-    uint8_t* phy_data;
-    uint16_t phy_size;
-    uint32_t resp_size;
-    int err;
+  struct iwl_xvt_phy_db_request* phy_db_req = (struct iwl_xvt_phy_db_request*)data_in->data;
+  struct iwl_xvt_phy_db_request* phy_db_resp;
+  uint8_t* phy_data;
+  uint16_t phy_size;
+  uint32_t resp_size;
+  int err;
 
-    if ((data_in->len < sizeof(struct iwl_xvt_phy_db_request)) || (phy_db_req->size != 0)) {
-        return -EINVAL;
-    }
+  if ((data_in->len < sizeof(struct iwl_xvt_phy_db_request)) || (phy_db_req->size != 0)) {
+    return -EINVAL;
+  }
 
-    err = iwl_phy_db_get_section_data(xvt->phy_db, phy_db_req->type, &phy_data, &phy_size,
-                                      phy_db_req->chg_id);
-    if (err) { return err; }
+  err = iwl_phy_db_get_section_data(xvt->phy_db, phy_db_req->type, &phy_data, &phy_size,
+                                    phy_db_req->chg_id);
+  if (err) {
+    return err;
+  }
 
-    resp_size = sizeof(*phy_db_resp) + phy_size;
-    phy_db_resp = kzalloc(resp_size, GFP_KERNEL);
-    if (!phy_db_resp) { return -ENOMEM; }
-    phy_db_resp->chg_id = phy_db_req->chg_id;
-    phy_db_resp->type = phy_db_req->type;
-    phy_db_resp->size = phy_size;
-    memcpy(phy_db_resp->data, phy_data, phy_size);
+  resp_size = sizeof(*phy_db_resp) + phy_size;
+  phy_db_resp = kzalloc(resp_size, GFP_KERNEL);
+  if (!phy_db_resp) {
+    return -ENOMEM;
+  }
+  phy_db_resp->chg_id = phy_db_req->chg_id;
+  phy_db_resp->type = phy_db_req->type;
+  phy_db_resp->size = phy_size;
+  memcpy(phy_db_resp->data, phy_data, phy_size);
 
-    data_out->data = phy_db_resp;
-    data_out->len = resp_size;
+  data_out->data = phy_db_resp;
+  data_out->len = resp_size;
 
-    return 0;
+  return 0;
 }
 
 static struct iwl_device_cmd* iwl_xvt_init_tx_dev_cmd(struct iwl_xvt* xvt) {
-    struct iwl_device_cmd* dev_cmd;
+  struct iwl_device_cmd* dev_cmd;
 
-    dev_cmd = iwl_trans_alloc_tx_cmd(xvt->trans);
-    if (unlikely(!dev_cmd)) { return NULL; }
+  dev_cmd = iwl_trans_alloc_tx_cmd(xvt->trans);
+  if (unlikely(!dev_cmd)) {
+    return NULL;
+  }
 
-    memset(dev_cmd, 0, sizeof(*dev_cmd));
-    dev_cmd->hdr.cmd = TX_CMD;
+  memset(dev_cmd, 0, sizeof(*dev_cmd));
+  dev_cmd->hdr.cmd = TX_CMD;
 
-    return dev_cmd;
+  return dev_cmd;
 }
 
 static uint16_t iwl_xvt_get_offload_assist(struct ieee80211_hdr* hdr) {
-    int hdrlen = ieee80211_hdrlen(hdr->frame_control);
-    uint16_t offload_assist = 0;
-    bool amsdu;
+  int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+  uint16_t offload_assist = 0;
+  bool amsdu;
 
-    amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
-            (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+  amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+          (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT);
 
-    if (amsdu) { offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); }
+  if (amsdu) {
+    offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+  }
 
-    /*
-     * padding is inserted later in transport.
-     * do not align A-MSDUs to dword, as the subframe header
-     * aligns the SNAP header.
-     */
-    if (hdrlen % 4 && !amsdu) { offload_assist |= BIT(TX_CMD_OFFLD_PAD); }
+  /*
+   * padding is inserted later in transport.
+   * do not align A-MSDUs to dword, as the subframe header
+   * aligns the SNAP header.
+   */
+  if (hdrlen % 4 && !amsdu) {
+    offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+  }
 
-    return offload_assist;
+  return offload_assist;
 }
 
 static struct iwl_device_cmd* iwl_xvt_set_tx_params_gen3(struct iwl_xvt* xvt, struct sk_buff* skb,
                                                          uint32_t rate_flags, uint32_t tx_flags)
 
 {
-    struct iwl_device_cmd* dev_cmd;
-    struct iwl_tx_cmd_gen3* cmd;
-    struct ieee80211_hdr* hdr = (struct ieee80211_hdr*)skb->data;
-    struct iwl_xvt_skb_info* skb_info = (void*)skb->cb;
-    uint32_t header_length = ieee80211_hdrlen(hdr->frame_control);
+  struct iwl_device_cmd* dev_cmd;
+  struct iwl_tx_cmd_gen3* cmd;
+  struct ieee80211_hdr* hdr = (struct ieee80211_hdr*)skb->data;
+  struct iwl_xvt_skb_info* skb_info = (void*)skb->cb;
+  uint32_t header_length = ieee80211_hdrlen(hdr->frame_control);
 
-    dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt);
-    if (unlikely(!dev_cmd)) { return NULL; }
+  dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt);
+  if (unlikely(!dev_cmd)) {
+    return NULL;
+  }
 
-    cmd = (struct iwl_tx_cmd_gen3*)dev_cmd->payload;
+  cmd = (struct iwl_tx_cmd_gen3*)dev_cmd->payload;
 
-    cmd->offload_assist |= cpu_to_le32(iwl_xvt_get_offload_assist(hdr));
+  cmd->offload_assist |= cpu_to_le32(iwl_xvt_get_offload_assist(hdr));
 
-    cmd->len = cpu_to_le16((uint16_t)skb->len);
+  cmd->len = cpu_to_le16((uint16_t)skb->len);
 
-    cmd->flags = cpu_to_le16(tx_flags);
-    if (ieee80211_has_morefrags(hdr->frame_control))
-    /* though this flag is not supported for gen3, it is used
-     * here for silicon feedback tests. */
-    {
-        cmd->flags |= cpu_to_le16(TX_CMD_FLG_MORE_FRAG);
-    }
+  cmd->flags = cpu_to_le16(tx_flags);
+  if (ieee80211_has_morefrags(hdr->frame_control))
+  /* though this flag is not supported for gen3, it is used
+   * here for silicon feedback tests. */
+  {
+    cmd->flags |= cpu_to_le16(TX_CMD_FLG_MORE_FRAG);
+  }
 
-    cmd->rate_n_flags = cpu_to_le32(rate_flags);
+  cmd->rate_n_flags = cpu_to_le32(rate_flags);
 
-    /* Copy MAC header from skb into command buffer */
-    memcpy(cmd->hdr, hdr, header_length);
+  /* Copy MAC header from skb into command buffer */
+  memcpy(cmd->hdr, hdr, header_length);
 
-    /* Saving device command address itself in the control buffer, to be
-     * used when reclaiming the command.
-     */
-    skb_info->dev_cmd = dev_cmd;
+  /* Saving device command address itself in the control buffer, to be
+   * used when reclaiming the command.
+   */
+  skb_info->dev_cmd = dev_cmd;
 
-    return dev_cmd;
+  return dev_cmd;
 }
 
 static struct iwl_device_cmd* iwl_xvt_set_tx_params_gen2(struct iwl_xvt* xvt, struct sk_buff* skb,
                                                          uint32_t rate_flags, uint32_t flags) {
-    struct iwl_device_cmd* dev_cmd;
-    struct iwl_xvt_skb_info* skb_info = (void*)skb->cb;
-    struct iwl_tx_cmd_gen2* tx_cmd;
-    struct ieee80211_hdr* hdr = (struct ieee80211_hdr*)skb->data;
-    uint32_t header_length = ieee80211_hdrlen(hdr->frame_control);
+  struct iwl_device_cmd* dev_cmd;
+  struct iwl_xvt_skb_info* skb_info = (void*)skb->cb;
+  struct iwl_tx_cmd_gen2* tx_cmd;
+  struct ieee80211_hdr* hdr = (struct ieee80211_hdr*)skb->data;
+  uint32_t header_length = ieee80211_hdrlen(hdr->frame_control);
 
-    dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt);
-    if (unlikely(!dev_cmd)) { return NULL; }
+  dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt);
+  if (unlikely(!dev_cmd)) {
+    return NULL;
+  }
 
-    tx_cmd = (struct iwl_tx_cmd_gen2*)dev_cmd->payload;
-    tx_cmd->len = cpu_to_le16((uint16_t)skb->len);
-    tx_cmd->offload_assist |= cpu_to_le16(iwl_xvt_get_offload_assist(hdr));
-    tx_cmd->flags = cpu_to_le32(flags);
-    if (ieee80211_has_morefrags(hdr->frame_control))
-    /* though this flag is not supported for gen2, it is used
-     * for silicon feedback tests. */
-    {
-        tx_cmd->flags |= cpu_to_le32(TX_CMD_FLG_MORE_FRAG);
-    }
-    tx_cmd->rate_n_flags = cpu_to_le32(rate_flags);
+  tx_cmd = (struct iwl_tx_cmd_gen2*)dev_cmd->payload;
+  tx_cmd->len = cpu_to_le16((uint16_t)skb->len);
+  tx_cmd->offload_assist |= cpu_to_le16(iwl_xvt_get_offload_assist(hdr));
+  tx_cmd->flags = cpu_to_le32(flags);
+  if (ieee80211_has_morefrags(hdr->frame_control))
+  /* though this flag is not supported for gen2, it is used
+   * for silicon feedback tests. */
+  {
+    tx_cmd->flags |= cpu_to_le32(TX_CMD_FLG_MORE_FRAG);
+  }
+  tx_cmd->rate_n_flags = cpu_to_le32(rate_flags);
 
-    /* Copy MAC header from skb into command buffer */
-    memcpy(tx_cmd->hdr, hdr, header_length);
+  /* Copy MAC header from skb into command buffer */
+  memcpy(tx_cmd->hdr, hdr, header_length);
 
-    /* Saving device command address itself in the
-     * control buffer, to be used when reclaiming
-     * the command. */
-    skb_info->dev_cmd = dev_cmd;
+  /* Saving device command address itself in the
+   * control buffer, to be used when reclaiming
+   * the command. */
+  skb_info->dev_cmd = dev_cmd;
 
-    return dev_cmd;
+  return dev_cmd;
 }
 
 /*
@@ -695,1103 +753,1177 @@
 static struct iwl_device_cmd* iwl_xvt_set_mod_tx_params(struct iwl_xvt* xvt, struct sk_buff* skb,
                                                         uint8_t sta_id, uint32_t rate_flags,
                                                         uint32_t flags) {
-    struct iwl_device_cmd* dev_cmd;
-    struct iwl_xvt_skb_info* skb_info = (void*)skb->cb;
-    struct iwl_tx_cmd* tx_cmd;
+  struct iwl_device_cmd* dev_cmd;
+  struct iwl_xvt_skb_info* skb_info = (void*)skb->cb;
+  struct iwl_tx_cmd* tx_cmd;
 
-    dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt);
-    if (unlikely(!dev_cmd)) { return NULL; }
+  dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt);
+  if (unlikely(!dev_cmd)) {
+    return NULL;
+  }
 
-    tx_cmd = (struct iwl_tx_cmd*)dev_cmd->payload;
+  tx_cmd = (struct iwl_tx_cmd*)dev_cmd->payload;
 
-    tx_cmd->len = cpu_to_le16((uint16_t)skb->len);
-    tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+  tx_cmd->len = cpu_to_le16((uint16_t)skb->len);
+  tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
 
-    tx_cmd->sta_id = sta_id;
-    tx_cmd->rate_n_flags = cpu_to_le32(rate_flags);
-    tx_cmd->tx_flags = cpu_to_le32(flags);
+  tx_cmd->sta_id = sta_id;
+  tx_cmd->rate_n_flags = cpu_to_le32(rate_flags);
+  tx_cmd->tx_flags = cpu_to_le32(flags);
 
-    /* the skb should already hold the data */
-    memcpy(tx_cmd->hdr, skb->data, sizeof(struct ieee80211_hdr));
+  /* the skb should already hold the data */
+  memcpy(tx_cmd->hdr, skb->data, sizeof(struct ieee80211_hdr));
 
-    /*
-     * Saving device command address itself in the
-     * control buffer, to be used when reclaiming
-     * the command.
-     */
-    skb_info->dev_cmd = dev_cmd;
+  /*
+   * Saving device command address itself in the
+   * control buffer, to be used when reclaiming
+   * the command.
+   */
+  skb_info->dev_cmd = dev_cmd;
 
-    return dev_cmd;
+  return dev_cmd;
 }
 
 static void iwl_xvt_set_seq_number(struct iwl_xvt* xvt, struct tx_meta_data* meta_tx,
                                    struct sk_buff* skb, uint8_t frag_num) {
-    struct ieee80211_hdr* hdr = (struct ieee80211_hdr*)skb->data;
-    uint8_t *qc, tid;
+  struct ieee80211_hdr* hdr = (struct ieee80211_hdr*)skb->data;
+  uint8_t *qc, tid;
 
-    if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) {
-        return;
-    }
+  if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) {
+    return;
+  }
 
-    qc = ieee80211_get_qos_ctl(hdr);
-    tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
-    if (WARN_ON(tid >= IWL_MAX_TID_COUNT)) { tid = IWL_MAX_TID_COUNT - 1; }
+  qc = ieee80211_get_qos_ctl(hdr);
+  tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+  if (WARN_ON(tid >= IWL_MAX_TID_COUNT)) {
+    tid = IWL_MAX_TID_COUNT - 1;
+  }
 
-    /* frag_num is expected to be zero in case of no fragmentation */
-    hdr->seq_ctrl = cpu_to_le16(meta_tx->seq_num[tid] | (frag_num & IEEE80211_SCTL_FRAG));
+  /* frag_num is expected to be zero in case of no fragmentation */
+  hdr->seq_ctrl = cpu_to_le16(meta_tx->seq_num[tid] | (frag_num & IEEE80211_SCTL_FRAG));
 
-    if (!ieee80211_has_morefrags(hdr->frame_control)) { meta_tx->seq_num[tid] += 0x10; }
+  if (!ieee80211_has_morefrags(hdr->frame_control)) {
+    meta_tx->seq_num[tid] += 0x10;
+  }
 }
 
 static int iwl_xvt_send_packet(struct iwl_xvt* xvt, struct iwl_tm_mod_tx_request* tx_req,
                                uint32_t* status, struct tx_meta_data* meta_tx) {
-    struct sk_buff* skb;
-    struct iwl_device_cmd* dev_cmd;
-    int time_remain, err = 0;
-    uint32_t flags = 0;
-    uint32_t rate_flags = tx_req->rate_flags;
+  struct sk_buff* skb;
+  struct iwl_device_cmd* dev_cmd;
+  int time_remain, err = 0;
+  uint32_t flags = 0;
+  uint32_t rate_flags = tx_req->rate_flags;
 
-    if (xvt->fw_error) {
-        IWL_ERR(xvt, "FW Error while sending Tx\n");
-        *status = XVT_TX_DRIVER_ABORTED;
-        return -ENODEV;
-    }
+  if (xvt->fw_error) {
+    IWL_ERR(xvt, "FW Error while sending Tx\n");
+    *status = XVT_TX_DRIVER_ABORTED;
+    return -ENODEV;
+  }
 
-    skb = alloc_skb(tx_req->len, GFP_KERNEL);
-    if (!skb) {
-        *status = XVT_TX_DRIVER_ABORTED;
-        return -ENOMEM;
-    }
+  skb = alloc_skb(tx_req->len, GFP_KERNEL);
+  if (!skb) {
+    *status = XVT_TX_DRIVER_ABORTED;
+    return -ENOMEM;
+  }
 
-    memcpy(skb_put(skb, tx_req->len), tx_req->data, tx_req->len);
-    iwl_xvt_set_seq_number(xvt, meta_tx, skb, 0);
+  memcpy(skb_put(skb, tx_req->len), tx_req->data, tx_req->len);
+  iwl_xvt_set_seq_number(xvt, meta_tx, skb, 0);
 
-    flags = tx_req->no_ack ? 0 : TX_CMD_FLG_ACK;
+  flags = tx_req->no_ack ? 0 : TX_CMD_FLG_ACK;
 
-    if (iwl_xvt_is_unified_fw(xvt)) {
-        flags |= IWL_TX_FLAGS_CMD_RATE;
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    flags |= IWL_TX_FLAGS_CMD_RATE;
 
-        if (xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-            dev_cmd = iwl_xvt_set_tx_params_gen3(xvt, skb, rate_flags, flags);
-        } else {
-            dev_cmd = iwl_xvt_set_tx_params_gen2(xvt, skb, rate_flags, flags);
-        }
+    if (xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+      dev_cmd = iwl_xvt_set_tx_params_gen3(xvt, skb, rate_flags, flags);
     } else {
-        dev_cmd = iwl_xvt_set_mod_tx_params(xvt, skb, tx_req->sta_id, tx_req->rate_flags, flags);
+      dev_cmd = iwl_xvt_set_tx_params_gen2(xvt, skb, rate_flags, flags);
     }
-    if (!dev_cmd) {
-        kfree_skb(skb);
-        *status = XVT_TX_DRIVER_ABORTED;
-        return -ENOMEM;
-    }
-
-    if (tx_req->trigger_led) { iwl_xvt_led_enable(xvt); }
-
-    /* wait until the tx queue isn't full */
-    time_remain = wait_event_interruptible_timeout(meta_tx->mod_tx_wq, !meta_tx->txq_full, HZ);
-
-    if (time_remain <= 0) {
-        /* This should really not happen */
-        WARN_ON_ONCE(meta_tx->txq_full);
-        IWL_ERR(xvt, "Error while sending Tx\n");
-        *status = XVT_TX_DRIVER_QUEUE_FULL;
-        err = -EIO;
-        goto err;
-    }
-
-    if (xvt->fw_error) {
-        WARN_ON_ONCE(meta_tx->txq_full);
-        IWL_ERR(xvt, "FW Error while sending Tx\n");
-        *status = XVT_TX_DRIVER_ABORTED;
-        err = -ENODEV;
-        goto err;
-    }
-
-    /* Assume we have one Txing thread only: the queue is not full
-     * any more - nobody could fill it up in the meantime since we
-     * were blocked.
-     */
-
-    local_bh_disable();
-
-    err = iwl_trans_tx(xvt->trans, skb, dev_cmd, meta_tx->queue);
-
-    local_bh_enable();
-    if (err) {
-        IWL_ERR(xvt, "Tx command failed (error %d)\n", err);
-        *status = XVT_TX_DRIVER_ABORTED;
-        goto err;
-    }
-
-    if (tx_req->trigger_led) { iwl_xvt_led_disable(xvt); }
-
-    return err;
-err:
-    iwl_trans_free_tx_cmd(xvt->trans, dev_cmd);
+  } else {
+    dev_cmd = iwl_xvt_set_mod_tx_params(xvt, skb, tx_req->sta_id, tx_req->rate_flags, flags);
+  }
+  if (!dev_cmd) {
     kfree_skb(skb);
-    return err;
+    *status = XVT_TX_DRIVER_ABORTED;
+    return -ENOMEM;
+  }
+
+  if (tx_req->trigger_led) {
+    iwl_xvt_led_enable(xvt);
+  }
+
+  /* wait until the tx queue isn't full */
+  time_remain = wait_event_interruptible_timeout(meta_tx->mod_tx_wq, !meta_tx->txq_full, HZ);
+
+  if (time_remain <= 0) {
+    /* This should really not happen */
+    WARN_ON_ONCE(meta_tx->txq_full);
+    IWL_ERR(xvt, "Error while sending Tx\n");
+    *status = XVT_TX_DRIVER_QUEUE_FULL;
+    err = -EIO;
+    goto err;
+  }
+
+  if (xvt->fw_error) {
+    WARN_ON_ONCE(meta_tx->txq_full);
+    IWL_ERR(xvt, "FW Error while sending Tx\n");
+    *status = XVT_TX_DRIVER_ABORTED;
+    err = -ENODEV;
+    goto err;
+  }
+
+  /* Assume we have one Txing thread only: the queue is not full
+   * any more - nobody could fill it up in the meantime since we
+   * were blocked.
+   */
+
+  local_bh_disable();
+
+  err = iwl_trans_tx(xvt->trans, skb, dev_cmd, meta_tx->queue);
+
+  local_bh_enable();
+  if (err) {
+    IWL_ERR(xvt, "Tx command failed (error %d)\n", err);
+    *status = XVT_TX_DRIVER_ABORTED;
+    goto err;
+  }
+
+  if (tx_req->trigger_led) {
+    iwl_xvt_led_disable(xvt);
+  }
+
+  return err;
+err:
+  iwl_trans_free_tx_cmd(xvt->trans, dev_cmd);
+  kfree_skb(skb);
+  return err;
 }
 
 static struct iwl_device_cmd* iwl_xvt_set_tx_params(struct iwl_xvt* xvt, struct sk_buff* skb,
                                                     struct iwl_xvt_tx_start* tx_start,
                                                     uint8_t packet_index) {
-    struct iwl_device_cmd* dev_cmd;
-    struct iwl_xvt_skb_info* skb_info = (void*)skb->cb;
-    struct iwl_tx_cmd* tx_cmd;
-    /* the skb should already hold the data */
-    struct ieee80211_hdr* hdr = (struct ieee80211_hdr*)skb->data;
-    uint32_t header_length = ieee80211_hdrlen(hdr->frame_control);
+  struct iwl_device_cmd* dev_cmd;
+  struct iwl_xvt_skb_info* skb_info = (void*)skb->cb;
+  struct iwl_tx_cmd* tx_cmd;
+  /* the skb should already hold the data */
+  struct ieee80211_hdr* hdr = (struct ieee80211_hdr*)skb->data;
+  uint32_t header_length = ieee80211_hdrlen(hdr->frame_control);
 
-    dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt);
-    if (unlikely(!dev_cmd)) { return NULL; }
+  dev_cmd = iwl_xvt_init_tx_dev_cmd(xvt);
+  if (unlikely(!dev_cmd)) {
+    return NULL;
+  }
 
-    tx_cmd = (struct iwl_tx_cmd*)dev_cmd->payload;
+  tx_cmd = (struct iwl_tx_cmd*)dev_cmd->payload;
 
-    /* let the fw manage the seq number for non-qos/multicast */
-    if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) {
-        tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
-    }
+  /* let the fw manage the seq number for non-qos/multicast */
+  if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) {
+    tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+  }
 
-    tx_cmd->len = cpu_to_le16((uint16_t)skb->len);
-    tx_cmd->offload_assist |= cpu_to_le16(iwl_xvt_get_offload_assist(hdr));
-    tx_cmd->tx_flags |= cpu_to_le32(tx_start->tx_data.tx_flags);
-    if (ieee80211_has_morefrags(hdr->frame_control)) {
-        tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MORE_FRAG);
-    }
-    tx_cmd->rate_n_flags = cpu_to_le32(tx_start->tx_data.rate_flags);
-    tx_cmd->sta_id = tx_start->frames_data[packet_index].sta_id;
-    tx_cmd->sec_ctl = tx_start->frames_data[packet_index].sec_ctl;
-    tx_cmd->initial_rate_index = tx_start->tx_data.initial_rate_index;
-    tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
-    tx_cmd->rts_retry_limit = tx_start->tx_data.rts_retry_limit;
-    tx_cmd->data_retry_limit = tx_start->tx_data.data_retry_limit;
-    tx_cmd->tid_tspec = tx_start->frames_data[packet_index].tid_tspec;
-    memcpy(tx_cmd->key, tx_start->frames_data[packet_index].key, sizeof(tx_cmd->key));
+  tx_cmd->len = cpu_to_le16((uint16_t)skb->len);
+  tx_cmd->offload_assist |= cpu_to_le16(iwl_xvt_get_offload_assist(hdr));
+  tx_cmd->tx_flags |= cpu_to_le32(tx_start->tx_data.tx_flags);
+  if (ieee80211_has_morefrags(hdr->frame_control)) {
+    tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MORE_FRAG);
+  }
+  tx_cmd->rate_n_flags = cpu_to_le32(tx_start->tx_data.rate_flags);
+  tx_cmd->sta_id = tx_start->frames_data[packet_index].sta_id;
+  tx_cmd->sec_ctl = tx_start->frames_data[packet_index].sec_ctl;
+  tx_cmd->initial_rate_index = tx_start->tx_data.initial_rate_index;
+  tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+  tx_cmd->rts_retry_limit = tx_start->tx_data.rts_retry_limit;
+  tx_cmd->data_retry_limit = tx_start->tx_data.data_retry_limit;
+  tx_cmd->tid_tspec = tx_start->frames_data[packet_index].tid_tspec;
+  memcpy(tx_cmd->key, tx_start->frames_data[packet_index].key, sizeof(tx_cmd->key));
 
-    memcpy(tx_cmd->hdr, hdr, header_length);
+  memcpy(tx_cmd->hdr, hdr, header_length);
 
-    /*
-     * Saving device command address itself in the control buffer,
-     * to be used when reclaiming the command.
-     */
-    skb_info->dev_cmd = dev_cmd;
+  /*
+   * Saving device command address itself in the control buffer,
+   * to be used when reclaiming the command.
+   */
+  skb_info->dev_cmd = dev_cmd;
 
-    return dev_cmd;
+  return dev_cmd;
 }
 
 static struct sk_buff* iwl_xvt_set_skb(struct iwl_xvt* xvt, struct ieee80211_hdr* hdr,
                                        struct tx_payload* payload) {
-    struct sk_buff* skb;
-    uint32_t header_size = ieee80211_hdrlen(hdr->frame_control);
-    uint32_t payload_length = payload->length;
-    uint32_t packet_length = payload_length + header_size;
+  struct sk_buff* skb;
+  uint32_t header_size = ieee80211_hdrlen(hdr->frame_control);
+  uint32_t payload_length = payload->length;
+  uint32_t packet_length = payload_length + header_size;
 
-    skb = alloc_skb(packet_length, GFP_KERNEL);
-    if (!skb) { return NULL; }
-    /* copy MAC header into skb */
-    memcpy(skb_put(skb, header_size), hdr, header_size);
-    /* copy frame payload into skb */
-    memcpy(skb_put(skb, payload_length), payload, payload_length);
+  skb = alloc_skb(packet_length, GFP_KERNEL);
+  if (!skb) {
+    return NULL;
+  }
+  /* copy MAC header into skb */
+  memcpy(skb_put(skb, header_size), hdr, header_size);
+  /* copy frame payload into skb */
+  memcpy(skb_put(skb, payload_length), payload, payload_length);
 
-    return skb;
+  return skb;
 }
 
 static struct sk_buff* iwl_xvt_create_fragment_skb(struct iwl_xvt* xvt, struct ieee80211_hdr* hdr,
                                                    struct tx_payload* payload,
                                                    uint32_t fragment_size, uint8_t frag_num) {
-    struct sk_buff* skb;
-    const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
-    uint32_t header_size = ieee80211_hdrlen(hdr->frame_control);
-    uint32_t skb_size, offset, payload_remain, payload_chunck_size;
+  struct sk_buff* skb;
+  const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
+  uint32_t header_size = ieee80211_hdrlen(hdr->frame_control);
+  uint32_t skb_size, offset, payload_remain, payload_chunck_size;
 
-    if (WARN(fragment_size <= header_size || !ieee80211_is_data_qos(hdr->frame_control),
-             "can't fragment, fragment_size small big or not qos data")) {
-        return NULL;
-    }
+  if (WARN(fragment_size <= header_size || !ieee80211_is_data_qos(hdr->frame_control),
+           "can't fragment, fragment_size small big or not qos data")) {
+    return NULL;
+  }
 
-    payload_chunck_size = fragment_size - header_size;
-    offset = payload_chunck_size * frag_num;
-    if (WARN(offset >= payload->length, "invalid fragment number %d\n", frag_num)) { return NULL; }
+  payload_chunck_size = fragment_size - header_size;
+  offset = payload_chunck_size * frag_num;
+  if (WARN(offset >= payload->length, "invalid fragment number %d\n", frag_num)) {
+    return NULL;
+  }
 
-    payload_remain = payload->length - offset;
+  payload_remain = payload->length - offset;
 
-    if (fragment_size < payload_remain + header_size) {
-        skb_size = fragment_size;
-        hdr->frame_control |= morefrags;
-    } else {
-        skb_size = payload_remain + header_size;
-        hdr->frame_control &= ~morefrags;
-        payload_chunck_size = payload_remain;
-    }
+  if (fragment_size < payload_remain + header_size) {
+    skb_size = fragment_size;
+    hdr->frame_control |= morefrags;
+  } else {
+    skb_size = payload_remain + header_size;
+    hdr->frame_control &= ~morefrags;
+    payload_chunck_size = payload_remain;
+  }
 
-    skb = alloc_skb(skb_size, GFP_KERNEL);
-    if (!skb) { return NULL; }
+  skb = alloc_skb(skb_size, GFP_KERNEL);
+  if (!skb) {
+    return NULL;
+  }
 
-    /* copy MAC header into skb */
-    memcpy(skb_put(skb, header_size), hdr, header_size);
+  /* copy MAC header into skb */
+  memcpy(skb_put(skb, header_size), hdr, header_size);
 
-    /* copy frame payload into skb */
-    memcpy(skb_put(skb, payload_chunck_size), &payload->payload[offset], payload_chunck_size);
+  /* copy frame payload into skb */
+  memcpy(skb_put(skb, payload_chunck_size), &payload->payload[offset], payload_chunck_size);
 
-    return skb;
+  return skb;
 }
 
 static struct sk_buff* iwl_xvt_get_skb(struct iwl_xvt* xvt, struct ieee80211_hdr* hdr,
                                        struct tx_payload* payload, uint32_t fragment_size,
                                        uint8_t frag_num) {
-    if (fragment_size == 0) { /* no framgmentation */
-        return iwl_xvt_set_skb(xvt, hdr, payload);
-    }
+  if (fragment_size == 0) { /* no fragmentation */
+    return iwl_xvt_set_skb(xvt, hdr, payload);
+  }
 
-    return iwl_xvt_create_fragment_skb(xvt, hdr, payload, fragment_size, frag_num);
+  return iwl_xvt_create_fragment_skb(xvt, hdr, payload, fragment_size, frag_num);
 }
 
 static int iwl_xvt_transmit_packet(struct iwl_xvt* xvt, struct sk_buff* skb,
                                    struct iwl_xvt_tx_start* tx_start, uint8_t packet_index,
                                    uint8_t frag_num, uint32_t* status) {
-    struct iwl_device_cmd* dev_cmd;
-    int time_remain, err = 0;
-    uint8_t queue = tx_start->frames_data[packet_index].queue;
-    struct tx_queue_data* queue_data = &xvt->queue_data[queue];
-    uint32_t rate_flags = tx_start->tx_data.rate_flags;
-    uint32_t tx_flags = tx_start->tx_data.tx_flags;
+  struct iwl_device_cmd* dev_cmd;
+  int time_remain, err = 0;
+  uint8_t queue = tx_start->frames_data[packet_index].queue;
+  struct tx_queue_data* queue_data = &xvt->queue_data[queue];
+  uint32_t rate_flags = tx_start->tx_data.rate_flags;
+  uint32_t tx_flags = tx_start->tx_data.tx_flags;
 
-    /* set tx number */
-    iwl_xvt_set_seq_number(xvt, &xvt->tx_meta_data[XVT_LMAC_0_ID], skb, frag_num);
+  /* set tx number */
+  iwl_xvt_set_seq_number(xvt, &xvt->tx_meta_data[XVT_LMAC_0_ID], skb, frag_num);
 
-    if (iwl_xvt_is_unified_fw(xvt)) {
-        if (xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-            dev_cmd = iwl_xvt_set_tx_params_gen3(xvt, skb, rate_flags, tx_flags);
-        } else {
-            dev_cmd = iwl_xvt_set_tx_params_gen2(xvt, skb, rate_flags, tx_flags);
-        }
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    if (xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+      dev_cmd = iwl_xvt_set_tx_params_gen3(xvt, skb, rate_flags, tx_flags);
     } else {
-        dev_cmd = iwl_xvt_set_tx_params(xvt, skb, tx_start, packet_index);
+      dev_cmd = iwl_xvt_set_tx_params_gen2(xvt, skb, rate_flags, tx_flags);
     }
-    if (!dev_cmd) {
-        kfree_skb(skb);
-        *status = XVT_TX_DRIVER_ABORTED;
-        return -ENOMEM;
-    }
-    /* wait until the tx queue isn't full */
-    time_remain = wait_event_interruptible_timeout(queue_data->tx_wq, !queue_data->txq_full, HZ);
+  } else {
+    dev_cmd = iwl_xvt_set_tx_params(xvt, skb, tx_start, packet_index);
+  }
+  if (!dev_cmd) {
+    kfree_skb(skb);
+    *status = XVT_TX_DRIVER_ABORTED;
+    return -ENOMEM;
+  }
+  /* wait until the tx queue isn't full */
+  time_remain = wait_event_interruptible_timeout(queue_data->tx_wq, !queue_data->txq_full, HZ);
 
-    if (time_remain <= 0) {
-        /* This should really not happen */
-        WARN_ON_ONCE(queue_data->txq_full);
-        IWL_ERR(xvt, "Error while sending Tx\n");
-        *status = XVT_TX_DRIVER_QUEUE_FULL;
-        err = -EIO;
-        goto on_err;
-    }
+  if (time_remain <= 0) {
+    /* This should really not happen */
+    WARN_ON_ONCE(queue_data->txq_full);
+    IWL_ERR(xvt, "Error while sending Tx\n");
+    *status = XVT_TX_DRIVER_QUEUE_FULL;
+    err = -EIO;
+    goto on_err;
+  }
 
-    if (xvt->fw_error) {
-        WARN_ON_ONCE(queue_data->txq_full);
-        IWL_ERR(xvt, "FW Error while sending packet\n");
-        *status = XVT_TX_DRIVER_ABORTED;
-        err = -ENODEV;
-        goto on_err;
-    }
-    /* Assume we have one Txing thread only: the queue is not full
-     * any more - nobody could fill it up in the meantime since we
-     * were blocked.
-     */
-    local_bh_disable();
-    err = iwl_trans_tx(xvt->trans, skb, dev_cmd, queue);
-    local_bh_enable();
-    if (err) {
-        IWL_ERR(xvt, "Tx command failed (error %d)\n", err);
-        *status = XVT_TX_DRIVER_ABORTED;
-        goto on_err;
-    }
+  if (xvt->fw_error) {
+    WARN_ON_ONCE(queue_data->txq_full);
+    IWL_ERR(xvt, "FW Error while sending packet\n");
+    *status = XVT_TX_DRIVER_ABORTED;
+    err = -ENODEV;
+    goto on_err;
+  }
+  /* Assume we have one Txing thread only: the queue is not full
+   * any more - nobody could fill it up in the meantime since we
+   * were blocked.
+   */
+  local_bh_disable();
+  err = iwl_trans_tx(xvt->trans, skb, dev_cmd, queue);
+  local_bh_enable();
+  if (err) {
+    IWL_ERR(xvt, "Tx command failed (error %d)\n", err);
+    *status = XVT_TX_DRIVER_ABORTED;
+    goto on_err;
+  }
 
-    return 0;
+  return 0;
 
 on_err:
-    iwl_trans_free_tx_cmd(xvt->trans, dev_cmd);
-    kfree_skb(skb);
-    return err;
+  iwl_trans_free_tx_cmd(xvt->trans, dev_cmd);
+  kfree_skb(skb);
+  return err;
 }
 
 static int iwl_xvt_send_tx_done_notif(struct iwl_xvt* xvt, uint32_t status) {
-    struct iwl_xvt_tx_done* done_notif;
-    uint32_t i, j, done_notif_size, num_of_queues = 0;
-    int err;
+  struct iwl_xvt_tx_done* done_notif;
+  uint32_t i, j, done_notif_size, num_of_queues = 0;
+  int err;
 
-    for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
-        if (xvt->queue_data[i].allocated_queue) { num_of_queues++; }
+  for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
+    if (xvt->queue_data[i].allocated_queue) {
+      num_of_queues++;
     }
+  }
 
-    done_notif_size = sizeof(*done_notif) + num_of_queues * sizeof(struct iwl_xvt_post_tx_data);
-    done_notif = kzalloc(done_notif_size, GFP_KERNEL);
-    if (!done_notif) { return -ENOMEM; }
+  done_notif_size = sizeof(*done_notif) + num_of_queues * sizeof(struct iwl_xvt_post_tx_data);
+  done_notif = kzalloc(done_notif_size, GFP_KERNEL);
+  if (!done_notif) {
+    return -ENOMEM;
+  }
 
-    done_notif->status = status;
-    done_notif->num_of_queues = num_of_queues;
+  done_notif->status = status;
+  done_notif->num_of_queues = num_of_queues;
 
-    for (i = 1, j = 0; i <= num_of_queues; i++) {
-        if (!xvt->queue_data[i].allocated_queue) { continue; }
-        done_notif->tx_data[j].num_of_packets = xvt->queue_data[i].tx_counter;
-        done_notif->tx_data[j].queue = i;
-        j++;
+  for (i = 1, j = 0; i <= num_of_queues; i++) {
+    if (!xvt->queue_data[i].allocated_queue) {
+      continue;
     }
-    err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_ENHANCED_TX_DONE, (void*)done_notif,
-                                  done_notif_size, GFP_ATOMIC);
-    if (err) {
-        IWL_ERR(xvt, "Error %d sending tx_done notification\n", err);
-        kfree(done_notif);
-        return err;
-    }
+    done_notif->tx_data[j].num_of_packets = xvt->queue_data[i].tx_counter;
+    done_notif->tx_data[j].queue = i;
+    j++;
+  }
+  err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_ENHANCED_TX_DONE, (void*)done_notif,
+                                done_notif_size, GFP_ATOMIC);
+  if (err) {
+    IWL_ERR(xvt, "Error %d sending tx_done notification\n", err);
+    kfree(done_notif);
+    return err;
+  }
 
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_start_tx_handler(void* data) {
-    struct iwl_xvt_enhanced_tx_data* task_data = data;
-    struct iwl_xvt_tx_start* tx_start = &task_data->tx_start_data;
-    struct iwl_xvt* xvt = task_data->xvt;
-    uint8_t num_of_frames;
-    uint32_t status, packets_in_cycle = 0;
-    int time_remain, err = 0, sent_packets = 0;
-    uint32_t num_of_cycles = tx_start->num_of_cycles;
-    uint64_t i, num_of_iterations;
+  struct iwl_xvt_enhanced_tx_data* task_data = data;
+  struct iwl_xvt_tx_start* tx_start = &task_data->tx_start_data;
+  struct iwl_xvt* xvt = task_data->xvt;
+  uint8_t num_of_frames;
+  uint32_t status, packets_in_cycle = 0;
+  int time_remain, err = 0, sent_packets = 0;
+  uint32_t num_of_cycles = tx_start->num_of_cycles;
+  uint64_t i, num_of_iterations;
 
-    /* reset tx parameters */
-    xvt->num_of_tx_resp = 0;
-    xvt->send_tx_resp = tx_start->send_tx_resp;
-    status = 0;
+  /* reset tx parameters */
+  xvt->num_of_tx_resp = 0;
+  xvt->send_tx_resp = tx_start->send_tx_resp;
+  status = 0;
 
-    for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
-        xvt->queue_data[i].tx_counter = 0;
-    }
+  for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
+    xvt->queue_data[i].tx_counter = 0;
+  }
 
-    num_of_frames = tx_start->num_of_different_frames;
-    for (i = 0; i < num_of_frames; i++) {
-        packets_in_cycle += tx_start->frames_data[i].times;
-    }
-    if (WARN(packets_in_cycle == 0, "invalid packets amount to send")) { return -EINVAL; }
+  num_of_frames = tx_start->num_of_different_frames;
+  for (i = 0; i < num_of_frames; i++) {
+    packets_in_cycle += tx_start->frames_data[i].times;
+  }
+  if (WARN(packets_in_cycle == 0, "invalid packets amount to send")) {
+    return -EINVAL;
+  }
 
-    if (num_of_cycles == IWL_XVT_TX_MODULATED_INFINITE) {
-        num_of_cycles = XVT_MAX_TX_COUNT / packets_in_cycle;
-    }
-    xvt->expected_tx_amount = packets_in_cycle * num_of_cycles;
-    num_of_iterations = num_of_cycles * num_of_frames;
+  if (num_of_cycles == IWL_XVT_TX_MODULATED_INFINITE) {
+    num_of_cycles = XVT_MAX_TX_COUNT / packets_in_cycle;
+  }
+  xvt->expected_tx_amount = packets_in_cycle * num_of_cycles;
+  num_of_iterations = num_of_cycles * num_of_frames;
 
-    for (i = 0; (i < num_of_iterations) && !kthread_should_stop(); i++) {
-        uint16_t j, times;
-        uint8_t frame_index, payload_idx, frag_idx, frag_num;
-        struct ieee80211_hdr* hdr;
-        struct sk_buff* skb;
-        uint8_t frag_size = tx_start->tx_data.fragment_size;
-        struct tx_payload* payload;
-        uint8_t frag_array_size = ARRAY_SIZE(tx_start->tx_data.frag_num);
+  for (i = 0; (i < num_of_iterations) && !kthread_should_stop(); i++) {
+    uint16_t j, times;
+    uint8_t frame_index, payload_idx, frag_idx, frag_num;
+    struct ieee80211_hdr* hdr;
+    struct sk_buff* skb;
+    uint8_t frag_size = tx_start->tx_data.fragment_size;
+    struct tx_payload* payload;
+    uint8_t frag_array_size = ARRAY_SIZE(tx_start->tx_data.frag_num);
 
-        frame_index = i % num_of_frames;
-        payload_idx = tx_start->frames_data[frame_index].payload_index;
-        payload = xvt->payloads[payload_idx];
-        hdr = (struct ieee80211_hdr*)tx_start->frames_data[frame_index].header;
-        times = tx_start->frames_data[frame_index].times;
-        for (j = 0; j < times; j++) {
-            if (xvt->fw_error) {
-                IWL_ERR(xvt, "FW Error during TX\n");
-                status = XVT_TX_DRIVER_ABORTED;
-                err = -ENODEV;
-                goto on_exit;
-            }
+    frame_index = i % num_of_frames;
+    payload_idx = tx_start->frames_data[frame_index].payload_index;
+    payload = xvt->payloads[payload_idx];
+    hdr = (struct ieee80211_hdr*)tx_start->frames_data[frame_index].header;
+    times = tx_start->frames_data[frame_index].times;
+    for (j = 0; j < times; j++) {
+      if (xvt->fw_error) {
+        IWL_ERR(xvt, "FW Error during TX\n");
+        status = XVT_TX_DRIVER_ABORTED;
+        err = -ENODEV;
+        goto on_exit;
+      }
 
-            frag_idx = 0;
-            while (frag_idx < frag_array_size) {
-                frag_num = tx_start->tx_data.frag_num[frag_idx];
+      frag_idx = 0;
+      while (frag_idx < frag_array_size) {
+        frag_num = tx_start->tx_data.frag_num[frag_idx];
 
-                if (frag_num == XVT_STOP_TX || (frag_size == 0 && frag_idx > 0)) { break; }
-
-                skb = iwl_xvt_get_skb(xvt, hdr, payload, frag_size, frag_num);
-                if (!skb) {
-                    IWL_ERR(xvt, "skb is NULL\n");
-                    status = XVT_TX_DRIVER_ABORTED;
-                    err = -ENOMEM;
-                    goto on_exit;
-                }
-                err = iwl_xvt_transmit_packet(xvt, skb, tx_start, frame_index, frag_num, &status);
-                sent_packets++;
-                if (err) {
-                    IWL_ERR(xvt, "stop due to err %d\n", err);
-                    goto on_exit;
-                }
-
-                ++frag_idx;
-            }
+        if (frag_num == XVT_STOP_TX || (frag_size == 0 && frag_idx > 0)) {
+          break;
         }
+
+        skb = iwl_xvt_get_skb(xvt, hdr, payload, frag_size, frag_num);
+        if (!skb) {
+          IWL_ERR(xvt, "skb is NULL\n");
+          status = XVT_TX_DRIVER_ABORTED;
+          err = -ENOMEM;
+          goto on_exit;
+        }
+        err = iwl_xvt_transmit_packet(xvt, skb, tx_start, frame_index, frag_num, &status);
+        sent_packets++;
+        if (err) {
+          IWL_ERR(xvt, "stop due to err %d\n", err);
+          goto on_exit;
+        }
+
+        ++frag_idx;
+      }
     }
-    time_remain = wait_event_interruptible_timeout(
-        xvt->tx_done_wq, xvt->num_of_tx_resp == sent_packets, 5 * HZ * CPTCFG_IWL_TIMEOUT_FACTOR);
-    if (time_remain <= 0) {
-        IWL_ERR(xvt, "Not all Tx messages were sent\n");
-        status = XVT_TX_DRIVER_TIMEOUT;
-    }
+  }
+  time_remain = wait_event_interruptible_timeout(
+      xvt->tx_done_wq, xvt->num_of_tx_resp == sent_packets, 5 * HZ * CPTCFG_IWL_TIMEOUT_FACTOR);
+  if (time_remain <= 0) {
+    IWL_ERR(xvt, "Not all Tx messages were sent\n");
+    status = XVT_TX_DRIVER_TIMEOUT;
+  }
 
 on_exit:
-    err = iwl_xvt_send_tx_done_notif(xvt, status);
+  err = iwl_xvt_send_tx_done_notif(xvt, status);
 
-    xvt->is_enhanced_tx = false;
-    kfree(data);
-    for (i = 0; i < IWL_XVT_MAX_PAYLOADS_AMOUNT; i++) {
-        kfree(xvt->payloads[i]);
-        xvt->payloads[i] = NULL;
-    }
-    do_exit(err);
+  xvt->is_enhanced_tx = false;
+  kfree(data);
+  for (i = 0; i < IWL_XVT_MAX_PAYLOADS_AMOUNT; i++) {
+    kfree(xvt->payloads[i]);
+    xvt->payloads[i] = NULL;
+  }
+  do_exit(err);
 }
 
 static int iwl_xvt_modulated_tx_handler(void* data) {
-    uint64_t tx_count, max_tx;
-    int time_remain, num_of_packets, err = 0;
-    struct iwl_xvt* xvt;
-    struct iwl_xvt_tx_mod_done* done_notif;
-    uint32_t status = XVT_TX_DRIVER_SUCCESSFUL;
-    struct iwl_xvt_tx_mod_task_data* task_data = (struct iwl_xvt_tx_mod_task_data*)data;
-    struct tx_meta_data* xvt_tx;
+  uint64_t tx_count, max_tx;
+  int time_remain, num_of_packets, err = 0;
+  struct iwl_xvt* xvt;
+  struct iwl_xvt_tx_mod_done* done_notif;
+  uint32_t status = XVT_TX_DRIVER_SUCCESSFUL;
+  struct iwl_xvt_tx_mod_task_data* task_data = (struct iwl_xvt_tx_mod_task_data*)data;
+  struct tx_meta_data* xvt_tx;
 
-    xvt = task_data->xvt;
-    xvt_tx = &xvt->tx_meta_data[task_data->lmac_id];
-    xvt_tx->tx_task_operating = true;
-    num_of_packets = task_data->tx_req.times;
-    max_tx = (num_of_packets == IWL_XVT_TX_MODULATED_INFINITE) ? XVT_MAX_TX_COUNT : num_of_packets;
-    xvt_tx->tot_tx = num_of_packets;
-    xvt_tx->tx_counter = 0;
+  xvt = task_data->xvt;
+  xvt_tx = &xvt->tx_meta_data[task_data->lmac_id];
+  xvt_tx->tx_task_operating = true;
+  num_of_packets = task_data->tx_req.times;
+  max_tx = (num_of_packets == IWL_XVT_TX_MODULATED_INFINITE) ? XVT_MAX_TX_COUNT : num_of_packets;
+  xvt_tx->tot_tx = num_of_packets;
+  xvt_tx->tx_counter = 0;
 
-    for (tx_count = 0; (tx_count < max_tx) && (!kthread_should_stop()); tx_count++) {
-        err = iwl_xvt_send_packet(xvt, &task_data->tx_req, &status, xvt_tx);
-        if (err) {
-            IWL_ERR(xvt, "stop send packets due to err %d\n", err);
-            break;
-        }
-    }
-
-    if (!err) {
-        time_remain = wait_event_interruptible_timeout(xvt_tx->mod_tx_done_wq,
-                                                       xvt_tx->tx_counter == tx_count, 5 * HZ);
-        if (time_remain <= 0) {
-            IWL_ERR(xvt, "Not all Tx messages were sent\n");
-            xvt_tx->tx_task_operating = false;
-            status = XVT_TX_DRIVER_TIMEOUT;
-        }
-    }
-
-    done_notif = kmalloc(sizeof(*done_notif), GFP_KERNEL);
-    if (!done_notif) {
-        xvt_tx->tx_task_operating = false;
-        kfree(data);
-        return -ENOMEM;
-    }
-    done_notif->num_of_packets = xvt_tx->tx_counter;
-    done_notif->status = status;
-    done_notif->lmac_id = task_data->lmac_id;
-    err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_MOD_TX_DONE, (void*)done_notif,
-                                  sizeof(*done_notif), GFP_ATOMIC);
+  for (tx_count = 0; (tx_count < max_tx) && (!kthread_should_stop()); tx_count++) {
+    err = iwl_xvt_send_packet(xvt, &task_data->tx_req, &status, xvt_tx);
     if (err) {
-        IWL_ERR(xvt, "Error %d sending tx_done notification\n", err);
-        kfree(done_notif);
+      IWL_ERR(xvt, "stop send packets due to err %d\n", err);
+      break;
     }
+  }
 
+  if (!err) {
+    time_remain = wait_event_interruptible_timeout(xvt_tx->mod_tx_done_wq,
+                                                   xvt_tx->tx_counter == tx_count, 5 * HZ);
+    if (time_remain <= 0) {
+      IWL_ERR(xvt, "Not all Tx messages were sent\n");
+      xvt_tx->tx_task_operating = false;
+      status = XVT_TX_DRIVER_TIMEOUT;
+    }
+  }
+
+  done_notif = kmalloc(sizeof(*done_notif), GFP_KERNEL);
+  if (!done_notif) {
     xvt_tx->tx_task_operating = false;
     kfree(data);
-    do_exit(err);
+    return -ENOMEM;
+  }
+  done_notif->num_of_packets = xvt_tx->tx_counter;
+  done_notif->status = status;
+  done_notif->lmac_id = task_data->lmac_id;
+  err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_MOD_TX_DONE, (void*)done_notif,
+                                sizeof(*done_notif), GFP_ATOMIC);
+  if (err) {
+    IWL_ERR(xvt, "Error %d sending tx_done notification\n", err);
+    kfree(done_notif);
+  }
+
+  xvt_tx->tx_task_operating = false;
+  kfree(data);
+  do_exit(err);
 }
 
 static int iwl_xvt_modulated_tx_infinite_stop(struct iwl_xvt* xvt, struct iwl_tm_data* data_in) {
-    int err = 0;
-    uint32_t lmac_id = ((struct iwl_xvt_tx_mod_stop*)data_in->data)->lmac_id;
-    struct tx_meta_data* xvt_tx = &xvt->tx_meta_data[lmac_id];
+  int err = 0;
+  uint32_t lmac_id = ((struct iwl_xvt_tx_mod_stop*)data_in->data)->lmac_id;
+  struct tx_meta_data* xvt_tx = &xvt->tx_meta_data[lmac_id];
 
-    if (xvt_tx->tx_mod_thread && xvt_tx->tx_task_operating) {
-        err = kthread_stop(xvt_tx->tx_mod_thread);
-        xvt_tx->tx_mod_thread = NULL;
-    }
+  if (xvt_tx->tx_mod_thread && xvt_tx->tx_task_operating) {
+    err = kthread_stop(xvt_tx->tx_mod_thread);
+    xvt_tx->tx_mod_thread = NULL;
+  }
 
-    return err;
+  return err;
 }
 
 static inline int map_sta_to_lmac(struct iwl_xvt* xvt, uint8_t sta_id) {
-    switch (sta_id) {
+  switch (sta_id) {
     case XVT_LMAC_0_STA_ID:
-        return XVT_LMAC_0_ID;
+      return XVT_LMAC_0_ID;
     case XVT_LMAC_1_STA_ID:
-        return XVT_LMAC_1_ID;
+      return XVT_LMAC_1_ID;
     default:
-        IWL_ERR(xvt, "wrong sta id, can't match queue\n");
-        return -EINVAL;
-    }
+      IWL_ERR(xvt, "wrong sta id, can't match queue\n");
+      return -EINVAL;
+  }
 }
 
 static int iwl_xvt_tx_queue_cfg(struct iwl_xvt* xvt, struct iwl_tm_data* data_in) {
-    struct iwl_xvt_tx_queue_cfg* input = (struct iwl_xvt_tx_queue_cfg*)data_in->data;
-    uint8_t sta_id = input->sta_id;
-    int lmac_id = map_sta_to_lmac(xvt, sta_id);
+  struct iwl_xvt_tx_queue_cfg* input = (struct iwl_xvt_tx_queue_cfg*)data_in->data;
+  uint8_t sta_id = input->sta_id;
+  int lmac_id = map_sta_to_lmac(xvt, sta_id);
 
-    if (lmac_id < 0) { return lmac_id; }
+  if (lmac_id < 0) {
+    return lmac_id;
+  }
 
-    switch (input->operation) {
+  switch (input->operation) {
     case TX_QUEUE_CFG_ADD:
-        return iwl_xvt_allocate_tx_queue(xvt, sta_id, lmac_id);
+      return iwl_xvt_allocate_tx_queue(xvt, sta_id, lmac_id);
     case TX_QUEUE_CFG_REMOVE:
-        iwl_xvt_free_tx_queue(xvt, lmac_id);
-        break;
+      iwl_xvt_free_tx_queue(xvt, lmac_id);
+      break;
     default:
-        IWL_ERR(xvt, "failed in tx config - wrong operation\n");
-        return -EINVAL;
-    }
+      IWL_ERR(xvt, "failed in tx config - wrong operation\n");
+      return -EINVAL;
+  }
 
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_start_tx(struct iwl_xvt* xvt, struct iwl_xvt_driver_command_req* req) {
-    struct iwl_xvt_enhanced_tx_data* task_data;
+  struct iwl_xvt_enhanced_tx_data* task_data;
 
-    if (WARN(xvt->is_enhanced_tx || xvt->tx_meta_data[XVT_LMAC_0_ID].tx_task_operating ||
-                 xvt->tx_meta_data[XVT_LMAC_1_ID].tx_task_operating,
-             "TX is already in progress\n")) {
-        return -EINVAL;
-    }
+  if (WARN(xvt->is_enhanced_tx || xvt->tx_meta_data[XVT_LMAC_0_ID].tx_task_operating ||
+               xvt->tx_meta_data[XVT_LMAC_1_ID].tx_task_operating,
+           "TX is already in progress\n")) {
+    return -EINVAL;
+  }
 
+  xvt->is_enhanced_tx = true;
+
+  task_data = kzalloc(sizeof(*task_data), GFP_KERNEL);
+  if (!task_data) {
+    xvt->is_enhanced_tx = false;
+    return -ENOMEM;
+  }
+
+  task_data->xvt = xvt;
+  memcpy(&task_data->tx_start_data, req->input_data, sizeof(struct iwl_xvt_tx_start));
+
+  xvt->tx_task = kthread_run(iwl_xvt_start_tx_handler, task_data, "start enhanced tx command");
+  if (!xvt->tx_task) {
     xvt->is_enhanced_tx = true;
+    kfree(task_data);
+    return -ENOMEM;
+  }
 
-    task_data = kzalloc(sizeof(*task_data), GFP_KERNEL);
-    if (!task_data) {
-        xvt->is_enhanced_tx = false;
-        return -ENOMEM;
-    }
-
-    task_data->xvt = xvt;
-    memcpy(&task_data->tx_start_data, req->input_data, sizeof(struct iwl_xvt_tx_start));
-
-    xvt->tx_task = kthread_run(iwl_xvt_start_tx_handler, task_data, "start enhanced tx command");
-    if (!xvt->tx_task) {
-        xvt->is_enhanced_tx = true;
-        kfree(task_data);
-        return -ENOMEM;
-    }
-
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_stop_tx(struct iwl_xvt* xvt) {
-    int err = 0;
+  int err = 0;
 
-    if (xvt->tx_task && xvt->is_enhanced_tx) {
-        err = kthread_stop(xvt->tx_task);
-        xvt->tx_task = NULL;
-    }
+  if (xvt->tx_task && xvt->is_enhanced_tx) {
+    err = kthread_stop(xvt->tx_task);
+    xvt->tx_task = NULL;
+  }
 
-    return err;
+  return err;
 }
 
 static int iwl_xvt_set_tx_payload(struct iwl_xvt* xvt, struct iwl_xvt_driver_command_req* req) {
-    struct iwl_xvt_set_tx_payload* input = (struct iwl_xvt_set_tx_payload*)req->input_data;
-    uint32_t size = sizeof(struct tx_payload) + input->length;
-    struct tx_payload* payload_struct;
+  struct iwl_xvt_set_tx_payload* input = (struct iwl_xvt_set_tx_payload*)req->input_data;
+  uint32_t size = sizeof(struct tx_payload) + input->length;
+  struct tx_payload* payload_struct;
 
-    if (WARN(input->index >= IWL_XVT_MAX_PAYLOADS_AMOUNT, "invalid payload index\n")) {
-        return -EINVAL;
-    }
+  if (WARN(input->index >= IWL_XVT_MAX_PAYLOADS_AMOUNT, "invalid payload index\n")) {
+    return -EINVAL;
+  }
 
-    /* First free payload in case index is already in use */
-    kfree(xvt->payloads[input->index]);
+  /* First free payload in case index is already in use */
+  kfree(xvt->payloads[input->index]);
 
-    /* Allocate payload in xvt buffer */
-    xvt->payloads[input->index] = kzalloc(size, GFP_KERNEL);
-    if (!xvt->payloads[input->index]) { return -ENOMEM; }
+  /* Allocate payload in xvt buffer */
+  xvt->payloads[input->index] = kzalloc(size, GFP_KERNEL);
+  if (!xvt->payloads[input->index]) {
+    return -ENOMEM;
+  }
 
-    payload_struct = xvt->payloads[input->index];
-    payload_struct->length = input->length;
-    memcpy(payload_struct->payload, input->payload, input->length);
+  payload_struct = xvt->payloads[input->index];
+  payload_struct->length = input->length;
+  memcpy(payload_struct->payload, input->payload, input->length);
 
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_modulated_tx(struct iwl_xvt* xvt, struct iwl_tm_data* data_in) {
-    uint32_t pkt_length = ((struct iwl_tm_mod_tx_request*)data_in->data)->len;
-    uint32_t req_length = sizeof(struct iwl_tm_mod_tx_request) + pkt_length;
-    uint32_t task_data_length = sizeof(struct iwl_xvt_tx_mod_task_data) + pkt_length;
-    struct tx_meta_data* xvt_tx = &xvt->tx_meta_data[XVT_LMAC_0_ID];
-    uint8_t sta_id;
-    int lmac_id;
-    struct iwl_xvt_tx_mod_task_data* task_data;
+  uint32_t pkt_length = ((struct iwl_tm_mod_tx_request*)data_in->data)->len;
+  uint32_t req_length = sizeof(struct iwl_tm_mod_tx_request) + pkt_length;
+  uint32_t task_data_length = sizeof(struct iwl_xvt_tx_mod_task_data) + pkt_length;
+  struct tx_meta_data* xvt_tx = &xvt->tx_meta_data[XVT_LMAC_0_ID];
+  uint8_t sta_id;
+  int lmac_id;
+  struct iwl_xvt_tx_mod_task_data* task_data;
 
-    /* Verify this command was not called while tx is operating */
-    if (WARN_ON(xvt->is_enhanced_tx)) { return -EINVAL; }
+  /* Verify this command was not called while tx is operating */
+  if (WARN_ON(xvt->is_enhanced_tx)) {
+    return -EINVAL;
+  }
 
-    task_data = kzalloc(task_data_length, GFP_KERNEL);
-    if (!task_data) { return -ENOMEM; }
+  task_data = kzalloc(task_data_length, GFP_KERNEL);
+  if (!task_data) {
+    return -ENOMEM;
+  }
 
-    /*
-     * no need to check whether tx already operating on lmac, since check
-     * is already done in the USC
-     */
-    task_data->xvt = xvt;
-    memcpy(&task_data->tx_req, data_in->data, req_length);
+  /*
+   * no need to check whether tx already operating on lmac, since check
+   * is already done in the USC
+   */
+  task_data->xvt = xvt;
+  memcpy(&task_data->tx_req, data_in->data, req_length);
 
-    if (iwl_xvt_is_unified_fw(xvt)) {
-        sta_id = task_data->tx_req.sta_id;
-        lmac_id = map_sta_to_lmac(xvt, sta_id);
-        if (lmac_id < 0) { return lmac_id; }
-
-        task_data->lmac_id = lmac_id;
-        xvt_tx = &xvt->tx_meta_data[lmac_id];
-
-        /* check if tx queue is allocated. if not - return */
-        if (xvt_tx->queue < 0) {
-            IWL_ERR(xvt, "failed in tx - queue is not allocated\n");
-            return -EIO;
-        }
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    sta_id = task_data->tx_req.sta_id;
+    lmac_id = map_sta_to_lmac(xvt, sta_id);
+    if (lmac_id < 0) {
+      return lmac_id;
     }
 
-    xvt_tx->tx_mod_thread = kthread_run(iwl_xvt_modulated_tx_handler, task_data, "tx mod infinite");
-    if (!xvt_tx->tx_mod_thread) {
-        xvt_tx->tx_task_operating = false;
-        kfree(task_data);
-        return -ENOMEM;
-    }
+    task_data->lmac_id = lmac_id;
+    xvt_tx = &xvt->tx_meta_data[lmac_id];
 
-    return 0;
+    /* check if tx queue is allocated. if not - return */
+    if (xvt_tx->queue < 0) {
+      IWL_ERR(xvt, "failed in tx - queue is not allocated\n");
+      return -EIO;
+    }
+  }
+
+  xvt_tx->tx_mod_thread = kthread_run(iwl_xvt_modulated_tx_handler, task_data, "tx mod infinite");
+  if (!xvt_tx->tx_mod_thread) {
+    xvt_tx->tx_task_operating = false;
+    kfree(task_data);
+    return -ENOMEM;
+  }
+
+  return 0;
 }
 
 static int iwl_xvt_rx_hdrs_mode(struct iwl_xvt* xvt, struct iwl_tm_data* data_in) {
-    struct iwl_xvt_rx_hdrs_mode_request* rx_hdr = data_in->data;
+  struct iwl_xvt_rx_hdrs_mode_request* rx_hdr = data_in->data;
 
-    if (data_in->len < sizeof(struct iwl_xvt_rx_hdrs_mode_request)) { return -EINVAL; }
+  if (data_in->len < sizeof(struct iwl_xvt_rx_hdrs_mode_request)) {
+    return -EINVAL;
+  }
 
-    if (rx_hdr->mode) {
-        xvt->rx_hdr_enabled = true;
-    } else {
-        xvt->rx_hdr_enabled = false;
-    }
+  if (rx_hdr->mode) {
+    xvt->rx_hdr_enabled = true;
+  } else {
+    xvt->rx_hdr_enabled = false;
+  }
 
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_apmg_pd_mode(struct iwl_xvt* xvt, struct iwl_tm_data* data_in) {
-    struct iwl_xvt_apmg_pd_mode_request* apmg_pd = data_in->data;
+  struct iwl_xvt_apmg_pd_mode_request* apmg_pd = data_in->data;
 
-    if (apmg_pd->mode) {
-        xvt->apmg_pd_en = true;
-    } else {
-        xvt->apmg_pd_en = false;
-    }
+  if (apmg_pd->mode) {
+    xvt->apmg_pd_en = true;
+  } else {
+    xvt->apmg_pd_en = false;
+  }
 
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_allocate_dma(struct iwl_xvt* xvt, struct iwl_tm_data* data_in,
                                 struct iwl_tm_data* data_out) {
-    struct iwl_xvt_alloc_dma* dma_req = data_in->data;
-    struct iwl_xvt_alloc_dma* dma_res;
+  struct iwl_xvt_alloc_dma* dma_req = data_in->data;
+  struct iwl_xvt_alloc_dma* dma_res;
 
-    if (data_in->len < sizeof(struct iwl_xvt_alloc_dma)) { return -EINVAL; }
+  if (data_in->len < sizeof(struct iwl_xvt_alloc_dma)) {
+    return -EINVAL;
+  }
 
-    if (xvt->dma_cpu_addr) {
-        IWL_ERR(xvt, "XVT DMA already allocated\n");
-        return -EBUSY;
-    }
+  if (xvt->dma_cpu_addr) {
+    IWL_ERR(xvt, "XVT DMA already allocated\n");
+    return -EBUSY;
+  }
 
-    xvt->dma_cpu_addr =
-        dma_alloc_coherent(xvt->trans->dev, dma_req->size, &(xvt->dma_addr), GFP_KERNEL);
+  xvt->dma_cpu_addr =
+      dma_alloc_coherent(xvt->trans->dev, dma_req->size, &(xvt->dma_addr), GFP_KERNEL);
 
-    if (!xvt->dma_cpu_addr) { return false; }
+  if (!xvt->dma_cpu_addr) {
+    return false;
+  }
 
-    dma_res = kmalloc(sizeof(*dma_res), GFP_KERNEL);
-    if (!dma_res) {
-        dma_free_coherent(xvt->trans->dev, dma_req->size, xvt->dma_cpu_addr, xvt->dma_addr);
-        xvt->dma_cpu_addr = NULL;
-        xvt->dma_addr = 0;
-        return -ENOMEM;
-    }
-    dma_res->size = dma_req->size;
-    /* Casting to avoid compilation warnings when DMA address is 32bit */
-    dma_res->addr = (uint64_t)xvt->dma_addr;
+  dma_res = kmalloc(sizeof(*dma_res), GFP_KERNEL);
+  if (!dma_res) {
+    dma_free_coherent(xvt->trans->dev, dma_req->size, xvt->dma_cpu_addr, xvt->dma_addr);
+    xvt->dma_cpu_addr = NULL;
+    xvt->dma_addr = 0;
+    return -ENOMEM;
+  }
+  dma_res->size = dma_req->size;
+  /* Casting to avoid compilation warnings when DMA address is 32bit */
+  dma_res->addr = (uint64_t)xvt->dma_addr;
 
-    data_out->data = dma_res;
-    data_out->len = sizeof(struct iwl_xvt_alloc_dma);
-    xvt->dma_buffer_size = dma_req->size;
+  data_out->data = dma_res;
+  data_out->len = sizeof(struct iwl_xvt_alloc_dma);
+  xvt->dma_buffer_size = dma_req->size;
 
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_get_dma(struct iwl_xvt* xvt, struct iwl_tm_data* data_in,
                            struct iwl_tm_data* data_out) {
-    struct iwl_xvt_get_dma* get_dma_resp;
-    uint32_t resp_size;
+  struct iwl_xvt_get_dma* get_dma_resp;
+  uint32_t resp_size;
 
-    if (!xvt->dma_cpu_addr) { return -ENOMEM; }
+  if (!xvt->dma_cpu_addr) {
+    return -ENOMEM;
+  }
 
-    resp_size = sizeof(*get_dma_resp) + xvt->dma_buffer_size;
-    get_dma_resp = kmalloc(resp_size, GFP_KERNEL);
-    if (!get_dma_resp) { return -ENOMEM; }
+  resp_size = sizeof(*get_dma_resp) + xvt->dma_buffer_size;
+  get_dma_resp = kmalloc(resp_size, GFP_KERNEL);
+  if (!get_dma_resp) {
+    return -ENOMEM;
+  }
 
-    get_dma_resp->size = xvt->dma_buffer_size;
-    memcpy(get_dma_resp->data, xvt->dma_cpu_addr, xvt->dma_buffer_size);
-    data_out->data = get_dma_resp;
-    data_out->len = resp_size;
+  get_dma_resp->size = xvt->dma_buffer_size;
+  memcpy(get_dma_resp->data, xvt->dma_cpu_addr, xvt->dma_buffer_size);
+  data_out->data = get_dma_resp;
+  data_out->len = resp_size;
 
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_free_dma(struct iwl_xvt* xvt, struct iwl_tm_data* data_in) {
-    if (!xvt->dma_cpu_addr) {
-        IWL_ERR(xvt, "XVT DMA was not allocated\n");
-        return 0;
-    }
-
-    dma_free_coherent(xvt->trans->dev, xvt->dma_buffer_size, xvt->dma_cpu_addr, xvt->dma_addr);
-    xvt->dma_cpu_addr = NULL;
-    xvt->dma_addr = 0;
-    xvt->dma_buffer_size = 0;
-
+  if (!xvt->dma_cpu_addr) {
+    IWL_ERR(xvt, "XVT DMA was not allocated\n");
     return 0;
+  }
+
+  dma_free_coherent(xvt->trans->dev, xvt->dma_buffer_size, xvt->dma_cpu_addr, xvt->dma_addr);
+  xvt->dma_cpu_addr = NULL;
+  xvt->dma_addr = 0;
+  xvt->dma_buffer_size = 0;
+
+  return 0;
 }
 
 static int iwl_xvt_get_chip_id(struct iwl_xvt* xvt, struct iwl_tm_data* data_out) {
-    struct iwl_xvt_chip_id* chip_id;
+  struct iwl_xvt_chip_id* chip_id;
 
-    chip_id = kmalloc(sizeof(struct iwl_xvt_chip_id), GFP_KERNEL);
-    if (!chip_id) { return -ENOMEM; }
+  chip_id = kmalloc(sizeof(struct iwl_xvt_chip_id), GFP_KERNEL);
+  if (!chip_id) {
+    return -ENOMEM;
+  }
 
-    chip_id->registers[0] = ioread32((void __force __iomem*)XVT_SCU_SNUM1);
-    chip_id->registers[1] = ioread32((void __force __iomem*)XVT_SCU_SNUM2);
-    chip_id->registers[2] = ioread32((void __force __iomem*)XVT_SCU_SNUM3);
+  chip_id->registers[0] = ioread32((void __force __iomem*)XVT_SCU_SNUM1);
+  chip_id->registers[1] = ioread32((void __force __iomem*)XVT_SCU_SNUM2);
+  chip_id->registers[2] = ioread32((void __force __iomem*)XVT_SCU_SNUM3);
 
-    data_out->data = chip_id;
-    data_out->len = sizeof(struct iwl_xvt_chip_id);
+  data_out->data = chip_id;
+  data_out->len = sizeof(struct iwl_xvt_chip_id);
 
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_get_mac_addr_info(struct iwl_xvt* xvt, struct iwl_tm_data* data_out) {
-    struct iwl_xvt_mac_addr_info* mac_addr_info;
-    uint32_t mac_addr0, mac_addr1;
-    __u8 temp_mac_addr[ETH_ALEN];
-    const uint8_t* hw_addr;
+  struct iwl_xvt_mac_addr_info* mac_addr_info;
+  uint32_t mac_addr0, mac_addr1;
+  __u8 temp_mac_addr[ETH_ALEN];
+  const uint8_t* hw_addr;
 
-    mac_addr_info = kzalloc(sizeof(*mac_addr_info), GFP_KERNEL);
-    if (!mac_addr_info) { return -ENOMEM; }
+  mac_addr_info = kzalloc(sizeof(*mac_addr_info), GFP_KERNEL);
+  if (!mac_addr_info) {
+    return -ENOMEM;
+  }
 
-    if (xvt->cfg->nvm_type != IWL_NVM_EXT) {
-        memcpy(mac_addr_info->mac_addr, xvt->nvm_hw_addr, sizeof(mac_addr_info->mac_addr));
+  if (xvt->cfg->nvm_type != IWL_NVM_EXT) {
+    memcpy(mac_addr_info->mac_addr, xvt->nvm_hw_addr, sizeof(mac_addr_info->mac_addr));
+  } else {
+    /* MAC address in family 8000 */
+    if (xvt->is_nvm_mac_override) {
+      memcpy(mac_addr_info->mac_addr, xvt->nvm_mac_addr, sizeof(mac_addr_info->mac_addr));
     } else {
-        /* MAC address in family 8000 */
-        if (xvt->is_nvm_mac_override) {
-            memcpy(mac_addr_info->mac_addr, xvt->nvm_mac_addr, sizeof(mac_addr_info->mac_addr));
-        } else {
-            /* read the mac address from WFMP registers */
-            mac_addr0 = iwl_trans_read_prph(xvt->trans, WFMP_MAC_ADDR_0);
-            mac_addr1 = iwl_trans_read_prph(xvt->trans, WFMP_MAC_ADDR_1);
+      /* read the mac address from WFMP registers */
+      mac_addr0 = iwl_trans_read_prph(xvt->trans, WFMP_MAC_ADDR_0);
+      mac_addr1 = iwl_trans_read_prph(xvt->trans, WFMP_MAC_ADDR_1);
 
-            hw_addr = (const uint8_t*)&mac_addr0;
-            temp_mac_addr[0] = hw_addr[3];
-            temp_mac_addr[1] = hw_addr[2];
-            temp_mac_addr[2] = hw_addr[1];
-            temp_mac_addr[3] = hw_addr[0];
+      hw_addr = (const uint8_t*)&mac_addr0;
+      temp_mac_addr[0] = hw_addr[3];
+      temp_mac_addr[1] = hw_addr[2];
+      temp_mac_addr[2] = hw_addr[1];
+      temp_mac_addr[3] = hw_addr[0];
 
-            hw_addr = (const uint8_t*)&mac_addr1;
-            temp_mac_addr[4] = hw_addr[1];
-            temp_mac_addr[5] = hw_addr[0];
+      hw_addr = (const uint8_t*)&mac_addr1;
+      temp_mac_addr[4] = hw_addr[1];
+      temp_mac_addr[5] = hw_addr[0];
 
-            memcpy(mac_addr_info->mac_addr, temp_mac_addr, sizeof(mac_addr_info->mac_addr));
-        }
+      memcpy(mac_addr_info->mac_addr, temp_mac_addr, sizeof(mac_addr_info->mac_addr));
     }
+  }
 
-    data_out->data = mac_addr_info;
-    data_out->len = sizeof(*mac_addr_info);
+  data_out->data = mac_addr_info;
+  data_out->len = sizeof(*mac_addr_info);
 
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_add_txq(struct iwl_xvt* xvt, struct iwl_scd_txq_cfg_cmd* cmd, uint16_t ssn,
                            uint16_t flags, int size) {
-    int queue_id = cmd->scd_queue, ret;
+  int queue_id = cmd->scd_queue, ret;
 
-    if (iwl_xvt_is_unified_fw(xvt)) {
-        /*TODO: add support for second lmac*/
-        queue_id = iwl_trans_txq_alloc(xvt->trans, cpu_to_le16(flags), cmd->sta_id, cmd->tid,
-                                       SCD_QUEUE_CFG, size, 0);
-        if (queue_id < 0) { return queue_id; }
-    } else {
-        iwl_trans_txq_enable_cfg(xvt->trans, queue_id, ssn, NULL, 0);
-        ret = iwl_xvt_send_cmd_pdu(xvt, SCD_QUEUE_CFG, 0, sizeof(*cmd), cmd);
-        if (ret) {
-            IWL_ERR(xvt, "Failed to config queue %d on FIFO %d\n", cmd->scd_queue, cmd->tx_fifo);
-            return ret;
-        }
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    /*TODO: add support for second lmac*/
+    queue_id = iwl_trans_txq_alloc(xvt->trans, cpu_to_le16(flags), cmd->sta_id, cmd->tid,
+                                   SCD_QUEUE_CFG, size, 0);
+    if (queue_id < 0) {
+      return queue_id;
     }
+  } else {
+    iwl_trans_txq_enable_cfg(xvt->trans, queue_id, ssn, NULL, 0);
+    ret = iwl_xvt_send_cmd_pdu(xvt, SCD_QUEUE_CFG, 0, sizeof(*cmd), cmd);
+    if (ret) {
+      IWL_ERR(xvt, "Failed to config queue %d on FIFO %d\n", cmd->scd_queue, cmd->tx_fifo);
+      return ret;
+    }
+  }
 
-    xvt->queue_data[queue_id].allocated_queue = true;
-    init_waitqueue_head(&xvt->queue_data[queue_id].tx_wq);
+  xvt->queue_data[queue_id].allocated_queue = true;
+  init_waitqueue_head(&xvt->queue_data[queue_id].tx_wq);
 
-    return queue_id;
+  return queue_id;
 }
 
 static int iwl_xvt_remove_txq(struct iwl_xvt* xvt, struct iwl_scd_txq_cfg_cmd* cmd) {
-    int ret = 0;
+  int ret = 0;
 
-    if (iwl_xvt_is_unified_fw(xvt)) {
-        iwl_trans_txq_free(xvt->trans, cmd->scd_queue);
-    } else {
-        iwl_trans_txq_disable(xvt->trans, cmd->scd_queue, false);
-        ret = iwl_xvt_send_cmd_pdu(xvt, SCD_QUEUE_CFG, 0, sizeof(*cmd), cmd);
-    }
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    iwl_trans_txq_free(xvt->trans, cmd->scd_queue);
+  } else {
+    iwl_trans_txq_disable(xvt->trans, cmd->scd_queue, false);
+    ret = iwl_xvt_send_cmd_pdu(xvt, SCD_QUEUE_CFG, 0, sizeof(*cmd), cmd);
+  }
 
-    if (WARN(ret, "failed to send SCD_QUEUE_CFG")) { return ret; }
+  if (WARN(ret, "failed to send SCD_QUEUE_CFG")) {
+    return ret;
+  }
 
-    xvt->queue_data[cmd->scd_queue].allocated_queue = false;
+  xvt->queue_data[cmd->scd_queue].allocated_queue = false;
 
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_config_txq(struct iwl_xvt* xvt, struct iwl_xvt_driver_command_req* req,
                               struct iwl_xvt_driver_command_resp* resp) {
-    struct iwl_xvt_txq_config* conf = (struct iwl_xvt_txq_config*)req->input_data;
-    struct iwl_xvt_txq_config_resp txq_resp;
-    int queue_id = conf->scd_queue, error;
+  struct iwl_xvt_txq_config* conf = (struct iwl_xvt_txq_config*)req->input_data;
+  struct iwl_xvt_txq_config_resp txq_resp;
+  int queue_id = conf->scd_queue, error;
 
-    struct iwl_scd_txq_cfg_cmd cmd = {
-        .sta_id = conf->sta_id,
-        .tid = conf->tid,
-        .scd_queue = conf->scd_queue,
-        .action = conf->action,
-        .aggregate = conf->aggregate,
-        .tx_fifo = conf->tx_fifo,
-        .window = conf->window,
-        .ssn = cpu_to_le16(conf->ssn),
-    };
+  struct iwl_scd_txq_cfg_cmd cmd = {
+      .sta_id = conf->sta_id,
+      .tid = conf->tid,
+      .scd_queue = conf->scd_queue,
+      .action = conf->action,
+      .aggregate = conf->aggregate,
+      .tx_fifo = conf->tx_fifo,
+      .window = conf->window,
+      .ssn = cpu_to_le16(conf->ssn),
+  };
 
-    if (req->max_out_length < sizeof(txq_resp)) { return -ENOBUFS; }
+  if (req->max_out_length < sizeof(txq_resp)) {
+    return -ENOBUFS;
+  }
 
-    if (conf->action == TX_QUEUE_CFG_REMOVE) {
-        error = iwl_xvt_remove_txq(xvt, &cmd);
-        if (WARN(error, "failed to remove queue")) { return error; }
-    } else {
-        queue_id = iwl_xvt_add_txq(xvt, &cmd, conf->ssn, conf->flags, conf->queue_size);
-        if (queue_id < 0) { return queue_id; }
+  if (conf->action == TX_QUEUE_CFG_REMOVE) {
+    error = iwl_xvt_remove_txq(xvt, &cmd);
+    if (WARN(error, "failed to remove queue")) {
+      return error;
     }
+  } else {
+    queue_id = iwl_xvt_add_txq(xvt, &cmd, conf->ssn, conf->flags, conf->queue_size);
+    if (queue_id < 0) {
+      return queue_id;
+    }
+  }
 
-    txq_resp.scd_queue = queue_id;
-    txq_resp.sta_id = conf->sta_id;
-    txq_resp.tid = conf->tid;
-    memcpy(resp->resp_data, &txq_resp, sizeof(txq_resp));
-    resp->length = sizeof(txq_resp);
+  txq_resp.scd_queue = queue_id;
+  txq_resp.sta_id = conf->sta_id;
+  txq_resp.tid = conf->tid;
+  memcpy(resp->resp_data, &txq_resp, sizeof(txq_resp));
+  resp->length = sizeof(txq_resp);
 
-    return 0;
+  return 0;
 }
 
 static int iwl_xvt_get_rx_agg_stats_cmd(struct iwl_xvt* xvt, struct iwl_xvt_driver_command_req* req,
                                         struct iwl_xvt_driver_command_resp* resp) {
-    struct iwl_xvt_get_rx_agg_stats* params = (void*)req->input_data;
-    struct iwl_xvt_get_rx_agg_stats_resp* stats_resp = (void*)resp->resp_data;
-    struct iwl_xvt_reorder_buffer* buffer;
-    int i;
+  struct iwl_xvt_get_rx_agg_stats* params = (void*)req->input_data;
+  struct iwl_xvt_get_rx_agg_stats_resp* stats_resp = (void*)resp->resp_data;
+  struct iwl_xvt_reorder_buffer* buffer;
+  int i;
 
-    IWL_DEBUG_INFO(xvt, "get rx agg stats: sta_id=%d, tid=%d\n", params->sta_id, params->tid);
+  IWL_DEBUG_INFO(xvt, "get rx agg stats: sta_id=%d, tid=%d\n", params->sta_id, params->tid);
 
-    if (req->max_out_length < sizeof(stats_resp)) { return -ENOBUFS; }
+  if (req->max_out_length < sizeof(stats_resp)) {
+    return -ENOBUFS;
+  }
 
-    for (i = 0; i < ARRAY_SIZE(xvt->reorder_bufs); i++) {
-        buffer = &xvt->reorder_bufs[i];
-        if (buffer->sta_id != params->sta_id || buffer->tid != params->tid) { continue; }
-
-        spin_lock_bh(&buffer->lock);
-        stats_resp->dropped = buffer->stats.dropped;
-        stats_resp->released = buffer->stats.released;
-        stats_resp->skipped = buffer->stats.skipped;
-        stats_resp->reordered = buffer->stats.reordered;
-
-        /* clear statistics */
-        memset(&buffer->stats, 0, sizeof(buffer->stats));
-        spin_unlock_bh(&buffer->lock);
-
-        break;
+  for (i = 0; i < ARRAY_SIZE(xvt->reorder_bufs); i++) {
+    buffer = &xvt->reorder_bufs[i];
+    if (buffer->sta_id != params->sta_id || buffer->tid != params->tid) {
+      continue;
     }
 
-    if (i == ARRAY_SIZE(xvt->reorder_bufs)) { return -ENOENT; }
+    spin_lock_bh(&buffer->lock);
+    stats_resp->dropped = buffer->stats.dropped;
+    stats_resp->released = buffer->stats.released;
+    stats_resp->skipped = buffer->stats.skipped;
+    stats_resp->reordered = buffer->stats.reordered;
 
-    resp->length = sizeof(*stats_resp);
-    return 0;
+    /* clear statistics */
+    memset(&buffer->stats, 0, sizeof(buffer->stats));
+    spin_unlock_bh(&buffer->lock);
+
+    break;
+  }
+
+  if (i == ARRAY_SIZE(xvt->reorder_bufs)) {
+    return -ENOENT;
+  }
+
+  resp->length = sizeof(*stats_resp);
+  return 0;
 }
 
 static void iwl_xvt_config_rx_mpdu(struct iwl_xvt* xvt, struct iwl_xvt_driver_command_req* req)
 
 {
-    xvt->send_rx_mpdu = ((struct iwl_xvt_config_rx_mpdu_req*)req->input_data)->enable;
+  xvt->send_rx_mpdu = ((struct iwl_xvt_config_rx_mpdu_req*)req->input_data)->enable;
 }
 
 static int iwl_xvt_echo_notif(struct iwl_xvt* xvt) {
-    return iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_ECHO_NOTIF, NULL, 0, GFP_KERNEL);
+  return iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_ECHO_NOTIF, NULL, 0, GFP_KERNEL);
 }
 
 static int iwl_xvt_handle_driver_cmd(struct iwl_xvt* xvt, struct iwl_tm_data* data_in,
                                      struct iwl_tm_data* data_out) {
-    struct iwl_xvt_driver_command_req* req = data_in->data;
-    struct iwl_xvt_driver_command_resp* resp = NULL;
-    __u32 cmd_id = req->command_id;
-    int err = 0;
+  struct iwl_xvt_driver_command_req* req = data_in->data;
+  struct iwl_xvt_driver_command_resp* resp = NULL;
+  __u32 cmd_id = req->command_id;
+  int err = 0;
 
-    IWL_DEBUG_INFO(xvt, "handle driver command 0x%X\n", cmd_id);
+  IWL_DEBUG_INFO(xvt, "handle driver command 0x%X\n", cmd_id);
 
-    if (req->max_out_length > 0) {
-        resp = kzalloc(sizeof(*resp) + req->max_out_length, GFP_KERNEL);
-        if (!resp) { return -ENOMEM; }
+  if (req->max_out_length > 0) {
+    resp = kzalloc(sizeof(*resp) + req->max_out_length, GFP_KERNEL);
+    if (!resp) {
+      return -ENOMEM;
     }
+  }
 
-    /* resp->length and resp->resp_data should be set in command handler */
-    switch (cmd_id) {
+  /* resp->length and resp->resp_data should be set in command handler */
+  switch (cmd_id) {
     case IWL_DRV_CMD_CONFIG_TX_QUEUE:
-        err = iwl_xvt_config_txq(xvt, req, resp);
-        break;
+      err = iwl_xvt_config_txq(xvt, req, resp);
+      break;
     case IWL_DRV_CMD_SET_TX_PAYLOAD:
-        err = iwl_xvt_set_tx_payload(xvt, req);
-        break;
+      err = iwl_xvt_set_tx_payload(xvt, req);
+      break;
     case IWL_DRV_CMD_TX_START:
-        err = iwl_xvt_start_tx(xvt, req);
-        break;
+      err = iwl_xvt_start_tx(xvt, req);
+      break;
     case IWL_DRV_CMD_TX_STOP:
-        err = iwl_xvt_stop_tx(xvt);
-        break;
+      err = iwl_xvt_stop_tx(xvt);
+      break;
     case IWL_DRV_CMD_GET_RX_AGG_STATS:
-        err = iwl_xvt_get_rx_agg_stats_cmd(xvt, req, resp);
-        break;
+      err = iwl_xvt_get_rx_agg_stats_cmd(xvt, req, resp);
+      break;
     case IWL_DRV_CMD_CONFIG_RX_MPDU:
-        iwl_xvt_config_rx_mpdu(xvt, req);
-        break;
+      iwl_xvt_config_rx_mpdu(xvt, req);
+      break;
     case IWL_DRV_CMD_ECHO_NOTIF:
-        err = iwl_xvt_echo_notif(xvt);
-        break;
+      err = iwl_xvt_echo_notif(xvt);
+      break;
     default:
-        IWL_ERR(xvt, "no command handler found for cmd_id[%u]\n", cmd_id);
-        err = -EOPNOTSUPP;
+      IWL_ERR(xvt, "no command handler found for cmd_id[%u]\n", cmd_id);
+      err = -EOPNOTSUPP;
+  }
+
+  if (err) {
+    goto out_free;
+  }
+
+  if (req->max_out_length > 0) {
+    if (WARN_ONCE(resp->length == 0, "response was not set correctly\n")) {
+      err = -ENODATA;
+      goto out_free;
     }
 
-    if (err) { goto out_free; }
+    resp->command_id = cmd_id;
+    data_out->len = resp->length + sizeof(struct iwl_xvt_driver_command_resp);
+    data_out->data = resp;
 
-    if (req->max_out_length > 0) {
-        if (WARN_ONCE(resp->length == 0, "response was not set correctly\n")) {
-            err = -ENODATA;
-            goto out_free;
-        }
-
-        resp->command_id = cmd_id;
-        data_out->len = resp->length + sizeof(struct iwl_xvt_driver_command_resp);
-        data_out->data = resp;
-
-        return err;
-    }
+    return err;
+  }
 
 out_free:
-    kfree(resp);
-    return err;
+  kfree(resp);
+  return err;
 }
 
 int iwl_xvt_user_cmd_execute(struct iwl_testmode* testmode, uint32_t cmd,
                              struct iwl_tm_data* data_in, struct iwl_tm_data* data_out,
                              bool* supported_cmd) {
-    struct iwl_xvt* xvt = testmode->op_mode;
-    int ret = 0;
+  struct iwl_xvt* xvt = testmode->op_mode;
+  int ret = 0;
 
-    *supported_cmd = true;
-    if (WARN_ON_ONCE(!xvt || !data_in)) { return -EINVAL; }
+  *supported_cmd = true;
+  if (WARN_ON_ONCE(!xvt || !data_in)) {
+    return -EINVAL;
+  }
 
-    IWL_DEBUG_INFO(xvt, "%s cmd=0x%X\n", __func__, cmd);
-    mutex_lock(&xvt->mutex);
+  IWL_DEBUG_INFO(xvt, "%s cmd=0x%X\n", __func__, cmd);
+  mutex_lock(&xvt->mutex);
 
-    switch (cmd) {
-        /* Testmode custom cases */
+  switch (cmd) {
+      /* Testmode custom cases */
 
     case IWL_TM_USER_CMD_GET_DEVICE_INFO:
-        ret = iwl_xvt_get_dev_info(xvt, data_in, data_out);
-        break;
+      ret = iwl_xvt_get_dev_info(xvt, data_in, data_out);
+      break;
 
     case IWL_TM_USER_CMD_SV_IO_TOGGLE:
-        ret = iwl_xvt_sdio_io_toggle(xvt, data_in, data_out);
-        break;
+      ret = iwl_xvt_sdio_io_toggle(xvt, data_in, data_out);
+      break;
 
-        /* xVT cases */
+      /* xVT cases */
 
     case IWL_XVT_CMD_START:
-        ret = iwl_xvt_start_op_mode(xvt);
-        break;
+      ret = iwl_xvt_start_op_mode(xvt);
+      break;
 
     case IWL_XVT_CMD_STOP:
-        iwl_xvt_stop_op_mode(xvt);
-        break;
+      iwl_xvt_stop_op_mode(xvt);
+      break;
 
     case IWL_XVT_CMD_CONTINUE_INIT:
-        ret = iwl_xvt_continue_init(xvt);
-        break;
+      ret = iwl_xvt_continue_init(xvt);
+      break;
 
     case IWL_XVT_CMD_GET_PHY_DB_ENTRY:
-        ret = iwl_xvt_get_phy_db(xvt, data_in, data_out);
-        break;
+      ret = iwl_xvt_get_phy_db(xvt, data_in, data_out);
+      break;
 
     case IWL_XVT_CMD_SET_CONFIG:
-        ret = iwl_xvt_set_sw_config(xvt, data_in);
-        break;
+      ret = iwl_xvt_set_sw_config(xvt, data_in);
+      break;
 
     case IWL_XVT_CMD_GET_CONFIG:
-        ret = iwl_xvt_get_sw_config(xvt, data_in, data_out);
-        break;
+      ret = iwl_xvt_get_sw_config(xvt, data_in, data_out);
+      break;
 
     case IWL_XVT_CMD_MOD_TX:
-        ret = iwl_xvt_modulated_tx(xvt, data_in);
-        break;
+      ret = iwl_xvt_modulated_tx(xvt, data_in);
+      break;
 
     case IWL_XVT_CMD_RX_HDRS_MODE:
-        ret = iwl_xvt_rx_hdrs_mode(xvt, data_in);
-        break;
+      ret = iwl_xvt_rx_hdrs_mode(xvt, data_in);
+      break;
 
     case IWL_XVT_CMD_APMG_PD_MODE:
-        ret = iwl_xvt_apmg_pd_mode(xvt, data_in);
-        break;
+      ret = iwl_xvt_apmg_pd_mode(xvt, data_in);
+      break;
 
     case IWL_XVT_CMD_ALLOC_DMA:
-        ret = iwl_xvt_allocate_dma(xvt, data_in, data_out);
-        break;
+      ret = iwl_xvt_allocate_dma(xvt, data_in, data_out);
+      break;
 
     case IWL_XVT_CMD_GET_DMA:
-        ret = iwl_xvt_get_dma(xvt, data_in, data_out);
-        break;
+      ret = iwl_xvt_get_dma(xvt, data_in, data_out);
+      break;
 
     case IWL_XVT_CMD_FREE_DMA:
-        ret = iwl_xvt_free_dma(xvt, data_in);
-        break;
+      ret = iwl_xvt_free_dma(xvt, data_in);
+      break;
     case IWL_XVT_CMD_GET_CHIP_ID:
-        ret = iwl_xvt_get_chip_id(xvt, data_out);
-        break;
+      ret = iwl_xvt_get_chip_id(xvt, data_out);
+      break;
 
     case IWL_XVT_CMD_GET_MAC_ADDR_INFO:
-        ret = iwl_xvt_get_mac_addr_info(xvt, data_out);
-        break;
+      ret = iwl_xvt_get_mac_addr_info(xvt, data_out);
+      break;
 
     case IWL_XVT_CMD_MOD_TX_STOP:
-        ret = iwl_xvt_modulated_tx_infinite_stop(xvt, data_in);
-        break;
+      ret = iwl_xvt_modulated_tx_infinite_stop(xvt, data_in);
+      break;
 
     case IWL_XVT_CMD_TX_QUEUE_CFG:
-        ret = iwl_xvt_tx_queue_cfg(xvt, data_in);
-        break;
+      ret = iwl_xvt_tx_queue_cfg(xvt, data_in);
+      break;
     case IWL_XVT_CMD_DRIVER_CMD:
-        ret = iwl_xvt_handle_driver_cmd(xvt, data_in, data_out);
-        break;
+      ret = iwl_xvt_handle_driver_cmd(xvt, data_in, data_out);
+      break;
 
     default:
-        *supported_cmd = false;
-        ret = -EOPNOTSUPP;
-        IWL_DEBUG_INFO(xvt, "%s (cmd=0x%X) Not supported by xVT\n", __func__, cmd);
-        break;
-    }
+      *supported_cmd = false;
+      ret = -EOPNOTSUPP;
+      IWL_DEBUG_INFO(xvt, "%s (cmd=0x%X) Not supported by xVT\n", __func__, cmd);
+      break;
+  }
 
-    mutex_unlock(&xvt->mutex);
+  mutex_unlock(&xvt->mutex);
 
-    if (ret && *supported_cmd) {
-        IWL_ERR(xvt, "%s (cmd=0x%X) ret=%d\n", __func__, cmd, ret);
-    } else {
-        IWL_DEBUG_INFO(xvt, "%s (cmd=0x%X) ended Ok\n", __func__, cmd);
-    }
-    return ret;
+  if (ret && *supported_cmd) {
+    IWL_ERR(xvt, "%s (cmd=0x%X) ret=%d\n", __func__, cmd, ret);
+  } else {
+    IWL_DEBUG_INFO(xvt, "%s (cmd=0x%X) ended Ok\n", __func__, cmd);
+  }
+  return ret;
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/user-infc.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/user-infc.h
index 396760f..cac4e71 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/user-infc.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/user-infc.h
@@ -46,12 +46,12 @@
  */
 static inline int iwl_xvt_user_send_notif(struct iwl_xvt* xvt, uint32_t cmd, void* data,
                                           uint32_t size, gfp_t flags) {
-    int err;
-    IWL_DEBUG_INFO(xvt, "send user notification: cmd=0x%x, size=%d\n", cmd, size);
-    err = iwl_tm_gnl_send_msg(xvt->trans, cmd, false, data, size, flags);
+  int err;
+  IWL_DEBUG_INFO(xvt, "send user notification: cmd=0x%x, size=%d\n", cmd, size);
+  err = iwl_tm_gnl_send_msg(xvt->trans, cmd, false, data, size, flags);
 
-    WARN_ONCE(err, "failed to send notification to user, err %d\n", err);
-    return err;
+  WARN_ONCE(err, "failed to send notification to user, err %d\n", err);
+  return err;
 }
 
 void iwl_xvt_send_user_rx_notif(struct iwl_xvt* xvt, struct iwl_rx_cmd_buffer* rxb);
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/utils.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/utils.c
index 755db7e..f406ebf 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/utils.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/utils.c
@@ -30,44 +30,45 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include "fw-api.h"
 #include "iwl-debug.h"
 #include "iwl-io.h"
-
-#include "fw-api.h"
 #include "xvt.h"
 
 int iwl_xvt_send_cmd(struct iwl_xvt* xvt, struct iwl_host_cmd* cmd) {
-    /*
-     * Synchronous commands from this op-mode must hold
-     * the mutex, this ensures we don't try to send two
-     * (or more) synchronous commands at a time.
-     */
-    if (!(cmd->flags & CMD_ASYNC)) { lockdep_assert_held(&xvt->mutex); }
+  /*
+   * Synchronous commands from this op-mode must hold
+   * the mutex, this ensures we don't try to send two
+   * (or more) synchronous commands at a time.
+   */
+  if (!(cmd->flags & CMD_ASYNC)) {
+    lockdep_assert_held(&xvt->mutex);
+  }
 
-    return iwl_trans_send_cmd(xvt->trans, cmd);
+  return iwl_trans_send_cmd(xvt->trans, cmd);
 }
 
 int iwl_xvt_send_cmd_pdu(struct iwl_xvt* xvt, uint32_t id, uint32_t flags, uint16_t len,
                          const void* data) {
-    struct iwl_host_cmd cmd = {
-        .id = id,
-        .len =
-            {
-                len,
-            },
-        .data =
-            {
-                data,
-            },
-        .flags = flags,
-    };
+  struct iwl_host_cmd cmd = {
+      .id = id,
+      .len =
+          {
+              len,
+          },
+      .data =
+          {
+              data,
+          },
+      .flags = flags,
+  };
 
-    return iwl_xvt_send_cmd(xvt, &cmd);
+  return iwl_xvt_send_cmd(xvt, &cmd);
 }
 
 static struct {
-    char* name;
-    uint8_t num;
+  char* name;
+  uint8_t num;
 } advanced_lookup[] = {
     {"NMI_INTERRUPT_WDG", 0x34},
     {"SYSASSERT", 0x35},
@@ -88,147 +89,157 @@
 };
 
 static const char* desc_lookup(uint32_t num) {
-    int i;
+  int i;
 
-    for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
-        if (advanced_lookup[i].num == num) { return advanced_lookup[i].name; }
+  for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
+    if (advanced_lookup[i].num == num) {
+      return advanced_lookup[i].name;
+    }
 
-    /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
-    return advanced_lookup[i].name;
+  /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
+  return advanced_lookup[i].name;
 }
 
 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
 
 void iwl_xvt_get_nic_error_log_v1(struct iwl_xvt* xvt, struct iwl_error_event_table_v1* table) {
-    struct iwl_trans* trans = xvt->trans;
-    uint32_t base;
-    /* TODO: support CDB */
-    base = xvt->error_event_table[0];
-    if (xvt->fwrt.cur_fw_img == IWL_UCODE_INIT) {
-        if (!base) { base = xvt->fw->init_errlog_ptr; }
-    } else {
-        if (!base) { base = xvt->fw->inst_errlog_ptr; }
+  struct iwl_trans* trans = xvt->trans;
+  uint32_t base;
+  /* TODO: support CDB */
+  base = xvt->error_event_table[0];
+  if (xvt->fwrt.cur_fw_img == IWL_UCODE_INIT) {
+    if (!base) {
+      base = xvt->fw->init_errlog_ptr;
     }
+  } else {
+    if (!base) {
+      base = xvt->fw->inst_errlog_ptr;
+    }
+  }
 
-    iwl_trans_read_mem_bytes(trans, base, table, sizeof(*table));
+  iwl_trans_read_mem_bytes(trans, base, table, sizeof(*table));
 }
 
 void iwl_xvt_dump_nic_error_log_v1(struct iwl_xvt* xvt, struct iwl_error_event_table_v1* table) {
-    IWL_ERR(xvt, "0x%08X | %-28s\n", table->error_id, desc_lookup(table->error_id));
-    IWL_ERR(xvt, "0x%08X | uPc\n", table->pc);
-    IWL_ERR(xvt, "0x%08X | branchlink1\n", table->blink1);
-    IWL_ERR(xvt, "0x%08X | branchlink2\n", table->blink2);
-    IWL_ERR(xvt, "0x%08X | interruptlink1\n", table->ilink1);
-    IWL_ERR(xvt, "0x%08X | interruptlink2\n", table->ilink2);
-    IWL_ERR(xvt, "0x%08X | data1\n", table->data1);
-    IWL_ERR(xvt, "0x%08X | data2\n", table->data2);
-    IWL_ERR(xvt, "0x%08X | data3\n", table->data3);
-    IWL_ERR(xvt, "0x%08X | beacon time\n", table->bcon_time);
-    IWL_ERR(xvt, "0x%08X | tsf low\n", table->tsf_low);
-    IWL_ERR(xvt, "0x%08X | tsf hi\n", table->tsf_hi);
-    IWL_ERR(xvt, "0x%08X | time gp1\n", table->gp1);
-    IWL_ERR(xvt, "0x%08X | time gp2\n", table->gp2);
-    IWL_ERR(xvt, "0x%08X | time gp3\n", table->gp3);
-    IWL_ERR(xvt, "0x%08X | uCode version\n", table->ucode_ver);
-    IWL_ERR(xvt, "0x%08X | hw version\n", table->hw_ver);
-    IWL_ERR(xvt, "0x%08X | board version\n", table->brd_ver);
-    IWL_ERR(xvt, "0x%08X | hcmd\n", table->hcmd);
-    IWL_ERR(xvt, "0x%08X | isr0\n", table->isr0);
-    IWL_ERR(xvt, "0x%08X | isr1\n", table->isr1);
-    IWL_ERR(xvt, "0x%08X | isr2\n", table->isr2);
-    IWL_ERR(xvt, "0x%08X | isr3\n", table->isr3);
-    IWL_ERR(xvt, "0x%08X | isr4\n", table->isr4);
-    IWL_ERR(xvt, "0x%08X | isr_pref\n", table->isr_pref);
-    IWL_ERR(xvt, "0x%08X | wait_event\n", table->wait_event);
-    IWL_ERR(xvt, "0x%08X | l2p_control\n", table->l2p_control);
-    IWL_ERR(xvt, "0x%08X | l2p_duration\n", table->l2p_duration);
-    IWL_ERR(xvt, "0x%08X | l2p_mhvalid\n", table->l2p_mhvalid);
-    IWL_ERR(xvt, "0x%08X | l2p_addr_match\n", table->l2p_addr_match);
-    IWL_ERR(xvt, "0x%08X | lmpm_pmg_sel\n", table->lmpm_pmg_sel);
-    IWL_ERR(xvt, "0x%08X | timestamp\n", table->u_timestamp);
-    IWL_ERR(xvt, "0x%08X | flow_handler\n", table->flow_handler);
+  IWL_ERR(xvt, "0x%08X | %-28s\n", table->error_id, desc_lookup(table->error_id));
+  IWL_ERR(xvt, "0x%08X | uPc\n", table->pc);
+  IWL_ERR(xvt, "0x%08X | branchlink1\n", table->blink1);
+  IWL_ERR(xvt, "0x%08X | branchlink2\n", table->blink2);
+  IWL_ERR(xvt, "0x%08X | interruptlink1\n", table->ilink1);
+  IWL_ERR(xvt, "0x%08X | interruptlink2\n", table->ilink2);
+  IWL_ERR(xvt, "0x%08X | data1\n", table->data1);
+  IWL_ERR(xvt, "0x%08X | data2\n", table->data2);
+  IWL_ERR(xvt, "0x%08X | data3\n", table->data3);
+  IWL_ERR(xvt, "0x%08X | beacon time\n", table->bcon_time);
+  IWL_ERR(xvt, "0x%08X | tsf low\n", table->tsf_low);
+  IWL_ERR(xvt, "0x%08X | tsf hi\n", table->tsf_hi);
+  IWL_ERR(xvt, "0x%08X | time gp1\n", table->gp1);
+  IWL_ERR(xvt, "0x%08X | time gp2\n", table->gp2);
+  IWL_ERR(xvt, "0x%08X | time gp3\n", table->gp3);
+  IWL_ERR(xvt, "0x%08X | uCode version\n", table->ucode_ver);
+  IWL_ERR(xvt, "0x%08X | hw version\n", table->hw_ver);
+  IWL_ERR(xvt, "0x%08X | board version\n", table->brd_ver);
+  IWL_ERR(xvt, "0x%08X | hcmd\n", table->hcmd);
+  IWL_ERR(xvt, "0x%08X | isr0\n", table->isr0);
+  IWL_ERR(xvt, "0x%08X | isr1\n", table->isr1);
+  IWL_ERR(xvt, "0x%08X | isr2\n", table->isr2);
+  IWL_ERR(xvt, "0x%08X | isr3\n", table->isr3);
+  IWL_ERR(xvt, "0x%08X | isr4\n", table->isr4);
+  IWL_ERR(xvt, "0x%08X | isr_pref\n", table->isr_pref);
+  IWL_ERR(xvt, "0x%08X | wait_event\n", table->wait_event);
+  IWL_ERR(xvt, "0x%08X | l2p_control\n", table->l2p_control);
+  IWL_ERR(xvt, "0x%08X | l2p_duration\n", table->l2p_duration);
+  IWL_ERR(xvt, "0x%08X | l2p_mhvalid\n", table->l2p_mhvalid);
+  IWL_ERR(xvt, "0x%08X | l2p_addr_match\n", table->l2p_addr_match);
+  IWL_ERR(xvt, "0x%08X | lmpm_pmg_sel\n", table->lmpm_pmg_sel);
+  IWL_ERR(xvt, "0x%08X | timestamp\n", table->u_timestamp);
+  IWL_ERR(xvt, "0x%08X | flow_handler\n", table->flow_handler);
 }
 
 void iwl_xvt_get_nic_error_log_v2(struct iwl_xvt* xvt, struct iwl_error_event_table_v2* table) {
-    struct iwl_trans* trans = xvt->trans;
-    uint32_t base;
-    /* TODO: support CDB */
-    base = xvt->error_event_table[0];
-    if (xvt->fwrt.cur_fw_img == IWL_UCODE_INIT) {
-        if (!base) { base = xvt->fw->init_errlog_ptr; }
-    } else {
-        if (!base) { base = xvt->fw->inst_errlog_ptr; }
+  struct iwl_trans* trans = xvt->trans;
+  uint32_t base;
+  /* TODO: support CDB */
+  base = xvt->error_event_table[0];
+  if (xvt->fwrt.cur_fw_img == IWL_UCODE_INIT) {
+    if (!base) {
+      base = xvt->fw->init_errlog_ptr;
     }
+  } else {
+    if (!base) {
+      base = xvt->fw->inst_errlog_ptr;
+    }
+  }
 
-    iwl_trans_read_mem_bytes(trans, base, table, sizeof(*table));
+  iwl_trans_read_mem_bytes(trans, base, table, sizeof(*table));
 }
 
 void iwl_xvt_dump_nic_error_log_v2(struct iwl_xvt* xvt, struct iwl_error_event_table_v2* table) {
-    IWL_ERR(xvt, "0x%08X | %-28s\n", table->error_id, desc_lookup(table->error_id));
-    IWL_ERR(xvt, "0x%08X | trm_hw_status0\n", table->trm_hw_status0);
-    IWL_ERR(xvt, "0x%08X | trm_hw_status1\n", table->trm_hw_status1);
-    IWL_ERR(xvt, "0x%08X | branchlink2\n", table->blink2);
-    IWL_ERR(xvt, "0x%08X | interruptlink1\n", table->ilink1);
-    IWL_ERR(xvt, "0x%08X | interruptlink2\n", table->ilink2);
-    IWL_ERR(xvt, "0x%08X | data1\n", table->data1);
-    IWL_ERR(xvt, "0x%08X | data2\n", table->data2);
-    IWL_ERR(xvt, "0x%08X | data3\n", table->data3);
-    IWL_ERR(xvt, "0x%08X | beacon time\n", table->bcon_time);
-    IWL_ERR(xvt, "0x%08X | tsf low\n", table->tsf_low);
-    IWL_ERR(xvt, "0x%08X | tsf hi\n", table->tsf_hi);
-    IWL_ERR(xvt, "0x%08X | time gp1\n", table->gp1);
-    IWL_ERR(xvt, "0x%08X | time gp2\n", table->gp2);
-    IWL_ERR(xvt, "0x%08X | uCode revision type\n", table->fw_rev_type);
-    IWL_ERR(xvt, "0x%08X | uCode version major\n", table->major);
-    IWL_ERR(xvt, "0x%08X | uCode version minor\n", table->minor);
-    IWL_ERR(xvt, "0x%08X | hw version\n", table->hw_ver);
-    IWL_ERR(xvt, "0x%08X | board version\n", table->brd_ver);
-    IWL_ERR(xvt, "0x%08X | hcmd\n", table->hcmd);
-    IWL_ERR(xvt, "0x%08X | isr0\n", table->isr0);
-    IWL_ERR(xvt, "0x%08X | isr1\n", table->isr1);
-    IWL_ERR(xvt, "0x%08X | isr2\n", table->isr2);
-    IWL_ERR(xvt, "0x%08X | isr3\n", table->isr3);
-    IWL_ERR(xvt, "0x%08X | isr4\n", table->isr4);
-    IWL_ERR(xvt, "0x%08X | last cmd Id\n", table->last_cmd_id);
-    IWL_ERR(xvt, "0x%08X | wait_event\n", table->wait_event);
-    IWL_ERR(xvt, "0x%08X | l2p_control\n", table->l2p_control);
-    IWL_ERR(xvt, "0x%08X | l2p_duration\n", table->l2p_duration);
-    IWL_ERR(xvt, "0x%08X | l2p_mhvalid\n", table->l2p_mhvalid);
-    IWL_ERR(xvt, "0x%08X | l2p_addr_match\n", table->l2p_addr_match);
-    IWL_ERR(xvt, "0x%08X | lmpm_pmg_sel\n", table->lmpm_pmg_sel);
-    IWL_ERR(xvt, "0x%08X | timestamp\n", table->u_timestamp);
-    IWL_ERR(xvt, "0x%08X | flow_handler\n", table->flow_handler);
+  IWL_ERR(xvt, "0x%08X | %-28s\n", table->error_id, desc_lookup(table->error_id));
+  IWL_ERR(xvt, "0x%08X | trm_hw_status0\n", table->trm_hw_status0);
+  IWL_ERR(xvt, "0x%08X | trm_hw_status1\n", table->trm_hw_status1);
+  IWL_ERR(xvt, "0x%08X | branchlink2\n", table->blink2);
+  IWL_ERR(xvt, "0x%08X | interruptlink1\n", table->ilink1);
+  IWL_ERR(xvt, "0x%08X | interruptlink2\n", table->ilink2);
+  IWL_ERR(xvt, "0x%08X | data1\n", table->data1);
+  IWL_ERR(xvt, "0x%08X | data2\n", table->data2);
+  IWL_ERR(xvt, "0x%08X | data3\n", table->data3);
+  IWL_ERR(xvt, "0x%08X | beacon time\n", table->bcon_time);
+  IWL_ERR(xvt, "0x%08X | tsf low\n", table->tsf_low);
+  IWL_ERR(xvt, "0x%08X | tsf hi\n", table->tsf_hi);
+  IWL_ERR(xvt, "0x%08X | time gp1\n", table->gp1);
+  IWL_ERR(xvt, "0x%08X | time gp2\n", table->gp2);
+  IWL_ERR(xvt, "0x%08X | uCode revision type\n", table->fw_rev_type);
+  IWL_ERR(xvt, "0x%08X | uCode version major\n", table->major);
+  IWL_ERR(xvt, "0x%08X | uCode version minor\n", table->minor);
+  IWL_ERR(xvt, "0x%08X | hw version\n", table->hw_ver);
+  IWL_ERR(xvt, "0x%08X | board version\n", table->brd_ver);
+  IWL_ERR(xvt, "0x%08X | hcmd\n", table->hcmd);
+  IWL_ERR(xvt, "0x%08X | isr0\n", table->isr0);
+  IWL_ERR(xvt, "0x%08X | isr1\n", table->isr1);
+  IWL_ERR(xvt, "0x%08X | isr2\n", table->isr2);
+  IWL_ERR(xvt, "0x%08X | isr3\n", table->isr3);
+  IWL_ERR(xvt, "0x%08X | isr4\n", table->isr4);
+  IWL_ERR(xvt, "0x%08X | last cmd Id\n", table->last_cmd_id);
+  IWL_ERR(xvt, "0x%08X | wait_event\n", table->wait_event);
+  IWL_ERR(xvt, "0x%08X | l2p_control\n", table->l2p_control);
+  IWL_ERR(xvt, "0x%08X | l2p_duration\n", table->l2p_duration);
+  IWL_ERR(xvt, "0x%08X | l2p_mhvalid\n", table->l2p_mhvalid);
+  IWL_ERR(xvt, "0x%08X | l2p_addr_match\n", table->l2p_addr_match);
+  IWL_ERR(xvt, "0x%08X | lmpm_pmg_sel\n", table->lmpm_pmg_sel);
+  IWL_ERR(xvt, "0x%08X | timestamp\n", table->u_timestamp);
+  IWL_ERR(xvt, "0x%08X | flow_handler\n", table->flow_handler);
 }
 
 void iwl_xvt_get_umac_error_log(struct iwl_xvt* xvt, struct iwl_umac_error_event_table* table) {
-    struct iwl_trans* trans = xvt->trans;
-    uint32_t base;
+  struct iwl_trans* trans = xvt->trans;
+  uint32_t base;
 
-    base = xvt->umac_error_event_table;
+  base = xvt->umac_error_event_table;
 
-    if (base < trans->cfg->min_umac_error_event_table) {
-        IWL_ERR(xvt, "Not valid error log pointer 0x%08X for %s uCode\n", base,
-                (xvt->fwrt.cur_fw_img == IWL_UCODE_INIT) ? "Init" : "RT");
-        return;
-    }
+  if (base < trans->cfg->min_umac_error_event_table) {
+    IWL_ERR(xvt, "Not valid error log pointer 0x%08X for %s uCode\n", base,
+            (xvt->fwrt.cur_fw_img == IWL_UCODE_INIT) ? "Init" : "RT");
+    return;
+  }
 
-    iwl_trans_read_mem_bytes(trans, base, table, sizeof(*table));
+  iwl_trans_read_mem_bytes(trans, base, table, sizeof(*table));
 }
 
 void iwl_xvt_dump_umac_error_log(struct iwl_xvt* xvt, struct iwl_umac_error_event_table* table) {
-    IWL_ERR(xvt, "0x%08X | %s\n", table->error_id, desc_lookup(table->error_id));
-    IWL_ERR(xvt, "0x%08X | umac branchlink1\n", table->blink1);
-    IWL_ERR(xvt, "0x%08X | umac branchlink2\n", table->blink2);
-    IWL_ERR(xvt, "0x%08X | umac interruptlink1\n", table->ilink1);
-    IWL_ERR(xvt, "0x%08X | umac interruptlink2\n", table->ilink2);
-    IWL_ERR(xvt, "0x%08X | umac data1\n", table->data1);
-    IWL_ERR(xvt, "0x%08X | umac data2\n", table->data2);
-    IWL_ERR(xvt, "0x%08X | umac data3\n", table->data3);
-    IWL_ERR(xvt, "0x%08X | umac major\n", table->umac_major);
-    IWL_ERR(xvt, "0x%08X | umac minor\n", table->umac_minor);
-    IWL_ERR(xvt, "0x%08X | frame pointer\n", table->frame_pointer);
-    IWL_ERR(xvt, "0x%08X | stack pointer\n", table->stack_pointer);
-    IWL_ERR(xvt, "0x%08X | last host cmd\n", table->cmd_header);
-    IWL_ERR(xvt, "0x%08X | isr status reg\n", table->nic_isr_pref);
+  IWL_ERR(xvt, "0x%08X | %s\n", table->error_id, desc_lookup(table->error_id));
+  IWL_ERR(xvt, "0x%08X | umac branchlink1\n", table->blink1);
+  IWL_ERR(xvt, "0x%08X | umac branchlink2\n", table->blink2);
+  IWL_ERR(xvt, "0x%08X | umac interruptlink1\n", table->ilink1);
+  IWL_ERR(xvt, "0x%08X | umac interruptlink2\n", table->ilink2);
+  IWL_ERR(xvt, "0x%08X | umac data1\n", table->data1);
+  IWL_ERR(xvt, "0x%08X | umac data2\n", table->data2);
+  IWL_ERR(xvt, "0x%08X | umac data3\n", table->data3);
+  IWL_ERR(xvt, "0x%08X | umac major\n", table->umac_major);
+  IWL_ERR(xvt, "0x%08X | umac minor\n", table->umac_minor);
+  IWL_ERR(xvt, "0x%08X | frame pointer\n", table->frame_pointer);
+  IWL_ERR(xvt, "0x%08X | stack pointer\n", table->stack_pointer);
+  IWL_ERR(xvt, "0x%08X | last host cmd\n", table->cmd_header);
+  IWL_ERR(xvt, "0x%08X | isr status reg\n", table->nic_isr_pref);
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/xvt.c b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/xvt.c
index 19961d7..fcd8808 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/xvt.c
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/xvt.c
@@ -32,6 +32,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include "xvt.h"
+
 #include <linux/module.h>
 #include <linux/types.h>
 
@@ -49,7 +51,6 @@
 #include "iwl-prph.h"
 #include "iwl-trans.h"
 #include "user-infc.h"
-#include "xvt.h"
 
 #define DRV_DESCRIPTION "Intel(R) xVT driver for Linux"
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -63,14 +64,10 @@
 /*
  * module init and exit functions
  */
-static int __init iwl_xvt_init(void) {
-    return iwl_opmode_register("iwlxvt", &iwl_xvt_ops);
-}
+static int __init iwl_xvt_init(void) { return iwl_opmode_register("iwlxvt", &iwl_xvt_ops); }
 module_init(iwl_xvt_init);
 
-static void __exit iwl_xvt_exit(void) {
-    iwl_opmode_deregister("iwlxvt");
-}
+static void __exit iwl_xvt_exit(void) { iwl_opmode_deregister("iwlxvt"); }
 module_exit(iwl_xvt_exit);
 
 /* Please keep this array *SORTED* by hex value.
@@ -173,481 +170,510 @@
 };
 
 static int iwl_xvt_tm_send_hcmd(void* op_mode, struct iwl_host_cmd* host_cmd) {
-    struct iwl_xvt* xvt = (struct iwl_xvt*)op_mode;
+  struct iwl_xvt* xvt = (struct iwl_xvt*)op_mode;
 
-    if (WARN_ON_ONCE(!op_mode)) { return -EINVAL; }
+  if (WARN_ON_ONCE(!op_mode)) {
+    return -EINVAL;
+  }
 
-    return iwl_xvt_send_cmd(xvt, host_cmd);
+  return iwl_xvt_send_cmd(xvt, host_cmd);
 }
 
 static struct iwl_op_mode* iwl_xvt_start(struct iwl_trans* trans, const struct iwl_cfg* cfg,
                                          const struct iwl_fw* fw, struct dentry* dbgfs_dir) {
-    struct iwl_op_mode* op_mode;
-    struct iwl_xvt* xvt;
-    struct iwl_trans_config trans_cfg = {};
-    static const uint8_t no_reclaim_cmds[] = {
-        TX_CMD,
-    };
-    uint8_t i, num_of_lmacs;
-    int err;
+  struct iwl_op_mode* op_mode;
+  struct iwl_xvt* xvt;
+  struct iwl_trans_config trans_cfg = {};
+  static const uint8_t no_reclaim_cmds[] = {
+      TX_CMD,
+  };
+  uint8_t i, num_of_lmacs;
+  int err;
 
-    op_mode = kzalloc(sizeof(struct iwl_op_mode) + sizeof(struct iwl_xvt), GFP_KERNEL);
-    if (!op_mode) { return NULL; }
+  op_mode = kzalloc(sizeof(struct iwl_op_mode) + sizeof(struct iwl_xvt), GFP_KERNEL);
+  if (!op_mode) {
+    return NULL;
+  }
 
-    op_mode->ops = &iwl_xvt_ops;
+  op_mode->ops = &iwl_xvt_ops;
 
-    xvt = IWL_OP_MODE_GET_XVT(op_mode);
-    xvt->fw = fw;
-    xvt->cfg = cfg;
-    xvt->trans = trans;
-    xvt->dev = trans->dev;
+  xvt = IWL_OP_MODE_GET_XVT(op_mode);
+  xvt->fw = fw;
+  xvt->cfg = cfg;
+  xvt->trans = trans;
+  xvt->dev = trans->dev;
 
-    iwl_fw_runtime_init(&xvt->fwrt, trans, fw, NULL, NULL, dbgfs_dir);
+  iwl_fw_runtime_init(&xvt->fwrt, trans, fw, NULL, NULL, dbgfs_dir);
 
-    mutex_init(&xvt->mutex);
-    spin_lock_init(&xvt->notif_lock);
+  mutex_init(&xvt->mutex);
+  spin_lock_init(&xvt->notif_lock);
 
-    /*
-     * Populate the state variables that the
-     * transport layer needs to know about.
-     */
-    trans_cfg.op_mode = op_mode;
-    trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
-    trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
-    trans_cfg.command_groups = iwl_xvt_cmd_groups;
-    trans_cfg.command_groups_size = ARRAY_SIZE(iwl_xvt_cmd_groups);
-    trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
-    IWL_DEBUG_INFO(xvt, "dqa supported\n");
-    trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
-    trans_cfg.bc_table_dword = true;
-    trans_cfg.scd_set_active = true;
-    trans->wide_cmd_header = true;
+  /*
+   * Populate the state variables that the
+   * transport layer needs to know about.
+   */
+  trans_cfg.op_mode = op_mode;
+  trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
+  trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+  trans_cfg.command_groups = iwl_xvt_cmd_groups;
+  trans_cfg.command_groups_size = ARRAY_SIZE(iwl_xvt_cmd_groups);
+  trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+  IWL_DEBUG_INFO(xvt, "dqa supported\n");
+  trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
+  trans_cfg.bc_table_dword = true;
+  trans_cfg.scd_set_active = true;
+  trans->wide_cmd_header = true;
 
-    switch (iwlwifi_mod_params.amsdu_size) {
+  switch (iwlwifi_mod_params.amsdu_size) {
     case IWL_AMSDU_DEF:
     case IWL_AMSDU_4K:
-        trans_cfg.rx_buf_size = IWL_AMSDU_4K;
-        break;
+      trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+      break;
     case IWL_AMSDU_8K:
-        trans_cfg.rx_buf_size = IWL_AMSDU_8K;
-        break;
+      trans_cfg.rx_buf_size = IWL_AMSDU_8K;
+      break;
     case IWL_AMSDU_12K:
-        trans_cfg.rx_buf_size = IWL_AMSDU_12K;
-        break;
+      trans_cfg.rx_buf_size = IWL_AMSDU_12K;
+      break;
     default:
-        pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, iwlwifi_mod_params.amsdu_size);
-        trans_cfg.rx_buf_size = IWL_AMSDU_4K;
-    }
-    /* the hardware splits the A-MSDU */
-    if (xvt->trans->cfg->mq_rx_supported) { trans_cfg.rx_buf_size = IWL_AMSDU_4K; }
+      pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, iwlwifi_mod_params.amsdu_size);
+      trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+  }
+  /* the hardware splits the A-MSDU */
+  if (xvt->trans->cfg->mq_rx_supported) {
+    trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+  }
 
-    trans->rx_mpdu_cmd_hdr_size = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-                                      ? sizeof(struct iwl_rx_mpdu_desc)
-                                      : IWL_RX_DESC_SIZE_V1;
+  trans->rx_mpdu_cmd_hdr_size = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+                                    ? sizeof(struct iwl_rx_mpdu_desc)
+                                    : IWL_RX_DESC_SIZE_V1;
 
-    trans_cfg.cb_data_offs = offsetof(struct iwl_xvt_skb_info, trans);
+  trans_cfg.cb_data_offs = offsetof(struct iwl_xvt_skb_info, trans);
 
-    /* Configure transport layer */
-    iwl_trans_configure(xvt->trans, &trans_cfg);
-    trans->command_groups = trans_cfg.command_groups;
-    trans->command_groups_size = trans_cfg.command_groups_size;
+  /* Configure transport layer */
+  iwl_trans_configure(xvt->trans, &trans_cfg);
+  trans->command_groups = trans_cfg.command_groups;
+  trans->command_groups_size = trans_cfg.command_groups_size;
 
-    /* set up notification wait support */
-    iwl_notification_wait_init(&xvt->notif_wait);
+  /* set up notification wait support */
+  iwl_notification_wait_init(&xvt->notif_wait);
 
-    iwl_tm_init(trans, xvt->fw, &xvt->mutex, xvt);
+  iwl_tm_init(trans, xvt->fw, &xvt->mutex, xvt);
 
-    /* Init phy db */
-    xvt->phy_db = iwl_phy_db_init(xvt->trans);
-    if (!xvt->phy_db) { goto out_free; }
+  /* Init phy db */
+  xvt->phy_db = iwl_phy_db_init(xvt->trans);
+  if (!xvt->phy_db) {
+    goto out_free;
+  }
 
-    iwl_dnt_init(xvt->trans, dbgfs_dir);
+  iwl_dnt_init(xvt->trans, dbgfs_dir);
 
-    num_of_lmacs = iwl_xvt_is_cdb_supported(xvt) ? NUM_OF_LMACS : 1;
+  num_of_lmacs = iwl_xvt_is_cdb_supported(xvt) ? NUM_OF_LMACS : 1;
 
-    for (i = 0; i < num_of_lmacs; i++) {
-        init_waitqueue_head(&xvt->tx_meta_data[i].mod_tx_wq);
-        init_waitqueue_head(&xvt->tx_meta_data[i].mod_tx_done_wq);
-        xvt->tx_meta_data[i].queue = -1;
-        xvt->tx_meta_data[i].tx_mod_thread = NULL;
-        xvt->tx_meta_data[i].txq_full = false;
-    };
+  for (i = 0; i < num_of_lmacs; i++) {
+    init_waitqueue_head(&xvt->tx_meta_data[i].mod_tx_wq);
+    init_waitqueue_head(&xvt->tx_meta_data[i].mod_tx_done_wq);
+    xvt->tx_meta_data[i].queue = -1;
+    xvt->tx_meta_data[i].tx_mod_thread = NULL;
+    xvt->tx_meta_data[i].txq_full = false;
+  };
 
-    for (i = 0; i < ARRAY_SIZE(xvt->reorder_bufs); i++) {
-        xvt->reorder_bufs[i].sta_id = IWL_XVT_INVALID_STA;
-    }
+  for (i = 0; i < ARRAY_SIZE(xvt->reorder_bufs); i++) {
+    xvt->reorder_bufs[i].sta_id = IWL_XVT_INVALID_STA;
+  }
 
-    memset(xvt->payloads, 0, sizeof(xvt->payloads));
-    xvt->tx_task = NULL;
-    xvt->is_enhanced_tx = false;
-    xvt->send_tx_resp = false;
-    xvt->send_rx_mpdu = true;
-    memset(xvt->queue_data, 0, sizeof(xvt->queue_data));
-    init_waitqueue_head(&xvt->tx_done_wq);
+  memset(xvt->payloads, 0, sizeof(xvt->payloads));
+  xvt->tx_task = NULL;
+  xvt->is_enhanced_tx = false;
+  xvt->send_tx_resp = false;
+  xvt->send_rx_mpdu = true;
+  memset(xvt->queue_data, 0, sizeof(xvt->queue_data));
+  init_waitqueue_head(&xvt->tx_done_wq);
 
-    trans->dbg_dest_tlv = xvt->fw->dbg.dest_tlv;
-    trans->dbg_n_dest_reg = xvt->fw->dbg.n_dest_reg;
-    memcpy(trans->dbg_conf_tlv, xvt->fw->dbg.conf_tlv, sizeof(trans->dbg_conf_tlv));
-    trans->dbg_trigger_tlv = xvt->fw->dbg.trigger_tlv;
+  trans->dbg_dest_tlv = xvt->fw->dbg.dest_tlv;
+  trans->dbg_n_dest_reg = xvt->fw->dbg.n_dest_reg;
+  memcpy(trans->dbg_conf_tlv, xvt->fw->dbg.conf_tlv, sizeof(trans->dbg_conf_tlv));
+  trans->dbg_trigger_tlv = xvt->fw->dbg.trigger_tlv;
 
-    IWL_INFO(xvt, "Detected %s, REV=0x%X, xVT operation mode\n", xvt->cfg->name,
-             xvt->trans->hw_rev);
+  IWL_INFO(xvt, "Detected %s, REV=0x%X, xVT operation mode\n", xvt->cfg->name, xvt->trans->hw_rev);
 
-    err = iwl_xvt_dbgfs_register(xvt, dbgfs_dir);
-    if (err) { IWL_ERR(xvt, "failed register xvt debugfs folder (%d)\n", err); }
+  err = iwl_xvt_dbgfs_register(xvt, dbgfs_dir);
+  if (err) {
+    IWL_ERR(xvt, "failed register xvt debugfs folder (%d)\n", err);
+  }
 
-    return op_mode;
+  return op_mode;
 
 out_free:
-    kfree(op_mode);
+  kfree(op_mode);
 
-    return NULL;
+  return NULL;
 }
 
 static void iwl_xvt_stop(struct iwl_op_mode* op_mode) {
-    struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
-    int i;
+  struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
+  int i;
 
-    iwl_fw_cancel_timestamp(&xvt->fwrt);
+  iwl_fw_cancel_timestamp(&xvt->fwrt);
 
-    if (xvt->state != IWL_XVT_STATE_UNINITIALIZED) {
-        if (xvt->fw_running) {
-            iwl_xvt_txq_disable(xvt);
-            xvt->fw_running = false;
-        }
-        iwl_trans_stop_device(xvt->trans);
+  if (xvt->state != IWL_XVT_STATE_UNINITIALIZED) {
+    if (xvt->fw_running) {
+      iwl_xvt_txq_disable(xvt);
+      xvt->fw_running = false;
     }
+    iwl_trans_stop_device(xvt->trans);
+  }
 
-    for (i = 0; i < ARRAY_SIZE(xvt->reorder_bufs); i++) {
-        struct iwl_xvt_reorder_buffer* buffer;
+  for (i = 0; i < ARRAY_SIZE(xvt->reorder_bufs); i++) {
+    struct iwl_xvt_reorder_buffer* buffer;
 
-        buffer = &xvt->reorder_bufs[i];
-        iwl_xvt_destroy_reorder_buffer(xvt, buffer);
-    }
+    buffer = &xvt->reorder_bufs[i];
+    iwl_xvt_destroy_reorder_buffer(xvt, buffer);
+  }
 
-    iwl_phy_db_free(xvt->phy_db);
-    xvt->phy_db = NULL;
-    iwl_dnt_free(xvt->trans);
-    kfree(op_mode);
+  iwl_phy_db_free(xvt->phy_db);
+  xvt->phy_db = NULL;
+  iwl_dnt_free(xvt->trans);
+  kfree(op_mode);
 }
 
 static void iwl_xvt_reclaim_and_free(struct iwl_xvt* xvt, struct tx_meta_data* tx_data,
                                      uint16_t txq_id, uint16_t ssn) {
-    struct sk_buff_head skbs;
-    struct sk_buff* skb;
-    struct iwl_xvt_skb_info* skb_info;
+  struct sk_buff_head skbs;
+  struct sk_buff* skb;
+  struct iwl_xvt_skb_info* skb_info;
 
-    __skb_queue_head_init(&skbs);
+  __skb_queue_head_init(&skbs);
 
-    iwl_trans_reclaim(xvt->trans, txq_id, ssn, &skbs);
+  iwl_trans_reclaim(xvt->trans, txq_id, ssn, &skbs);
 
-    while (!skb_queue_empty(&skbs)) {
-        skb = __skb_dequeue(&skbs);
-        skb_info = (void*)skb->cb;
-        if (xvt->is_enhanced_tx) {
-            xvt->queue_data[txq_id].tx_counter++;
-            xvt->num_of_tx_resp++;
-        } else {
-            tx_data->tx_counter++;
-        }
-
-        if (skb_info->dev_cmd) { iwl_trans_free_tx_cmd(xvt->trans, skb_info->dev_cmd); }
-        kfree_skb(skb);
+  while (!skb_queue_empty(&skbs)) {
+    skb = __skb_dequeue(&skbs);
+    skb_info = (void*)skb->cb;
+    if (xvt->is_enhanced_tx) {
+      xvt->queue_data[txq_id].tx_counter++;
+      xvt->num_of_tx_resp++;
+    } else {
+      tx_data->tx_counter++;
     }
 
-    if (xvt->is_enhanced_tx && xvt->expected_tx_amount == xvt->num_of_tx_resp) {
-        wake_up_interruptible(&xvt->tx_done_wq);
-    } else if (tx_data->tot_tx == tx_data->tx_counter) {
-        wake_up_interruptible(&tx_data->mod_tx_done_wq);
+    if (skb_info->dev_cmd) {
+      iwl_trans_free_tx_cmd(xvt->trans, skb_info->dev_cmd);
     }
+    kfree_skb(skb);
+  }
+
+  if (xvt->is_enhanced_tx && xvt->expected_tx_amount == xvt->num_of_tx_resp) {
+    wake_up_interruptible(&xvt->tx_done_wq);
+  } else if (tx_data->tot_tx == tx_data->tx_counter) {
+    wake_up_interruptible(&tx_data->mod_tx_done_wq);
+  }
 }
 
 static struct tx_meta_data* iwl_xvt_rx_get_tx_meta_data(struct iwl_xvt* xvt, uint16_t txq_id) {
-    uint8_t lmac_id;
+  uint8_t lmac_id;
 
-    /*
-     * in case of enhanced_tx, tx_meta_data->queue is not
-     * being set, so there's nothing to verify
-     */
-    if (xvt->is_enhanced_tx) { return &xvt->tx_meta_data[XVT_LMAC_0_ID]; }
+  /*
+   * in case of enhanced_tx, tx_meta_data->queue is not
+   * being set, so there's nothing to verify
+   */
+  if (xvt->is_enhanced_tx) {
+    return &xvt->tx_meta_data[XVT_LMAC_0_ID];
+  }
 
-    if (!iwl_xvt_is_unified_fw(xvt)) {
-        lmac_id = XVT_LMAC_0_ID;
-        goto verify;
-    }
-
-    if (txq_id == xvt->tx_meta_data[XVT_LMAC_1_ID].queue) {
-        lmac_id = XVT_LMAC_1_ID;
-        goto verify;
-    }
-
+  if (!iwl_xvt_is_unified_fw(xvt)) {
     lmac_id = XVT_LMAC_0_ID;
-verify:
-    if (WARN(txq_id != xvt->tx_meta_data[lmac_id].queue,
-             "got TX_CMD from unidentified queue: (lmac %d) %d %d\n", lmac_id, txq_id,
-             xvt->tx_meta_data[lmac_id].queue)) {
-        return NULL;
-    }
+    goto verify;
+  }
 
-    return &xvt->tx_meta_data[lmac_id];
+  if (txq_id == xvt->tx_meta_data[XVT_LMAC_1_ID].queue) {
+    lmac_id = XVT_LMAC_1_ID;
+    goto verify;
+  }
+
+  lmac_id = XVT_LMAC_0_ID;
+verify:
+  if (WARN(txq_id != xvt->tx_meta_data[lmac_id].queue,
+           "got TX_CMD from unidentified queue: (lmac %d) %d %d\n", lmac_id, txq_id,
+           xvt->tx_meta_data[lmac_id].queue)) {
+    return NULL;
+  }
+
+  return &xvt->tx_meta_data[lmac_id];
 }
 
 static void iwl_xvt_rx_tx_cmd_single(struct iwl_xvt* xvt, struct iwl_rx_packet* pkt) {
-    /* struct iwl_mvm_tx_resp_v3 is almost the same */
-    struct iwl_mvm_tx_resp* tx_resp = (void*)pkt->data;
-    int txq_id = SEQ_TO_QUEUE(le16_to_cpu(pkt->hdr.sequence));
-    uint16_t ssn = iwl_xvt_get_scd_ssn(xvt, tx_resp);
-    struct tx_meta_data* tx_data;
-    uint16_t status = le16_to_cpu(iwl_xvt_get_agg_status(xvt, tx_resp)->status) & TX_STATUS_MSK;
+  /* struct iwl_mvm_tx_resp_v3 is almost the same */
+  struct iwl_mvm_tx_resp* tx_resp = (void*)pkt->data;
+  int txq_id = SEQ_TO_QUEUE(le16_to_cpu(pkt->hdr.sequence));
+  uint16_t ssn = iwl_xvt_get_scd_ssn(xvt, tx_resp);
+  struct tx_meta_data* tx_data;
+  uint16_t status = le16_to_cpu(iwl_xvt_get_agg_status(xvt, tx_resp)->status) & TX_STATUS_MSK;
 
-    tx_data = iwl_xvt_rx_get_tx_meta_data(xvt, txq_id);
-    if (!tx_data) { return; }
+  tx_data = iwl_xvt_rx_get_tx_meta_data(xvt, txq_id);
+  if (!tx_data) {
+    return;
+  }
 
-    if (unlikely(status != TX_STATUS_SUCCESS)) {
-        IWL_WARN(xvt, "got error TX_RSP status %#x\n", status);
-    }
+  if (unlikely(status != TX_STATUS_SUCCESS)) {
+    IWL_WARN(xvt, "got error TX_RSP status %#x\n", status);
+  }
 
-    iwl_xvt_reclaim_and_free(xvt, tx_data, txq_id, ssn);
+  iwl_xvt_reclaim_and_free(xvt, tx_data, txq_id, ssn);
 }
 
 static void iwl_xvt_rx_tx_cmd_handler(struct iwl_xvt* xvt, struct iwl_rx_packet* pkt) {
-    struct iwl_mvm_tx_resp* tx_resp = (void*)pkt->data;
+  struct iwl_mvm_tx_resp* tx_resp = (void*)pkt->data;
 
-    if (tx_resp->frame_count == 1) { iwl_xvt_rx_tx_cmd_single(xvt, pkt); }
+  if (tx_resp->frame_count == 1) {
+    iwl_xvt_rx_tx_cmd_single(xvt, pkt);
+  }
 
-    /* for aggregations - we reclaim on BA_NOTIF */
+  /* for aggregations - we reclaim on BA_NOTIF */
 }
 
 static void iwl_xvt_rx_ba_notif(struct iwl_xvt* xvt, struct iwl_rx_packet* pkt) {
-    struct iwl_mvm_ba_notif* ba_notif;
-    struct tx_meta_data* tx_data;
-    uint16_t scd_flow;
-    uint16_t scd_ssn;
+  struct iwl_mvm_ba_notif* ba_notif;
+  struct tx_meta_data* tx_data;
+  uint16_t scd_flow;
+  uint16_t scd_ssn;
 
-    if (iwl_xvt_is_unified_fw(xvt)) {
-        struct iwl_mvm_compressed_ba_notif* ba_res = (void*)pkt->data;
-        uint8_t tid;
-        uint16_t queue;
-        uint16_t tfd_idx;
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    struct iwl_mvm_compressed_ba_notif* ba_res = (void*)pkt->data;
+    uint8_t tid;
+    uint16_t queue;
+    uint16_t tfd_idx;
 
-        if (!le16_to_cpu(ba_res->tfd_cnt)) { goto out; }
-
-        /*
-         * TODO:
-         * When supporting multi TID aggregations - we need to move
-         * next_reclaimed to be per TXQ and not per TID or handle it
-         * in a different way.
-         * This will go together with SN and AddBA offload and cannot
-         * be handled properly for now.
-         */
-        WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1);
-        tid = ba_res->ra_tid[0].tid;
-        if (tid == IWL_MGMT_TID) { tid = IWL_MAX_TID_COUNT; }
-        queue = le16_to_cpu(ba_res->tfd[0].q_num);
-        tfd_idx = le16_to_cpu(ba_res->tfd[0].tfd_index);
-
-        tx_data = iwl_xvt_rx_get_tx_meta_data(xvt, queue);
-        if (!tx_data) { return; }
-
-        iwl_xvt_reclaim_and_free(xvt, tx_data, queue, tfd_idx);
-    out:
-        IWL_DEBUG_TX_REPLY(
-            xvt, "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
-            ba_res->sta_id, le32_to_cpu(ba_res->flags), le16_to_cpu(ba_res->txed),
-            le16_to_cpu(ba_res->done));
-        return;
+    if (!le16_to_cpu(ba_res->tfd_cnt)) {
+      goto out;
     }
 
-    ba_notif = (void*)pkt->data;
-    scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
-    scd_flow = le16_to_cpu(ba_notif->scd_flow);
+    /*
+     * TODO:
+     * When supporting multi TID aggregations - we need to move
+     * next_reclaimed to be per TXQ and not per TID or handle it
+     * in a different way.
+     * This will go together with SN and AddBA offload and cannot
+     * be handled properly for now.
+     */
+    WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1);
+    tid = ba_res->ra_tid[0].tid;
+    if (tid == IWL_MGMT_TID) {
+      tid = IWL_MAX_TID_COUNT;
+    }
+    queue = le16_to_cpu(ba_res->tfd[0].q_num);
+    tfd_idx = le16_to_cpu(ba_res->tfd[0].tfd_index);
 
-    tx_data = iwl_xvt_rx_get_tx_meta_data(xvt, scd_flow);
-    if (!tx_data) { return; }
+    tx_data = iwl_xvt_rx_get_tx_meta_data(xvt, queue);
+    if (!tx_data) {
+      return;
+    }
 
-    iwl_xvt_reclaim_and_free(xvt, tx_data, scd_flow, scd_ssn);
-
-    IWL_DEBUG_TX_REPLY(xvt, "ba_notif from %pM, sta_id = %d\n", ba_notif->sta_addr,
-                       ba_notif->sta_id);
+    iwl_xvt_reclaim_and_free(xvt, tx_data, queue, tfd_idx);
+  out:
     IWL_DEBUG_TX_REPLY(xvt,
-                       "tid %d, seq %d, bitmap 0x%llx, scd flow %d, ssn %d, sent %d, acked %d\n",
-                       ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
-                       (unsigned long long)le64_to_cpu(ba_notif->bitmap), scd_flow, scd_ssn,
-                       ba_notif->txed, ba_notif->txed_2_done);
+                       "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+                       ba_res->sta_id, le32_to_cpu(ba_res->flags), le16_to_cpu(ba_res->txed),
+                       le16_to_cpu(ba_res->done));
+    return;
+  }
+
+  ba_notif = (void*)pkt->data;
+  scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
+  scd_flow = le16_to_cpu(ba_notif->scd_flow);
+
+  tx_data = iwl_xvt_rx_get_tx_meta_data(xvt, scd_flow);
+  if (!tx_data) {
+    return;
+  }
+
+  iwl_xvt_reclaim_and_free(xvt, tx_data, scd_flow, scd_ssn);
+
+  IWL_DEBUG_TX_REPLY(xvt, "ba_notif from %pM, sta_id = %d\n", ba_notif->sta_addr, ba_notif->sta_id);
+  IWL_DEBUG_TX_REPLY(xvt, "tid %d, seq %d, bitmap 0x%llx, scd flow %d, ssn %d, sent %d, acked %d\n",
+                     ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+                     (unsigned long long)le64_to_cpu(ba_notif->bitmap), scd_flow, scd_ssn,
+                     ba_notif->txed, ba_notif->txed_2_done);
 }
 
 static void iwl_xvt_rx_dispatch(struct iwl_op_mode* op_mode, struct napi_struct* napi,
                                 struct iwl_rx_cmd_buffer* rxb) {
-    struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
-    struct iwl_rx_packet* pkt = rxb_addr(rxb);
+  struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
+  struct iwl_rx_packet* pkt = rxb_addr(rxb);
 
-    spin_lock(&xvt->notif_lock);
-    iwl_notification_wait_notify(&xvt->notif_wait, pkt);
-    IWL_DEBUG_INFO(xvt, "rx dispatch got notification\n");
+  spin_lock(&xvt->notif_lock);
+  iwl_notification_wait_notify(&xvt->notif_wait, pkt);
+  IWL_DEBUG_INFO(xvt, "rx dispatch got notification\n");
 
-    switch (pkt->hdr.cmd) {
+  switch (pkt->hdr.cmd) {
     case TX_CMD:
-        iwl_xvt_rx_tx_cmd_handler(xvt, pkt);
-        break;
+      iwl_xvt_rx_tx_cmd_handler(xvt, pkt);
+      break;
     case BA_NOTIF:
-        iwl_xvt_rx_ba_notif(xvt, pkt);
-        break;
+      iwl_xvt_rx_ba_notif(xvt, pkt);
+      break;
     case REPLY_RX_MPDU_CMD:
-        iwl_xvt_reorder(xvt, pkt);
-        break;
+      iwl_xvt_reorder(xvt, pkt);
+      break;
     case FRAME_RELEASE:
-        iwl_xvt_rx_frame_release(xvt, pkt);
-    }
+      iwl_xvt_rx_frame_release(xvt, pkt);
+  }
 
-    iwl_xvt_send_user_rx_notif(xvt, rxb);
-    spin_unlock(&xvt->notif_lock);
+  iwl_xvt_send_user_rx_notif(xvt, rxb);
+  spin_unlock(&xvt->notif_lock);
 }
 
 static void iwl_xvt_nic_config(struct iwl_op_mode* op_mode) {
-    struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
-    uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
-    uint32_t reg_val = 0;
+  struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
+  uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
+  uint32_t reg_val = 0;
 
-    radio_cfg_type = (xvt->fw->phy_config & FW_PHY_CFG_RADIO_TYPE) >> FW_PHY_CFG_RADIO_TYPE_POS;
-    radio_cfg_step = (xvt->fw->phy_config & FW_PHY_CFG_RADIO_STEP) >> FW_PHY_CFG_RADIO_STEP_POS;
-    radio_cfg_dash = (xvt->fw->phy_config & FW_PHY_CFG_RADIO_DASH) >> FW_PHY_CFG_RADIO_DASH_POS;
+  radio_cfg_type = (xvt->fw->phy_config & FW_PHY_CFG_RADIO_TYPE) >> FW_PHY_CFG_RADIO_TYPE_POS;
+  radio_cfg_step = (xvt->fw->phy_config & FW_PHY_CFG_RADIO_STEP) >> FW_PHY_CFG_RADIO_STEP_POS;
+  radio_cfg_dash = (xvt->fw->phy_config & FW_PHY_CFG_RADIO_DASH) >> FW_PHY_CFG_RADIO_DASH_POS;
 
-    /* SKU control */
-    reg_val |= CSR_HW_REV_STEP(xvt->trans->hw_rev) << CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
-    reg_val |= CSR_HW_REV_DASH(xvt->trans->hw_rev) << CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
+  /* SKU control */
+  reg_val |= CSR_HW_REV_STEP(xvt->trans->hw_rev) << CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
+  reg_val |= CSR_HW_REV_DASH(xvt->trans->hw_rev) << CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
 
-    /* radio configuration */
-    reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
-    reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
-    reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+  /* radio configuration */
+  reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
+  reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
+  reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
 
-    WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
-            ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
+  WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
+          ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
 
-    /*
-     * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
-     * sampling, and shouldn't be set to any non-zero value.
-     * The same is supposed to be true of the other HW, but unsetting
-     * them (such as the 7260) causes automatic tests to fail on seemingly
-     * unrelated errors. Need to further investigate this, but for now
-     * we'll separate cases.
-     */
-    if (xvt->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
-        reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
-    }
+  /*
+   * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
+   * sampling, and shouldn't be set to any non-zero value.
+   * The same is supposed to be true of the other HW, but unsetting
+   * them (such as the 7260) causes automatic tests to fail on seemingly
+   * unrelated errors. Need to further investigate this, but for now
+   * we'll separate cases.
+   */
+  if (xvt->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
+    reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
+  }
 
-    iwl_trans_set_bits_mask(xvt->trans, CSR_HW_IF_CONFIG_REG,
-                            CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
-                                CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
-                                CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
-                                CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
-                                CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
-                            reg_val);
+  iwl_trans_set_bits_mask(xvt->trans, CSR_HW_IF_CONFIG_REG,
+                          CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
+                              CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+                              CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+                              CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
+                              CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
+                          reg_val);
 
-    IWL_DEBUG_INFO(xvt, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, radio_cfg_step,
-                   radio_cfg_dash);
+  IWL_DEBUG_INFO(xvt, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, radio_cfg_step,
+                 radio_cfg_dash);
 
-    /*
-     * W/A : NIC is stuck in a reset state after Early PCIe power off
-     * (PCIe power is lost before PERST# is asserted), causing ME FW
-     * to lose ownership and not being able to obtain it back.
-     */
-    if (!xvt->trans->cfg->apmg_not_supported)
-        iwl_set_bits_mask_prph(xvt->trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
-                               ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+  /*
+   * W/A : NIC is stuck in a reset state after Early PCIe power off
+   * (PCIe power is lost before PERST# is asserted), causing ME FW
+   * to lose ownership and not being able to obtain it back.
+   */
+  if (!xvt->trans->cfg->apmg_not_supported)
+    iwl_set_bits_mask_prph(xvt->trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+                           ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
 }
 
 static void iwl_xvt_nic_error(struct iwl_op_mode* op_mode) {
-    struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
-    void* p_table;
-    void* p_table_umac = NULL;
-    struct iwl_error_event_table_v2 table_v2;
-    struct iwl_umac_error_event_table table_umac;
-    int err, table_size;
+  struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
+  void* p_table;
+  void* p_table_umac = NULL;
+  struct iwl_error_event_table_v2 table_v2;
+  struct iwl_umac_error_event_table table_umac;
+  int err, table_size;
 
-    xvt->fw_error = true;
-    wake_up_interruptible(&xvt->tx_meta_data[XVT_LMAC_0_ID].mod_tx_wq);
+  xvt->fw_error = true;
+  wake_up_interruptible(&xvt->tx_meta_data[XVT_LMAC_0_ID].mod_tx_wq);
 
-    iwl_xvt_get_nic_error_log_v2(xvt, &table_v2);
-    iwl_xvt_dump_nic_error_log_v2(xvt, &table_v2);
-    p_table = kmemdup(&table_v2, sizeof(table_v2), GFP_ATOMIC);
-    table_size = sizeof(table_v2);
+  iwl_xvt_get_nic_error_log_v2(xvt, &table_v2);
+  iwl_xvt_dump_nic_error_log_v2(xvt, &table_v2);
+  p_table = kmemdup(&table_v2, sizeof(table_v2), GFP_ATOMIC);
+  table_size = sizeof(table_v2);
 
-    if (xvt->support_umac_log) {
-        iwl_xvt_get_umac_error_log(xvt, &table_umac);
-        iwl_xvt_dump_umac_error_log(xvt, &table_umac);
-        p_table_umac = kmemdup(&table_umac, sizeof(table_umac), GFP_ATOMIC);
+  if (xvt->support_umac_log) {
+    iwl_xvt_get_umac_error_log(xvt, &table_umac);
+    iwl_xvt_dump_umac_error_log(xvt, &table_umac);
+    p_table_umac = kmemdup(&table_umac, sizeof(table_umac), GFP_ATOMIC);
+  }
+
+  if (p_table) {
+    err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_NIC_ERROR, (void*)p_table, table_size,
+                                  GFP_ATOMIC);
+    if (err) {
+      IWL_WARN(xvt, "Error %d sending NIC error notification\n", err);
     }
+    kfree(p_table);
+  }
 
-    if (p_table) {
-        err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_NIC_ERROR, (void*)p_table, table_size,
-                                      GFP_ATOMIC);
-        if (err) { IWL_WARN(xvt, "Error %d sending NIC error notification\n", err); }
-        kfree(p_table);
+  if (p_table_umac) {
+    err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_NIC_UMAC_ERROR, (void*)p_table_umac,
+                                  sizeof(table_umac), GFP_ATOMIC);
+    if (err) {
+      IWL_WARN(xvt, "Error %d sending NIC umac error notification\n", err);
     }
+    kfree(p_table_umac);
+  }
 
-    if (p_table_umac) {
-        err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_NIC_UMAC_ERROR, (void*)p_table_umac,
-                                      sizeof(table_umac), GFP_ATOMIC);
-        if (err) { IWL_WARN(xvt, "Error %d sending NIC umac error notification\n", err); }
-        kfree(p_table_umac);
-    }
-
-    iwl_fw_dbg_collect_desc(&xvt->fwrt, &iwl_dump_desc_assert, false, 0);
+  iwl_fw_dbg_collect_desc(&xvt->fwrt, &iwl_dump_desc_assert, false, 0);
 }
 
 static bool iwl_xvt_set_hw_rfkill_state(struct iwl_op_mode* op_mode, bool state) {
-    struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
-    uint32_t rfkill_state = state ? IWL_XVT_RFKILL_ON : IWL_XVT_RFKILL_OFF;
-    int err;
+  struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
+  uint32_t rfkill_state = state ? IWL_XVT_RFKILL_ON : IWL_XVT_RFKILL_OFF;
+  int err;
 
-    err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_RFKILL, &rfkill_state, sizeof(rfkill_state),
-                                  GFP_ATOMIC);
-    if (err) { IWL_WARN(xvt, "Error %d sending RFKILL notification\n", err); }
+  err = iwl_xvt_user_send_notif(xvt, IWL_XVT_CMD_SEND_RFKILL, &rfkill_state, sizeof(rfkill_state),
+                                GFP_ATOMIC);
+  if (err) {
+    IWL_WARN(xvt, "Error %d sending RFKILL notification\n", err);
+  }
 
-    return false;
+  return false;
 }
 
 static void iwl_xvt_free_skb(struct iwl_op_mode* op_mode, struct sk_buff* skb) {
-    struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
-    struct iwl_xvt_skb_info* skb_info = (void*)skb->cb;
+  struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
+  struct iwl_xvt_skb_info* skb_info = (void*)skb->cb;
 
-    iwl_trans_free_tx_cmd(xvt->trans, skb_info->dev_cmd);
-    kfree_skb(skb);
+  iwl_trans_free_tx_cmd(xvt->trans, skb_info->dev_cmd);
+  kfree_skb(skb);
 }
 
 static void iwl_xvt_stop_sw_queue(struct iwl_op_mode* op_mode, int queue) {
-    struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
-    uint8_t i;
+  struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
+  uint8_t i;
 
-    if (xvt->queue_data[queue].allocated_queue) {
-        xvt->queue_data[queue].txq_full = true;
-    } else {
-        for (i = 0; i < NUM_OF_LMACS; i++) {
-            if (queue == xvt->tx_meta_data[i].queue) {
-                xvt->tx_meta_data[i].txq_full = true;
-                break;
-            }
-        }
+  if (xvt->queue_data[queue].allocated_queue) {
+    xvt->queue_data[queue].txq_full = true;
+  } else {
+    for (i = 0; i < NUM_OF_LMACS; i++) {
+      if (queue == xvt->tx_meta_data[i].queue) {
+        xvt->tx_meta_data[i].txq_full = true;
+        break;
+      }
     }
+  }
 }
 
 static void iwl_xvt_wake_sw_queue(struct iwl_op_mode* op_mode, int queue) {
-    struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
-    uint8_t i;
+  struct iwl_xvt* xvt = IWL_OP_MODE_GET_XVT(op_mode);
+  uint8_t i;
 
-    if (xvt->queue_data[queue].allocated_queue) {
-        xvt->queue_data[queue].txq_full = false;
-        wake_up_interruptible(&xvt->queue_data[queue].tx_wq);
-    } else {
-        for (i = 0; i < NUM_OF_LMACS; i++) {
-            if (queue == xvt->tx_meta_data[i].queue) {
-                xvt->tx_meta_data[i].txq_full = false;
-                wake_up_interruptible(&xvt->tx_meta_data[i].mod_tx_wq);
-                break;
-            }
-        }
+  if (xvt->queue_data[queue].allocated_queue) {
+    xvt->queue_data[queue].txq_full = false;
+    wake_up_interruptible(&xvt->queue_data[queue].tx_wq);
+  } else {
+    for (i = 0; i < NUM_OF_LMACS; i++) {
+      if (queue == xvt->tx_meta_data[i].queue) {
+        xvt->tx_meta_data[i].txq_full = false;
+        wake_up_interruptible(&xvt->tx_meta_data[i].mod_tx_wq);
+        break;
+      }
     }
+  }
 }
 
 static const struct iwl_op_mode_ops iwl_xvt_ops = {
@@ -668,35 +694,39 @@
 };
 
 void iwl_xvt_free_tx_queue(struct iwl_xvt* xvt, uint8_t lmac_id) {
-    if (xvt->tx_meta_data[lmac_id].queue == -1) { return; }
+  if (xvt->tx_meta_data[lmac_id].queue == -1) {
+    return;
+  }
 
-    iwl_trans_txq_free(xvt->trans, xvt->tx_meta_data[lmac_id].queue);
+  iwl_trans_txq_free(xvt->trans, xvt->tx_meta_data[lmac_id].queue);
 
-    xvt->tx_meta_data[lmac_id].queue = -1;
+  xvt->tx_meta_data[lmac_id].queue = -1;
 }
 
 int iwl_xvt_allocate_tx_queue(struct iwl_xvt* xvt, uint8_t sta_id, uint8_t lmac_id) {
-    int ret;
+  int ret;
 
-    ret = iwl_trans_txq_alloc(xvt->trans, cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), sta_id,
-                              TX_QUEUE_CFG_TID, SCD_QUEUE_CFG, IWL_DEFAULT_QUEUE_SIZE, 0);
-    /* ret is positive when func returns the allocated the queue number */
-    if (ret > 0) {
-        xvt->tx_meta_data[lmac_id].queue = ret;
-        ret = 0;
-    } else {
-        IWL_ERR(xvt, "failed to allocate queue\n");
-    }
+  ret = iwl_trans_txq_alloc(xvt->trans, cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), sta_id,
+                            TX_QUEUE_CFG_TID, SCD_QUEUE_CFG, IWL_DEFAULT_QUEUE_SIZE, 0);
+  /* ret is positive when func returns the allocated queue number */
+  if (ret > 0) {
+    xvt->tx_meta_data[lmac_id].queue = ret;
+    ret = 0;
+  } else {
+    IWL_ERR(xvt, "failed to allocate queue\n");
+  }
 
-    return ret;
+  return ret;
 }
 
 void iwl_xvt_txq_disable(struct iwl_xvt* xvt) {
-    if (!iwl_xvt_has_default_txq(xvt)) { return; }
-    if (iwl_xvt_is_unified_fw(xvt)) {
-        iwl_xvt_free_tx_queue(xvt, XVT_LMAC_0_ID);
-        iwl_xvt_free_tx_queue(xvt, XVT_LMAC_1_ID);
-    } else {
-        iwl_trans_txq_disable(xvt->trans, IWL_XVT_DEFAULT_TX_QUEUE, true);
-    }
+  if (!iwl_xvt_has_default_txq(xvt)) {
+    return;
+  }
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    iwl_xvt_free_tx_queue(xvt, XVT_LMAC_0_ID);
+    iwl_xvt_free_tx_queue(xvt, XVT_LMAC_1_ID);
+  } else {
+    iwl_trans_txq_disable(xvt->trans, IWL_XVT_DEFAULT_TX_QUEUE, true);
+  }
 }
diff --git a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/xvt.h b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/xvt.h
index bb2d438..ed7dbb4 100644
--- a/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/xvt.h
+++ b/src/connectivity/wlan/drivers/third_party/intel/iwlwifi/xvt/xvt.h
@@ -37,6 +37,7 @@
 
 #include <linux/if_ether.h>
 #include <linux/spinlock.h>
+
 #include "constants.h"
 #include "fw-api.h"
 #include "fw/img.h"
@@ -48,10 +49,10 @@
 #include "iwl-trans.h"
 
 enum iwl_xvt_state {
-    IWL_XVT_STATE_UNINITIALIZED = 0,
-    IWL_XVT_STATE_NO_FW,
-    IWL_XVT_STATE_INIT_STARTED,
-    IWL_XVT_STATE_OPERATIONAL,
+  IWL_XVT_STATE_UNINITIALIZED = 0,
+  IWL_XVT_STATE_NO_FW,
+  IWL_XVT_STATE_INIT_STARTED,
+  IWL_XVT_STATE_OPERATIONAL,
 };
 
 #define IWL_XVT_LOAD_MASK_INIT BIT(0)
@@ -77,15 +78,15 @@
  * @seq_num: sequence number of qos frames (per-tid)
  */
 struct tx_meta_data {
-    struct task_struct* tx_mod_thread;
-    wait_queue_head_t mod_tx_wq;
-    bool tx_task_operating;
-    int queue;
-    uint64_t tx_counter;
-    uint32_t tot_tx;
-    wait_queue_head_t mod_tx_done_wq;
-    bool txq_full;
-    uint16_t seq_num[IWL_MAX_TID_COUNT];
+  struct task_struct* tx_mod_thread;
+  wait_queue_head_t mod_tx_wq;
+  bool tx_task_operating;
+  int queue;
+  uint64_t tx_counter;
+  uint32_t tot_tx;
+  wait_queue_head_t mod_tx_done_wq;
+  bool txq_full;
+  uint16_t seq_num[IWL_MAX_TID_COUNT];
 };
 
 /*
@@ -97,10 +98,10 @@
  * @reordered: number of frames gone through the reorder buffer (unordered)
  */
 struct iwl_xvt_reorder_statistics {
-    uint32_t dropped;
-    uint32_t released;
-    uint32_t skipped;
-    uint32_t reordered;
+  uint32_t dropped;
+  uint32_t released;
+  uint32_t skipped;
+  uint32_t reordered;
 };
 
 /**
@@ -118,23 +119,23 @@
  * @stats: reorder buffer statistics
  */
 struct iwl_xvt_reorder_buffer {
-    uint16_t head_sn;
-    uint16_t num_stored;
-    uint8_t buf_size;
-    uint8_t sta_id;
-    uint8_t tid;
-    int queue;
-    uint16_t last_amsdu;
-    uint8_t last_sub_index;
+  uint16_t head_sn;
+  uint16_t num_stored;
+  uint8_t buf_size;
+  uint8_t sta_id;
+  uint8_t tid;
+  int queue;
+  uint16_t last_amsdu;
+  uint8_t last_sub_index;
 
-    /*
-     * we don't care about the actual frames, only their count.
-     * avoid messing with reorder timer for that reason as well
-     */
-    uint16_t entries[IEEE80211_MAX_AMPDU_BUF_HT];
+  /*
+   * we don't care about the actual frames, only their count.
+   * avoid messing with reorder timer for that reason as well
+   */
+  uint16_t entries[IEEE80211_MAX_AMPDU_BUF_HT];
 
-    spinlock_t lock; /* protect reorder buffer internal state */
-    struct iwl_xvt_reorder_statistics stats;
+  spinlock_t lock; /* protect reorder buffer internal state */
+  struct iwl_xvt_reorder_statistics stats;
 };
 
 /**
@@ -145,10 +146,10 @@
  * @allocated_queue: Whether queue is allocated
  */
 struct tx_queue_data {
-    wait_queue_head_t tx_wq;
-    uint64_t tx_counter;
-    bool txq_full;
-    bool allocated_queue;
+  wait_queue_head_t tx_wq;
+  uint64_t tx_counter;
+  bool txq_full;
+  bool allocated_queue;
 };
 
 /**
@@ -157,8 +158,8 @@
  * @payload: Payload buffer
  */
 struct tx_payload {
-    uint16_t length;
-    uint8_t payload[];
+  uint16_t length;
+  uint8_t payload[];
 };
 
 /**
@@ -167,10 +168,10 @@
  * @iwl_phy_cfg_cmd: Which calibrations should be done
  */
 struct iwl_sw_stack_config {
-    uint32_t load_mask;
-    uint32_t calib_override_mask;
-    uint32_t fw_dbg_flags;
-    struct iwl_phy_cfg_cmd fw_calib_cmd_cfg[IWL_UCODE_TYPE_MAX];
+  uint32_t load_mask;
+  uint32_t calib_override_mask;
+  uint32_t fw_dbg_flags;
+  struct iwl_phy_cfg_cmd fw_calib_cmd_cfg[IWL_UCODE_TYPE_MAX];
 };
 
 /* Note: This structure is read from the device with IO accesses,
@@ -179,57 +180,57 @@
  * need to be ordered correctly though!
  */
 struct iwl_error_event_table_v1 {
-    uint32_t valid;          /* (nonzero) valid, (0) log is empty */
-    uint32_t error_id;       /* type of error */
-    uint32_t pc;             /* program counter */
-    uint32_t blink1;         /* branch link */
-    uint32_t blink2;         /* branch link */
-    uint32_t ilink1;         /* interrupt link */
-    uint32_t ilink2;         /* interrupt link */
-    uint32_t data1;          /* error-specific data */
-    uint32_t data2;          /* error-specific data */
-    uint32_t data3;          /* error-specific data */
-    uint32_t bcon_time;      /* beacon timer */
-    uint32_t tsf_low;        /* network timestamp function timer */
-    uint32_t tsf_hi;         /* network timestamp function timer */
-    uint32_t gp1;            /* GP1 timer register */
-    uint32_t gp2;            /* GP2 timer register */
-    uint32_t gp3;            /* GP3 timer register */
-    uint32_t ucode_ver;      /* uCode version */
-    uint32_t hw_ver;         /* HW Silicon version */
-    uint32_t brd_ver;        /* HW board version */
-    uint32_t log_pc;         /* log program counter */
-    uint32_t frame_ptr;      /* frame pointer */
-    uint32_t stack_ptr;      /* stack pointer */
-    uint32_t hcmd;           /* last host command header */
-    uint32_t isr0;           /* isr status register LMPM_NIC_ISR0:
-                              * rxtx_flag
-                              */
-    uint32_t isr1;           /* isr status register LMPM_NIC_ISR1:
-                              * host_flag
-                              */
-    uint32_t isr2;           /* isr status register LMPM_NIC_ISR2:
-                              * enc_flag
-                              */
-    uint32_t isr3;           /* isr status register LMPM_NIC_ISR3:
-                              * time_flag
-                              */
-    uint32_t isr4;           /* isr status register LMPM_NIC_ISR4:
-                              * wico interrupt
-                              */
-    uint32_t isr_pref;       /* isr status register LMPM_NIC_PREF_STAT */
-    uint32_t wait_event;     /* wait event() caller address */
-    uint32_t l2p_control;    /* L2pControlField */
-    uint32_t l2p_duration;   /* L2pDurationField */
-    uint32_t l2p_mhvalid;    /* L2pMhValidBits */
-    uint32_t l2p_addr_match; /* L2pAddrMatchStat */
-    uint32_t lmpm_pmg_sel;   /* indicate which clocks are turned on
-                              * (LMPM_PMG_SEL)
-                              */
-    uint32_t u_timestamp;    /* indicate when the date and time of the
-                              * compilation
-                              */
-    uint32_t flow_handler;   /* FH read/write pointers, RX credit */
+  uint32_t valid;          /* (nonzero) valid, (0) log is empty */
+  uint32_t error_id;       /* type of error */
+  uint32_t pc;             /* program counter */
+  uint32_t blink1;         /* branch link */
+  uint32_t blink2;         /* branch link */
+  uint32_t ilink1;         /* interrupt link */
+  uint32_t ilink2;         /* interrupt link */
+  uint32_t data1;          /* error-specific data */
+  uint32_t data2;          /* error-specific data */
+  uint32_t data3;          /* error-specific data */
+  uint32_t bcon_time;      /* beacon timer */
+  uint32_t tsf_low;        /* network timestamp function timer */
+  uint32_t tsf_hi;         /* network timestamp function timer */
+  uint32_t gp1;            /* GP1 timer register */
+  uint32_t gp2;            /* GP2 timer register */
+  uint32_t gp3;            /* GP3 timer register */
+  uint32_t ucode_ver;      /* uCode version */
+  uint32_t hw_ver;         /* HW Silicon version */
+  uint32_t brd_ver;        /* HW board version */
+  uint32_t log_pc;         /* log program counter */
+  uint32_t frame_ptr;      /* frame pointer */
+  uint32_t stack_ptr;      /* stack pointer */
+  uint32_t hcmd;           /* last host command header */
+  uint32_t isr0;           /* isr status register LMPM_NIC_ISR0:
+                            * rxtx_flag
+                            */
+  uint32_t isr1;           /* isr status register LMPM_NIC_ISR1:
+                            * host_flag
+                            */
+  uint32_t isr2;           /* isr status register LMPM_NIC_ISR2:
+                            * enc_flag
+                            */
+  uint32_t isr3;           /* isr status register LMPM_NIC_ISR3:
+                            * time_flag
+                            */
+  uint32_t isr4;           /* isr status register LMPM_NIC_ISR4:
+                            * wico interrupt
+                            */
+  uint32_t isr_pref;       /* isr status register LMPM_NIC_PREF_STAT */
+  uint32_t wait_event;     /* wait event() caller address */
+  uint32_t l2p_control;    /* L2pControlField */
+  uint32_t l2p_duration;   /* L2pDurationField */
+  uint32_t l2p_mhvalid;    /* L2pMhValidBits */
+  uint32_t l2p_addr_match; /* L2pAddrMatchStat */
+  uint32_t lmpm_pmg_sel;   /* indicate which clocks are turned on
+                            * (LMPM_PMG_SEL)
+                            */
+  uint32_t u_timestamp;    /* indicate when the date and time of the
+                            * compilation
+                            */
+  uint32_t flow_handler;   /* FH read/write pointers, RX credit */
 } __packed;
 
 /* Note: This structure is read from the device with IO accesses,
@@ -238,58 +239,58 @@
  * need to be ordered correctly though!
  */
 struct iwl_error_event_table_v2 {
-    uint32_t valid;          /* (nonzero) valid, (0) log is empty */
-    uint32_t error_id;       /* type of error */
-    uint32_t trm_hw_status0; /* TRM HW status */
-    uint32_t trm_hw_status1; /* TRM HW status */
-    uint32_t blink2;         /* branch link */
-    uint32_t ilink1;         /* interrupt link */
-    uint32_t ilink2;         /* interrupt link */
-    uint32_t data1;          /* error-specific data */
-    uint32_t data2;          /* error-specific data */
-    uint32_t data3;          /* error-specific data */
-    uint32_t bcon_time;      /* beacon timer */
-    uint32_t tsf_low;        /* network timestamp function timer */
-    uint32_t tsf_hi;         /* network timestamp function timer */
-    uint32_t gp1;            /* GP1 timer register */
-    uint32_t gp2;            /* GP2 timer register */
-    uint32_t fw_rev_type;    /* firmware revision type */
-    uint32_t major;          /* uCode version major */
-    uint32_t minor;          /* uCode version minor */
-    uint32_t hw_ver;         /* HW Silicon version */
-    uint32_t brd_ver;        /* HW board version */
-    uint32_t log_pc;         /* log program counter */
-    uint32_t frame_ptr;      /* frame pointer */
-    uint32_t stack_ptr;      /* stack pointer */
-    uint32_t hcmd;           /* last host command header */
-    uint32_t isr0;           /* isr status register LMPM_NIC_ISR0:
-                              * rxtx_flag
-                              */
-    uint32_t isr1;           /* isr status register LMPM_NIC_ISR1:
-                              * host_flag
-                              */
-    uint32_t isr2;           /* isr status register LMPM_NIC_ISR2:
-                              * enc_flag
-                              */
-    uint32_t isr3;           /* isr status register LMPM_NIC_ISR3:
-                              * time_flag
-                              */
-    uint32_t isr4;           /* isr status register LMPM_NIC_ISR4:
-                              * wico interrupt
-                              */
-    uint32_t last_cmd_id;    /* last HCMD id handled by the firmware */
-    uint32_t wait_event;     /* wait event() caller address */
-    uint32_t l2p_control;    /* L2pControlField */
-    uint32_t l2p_duration;   /* L2pDurationField */
-    uint32_t l2p_mhvalid;    /* L2pMhValidBits */
-    uint32_t l2p_addr_match; /* L2pAddrMatchStat */
-    uint32_t lmpm_pmg_sel;   /* indicate which clocks are turned on
-                              * (LMPM_PMG_SEL)
-                              */
-    uint32_t u_timestamp;    /* indicate when the date and time of the
-                              * compilation
-                              */
-    uint32_t flow_handler;   /* FH read/write pointers, RX credit */
+  uint32_t valid;          /* (nonzero) valid, (0) log is empty */
+  uint32_t error_id;       /* type of error */
+  uint32_t trm_hw_status0; /* TRM HW status */
+  uint32_t trm_hw_status1; /* TRM HW status */
+  uint32_t blink2;         /* branch link */
+  uint32_t ilink1;         /* interrupt link */
+  uint32_t ilink2;         /* interrupt link */
+  uint32_t data1;          /* error-specific data */
+  uint32_t data2;          /* error-specific data */
+  uint32_t data3;          /* error-specific data */
+  uint32_t bcon_time;      /* beacon timer */
+  uint32_t tsf_low;        /* network timestamp function timer */
+  uint32_t tsf_hi;         /* network timestamp function timer */
+  uint32_t gp1;            /* GP1 timer register */
+  uint32_t gp2;            /* GP2 timer register */
+  uint32_t fw_rev_type;    /* firmware revision type */
+  uint32_t major;          /* uCode version major */
+  uint32_t minor;          /* uCode version minor */
+  uint32_t hw_ver;         /* HW Silicon version */
+  uint32_t brd_ver;        /* HW board version */
+  uint32_t log_pc;         /* log program counter */
+  uint32_t frame_ptr;      /* frame pointer */
+  uint32_t stack_ptr;      /* stack pointer */
+  uint32_t hcmd;           /* last host command header */
+  uint32_t isr0;           /* isr status register LMPM_NIC_ISR0:
+                            * rxtx_flag
+                            */
+  uint32_t isr1;           /* isr status register LMPM_NIC_ISR1:
+                            * host_flag
+                            */
+  uint32_t isr2;           /* isr status register LMPM_NIC_ISR2:
+                            * enc_flag
+                            */
+  uint32_t isr3;           /* isr status register LMPM_NIC_ISR3:
+                            * time_flag
+                            */
+  uint32_t isr4;           /* isr status register LMPM_NIC_ISR4:
+                            * wico interrupt
+                            */
+  uint32_t last_cmd_id;    /* last HCMD id handled by the firmware */
+  uint32_t wait_event;     /* wait event() caller address */
+  uint32_t l2p_control;    /* L2pControlField */
+  uint32_t l2p_duration;   /* L2pDurationField */
+  uint32_t l2p_mhvalid;    /* L2pMhValidBits */
+  uint32_t l2p_addr_match; /* L2pAddrMatchStat */
+  uint32_t lmpm_pmg_sel;   /* indicate which clocks are turned on
+                            * (LMPM_PMG_SEL)
+                            */
+  uint32_t u_timestamp;    /* indicate when the date and time of the
+                            * compilation
+                            */
+  uint32_t flow_handler;   /* FH read/write pointers, RX credit */
 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
 
 /* UMAC error struct - relevant starting from family 8000 chip.
@@ -299,21 +300,21 @@
  * need to be ordered correctly though!
  */
 struct iwl_umac_error_event_table {
-    uint32_t valid;    /* (nonzero) valid, (0) log is empty */
-    uint32_t error_id; /* type of error */
-    uint32_t blink1;   /* branch link */
-    uint32_t blink2;   /* branch link */
-    uint32_t ilink1;   /* interrupt link */
-    uint32_t ilink2;   /* interrupt link */
-    uint32_t data1;    /* error-specific data */
-    uint32_t data2;    /* error-specific data */
-    uint32_t data3;    /* error-specific data */
-    uint32_t umac_major;
-    uint32_t umac_minor;
-    uint32_t frame_pointer; /* core register 27*/
-    uint32_t stack_pointer; /* core register 28 */
-    uint32_t cmd_header;    /* latest host cmd sent to UMAC */
-    uint32_t nic_isr_pref;  /* ISR status register */
+  uint32_t valid;    /* (nonzero) valid, (0) log is empty */
+  uint32_t error_id; /* type of error */
+  uint32_t blink1;   /* branch link */
+  uint32_t blink2;   /* branch link */
+  uint32_t ilink1;   /* interrupt link */
+  uint32_t ilink2;   /* interrupt link */
+  uint32_t data1;    /* error-specific data */
+  uint32_t data2;    /* error-specific data */
+  uint32_t data3;    /* error-specific data */
+  uint32_t umac_major;
+  uint32_t umac_minor;
+  uint32_t frame_pointer; /* core register 27 */
+  uint32_t stack_pointer; /* core register 28 */
+  uint32_t cmd_header;    /* latest host cmd sent to UMAC */
+  uint32_t nic_isr_pref;  /* ISR status register */
 } __packed;
 
 /**
@@ -322,8 +323,8 @@
  * @trans: transport data
  */
 struct iwl_xvt_skb_info {
-    struct iwl_device_cmd* dev_cmd;
-    void* trans[2];
+  struct iwl_device_cmd* dev_cmd;
+  void* trans[2];
 };
 
 /**
@@ -335,55 +336,55 @@
  * @dev: pointer to struct device for printing purposes
  */
 struct iwl_xvt {
-    struct iwl_trans* trans;
-    const struct iwl_cfg* cfg;
-    struct iwl_phy_db* phy_db;
-    const struct iwl_fw* fw;
-    struct device* dev;
-    struct dentry* debugfs_dir;
+  struct iwl_trans* trans;
+  const struct iwl_cfg* cfg;
+  struct iwl_phy_db* phy_db;
+  const struct iwl_fw* fw;
+  struct device* dev;
+  struct dentry* debugfs_dir;
 
-    struct mutex mutex; /* Protects access to xVT struct */
-    spinlock_t notif_lock;
-    ; /* Protects notifications processing */
-    enum iwl_xvt_state state;
-    bool fw_error;
+  struct mutex mutex; /* Protects access to xVT struct */
+  spinlock_t notif_lock;
+  /* Protects notifications processing */
+  enum iwl_xvt_state state;
+  bool fw_error;
 
-    struct iwl_notif_wait_data notif_wait;
+  struct iwl_notif_wait_data notif_wait;
 
-    uint32_t error_event_table[2];
-    bool fw_running;
-    uint32_t umac_error_event_table;
-    bool support_umac_log;
+  uint32_t error_event_table[2];
+  bool fw_running;
+  uint32_t umac_error_event_table;
+  bool support_umac_log;
 
-    struct iwl_sw_stack_config sw_stack_cfg;
-    bool rx_hdr_enabled;
+  struct iwl_sw_stack_config sw_stack_cfg;
+  bool rx_hdr_enabled;
 
-    bool apmg_pd_en;
-    /* DMA buffer information */
-    uint32_t dma_buffer_size;
-    uint8_t* dma_cpu_addr;
-    dma_addr_t dma_addr;
+  bool apmg_pd_en;
+  /* DMA buffer information */
+  uint32_t dma_buffer_size;
+  uint8_t* dma_cpu_addr;
+  dma_addr_t dma_addr;
 
-    struct iwl_fw_runtime fwrt;
+  struct iwl_fw_runtime fwrt;
 
-    bool is_nvm_mac_override;
-    uint8_t nvm_hw_addr[ETH_ALEN];
-    uint8_t nvm_mac_addr[ETH_ALEN];
+  bool is_nvm_mac_override;
+  uint8_t nvm_hw_addr[ETH_ALEN];
+  uint8_t nvm_mac_addr[ETH_ALEN];
 
-    struct tx_meta_data tx_meta_data[NUM_OF_LMACS];
+  struct tx_meta_data tx_meta_data[NUM_OF_LMACS];
 
-    struct iwl_xvt_reorder_buffer reorder_bufs[IWL_MAX_BAID];
+  struct iwl_xvt_reorder_buffer reorder_bufs[IWL_MAX_BAID];
 
-    /* members for enhanced tx command */
-    struct tx_payload* payloads[IWL_XVT_MAX_PAYLOADS_AMOUNT];
-    struct task_struct* tx_task;
-    bool is_enhanced_tx;
-    bool send_tx_resp;
-    bool send_rx_mpdu;
-    uint64_t num_of_tx_resp;
-    uint64_t expected_tx_amount;
-    wait_queue_head_t tx_done_wq;
-    struct tx_queue_data queue_data[IWL_MAX_HW_QUEUES];
+  /* members for enhanced tx command */
+  struct tx_payload* payloads[IWL_XVT_MAX_PAYLOADS_AMOUNT];
+  struct task_struct* tx_task;
+  bool is_enhanced_tx;
+  bool send_tx_resp;
+  bool send_rx_mpdu;
+  uint64_t num_of_tx_resp;
+  uint64_t expected_tx_amount;
+  wait_queue_head_t tx_done_wq;
+  struct tx_queue_data queue_data[IWL_MAX_HW_QUEUES];
 };
 
 #define IWL_OP_MODE_GET_XVT(_op_mode) ((struct iwl_xvt*)((_op_mode)->op_mode_specific))
@@ -423,39 +424,38 @@
 
 /* Based on mvm function: iwl_mvm_has_new_tx_api */
 static inline bool iwl_xvt_is_unified_fw(struct iwl_xvt* xvt) {
-    /* TODO - replace with TLV once defined */
-    return xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+  /* TODO - replace with TLV once defined */
+  return xvt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
 }
 
 static inline bool iwl_xvt_is_cdb_supported(struct iwl_xvt* xvt) {
-    /*
-     * TODO:
-     * The issue of how to determine CDB APIs and usage is still not fully
-     * defined.
-     * There is a compilation for CDB and non-CDB FW, but there may
-     * be also runtime check.
-     * For now there is a TLV for checking compilation mode, but a
-     * runtime check will also have to be here - once defined.
-     */
-    return fw_has_capa(&xvt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
+  /*
+   * TODO:
+   * The issue of how to determine CDB APIs and usage is still not fully
+   * defined.
+   * There is a compilation for CDB and non-CDB FW, but there may
+   * be also runtime check.
+   * For now there is a TLV for checking compilation mode, but a
+   * runtime check will also have to be here - once defined.
+   */
+  return fw_has_capa(&xvt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
 }
 
 static inline struct agg_tx_status* iwl_xvt_get_agg_status(struct iwl_xvt* xvt,
                                                            struct iwl_mvm_tx_resp* tx_resp) {
-    if (iwl_xvt_is_unified_fw(xvt)) {
-        return &((struct iwl_mvm_tx_resp*)tx_resp)->status;
-    } else {
-        return ((struct iwl_mvm_tx_resp_v3*)tx_resp)->status;
-    }
+  if (iwl_xvt_is_unified_fw(xvt)) {
+    return &((struct iwl_mvm_tx_resp*)tx_resp)->status;
+  } else {
+    return ((struct iwl_mvm_tx_resp_v3*)tx_resp)->status;
+  }
 }
 
 static inline uint32_t iwl_xvt_get_scd_ssn(struct iwl_xvt* xvt, struct iwl_mvm_tx_resp* tx_resp) {
-    return le32_to_cpup((__le32*)iwl_xvt_get_agg_status(xvt, tx_resp) + tx_resp->frame_count) &
-           0xfff;
+  return le32_to_cpup((__le32*)iwl_xvt_get_agg_status(xvt, tx_resp) + tx_resp->frame_count) & 0xfff;
 }
 
 static inline bool iwl_xvt_has_default_txq(struct iwl_xvt* xvt) {
-    return !(xvt->sw_stack_cfg.fw_dbg_flags & IWL_XVT_DBG_FLAGS_NO_DEFAULT_TXQ);
+  return !(xvt->sw_stack_cfg.fw_dbg_flags & IWL_XVT_DBG_FLAGS_NO_DEFAULT_TXQ);
 }
 
 void iwl_xvt_free_tx_queue(struct iwl_xvt* xvt, uint8_t lmac_id);
@@ -469,7 +469,7 @@
 int iwl_xvt_dbgfs_register(struct iwl_xvt* xvt, struct dentry* dbgfs_dir);
 #else
 static inline int iwl_xvt_dbgfs_register(struct iwl_xvt* xvt, struct dentry* dbgfs_dir) {
-    return 0;
+  return 0;
 }
 #endif /* CPTCFG_IWLWIFI_DEBUGFS */