blob: a64bc73926d5460424e6a9310e89003957b1f2dc [file] [log] [blame]
/*
* Broadcom Dongle Host Driver (DHD), Linux-specific network interface
* Basically selected code segments from usb-cdc.c and usb-rndis.c
*
* Copyright 1999-2016, Broadcom Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* This software is provided by the copyright holder "as is" and any express or
* implied warranties, including, but not limited to, the implied warranties of
* merchantability and fitness for a particular purpose are disclaimed. In no event
* shall copyright holder be liable for any direct, indirect, incidental, special,
* exemplary, or consequential damages (including, but not limited to, procurement
* of substitute goods or services; loss of use, data, or profits; or business
* interruption) however caused and on any theory of liability, whether in
* contract, strict liability, or tort (including negligence or otherwise) arising
* in any way out of the use of this software, even if advised of the possibility
* of such damage
*
*
* <<Broadcom-WL-IPTag/Open:>>
*
* $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $
*/
#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <net/addrconf.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <proto/ethernet.h>
#include <proto/bcmevent.h>
#include <proto/vlan.h>
#include <proto/802.3.h>
#include <dngl_stats.h>
#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
#endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_config.h>
#ifdef WL_ESCAN
#include <wl_escan.h>
#endif
#include <dhd_dbg.h>
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/wakelock.h>
#endif
#ifdef WL_CFG80211
#ifdef WL_CFG80211_V1
#include <wl_cfg80211_v1.h>
#else
#include <wl_cfg80211.h>
#endif /* WL_CFG80211_V1 */
#endif
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif /* RTT_SUPPORT */
#ifdef CSI_SUPPORT
#include <dhd_csi.h>
#endif /* CSI_SUPPORT */
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#ifdef DHD_WMF
#include <dhd_wmf_linux.h>
#endif /* DHD_WMF */
#ifdef DHD_L2_FILTER
#include <proto/bcmicmp.h>
#include <bcm_l2_filter.h>
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */
#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
#endif /* DHD_DEBUG_PAGEALLOC */
#if defined(DHD_LB)
/* Dynamic CPU selection for load balancing */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#if !defined(DHD_LB_PRIMARY_CPUS)
#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
#endif
#if !defined(DHD_LB_SECONDARY_CPUS)
#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
#endif
#define HIST_BIN_SIZE 8
#if defined(DHD_LB_RXP)
static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
#endif /* DHD_LB_RXP */
#endif /* DHD_LB */
#ifdef WLMEDIA_HTSF
#include <linux/time.h>
#include <htsf.h>
#define HTSF_MINLEN 200 /* min. packet length to timestamp */
#define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
#define TSMAX 1000 /* max no. of timing record kept */
#define NUMBIN 34
static uint32 tsidx = 0;
static uint32 htsf_seqnum = 0;
uint32 tsfsync;
struct timeval tsync;
static uint32 tsport = 5010;
typedef struct histo_ {
uint32 bin[NUMBIN];
} histo_t;
#if !ISPOWEROF2(DHD_SDALIGN)
#error DHD_SDALIGN is not a power of 2!
#endif
static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
#endif /* WLMEDIA_HTSF */
#ifdef STBLINUX
#ifdef quote_str
#undef quote_str
#endif /* quote_str */
#ifdef to_str
#undef to_str
#endif /* quote_str */
#define to_str(s) #s
#define quote_str(s) to_str(s)
static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
#endif /* STBLINUX */
#if defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif
extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);
#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */
#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
#endif
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif /* SET_RANDOM_MAC_SOFTAP */
#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
#endif /* CUSTOM_CPUFREQ_THRESH */
#endif /* ENABLE_ADAPTIVE_SCHED */
/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1
#ifdef BCM_FD_AGGR
#include <bcm_rpc.h>
#include <bcm_rpc_tp.h>
#endif
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif
#include <wl_android.h>
/* Maximum STA per radio */
#define DHD_MAX_STA 32
#ifdef CUSTOMER_HW_AMLOGIC
#include <linux/amlogic/wifi_dt.h>
#endif
const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
unsigned long event, void *ptr);
static struct notifier_block dhd_inetaddr_notifier = {
.notifier_call = dhd_inetaddr_notifier_call
};
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
* created in kernel notifier link list (with 'next' pointing to itself)
*/
static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static int dhd_inet6addr_notifier_call(struct notifier_block *this,
unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
.notifier_call = dhd_inet6addr_notifier_call
};
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
* created in kernel notifier link list (with 'next' pointing to itself)
*/
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
MODULE_LICENSE("GPL and additional rights");
#endif /* LinuxVer */
#include <dhd_bus.h>
#ifdef BCM_FD_AGGR
#define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
#else
#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif
#endif /* BCM_FD_AGGR */
#ifdef PROP_TXSTATUS
#ifdef WL_CFG80211_V1
extern bool dhd_wlfc_skip_fc(void);
#else
extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
#endif
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
extern uint sd_f2_blocksize;
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
/*
 * Stub for the kernel's print_tainted(), which is not available to modules
 * on 2.6.15 (see the surrounding LINUX_VERSION_CODE guard).  Always returns
 * an empty string so logging code may call it unconditionally.
 *
 * Note: declared with a proper (void) prototype; the original empty
 * parameter list () declares an unspecified argument list in C.
 */
const char *
print_tainted(void)
{
    return "";
}
#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
extern wl_iw_extra_params_t g_wl_iw_params;
#endif /* defined(WL_WIRELESS_EXT) */
#ifdef CONFIG_PARTIALSUSPEND_SLP
#include <linux/partialsuspend_slp.h>
#define CONFIG_HAS_EARLYSUSPEND
#define DHD_USE_EARLYSUSPEND
#define register_early_suspend register_pre_suspend
#define unregister_early_suspend unregister_pre_suspend
#define early_suspend pre_suspend
#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
#else
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
#endif /* CONFIG_PARTIALSUSPEND_SLP */
extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
#ifdef PKT_FILTER_SUPPORT
extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
#endif
#ifdef READ_MACADDR
extern int dhd_read_macaddr(struct dhd_info *dhd);
#else
static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
#endif
#ifdef WRITE_MACADDR
extern int dhd_write_macaddr(struct ether_addr *mac);
#else
static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
#endif
#ifdef DHD_FW_COREDUMP
static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
#endif /* DHD_FW_COREDUMP */
#ifdef DHD_LOG_DUMP
static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
void dhd_schedule_log_dump(dhd_pub_t *dhdp);
static int do_dhd_log_dump(dhd_pub_t *dhdp);
#endif /* DHD_LOG_DUMP */
static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
.notifier_call = dhd_reboot_callback,
.priority = 1,
};
#ifdef BCMPCIE
static int is_reboot = 0;
#endif /* BCMPCIE */
/* Queued interface create/delete event; list entries are consumed by the
 * deferred ifadd/ifdel event handlers (dhd_ifadd_event_handler et al.).
 */
typedef struct dhd_if_event {
    struct list_head list;          /* entry in the pending event list */
    wl_event_data_if_t event;       /* interface event payload from the dongle */
    char name[IFNAMSIZ+1];          /* linux interface name for the event */
    uint8 mac[ETHER_ADDR_LEN];      /* MAC address carried by the event */
} dhd_if_event_t;
/* Interface control information */
/* Interface control information */
typedef struct dhd_if {
    struct dhd_info *info; /* back pointer to dhd_info */
    /* OS/stack specifics */
    struct net_device *net; /* linux net device for this interface */
    int idx; /* iface idx in dongle */
    uint subunit; /* subunit */
    uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
    bool set_macaddress; /* MAC change pending — presumably consumed by dhd_set_mac_addr_handler; confirm */
    bool set_multicast; /* mcast list update pending — presumably consumed by dhd_set_mcast_list_handler; confirm */
    uint8 bssidx; /* bsscfg index for the interface */
    bool attached; /* Delayed attachment when unset */
    bool txflowcontrol; /* Per interface flow control indicator */
    char name[IFNAMSIZ+1]; /* linux interface name */
    char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
    struct net_device_stats stats; /* per-interface stats reported to the network stack */
#ifdef DHD_WMF
    dhd_wmf_t wmf; /* per bsscfg wmf setting */
#endif /* DHD_WMF */
#ifdef PCIE_FULL_DONGLE
    struct list_head sta_list; /* sll of associated stations */
#if !defined(BCM_GMAC3)
    spinlock_t sta_list_lock; /* lock for manipulating sll */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
    uint32 ap_isolate; /* ap-isolation settings */
#ifdef DHD_L2_FILTER
    bool parp_enable; /* proxy-ARP enable — NOTE(review): semantics per dhd_l2_filter; confirm */
    bool parp_discard;
    bool parp_allnode;
    arp_table_t *phnd_arp_table; /* ARP table handle used by the L2 filter */
    /* for Per BSS modification */
    bool dhcp_unicast;
    bool block_ping;
    bool grat_arp;
#endif /* DHD_L2_FILTER */
#ifdef CSI_SUPPORT
    struct bin_attribute attr; /* sysfs binary attribute for CSI export */
    struct list_head csi_list; /* list of collected CSI records — confirm against dhd_csi */
    uint32 csi_update;
#endif /* CSI_SUPPORT */
} dhd_if_t;
#ifdef WLMEDIA_HTSF
/* 64-bit TSF value split into two 32-bit words */
typedef struct {
    uint32 low;  /* low word */
    uint32 high; /* high word */
} tsf_t;
/* State for host<->dongle timestamp synchronization */
typedef struct {
    uint32 last_cycle; /* last recorded cycle count */
    uint32 last_sec;   /* last recorded second */
    uint32 last_tsf;   /* last recorded TSF value */
    uint32 coef; /* scaling factor */
    uint32 coefdec1; /* first decimal */
    uint32 coefdec2; /* second decimal */
} htsf_t;
/* One four-point timing record (t1..t4) for a timestamped packet */
typedef struct {
    uint32 t1;
    uint32 t2;
    uint32 t3;
    uint32 t4;
} tstamp_t;
/* Ring of collected timing records, plus worst-case tracking */
static tstamp_t ts[TSMAX];
static tstamp_t maxdelayts; /* record with the maximum observed delay */
static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
#endif /* WLMEDIA_HTSF */
/* Work item carrying an IPv6 address notifier event to the deferred
 * handler (dhd_inet6_work_handler).
 */
struct ipv6_work_info_t {
    uint8 if_idx;           /* dongle interface index */
    char ipv6_addr[16];     /* raw 128-bit IPv6 address */
    unsigned long event;    /* notifier event code (e.g. NETDEV_UP/NETDEV_DOWN) */
};
#ifdef DHD_DEBUG
/* Descriptor for a debug memory dump buffer (see dhd_mem_dump) */
typedef struct dhd_dump {
    uint8 *buf;   /* dump data buffer */
    int bufsize;  /* size of buf in bytes */
} dhd_dump_t;
#endif /* DHD_DEBUG */
/* When Perimeter locks are deployed, any blocking calls must be preceded
* with a PERIM UNLOCK and followed by a PERIM LOCK.
* Examples of blocking calls are: schedule_timeout(), down_interruptible(),
* wait_event_timeout().
*/
/* Local private structure (extension of pub) */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
    wl_iw_t iw; /* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
    dhd_pub_t pub; /* OS-independent public state (shared with common layer) */
    dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
    void *adapter; /* adapter information, interrupt, fw path etc. */
    char fw_path[PATH_MAX]; /* path to firmware image */
    char nv_path[PATH_MAX]; /* path to nvram vars file */
    char clm_path[PATH_MAX]; /* path to clm vars file */
    char conf_path[PATH_MAX]; /* path to config vars file */
    /* serialize dhd iovars */
    struct mutex dhd_iovar_mutex;
    struct semaphore proto_sem; /* serializes protocol-layer transactions */
#ifdef PROP_TXSTATUS
    spinlock_t wlfc_spinlock; /* protects wlfc (proptx flow control) state */
#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
    htsf_t htsf; /* host timestamp sync state */
#endif
    wait_queue_head_t ioctl_resp_wait;  /* waiters for ioctl responses */
    wait_queue_head_t d3ack_wait;       /* waiters for D3 ack (PCIe PM) */
    wait_queue_head_t dhd_bus_busy_state_wait; /* waiters for bus-busy clear */
    uint32 default_wd_interval; /* default watchdog interval in ms */
    struct timer_list timer;    /* watchdog timer */
    bool wd_timer_valid;        /* watchdog timer is armed */
#ifdef DHD_PCIE_RUNTIMEPM
    struct timer_list rpm_timer;
    bool rpm_timer_valid;
    tsk_ctl_t thr_rpm_ctl; /* runtime-PM thread control */
#endif /* DHD_PCIE_RUNTIMEPM */
    struct tasklet_struct tasklet; /* DPC tasklet */
    spinlock_t sdlock;
    spinlock_t txqlock;
    spinlock_t dhd_lock;
    struct semaphore sdsem;
    tsk_ctl_t thr_dpc_ctl; /* DPC thread control */
    tsk_ctl_t thr_wdt_ctl; /* watchdog thread control */
    tsk_ctl_t thr_rxf_ctl; /* RX frame thread control */
    spinlock_t rxf_lock;
    bool rxthread_enabled;
    /* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
    struct wake_lock wl_wifi; /* Wifi wakelock */
    struct wake_lock wl_rxwake; /* Wifi rx wakelock */
    struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
    struct wake_lock wl_wdwake; /* Wifi wd wakelock */
    struct wake_lock wl_evtwake; /* Wifi event wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
    struct wake_lock wl_intrwake; /* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
    struct wake_lock wl_scanwake; /* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
    /* net_device interface lock, prevent race conditions among net_dev interface
     * calls and wifi_on or wifi_off
     */
    struct mutex dhd_net_if_mutex;
    struct mutex dhd_suspend_mutex;
#endif
    spinlock_t wakelock_spinlock;     /* protects the wakelock counters below */
    spinlock_t wakelock_evt_spinlock; /* protects wakelock_event_counter */
    uint32 wakelock_event_counter;
    uint32 wakelock_counter;
    int wakelock_wd_counter;
    int wakelock_rx_timeout_enable;
    int wakelock_ctrl_timeout_enable;
    bool waive_wakelock;           /* when set, wakelock requests are waived */
    uint32 wakelock_before_waive;  /* counter snapshot taken before waiving */
    /* Thread to issue ioctl for multicast */
    wait_queue_head_t ctrl_wait;
    atomic_t pend_8021x_cnt; /* count of in-flight 802.1x (EAPOL) frames */
    dhd_attach_states_t dhd_state; /* progress bitmask of dhd_attach stages */
#ifdef SHOW_LOGTRACE
    dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
    struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
#ifdef ARP_OFFLOAD_SUPPORT
    u32 pend_ipaddr; /* IP address pending push to the dongle ARP table */
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef BCM_FD_AGGR
    void *rpc_th;  /* RPC transport handle */
    void *rpc_osh;
    struct timer_list rpcth_timer;
    bool rpcth_timer_active;
    uint8 fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
    spinlock_t tcpack_lock; /* protects TCP-ACK suppression state */
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
    bool cpufreq_fix_status;
    struct mutex cpufreq_fix;
    struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
    struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
    void *dhd_deferred_wq; /* deferred work queue (see dhd_linux_wq) */
#ifdef DEBUG_CPU_FREQ
    struct notifier_block freq_trans;
    int __percpu *new_freq;
#endif
    unsigned int unit; /* instance number of this adapter */
    struct notifier_block pm_notifier;
#ifdef DHD_PSTA
    uint32 psta_mode; /* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_DEBUG
    dhd_dump_t *dump; /* pending memory dump descriptor */
    struct timer_list join_timer;
    u32 join_timeout_val;
    bool join_timer_active;
    uint scan_time_count;
    struct timer_list scan_timer;
    bool scan_timer_active;
#endif
#if defined(DHD_LB)
    /* CPU Load Balance dynamic CPU selection */
    /* Variable that tracks the current CPUs available for candidacy */
    cpumask_var_t cpumask_curr_avail;
    /* Primary and secondary CPU mask */
    cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
    cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
    struct notifier_block cpu_notifier; /* CPU hotplug callback (dhd_cpu_callback) */
    /* Tasklet to handle Tx Completion packet freeing */
    struct tasklet_struct tx_compl_tasklet;
    atomic_t tx_compl_cpu; /* cpu selected for Tx completion work */
    /* Tasklet to handle RxBuf Post during Rx completion */
    struct tasklet_struct rx_compl_tasklet;
    atomic_t rx_compl_cpu; /* cpu selected for Rx completion work */
    /* Napi struct for handling rx packet sendup. Packets are removed from
     * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
     * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
     * to run to rx_napi_cpu.
     */
    struct sk_buff_head rx_pend_queue ____cacheline_aligned;
    struct sk_buff_head rx_napi_queue ____cacheline_aligned;
    struct napi_struct rx_napi_struct ____cacheline_aligned;
    atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
    struct net_device *rx_napi_netdev; /* netdev of primary interface */
    struct work_struct rx_napi_dispatcher_work;
    struct work_struct tx_compl_dispatcher_work;
    struct work_struct rx_compl_dispatcher_work;
    /* Number of times DPC Tasklet ran */
    uint32 dhd_dpc_cnt;
    /* Number of times NAPI processing got scheduled */
    uint32 napi_sched_cnt;
    /* Number of times NAPI processing ran on each available core */
    uint32 napi_percpu_run_cnt[NR_CPUS];
    /* Number of times RX Completions got scheduled */
    uint32 rxc_sched_cnt;
    /* Number of times RX Completion ran on each available core */
    uint32 rxc_percpu_run_cnt[NR_CPUS];
    /* Number of times TX Completions got scheduled */
    uint32 txc_sched_cnt;
    /* Number of times TX Completions ran on each available core */
    uint32 txc_percpu_run_cnt[NR_CPUS];
    /* CPU status */
    /* Number of times each CPU came online */
    uint32 cpu_online_cnt[NR_CPUS];
    /* Number of times each CPU went offline */
    uint32 cpu_offline_cnt[NR_CPUS];
    /*
     * Consumer Histogram - NAPI RX Packet processing
     * -----------------------------------------------
     * On Each CPU, when the NAPI RX Packet processing call back was invoked
     * how many packets were processed is captured in this data structure.
     * Now it is difficult to capture the "exact" number of packets processed.
     * So considering the packet counter to be a 32 bit one, we have a
     * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
     * processed is rounded off to the next power of 2 and put in the
     * appropriate "bin"; the value in the bin gets incremented.
     * For example, assume that in CPU 1 if NAPI Rx runs 3 times
     * and the packet count processed is as follows (assume the bin counters are 0)
     * iteration 1 - 10 (the bin counter 2^4 increments to 1)
     * iteration 2 - 30 (the bin counter 2^5 increments to 1)
     * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
     */
    uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE];
    uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE];
    uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE];
#endif /* DHD_LB */
#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
    struct kobject dhd_kobj; /* sysfs kobject for this driver instance */
#ifdef SUPPORT_SENSORHUB
    uint32 shub_enable;
#endif /* SUPPORT_SENSORHUB */
    struct delayed_work dhd_memdump_work; /* delayed work for memory dump */
} dhd_info_t;
#define DHDIF_FWDER(dhdif) FALSE
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = FALSE;
/* Definitions to provide path to the firmware and nvram
* example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
*/
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
char clm_path[MOD_PARAM_PATHLEN];
char config_path[MOD_PARAM_PATHLEN];
char ccode[MOD_PARAM_PATHLEN];
/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];
/* information string to keep firmware and chip version info visible from log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);
#if defined(DHD_LB_RXP)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
#endif /* DHD_LB_RXP */
extern int wl_control_wl_start(struct net_device *dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
struct semaphore dhd_registration_sem;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */
#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
static void dhd_bridge_dev_set(dhd_info_t * dhd, int ifidx, struct net_device * dev);
#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
/* Error bits */
module_param(dhd_msg_level, int, 0);
#if defined(WL_WIRELESS_EXT)
module_param(iw_msg_level, int, 0);
#endif
#ifdef WL_CFG80211
module_param(wl_dbg_level, int, 0);
#endif
module_param(android_msg_level, int, 0);
module_param(config_msg_level, int, 0);
#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);
/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
#ifdef ENABLE_ARP_SNOOP_MODE
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP;
#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
#endif /* ENABLE_ARP_SNOOP_MODE */
module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */
/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
module_param_string(ccode, ccode, MOD_PARAM_PATHLEN, 0660);
/* Watchdog interval */
/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)
uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);
#ifdef DHD_PCIE_RUNTIMEPM
uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
#endif /* DHD_PCIE_RUNTIMEPMT */
#if defined(DHD_DEBUG)
/* Console poll interval */
uint dhd_console_ms = 0;
module_param(dhd_console_ms, uint, 0644);
#endif /* defined(DHD_DEBUG) */
uint dhd_allow_stop = FALSE;
module_param(dhd_allow_stop, uint, 0644);
uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);
#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif
/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);
/* Pkt filter mode control */
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
uint dhd_master_mode = FALSE;
#else
uint dhd_master_mode = FALSE;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);
int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);
/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);
/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);
int passive_channel_skip = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
#if !defined(BCMDHDUSB)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* BCMDHDUSB */
/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);
/* Functions to manage sysfs interface for dhd */
static int dhd_sysfs_init(dhd_info_t *dhd);
static void dhd_sysfs_exit(dhd_info_t *dhd);
#if defined(DHD_LB)
/*
 * Assign the boot-time default CPU for each load-balanced job:
 * NAPI rx processing on CPU 1, Tx/Rx completion handling on CPU 2.
 * The candidacy algorithm may reassign these later.
 */
static void
dhd_lb_set_default_cpus(dhd_info_t *dhd)
{
    const int dflt_napi_cpu = 1;
    const int dflt_compl_cpu = 2;

    atomic_set(&dhd->rx_napi_cpu, dflt_napi_cpu);
    atomic_set(&dhd->rx_compl_cpu, dflt_compl_cpu);
    atomic_set(&dhd->tx_compl_cpu, dflt_compl_cpu);
}
/* Release every cpumask allocated by dhd_cpumasks_init().
 * free_cpumask_var() on an unallocated mask is a no-op, so this is safe
 * to call from the init failure path.
 */
static void
dhd_cpumasks_deinit(dhd_info_t *dhd)
{
    free_cpumask_var(dhd->cpumask_secondary_new);
    free_cpumask_var(dhd->cpumask_secondary);
    free_cpumask_var(dhd->cpumask_primary_new);
    free_cpumask_var(dhd->cpumask_primary);
    free_cpumask_var(dhd->cpumask_curr_avail);
}
static int
dhd_cpumasks_init(dhd_info_t *dhd)
{
int id;
uint32 cpus;
int ret = 0;
if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
!alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
!alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
!alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
!alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
ret = -ENOMEM;
goto fail;
}
cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
cpumask_clear(dhd->cpumask_primary);
cpumask_clear(dhd->cpumask_secondary);
cpus = DHD_LB_PRIMARY_CPUS;
for (id = 0; id < NR_CPUS; id++) {
if (isset(&cpus, id))
cpumask_set_cpu(id, dhd->cpumask_primary);
}
cpus = DHD_LB_SECONDARY_CPUS;
for (id = 0; id < NR_CPUS; id++) {
if (isset(&cpus, id))
cpumask_set_cpu(id, dhd->cpumask_secondary);
}
return ret;
fail:
dhd_cpumasks_deinit(dhd);
return ret;
}
/*
* The CPU Candidacy Algorithm
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~
* The available CPUs for selection are divided into two groups
* Primary Set - A CPU mask that carries the First Choice CPUs
* Secondary Set - A CPU mask that carries the Second Choice CPUs.
*
* There are two types of Job, that needs to be assigned to
* the CPUs, from one of the above mentioned CPU group. The Jobs are
* 1) Rx Packet Processing - napi_cpu
* 2) Completion Processing (Tx, RX) - compl_cpu
*
* To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
* on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
* algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu.
* If there are more processors free, it assigns one to compl_cpu.
* It also tries to ensure that both napi_cpu and compl_cpu are not on the same
* CPU, as much as possible.
*
* By design, both Tx and Rx completion jobs are run on the same CPU core, as it
* would allow Tx completion skb's to be released into a local free pool from
* which the rx buffer posts could have been serviced. it is important to note
* that a Tx packet may not have a large enough buffer for rx posting.
*/
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
    uint32 num_primary;   /* online CPUs from the primary mask */
    uint32 num_secondary; /* online CPUs from the secondary mask */
    uint32 napi_cpu = 0;  /* cpu picked for napi rx processing */
    uint32 compl_cpu = 0; /* cpu picked for completion jobs */

    cpumask_clear(dhd->cpumask_primary_new);
    cpumask_clear(dhd->cpumask_secondary_new);

    /* Intersect the configured groups with the currently available CPUs.
     * A job already running on a secondary-group CPU is still moved to a
     * primary CPU when one is available, hence no conditional checks here.
     */
    cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
        dhd->cpumask_curr_avail);
    cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
        dhd->cpumask_curr_avail);

    /* First choice: assign both jobs from the primary group. */
    num_primary = cpumask_weight(dhd->cpumask_primary_new);
    if (num_primary > 0) {
        napi_cpu = cpumask_first(dhd->cpumask_primary_new);
        /* cpumask_next() returns >= nr_cpu_ids when no further CPU exists */
        compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
        if (compl_cpu >= nr_cpu_ids)
            compl_cpu = 0;
    }
    DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n",
        __FUNCTION__, napi_cpu, compl_cpu));

    /* Second choice: fill any unassigned job from the secondary group. */
    num_secondary = cpumask_weight(dhd->cpumask_secondary_new);
    DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
        __FUNCTION__, num_secondary, nr_cpu_ids));
    if (num_secondary > 0) {
        /* napi_cpu == 0 here means no primary-group CPU was online */
        if (napi_cpu == 0) {
            napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
            compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
        } else if (compl_cpu == 0) {
            compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
        }
        /* If no CPU was available for completion, choose CPU 0 */
        if (compl_cpu >= nr_cpu_ids)
            compl_cpu = 0;
    }

    /* Nothing online in either group: fall back to CPU 0 for both jobs. */
    if ((num_primary == 0) && (num_secondary == 0)) {
        napi_cpu = 0;
        compl_cpu = 0;
    }
    DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n",
        __FUNCTION__, napi_cpu, compl_cpu));

    ASSERT(napi_cpu < nr_cpu_ids);
    ASSERT(compl_cpu < nr_cpu_ids);

    atomic_set(&dhd->rx_napi_cpu, napi_cpu);
    atomic_set(&dhd->tx_compl_cpu, compl_cpu);
    atomic_set(&dhd->rx_compl_cpu, compl_cpu);
}
/*
* Function to handle CPU Hotplug notifications.
* One of the task it does is to trigger the CPU Candidacy algorithm
* for load balancing.
*/
/*
 * CPU hotplug notifier callback: keep cpumask_curr_avail in sync with the
 * set of usable CPUs and re-run the candidacy algorithm on every change.
 */
int
dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned int)(long)hcpu;
    dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);

    if (action == CPU_ONLINE) {
        DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
        cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
        dhd_select_cpu_candidacy(dhd);
    } else if ((action == CPU_DOWN_PREPARE) ||
               (action == CPU_DOWN_PREPARE_FROZEN)) {
        DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
        cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
        dhd_select_cpu_candidacy(dhd);
    }
    /* All other hotplug transitions are ignored. */
    return NOTIFY_OK;
}
#if defined(DHD_LB_STATS)
/* Reset every load-balance statistics counter and histogram to zero. */
void dhd_lb_stats_init(dhd_pub_t *dhdp)
{
    dhd_info_t *dhd;
    int cpu, bin;

    if (dhdp == NULL) {
        DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n",
            __FUNCTION__));
        return;
    }
    dhd = dhdp->info;
    if (dhd == NULL) {
        DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
        return;
    }

    /* Global scheduling counters */
    DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
    DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
    DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
    DHD_LB_STATS_CLR(dhd->txc_sched_cnt);

    /* Per-CPU counters and histograms (each clear is independent,
     * so a single pass over the CPUs suffices)
     */
    for (cpu = 0; cpu < NR_CPUS; cpu++) {
        DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[cpu]);
        DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[cpu]);
        DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[cpu]);
        DHD_LB_STATS_CLR(dhd->cpu_online_cnt[cpu]);
        DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[cpu]);
        for (bin = 0; bin < HIST_BIN_SIZE; bin++) {
            DHD_LB_STATS_CLR(dhd->napi_rx_hist[cpu][bin]);
            DHD_LB_STATS_CLR(dhd->txc_hist[cpu][bin]);
            DHD_LB_STATS_CLR(dhd->rxc_hist[cpu][bin]);
        }
    }
}
/* Print one [NR_CPUS][HIST_BIN_SIZE] histogram: a header row of CPU ids,
 * one row per bin, then a weighted per-CPU total row and a grand total.
 */
static void dhd_lb_stats_dump_histo(
	struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE])
{
	int bin, cpu;
	uint32 per_cpu_total[NR_CPUS] = {0};
	uint32 grand_total = 0;

	bcm_bprintf(strbuf, "CPU: \t\t");
	for (cpu = 0; cpu < num_possible_cpus(); cpu++)
		bcm_bprintf(strbuf, "%d\t", cpu);
	bcm_bprintf(strbuf, "\nBin\n");

	/* Bin labels are the bucket upper bounds: 2, 4, 8, ... */
	for (bin = 0; bin < HIST_BIN_SIZE; bin++) {
		bcm_bprintf(strbuf, "%d:\t\t", 1<<(bin+1));
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			bcm_bprintf(strbuf, "%d\t", hist[cpu][bin]);
		}
		bcm_bprintf(strbuf, "\n");
	}

	bcm_bprintf(strbuf, "Per CPU Total \t");
	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
		/* Weight each bucket count by its bucket size */
		for (bin = 0; bin < HIST_BIN_SIZE; bin++) {
			per_cpu_total[cpu] += (hist[cpu][bin] * (1<<(bin+1)));
		}
		bcm_bprintf(strbuf, "%d\t", per_cpu_total[cpu]);
		grand_total += per_cpu_total[cpu];
	}
	bcm_bprintf(strbuf, "\nTotal\t\t%d \n", grand_total);
}
/* Print a per-CPU counter array as two rows: CPU indices, then values. */
static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
{
	int cpu;

	bcm_bprintf(strbuf, "CPU: \t");
	for (cpu = 0; cpu < num_possible_cpus(); cpu++)
		bcm_bprintf(strbuf, "%d\t", cpu);
	bcm_bprintf(strbuf, "\n");

	bcm_bprintf(strbuf, "Val: \t");
	for (cpu = 0; cpu < num_possible_cpus(); cpu++)
		bcm_bprintf(strbuf, "%u\t", p[cpu]);
	bcm_bprintf(strbuf, "\n");
}
/* Dump all load-balancing statistics (hotplug counts, scheduling counts and,
 * per enabled feature, the per-CPU run counters and packet histograms) into
 * the caller-supplied string buffer.
 */
void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
	dhd_info_t *dhd;

	/* Both arguments are required; bail out quietly on NULL */
	if (dhdp == NULL || strbuf == NULL) {
		DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
			__FUNCTION__, dhdp, strbuf));
		return;
	}
	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	/* CPU hotplug activity observed by dhd_cpu_callback() */
	bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
	bcm_bprintf(strbuf, "cpu_offline_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);

	/* How often each load-balanced work item was scheduled */
	bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
		dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
		dhd->txc_sched_cnt);
#ifdef DHD_LB_RXP
	/* RX packet processing (NAPI) distribution */
	bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
#endif /* DHD_LB_RXP */
#ifdef DHD_LB_RXC
	/* RX completion (buffer post) distribution */
	bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
#endif /* DHD_LB_RXC */
#ifdef DHD_LB_TXC
	/* TX completion (buffer free) distribution */
	bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
#endif /* DHD_LB_TXC */
}
/* Increment the histogram bucket matching 'count'. Buckets hold counts whose
 * next-larger power of two is 2, 4, 8, ... 128; anything larger lands in the
 * last bucket, and a count of zero is not recorded.
 */
static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count)
{
	uint32 bin_power = next_larger_power2(count);
	int slot = -1;

	if ((bin_power == 1) || (bin_power == 2))
		slot = 0;
	else if (bin_power == 4)
		slot = 1;
	else if (bin_power == 8)
		slot = 2;
	else if (bin_power == 16)
		slot = 3;
	else if (bin_power == 32)
		slot = 4;
	else if (bin_power == 64)
		slot = 5;
	else if (bin_power == 128)
		slot = 6;
	else if (bin_power != 0)
		slot = 7;	/* overflow bucket */

	if (slot >= 0)
		bin[slot] = bin[slot] + 1;
}
/* Record a NAPI batch size in the current CPU's RX histogram. */
extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
{
	dhd_info_t *dhdinfo = dhdp->info;
	int curr_cpu;

	/* Preemption is re-enabled immediately: the CPU id is only a
	 * statistical sample, not a pinned CPU.
	 */
	curr_cpu = get_cpu();
	put_cpu();

	dhd_lb_stats_update_histo(dhdinfo->napi_rx_hist[curr_cpu], count);
}
/* Record a TX-completion batch size in the current CPU's histogram. */
extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
{
	dhd_info_t *dhdinfo = dhdp->info;
	int curr_cpu;

	/* CPU id is a statistical sample; preemption re-enabled right away */
	curr_cpu = get_cpu();
	put_cpu();

	dhd_lb_stats_update_histo(dhdinfo->txc_hist[curr_cpu], count);
}
/* Record an RX-completion batch size in the current CPU's histogram. */
extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
{
	dhd_info_t *dhdinfo = dhdp->info;
	int curr_cpu;

	/* CPU id is a statistical sample; preemption re-enabled right away */
	curr_cpu = get_cpu();
	put_cpu();

	dhd_lb_stats_update_histo(dhdinfo->rxc_hist[curr_cpu], count);
}
/* Bump this CPU's TX-completion run counter. */
extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhdinfo = dhdp->info;

	DHD_LB_STATS_PERCPU_ARR_INCR(dhdinfo->txc_percpu_run_cnt);
}
/* Bump this CPU's RX-completion run counter. */
extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhdinfo = dhdp->info;

	DHD_LB_STATS_PERCPU_ARR_INCR(dhdinfo->rxc_percpu_run_cnt);
}
#endif /* DHD_LB_STATS */
#endif /* DHD_LB */
#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
int g_frameburst = 1;
#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
/* DHD Perimeter lock only used in router with bypass forwarding. */
#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
#ifdef PCIE_FULL_DONGLE
#if defined(BCM_GMAC3)
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
#else /* ! BCM_GMAC3 */
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
/* Control fw roaming */
uint dhd_roam_disable = 0;
#ifdef BCMDBGFS
extern int dhd_dbg_init(dhd_pub_t *dhdp);
extern void dhd_dbg_remove(void);
#endif
/* Control radio state */
uint dhd_radio_up = 1;
/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
/* The following are specific to the SDIO dongle */
/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);
/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);
/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);
/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);
#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);
/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);
#endif /* BCMSDIO */
#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);
/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */
/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
module_param(allow_delay_fwdl, int, 0);
extern char dhd_version[];
extern char fw_version[];
extern char clm_version[];
int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);
#ifdef WLMEDIA_HTSF
void htsf_update(dhd_info_t *dhd, void *data);
tsf_t prev_tsf, cur_tsf;
uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
static void dhd_dump_latency(void);
static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_dump_htsfhisto(histo_t *his, char *s);
#endif /* WLMEDIA_HTSF */
/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);
#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */
static void dhd_dpc(ulong data);
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);
#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */
static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
wl_event_msg_t *event_ptr, void **data_ptr);
#if defined(CONFIG_PM_SLEEP)
/* PM notifier: map system suspend/resume transitions onto the driver's
 * suspend state (and wlfc suspend/resume when proptxstatus is enabled).
 */
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
	int ret = NOTIFY_DONE;
	bool suspend = FALSE;
	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);

	BCM_REFERENCE(dhdinfo);

	/* Decide the direction of the PM transition */
	if ((action == PM_HIBERNATION_PREPARE) || (action == PM_SUSPEND_PREPARE)) {
		suspend = TRUE;
	} else if ((action == PM_POST_HIBERNATION) || (action == PM_POST_SUSPEND)) {
		suspend = FALSE;
	}

#if defined(SUPPORT_P2P_GO_PS)
#ifdef PROP_TXSTATUS
	if (suspend) {
		/* Waive the wake lock around the wlfc suspend call */
		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
		dhd_wlfc_suspend(&dhdinfo->pub);
		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
	} else {
		dhd_wlfc_resume(&dhdinfo->pub);
	}
#endif /* PROP_TXSTATUS */
#endif /* defined(SUPPORT_P2P_GO_PS) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 39))
	dhd_mmc_suspend = suspend;
	smp_mb();
#endif

	return ret;
}
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
* created in kernel notifier link list (with 'next' pointing to itself)
*/
static bool dhd_pm_notifier_registered = FALSE;
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */
/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);
/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
int ifidx; /* interface index */
} dhd_dev_priv_t;
#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
/** Reset the dhd private area of a net_device to an unbound state. */
static inline void
dhd_dev_priv_clear(struct net_device * dev)
{
	dhd_dev_priv_t * dev_priv;

	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);

	/* Invalidate the cached interface index and drop both cached pointers */
	dev_priv->ifidx = DHD_BAD_IF;
	dev_priv->ifp = (dhd_if_t *)NULL;
	dev_priv->dhd = (dhd_info_t *)NULL;
}
/** Cache the dhd_info/dhd_if pointers and interface index in the
 * net_device private area so fast paths avoid repeated lookups.
 */
static inline void
dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
                  int ifidx)
{
	dhd_dev_priv_t * dev_priv;

	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);

	dev_priv->ifidx = ifidx;
	dev_priv->ifp = ifp;
	dev_priv->dhd = dhd;
}
#ifdef GWIFI_SUPPORT
#define DHD_MM DHD_INFO
#define DHD_DUMP_MM(args) do {printf args;} while (0)
/* Decode a rate spec word (rspec) into a rate_info_t: guard interval,
 * bandwidth, encoding family (legacy/HT/VHT) and the per-family rate index.
 * Returns false when rspec is 0 (no rate information), true otherwise.
 */
static bool dhd_rspec_to_rate_info(uint32_t rspec, rate_info_t *ri)
{
	uint32 encode;
	uint32 rate;

	if (rspec == 0) {
		return false;
	} else {
		encode = (rspec & WL_RSPEC_ENCODING_MASK);
		rate = (rspec & WL_RSPEC_RATE_MASK);
		/* short guard interval flag */
		ri->sgi = ((rspec & WL_RSPEC_SGI) != 0) ? 1:0;
		DHD_MM(("sgi=%s\n", (ri->sgi==1)?"TRUE":"FALSE"));
		/* bandwidth encoded as 0..3 = 20/40/80/160 MHz */
		switch (rspec & WL_RSPEC_BW_MASK) {
		case WL_RSPEC_BW_20MHZ:
			ri->bw = 0;
			DHD_MM(("bw=20MHZ\n"));
			break;
		case WL_RSPEC_BW_40MHZ:
			ri->bw = 1;
			DHD_MM(("bw=40MHZ\n"));
			break;
		case WL_RSPEC_BW_80MHZ:
			ri->bw = 2;
			DHD_MM(("bw=80MHZ\n"));
			break;
		case WL_RSPEC_BW_160MHZ:
		default:
			ri->bw = 3;
			DHD_MM(("bw=160MHZ\n"));
			break;
		}
	}
	if (encode == WL_RSPEC_ENCODE_RATE) {
		/* Legacy rates, in 500 kbps units (2 = 1 Mbps ... 96 = 48 Mbps).
		 * b indices cover 11b rates, g indices cover 11g rates.
		 * NOTE(review): "case 104" (52 Mbps) looks like it was meant to
		 * be 108 (54 Mbps); either way it shares the default g=7 slot --
		 * confirm against the firmware rate encoding.
		 */
		ri->type = RATE_LEGACY;
		switch (rate) {
		case 2:
			ri->idx.b = 0;
			break;
		case 4:
			ri->idx.b = 1;
			break;
		case 11:
			ri->idx.b = 2;
			break;
		case 22:
			ri->idx.b = 3;
			break;
		case 12:
			ri->idx.g = 0;
			break;
		case 18:
			ri->idx.g = 1;
			break;
		case 24:
			ri->idx.g = 2;
			break;
		case 36:
			ri->idx.g = 3;
			break;
		case 48:
			ri->idx.g = 4;
			break;
		case 72:
			ri->idx.g = 5;
			break;
		case 96:
			ri->idx.g = 6;
			break;
		case 104:
		default:
			ri->idx.g = 7;
			break;
		}
		DHD_MM(("legacy rate %d%s Mbps\n", rate/2, (rate % 2)?".5":""));
	} else if (encode == WL_RSPEC_ENCODE_HT) {
		/* HT: spatial streams are derived from the MCS index range
		 * (0-7 = 1ss, 8-15 = 2ss, 16-23 = 3ss, else 4ss).
		 * NOTE(review): idx.mcs = rate % 16 keeps values 8-15 for 2ss --
		 * confirm consumers expect that rather than the per-stream
		 * 0-7 index.
		 */
		ri->type = RATE_HT;
		ri->idx.mcs = rate % 16;
		if (rate <= 7)
			ri->nss = 1;
		else if (rate <= 15)
			ri->nss = 2;
		else if (rate <= 23)
			ri->nss = 3;
		else
			ri->nss = 4;
		DHD_MM(("mcs index %d nss_index %d\n", rate, ri->nss));
	} else {
		/* VHT: MCS and NSS are separate fields in the rspec */
		ri->type = RATE_VHT;
		ri->idx.vhtmcs = (rspec & WL_RSPEC_VHT_MCS_MASK);
		ri->nss = ((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT);
		DHD_MM(("vht mcs %d Nss %d\n", ri->idx.vhtmcs, ri->nss));
	}
	return true;
}
/* Look up the per-station metric entry whose MAC matches 'ea'.
 * Returns NULL when the station is not registered.
 */
static gwifi_mcs_metric_t *dhd_mcs_metric_find_mm(dhd_pub_t *pub, void *ea)
{
	int idx;

	for (idx = 0; idx < MCS_METRIC_MAX_CLIENT; idx++) {
		gwifi_mcs_metric_t *entry = pub->gwifi_mm[idx];

		if ((entry != NULL) &&
		    !memcmp(ea, &entry->mac, ETHER_ADDR_LEN)) {
			return entry;
		}
	}
	return NULL;
}
/* Dump one gwifi_mm_count_t: each line is a label followed by its buckets. */
static void dhd_mcs_metric_dump_gmmc(gwifi_mm_count_t *gmmc)
{
	int k;

	DHD_DUMP_MM((" mcs count:"));
	for (k = 0; k < 10; k++)
		DHD_DUMP_MM((" %lu", (unsigned long)gmmc->mcs_count[k]));
	DHD_DUMP_MM(("\n"));

	DHD_DUMP_MM((" bw count:"));
	for (k = 0; k < 4; k++)
		DHD_DUMP_MM((" %lu", (unsigned long)gmmc->bw_count[k]));
	DHD_DUMP_MM(("\n"));

	DHD_DUMP_MM((" nss count:"));
	for (k = 0; k < 4; k++)
		DHD_DUMP_MM((" %lu", (unsigned long)gmmc->nss_count[k]));
	DHD_DUMP_MM(("\n"));

	DHD_DUMP_MM((" gi count:"));
	for (k = 0; k < 2; k++)
		DHD_DUMP_MM((" %lu", (unsigned long)gmmc->gi_count[k]));
	DHD_DUMP_MM(("\n"));

	DHD_DUMP_MM((" legacy rate count:"));
	for (k = 0; k < 12; k++)
		DHD_DUMP_MM((" %lu", (unsigned long)gmmc->legacy_rate_count[k]));
	DHD_DUMP_MM(("\n"));
}
/* Dump every registered station's TX/RX counter tables and link-quality
 * histograms (SNR, noise floor, RSSI).
 */
void dhd_mcs_metric_dump(dhd_pub_t *pub)
{
	int idx, bin;

	for (idx = 0; idx < MCS_METRIC_MAX_CLIENT; idx++) {
		gwifi_mcs_metric_t *g_mm = pub->gwifi_mm[idx];
		struct histograms_report *g_hist;
		uint8 *eabuf;

		if (g_mm == NULL)
			continue;
		g_hist = &g_mm->hist;
		eabuf = g_mm->mac;
		DHD_DUMP_MM(("sta:%02x:%02x:%02x:%02x:%02x:%02x:\n",
			(uchar)eabuf[0]&0xff,
			(uchar)eabuf[1]&0xff,
			(uchar)eabuf[2]&0xff,
			(uchar)eabuf[3]&0xff,
			(uchar)eabuf[4]&0xff,
			(uchar)eabuf[5]&0xff));
		/* Per-direction counter tables */
		DHD_DUMP_MM((" txpackets:\n"));
		dhd_mcs_metric_dump_gmmc(&g_mm->txpackets);
		DHD_DUMP_MM((" txbytes:\n"));
		dhd_mcs_metric_dump_gmmc(&g_mm->txbytes);
		DHD_DUMP_MM((" txfailpackets:\n"));
		dhd_mcs_metric_dump_gmmc(&g_mm->txfailpackets);
		DHD_DUMP_MM((" txfailbytes:\n"));
		dhd_mcs_metric_dump_gmmc(&g_mm->txfailbytes);
		DHD_DUMP_MM((" txretrypackets:\n"));
		dhd_mcs_metric_dump_gmmc(&g_mm->txretrypackets);
		DHD_DUMP_MM((" txretrybytes:\n"));
		dhd_mcs_metric_dump_gmmc(&g_mm->txretrybytes);
		DHD_DUMP_MM((" rxpackets:\n"));
		dhd_mcs_metric_dump_gmmc(&g_mm->rxpackets);
		DHD_DUMP_MM((" rxbytes:\n"));
		dhd_mcs_metric_dump_gmmc(&g_mm->rxbytes);
		/* Link-quality histograms */
		DHD_DUMP_MM(("\nrx snr:\n"));
		for (bin = 0; bin < WSTATS_SNR_RANGE; bin++)
			DHD_DUMP_MM(("%d ", g_hist->rxsnr[bin]));
		DHD_DUMP_MM(("\nrx noise:\n"));
		for (bin = 0; bin < WSTATS_NOISE_FLR_RANGE; bin++)
			DHD_DUMP_MM(("%d ", g_hist->rxnoiseflr[bin]));
		DHD_DUMP_MM(("\nrx signal level:\n"));
		for (bin = 0; bin < WSTATS_RSSI_RANGE; bin++)
			DHD_DUMP_MM(("%d ", g_hist->rxrssi[bin]));
		DHD_DUMP_MM(("\n"));
	}
}
/* Fold one received-frame sample into the per-station histograms: the rate
 * bucket matching the frame's rspec (11b/11g/11n/11ac tables) plus the SNR,
 * noise-floor and RSSI distributions.
 * Fix: removed the 'txexp' local, which was computed but never used.
 */
static void dhd_hist_upd_count(struct histograms_report *hist, uint8 snr, uint8 noise_flr,
	uint8 signal_level, uint32 rspec)
{
	uint encode, rate, bw_val;
	int sgi = 0;
	uint vht = 0;
	uint nss_idx = 0;
	uint bw_idx = 0;
	uint32 mcs_idx = 0;

	/* An all-zero link-quality sample carries no information */
	if ((snr == 0) && (signal_level == 0) && (noise_flr == 0))
		return;
	if (rspec == 0) {
		return;
	} else {
		encode = (rspec & WL_RSPEC_ENCODING_MASK);
		rate = (rspec & WL_RSPEC_RATE_MASK);
		sgi = ((rspec & WL_RSPEC_SGI) != 0) ? 1:0;
		bw_val = (rspec & WL_RSPEC_BW_MASK);
		DHD_MM(("hist: sgi=%s\n", sgi?"TRUE":"FALSE"));
		/* The 11n/11ac tables only distinguish 3 bandwidth buckets */
		if (bw_val == WL_RSPEC_BW_20MHZ) {
			bw_idx = 0;
			DHD_MM(("hist: bw=20MHZ\n"));
		} else if (bw_val == WL_RSPEC_BW_40MHZ) {
			bw_idx = 1;
			DHD_MM(("hist: bw=40MHZ\n"));
		} else {
			bw_idx = 2;
			DHD_MM(("hist: bw=80/160MHZ\n"));
		}
	}
	if (encode == WL_RSPEC_ENCODE_RATE) {
		/* Legacy: rate is in 500 kbps units; split across 11b/11g bins */
		switch (rate) {
		case 2:
			hist->rx11b[0]++;
			break;
		case 4:
			hist->rx11b[1]++;
			break;
		case 11:
			hist->rx11b[2]++;
			break;
		case 22:
			hist->rx11b[3]++;
			break;
		case 12:
			hist->rx11g[0]++;
			break;
		case 18:
			hist->rx11g[1]++;
			break;
		case 24:
			hist->rx11g[2]++;
			break;
		case 36:
			hist->rx11g[3]++;
			break;
		case 48:
			hist->rx11g[4]++;
			break;
		case 72:
			hist->rx11g[5]++;
			break;
		case 96:
			hist->rx11g[6]++;
			break;
		case 104:
		default:
			hist->rx11g[7]++;
			break;
		}
		DHD_MM(("hist: :legacy rate %d%s Mbps\n", rate/2, (rate % 2)?".5":""));
	} else if (encode == WL_RSPEC_ENCODE_HT) {
		/* HT: NSS is derived from the MCS index range */
		mcs_idx = rate % 16;
		hist->rx11n[sgi][bw_idx][mcs_idx]++;
		if (rate <= 7)
			nss_idx = 0;
		else if (rate <= 15)
			nss_idx = 1;
		else if (rate <= 23)
			nss_idx = 2;
		else
			nss_idx = 3;
		DHD_MM(("hist: mcs index %d nss_index %d\n",
			rate, nss_idx+1));
	} else {
		/* VHT: MCS and NSS are separate rspec fields; nss_idx wraps to
		 * a huge unsigned value when the NSS field is 0, which the
		 * range check below rejects.
		 */
		vht = (rspec & WL_RSPEC_VHT_MCS_MASK);
		nss_idx = ((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT) - 1;
		if (nss_idx < WSTATS_NSS_RANGE) {
			hist->rx11ac[nss_idx][sgi][bw_idx][vht]++;
		} else {
			DHD_ERROR(("nss > %d, not support\n", WSTATS_NSS_RANGE));
		}
		DHD_MM(("hist: vht mcs %d Nss %d\n",
			vht, nss_idx+1));
	}
	/* update snr */
	hist->rxsnr[snr]++;
	/* update noise floor */
	hist->rxnoiseflr[noise_flr]++;
	/* update rssi (signal level) */
	hist->rxrssi[signal_level]++;
	/* NOTE(review): snr/noise_flr/signal_level are used as raw indices;
	 * assumes callers keep them below WSTATS_*_RANGE -- confirm.
	 */
	return;
}
/* Accumulate 'count' (packets or bytes) into one gwifi_mm_count_t, bucketed
 * by the rate spec: bandwidth, NSS, guard interval, and either the legacy
 * rate table or the MCS table plus the 2-D rate/bw-gi table.
 * Fix: removed the 'txexp' local, which was computed but never used.
 */
static void dhd_mcs_metric_upd_count(gwifi_mm_count_t *g_mmc, uint64 count, uint32 rspec)
{
	uint encode, rate, bw_val;
	bool sgi = FALSE, legacy_val = FALSE, vht_val = FALSE;
	uint vht = 0;
	uint nss_idx = 0;
	uint bw_idx = 0;
	uint legacy_rate_idx = 0;
	uint rt_idx_1 = 0, rt_idx_2 = 0;

	if (rspec == 0) {
		return;
	} else {
		encode = (rspec & WL_RSPEC_ENCODING_MASK);
		rate = (rspec & WL_RSPEC_RATE_MASK);
		sgi = ((rspec & WL_RSPEC_SGI) != 0) ? TRUE : FALSE;
		bw_val = (rspec & WL_RSPEC_BW_MASK);
		DHD_MM(("mm: sgi=%s\n", sgi?"TRUE":"FALSE"));
		/* 4 bandwidth buckets: 20/40/80/160 MHz */
		if (bw_val == WL_RSPEC_BW_20MHZ) {
			bw_idx = 0;
			DHD_MM(("mm: bw=20MHZ\n"));
		} else if (bw_val == WL_RSPEC_BW_40MHZ) {
			bw_idx = 1;
			DHD_MM(("mm: bw=40MHZ\n"));
		} else if (bw_val == WL_RSPEC_BW_80MHZ) {
			bw_idx = 2;
			DHD_MM(("mm: bw=80MHZ\n"));
		} else if (bw_val == WL_RSPEC_BW_160MHZ) {
			bw_idx = 3;
			DHD_MM(("mm: bw=160MHZ\n"));
		}
	}
	if (encode == WL_RSPEC_ENCODE_RATE) {
		/* Legacy: map the 500 kbps rate code onto the 12-entry table */
		legacy_val = TRUE;
		switch (rate) {
		case 2:
			legacy_rate_idx = 0;
			break;
		case 4:
			legacy_rate_idx = 1;
			break;
		case 11:
			legacy_rate_idx = 2;
			break;
		case 22:
			legacy_rate_idx = 3;
			break;
		case 12:
			legacy_rate_idx = 4;
			break;
		case 18:
			legacy_rate_idx = 5;
			break;
		case 24:
			legacy_rate_idx = 6;
			break;
		case 36:
			legacy_rate_idx = 7;
			break;
		case 48:
			legacy_rate_idx = 8;
			break;
		case 72:
			legacy_rate_idx = 9;
			break;
		case 96:
			legacy_rate_idx = 10;
			break;
		case 104:
		default:
			legacy_rate_idx = 11;
			break;
		}
		DHD_MM(("mm: legacy rate %d%s Mbps, index=%d\n",
			rate/2, (rate % 2)?".5":"", legacy_rate_idx));
	} else if (encode == WL_RSPEC_ENCODE_HT) {
		/* HT: NSS derived from the MCS index range */
		if (rate <= 7)
			nss_idx = 0;
		else if (rate <= 15)
			nss_idx = 1;
		else if (rate <= 23)
			nss_idx = 2;
		else
			nss_idx = 3;
		DHD_MM(("mm: mcs index %d nss_index %d\n",
			rate, nss_idx+1));
	} else {
		/* VHT: MCS and NSS are separate rspec fields */
		vht_val = TRUE;
		vht = (rspec & WL_RSPEC_VHT_MCS_MASK);
		nss_idx = ((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT) - 1;
		if (nss_idx > 3) {
			DHD_ERROR(("bad nss, exit\n"));
			return;
		}
		DHD_MM(("mm: vht mcs %d Nss %d\n",
			vht, nss_idx+1));
	}
	/* update bw_count */
	g_mmc->bw_count[bw_idx] += count;
	/*update nss_count */
	g_mmc->nss_count[nss_idx] += count;
	/*update gi_count */
	g_mmc->gi_count[sgi?1:0] += count;
	if (legacy_val) {
		/* update legacy rate */
		g_mmc->legacy_rate_count[legacy_rate_idx] += count;
	} else {
		uint32 mcs_idx = 0;
		/* update mcs_count (per-stream index 0-9) */
		if (!vht_val)
			mcs_idx = rate % 8;
		else
			mcs_idx = vht;
		if (mcs_idx > 9) {
			DHD_ERROR(("mm: wrong mcs_idx, exit!\n"));
			return;
		}
		g_mmc->mcs_count[mcs_idx] += count;
		/* pick up mcs index: a flat 0-39 row index into the rate table */
		if (!vht_val) {
			rt_idx_1 = rate + (rate / 8) * 2;
		} else
			rt_idx_1 = vht + (nss_idx * 10);
		if (rt_idx_1 >= 40) {
			DHD_ERROR(("mm: wrong rate table index 1, exit!\n"));
			return;
		}
		/* pick up bw&gi index: column = bandwidth * 2 + short-GI flag */
		rt_idx_2 = bw_idx * 2 + (sgi?1:0);
		if (rt_idx_2 >= 8) {
			DHD_ERROR(("mm: wrong rate table index 2, exit!\n"));
			return;
		}
		DHD_MM(("mm: rate table [%d][%d]\n", rt_idx_1, rt_idx_2));
		/* update rate table */
		g_mmc->rate_table_values[rt_idx_1][rt_idx_2] += count;
	}
	return;
}
/* Apply one firmware metric item (mm) to a station's accumulators, one
 * counter pair (packets + bytes) per reported status bit.
 * Fix: the packets*bytes products were computed in the operands' (narrow)
 * type and only then cast to uint64, so large traffic totals could overflow
 * before widening; widen an operand first.
 * NOTE(review): assumes mm->tx_num/tx_fail_cnt/tx_retry_cnt/bytes are 32-bit
 * counters -- if they are already uint64 the widening is harmless; confirm.
 */
static void dhd_mcs_metric_upd_entry(gwifi_mcs_metric_t *g_mm,
	mm_item_t *mm)
{
	gwifi_mm_count_t *g_mmc = NULL;

	if (!mm || !g_mm)
		return;
	if (mm->status & MCS_METRIC_TXPKT) {
		g_mmc = &g_mm->txpackets;
		dhd_mcs_metric_upd_count(g_mmc, (uint64)mm->tx_num, mm->rate);
		g_mmc = &g_mm->txbytes;
		/* widen before multiplying to avoid 32-bit overflow */
		dhd_mcs_metric_upd_count(g_mmc, (uint64)mm->tx_num * mm->bytes, mm->rate);
		// record the latest tx rate spec for cfg80211 get_station
		g_mm->tx_rspec = mm->rate;
	}
	if (mm->status & MCS_METRIC_TXFAILURE) {
		g_mmc = &g_mm->txfailpackets;
		dhd_mcs_metric_upd_count(g_mmc, (uint64)mm->tx_fail_cnt, mm->rate);
		g_mmc = &g_mm->txfailbytes;
		/* widen before multiplying to avoid 32-bit overflow */
		dhd_mcs_metric_upd_count(g_mmc, (uint64)mm->tx_fail_cnt * mm->bytes, mm->rate);
	}
	if (mm->status & MCS_METRIC_TXRETRY) {
		g_mmc = &g_mm->txretrypackets;
		dhd_mcs_metric_upd_count(g_mmc, (uint64)mm->tx_retry_cnt, mm->rate);
		g_mmc = &g_mm->txretrybytes;
		/* widen before multiplying to avoid 32-bit overflow */
		dhd_mcs_metric_upd_count(g_mmc, (uint64)mm->tx_retry_cnt * mm->bytes, mm->rate);
	}
	if (mm->status & MCS_METRIC_RXPKT) {
		g_mmc = &g_mm->rxpackets;
		dhd_mcs_metric_upd_count(g_mmc, 1, mm->rate);
		g_mmc = &g_mm->rxbytes;
		dhd_mcs_metric_upd_count(g_mmc, mm->bytes, mm->rate);
		// record the latest rx rate spec for cfg80211 get_station
		g_mm->rx_rspec = mm->rate;
		/* update histogram */
		dhd_hist_upd_count(&g_mm->hist, mm->rx_snr,
			mm->rx_noise_flr, mm->rx_signal_lv, mm->rate);
	}
	return;
}
/* Fold every item of a firmware metric report into the matching station's
 * accumulators; drop the report if the station is not registered.
 */
void dhd_mcs_metric_upd(dhd_pub_t *pub, mcs_metric_t *mcs_metric, void *ea)
{
	gwifi_mcs_metric_t *g_mm = dhd_mcs_metric_find_mm(pub, ea);
	int idx;

	if (g_mm == NULL) {
		DHD_ERROR(("mm: entry not found!\n"));
		return;
	}
	for (idx = 0; idx < mcs_metric->count; idx++)
		dhd_mcs_metric_upd_entry(g_mm, &mcs_metric->mm[idx]);
}
/* Register a newly associated station in the metric table.
 * Fixes: (1) the "exceed max num" path logged an error but fell through and
 * wrote to pub->gwifi_mm[-1] (out-of-bounds) -- it must return; (2) the
 * MALLOC result was passed to bzero/memcpy without a NULL check.
 */
void dhd_mcs_metric_addsta(dhd_pub_t *pub, void *ea, u16 ifindex)
{
	int i, index = -1;
	uint8 *eabuf = (uint8 *)ea;

	DHD_MM(("mm: addsta:%02x:%02x:%02x:%02x:%02x:%02x: ifindex=%d\n",
		(uchar)eabuf[0]&0xff, (uchar)eabuf[1]&0xff, (uchar)eabuf[2]&0xff,
		(uchar)eabuf[3]&0xff, (uchar)eabuf[4]&0xff, (uchar)eabuf[5]&0xff,
		ifindex));
	/* Single pass: remember the first free slot and reject duplicates */
	for (i = 0; i < MCS_METRIC_MAX_CLIENT; i++) {
		if ((index == -1) && (pub->gwifi_mm[i] == NULL))
			index = i;
		if ((pub->gwifi_mm[i] != NULL) &&
			!memcmp(ea, &pub->gwifi_mm[i]->mac, ETHER_ADDR_LEN)) {
			DHD_ERROR(("mm: sta exist, exit\n"));
			return;
		}
	}
	if (index == -1) {
		/* Table full: bail out instead of indexing gwifi_mm[-1] */
		DHD_ERROR(("mm: exceed max num, exit\n"));
		return;
	}
	DHD_MM(("mm: alloc new sta, alloc size %lu\n", sizeof(gwifi_mcs_metric_t)));
	pub->gwifi_mm[index] = (gwifi_mcs_metric_t *)MALLOC(pub->osh, sizeof(gwifi_mcs_metric_t));
	if (pub->gwifi_mm[index] == NULL) {
		/* Allocation failure must not be dereferenced below */
		DHD_ERROR(("mm: alloc failed, exit\n"));
		return;
	}
	bzero(pub->gwifi_mm[index], sizeof(gwifi_mcs_metric_t));
	memcpy(pub->gwifi_mm[index]->mac, ea, ETHER_ADDR_LEN);
	pub->gwifi_mm[index]->ifindex = ifindex;
	return;
}
/* Unregister a station by MAC and free its metric entry. */
void dhd_mcs_metric_delsta(dhd_pub_t *pub, void *ea)
{
	int idx;
	uint8 *eabuf = (uint8 *)ea;

	DHD_MM(("mm: delsta:%02x:%02x:%02x:%02x:%02x:%02x:\n",
		(uchar)eabuf[0]&0xff,
		(uchar)eabuf[1]&0xff,
		(uchar)eabuf[2]&0xff,
		(uchar)eabuf[3]&0xff,
		(uchar)eabuf[4]&0xff,
		(uchar)eabuf[5]&0xff));
	for (idx = 0; idx < MCS_METRIC_MAX_CLIENT; idx++) {
		if ((pub->gwifi_mm[idx] != NULL) &&
			!memcmp(ea, &pub->gwifi_mm[idx]->mac, ETHER_ADDR_LEN)) {
			MFREE(pub->osh, pub->gwifi_mm[idx], sizeof(gwifi_mcs_metric_t));
			pub->gwifi_mm[idx] = NULL;
			return;
		}
	}
	DHD_ERROR(("mm: sta no found, exit\n"));
}
/* Unregister the first station bound to 'ifindex' and free its entry. */
void dhd_mcs_metric_delsta_by_ifindex(dhd_pub_t *pub, u16 ifindex)
{
	int idx;

	DHD_MM(("mm: del one sta: ifindex=%d\n", ifindex));
	for (idx = 0; idx < MCS_METRIC_MAX_CLIENT; idx++) {
		uint8 *eabuf;

		if ((pub->gwifi_mm[idx] == NULL) ||
			(pub->gwifi_mm[idx]->ifindex != ifindex)) {
			continue;
		}
		eabuf = (uint8 *)&pub->gwifi_mm[idx]->mac;
		DHD_MM(("mm: delsta:%02x:%02x:%02x:%02x:%02x:%02x:\n",
			(uchar)eabuf[0]&0xff, (uchar)eabuf[1]&0xff,
			(uchar)eabuf[2]&0xff, (uchar)eabuf[3]&0xff,
			(uchar)eabuf[4]&0xff, (uchar)eabuf[5]&0xff));
		MFREE(pub->osh, pub->gwifi_mm[idx], sizeof(gwifi_mcs_metric_t));
		pub->gwifi_mm[idx] = NULL;
		return;
	}
	DHD_ERROR(("mm: sta no found, exit\n"));
}
#endif /* GWIFI_SUPPORT */
#ifdef WL_QOS_CTRL
int
dhd_dev_set_pkt_lifetime(struct net_device *dev, const void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "lifetime", (char*)data,
len, 1);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to execute pkt lifetime %d\n", __FUNCTION__, err));
}
return err;
}
int
dhd_dev_get_pkt_lifetime(struct net_device *dev, void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "lifetime", data,
len, 0);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to execute pkt lifetime %d\n", __FUNCTION__, err));
}
return err;
}
/* Set the short retry limit for one WME access category.
 * data: byte 0 = access category, byte 1 = new short retry limit.
 * Fix: 'ac' comes from the caller and was used unchecked to index
 * AC_COUNT-sized arrays (out-of-bounds write); 'len' was never validated.
 */
int
dhd_dev_set_ac_srl(struct net_device *dev, const void *data, int len)
{
	int err = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wme_tx_params_t cur_params[AC_COUNT], new_params[AC_COUNT];
	uint8 *pbuf = (uint8*)data;
	uint8 ac, srl;

	if (len < 2) {
		DHD_ERROR(("%s : bad buffer len %d\n", __FUNCTION__, len));
		return -1; /* BCME_ERROR */
	}
	ac = pbuf[0];
	srl = pbuf[1];
	if (ac >= AC_COUNT) {
		DHD_ERROR(("%s : invalid access category %d\n", __FUNCTION__, ac));
		return -1; /* BCME_ERROR */
	}
	/* Read-modify-write: fetch all AC params, change one, write back */
	err = dhd_iovar(&(dhd->pub), 0, "wme_tx_params", (void*)&cur_params,
		sizeof(wme_tx_params_t)*AC_COUNT, 0);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		DHD_ERROR(("%s : Failed to read tx params %d\n", __FUNCTION__, err));
		return err;
	}
	memcpy(&new_params, &cur_params, sizeof(new_params));
	new_params[ac].short_retry = srl;
	err = dhd_iovar(&(dhd->pub), 0, "wme_tx_params", (void*)&new_params,
		sizeof(wme_tx_params_t)*AC_COUNT, 1);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		DHD_ERROR(("%s : Failed to execute srl %d\n", __FUNCTION__, err));
	}
	return err;
}
/* Get the short retry limit for one WME access category.
 * data: byte 0 (in) = access category, byte 1 (out) = short retry limit.
 * Fix: 'ac' was used unchecked to index an AC_COUNT-sized array (OOB read),
 * and byte 1 was written without verifying the buffer is large enough.
 */
int
dhd_dev_get_ac_srl(struct net_device *dev, void *data, int len)
{
	int err = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wme_tx_params_t cur_params[AC_COUNT];
	uint8 *pbuf = (uint8*)data;
	uint8 ac;

	if (len < 2) {
		DHD_ERROR(("%s : bad buffer len %d\n", __FUNCTION__, len));
		return -1; /* BCME_ERROR */
	}
	ac = pbuf[0];
	if (ac >= AC_COUNT) {
		DHD_ERROR(("%s : invalid access category %d\n", __FUNCTION__, ac));
		return -1; /* BCME_ERROR */
	}
	err = dhd_iovar(&(dhd->pub), 0, "wme_tx_params", (void*)&cur_params,
		sizeof(wme_tx_params_t)*AC_COUNT, 0);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		DHD_ERROR(("%s : Failed to read tx params %d\n", __FUNCTION__, err));
		return err;
	}
	pbuf[1] = cur_params[ac].short_retry;
	return err;
}
/* Set the long retry limit for one WME access category.
 * data: byte 0 = access category, byte 1 = new long retry limit.
 * Fix: 'ac' comes from the caller and was used unchecked to index
 * AC_COUNT-sized arrays (out-of-bounds write); 'len' was never validated.
 */
int
dhd_dev_set_ac_lrl(struct net_device *dev, const void *data, int len)
{
	int err = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wme_tx_params_t cur_params[AC_COUNT], new_params[AC_COUNT];
	uint8 *pbuf = (uint8*)data;
	uint8 ac, lrl;

	if (len < 2) {
		DHD_ERROR(("%s : bad buffer len %d\n", __FUNCTION__, len));
		return -1; /* BCME_ERROR */
	}
	ac = pbuf[0];
	lrl = pbuf[1];
	if (ac >= AC_COUNT) {
		DHD_ERROR(("%s : invalid access category %d\n", __FUNCTION__, ac));
		return -1; /* BCME_ERROR */
	}
	/* Read-modify-write: fetch all AC params, change one, write back */
	err = dhd_iovar(&(dhd->pub), 0, "wme_tx_params", (void*)&cur_params,
		sizeof(wme_tx_params_t)*AC_COUNT, 0);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		DHD_ERROR(("%s : Failed to read tx params %d\n", __FUNCTION__, err));
		return err;
	}
	memcpy(&new_params, &cur_params, sizeof(new_params));
	new_params[ac].long_retry = lrl;
	err = dhd_iovar(&(dhd->pub), 0, "wme_tx_params", (void*)&new_params,
		sizeof(wme_tx_params_t)*AC_COUNT, 1);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		DHD_ERROR(("%s : Failed to execute lrl %d\n", __FUNCTION__, err));
	}
	return err;
}
/* Get the long retry limit for one WME access category.
 * data: byte 0 (in) = access category, byte 1 (out) = long retry limit.
 * Fix: 'ac' was used unchecked to index an AC_COUNT-sized array (OOB read),
 * and byte 1 was written without verifying the buffer is large enough.
 */
int
dhd_dev_get_ac_lrl(struct net_device *dev, void *data, int len)
{
	int err = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wme_tx_params_t cur_params[AC_COUNT];
	uint8 *pbuf = (uint8*)data;
	uint8 ac;

	if (len < 2) {
		DHD_ERROR(("%s : bad buffer len %d\n", __FUNCTION__, len));
		return -1; /* BCME_ERROR */
	}
	ac = pbuf[0];
	if (ac >= AC_COUNT) {
		DHD_ERROR(("%s : invalid access category %d\n", __FUNCTION__, ac));
		return -1; /* BCME_ERROR */
	}
	err = dhd_iovar(&(dhd->pub), 0, "wme_tx_params", (void*)&cur_params,
		sizeof(wme_tx_params_t)*AC_COUNT, 0);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		DHD_ERROR(("%s : Failed to read tx params %d\n", __FUNCTION__, err));
		return err;
	}
	pbuf[1] = cur_params[ac].long_retry;
	return err;
}
int
dhd_dev_set_wme_noack(struct net_device *dev, const void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "wme_noack", (char*)data,
len, 1);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to execute wme noack %d\n", __FUNCTION__, err));
}
return err;
}
int
dhd_dev_get_wme_noack(struct net_device *dev, void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "wme_noack", data,
len, 0);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to get wme noack %d\n", __FUNCTION__, err));
}
return err;
}
int
dhd_dev_set_wme_apsd(struct net_device *dev, const void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "wme_apsd", (char*)data,
len, 1);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to execute wme apsd %d\n", __FUNCTION__, err));
}
return err;
}
int
dhd_dev_get_wme_apsd(struct net_device *dev, void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "wme_apsd", data,
len, 0);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to get wme apsd %d\n", __FUNCTION__, err));
}
return err;
}
int
dhd_dev_set_ampdu_per_tid(struct net_device *dev, const void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "ampdu_tid", (void*)data,
len, 1);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to execute ampdu_tid %d\n", __FUNCTION__, err));
}
return err;
}
int
dhd_dev_get_ampdu_per_tid(struct net_device *dev, void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "ampdu_tid", data,
len, 0);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to execute get ampdu_tid %d\n", __FUNCTION__, err));
}
return err;
}
int
dhd_dev_set_ampdu_mpdu(struct net_device *dev, const void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "ampdu_mpdu", (char*)data,
len, 1);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to execute ampdu mpdu%d\n", __FUNCTION__, err));
}
return err;
}
int
dhd_dev_get_ampdu_mpdu(struct net_device *dev, void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "ampdu_mpdu", data,
len, 0);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to get ampdu mpdu %d\n", __FUNCTION__, err));
}
return err;
}
/* Set the "ampdu_rts" iovar on primary interface 0.
 * Returns the dhd_iovar() result; BCME_UNSUPPORTED is not logged as an error.
 */
int
dhd_dev_set_ampdu_rts(struct net_device *dev, const void *data, int len)
{
	int err = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	err = dhd_iovar(&(dhd->pub), 0, "ampdu_rts", (char *)data, len, 1);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		/* Log-format fix: a space was missing before the error code,
		 * which printed e.g. "ampdu rts-1".
		 */
		DHD_ERROR(("%s : Failed to execute ampdu rts %d\n", __FUNCTION__, err));
	}
	return err;
}
int
dhd_dev_get_ampdu_rts(struct net_device *dev, void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "ampdu_rts", data,
len, 0);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to get ampdu rts %d\n", __FUNCTION__, err));
}
return err;
}
/* Set the "ampdu" iovar on primary interface 0.
 * Returns the dhd_iovar() result; BCME_UNSUPPORTED is not logged as an error.
 */
int
dhd_dev_set_ampdu(struct net_device *dev, const void *data, int len)
{
	int err = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	err = dhd_iovar(&(dhd->pub), 0, "ampdu", (char *)data, len, 1);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		/* Log fix: the message previously said "ampdu mpdu" (copy-paste
		 * from dhd_dev_set_ampdu_mpdu) and lacked a space before the
		 * error code; this function sets the "ampdu" iovar.
		 */
		DHD_ERROR(("%s : Failed to execute ampdu %d\n", __FUNCTION__, err));
	}
	return err;
}
int
dhd_dev_get_ampdu(struct net_device *dev, void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "ampdu", data,
len, 0);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to get ampdu %d\n", __FUNCTION__, err));
}
return err;
}
/* Set the "rtsthresh" iovar on primary interface 0.
 * Returns the dhd_iovar() result; BCME_UNSUPPORTED is not logged as an error.
 */
int
dhd_dev_set_rtsthresh(struct net_device *dev, const void *data, int len)
{
	int err = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	err = dhd_iovar(&(dhd->pub), 0, "rtsthresh", (char *)data, len, 1);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		/* Log-format fix: a space was missing before the error code,
		 * which printed e.g. "rtsthresh-1".
		 */
		DHD_ERROR(("%s : Failed to execute rtsthresh %d\n", __FUNCTION__, err));
	}
	return err;
}
int
dhd_dev_get_rtsthresh(struct net_device *dev, void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "rtsthresh", data,
len, 0);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to get rtsthresh %d\n", __FUNCTION__, err));
}
return err;
}
/* Set the "2g_rate" iovar on primary interface 0.
 * Returns the dhd_iovar() result; BCME_UNSUPPORTED is not logged as an error.
 */
int
dhd_dev_set_2g_rate(struct net_device *dev, const void *data, int len)
{
	int err = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	err = dhd_iovar(&(dhd->pub), 0, "2g_rate", (char *)data, len, 1);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		/* Log-format fix: a space was missing before the error code,
		 * which printed e.g. "2g_rate-1".
		 */
		DHD_ERROR(("%s : Failed to execute 2g_rate %d\n", __FUNCTION__, err));
	}
	return err;
}
int
dhd_dev_get_2g_rate(struct net_device *dev, void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "2g_rate", data,
len, 0);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to get 2g_rate %d\n", __FUNCTION__, err));
}
return err;
}
/* Set the "5g_rate" iovar on primary interface 0.
 * Returns the dhd_iovar() result; BCME_UNSUPPORTED is not logged as an error.
 */
int
dhd_dev_set_5g_rate(struct net_device *dev, const void *data, int len)
{
	int err = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	err = dhd_iovar(&(dhd->pub), 0, "5g_rate", (char *)data, len, 1);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		/* Log-format fix: a space was missing before the error code,
		 * which printed e.g. "5g_rate-1".
		 */
		DHD_ERROR(("%s : Failed to execute 5g_rate %d\n", __FUNCTION__, err));
	}
	return err;
}
int
dhd_dev_get_5g_rate(struct net_device *dev, void *data, int len)
{
int err = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
err = dhd_iovar(&(dhd->pub), 0, "5g_rate", data,
len, 0);
if (err < 0 && err != BCME_UNSUPPORTED) {
DHD_ERROR(("%s : Failed to get 5g_rate %d\n", __FUNCTION__, err));
}
return err;
}
#endif /* WL_QOS_CTRL */
#ifdef PCIE_FULL_DONGLE
/** Dummy objects are defined with state representing bad|down.
* Performance gains from reducing branch conditionals, instruction parallelism,
* dual issue, reducing load shadows, avail of larger pipelines.
* Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
* is accessed via the dhd_sta_t.
*/
/* Dummy dhd_info object.
 * Statically initialized so that its embedded pub reads as not-up with the bus
 * down (up == FALSE, busstate == DHD_BUS_DOWN); code reaching it through
 * DHD_INFO_NULL / DHD_PUB_NULL sees an inert object rather than a NULL pointer.
 */
dhd_info_t dhd_info_null = {
#if defined(BCM_GMAC3)
	.fwdh = FWDER_NULL,
#endif
	.pub = {
		.info = &dhd_info_null,	/* pub's back-pointer to its container */
#ifdef DHDTCPACK_SUPPRESS
		.tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
		.up = FALSE,
		.busstate = DHD_BUS_DOWN
	}
};
/* Sentinel pointers to the dummy info object and its embedded pub. */
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL (&dhd_info_null.pub)
/* Dummy netdevice object: registration state is permanently
 * NETREG_UNREGISTERED, marking the device as not registered with the
 * network stack.
 */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
/* Dummy dhd_if object: refers to the dummy info and netdev objects and
 * carries the invalid interface index DHD_BAD_IF.
 */
dhd_if_t dhd_if_null = {
#if defined(BCM_GMAC3)
	.fwdh = FWDER_NULL,
#endif
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
#define DHD_IF_NULL (&dhd_if_null)
/* Unlike the objects above, a station pointer uses a plain NULL sentinel. */
#define DHD_STA_NULL ((dhd_sta_t *)NULL)
/** Interface STA list management. */
/** Fetch the dhd_if object, given the interface index in the dhd. */
static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
static void dhd_if_del_sta_list(dhd_if_t * ifp);
static void dhd_if_flush_sta(dhd_if_t * ifp);
/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
/* Return interface pointer */
/* Return the dhd_if object for interface index 'ifidx', or NULL when the
 * index is out of range (ASSERTs first in debug builds).
 */
static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
	ASSERT(ifidx < DHD_MAX_IFS);
	return (ifidx < DHD_MAX_IFS) ? dhdp->info->iflist[ifidx] : NULL;
}
/** Reset a dhd_sta object and free into the dhd pool.
 * Drains every per-priority flow queue owned by the station, returns the
 * station id to the id16 allocator, and resets the object to the "free"
 * state that dhd_sta_alloc() asserts on (idx == ID16_INVALID,
 * ifp == DHD_IF_NULL, ifidx == DHD_BAD_IF).
 */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
	int prio;

	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	/*
	 * Flush and free all packets in all flowring's queues belonging to sta.
	 * Packets in flow ring will be flushed later.
	 */
	for (prio = 0; prio < (int)NUMPRIO; prio++) {
		uint16 flowid = sta->flowid[prio];

		if (flowid != FLOWID_INVALID) {
			unsigned long flags;
			flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
			flow_ring_node_t * flow_ring_node;

#ifdef DHDTCPACK_SUPPRESS
			/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
			 * when there is a newly coming packet from network stack.
			 */
			dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */

			flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
			/* Mark the ring as freeing and drain every queued packet,
			 * all under the flowring lock.
			 */
			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
			flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;

			if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
				void * pkt;
				while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
					PKTFREE(dhdp->osh, pkt, TRUE);
				}
			}

			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
		}

		sta->flowid[prio] = FLOWID_INVALID;
	}

	/* Return the station id and reset all fields to the canonical
	 * free state; idx is cleared last.
	 */
	id16_map_free(dhdp->staid_allocator, sta->idx);
	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
	sta->ifidx = DHD_BAD_IF;
	bzero(sta->ea.octet, ETHER_ADDR_LEN);
	INIT_LIST_HEAD(&sta->list);
	sta->idx = ID16_INVALID; /* implying free */
}
/** Allocate a dhd_sta object from the dhd pool.
 * Returns DHD_STA_NULL when no free station id is available.
 */
static dhd_sta_t *
dhd_sta_alloc(dhd_pub_t * dhdp)
{
	dhd_sta_pool_t * pool;
	dhd_sta_t * sta;
	uint16 staid;

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	/* A station id doubles as the index of the object in the pool. */
	staid = id16_map_alloc(dhdp->staid_allocator);
	if (staid == ID16_INVALID) {
		DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
		return DHD_STA_NULL;
	}

	pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
	sta = &pool[staid];

	/* The slot must still be in the freed state left by dhd_sta_free(). */
	ASSERT((sta->idx == ID16_INVALID) &&
	       (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));

	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->idx = staid; /* implying allocated */
	return sta;
}
/** Delete all STAs in an interface's STA list. */
static void
dhd_if_del_sta_list(dhd_if_t *ifp)
{
dhd_sta_t *sta, *next;
unsigned long flags;
DHD_IF_STA_LIST_LOCK(ifp, flags);
list_for_each_entry_safe(sta, next,