/**
* @file definition of host message ring functionality
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
* Copyright 1999-2016, Broadcom Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* This software is provided by the copyright holder "as is" and any express or
* implied warranties, including, but not limited to, the implied warranties of
* merchantability and fitness for a particular purpose are disclaimed. In no event
* shall copyright holder be liable for any direct, indirect, incidental, special,
* exemplary, or consequential damages (including, but not limited to, procurement
* of substitute goods or services; loss of use, data, or profits; or business
* interruption) however caused and on any theory of liability, whether in
* contract, strict liability, or tort (including negligence or otherwise) arising
* in any way out of the use of this software, even if advised of the possibility
* of such damage.
*
*
* <<Broadcom-WL-IPTag/Open:>>
*
* $Id: dhd_msgbuf.c 605475 2015-12-10 12:49:49Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmmsgbuf.h>
#include <bcmendian.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_proto.h>
#include <dhd_bus.h>
#include <dhd_dbg.h>
#include <siutils.h>
#include <dhd_flowring.h>
#include <pcie_core.h>
#include <bcmpcie.h>
#include <dhd_pcie.h>
#if defined(DHD_LB)
#include <linux/cpu.h>
#include <bcm_ring.h>
#define DHD_LB_WORKQ_SZ (8192)
#define DHD_LB_WORKQ_SYNC (16)
#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
#endif /* DHD_LB */
/**
* Host configures a soft doorbell for d2h rings, by specifying a 32bit host
* address where a value must be written. Host may also configure interrupt
* coalescing on this soft doorbell.
* Use Case: Hosts with network processors, may register with the dongle the
* network processor's thread wakeup register and a value corresponding to the
* core/thread context. Dongle will issue a write transaction <address,value>
* to the PCIE RC, which the host will need to route to the mapped register
* space.
*/
/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
/* Dependency Check */
#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
#define DEFAULT_RX_BUFFERS_TO_POST 256
#define RXBUFPOST_THRESHOLD 32
#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */
#define DHD_STOP_QUEUE_THRESHOLD 200
#define DHD_START_QUEUE_THRESHOLD 100
#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 bytes */
#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
#define FLOWRING_SIZE (H2DRING_TXPOST_MAX_ITEM * H2DRING_TXPOST_ITEMSIZE)
/* flags for ioctl pending status */
#define MSGBUF_IOCTL_ACK_PENDING (1<<0)
#define MSGBUF_IOCTL_RESP_PENDING (1<<1)
#define DMA_ALIGN_LEN 4
#define DMA_D2H_SCRATCH_BUF_LEN 8
#define DMA_XFER_LEN_LIMIT 0x400000
#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
#define DHD_FLOWRING_MAX_EVENTBUF_POST 8
#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
#define DHD_PROT_FUNCS 37
/* Length of buffer in host for bus throughput measurement */
#define DHD_BUS_TPUT_BUF_LEN 2048
#define TXP_FLUSH_NITEMS
/* optimization to write "n" tx items at a time to ring */
#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
#define RING_NAME_MAX_LENGTH 24
struct msgbuf_ring; /* ring context for common and flow rings */
/**
* PCIE D2H DMA Complete Sync Modes
*
* Firmware may interrupt the host before the D2H Mem2Mem DMA completes into
* host system memory. A WAR using one of 4 approaches is needed:
* 1. Dongle places a modulo-253 seqnum in the last word of each D2H message.
* 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
* and writes it in the last word of each work item. Each work item carries a
* seqnum = sequence number % 253 in its epoch field.
*
* 3. Read Barrier: Dongle does a host memory read access prior to posting an
* interrupt, ensuring that D2H data transfer indeed completed.
* 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
* ring contents before the indices.
*
* Host does not sync for DMA to complete with option #3 or #4, and a noop sync
* callback (see dhd_prot_d2h_sync_none) may be bound.
*
* Dongle advertises the host side sync mechanism requirements.
*/
#define PCIE_D2H_SYNC
#if defined(PCIE_D2H_SYNC)
#define PCIE_D2H_SYNC_WAIT_TRIES (512UL)
#define PCIE_D2H_SYNC_NUM_OF_STEPS (3UL)
#define PCIE_D2H_SYNC_DELAY (50UL) /* in terms of usecs */
/**
* Custom callback attached based upon the D2H DMA Sync mode advertised by the dongle.
*
* On success: return cmn_msg_hdr_t::msg_type
* On failure: return 0 (invalid msg_type)
*/
typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
#endif /* PCIE_D2H_SYNC */
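/*
 * Illustrative sketch (not the actual consumer loop) of how a bound
 * d2h_sync_cb_t is meant to be used before dispatching a D2H work item.
 * 'read_addr' is assumed here to point at the next unread work item.
 *
 *   volatile cmn_msg_hdr_t *msg = (volatile cmn_msg_hdr_t *)read_addr;
 *   uint8 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, ring->item_len);
 *   if (msg_type == MSG_TYPE_INVALID)
 *       return;                // DMA never completed; the item was skipped
 *   table_lookup[msg_type](dhd, (void *)msg);
 */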
/*
* +----------------------------------------------------------------------------
*
* RingIds and FlowId are not equivalent as ringids include D2H rings whereas
* flowids do not.
*
* Dongle advertises the max H2D rings as max_sub_queues = 'N', which includes
* the H2D common rings as well as the (N - BCMPCIE_H2D_COMMON_MSGRINGS) flowrings.
*
* Here is a sample mapping (based on PCIE Full Dongle Rev5), where
* BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings, and
* BCMPCIE_COMMON_MSGRINGS = 5, i.e. the 2 H2D plus 3 D2H common rings.
*
* H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated
* H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated
*
* D2H Control Complete RingId = 2
* D2H Transmit Complete RingId = 3
* D2H Receive Complete RingId = 4
*
* H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring)
* H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring)
* H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = 2 + (N-1) (Nth flowring)
*
* When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
* unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
*
* Example: when a system supports 4 bc/mc and 128 uc flowrings, with
* BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
* FlowId values would be in the range [2..133] and the corresponding
* RingId values would be in the range [5..136].
*
* The FlowId allocator may choose to allocate FlowIds:
* bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
* X# of uc flowids in consecutive ranges (per station Id), where X is the
* packet's access category (e.g. 4 uc flowids per station).
*
* CAUTION:
* When the DMA indices array feature is used, RingId=5, corresponding to the 0th
* FLOWRING, will actually use its FlowId as the index into the H2D DMA indices
* array, since the FlowId truly represents the position in that array.
*
* Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
* will represent the index in the D2H DMA indices array.
*
* +----------------------------------------------------------------------------
*/
/* First TxPost Flowring Id */
#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
/* Determine whether a ringid belongs to a TxPost flowring */
#define DHD_IS_FLOWRING(ringid) \
((ringid) >= BCMPCIE_COMMON_MSGRINGS)
/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
#define DHD_FLOWID_TO_RINGID(flowid) \
(BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
/* Convert a MsgBuf RingId to a H2D TxPost FlowId */
#define DHD_RINGID_TO_FLOWID(ringid) \
(BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
* This may be used for the H2D DMA WR index array or H2D DMA RD index array or
* any array of H2D rings.
*/
#define DHD_H2D_RING_OFFSET(ringid) \
((DHD_IS_FLOWRING(ringid)) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
* This may be used for the D2H DMA WR index array or D2H DMA RD index array or
* any array of D2H rings.
*/
#define DHD_D2H_RING_OFFSET(ringid) \
((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS)
/* Convert a D2H DMA Indices Offset to a RingId */
#define DHD_D2H_RINGID(offset) \
((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
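/*
 * Worked example of the conversions above, assuming the Rev5 layout described
 * earlier (BCMPCIE_H2D_COMMON_MSGRINGS = 2, BCMPCIE_COMMON_MSGRINGS = 5):
 *
 *   DHD_IS_FLOWRING(1)       -> FALSE  (H2D RxPost common ring)
 *   DHD_IS_FLOWRING(5)       -> TRUE   (1st TxPost flowring)
 *   DHD_FLOWID_TO_RINGID(2)  -> 5
 *   DHD_RINGID_TO_FLOWID(6)  -> 3
 *   DHD_H2D_RING_OFFSET(6)   -> 3      (flowring uses its FlowId as offset)
 *   DHD_D2H_RING_OFFSET(3)   -> 1      (D2H Tx Complete ring)
 *   DHD_D2H_RINGID(1)        -> 3
 */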
#define DHD_DMAH_NULL ((void*)NULL)
/*
* Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
* buffer does not occupy the entire cacheline, and another object is placed
* following the DMA-able buffer, data corruption may occur when the DMA-able
* buffer is DMAed into (e.g. D2H direction) and HW cache coherency
* is not available.
*/
#if defined(L1_CACHE_BYTES)
#define DHD_DMA_PAD (L1_CACHE_BYTES)
#else
#define DHD_DMA_PAD (128)
#endif
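/*
 * Sizing example for the pad above (assuming DHD_DMA_PAD = 128): a 2052 byte
 * request is allocated as 2052 + 128 bytes, while a 2048 byte request (already
 * a multiple of 128) gets no pad; see dhd_dma_buf_alloc() below.
 */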
/* Used in loopback tests */
typedef struct dhd_dmaxfer {
dhd_dma_buf_t srcmem;
dhd_dma_buf_t dstmem;
uint32 srcdelay;
uint32 destdelay;
uint32 len;
bool in_progress;
} dhd_dmaxfer_t;
/**
* msgbuf_ring : This object manages the host side ring that includes a DMA-able
* buffer, the WR and RD indices, ring parameters such as the max number of items
* and the length of each item, and other miscellaneous runtime state.
* A msgbuf_ring may be used to represent a H2D or D2H common ring or a
* H2D TxPost ring as specified in the PCIE FullDongle Spec.
* Ring parameters are conveyed to the dongle, which maintains its own peer end
* ring state. Depending on whether the DMA Indices feature is supported, the
* host will update the WR/RD index in the DMA indices array in host memory or
* directly in dongle memory.
*/
typedef struct msgbuf_ring {
bool inited;
uint16 idx; /* ring id */
uint16 rd; /* read index */
uint16 curr_rd; /* read index for debug */
uint16 wr; /* write index */
uint16 max_items; /* maximum number of items in ring */
uint16 item_len; /* length of each item in the ring */
sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */
dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
uint32 seqnum; /* next expected item's sequence number */
#ifdef TXP_FLUSH_NITEMS
void *start_addr;
/* # of messages on ring not yet announced to dongle */
uint16 pend_items_count;
#endif /* TXP_FLUSH_NITEMS */
uchar name[RING_NAME_MAX_LENGTH];
} msgbuf_ring_t;
#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
#define DHD_RING_END_VA(ring) \
((uint8 *)(DHD_RING_BGN_VA((ring))) + \
(((ring)->max_items - 1) * (ring)->item_len))
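/*
 * Address arithmetic note: the virtual address of work item 'i' in a ring is
 *   (uint8 *)DHD_RING_BGN_VA(ring) + ((i) * (ring)->item_len)
 * so DHD_RING_END_VA() is the address of the last item (i == max_items - 1),
 * not one byte past the end of the DMA-able buffer.
 */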
/** DHD protocol handle. Is an opaque type to other DHD software layers. */
typedef struct dhd_prot {
osl_t *osh; /* OSL handle */
uint16 rxbufpost;
uint16 max_rxbufpost;
uint16 max_eventbufpost;
uint16 max_ioctlrespbufpost;
uint16 cur_event_bufs_posted;
uint16 cur_ioctlresp_bufs_posted;
/* Flow control mechanism based on active transmits pending */
uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
uint16 max_tx_count;
uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
/* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */
uint32 rx_dataoffset;
dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
/* ioctl related resources */
uint8 ioctl_state;
int16 ioctl_status; /* status returned from dongle */
uint16 ioctl_resplen;
dhd_ioctl_recieved_status_t ioctl_received;
uint curr_ioctl_cmd;
dhd_dma_buf_t retbuf; /* For holding ioctl response */
dhd_dma_buf_t ioctbuf; /* For holding ioctl request */
dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */
/* DMA-able arrays for holding WR and RD indices */
uint32 rw_index_sz; /* Size of a RD or WR index in dongle */
dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */
dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */
dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */
dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */
dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
uint32 flowring_num;
#if defined(PCIE_D2H_SYNC)
d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
ulong d2h_sync_wait_tot; /* total wait loops */
#endif /* PCIE_D2H_SYNC */
dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
uint16 ioctl_seq_no;
uint16 data_seq_no;
uint16 ioctl_trans_id;
void *pktid_map_handle; /* a pktid maps to a packet and its metadata */
bool metadata_dbg;
void *pktid_map_handle_ioctl;
/* Applications/utilities can read tx and rx metadata using IOVARs */
uint16 rx_metadata_offset;
uint16 tx_metadata_offset;
#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
/* Host's soft doorbell configuration */
bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
#if defined(DHD_LB)
/* Work Queues to be used by the producer and the consumer, and threshold
* when the WRITE index must be synced to consumer's workq
*/
#if defined(DHD_LB_TXC)
uint32 tx_compl_prod_sync ____cacheline_aligned;
bcm_workq_t tx_compl_prod, tx_compl_cons;
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
uint32 rx_compl_prod_sync ____cacheline_aligned;
bcm_workq_t rx_compl_prod, rx_compl_cons;
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
} dhd_prot_t;
/* Convert a dmaaddr_t to a base_addr with htol operations */
static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
/* APIs for managing a DMA-able buffer */
static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
/* msgbuf ring management */
static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
/* Fetch and Release a flowring msgbuf_ring from flowring pool */
static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
uint16 flowid);
/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
/* Producer: Allocate space in a msgbuf ring */
static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
uint16 nitems, uint16 *alloced, bool exactly_nitems);
static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
uint16 *alloced, bool exactly_nitems);
/* Consumer: Determine the location where the next message may be consumed */
static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
uint32 *available_len);
/* Producer (WR index update) or Consumer (RD index update) indication */
static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
void *p, uint16 len);
static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
/* Allocate DMA-able memory for saving H2D/D2H WR/RD indices */
static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
dhd_dma_buf_t *dma_buf, uint32 bufsz);
/* Set/Get a RD or WR index in the array of indices */
/* See also: dhd_prot_dma_indx_init() */
static void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
uint16 ringid);
static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
/* Locate a packet given a pktid */
static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
bool free_pktid);
/* Locate a packet given a PktId and free it. */
static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
void *buf, uint len, uint8 action);
static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
void *buf, uint len, uint8 action);
static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
void *buf, int ifidx);
/* Post buffers for Rx, control ioctl response and events */
static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post);
static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
/* D2H Message handling */
static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
/* D2H Message handlers */
static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
/* Loopback test with dongle */
static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
uint destdelay, dhd_dmaxfer_t *dma);
static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
/* Flowring management communication with dongle */
static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
/* Configure a soft doorbell per D2H ring */
static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
static void dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg);
typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
/** callback functions for messages generated by the dongle */
#define MSG_TYPE_INVALID 0
static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
NULL,
dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
NULL,
dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
NULL,
dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
NULL,
dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
NULL,
dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
NULL,
dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
NULL,
dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
NULL,
dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */
NULL,
dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
NULL, /* MSG_TYPE_FLOW_RING_RESUME */
NULL, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
NULL, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
NULL, /* MSG_TYPE_INFO_BUF_POST */
NULL, /* MSG_TYPE_INFO_BUF_CMPLT */
NULL, /* MSG_TYPE_H2D_RING_CREATE */
NULL, /* MSG_TYPE_D2H_RING_CREATE */
NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
NULL, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
NULL, /* MSG_TYPE_H2D_RING_CONFIG */
NULL, /* MSG_TYPE_D2H_RING_CONFIG */
NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
dhd_prot_d2h_ring_config_cmplt_process, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
NULL, /* MSG_TYPE_D2H_MAILBOX_DATA */
};
#ifdef DHD_RX_CHAINING
#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
(!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
!ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
!eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
!eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
(((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))) && \
dhd_l2_filter_chainable((dhd), (evh), (ifidx)))
static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
#define DHD_PKT_CTF_MAX_CHAIN_LEN 64
#endif /* DHD_RX_CHAINING */
static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
#if defined(PCIE_D2H_SYNC) /* avoids problems related to host CPU cache */
/**
* D2H DMA to completion callback handlers. Based on the mode advertised by the
* dongle through the PCIE shared region, the appropriate callback will be
* registered in the proto layer to be invoked prior to processing any message
* from a D2H DMA ring. If the dongle uses a read barrier or another mode that
* does not require host participation, then a noop callback handler will be
* bound that simply returns the msg_type.
*/
static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring,
uint32 tries, uchar *msg, int msglen);
static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
void dhd_prot_collect_memdump(dhd_pub_t *dhd)
{
DHD_ERROR(("%s(): Collecting mem dump now \r\n", __FUNCTION__));
#ifdef DHD_FW_COREDUMP
if (dhd->memdump_enabled) {
/* collect core dump */
dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
dhd_bus_mem_dump(dhd);
}
#endif /* DHD_FW_COREDUMP */
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
dhd->bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
dhd_os_send_hang_message(dhd);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
}
/**
* dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
* not completed, a livelock condition occurs. Host will avert this livelock by
* dropping this message and moving to the next. This dropped message can lead
* to a packet leak, or even to something disastrous if the dropped
* message happens to be a control response.
* Here we log this condition. One may choose to reboot the dongle.
*/
static void
dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries,
uchar *msg, int msglen)
{
uint32 seqnum = ring->seqnum;
DHD_ERROR(("LIVELOCK DHD<%p> name<%s> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>"
"dma_buf va<%p> msg<%p> curr_rd<%d>\n",
dhd, ring->name, seqnum, seqnum% D2H_EPOCH_MODULO, tries,
dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
ring->dma_buf.va, msg, ring->curr_rd));
prhex("D2H MsgBuf Failure", (uchar *)msg, msglen);
dhd_dump_to_kernelog(dhd);
#ifdef DHD_FW_COREDUMP
if (dhd->memdump_enabled) {
/* collect core dump */
dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
dhd_bus_mem_dump(dhd);
}
#endif /* DHD_FW_COREDUMP */
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
dhd->bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
dhd_os_send_hang_message(dhd);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
}
/**
* dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
* mode. Sequence number is always in the last word of a message.
*/
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen)
{
uint32 tries;
uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
int num_words = msglen / sizeof(uint32); /* num of 32bit words */
volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */
dhd_prot_t *prot = dhd->prot;
uint32 step = 0;
uint32 delay = PCIE_D2H_SYNC_DELAY;
uint32 total_tries = 0;
ASSERT(msglen == ring->item_len);
BCM_REFERENCE(delay);
/*
* For retries we have to make some sort of stepper algorithm.
* We see that every time when the Dongle comes out of the D3
* Cold state, the first D2H mem2mem DMA takes more time to
* complete, leading to livelock issues.
*
* Case 1 - Apart from the Host CPU, some other bus master is
* accessing the DDR port, probably a page close to the ring,
* so PCIe does not get a chance to update the memory.
* Solution - Increase the number of tries.
*
* Case 2 - The 50usec breathing time given by the Host CPU is not
* sufficient for the PCIe RC to start its work.
* Solution: Increase the delay in a stepper fashion.
* This is done to ensure that no unwanted extra delay is
* introduced in normal conditions.
*/
for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
uint32 msg_seqnum = *marker;
if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
ring->seqnum++; /* next expected sequence number */
goto dma_completed;
}
total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
if (total_tries > prot->d2h_sync_wait_max)
prot->d2h_sync_wait_max = total_tries;
OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
/* For ARM there is no pause in cpu_relax, so add extra delay */
OSL_DELAY(delay * step);
#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */
} /* for PCIE_D2H_SYNC_WAIT_TRIES */
} /* for number of steps */
dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen);
ring->seqnum++; /* skip this message ... leak of a pktid */
return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
dma_completed:
prot->d2h_sync_wait_tot += total_tries;
return msg->msg_type;
}
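/*
 * Rough worst-case wait budget for the stepper above, assuming a build where
 * OSL_DELAY() is compiled in (e.g. CONFIG_ARCH_MSM8996): 3 steps of 512 tries
 * each, with per-try delays of 50us, 100us and 150us for steps 1..3, i.e.
 * roughly 512 * (50 + 100 + 150) = ~153.6 ms before the livelock handler runs.
 */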
/**
* dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
* mode. The xorcsum is placed in the last word of a message, so the XOR of all
* 32bit words of a completed message is zero. Dongle will also place a seqnum
* in the epoch field of the cmn_msg_hdr.
*/
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen)
{
uint32 tries;
uint32 prot_checksum = 0; /* computed checksum */
int num_words = msglen / sizeof(uint32); /* num of 32bit words */
uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
dhd_prot_t *prot = dhd->prot;
uint32 step = 0;
uint32 delay = PCIE_D2H_SYNC_DELAY;
uint32 total_tries = 0;
ASSERT(msglen == ring->item_len);
BCM_REFERENCE(delay);
/*
* For retries we have to make some sort of stepper algorithm.
* We see that every time when the Dongle comes out of the D3
* Cold state, the first D2H mem2mem DMA takes more time to
* complete, leading to livelock issues.
*
* Case 1 - Apart from the Host CPU, some other bus master is
* accessing the DDR port, probably a page close to the ring,
* so PCIe does not get a chance to update the memory.
* Solution - Increase the number of tries.
*
* Case 2 - The 50usec breathing time given by the Host CPU is not
* sufficient for the PCIe RC to start its work.
* Solution: Increase the delay in a stepper fashion.
* This is done to ensure that no unwanted extra delay is
* introduced in normal conditions.
*/
for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
if (prot_checksum == 0U) { /* checksum is OK */
if (msg->epoch == ring_seqnum) {
ring->seqnum++; /* next expected sequence number */
goto dma_completed;
}
}
total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
if (total_tries > prot->d2h_sync_wait_max)
prot->d2h_sync_wait_max = total_tries;
OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
/* For ARM there is no pause in cpu_relax, so add extra delay */
OSL_DELAY(delay * step);
#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */
} /* for PCIE_D2H_SYNC_WAIT_TRIES */
} /* for number of steps */
dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen);
ring->seqnum++; /* skip this message ... leak of a pktid */
return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
dma_completed:
prot->d2h_sync_wait_tot += total_tries;
return msg->msg_type;
}
/**
* dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete and the
* host need not sync. This noop sync handler will be bound when the dongle
* advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
*/
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen)
{
return msg->msg_type;
}
/**
* dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
* the dongle advertises.
*/
static void
dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
{
dhd_prot_t *prot = dhd->prot;
prot->d2h_sync_wait_max = 0UL;
prot->d2h_sync_wait_tot = 0UL;
prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
} else {
prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
}
}
#endif /* PCIE_D2H_SYNC */
int INLINE
dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
{
/* To synchronize with the previous memory operations call wmb() */
OSL_SMP_WMB();
dhd->prot->ioctl_received = reason;
/* Call another wmb() to make sure the ioctl_received value is updated before waking the waiter */
OSL_SMP_WMB();
dhd_os_ioctl_resp_wake(dhd);
return 0;
}
/**
* dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
*/
static void
dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
{
dhd_prot_t *prot = dhd->prot;
prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
}
/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
/*
* +---------------------------------------------------------------------------+
* PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
* virtual and physical address, the buffer length and the DMA handle.
* A secdma handler is also included in the dhd_dma_buf object.
* +---------------------------------------------------------------------------+
*/
static INLINE void
dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
{
base_addr->low_addr = htol32(PHYSADDRLO(pa));
base_addr->high_addr = htol32(PHYSADDRHI(pa));
}
/**
* dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
*/
static int
dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
uint32 base, end; /* dongle uses 32bit ptr arithmetic */
ASSERT(dma_buf);
base = PHYSADDRLO(dma_buf->pa);
ASSERT(base);
ASSERT(ISALIGNED(base, DMA_ALIGN_LEN));
ASSERT(dma_buf->len != 0);
/* test 32bit offset arithmetic over dma buffer for loss of carry-over */
end = (base + dma_buf->len); /* end address */
if ((end & 0xFFFFFFFF) < (base & 0xFFFFFFFF)) { /* exclude carryover */
DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
__FUNCTION__, base, dma_buf->len));
return BCME_ERROR;
}
return BCME_OK;
}
/**
* dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
* returns BCME_OK=0 on success
* returns non-zero negative error value on failure.
*/
static int
dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
{
uint32 dma_pad = 0;
osl_t *osh = dhd->osh;
ASSERT(dma_buf != NULL);
ASSERT(dma_buf->va == NULL);
ASSERT(dma_buf->len == 0);
/* Pad the buffer length by one extra cacheline size.
* Required for D2H direction.
*/
dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
DMA_ALIGN_LEN, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
if (dma_buf->va == NULL) {
DHD_ERROR(("%s: buf_len %d, no memory available\n",
__FUNCTION__, buf_len));
return BCME_NOMEM;
}
dma_buf->len = buf_len; /* not including padded len */
if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
dhd_dma_buf_free(dhd, dma_buf);
return BCME_ERROR;
}
dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
return BCME_OK;
}
/**
* dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
*/
static void
dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
if ((dma_buf == NULL) || (dma_buf->va == NULL)) {
return;
}
(void)dhd_dma_buf_audit(dhd, dma_buf);
/* Zero out the entire buffer and cache flush */
memset((void*)dma_buf->va, 0, dma_buf->len);
OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
}
/**
* dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
* dhd_dma_buf_alloc().
*/
static void
dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
osl_t *osh = dhd->osh;
ASSERT(dma_buf);
if (dma_buf->va == NULL) {
return; /* Allow for free invocation, when alloc failed */
}
/* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
(void)dhd_dma_buf_audit(dhd, dma_buf);
/* dma buffer may have been padded at allocation */
DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
dma_buf->pa, dma_buf->dmah);
memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
}
/**
* dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
* Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
*/
void
dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
{
dhd_dma_buf_t *dma_buf;
ASSERT(dhd_dma_buf);
dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
dma_buf->va = va;
dma_buf->len = len;
dma_buf->pa = pa;
dma_buf->dmah = dmah;
dma_buf->secdma = secdma;
/* Audit user defined configuration */
(void)dhd_dma_buf_audit(dhd, dma_buf);
}
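/*
 * Typical lifecycle of a dhd_dma_buf_t in this file (a minimal sketch, not a
 * new API; error handling elided):
 *
 *   dhd_dma_buf_t dma_buf;
 *   memset(&dma_buf, 0, sizeof(dma_buf));
 *   if (dhd_dma_buf_alloc(dhd, &dma_buf, DHD_BUS_TPUT_BUF_LEN) != BCME_OK)
 *       return BCME_NOMEM;
 *   ... program dma_buf.pa into the dongle; DMA lands in dma_buf.va ...
 *   dhd_dma_buf_free(dhd, &dma_buf);   // also zeroes the descriptor
 */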
/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
/*
* +---------------------------------------------------------------------------+
* PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
* Its main purpose is to save memory on the dongle; it has other uses as well.
* The packet id map, also includes storage for some packet parameters that
* may be saved. A native packet pointer along with the parameters may be saved
* and a unique 32bit pkt id will be returned. Later, the saved packet pointer
* and the metadata may be retrieved using the previously allocated packet id.
* +---------------------------------------------------------------------------+
*/
#define DHD_PCIE_PKTID
#define MAX_PKTID_ITEMS (3072) /* Maximum number of pktids supported */
/* On Router, the pktptr serves as a pktid. */
#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
#endif
/* Enum for marking the buffer color based on usage */
typedef enum dhd_pkttype {
PKTTYPE_DATA_TX = 0,
PKTTYPE_DATA_RX,
PKTTYPE_IOCTL_RX,
PKTTYPE_EVENT_RX,
/* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */
PKTTYPE_NO_CHECK
} dhd_pkttype_t;
#define DHD_PKTID_INVALID (0U)
#define DHD_IOCTL_REQ_PKTID (0xFFFE)
#define DHD_FAKE_PKTID (0xFACE)
#define DHD_PKTID_FREE_LOCKER (FALSE)
#define DHD_PKTID_RSV_LOCKER (TRUE)
typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
/* Construct a packet id mapping table, returning an opaque map handle */
static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index);
/* Destroy a packet id mapping table, freeing all packets active in the table */
static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
#define PKTID_MAP_HANDLE (0)
#define PKTID_MAP_HANDLE_IOCTL (1)
#define DHD_NATIVE_TO_PKTID_INIT(dhd, items, index) dhd_pktid_map_init((dhd), (items), (index))
#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
#if defined(DHD_PCIE_PKTID)
/* Determine number of pktids that are available */
static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
/* Allocate a unique pktid against which a pkt and some metadata is saved */
static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
void *pkt);
static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
void *dmah, void *secdma, dhd_pkttype_t pkttype);
static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
void *dmah, void *secdma, dhd_pkttype_t pkttype);
/* Return an allocated pktid, retrieving previously saved pkt and metadata */
static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
/*
* DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
*
* DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
* DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
*
* CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
* either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
*/
#if defined(DHD_PKTID_AUDIT_ENABLED)
#define USE_DHD_PKTID_AUDIT_LOCK 1
/* Audit the pktidmap allocator */
/* #define DHD_PKTID_AUDIT_MAP */
/* Audit the pktid during production/consumption of workitems */
#define DHD_PKTID_AUDIT_RING
#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
#error "May only enabled audit of MAP or RING, at a time."
#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
#define DHD_DUPLICATE_ALLOC 1
#define DHD_DUPLICATE_FREE 2
#define DHD_TEST_IS_ALLOC 3
#define DHD_TEST_IS_FREE 4
#ifdef USE_DHD_PKTID_AUDIT_LOCK
#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
#define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock)
#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
#else
#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0)
#define DHD_PKTID_AUDIT_LOCK(lock) 0
#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0)
#endif /* !USE_DHD_PKTID_AUDIT_LOCK */
#endif /* DHD_PKTID_AUDIT_ENABLED */
/* #define USE_DHD_PKTID_LOCK 1 */
#ifdef USE_DHD_PKTID_LOCK
#define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
#define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock)
#define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
#else
#define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
#define DHD_PKTID_LOCK_DEINIT(osh, lock) \
do { \
BCM_REFERENCE(osh); \
BCM_REFERENCE(lock); \
} while (0)
#define DHD_PKTID_LOCK(lock) 0
#define DHD_PKTID_UNLOCK(lock, flags) \
do { \
BCM_REFERENCE(lock); \
BCM_REFERENCE(flags); \
} while (0)
#endif /* !USE_DHD_PKTID_LOCK */
/* Packet metadata saved in packet id mapper */
/* A Locker can be in one of 3 states:
* LOCKER_IS_FREE - Locker is free and can be allocated
* LOCKER_IS_BUSY - Locker is assigned and is being used, values in the
* locker (buffer address, len, phy addr etc) are populated
* with valid values
* LOCKER_IS_RSVD - The locker is reserved for future use, but the values
* in the locker are not valid. Especially pkt should be
* NULL in this state. When the user wants to re-use the
* locker dhd_pktid_map_free can be called with a flag
* to reserve the pktid for future use, which will clear
* the contents of the locker. When the user calls
* dhd_pktid_map_save the locker would move to LOCKER_IS_BUSY
*/
typedef enum dhd_locker_state {
LOCKER_IS_FREE,
LOCKER_IS_BUSY,
LOCKER_IS_RSVD
} dhd_locker_state_t;
typedef struct dhd_pktid_item {
dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
uint16 len; /* length of mapped packet's buffer */
void *pkt; /* opaque native pointer to a packet */
dmaaddr_t pa; /* physical address of mapped packet's buffer */
void *dmah; /* handle to OS specific DMA map */
void *secdma;
} dhd_pktid_item_t;
typedef struct dhd_pktid_map {
uint32 items; /* total items in map */
uint32 avail; /* total available items */
int failures; /* lockers unavailable count */
/* Spinlock to protect dhd_pktid_map in process/tasklet context */
void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
#if defined(DHD_PKTID_AUDIT_ENABLED)
void *pktid_audit_lock;
struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
#endif /* DHD_PKTID_AUDIT_ENABLED */
uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */
dhd_pktid_item_t lockers[0]; /* metadata storage */
} dhd_pktid_map_t;
/*
* PktId (Locker) #0 is never allocated and is considered invalid.
*
* On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
* depleted pktid pool and must not be used by the caller.
*
* Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
*/
#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
#define DHD_PKIDMAP_ITEMS(items) (items)
#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
(DHD_PKTID_ITEM_SZ * ((items) + 1)))
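/*
 * Sizing note: DHD_PKTID_MAP_SZ(items) allocates (items + 1) lockers on top of
 * the fixed dhd_pktid_map_t header, e.g. with MAX_PKTID_ITEMS = 3072 the map
 * holds 3073 lockers; the extra one is the reserved, never-allocated pktid #0.
 */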
#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, map) dhd_pktid_map_fini_ioctl((dhd), (map))
/* Convert a packet to a pktid, and save pkt pointer in busy locker */
#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) dhd_pktid_map_reserve((dhd), (map), (pkt))
/* Reuse a previously reserved locker to save packet params */
#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
(uint8)(dir), (void *)(dmah), (void *)(secdma), \
(dhd_pkttype_t)(pkttype))
/* Convert a packet to a pktid, and save packet params in locker */
#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
(uint8)(dir), (void *)(dmah), (void *)(secdma), \
(dhd_pkttype_t)(pkttype))
/* Convert pktid to a packet, and free the locker */
#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
(void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
/* Convert the pktid to a packet, empty locker, but keep it reserved */
#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
(void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
#if defined(DHD_PKTID_AUDIT_ENABLED)
static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
const int test_for, const char *errmsg);
/**
* dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
*/
static int
dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
const int test_for, const char *errmsg)
{
#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
const uint32 max_pktid_items = (MAX_PKTID_ITEMS);
struct bcm_mwbmap *handle;
uint32 flags;
bool ignore_audit;
if (pktid_map == (dhd_pktid_map_t *)NULL) {
DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
return BCME_OK;
}
flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
handle = pktid_map->pktid_audit;
if (handle == (struct bcm_mwbmap *)NULL) {
DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
return BCME_OK;
}
/* Exclude special pktids from audit */
ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) || (pktid == DHD_FAKE_PKTID);
if (ignore_audit) {
DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
return BCME_OK;
}
if ((pktid == DHD_PKTID_INVALID) || (pktid > max_pktid_items)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
/* lock is released in "error" */
goto error;
}
/* Perform audit */
switch (test_for) {
case DHD_DUPLICATE_ALLOC:
if (!bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
errmsg, pktid));
goto error;
}
bcm_mwbmap_force(handle, pktid);
break;
case DHD_DUPLICATE_FREE:
if (bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
errmsg, pktid));
goto error;
}
bcm_mwbmap_free(handle, pktid);
break;
case DHD_TEST_IS_ALLOC:
if (bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
errmsg, pktid));
goto error;
}
break;
case DHD_TEST_IS_FREE:
if (!bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
errmsg, pktid));
goto error;
}
break;
default:
goto error;
}
DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
return BCME_OK;
error:
DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
/* May insert any trap mechanism here ! */
dhd_pktid_audit_fail_cb(dhd);
return BCME_ERROR;
}
#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
#endif /* DHD_PKTID_AUDIT_ENABLED */
/* +------------------ End of PCIE DHD PKTID AUDIT ------------------------+ */
/**
* +---------------------------------------------------------------------------+
* Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
*
* dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS].
*
* dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
* packet id is returned. This unique packet id may be used to retrieve the
* previously saved packet metadata, using dhd_pktid_map_free(). On invocation
* of dhd_pktid_map_free(), the unique packet id is essentially freed. A
* subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
*
* Implementation Note:
* Convert this into a <key,locker> abstraction and place into bcmutils !
* Locker abstraction should treat contents as opaque storage, and a
* callback should be registered to handle busy lockers on destructor.
*
* +---------------------------------------------------------------------------+
*/
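/*
 * Usage sketch (illustrative only; real callers pass the DMA direction and
 * handles appropriate to their path, DMA_TX is assumed here):
 *
 *   uint32 pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len,
 *                      DMA_TX, NULL, NULL, PKTTYPE_DATA_TX);
 *   if (pktid == DHD_PKTID_INVALID)
 *       ...;    // pool depleted; do not post the work item
 *
 *   ... later, when the dongle's completion for 'pktid' arrives ...
 *   pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len,
 *                      dmah, secdma, PKTTYPE_DATA_TX);
 */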
/** Allocate and initialize a mapper of num_items <numbered_key, locker> */
static dhd_pktid_map_handle_t *
dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
{
void *osh;
uint32 nkey;
dhd_pktid_map_t *map;
uint32 dhd_pktid_map_sz;
uint32 map_items;
#ifdef DHD_USE_STATIC_PKTIDMAP
uint32 section;
#endif /* DHD_USE_STATIC_PKTIDMAP */
osh = dhd->osh;
ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS));
dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
#ifdef DHD_USE_STATIC_PKTIDMAP
if (index == PKTID_MAP_HANDLE) {
section = DHD_PREALLOC_PKTID_MAP;
} else {
section = DHD_PREALLOC_PKTID_MAP_IOCTL;
}
map = (dhd_pktid_map_t *)DHD_OS_PREALLOC(dhd, section, dhd_pktid_map_sz);
#else
map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz);
#endif /* DHD_USE_STATIC_PKTIDMAP */
if (map == NULL) {
DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
__FUNCTION__, __LINE__, dhd_pktid_map_sz));
goto error;
}
bzero(map, dhd_pktid_map_sz);
/* Initialize the lock that protects this structure */
map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
if (map->pktid_lock == NULL) {
DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
goto error;
}
map->items = num_items;
map->avail = num_items;
map_items = DHD_PKIDMAP_ITEMS(map->items);
#if defined(DHD_PKTID_AUDIT_ENABLED)
/* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
goto error;
} else {
DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
__FUNCTION__, __LINE__, map_items + 1));
}
map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
#endif /* DHD_PKTID_AUDIT_ENABLED */
for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
map->keys[nkey] = nkey; /* populate with unique keys */
map->lockers[nkey].state = LOCKER_IS_FREE;
map->lockers[nkey].pkt = NULL; /* bzero: redundant */
map->lockers[nkey].len = 0;
}
/* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be busy */
map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY;
map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
map->lockers[DHD_PKTID_INVALID].len = 0;
#if defined(DHD_PKTID_AUDIT_ENABLED)
/* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
#endif /* DHD_PKTID_AUDIT_ENABLED */
return (dhd_pktid_map_handle_t *)map; /* opaque handle */
error:
if (map) {
#if defined(DHD_PKTID_AUDIT_ENABLED)
if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
map->pktid_audit = (struct bcm_mwbmap *)NULL;
if (map->pktid_audit_lock)
DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
}
#endif /* DHD_PKTID_AUDIT_ENABLED */
if (map->pktid_lock)
DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
MFREE(osh, map, dhd_pktid_map_sz);
}
return (dhd_pktid_map_handle_t *)NULL;
}
/**
* Retrieve all allocated keys and free all <numbered_key, locker>.
* Freeing implies: unmapping the buffers and freeing the native packet
* This could have been a callback registered with the pktid mapper.
*/
static void
dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
void *osh;
uint32 nkey;
dhd_pktid_map_t *map;
uint32 dhd_pktid_map_sz;
dhd_pktid_item_t *locker;
uint32 map_items;
uint32 flags;
if (handle == NULL) {
return;
}
map = (dhd_pktid_map_t *)handle;
flags = DHD_PKTID_LOCK(map->pktid_lock);
osh = dhd->osh;
dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
nkey = 1; /* skip reserved KEY #0, and start from 1 */
locker = &map->lockers[nkey];
map_items = DHD_PKIDMAP_ITEMS(map->items);
for (; nkey <= map_items; nkey++, locker++) {
if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
locker->state = LOCKER_IS_FREE; /* force open the locker */
#if defined(DHD_PKTID_AUDIT_ENABLED)
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
#endif /* DHD_PKTID_AUDIT_ENABLED */
{ /* This could be a callback registered with dhd_pktid_map */
DMA_UNMAP(osh, locker->pa, locker->len,
locker->dir, 0, DHD_DMAH_NULL);
dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
locker->pkttype, TRUE);
}
}
#if defined(DHD_PKTID_AUDIT_ENABLED)
else {
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
}
#endif /* DHD_PKTID_AUDIT_ENABLED */
locker->pkt = NULL; /* clear saved pkt */
locker->len = 0;
}
#if defined(DHD_PKTID_AUDIT_ENABLED)
if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
map->pktid_audit = (struct bcm_mwbmap *)NULL;
if (map->pktid_audit_lock) {
DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
}
}
#endif /* DHD_PKTID_AUDIT_ENABLED */
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
#ifdef DHD_USE_STATIC_PKTIDMAP
DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
#else
MFREE(osh, handle, dhd_pktid_map_sz);
#endif /* DHD_USE_STATIC_PKTIDMAP */
}
#ifdef IOCTLRESP_USE_CONSTMEM
/** Called in detach scenario. Releasing IOCTL buffers. */
static void
dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
uint32 nkey;
dhd_pktid_map_t *map;
uint32 dhd_pktid_map_sz;
dhd_pktid_item_t *locker;
uint32 map_items;
uint32 flags;
osl_t *osh = dhd->osh;
if (handle == NULL) {
return;
}
map = (dhd_pktid_map_t *)handle;
flags = DHD_PKTID_LOCK(map->pktid_lock);
dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
nkey = 1; /* skip reserved KEY #0, and start from 1 */
locker = &map->lockers[nkey];
map_items = DHD_PKIDMAP_ITEMS(map->items);
for (; nkey <= map_items; nkey++, locker++) {
if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
locker->state = LOCKER_IS_FREE; /* force open the locker */
#if defined(DHD_PKTID_AUDIT_ENABLED)
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
#endif /* DHD_PKTID_AUDIT_ENABLED */
{
dhd_dma_buf_t retbuf;
retbuf.va = locker->pkt;
retbuf.len = locker->len;
retbuf.pa = locker->pa;
retbuf.dmah = locker->dmah;
retbuf.secdma = locker->secdma;
/* This could be a callback registered with dhd_pktid_map */
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
free_ioctl_return_buffer(dhd, &retbuf);
flags = DHD_PKTID_LOCK(map->pktid_lock);
}
}
#if defined(DHD_PKTID_AUDIT_ENABLED)
else {
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
}
#endif /* DHD_PKTID_AUDIT_ENABLED */
locker->pkt = NULL; /* clear saved pkt */
locker->len = 0;
}
#if defined(DHD_PKTID_AUDIT_ENABLED)
if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
map->pktid_audit = (struct bcm_mwbmap *)NULL;
if (map->pktid_audit_lock) {
DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
}
}
#endif /* DHD_PKTID_AUDIT_ENABLED */
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
#ifdef DHD_USE_STATIC_PKTIDMAP
DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
#else
MFREE(osh, handle, dhd_pktid_map_sz);
#endif /* DHD_USE_STATIC_PKTIDMAP */
}
#endif /* IOCTLRESP_USE_CONSTMEM */
/** Get the pktid free count */
static INLINE uint32 BCMFASTPATH
dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
{
dhd_pktid_map_t *map;
uint32 flags;
uint32 avail;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
flags = DHD_PKTID_LOCK(map->pktid_lock);
avail = map->avail;
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
return avail;
}
/**
* Allocate locker, save pkt contents, and return the locker's numbered key.
* dhd_pktid_map_alloc() is not reentrant; serialization is the caller's responsibility.
* Caller must treat a returned value DHD_PKTID_INVALID as a failure case,
* implying a depleted pool of pktids.
*/
static INLINE uint32
__dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
{
uint32 nkey;
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
if (map->avail <= 0) { /* no more pktids to allocate */
map->failures++;
DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
return DHD_PKTID_INVALID; /* failed alloc request */
}
ASSERT(map->avail <= map->items);
nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
locker = &map->lockers[nkey]; /* save packet metadata in locker */
map->avail--;
locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
locker->len = 0;
locker->state = LOCKER_IS_BUSY; /* reserve this locker */
#if defined(DHD_PKTID_AUDIT_MAP)
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* Audit duplicate alloc */
#endif /* DHD_PKTID_AUDIT_MAP */
ASSERT(nkey != DHD_PKTID_INVALID);
return nkey; /* return locker's numbered key */
}
/**
* dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
* yet populated. Invoke the pktid save api to populate the packet parameters
* into the locker.
* Wrapper that takes the required lock when called directly.
*/
static INLINE uint32
dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
{
dhd_pktid_map_t *map;
uint32 flags;
uint32 ret;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
flags = DHD_PKTID_LOCK(map->pktid_lock);
ret = __dhd_pktid_map_reserve(dhd, handle, pkt);
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
return ret;
}
static INLINE void
__dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
dhd_pkttype_t pkttype)
{
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
locker = &map->lockers[nkey];
ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
#if defined(DHD_PKTID_AUDIT_MAP)
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* key must have been reserved a priori */
#endif /* DHD_PKTID_AUDIT_MAP */
/* store contents in locker */
locker->dir = dir;
locker->pa = pa;
locker->len = (uint16)len; /* 16bit len */
locker->dmah = dmah; /* dma handle */
locker->secdma = secdma;
locker->pkttype = pkttype;
locker->pkt = pkt;
locker->state = LOCKER_IS_BUSY; /* make this locker busy */
}
/**
* dhd_pktid_map_save - Save a packet's parameters into a locker corresponding
* to a previously reserved unique numbered key.
* Wrapper that takes the required lock when called directly.
*/
static INLINE void
dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
dhd_pkttype_t pkttype)
{
dhd_pktid_map_t *map;
uint32 flags;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
flags = DHD_PKTID_LOCK(map->pktid_lock);
__dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len,
dir, dmah, secdma, pkttype);
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
}
/**
* dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
* contents into the corresponding locker. Return the numbered key.
*/
static uint32 BCMFASTPATH
dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
dhd_pkttype_t pkttype)
{
uint32 nkey;
uint32 flags;
dhd_pktid_map_t *map;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
flags = DHD_PKTID_LOCK(map->pktid_lock);
nkey = __dhd_pktid_map_reserve(dhd, handle, pkt);
if (nkey != DHD_PKTID_INVALID) {
__dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
len, dir, dmah, secdma, pkttype);
#if defined(DHD_PKTID_AUDIT_MAP)
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* key must have been reserved a priori */
#endif /* DHD_PKTID_AUDIT_MAP */
}
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
return nkey;
}
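/*
* Typical usage (a minimal sketch; the actual call sites elsewhere in this file
* may differ): either reserve a key first and populate the locker later, or do
* both in one step with dhd_pktid_map_alloc():
*
*   nkey = dhd_pktid_map_reserve(dhd, handle, pkt);
*   if (nkey != DHD_PKTID_INVALID)
*       dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len, dir, dmah,
*                          secdma, pkttype);
*/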
/**
* dhd_pktid_map_free - Given a numbered key, return the locker contents and
* release the key. Calls are serialized via the pktid lock. Callers may not
* free the value DHD_PKTID_INVALID or an arbitrary pktid value; only a
* previously allocated pktid may be freed.
*/
static void * BCMFASTPATH
dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma,
dhd_pkttype_t pkttype, bool rsv_locker)
{
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
void * pkt;
uint32 flags;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
flags = DHD_PKTID_LOCK(map->pktid_lock);
ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
locker = &map->lockers[nkey];
#if defined(DHD_PKTID_AUDIT_MAP)
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
#endif /* DHD_PKTID_AUDIT_MAP */
if (locker->state == LOCKER_IS_FREE) { /* Debug check for cloned numbered key */
DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
__FUNCTION__, __LINE__, nkey));
ASSERT(locker->state != LOCKER_IS_FREE);
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
return NULL;
}
/* Check the "colour" of the buffer: a buffer posted for TX must be freed on a
* TX completion, a buffer posted for an IOCTL must be freed on an IOCTL
* completion, and so on.
*/
if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
__FUNCTION__, __LINE__, nkey));
ASSERT(locker->pkttype == pkttype);
return NULL;
}
if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
map->avail++;
map->keys[map->avail] = nkey; /* make this numbered key available */
locker->state = LOCKER_IS_FREE; /* open and free Locker */
} else {
/* pktid will be reused, but the locker does not have a valid pkt */
locker->state = LOCKER_IS_RSVD;
}
#if defined(DHD_PKTID_AUDIT_MAP)
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
#endif /* DHD_PKTID_AUDIT_MAP */
*pa = locker->pa; /* return contents of locker */
*len = (uint32)locker->len;
*dmah = locker->dmah;
*secdma = locker->secdma;
pkt = locker->pkt;
locker->pkt = NULL; /* Clear pkt */
locker->len = 0;
DHD_PKTID_UNLOCK(map->pktid_lock, flags);
return pkt;
}
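/*
* Note: when rsv_locker is DHD_PKTID_FREE_LOCKER the numbered key is returned
* to the free pool; otherwise the locker is left in the reserved state so the
* same pktid can be re-populated by a later save (presumably used by paths that
* recycle pktids, e.g. rx buffer re-posting).
*/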
#else /* ! DHD_PCIE_PKTID */
typedef struct pktlist {
PKT_LIST *tx_pkt_list; /* list for tx packets */
PKT_LIST *rx_pkt_list; /* list for rx packets */
PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */
} pktlists_t;
/*
* Given that each workitem carries only a 32bit pktid, only 32bit hosts can use
* a one-to-one mapping between a 32bit packet pointer and a 32bit pktid.
*
* - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
* - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
* a lock.
* - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
*/
#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
#define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
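/* In this mode the 32bit packet pointer itself serves as the pktid, so
* DHD_PKTID32()/DHD_PKTPTR32() are simple casts and no locker bookkeeping is
* required.
*/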
static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
dhd_pkttype_t pkttype);
static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
dhd_pkttype_t pkttype);
static dhd_pktid_map_handle_t *
dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
{
osl_t *osh = dhd->osh;
pktlists_t *handle = NULL;
if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
__FUNCTION__, __LINE__, sizeof(pktlists_t)));
goto error_done;
}
if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
__FUNCTION__, __LINE__, sizeof(PKT_LIST)));
goto error;
}
if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
__FUNCTION__, __LINE__, sizeof(PKT_LIST)));
goto error;
}
if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
__FUNCTION__, __LINE__, sizeof(PKT_LIST)));
goto error;
}
PKTLIST_INIT(handle->tx_pkt_list);
PKTLIST_INIT(handle->rx_pkt_list);
PKTLIST_INIT(handle->ctrl_pkt_list);
return (dhd_pktid_map_handle_t *) handle;
error:
if (handle->ctrl_pkt_list) {
MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
}
if (handle->rx_pkt_list) {
MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
}
if (handle->tx_pkt_list) {
MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
}
if (handle) {
MFREE(osh, handle, sizeof(pktlists_t));
}
error_done:
return (dhd_pktid_map_handle_t *)NULL;
}
static void
dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
{
osl_t *osh = dhd->osh;
pktlists_t *handle = (pktlists_t *) map;
ASSERT(handle != NULL);
if (handle == (pktlists_t *)NULL) {
return;
}
if (handle->ctrl_pkt_list) {
PKTLIST_FINI(handle->ctrl_pkt_list);
MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
}
if (handle->rx_pkt_list) {
PKTLIST_FINI(handle->rx_pkt_list);
MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
}
if (handle->tx_pkt_list) {
PKTLIST_FINI(handle->tx_pkt_list);
MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
}
if (handle) {
MFREE(osh, handle, sizeof(pktlists_t));
}
}
/** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
static INLINE uint32
dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
dhd_pkttype_t pkttype)
{
pktlists_t *handle = (pktlists_t *) map;
ASSERT(pktptr32 != NULL);
DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
DHD_PKT_SET_DMAH(pktptr32, dmah);
DHD_PKT_SET_PA(pktptr32, pa);
DHD_PKT_SET_SECDMA(pktptr32, secdma);
if (pkttype == PKTTYPE_DATA_TX) {
PKTLIST_ENQ(handle->tx_pkt_list, pktptr32);
} else if (pkttype == PKTTYPE_DATA_RX) {
PKTLIST_ENQ(handle->rx_pkt_list, pktptr32);
} else {
PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32);
}
return DHD_PKTID32(pktptr32);
}
/** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
static INLINE void *
dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
dhd_pkttype_t pkttype)
{
pktlists_t *handle = (pktlists_t *) map;
void *pktptr32;
ASSERT(pktid32 != 0U);
pktptr32 = DHD_PKTPTR32(pktid32);
*dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
*dmah = DHD_PKT_GET_DMAH(pktptr32);
*pa = DHD_PKT_GET_PA(pktptr32);
*secdma = DHD_PKT_GET_SECDMA(pktptr32);
if (pkttype == PKTTYPE_DATA_TX) {
PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32);
} else if (pkttype == PKTTYPE_DATA_RX) {
PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32);
} else {
PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32);
}
return pktptr32;
}
#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) DHD_PKTID32(pkt)
#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
(dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
})
#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
(dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
})
#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \
dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
(void **)&secdma, (dhd_pkttype_t)(pkttype)); \
})
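/* Without a pktid map there is no pool to exhaust; report an unlimited count. */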
#define DHD_PKTID_AVAIL(map) (~0)
#endif /* ! DHD_PCIE_PKTID */
/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
/**
* The PCIE FD protocol layer is constructed in two phases:
* Phase 1. dhd_prot_attach()
* Phase 2. dhd_prot_init()
*
* dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
* All common rings are also attached here (msgbuf_ring_t objects are allocated
* with DMA-able buffers).
* All dhd_dma_buf_t objects are also allocated here.
*
* As dhd_prot_attach() is invoked before the pcie_shared object is read, any
* initialization that requires information advertised by the dongle cannot be
* performed here.
* E.g. the number of TxPost flowrings is not known at this point, nor is the
* form of D2H DMA sync mechanism advertised by the dongle, or whether the
* dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H rings
* (common + flow).
*
* dhd_prot_init() is invoked after the bus layer has fetched the information
* advertised by the dongle in the pcie_shared_t.
*/
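/*
* A rough sketch of the bring-up ordering described above (hypothetical; the
* actual call sites live in the bus layer):
*
*   dhd_prot_attach(dhd);   - allocate dhd_prot_t, common rings, dma buffers
*   (bus layer reads pcie_shared_t from dongle memory)
*   dhd_prot_init(dhd);     - apply the dongle-advertised features
*/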
int
dhd_prot_attach(dhd_pub_t *dhd)
{
osl_t *osh = dhd->osh;
dhd_prot_t *prot;
/* Allocate prot structure */
if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
sizeof(dhd_prot_t)))) {
DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
goto fail;
}
memset(prot, 0, sizeof(*prot));
prot->osh = osh;
dhd->prot = prot;
/* DMAing of ring WR/RD index updates supported? FALSE by default. */
dhd->dma_d2h_ring_upd_support = FALSE;
dhd->dma_h2d_ring_upd_support = FALSE;
/* Common Ring Allocations */
/* Ring 0: H2D Control Submission */
if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
__FUNCTION__));
goto fail;
}
/* Ring 1: H2D Receive Buffer Post */
if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
__FUNCTION__));
goto fail;
}
/* Ring 2: D2H Control Completion */
if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
__FUNCTION__));
goto fail;
}
/* Ring 3: D2H Transmit Complete */
if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
__FUNCTION__));
goto fail;
}
/* Ring 4: D2H Receive Complete */
if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
__FUNCTION__));
goto fail;
}
/*
* Max number of flowrings is not yet known. msgbuf_ring_t objects with DMA-able
* buffers for the flowrings will be instantiated in dhd_prot_init().
* See dhd_prot_flowrings_pool_attach().
*/
/* ioctl response buffer */
if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
goto fail;
}
/* IOCTL request buffer */
if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
goto fail;
}
/* Scratch buffer for dma rx offset */
if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
goto fail;
}
/* Scratch buffer for bus throughput measurement */
if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
goto fail;
}
#ifdef DHD_RX_CHAINING
dhd_rxchain_reset(&prot->rxchain);
#endif
#if defined(DHD_LB)
/* Initialize the work queues to be used by the Load Balancing logic */
#if defined(DHD_LB_TXC)
{
void *buffer;
buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
buffer, DHD_LB_WORKQ_SZ);
prot->tx_compl_prod_sync = 0;
DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
}
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
{
void *buffer;
buffer = MALLOC(dhd->osh, sizeof(uint32) * DHD_LB_WORKQ_SZ);
bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
buffer, DHD_LB_WORKQ_SZ);
prot->rx_compl_prod_sync = 0;
DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
}
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
return BCME_OK;
fail:
#ifndef CONFIG_DHD_USE_STATIC_BUF
if (prot != NULL) {
dhd_prot_detach(dhd);
}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
return BCME_NOMEM;
} /* dhd_prot_attach */
/**
* dhd_prot_init - second stage of dhd_prot_attach. Once the dongle has
* completed its initialization of the pcie_shared structure, we may fetch the
* dongle-advertised features and adjust the protocol layer accordingly.
*
* dhd_prot_init() may be invoked again after a dhd_prot_reset().
*/
int
dhd_prot_init(dhd_pub_t *dhd)
{
sh_addr_t base_addr;
dhd_prot_t *prot = dhd->prot;
/* PKTID handle INIT */
if (prot->pktid_map_handle != NULL) {
DHD_ERROR(("%s: pktid_map_handle already set!\n", __FUNCTION__));
ASSERT(0);
return BCME_ERROR;
}
#ifdef IOCTLRESP_USE_CONSTMEM
if (prot->pktid_map_handle_ioctl != NULL) {
DHD_ERROR(("%s: pktid_map_handle_ioctl already set!\n", __FUNCTION__));
ASSERT(0);
return BCME_ERROR;
}
#endif /* IOCTLRESP_USE_CONSTMEM */
prot->pktid_map_handle = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE);
if (prot->pktid_map_handle == NULL) {
DHD_ERROR(("%s: Unable to map packet id's\n", __FUNCTION__));
ASSERT(0);
return BCME_NOMEM;
}
#ifdef IOCTLRESP_USE_CONSTMEM
prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
DHD_FLOWRING_MAX_IOCTLRESPBUF_POST, PKTID_MAP_HANDLE_IOCTL);
if (prot->pktid_map_handle_ioctl == NULL) {
DHD_ERROR(("%s: Unable to map ioctl response buffers\n", __FUNCTION__));
ASSERT(0);
return BCME_NOMEM;
}
#endif /* IOCTLRESP_USE_CONSTMEM */
/* Max pkts in ring */
prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;
DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count));
/* Read max rx packets supported by dongle */
dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
if (prot->max_rxbufpost == 0) {
/* This would happen if the dongle firmware is not using the latest
* shared structure template.
*/
prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
}
DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
/* Initialize fields individually; a bzero() here would blow away the dma buffer pointers. */
prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
prot->cur_ioctlresp_bufs_posted = 0;
prot->active_tx_count = 0;
prot->data_seq_no = 0;
prot->ioctl_seq_no = 0;
prot->rxbufpost = 0;
prot->cur_event_bufs_posted = 0;
prot->ioctl_state = 0;
prot->curr_ioctl_cmd = 0;
prot->ioctl_received = IOCTL_WAIT;
prot->dmaxfer.srcmem.va = NULL;
prot->dmaxfer.dstmem.va = NULL;
prot->dmaxfer.in_progress = FALSE;
prot->metadata_dbg = FALSE;
prot->rx_metadata_offset = 0;
prot->tx_metadata_offset = 0;
prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
prot->ioctl_trans_id = 0;
/* Register the mailbox interrupt function up front, so that corerev checks
* are kept out of the data path.
*/
prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
/* Initialize Common MsgBuf Rings */
dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
#if defined(PCIE_D2H_SYNC)
dhd_prot_d2h_sync_init(dhd);
#endif /* PCIE_D2H_SYNC */
dhd_prot_h2d_sync_init(dhd);
/* init the scratch buffer */
dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
D2H_DMA_SCRATCH_BUF, 0);
dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
/* If supported, advertise to the dongle (via shared space) the host memory
* blocks used for DMAing completion write and submission read indices.
*/
if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
D2H_DMA_INDX_WR_BUF, 0);
dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
H2D_DMA_INDX_RD_BUF, 0);
}
if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
H2D_DMA_INDX_WR_BUF, 0);
dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
D2H_DMA_INDX_RD_BUF, 0);
}
/*
* If the DMA-able buffers for flowring needs to come from a specific
* contiguous memory region, then setup prot->flowrings_dma_buf here.
* dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
* this contiguous memory region, for each of the flowrings.
*/
/* Pre-allocate pool of msgbuf_ring for flowrings */
if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
return BCME_ERROR;
}
/* Host should configure soft doorbells if needed ... here */
/* Post the host-configured soft doorbells to the dongle */
dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
/* Post buffers for packet reception and ioctl/event responses */
dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
dhd_msgbuf_rxbuf_post_event_bufs(dhd);
return BCME_OK;
} /* dhd_prot_init */
/**
* dhd_prot_detach - PCIE FD protocol layer destructor.
* Unlinks and frees all allocated protocol memory, including the dhd_prot object.
*/
void
dhd_prot_detach(dhd_pub_t *dhd)
{
dhd_prot_t *prot = dhd->prot;
/* Stop the protocol module */
if (prot) {
/* free up all DMA-able buffers allocated during prot attach/init */
dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
dhd_dma_buf_free(dhd, &prot->retbuf); /* ioctl return buffer */
dhd_dma_buf_free(dhd, &prot->ioctbuf);
dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
/* Common MsgBuf Rings */
dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
dhd_prot_flowrings_pool_detach(dhd);
DHD_NATIVE_TO_PKTID_FINI(dhd, dhd->prot->pktid_map_handle);
#if defined(DHD_LB)
#if defined(DHD_LB_TXC)
if (prot->tx_compl_prod.buffer) {
MFREE(dhd->osh, prot->tx_compl_prod.buffer,
sizeof(void*) * DHD_LB_WORKQ_SZ);
}
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
if (prot->rx_compl_prod.buffer) {
/* size matches the sizeof(uint32) allocation made in dhd_prot_attach */
MFREE(dhd->osh, prot->rx_compl_prod.buffer,
sizeof(uint32) * DHD_LB_WORKQ_SZ);
}
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
/* Free the prot structure itself last, only after all of its members above
* have been released; freeing it earlier would leave the DHD_LB buffer frees
* dereferencing freed memory.
*/
#ifndef CONFIG_DHD_USE_STATIC_BUF
MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
#endif /* CONFIG_DHD_USE_STATIC_BUF */
dhd->prot = NULL;
}
} /* dhd_prot_detach */
/**
* dhd_prot_reset - Reset the protocol layer without freeing any objects. This
* may be invoked to soft reboot the dongle, without having to detach and attach
* the entire protocol layer.
*
* After dhd_prot_reset(), dhd_prot_init() may be invoked without going through
* a dhd_prot_attach() phase.
*/
void
dhd_prot_reset(dhd_pub_t *dhd)
{
struct dhd_prot *prot = dhd->prot;
DHD_TRACE(("%s\n", __FUNCTION__));
if (prot == NULL) {
return;
}
dhd_prot_flowrings_pool_reset(dhd);
dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
dhd_dma_buf_reset(dhd, &prot->retbuf);
dhd_dma_buf_reset(dhd, &prot->ioctbuf);
dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
prot->rx_metadata_offset = 0;
prot->tx_metadata_offset = 0;
prot->rxbufpost = 0;
prot->cur_event_bufs_posted = 0;
prot->cur_ioctlresp_bufs_posted = 0;
prot->active_tx_count = 0;
prot->data_seq_no = 0;
prot->ioctl_seq_no = 0;
prot->ioctl_state = 0;
prot->curr_ioctl_cmd = 0;
prot->ioctl_received = IOCTL_WAIT;
prot->ioctl_trans_id = 0;
/* dhd_flow_rings_init() is invoked from dhd_bus_start(), so the flowrings
* must be deleted here when the bus is being stopped.
*/
if (dhd->flow_rings_inited) {
dhd_flow_rings_deinit(dhd);
}
if (prot->pktid_map_handle) {
DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_map_handle);
prot->pktid_map_handle = NULL;
}
#ifdef IOCTLRESP_USE_CONSTMEM
if (prot->pktid_map_handle_ioctl) {
DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
prot->pktid_map_handle_ioctl = NULL;
}
#endif /* IOCTLRESP_USE_CONSTMEM */
} /* dhd_prot_reset */
void
dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
{
dhd_prot_t *prot = dhd->prot;
prot->rx_dataoffset = rx_offset;
}
/**
* Initialize protocol: sync w/dongle state.
* Sets dongle media info (iswl, drv_version, mac address).
*/
int
dhd_sync_with_dongle(dhd_pub_t *dhd)
{
int ret = 0;
wlc_rev_info_t revinfo;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
#ifdef DHD_FW_COREDUMP
/* Check the memdump capability */
dhd_get_memdump_info(dhd);
#endif /* DHD_FW_COREDUMP */
#ifdef BCMASSERT_LOG
dhd_get_assert_info(dhd);
#endif /* BCMASSERT_LOG */
/* Get the device rev info */
memset(&revinfo, 0, sizeof(revinfo));
ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
if (ret < 0) {
DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
goto done;
}
DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
dhd_process_cid_mac(dhd, TRUE);
ret = dhd_preinit_ioctls(dhd);
if (!ret) {
dhd_process_cid_mac(dhd, FALSE);
}
/* Always assumes wl for now */
dhd->iswl = TRUE;
done:
return ret;
} /* dhd_sync_with_dongle */
#if defined(DHD_LB)
/* DHD load balancing: deferral of work to another online CPU */
/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
/**
* dhd_lb_dispatch - load balance by dispatching work to other CPU cores.
* Note: rx_compl_tasklet is dispatched explicitly.
*/
static INLINE void
dhd_lb_dispatch(dhd_pub_t *dhdp, uint16 ring_idx)
{
switch (ring_idx) {
#if defined(DHD_LB_TXC)
case BCMPCIE_D2H_MSGRING_TX_COMPLETE:
bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
break;
#endif /* DHD_LB_TXC */
case BCMPCIE_D2H_MSGRING_RX_COMPLETE:
{
#if defined(DHD_LB_RXC)
dhd_prot_t *prot = dhdp->prot;
/* Schedule the tasklet only if we have to */
if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
/* flush WR index */
bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
}
#endif /* DHD_LB_RXC */
#if defined(DHD_LB_RXP)
dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
#endif /* DHD_LB_RXP */
break;
}
default:
break;
}
}
#if defined(DHD_LB_TXC)
/**
* DHD load balanced tx completion tasklet handler that frees packets on the
* selected CPU. Packet pointers are delivered to this tasklet via the tx
* complete workq.
*/
void
dhd_lb_tx_compl_handler(unsigned long data)
{
int elem_ix;
void *pkt, **elem;
dmaaddr_t pa;
uint32 pa_len;
dhd_pub_t *dhd = (dhd_pub_t *)data;
dhd_prot_t *prot = dhd->prot;
bcm_workq_t *workq = &prot->tx_compl_cons;
uint32 count = 0;
DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
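/* Drain the tx completion workq: each element is a packet pointer queued by
* the tx completion processing path on the producing CPU.
*/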
while (1) {
elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
if (elem_ix == BCM_RING_EMPTY) {
break;
}
elem = WORKQ_ELEMENT(void *, workq, elem_ix);
pkt = *elem;
DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
OSL_PREFETCH(PKTTAG(pkt));
OSL_PREFETCH(pkt);
pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
#if defined(BCMPCIE)
dhd_txcomplete(dhd, pkt, true);
#endif
PKTFREE(dhd->osh, pkt, TRUE);
count++;
}
/* smp_wmb(); */
bcm_workq_cons_sync(workq);
DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
}
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
void
dhd_lb_rx_compl_handler(unsigned long data)
{
dhd_pub_t *dhd = (dhd_pub_t *)data;
bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
bcm_workq_cons_sync(workq);
}
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
#define DHD_DBG_SHOW_METADATA 0
#if DHD_DBG_SHOW_METADATA
static void BCMFASTPATH
dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
{
uint8 tlv_t;
uint8 tlv_l;
uint8 *tlv_v = (uint8 *)ptr;
if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
return;
len -= BCMPCIE_D2H_METADATA_HDRLEN;
tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
while (len > TLV_HDR_LEN) {
tlv_t = tlv_v[TLV_TAG_OFF];
tlv_l = tlv_v[TLV_LEN_OFF];
len -= TLV_HDR_LEN;
tlv_v += TLV_HDR_LEN;
if (len < tlv_l)
break;
if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
break;
switch (tlv_t) {
case WLFC_CTL_TYPE_TXSTATUS: {
uint32 txs;
memcpy(&txs, tlv_v, sizeof(uint32));
if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
printf("METADATA TX_STATUS: %08x\n", txs);
} else {
wl_txstatus_additional_info_t tx_add_info;
memcpy(&tx_add_info, tlv_v + sizeof(uint32),
sizeof(wl_txstatus_additional_info_t));
printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
" rate = %08x tries = %d - %d\n", txs,
tx_add_info.seq, tx_add_info.entry_ts,
tx_add_info.enq_ts, tx_add_info.last_ts,
tx_add_info.rspec, tx_add_info.rts_cnt,
tx_add_info.tx_cnt);
}
} break;
case WLFC_CTL_TYPE_RSSI: {
if (tlv_l == 1)
printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
else
printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
(*(tlv_v + 3) << 8) | *(tlv_v + 2),
(int8)(*tlv_v), *(tlv_v + 1));
} break;
case WLFC_CTL_TYPE_FIFO_CREDITBACK:
bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
break;
case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
break;
case WLFC_CTL_TYPE_RX_STAMP: {
struct {
uint32 rspec;
uint32 bus_time;
uint32 wlan_time;
} rx_tmstamp;
memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
printf("METADATA RX TIMESTMAP: WLFCTS[%08x - %08x] rate = %08x\n",
rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
} break;
case WLFC_CTL_TYPE_TRANS_ID:
bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
break;
case WLFC_CTL_TYPE_COMP_TXSTATUS:
bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
break;