bnxt_en: Refactor L2 filter alloc/free firmware commands.
Refactor the L2 filter alloc/free logic so that these filters can be added/deleted by the user. The bp->ntp_fltr_bmap allocated size is also increased to allow enough IDs for L2 filters.

Reviewed-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bfeabf7e46
commit 96c9bedc75
2 changed files with 112 additions and 54 deletions
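For orientation before the diff: a minimal, hedged sketch of the add/remove pairing the refactor exposes. Only the bnxt_* names come from the patch itself (bnxt_hwrm_l2_filter_alloc/free, bnxt_del_l2_filter, struct bnxt_l2_filter); the example_* wrapper and its calling context are illustrative, and the setup and locking around the real callers is omitted.

/* Hedged sketch, not driver code: how a caller pairs the new helpers.
 * example_l2_filter_cycle() is illustrative; only the bnxt_* names below
 * come from the patch itself.
 */
static int example_l2_filter_cycle(struct bnxt *bp,
				   struct bnxt_l2_filter *fltr, u16 fw_vnic_id)
{
	int rc;

	/* add: program the filter in firmware; the helper records the
	 * returned firmware ID in fltr->base.filter_id
	 */
	fltr->base.fw_vnic_id = fw_vnic_id;
	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);	/* HWRM_CFA_L2_FILTER_ALLOC */
	if (rc)
		return rc;

	/* ... filter in use ... */

	/* remove: free it in firmware, then drop the software object */
	bnxt_hwrm_l2_filter_free(bp, fltr);		/* HWRM_CFA_L2_FILTER_FREE */
	bnxt_del_l2_filter(bp, fltr);
	return 0;
}

This is the same pattern that bnxt_hwrm_set_vnic_filter(), bnxt_hwrm_clear_vnic_filter() and bnxt_cfg_rx_mode() follow in the hunks below.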
drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4834,7 +4834,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
 
 	bp->ntp_fltr_count = 0;
-	bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
+	bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);
 
 	if (!bp->ntp_fltr_bmap)
 		rc = -ENOMEM;
@@ -5396,6 +5396,15 @@ static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
 	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
 	fltr->l2_key.vlan = key->vlan;
 	fltr->base.type = BNXT_FLTR_TYPE_L2;
+	if (fltr->base.flags) {
+		int bit_id;
+
+		bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+						 BNXT_MAX_FLTR, 0);
+		if (bit_id < 0)
+			return -ENOMEM;
+		fltr->base.sw_id = (u16)bit_id;
+	}
 	head = &bp->l2_fltr_hash_tbl[idx];
 	hlist_add_head_rcu(&fltr->base.hash, head);
 	atomic_set(&fltr->refcnt, 1);
@@ -5429,6 +5438,96 @@ static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
 	return fltr;
 }
 
+static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
+{
+#ifdef CONFIG_BNXT_SRIOV
+	struct bnxt_vf_info *vf = &pf->vf[vf_idx];
+
+	return vf->fw_fid;
+#else
+	return INVALID_HW_RING_ID;
+#endif
+}
+
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+	struct hwrm_cfa_l2_filter_free_input *req;
+	u16 target_id = 0xffff;
+	int rc;
+
+	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		if (fltr->base.vf_idx >= pf->active_vfs)
+			return -EINVAL;
+
+		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+		if (target_id == INVALID_HW_RING_ID)
+			return -EINVAL;
+	}
+
+	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
+	if (rc)
+		return rc;
+
+	req->target_id = cpu_to_le16(target_id);
+	req->l2_filter_id = fltr->base.filter_id;
+	return hwrm_req_send(bp, req);
+}
+
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+	struct hwrm_cfa_l2_filter_alloc_output *resp;
+	struct hwrm_cfa_l2_filter_alloc_input *req;
+	u16 target_id = 0xffff;
+	int rc;
+
+	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		if (fltr->base.vf_idx >= pf->active_vfs)
+			return -EINVAL;
+
+		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+	}
+	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
+	if (rc)
+		return rc;
+
+	req->target_id = cpu_to_le16(target_id);
+	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+		req->flags |=
+			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+	req->enables =
+		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+	eth_broadcast_addr(req->l2_addr_mask);
+
+	if (fltr->l2_key.vlan) {
+		req->enables |=
+			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+				CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+				CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+		req->num_vlans = 1;
+		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+		req->l2_ivlan_mask = cpu_to_le16(0xfff);
+	}
+
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	if (!rc) {
+		fltr->base.filter_id = resp->l2_filter_id;
+		set_bit(BNXT_FLTR_VALID, &fltr->base.state);
+	}
+	hwrm_req_drop(bp, req);
+	return rc;
+}
+
 #ifdef CONFIG_RFS_ACCEL
 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
 					    struct bnxt_ntuple_filter *fltr)
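The two helpers above route the firmware command to a VF when BNXT_ACT_FUNC_DST is set, resolving target_id from the VF's fw_fid via bnxt_vf_target_id(); otherwise target_id stays 0xffff, meaning the calling function itself. A hedged sketch of a VF-targeted free follows; the wrapper name and calling context are assumptions, only the flag and fields come from the code above.

/* Hedged sketch: free an L2 filter that was installed on behalf of a VF.
 * example_free_vf_l2_filter() is not in the driver; the flag and fields are.
 */
static int example_free_vf_l2_filter(struct bnxt *bp,
				     struct bnxt_l2_filter *fltr, u16 vf_idx)
{
	fltr->base.flags |= BNXT_ACT_FUNC_DST;	/* target the VF's fw_fid */
	fltr->base.vf_idx = vf_idx;		/* checked against pf->active_vfs */

	return bnxt_hwrm_l2_filter_free(bp, fltr);
}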
@@ -5538,8 +5637,6 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
 				     const u8 *mac_addr)
 {
-	struct hwrm_cfa_l2_filter_alloc_output *resp;
-	struct hwrm_cfa_l2_filter_alloc_input *req;
 	struct bnxt_l2_filter *fltr;
 	struct bnxt_l2_key key;
 	int rc;
@@ -5550,66 +5647,33 @@ static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
 	if (IS_ERR(fltr))
 		return PTR_ERR(fltr);
 
-	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
+	fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
+	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
 	if (rc)
-		return rc;
-
-	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
-	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
-		req->flags |=
-			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
-	req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
-	req->enables =
-		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
-			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
-			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
-	memcpy(req->l2_addr, mac_addr, ETH_ALEN);
-	req->l2_addr_mask[0] = 0xff;
-	req->l2_addr_mask[1] = 0xff;
-	req->l2_addr_mask[2] = 0xff;
-	req->l2_addr_mask[3] = 0xff;
-	req->l2_addr_mask[4] = 0xff;
-	req->l2_addr_mask[5] = 0xff;
-
-	resp = hwrm_req_hold(bp, req);
-	rc = hwrm_req_send(bp, req);
-	if (rc) {
 		bnxt_del_l2_filter(bp, fltr);
-	} else {
-		fltr->base.filter_id = resp->l2_filter_id;
-		set_bit(BNXT_FLTR_VALID, &fltr->base.state);
+	else
 		bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
-	}
-	hwrm_req_drop(bp, req);
 	return rc;
 }
 
 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
 {
-	struct hwrm_cfa_l2_filter_free_input *req;
 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
-	int rc;
+	int rc = 0;
 
 	/* Any associated ntuple filters will also be cleared by firmware. */
-	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
-	if (rc)
-		return rc;
-	hwrm_req_hold(bp, req);
 	for (i = 0; i < num_of_vnics; i++) {
 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
 
 		for (j = 0; j < vnic->uc_filter_count; j++) {
-			struct bnxt_l2_filter *fltr;
+			struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
 
-			fltr = vnic->l2_filters[j];
-			req->l2_filter_id = fltr->base.filter_id;
-
-			rc = hwrm_req_send(bp, req);
+			bnxt_hwrm_l2_filter_free(bp, fltr);
 			bnxt_del_l2_filter(bp, fltr);
 		}
 		vnic->uc_filter_count = 0;
 	}
-	hwrm_req_drop(bp, req);
 
 	return rc;
 }
@@ -11898,7 +11962,6 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
 {
 	struct net_device *dev = bp->dev;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
-	struct hwrm_cfa_l2_filter_free_input *req;
 	struct netdev_hw_addr *ha;
 	int i, off = 0, rc;
 	bool uc_update;
@@ -11910,19 +11973,12 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
 	if (!uc_update)
 		goto skip_uc;
 
-	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
-	if (rc)
-		return rc;
-	hwrm_req_hold(bp, req);
 	for (i = 1; i < vnic->uc_filter_count; i++) {
 		struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
 
-		req->l2_filter_id = fltr->base.filter_id;
-
-		rc = hwrm_req_send(bp, req);
+		bnxt_hwrm_l2_filter_free(bp, fltr);
 		bnxt_del_l2_filter(bp, fltr);
 	}
-	hwrm_req_drop(bp, req);
 
 	vnic->uc_filter_count = 1;
 
@@ -13823,8 +13879,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	rcu_read_unlock();
 
 	spin_lock_bh(&bp->ntp_fltr_lock);
-	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
-					 BNXT_NTP_FLTR_MAX_FLTR, 0);
+	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
 	if (bit_id < 0) {
 		spin_unlock_bh(&bp->ntp_fltr_lock);
 		rc = -ENOMEM;
drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2398,6 +2398,7 @@ struct bnxt {
 	int db_size;
 
 #define BNXT_NTP_FLTR_MAX_FLTR	4096
+#define BNXT_MAX_FLTR		(BNXT_NTP_FLTR_MAX_FLTR + BNXT_L2_FLTR_MAX_FLTR)
 #define BNXT_NTP_FLTR_HASH_SIZE	512
 #define BNXT_NTP_FLTR_HASH_MASK	(BNXT_NTP_FLTR_HASH_SIZE - 1)
 	struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
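Both bnxt_init_l2_filter() in the first file and bnxt_rx_flow_steer() now carve their sw_id values out of the same bp->ntp_fltr_bmap, which is why the bitmap grows from BNXT_NTP_FLTR_MAX_FLTR to BNXT_MAX_FLTR bits. A small userspace analogue of that shared ID space is sketched below; the value used for BNXT_L2_FLTR_MAX_FLTR here (64) is a placeholder for illustration, not the driver's actual define.

/* Userspace analogue of the shared sw_id bitmap (plain C, no kernel headers).
 * L2_MAX stands in for BNXT_L2_FLTR_MAX_FLTR, whose real value is in bnxt.h.
 */
#include <stdio.h>

#define NTP_MAX  4096			/* BNXT_NTP_FLTR_MAX_FLTR */
#define L2_MAX   64			/* placeholder for BNXT_L2_FLTR_MAX_FLTR */
#define MAX_FLTR (NTP_MAX + L2_MAX)	/* BNXT_MAX_FLTR */

static unsigned char bmap[MAX_FLTR / 8];

/* crude stand-in for bitmap_find_free_region(bmap, MAX_FLTR, 0) */
static int find_free_id(void)
{
	for (int i = 0; i < MAX_FLTR; i++) {
		if (!(bmap[i / 8] & (1u << (i % 8)))) {
			bmap[i / 8] |= 1u << (i % 8);
			return i;
		}
	}
	return -1;	/* exhausted, like the -ENOMEM paths in the patch */
}

int main(void)
{
	int ntuple_id = find_free_id();	/* as bnxt_rx_flow_steer() would */
	int l2_id = find_free_id();	/* as bnxt_init_l2_filter() would */

	printf("ntuple sw_id=%d, l2 sw_id=%d\n", ntuple_id, l2_id);
	return 0;
}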
@@ -2621,6 +2622,8 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
 			    int bmap_size, bool async_only);
 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr);
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);