net: sched: cls_api: add filter counter
Maintain a count of filters per block.

Counter updates are protected by cb_lock, which is also used to protect
the offload counters.

Signed-off-by: Asbjørn Sloth Tønnesen <ast@fiberby.net>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
f631ef39d8
commit
2081fd3445
2 changed files with 21 additions and 0 deletions
|
@@ -422,6 +422,7 @@ struct tcf_proto {
|
||||||
*/
|
*/
|
||||||
spinlock_t lock;
|
spinlock_t lock;
|
||||||
bool deleting;
|
bool deleting;
|
||||||
|
bool counted;
|
||||||
refcount_t refcnt;
|
refcount_t refcnt;
|
||||||
struct rcu_head rcu;
|
struct rcu_head rcu;
|
||||||
struct hlist_node destroy_ht_node;
|
struct hlist_node destroy_ht_node;
|
||||||
|
@@ -471,6 +472,7 @@ struct tcf_block {
|
||||||
struct flow_block flow_block;
|
struct flow_block flow_block;
|
||||||
struct list_head owner_list;
|
struct list_head owner_list;
|
||||||
bool keep_dst;
|
bool keep_dst;
|
||||||
|
atomic_t filtercnt; /* Number of filters */
|
||||||
atomic_t skipswcnt; /* Number of skip_sw filters */
|
atomic_t skipswcnt; /* Number of skip_sw filters */
|
||||||
atomic_t offloadcnt; /* Number of offloaded filters */
|
atomic_t offloadcnt; /* Number of offloaded filters */
|
||||||
unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
|
unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
|
||||||
|
|
|
@@ -410,12 +410,30 @@ static void tcf_proto_get(struct tcf_proto *tp)
|
||||||
refcount_inc(&tp->refcnt);
|
refcount_inc(&tp->refcnt);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Keep block->filtercnt in sync with whether this filter instance has
 * already been counted. *counted tracks the per-instance state so that
 * repeated calls are idempotent. Updates are serialized by cb_lock
 * (the same lock protecting the offload counters); the caller must not
 * already hold it.
 */
static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add)
{
	lockdep_assert_not_held(&block->cb_lock);

	down_write(&block->cb_lock);
	if (*counted != add) {
		/* State actually changes: adjust the counter, then record
		 * the new membership state for this instance.
		 */
		if (add)
			atomic_inc(&block->filtercnt);
		else
			atomic_dec(&block->filtercnt);
		*counted = add;
	}
	up_write(&block->cb_lock);
}
|
||||||
|
|
||||||
static void tcf_chain_put(struct tcf_chain *chain);
|
static void tcf_chain_put(struct tcf_chain *chain);
|
||||||
|
|
||||||
static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
|
static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
|
||||||
bool sig_destroy, struct netlink_ext_ack *extack)
|
bool sig_destroy, struct netlink_ext_ack *extack)
|
||||||
{
|
{
|
||||||
tp->ops->destroy(tp, rtnl_held, extack);
|
tp->ops->destroy(tp, rtnl_held, extack);
|
||||||
|
tcf_block_filter_cnt_update(tp->chain->block, &tp->counted, false);
|
||||||
if (sig_destroy)
|
if (sig_destroy)
|
||||||
tcf_proto_signal_destroyed(tp->chain, tp);
|
tcf_proto_signal_destroyed(tp->chain, tp);
|
||||||
tcf_chain_put(tp->chain);
|
tcf_chain_put(tp->chain);
|
||||||
|
@@ -2364,6 +2382,7 @@ replay:
|
||||||
err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
|
err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
|
||||||
flags, extack);
|
flags, extack);
|
||||||
if (err == 0) {
|
if (err == 0) {
|
||||||
|
tcf_block_filter_cnt_update(block, &tp->counted, true);
|
||||||
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
|
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
|
||||||
RTM_NEWTFILTER, false, rtnl_held, extack);
|
RTM_NEWTFILTER, false, rtnl_held, extack);
|
||||||
tfilter_put(tp, fh);
|
tfilter_put(tp, fh);
|
||||||
|
|
Loading…
Add table
Reference in a new issue