net: convert sk_buff.users from atomic_t to refcount_t
refcount_t type and corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This avoids accidental refcounter overflows that might lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 633547973f
parent 53869cebce

21 changed files with 67 additions and 67 deletions
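Background for the change: refcount_t (from <linux/refcount.h>) behaves like an atomic counter, but it saturates instead of wrapping on overflow and flags suspicious transitions such as incrementing from zero, which turns a counting bug into a warning rather than a use-after-free. The sketch below is a minimal userspace model of that get/put pattern, written only to illustrate the API shape used throughout the hunks that follow; the names mirror the kernel API, but the bodies are simplified assumptions, not the kernel implementation (the real API also provides refcount_add(), refcount_dec() and refcount_sub_and_test(), which the pktgen hunks use).

/*
 * Minimal userspace model of the refcount_t usage pattern adopted by this
 * patch. Simplified for illustration; not the kernel implementation.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { atomic_uint refs; } refcount_t;

static void refcount_set(refcount_t *r, unsigned int n)
{
	atomic_store(&r->refs, n);
}

static unsigned int refcount_read(refcount_t *r)
{
	return atomic_load(&r->refs);
}

/* Unlike a bare atomic increment, refuse to act on a saturated counter or
 * on one that already dropped to zero (a likely use-after-free). */
static void refcount_inc(refcount_t *r)
{
	unsigned int old = atomic_load(&r->refs);

	while (old != 0 && old != UINT_MAX) {
		if (atomic_compare_exchange_weak(&r->refs, &old, old + 1))
			return;
	}
	fprintf(stderr, "refcount_inc: counter is %u, not incrementing\n", old);
}

/* Returns true when the caller dropped the last reference. */
static bool refcount_dec_and_test(refcount_t *r)
{
	return atomic_fetch_sub(&r->refs, 1) == 1;
}

int main(void)
{
	refcount_t users;

	refcount_set(&users, 1);             /* object created, one owner */
	refcount_inc(&users);                /* e.g. skb queued for transmit */
	if (!refcount_dec_and_test(&users))
		puts("still referenced");
	if (refcount_dec_and_test(&users))
		puts("last reference dropped, safe to free");
	return 0;
}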
@@ -742,7 +742,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 
 if (type == NES_TIMER_TYPE_SEND) {
 new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
-atomic_inc(&new_send->skb->users);
+refcount_inc(&new_send->skb->users);
 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
 cm_node->send_entry = new_send;
 add_ref_cm_node(cm_node);
@@ -924,7 +924,7 @@ static void nes_cm_timer_tick(unsigned long pass)
 flags);
 break;
 }
-atomic_inc(&send_entry->skb->users);
+refcount_inc(&send_entry->skb->users);
 cm_packets_retrans++;
 nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
 "for node %p, jiffies = %lu, time to send = "
@@ -155,7 +155,7 @@ mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 copied = skb->len + MISDN_HEADER_LEN;
 if (len < copied) {
 if (flags & MSG_PEEK)
-atomic_dec(&skb->users);
+refcount_dec(&skb->users);
 else
 skb_queue_head(&sk->sk_receive_queue, skb);
 return -ENOSPC;
@@ -201,7 +201,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 rionet_queue_tx_msg(skb, ndev,
 nets[rnet->mport->id].active[i]);
 if (count)
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 count++;
 }
 } else if (RIONET_MAC_MATCH(eth->h_dest)) {
@@ -483,7 +483,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
 return -EBUSY;
 } else {
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 header.length = l;
 header.type = be16_to_cpu(skb->protocol);
 header.unused = 0;
@@ -500,7 +500,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 * Protect skb against beeing free'd by upper
 * layers.
 */
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 ch->prof.txlen += skb->len;
 header.length = skb->len + LL_HEADER_LENGTH;
 header.type = be16_to_cpu(skb->protocol);
@@ -517,14 +517,14 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 if (hi) {
 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 if (!nskb) {
-atomic_dec(&skb->users);
+refcount_dec(&skb->users);
 skb_pull(skb, LL_HEADER_LENGTH + 2);
 ctcm_clear_busy(ch->netdev);
 return -ENOMEM;
 } else {
 skb_put_data(nskb, skb->data, skb->len);
-atomic_inc(&nskb->users);
-atomic_dec(&skb->users);
+refcount_inc(&nskb->users);
+refcount_dec(&skb->users);
 dev_kfree_skb_irq(skb);
 skb = nskb;
 }
@@ -542,7 +542,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 * Remove our header. It gets added
 * again on retransmit.
 */
-atomic_dec(&skb->users);
+refcount_dec(&skb->users);
 skb_pull(skb, LL_HEADER_LENGTH + 2);
 ctcm_clear_busy(ch->netdev);
 return -ENOMEM;
@@ -553,7 +553,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 ch->ccw[1].count = skb->len;
 skb_copy_from_linear_data(skb,
 skb_put(ch->trans_skb, skb->len), skb->len);
-atomic_dec(&skb->users);
+refcount_dec(&skb->users);
 dev_kfree_skb_irq(skb);
 ccw_idx = 0;
 } else {
@@ -679,7 +679,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 
 if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
 spin_lock_irqsave(&ch->collect_lock, saveflags);
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
 
 if (!p_header) {
@@ -716,7 +716,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 * Protect skb against beeing free'd by upper
 * layers.
 */
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 
 /*
 * IDAL support in CTCM is broken, so we have to
@@ -729,8 +729,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 goto nomem_exit;
 } else {
 skb_put_data(nskb, skb->data, skb->len);
-atomic_inc(&nskb->users);
-atomic_dec(&skb->users);
+refcount_inc(&nskb->users);
+refcount_dec(&skb->users);
 dev_kfree_skb_irq(skb);
 skb = nskb;
 }
@@ -810,7 +810,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 ch->trans_skb->len = 0;
 ch->ccw[1].count = skb->len;
 skb_put_data(ch->trans_skb, skb->data, skb->len);
-atomic_dec(&skb->users);
+refcount_dec(&skb->users);
 dev_kfree_skb_irq(skb);
 ccw_idx = 0;
 CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
@@ -855,7 +855,7 @@ nomem_exit:
 "%s(%s): MEMORY allocation ERROR\n",
 CTCM_FUNTAIL, ch->id);
 rc = -ENOMEM;
-atomic_dec(&skb->users);
+refcount_dec(&skb->users);
 dev_kfree_skb_any(skb);
 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
 done:
@@ -743,7 +743,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 conn->prof.tx_pending--;
 if (single_flag) {
 if ((skb = skb_dequeue(&conn->commit_queue))) {
-atomic_dec(&skb->users);
+refcount_dec(&skb->users);
 if (privptr) {
 privptr->stats.tx_packets++;
 privptr->stats.tx_bytes +=
@@ -766,7 +766,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 txbytes += skb->len;
 txpackets++;
 stat_maxcq++;
-atomic_dec(&skb->users);
+refcount_dec(&skb->users);
 dev_kfree_skb_any(skb);
 }
 if (conn->collect_len > conn->prof.maxmulti)
@@ -958,7 +958,7 @@ static void netiucv_purge_skb_queue(struct sk_buff_head *q)
 struct sk_buff *skb;
 
 while ((skb = skb_dequeue(q))) {
-atomic_dec(&skb->users);
+refcount_dec(&skb->users);
 dev_kfree_skb_any(skb);
 }
 }
@@ -1176,7 +1176,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
 IUCV_DBF_TEXT(data, 2,
 "EBUSY from netiucv_transmit_skb\n");
 } else {
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 skb_queue_tail(&conn->collect_queue, skb);
 conn->collect_len += l;
 rc = 0;
@@ -1245,7 +1245,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
 } else {
 if (copied)
 dev_kfree_skb(skb);
-atomic_inc(&nskb->users);
+refcount_inc(&nskb->users);
 skb_queue_tail(&conn->commit_queue, nskb);
 }
 }
@@ -1242,7 +1242,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
 iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
 }
 }
-atomic_dec(&skb->users);
+refcount_dec(&skb->users);
 dev_kfree_skb_any(skb);
 skb = skb_dequeue(&buf->skb_list);
 }
@@ -3975,7 +3975,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 int flush_cnt = 0, hdr_len, large_send = 0;
 
 buffer = buf->buffer;
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 skb_queue_tail(&buf->skb_list, skb);
 
 /*check first on TSO ....*/
@@ -761,7 +761,7 @@ struct sk_buff {
 unsigned char *head,
 *data;
 unsigned int truesize;
-atomic_t users;
+refcount_t users;
 };
 
 #ifdef __KERNEL__
@@ -872,9 +872,9 @@ static inline bool skb_unref(struct sk_buff *skb)
 {
 if (unlikely(!skb))
 return false;
-if (likely(atomic_read(&skb->users) == 1))
+if (likely(refcount_read(&skb->users) == 1))
 smp_rmb();
-else if (likely(!atomic_dec_and_test(&skb->users)))
+else if (likely(!refcount_dec_and_test(&skb->users)))
 return false;
 
 return true;
@@ -1283,7 +1283,7 @@ static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
 */
 static inline struct sk_buff *skb_get(struct sk_buff *skb)
 {
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 return skb;
 }
 
@@ -1384,7 +1384,7 @@ static inline void __skb_header_release(struct sk_buff *skb)
 */
 static inline int skb_shared(const struct sk_buff *skb)
 {
-return atomic_read(&skb->users) != 1;
+return refcount_read(&skb->users) != 1;
 }
 
 /**
@@ -188,7 +188,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
 }
 }
 *peeked = 1;
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 } else {
 __skb_unlink(skb, queue);
 if (destructor)
@@ -358,7 +358,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
 spin_lock_bh(&sk_queue->lock);
 if (skb == skb_peek(sk_queue)) {
 __skb_unlink(skb, sk_queue);
-atomic_dec(&skb->users);
+refcount_dec(&skb->users);
 if (destructor)
 destructor(sk, skb);
 err = 0;
@@ -1862,7 +1862,7 @@ static inline int deliver_skb(struct sk_buff *skb,
 {
 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
 return -ENOMEM;
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
@@ -2484,10 +2484,10 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 if (unlikely(!skb))
 return;
 
-if (likely(atomic_read(&skb->users) == 1)) {
+if (likely(refcount_read(&skb->users) == 1)) {
 smp_rmb();
-atomic_set(&skb->users, 0);
-} else if (likely(!atomic_dec_and_test(&skb->users))) {
+refcount_set(&skb->users, 0);
+} else if (likely(!refcount_dec_and_test(&skb->users))) {
 return;
 }
 get_kfree_skb_cb(skb)->reason = reason;
@@ -3955,7 +3955,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 
 clist = clist->next;
 
-WARN_ON(atomic_read(&skb->users));
+WARN_ON(refcount_read(&skb->users));
 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
 trace_consume_skb(skb);
 else
@@ -277,7 +277,7 @@ static void zap_completion_queue(void)
 struct sk_buff *skb = clist;
 clist = clist->next;
 if (!skb_irq_freeable(skb)) {
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 dev_kfree_skb_any(skb); /* put this one back */
 } else {
 __kfree_skb(skb);
@@ -309,7 +309,7 @@ repeat:
 return NULL;
 }
 
-atomic_set(&skb->users, 1);
+refcount_set(&skb->users, 1);
 skb_reserve(skb, reserve);
 return skb;
 }
@@ -3363,7 +3363,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 {
 ktime_t idle_start = ktime_get();
 
-while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+while (refcount_read(&(pkt_dev->skb->users)) != 1) {
 if (signal_pending(current))
 break;
 
@@ -3420,7 +3420,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
 skb = pkt_dev->skb;
 skb->protocol = eth_type_trans(skb, skb->dev);
-atomic_add(burst, &skb->users);
+refcount_add(burst, &skb->users);
 local_bh_disable();
 do {
 ret = netif_receive_skb(skb);
@@ -3428,11 +3428,11 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 pkt_dev->errors++;
 pkt_dev->sofar++;
 pkt_dev->seq_num++;
-if (atomic_read(&skb->users) != burst) {
+if (refcount_read(&skb->users) != burst) {
 /* skb was queued by rps/rfs or taps,
 * so cannot reuse this skb
 */
-atomic_sub(burst - 1, &skb->users);
+WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
 /* get out of the loop and wait
 * until skb is consumed
 */
@@ -3446,7 +3446,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 goto out; /* Skips xmit_mode M_START_XMIT */
 } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
 local_bh_disable();
-atomic_inc(&pkt_dev->skb->users);
+refcount_inc(&pkt_dev->skb->users);
 
 ret = dev_queue_xmit(pkt_dev->skb);
 switch (ret) {
@@ -3487,7 +3487,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 pkt_dev->last_ok = 0;
 goto unlock;
 }
-atomic_add(burst, &pkt_dev->skb->users);
+refcount_add(burst, &pkt_dev->skb->users);
 
 xmit_more:
 ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
@@ -3513,11 +3513,11 @@ xmit_more:
 /* fallthru */
 case NETDEV_TX_BUSY:
 /* Retry it next time */
-atomic_dec(&(pkt_dev->skb->users));
+refcount_dec(&(pkt_dev->skb->users));
 pkt_dev->last_ok = 0;
 }
 if (unlikely(burst))
-atomic_sub(burst, &pkt_dev->skb->users);
+WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
 unlock:
 HARD_TX_UNLOCK(odev, txq);
@@ -649,7 +649,7 @@ int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int g
 
 NETLINK_CB(skb).dst_group = group;
 if (echo)
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
 if (echo)
 err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
@@ -176,7 +176,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
 memset(skb, 0, offsetof(struct sk_buff, tail));
 skb->head = NULL;
 skb->truesize = sizeof(struct sk_buff);
-atomic_set(&skb->users, 1);
+refcount_set(&skb->users, 1);
 
 skb->mac_header = (typeof(skb->mac_header))~0U;
 out:
@@ -247,7 +247,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 /* Account for allocated memory : skb + skb->head */
 skb->truesize = SKB_TRUESIZE(size);
 skb->pfmemalloc = pfmemalloc;
-atomic_set(&skb->users, 1);
+refcount_set(&skb->users, 1);
 skb->head = data;
 skb->data = data;
 skb_reset_tail_pointer(skb);
@@ -314,7 +314,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 
 memset(skb, 0, offsetof(struct sk_buff, tail));
 skb->truesize = SKB_TRUESIZE(size);
-atomic_set(&skb->users, 1);
+refcount_set(&skb->users, 1);
 skb->head = data;
 skb->data = data;
 skb_reset_tail_pointer(skb);
@@ -915,7 +915,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 C(head_frag);
 C(data);
 C(truesize);
-atomic_set(&n->users, 1);
+refcount_set(&n->users, 1);
 
 atomic_inc(&(skb_shinfo(skb)->dataref));
 skb->cloned = 1;
@@ -353,7 +353,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 ireq->pktopts = skb;
 }
 ireq->ir_iif = sk->sk_bound_dev_if;
@@ -194,7 +194,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 ireq->pktopts = skb;
 }
@@ -734,7 +734,7 @@ static void tcp_v6_init_req(struct request_sock *req,
 np->rxopt.bits.rxinfo ||
 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 np->rxopt.bits.rxohlim || np->repflow)) {
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 ireq->pktopts = skb;
 }
 }
@@ -203,11 +203,11 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
 
 sock_hold(sk);
 if (*skb2 == NULL) {
-if (atomic_read(&skb->users) != 1) {
+if (refcount_read(&skb->users) != 1) {
 *skb2 = skb_clone(skb, allocation);
 } else {
 *skb2 = skb;
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 }
 }
 if (*skb2 != NULL) {
@@ -1848,7 +1848,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 }
 
 if (dst_group) {
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
 }
 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
@@ -2226,7 +2226,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 struct netlink_sock *nlk;
 int ret;
 
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 
 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
 if (sk == NULL) {
@@ -2431,7 +2431,7 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
 int exclude_portid = 0;
 
 if (report) {
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 exclude_portid = portid;
 }
@@ -27,7 +27,7 @@ void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
 const void *here = __builtin_return_address(0);
 int n = atomic_inc_return(select_skb_count(op));
-trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 }
 
 /*
@@ -38,7 +38,7 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 const void *here = __builtin_return_address(0);
 if (skb) {
 int n = atomic_read(select_skb_count(op));
-trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 }
 }
 
@@ -49,7 +49,7 @@ void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
 const void *here = __builtin_return_address(0);
 int n = atomic_inc_return(select_skb_count(op));
-trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 skb_get(skb);
 }
 
@@ -63,7 +63,7 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 int n;
 CHECK_SLAB_OKAY(&skb->users);
 n = atomic_dec_return(select_skb_count(op));
-trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 kfree_skb(skb);
 }
 }
@@ -78,7 +78,7 @@ void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 int n;
 CHECK_SLAB_OKAY(&skb->users);
 n = atomic_dec_return(select_skb_count(op));
-trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 kfree_skb(skb);
 }
 }
@@ -93,7 +93,7 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
 while ((skb = skb_dequeue((list))) != NULL) {
 int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
 trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
-atomic_read(&skb->users), n, here);
+refcount_read(&skb->users), n, here);
 kfree_skb(skb);
 }
 }
@@ -1102,7 +1102,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
-atomic_read(&chunk->skb->users) : -1);
+refcount_read(&chunk->skb->users) : -1);
 
 /* Add the chunk to the packet. */
 status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
@@ -7563,7 +7563,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
 if (flags & MSG_PEEK) {
 skb = skb_peek(&sk->sk_receive_queue);
 if (skb)
-atomic_inc(&skb->users);
+refcount_inc(&skb->users);
 } else {
 skb = __skb_dequeue(&sk->sk_receive_queue);
 }