// SPDX-License-Identifier: GPL-2.0-or-later
/* ar-skbuff.c: socket buffer destruction handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

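/*
 * These helpers pick which global skb counter the tracking below adjusts:
 * buffers flagged with RXRPC_SKB_TX_BUFFER in rxrpc_skb(skb)->rx_flags are
 * accounted in rxrpc_n_tx_skbs, everything else in rxrpc_n_rx_skbs, so Tx
 * and Rx socket buffers are counted separately.
 */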
#define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER)
#define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)

/*
 * Note the allocation or reception of a socket buffer.
 */
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(skb));
	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	if (skb) {
		int n = atomic_read(select_skb_count(skb));
		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
	}
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(skb));
	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
	skb_get(skb);
}

/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	if (skb) {
		int n;
		CHECK_SLAB_OKAY(&skb->users);
		n = atomic_dec_return(select_skb_count(skb));
		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
	const void *here = __builtin_return_address(0);
	struct sk_buff *skb;
	while ((skb = skb_dequeue((list))) != NULL) {
		int n = atomic_dec_return(select_skb_count(skb));
		trace_rxrpc_skb(skb, rxrpc_skb_purged,
				refcount_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}
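
/*
 * Illustrative pairing of these helpers (a sketch only; the trace op values
 * named below are assumed members of enum rxrpc_skb_trace and, apart from
 * rxrpc_skb_purged above, are not used in this file):
 *
 *	rxrpc_new_skb(skb, rxrpc_skb_received);   count and trace a fresh skb
 *	rxrpc_see_skb(skb, rxrpc_skb_seen);       trace only; count is unchanged
 *	rxrpc_get_skb(skb, rxrpc_skb_got);        count, trace and skb_get()
 *	rxrpc_free_skb(skb, rxrpc_skb_freed);     uncount, trace and kfree_skb()
 */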