// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Jim Ma */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/ipv6.h>
/* Local copy of skb_checksum_start() for kernels that lack it. */
static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
        return skb->head + skb->csum_start;
}
/* Local copy of csum_replace_by_diff(): fold a checksum delta into *sum. */
static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
        *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}
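/*
 * Usage sketch (illustrative only, not part of the backport): the usual
 * pattern is to compute the ones'-complement delta of the bytes that were
 * rewritten and fold it into an existing header checksum. The function name
 * is hypothetical; kept commented out like the other reference code here.
 */
// static inline void example_csum_fixup(__sum16 *check,
//                                       const void *old_bytes,
//                                       const void *new_bytes, int len)
// {
//         __wsum diff = csum_sub(csum_partial(new_bytes, len, 0),
//                                csum_partial(old_bytes, len, 0));
//
//         csum_replace_by_diff(check, diff);
// }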
/*
 * Local copy of net_prefetch(): prefetch the start of a frame, issuing a
 * second prefetch on architectures whose cache lines are smaller than 128 B.
 */
static inline void net_prefetch(void *p)
{
        prefetch(p);
#if L1_CACHE_BYTES < 128
        prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}
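/*
 * Usage sketch (illustrative only): rx paths typically prefetch the start of
 * the received frame before parsing or copying the headers. The function
 * name is hypothetical.
 */
// static inline void example_rx_prefetch(struct page *page, unsigned int offset)
// {
//         net_prefetch(page_address(page) + offset);
// }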
/*
 * Local copy of dev_page_is_reusable(): an rx page may be recycled only if
 * it is node-local and was not allocated from the pfmemalloc reserves.
 */
static inline bool dev_page_is_reusable(struct page *page)
{
        return likely(page_to_nid(page) == numa_mem_id() &&
                      !page_is_pfmemalloc(page));
}
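/*
 * Usage sketch (illustrative only): a page-recycling rx ring keeps a page on
 * the ring only when dev_page_is_reusable() allows it; otherwise the page is
 * handed back to the allocator. The function name is hypothetical.
 */
// static inline bool example_rx_try_reuse(struct page *page)
// {
//         if (!dev_page_is_reusable(page))
//                 return false;           /* caller frees the page instead */
//
//         get_page(page);                 /* keep a reference for the next frame */
//         return true;
// }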
/**
* refcount_read - get a refcount's value
* @r: the refcount
*
* Return: the refcount's value
*/
static inline unsigned int refcount_read(atomic_t *r)
{
        return atomic_read(r);
}
// static inline __must_check bool __refcount_sub_and_test(int i, atomic_t *r, int *oldp)
// {
// // int old = atomic_fetch_sub_release(i, r);
// // if (oldp)
// // *oldp = old;
// // if (old == i) {
// // smp_acquire__after_ctrl_dep();
// // return true;
// // }
// // if (unlikely(old < 0 || old - i < 0))
// // refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
// return false;
// }
/**
* refcount_sub_and_test - subtract from a refcount and test if it is 0
* @i: amount to subtract from the refcount
* @r: the refcount
*
* Similar to atomic_dec_and_test(), but it will WARN, return false and
* ultimately leak on underflow and will fail to decrement when saturated
* at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides an acquire ordering on success such that free()
* must come after.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
* cases, refcount_dec(), or one of its variants, should instead be used to
* decrement a reference count.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
// static inline __must_check bool refcount_sub_and_test(int i, atomic_t *r)
// {
// return __refcount_sub_and_test(i, r, NULL);
// }
// static inline __must_check bool __refcount_dec_and_test(atomic_t *r, int *oldp)
// {
// return __refcount_sub_and_test(1, r, oldp);
// }
/**
* refcount_dec_and_test - decrement a refcount and test if it is 0
* @r: the refcount
*
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
* decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides an acquire ordering on success such that free()
* must come after.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
// static inline __must_check bool refcount_dec_and_test(atomic_t *r)
// {
// return __refcount_dec_and_test(r, NULL);
// }
/**
* skb_unref - decrement the skb's reference count
* @skb: buffer
*
* Returns true if we can free the skb.
*/
// static inline bool skb_unref(struct sk_buff *skb)
// {
// if (unlikely(!skb))
// return false;
// if (likely(atomic_read(&skb->users) == 1))
// smp_rmb();
// else if (likely(!refcount_dec_and_test(&skb->users)))
// return false;
// return true;
// }
// static void skb_release_all(struct sk_buff *skb)
// {
// skb_release_head_state(skb);
// if (likely(skb->head))
// skb_release_data(skb);
// }
// static void napi_consume_skb(struct sk_buff *skb, int budget)
// {
// /* Zero budget indicate non-NAPI context called us, like netpoll */
// if (unlikely(!budget)) {
// dev_consume_skb_any(skb);
// return;
// }
// // lockdep_assert_in_softirq();
// if (!skb_unref(skb))
// return;
// /* if reaching here SKB is ready to free */
// trace_consume_skb(skb);
// /* if SKB is a clone, don't handle this case */
// if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
// __kfree_skb(skb);
// return;
// }
// skb_release_all(skb);
// napi_skb_cache_put(skb);
// }
/* Local copies of pci_request_mem_regions()/pci_release_mem_regions(). */
static inline int
pci_request_mem_regions(struct pci_dev *pdev, const char *name)
{
        return pci_request_selected_regions(pdev,
                            pci_select_bars(pdev, IORESOURCE_MEM), name);
}
static inline void
pci_release_mem_regions(struct pci_dev *pdev)
{
        return pci_release_selected_regions(pdev,
                            pci_select_bars(pdev, IORESOURCE_MEM));
}
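/*
 * Usage sketch (illustrative only): the wrappers are normally paired in
 * probe()/remove(), claiming every memory BAR before ioremapping and giving
 * them back on teardown. Function and region names are hypothetical.
 */
// static int example_probe(struct pci_dev *pdev)
// {
//         int err;
//
//         err = pci_enable_device(pdev);
//         if (err)
//                 return err;
//
//         err = pci_request_mem_regions(pdev, "example");
//         if (err) {
//                 pci_disable_device(pdev);
//                 return err;
//         }
//
//         /* ... ioremap BAR 0, register the netdev, etc. ... */
//         return 0;
// }
//
// static void example_remove(struct pci_dev *pdev)
// {
//         pci_release_mem_regions(pdev);
//         pci_disable_device(pdev);
// }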
/*
 * Local copy of ethtool_convert_link_mode_to_legacy_u32(): collapse a link
 * mode bitmap into the legacy 32-bit mask, returning false if modes above
 * bit 31 had to be dropped.
 */
static inline bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
                                                           const unsigned long *src)
{
        bool retval = true;

        /* TODO: following test will soon always be true */
        if (__ETHTOOL_LINK_MODE_MASK_NBITS > 32) {
                __ETHTOOL_DECLARE_LINK_MODE_MASK(ext);

                bitmap_zero(ext, __ETHTOOL_LINK_MODE_MASK_NBITS);
                bitmap_fill(ext, 32);
                bitmap_complement(ext, ext, __ETHTOOL_LINK_MODE_MASK_NBITS);
                if (bitmap_intersects(ext, src,
                                      __ETHTOOL_LINK_MODE_MASK_NBITS)) {
                        /* src mask goes beyond bit 31 */
                        retval = false;
                }
        }
        *legacy_u32 = src[0];
        return retval;
}
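/*
 * Usage sketch (illustrative only): legacy ethtool paths collapse the
 * link-mode bitmap into the old 32-bit word; the return value tells the
 * caller whether modes above bit 31 were dropped. The function name is
 * hypothetical.
 */
// static inline u32 example_legacy_advertising(const struct ethtool_link_ksettings *ks)
// {
//         u32 legacy = 0;
//
//         /* returns false if advertised modes above bit 31 had to be dropped */
//         ethtool_convert_link_mode_to_legacy_u32(&legacy,
//                                                 ks->link_modes.advertising);
//         return legacy;
// }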
/*
 * Local copy of page_ref_sub_and_test() for kernels where the page
 * reference count field is still named _count.
 */
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
        return atomic_sub_and_test(nr, &page->_count);
}
/* Local copy of __page_frag_cache_drain(): drop @count references at once. */
static inline void __page_frag_cache_drain(struct page *page, unsigned int count)
{
        if (page_ref_sub_and_test(page, count)) {
                unsigned int order = compound_order(page);

                // TODO optimize with free_unref_page
                // if (order == 0)
                //         free_unref_page(page);
                // else
                __free_pages(page, order);
        }
}
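/*
 * Usage sketch (illustrative only): drivers that "bias" a page's refcount
 * while reusing it for several frames drop all of the remaining references
 * in one call when the rx ring is torn down. Names are hypothetical.
 */
// static inline void example_free_rx_page(struct page *page,
//                                         unsigned int pagecnt_bias)
// {
//         __page_frag_cache_drain(page, pagecnt_bias);
// }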