Commit 1ab9250
Pull bitmap updates from Yury Norov:

 - fix the duplicated comments on bitmap_to_arr64() (Qu Wenruo)

 - optimize out non-atomic bitops on compile-time constants (Alexander Lobakin)

 - cleanup bitmap-related headers (Yury Norov)

 - x86/olpc: fix 'logical not is only applied to the left hand side' (Alexander Lobakin)

 - lib/nodemask: inline wrappers around bitmap (Yury Norov)

* tag 'bitmap-6.0-rc1' of https://github.com/norov/linux: (26 commits)
  lib/nodemask: inline next_node_in() and node_random()
  powerpc: drop dependency on <asm/machdep.h> in archrandom.h
  x86/olpc: fix 'logical not is only applied to the left hand side'
  lib/cpumask: move some one-line wrappers to header file
  headers/deps: mm: align MANITAINERS and Docs with new gfp.h structure
  headers/deps: mm: Split <linux/gfp_types.h> out of <linux/gfp.h>
  headers/deps: mm: Optimize <linux/gfp.h> header dependencies
  lib/cpumask: move trivial wrappers around find_bit to the header
  lib/cpumask: change return types to unsigned where appropriate
  cpumask: change return types to bool where appropriate
  lib/bitmap: change type of bitmap_weight to unsigned long
  lib/bitmap: change return types to bool where appropriate
  arm: align find_bit declarations with generic kernel
  iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
  lib/test_bitmap: test the tail after bitmap_to_arr64()
  lib/bitmap: fix off-by-one in bitmap_to_arr64()
  lib: test_bitmap: add compile-time optimization/evaluations assertions
  bitmap: don't assume compiler evaluates small mem*() builtins calls
  net/ice: fix initializing the bitmap in the switch code
  bitops: let optimize out non-atomic bitops on compile-time constants
  ...
2 parents 3bc1bc0 + 36d4b36 commit 1ab9250
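
Editor's note on the headline change: the "optimize out non-atomic bitops on compile-time constants" series makes the generic bitop wrappers dispatch on __builtin_constant_p(), so a call whose arguments the compiler can fold compiles down to a constant instead of reaching the arch helper. The userspace sketch below illustrates only that dispatch idea; the demo_-prefixed names are hypothetical, and the real macro in include/linux/bitops.h checks more conditions (non-NULL pointer, foldable *addr) before taking the constant path.

#include <stdio.h>

#define DEMO_BITS_PER_LONG (8 * sizeof(unsigned long))

/* Stand-in for an arch-provided helper (possibly asm on real hardware). */
static inline void demo_arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
        addr[nr / DEMO_BITS_PER_LONG] |= 1UL << (nr % DEMO_BITS_PER_LONG);
}

/* Plain C twin the optimizer can fully evaluate when inputs are constant. */
static inline void demo_const___set_bit(unsigned long nr, unsigned long *addr)
{
        addr[nr / DEMO_BITS_PER_LONG] |= 1UL << (nr % DEMO_BITS_PER_LONG);
}

/* Dispatch: constant-foldable arguments take the plain-C path. */
#define demo___set_bit(nr, addr)                                        \
        (__builtin_constant_p(nr) ? demo_const___set_bit(nr, addr)      \
                                  : demo_arch___set_bit(nr, addr))

int main(void)
{
        unsigned long map[2] = { 0, 0 };

        demo___set_bit(3, map);                 /* constant nr: foldable path */
        printf("map[0] = %#lx\n", map[0]);      /* prints 0x8 */
        return 0;
}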

38 files changed: +1076 −789 lines

Documentation/core-api/mm-api.rst

Lines changed: 4 additions & 4 deletions

@@ -22,16 +22,16 @@ Memory Allocation Controls
 .. kernel-doc:: include/linux/gfp.h
    :internal:
 
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Page mobility and placement hints
 
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Watermark modifiers
 
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Reclaim modifiers
 
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Useful GFP flag combinations
 
 The Slab Cache

MAINTAINERS

Lines changed: 1 addition & 1 deletion

@@ -3603,7 +3603,6 @@ F: lib/bitmap.c
 F: lib/cpumask.c
 F: lib/find_bit.c
 F: lib/find_bit_benchmark.c
-F: lib/nodemask.c
 F: lib/test_bitmap.c
 F: tools/include/linux/bitmap.h
 F: tools/include/linux/find.h
@@ -13136,6 +13135,7 @@ W: http://www.linux-mm.org
 T: git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
 T: quilt git://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new
 F: include/linux/gfp.h
+F: include/linux/gfp_types.h
 F: include/linux/memory_hotplug.h
 F: include/linux/mm.h
 F: include/linux/mmzone.h

arch/alpha/include/asm/bitops.h

Lines changed: 17 additions & 15 deletions

@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline void
-__set_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         int *m = ((int *) addr) + (nr >> 5);
 
@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         int *m = ((int *) addr) + (nr >> 5);
 
@@ -94,7 +94,7 @@ static inline void
 __clear_bit_unlock(unsigned long nr, volatile void * addr)
 {
         smp_mb();
-        __clear_bit(nr, addr);
+        arch___clear_bit(nr, addr);
 }
 
 static inline void
@@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__change_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         int *m = ((int *) addr) + (nr >> 5);
 
@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_set_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         unsigned long mask = 1 << (nr & 0x1f);
         int *m = ((int *) addr) + (nr >> 5);
@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         unsigned long mask = 1 << (nr & 0x1f);
         int *m = ((int *) addr) + (nr >> 5);
@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         unsigned long mask = 1 << (nr & 0x1f);
         int *m = ((int *) addr) + (nr >> 5);
@@ -283,8 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
         return (old & mask) != 0;
 }
 
-static inline int
-test_bit(int nr, const volatile void * addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
         return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
@@ -450,6 +450,8 @@ sched_find_first_bit(const unsigned long b[2])
         return __ffs(tmp) + ofs;
 }
 
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
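
Editor's note: the pattern above repeats across the arch headers in this merge. Each arch keeps only arch___set_bit()-style primitives, and the new <asm-generic/bitops/non-instrumented-non-atomic.h> aliases the generic non-instrumented names onto them. The sketch below is paraphrased from memory of that header as introduced by this series, not a verbatim copy; consult the tree for the authoritative version.

/* Paraphrased sketch of include/asm-generic/bitops/non-instrumented-non-atomic.h */
#define ___set_bit              arch___set_bit
#define ___clear_bit            arch___clear_bit
#define ___change_bit           arch___change_bit

#define ___test_and_set_bit     arch___test_and_set_bit
#define ___test_and_clear_bit   arch___test_and_clear_bit
#define ___test_and_change_bit  arch___test_and_change_bit

#define _test_bit               arch_test_bit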

arch/arm/include/asm/bitops.h

Lines changed: 10 additions & 8 deletions

@@ -160,18 +160,20 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
 /*
  * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
  */
-extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *p,
+                                     unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);
 
 /*
  * Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
  */
-extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_be(const unsigned long *p,
+                                     unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);
 
 #ifndef CONFIG_SMP
 /*
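
Editor's note: widening these prototypes from int to unsigned long ("arm: align find_bit declarations with generic kernel" in the shortlog) matches the generic find_bit() contract, where the return value is a bit index, or the full size when no bit is found, so a signed type is wrong for very large bitmaps. A minimal userspace illustration of that contract follows; demo_find_first_bit() is a hypothetical simplified stand-in, not the kernel implementation.

#include <stdio.h>

#define DEMO_BITS_PER_LONG (8 * sizeof(unsigned long))

/* Simplified find_first_bit() with the kernel's convention: returns the
 * index of the first set bit, or `size` when no bit is set. */
static unsigned long demo_find_first_bit(const unsigned long *p, unsigned long size)
{
        for (unsigned long i = 0; i < size; i++)
                if (p[i / DEMO_BITS_PER_LONG] & (1UL << (i % DEMO_BITS_PER_LONG)))
                        return i;
        return size;    /* "not found" is size, not -1 */
}

int main(void)
{
        unsigned long map[1] = { 0x40 };        /* only bit 6 set */
        unsigned long bit = demo_find_first_bit(map, 64);

        if (bit < 64)
                printf("first set bit: %lu\n", bit);    /* prints 6 */
        return 0;
}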

arch/hexagon/include/asm/bitops.h

Lines changed: 15 additions & 9 deletions

@@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
  * be atomic, particularly for things like slab_lock and slab_unlock.
  *
  */
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         test_and_clear_bit(nr, addr);
 }
 
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         test_and_set_bit(nr, addr);
 }
 
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         test_and_change_bit(nr, addr);
 }
 
 /* Apparently, at least some of these are allowed to be non-atomic */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         return test_and_clear_bit(nr, addr);
 }
 
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         return test_and_set_bit(nr, addr);
 }
 
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         return test_and_change_bit(nr, addr);
 }
 
-static inline int __test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
         int retval;
 
@@ -172,8 +179,6 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
         return retval;
 }
 
-#define test_bit(nr, addr) __test_bit(nr, addr)
-
 /*
  * ffz - find first zero in word.
  * @word: The word to search
@@ -271,6 +276,7 @@ static inline unsigned long __fls(unsigned long word)
 }
 
 #include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
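
Editor's note: with the local test_bit macro removed, hexagon's test_bit() now comes from the common layer, which wraps arch_test_bit() with sanitizer instrumentation; the non-instrumented-non-atomic.h include then maps the plain names for contexts where instrumentation is unwanted. The sketch below is paraphrased from memory of include/asm-generic/bitops/instrumented-non-atomic.h after this series, not a verbatim copy.

/* Paraphrased sketch of the instrumented wrapper: report the read to
 * KASAN/KCSAN, then call down into the arch primitive. */
static __always_inline bool _test_bit(unsigned long nr, const volatile unsigned long *addr)
{
        instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
        return arch_test_bit(nr, addr);
}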

arch/ia64/include/asm/bitops.h

Lines changed: 22 additions & 20 deletions

@@ -53,16 +53,16 @@ set_bit (int nr, volatile void *addr)
 }
 
 /**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
  * Unlike set_bit(), this function is non-atomic and may be reordered.
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__set_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
@@ -135,16 +135,16 @@ __clear_bit_unlock(int nr, void *addr)
 }
 
 /**
- * __clear_bit - Clears a bit in memory (non-atomic version)
+ * arch___clear_bit - Clears a bit in memory (non-atomic version)
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
  * Unlike clear_bit(), this function is non-atomic and may be reordered.
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__clear_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
 }
@@ -175,16 +175,16 @@ change_bit (int nr, volatile void *addr)
 }
 
 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to toggle
  * @addr: the address to start counting from
  *
  * Unlike change_bit(), this function is non-atomic and may be reordered.
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__change_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
 }
@@ -224,16 +224,16 @@ test_and_set_bit (int nr, volatile void *addr)
 #define test_and_set_bit_lock test_and_set_bit
 
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __inline__ int
-__test_and_set_bit (int nr, volatile void *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
         __u32 *p = (__u32 *) addr + (nr >> 5);
         __u32 m = 1 << (nr & 31);
@@ -269,16 +269,16 @@ test_and_clear_bit (int nr, volatile void *addr)
 }
 
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
         __u32 *p = (__u32 *) addr + (nr >> 5);
         __u32 m = 1 << (nr & 31);
@@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr)
 }
 
 /**
- * __test_and_change_bit - Change a bit and return its old value
+ * arch___test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to change
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  */
-static __inline__ int
-__test_and_change_bit (int nr, void *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
         __u32 old, bit = (1 << (nr & 31));
         __u32 *m = (__u32 *) addr + (nr >> 5);
@@ -331,8 +331,8 @@ __test_and_change_bit (int nr, void *addr)
         return (old & bit) != 0;
 }
 
-static __inline__ int
-test_bit (int nr, const volatile void *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
         return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
@@ -443,6 +443,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)
 
 #ifdef __KERNEL__
 
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
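
Editor's note: the int-to-bool conversions above (and the "change return types to bool where appropriate" commits in the shortlog) are about more than style. Returning a masked word through int silently truncates, which can drop a set bit entirely, while bool normalizes any nonzero value to true. The ia64 helpers already compare against zero, so they were safe; the self-contained demo below just makes the general hazard concrete.

#include <stdbool.h>
#include <stdio.h>

/* Truncating return: on LP64 targets, a bit at position 32 or above is
 * cut off when the unsigned long result is narrowed to int. */
static int truncating_test(unsigned long word, unsigned long nr)
{
        return word & (1UL << nr);
}

/* bool return: any nonzero value becomes true, so no bit can be lost. */
static bool bool_test(unsigned long word, unsigned long nr)
{
        return word & (1UL << nr);
}

int main(void)
{
        unsigned long w = 1UL << 40;

        printf("int: %d, bool: %d\n", truncating_test(w, 40), bool_test(w, 40));
        /* prints "int: 0, bool: 1" on 64-bit targets */
        return 0;
}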

arch/ia64/include/asm/processor.h

Lines changed: 1 addition & 1 deletion

@@ -538,7 +538,7 @@ ia64_get_irr(unsigned int vector)
 {
         unsigned int reg = vector / 64;
         unsigned int bit = vector % 64;
-        u64 irr;
+        unsigned long irr;
 
         switch (reg) {
         case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
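
Editor's note: this change looks cosmetic on a 64-bit arch, but in-kernel u64 is unsigned long long, so &irr would not match the const volatile unsigned long * parameter that the reworked test_bit() takes, even though the two types have identical width on ia64. A minimal illustration follows; demo_test_bit() is a hypothetical stand-in for the generic helper.

#include <stdbool.h>

static bool demo_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
        return (*addr >> nr) & 1;
}

int main(void)
{
        unsigned long long as_u64  = 1ULL << 5;
        unsigned long      as_long = 1UL << 5;

        /* demo_test_bit(5, &as_u64);   would warn: incompatible pointer type */
        return !demo_test_bit(5, &as_long);     /* exits 0: bit 5 is set */
}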
