/root/src/xen/xen/common/bitmap.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * lib/bitmap.c |
3 | | * Helper functions for bitmap.h. |
4 | | * |
5 | | * This source code is licensed under the GNU General Public License, |
6 | | * Version 2. See the file COPYING for more details. |
7 | | */ |
8 | | #include <xen/types.h> |
9 | | #include <xen/errno.h> |
10 | | #include <xen/bitmap.h> |
11 | | #include <xen/bitops.h> |
12 | | #include <asm/byteorder.h> |
13 | | |
14 | | /* |
 * bitmaps provide an array of bits, implemented using an
 * array of unsigned longs. The number of valid bits in a
17 | | * given bitmap does _not_ need to be an exact multiple of |
18 | | * BITS_PER_LONG. |
19 | | * |
20 | | * The possible unused bits in the last, partially used word |
21 | | * of a bitmap are 'don't care'. The implementation makes |
22 | | * no particular effort to keep them zero. It ensures that |
23 | | * their value will not affect the results of any operation. |
24 | | * The bitmap operations that return Boolean (bitmap_empty, |
25 | | * for example) or scalar (bitmap_weight, for example) results |
26 | | * carefully filter out these unused bits from impacting their |
27 | | * results. |
28 | | * |
29 | | * These operations actually hold to a slightly stronger rule: |
30 | | * if you don't input any bitmaps to these ops that have some |
31 | | * unused bits set, then they won't output any set unused bits |
32 | | * in output bitmaps. |
33 | | * |
34 | | * The byte ordering of bitmaps is more natural on little |
35 | | * endian architectures. See the big-endian headers |
36 | | * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h |
37 | | * for the best explanations of this ordering. |
38 | | */ |
39 | | |
40 | | /* |
41 | | * If a bitmap has a number of bits which is not a multiple of 8 then |
42 | | * the last few bits of the last byte of the bitmap can be |
43 | | * unexpectedly set which can confuse consumers (e.g. in the tools) |
44 | | * who also round up their loops to 8 bits. Ensure we clear those left |
45 | | * over bits so as to prevent surprises. |
46 | | */ |
/* Zero the unused high-order bits of the final byte of a bitmap. */
static void clamp_last_byte(uint8_t *bp, unsigned int nbits)
{
	unsigned int partial_bits = nbits & 7;

	if (partial_bits != 0)
		bp[nbits / 8] &= (uint8_t)((1U << partial_bits) - 1);
}
54 | | |
55 | | int __bitmap_empty(const unsigned long *bitmap, int bits) |
56 | 37.7M | { |
57 | 37.7M | int k, lim = bits/BITS_PER_LONG; |
58 | 37.7M | for (k = 0; k < lim; ++k) |
59 | 3 | if (bitmap[k]) |
60 | 2 | return 0; |
61 | 37.7M | |
62 | 37.7M | if (bits % BITS_PER_LONG) |
63 | 37.7M | if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) |
64 | 36.5M | return 0; |
65 | 37.7M | |
66 | 1.19M | return 1; |
67 | 37.7M | } |
68 | | EXPORT_SYMBOL(__bitmap_empty); |
69 | | |
70 | | int __bitmap_full(const unsigned long *bitmap, int bits) |
71 | 0 | { |
72 | 0 | int k, lim = bits/BITS_PER_LONG; |
73 | 0 | for (k = 0; k < lim; ++k) |
74 | 0 | if (~bitmap[k]) |
75 | 0 | return 0; |
76 | 0 |
|
77 | 0 | if (bits % BITS_PER_LONG) |
78 | 0 | if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) |
79 | 0 | return 0; |
80 | 0 |
|
81 | 0 | return 1; |
82 | 0 | } |
83 | | EXPORT_SYMBOL(__bitmap_full); |
84 | | |
85 | | int __bitmap_equal(const unsigned long *bitmap1, |
86 | | const unsigned long *bitmap2, int bits) |
87 | 0 | { |
88 | 0 | int k, lim = bits/BITS_PER_LONG; |
89 | 0 | for (k = 0; k < lim; ++k) |
90 | 0 | if (bitmap1[k] != bitmap2[k]) |
91 | 0 | return 0; |
92 | 0 |
|
93 | 0 | if (bits % BITS_PER_LONG) |
94 | 0 | if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) |
95 | 0 | return 0; |
96 | 0 |
|
97 | 0 | return 1; |
98 | 0 | } |
99 | | EXPORT_SYMBOL(__bitmap_equal); |
100 | | |
101 | | void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits) |
102 | 11 | { |
103 | 11 | int k, lim = bits/BITS_PER_LONG; |
104 | 55 | for (k = 0; k < lim; ++k) |
105 | 44 | dst[k] = ~src[k]; |
106 | 11 | |
107 | 11 | if (bits % BITS_PER_LONG) |
108 | 0 | dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits); |
109 | 11 | } |
110 | | EXPORT_SYMBOL(__bitmap_complement); |
111 | | |
112 | | /* |
113 | | * __bitmap_shift_right - logical right shift of the bits in a bitmap |
114 | | * @dst - destination bitmap |
115 | | * @src - source bitmap |
116 | | * @nbits - shift by this many bits |
117 | | * @bits - bitmap size, in bits |
118 | | * |
119 | | * Shifting right (dividing) means moving bits in the MS -> LS bit |
120 | | * direction. Zeros are fed into the vacated MS positions and the |
121 | | * LS bits shifted off the bottom are lost. |
122 | | */ |
void __bitmap_shift_right(unsigned long *dst,
			const unsigned long *src, int shift, int bits)
{
	/*
	 * lim: total words backing the bitmap; left: number of valid bits
	 * in the final, partial word (0 when the bitmap fills it exactly).
	 * off/rem: the shift split into whole words and a sub-word part.
	 */
	int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
	int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
	unsigned long mask = (1UL << left) - 1;
	for (k = 0; off + k < lim; ++k) {
		unsigned long upper, lower;

		/*
		 * If shift is not word aligned, take lower rem bits of
		 * word above and make them the top rem bits of result.
		 */
		if (!rem || off + k + 1 >= lim)
			upper = 0;
		else {
			upper = src[off + k + 1];
			/* Unused bits of the source's last word are don't-care. */
			if (off + k + 1 == lim - 1 && left)
				upper &= mask;
		}
		lower = src[off + k];
		if (left && off + k == lim - 1)
			lower &= mask;
		dst[k] = rem
			? (upper << (BITS_PER_LONG - rem)) | (lower >> rem)
			: lower;
		/* Keep the result's own unused tail bits clear too. */
		if (left && k == lim - 1)
			dst[k] &= mask;
	}
	/* Zero the words vacated at the top by the whole-word part of the shift. */
	if (off)
		memset(&dst[lim - off], 0, off*sizeof(unsigned long));
}
155 | | EXPORT_SYMBOL(__bitmap_shift_right); |
156 | | |
157 | | |
158 | | /* |
159 | | * __bitmap_shift_left - logical left shift of the bits in a bitmap |
160 | | * @dst - destination bitmap |
161 | | * @src - source bitmap |
162 | | * @nbits - shift by this many bits |
163 | | * @bits - bitmap size, in bits |
164 | | * |
165 | | * Shifting left (multiplying) means moving bits in the LS -> MS |
166 | | * direction. Zeros are fed into the vacated LS bit positions |
167 | | * and those MS bits shifted off the top are lost. |
168 | | */ |
169 | | |
void __bitmap_shift_left(unsigned long *dst,
			const unsigned long *src, int shift, int bits)
{
	/*
	 * lim: total words backing the bitmap; left: number of valid bits
	 * in the final, partial word (0 when the bitmap fills it exactly).
	 * off/rem: the shift split into whole words and a sub-word part.
	 * Walk top-down so in-place shifts do not clobber unread source words.
	 */
	int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
	int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
	for (k = lim - off - 1; k >= 0; --k) {
		unsigned long upper, lower;

		/*
		 * If shift is not word aligned, take upper rem bits of
		 * word below and make them the bottom rem bits of result.
		 */
		if (rem && k > 0)
			lower = src[k - 1];
		else
			lower = 0;
		upper = src[k];
		/* Unused bits of the source's last word are don't-care. */
		if (left && k == lim - 1)
			upper &= (1UL << left) - 1;
		dst[k + off] = rem ? (lower >> (BITS_PER_LONG - rem))
				| (upper << rem)
				: upper;
		/* Keep the result's own unused tail bits clear too. */
		if (left && k + off == lim - 1)
			dst[k + off] &= (1UL << left) - 1;
	}
	/* Zero the words vacated at the bottom by the whole-word shift. */
	if (off)
		memset(dst, 0, off*sizeof(unsigned long));
}
198 | | EXPORT_SYMBOL(__bitmap_shift_left); |
199 | | |
/* dst = bitmap1 & bitmap2, processed a word at a time. */
void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
		const unsigned long *bitmap2, int bits)
{
	int i;
	int words = BITS_TO_LONGS(bits);

	for (i = 0; i < words; i++)
		dst[i] = bitmap1[i] & bitmap2[i];
}
209 | | EXPORT_SYMBOL(__bitmap_and); |
210 | | |
/* dst = bitmap1 | bitmap2, processed a word at a time. */
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
		const unsigned long *bitmap2, int bits)
{
	int i;
	int words = BITS_TO_LONGS(bits);

	for (i = 0; i < words; i++)
		dst[i] = bitmap1[i] | bitmap2[i];
}
220 | | EXPORT_SYMBOL(__bitmap_or); |
221 | | |
/* dst = bitmap1 ^ bitmap2, processed a word at a time. */
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
		const unsigned long *bitmap2, int bits)
{
	int i;
	int words = BITS_TO_LONGS(bits);

	for (i = 0; i < words; i++)
		dst[i] = bitmap1[i] ^ bitmap2[i];
}
231 | | EXPORT_SYMBOL(__bitmap_xor); |
232 | | |
/* dst = bitmap1 & ~bitmap2, processed a word at a time. */
void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
		const unsigned long *bitmap2, int bits)
{
	int i;
	int words = BITS_TO_LONGS(bits);

	for (i = 0; i < words; i++)
		dst[i] = bitmap1[i] & ~bitmap2[i];
}
242 | | EXPORT_SYMBOL(__bitmap_andnot); |
243 | | |
244 | | int __bitmap_intersects(const unsigned long *bitmap1, |
245 | | const unsigned long *bitmap2, int bits) |
246 | 195 | { |
247 | 195 | int k, lim = bits/BITS_PER_LONG; |
248 | 195 | for (k = 0; k < lim; ++k) |
249 | 0 | if (bitmap1[k] & bitmap2[k]) |
250 | 0 | return 1; |
251 | 195 | |
252 | 195 | if (bits % BITS_PER_LONG) |
253 | 195 | if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) |
254 | 188 | return 1; |
255 | 7 | return 0; |
256 | 195 | } |
257 | | EXPORT_SYMBOL(__bitmap_intersects); |
258 | | |
259 | | int __bitmap_subset(const unsigned long *bitmap1, |
260 | | const unsigned long *bitmap2, int bits) |
261 | 233k | { |
262 | 233k | int k, lim = bits/BITS_PER_LONG; |
263 | 233k | for (k = 0; k < lim; ++k) |
264 | 0 | if (bitmap1[k] & ~bitmap2[k]) |
265 | 0 | return 0; |
266 | 233k | |
267 | 233k | if (bits % BITS_PER_LONG) |
268 | 234k | if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) |
269 | 118 | return 0; |
270 | 233k | return 1; |
271 | 233k | } |
272 | | EXPORT_SYMBOL(__bitmap_subset); |
273 | | |
274 | | #if BITS_PER_LONG == 32 |
275 | | int __bitmap_weight(const unsigned long *bitmap, int bits) |
276 | | { |
277 | | int k, w = 0, lim = bits/BITS_PER_LONG; |
278 | | |
279 | | for (k = 0; k < lim; k++) |
280 | | w += hweight32(bitmap[k]); |
281 | | |
282 | | if (bits % BITS_PER_LONG) |
283 | | w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); |
284 | | |
285 | | return w; |
286 | | } |
287 | | #else |
288 | | int __bitmap_weight(const unsigned long *bitmap, int bits) |
289 | 429k | { |
290 | 429k | int k, w = 0, lim = bits/BITS_PER_LONG; |
291 | 429k | |
292 | 429k | for (k = 0; k < lim; k++) |
293 | 2 | w += hweight64(bitmap[k]); |
294 | 429k | |
295 | 429k | if (bits % BITS_PER_LONG) |
296 | 429k | w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); |
297 | 429k | |
298 | 429k | return w; |
299 | 429k | } |
300 | | #endif |
301 | | EXPORT_SYMBOL(__bitmap_weight); |
302 | | |
/*
 * Bitmap printing & parsing functions: first version by Bill Irwin,
 * second version by Paul Jackson, third by Joe Korty.
 */

/* Hex output is emitted in 32-bit (8 hex digit) chunks. */
#define CHUNKSZ				32
#define nbits_to_hold_value(val)	fls(val)
/* Round (val) up to the next multiple of (modulus), a power of two. */
#define roundup_power2(val,modulus)	(((val) + (modulus) - 1) & ~((modulus) - 1))
/* Numeric value of a hex digit character (assumes isxdigit input). */
#define unhex(c)			(isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
#define BASEDEC 10		/* fancier cpuset lists input in decimal */
313 | | |
314 | | /** |
315 | | * bitmap_scnprintf - convert bitmap to an ASCII hex string. |
316 | | * @buf: byte buffer into which string is placed |
317 | | * @buflen: reserved size of @buf, in bytes |
318 | | * @maskp: pointer to bitmap to convert |
319 | | * @nmaskbits: size of bitmap, in bits |
320 | | * |
321 | | * Exactly @nmaskbits bits are displayed. Hex digits are grouped into |
322 | | * comma-separated sets of eight digits per set. |
323 | | */ |
int bitmap_scnprintf(char *buf, unsigned int buflen,
	const unsigned long *maskp, int nmaskbits)
{
	int i, word, bit, len = 0;
	unsigned long val;
	const char *sep = "";
	int chunksz;
	u32 chunkmask;

	/* The first (most significant) chunk printed may be partial. */
	chunksz = nmaskbits & (CHUNKSZ - 1);
	if (chunksz == 0)
		chunksz = CHUNKSZ;

	/* Walk chunk start positions from most to least significant. */
	i = roundup_power2(nmaskbits, CHUNKSZ) - CHUNKSZ;
	for (; i >= 0; i -= CHUNKSZ) {
		/* 1ULL keeps the shift defined when chunksz == 32. */
		chunkmask = ((1ULL << chunksz) - 1);
		word = i / BITS_PER_LONG;
		bit = i % BITS_PER_LONG;
		val = (maskp[word] >> bit) & chunkmask;
		/* One hex digit per 4 bits, zero-padded to the chunk width. */
		len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep,
			(chunksz+3)/4, val);
		/* All remaining chunks are full width and comma-separated. */
		chunksz = CHUNKSZ;
		sep = ",";
	}
	return len;
}
350 | | EXPORT_SYMBOL(bitmap_scnprintf); |
351 | | |
352 | | /* |
353 | | * bscnl_emit(buf, buflen, rbot, rtop, bp) |
354 | | * |
355 | | * Helper routine for bitmap_scnlistprintf(). Write decimal number |
356 | | * or range to buf, suppressing output past buf+buflen, with optional |
357 | | * comma-prefix. Return len of what would be written to buf, if it |
358 | | * all fit. |
359 | | */ |
/*
 * Append a decimal number (rbot == rtop) or a "rbot-rtop" range to buf,
 * preceded by a comma when output already exists.  Returns the length
 * that would have been written had it all fit.
 */
static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
{
	if (len > 0)
		len += scnprintf(buf + len, buflen - len, ",");
	len += (rbot == rtop)
		? scnprintf(buf + len, buflen - len, "%d", rbot)
		: scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop);
	return len;
}
370 | | |
371 | | /** |
372 | | * bitmap_scnlistprintf - convert bitmap to list format ASCII string |
373 | | * @buf: byte buffer into which string is placed |
374 | | * @buflen: reserved size of @buf, in bytes |
375 | | * @maskp: pointer to bitmap to convert |
376 | | * @nmaskbits: size of bitmap, in bits |
377 | | * |
378 | | * Output format is a comma-separated list of decimal numbers and |
379 | | * ranges. Consecutively set bits are shown as two hyphen-separated |
380 | | * decimal numbers, the smallest and largest bit numbers set in |
381 | | * the range. Output format is compatible with the format |
382 | | * accepted as input by bitmap_parselist(). |
383 | | * |
384 | | * The return value is the number of characters which were output, |
385 | | * excluding the trailing '\0'. |
386 | | */ |
int bitmap_scnlistprintf(char *buf, unsigned int buflen,
	const unsigned long *maskp, int nmaskbits)
{
	int len = 0;
	/* current bit is 'cur', most recently seen range is [rbot, rtop] */
	int cur, rbot, rtop;

	rbot = cur = find_first_bit(maskp, nmaskbits);
	while (cur < nmaskbits) {
		rtop = cur;
		cur = find_next_bit(maskp, nmaskbits, cur+1);
		/* Emit the pending range once a gap, or the end, is reached. */
		if (cur >= nmaskbits || cur > rtop + 1) {
			len = bscnl_emit(buf, buflen, rbot, rtop, len);
			rbot = cur;
		}
	}
	/* Empty bitmap: still produce a terminated (empty) string. */
	if (!len && buflen)
		*buf = 0;
	return len;
}
407 | | EXPORT_SYMBOL(bitmap_scnlistprintf); |
408 | | |
409 | | /** |
410 | | * bitmap_find_free_region - find a contiguous aligned mem region |
411 | | * @bitmap: an array of unsigned longs corresponding to the bitmap |
412 | | * @bits: number of bits in the bitmap |
413 | | * @order: region size to find (size is actually 1<<order) |
414 | | * |
415 | | * This is used to allocate a memory region from a bitmap. The idea is |
416 | | * that the region has to be 1<<order sized and 1<<order aligned (this |
417 | | * makes the search algorithm much faster). |
418 | | * |
419 | | * The region is marked as set bits in the bitmap if a free one is |
420 | | * found. |
421 | | * |
422 | | * Returns either beginning of region or negative error |
423 | | */ |
424 | | int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) |
425 | 0 | { |
426 | 0 | unsigned long mask; |
427 | 0 | int pages = 1 << order; |
428 | 0 | int i; |
429 | 0 |
|
430 | 0 | if(pages > BITS_PER_LONG) |
431 | 0 | return -EINVAL; |
432 | 0 |
|
433 | 0 | /* make a mask of the order */ |
434 | 0 | mask = (1ul << (pages - 1)); |
435 | 0 | mask += mask - 1; |
436 | 0 |
|
437 | 0 | /* run up the bitmap pages bits at a time */ |
438 | 0 | for (i = 0; i < bits; i += pages) { |
439 | 0 | int index = i/BITS_PER_LONG; |
440 | 0 | int offset = i - (index * BITS_PER_LONG); |
441 | 0 | if((bitmap[index] & (mask << offset)) == 0) { |
442 | 0 | /* set region in bimap */ |
443 | 0 | bitmap[index] |= (mask << offset); |
444 | 0 | return i; |
445 | 0 | } |
446 | 0 | } |
447 | 0 | return -ENOMEM; |
448 | 0 | } |
449 | | EXPORT_SYMBOL(bitmap_find_free_region); |
450 | | |
451 | | /** |
452 | | * bitmap_release_region - release allocated bitmap region |
453 | | * @bitmap: a pointer to the bitmap |
454 | | * @pos: the beginning of the region |
455 | | * @order: the order of the bits to release (number is 1<<order) |
456 | | * |
457 | | * This is the complement to __bitmap_find_free_region and releases |
458 | | * the found region (by clearing it in the bitmap). |
459 | | */ |
460 | | void bitmap_release_region(unsigned long *bitmap, int pos, int order) |
461 | 0 | { |
462 | 0 | int pages = 1 << order; |
463 | 0 | unsigned long mask = (1ul << (pages - 1)); |
464 | 0 | int index = pos/BITS_PER_LONG; |
465 | 0 | int offset = pos - (index * BITS_PER_LONG); |
466 | 0 | mask += mask - 1; |
467 | 0 | bitmap[index] &= ~(mask << offset); |
468 | 0 | } |
469 | | EXPORT_SYMBOL(bitmap_release_region); |
470 | | |
471 | | int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) |
472 | 0 | { |
473 | 0 | int pages = 1 << order; |
474 | 0 | unsigned long mask = (1ul << (pages - 1)); |
475 | 0 | int index = pos/BITS_PER_LONG; |
476 | 0 | int offset = pos - (index * BITS_PER_LONG); |
477 | 0 |
|
478 | 0 | /* We don't do regions of pages > BITS_PER_LONG. The |
479 | 0 | * algorithm would be a simple look for multiple zeros in the |
480 | 0 | * array, but there's no driver today that needs this. If you |
481 | 0 | * trip this BUG(), you get to code it... */ |
482 | 0 | BUG_ON(pages > BITS_PER_LONG); |
483 | 0 | mask += mask - 1; |
484 | 0 | if (bitmap[index] & (mask << offset)) |
485 | 0 | return -EBUSY; |
486 | 0 | bitmap[index] |= (mask << offset); |
487 | 0 | return 0; |
488 | 0 | } |
489 | | EXPORT_SYMBOL(bitmap_allocate_region); |
490 | | |
491 | | #ifdef __BIG_ENDIAN |
492 | | |
/*
 * Serialise @nbits bits of @lp into the byte array @bp, least
 * significant byte of each word first (big-endian build of the
 * little-endian wire format).  The unused bits of the final byte
 * are cleared.
 */
void bitmap_long_to_byte(uint8_t *bp, const unsigned long *lp, int nbits)
{
	unsigned long l;
	int i, j, b;
	const int total_bits = nbits;

	for (i = 0, b = 0; nbits > 0; i++, b += sizeof(l)) {
		l = lp[i];
		for (j = 0; (j < sizeof(l)) && (nbits > 0); j++) {
			bp[b+j] = l;
			l >>= 8;
			nbits -= 8;
		}
	}
	/*
	 * Clamp using the caller's bit count.  The loop above consumes
	 * nbits down to zero or below; passing that leftover (possibly
	 * negative) value would convert to a huge unsigned number in
	 * clamp_last_byte() and index far outside bp.
	 */
	clamp_last_byte(bp, total_bits);
}
508 | | |
/*
 * Deserialise a byte array into words: byte k contributes bits
 * [8k, 8k+7] of the bitmap, assembled least significant byte first.
 */
void bitmap_byte_to_long(unsigned long *lp, const uint8_t *bp, int nbits)
{
	int word = 0, byte = 0;

	while (nbits > 0) {
		unsigned long acc = 0;
		int shift;

		for (shift = 0; shift < (int)sizeof(acc) && nbits > 0;
		     shift++, nbits -= 8)
			acc |= (unsigned long)bp[byte++] << (shift * 8);
		lp[word++] = acc;
	}
}
523 | | |
524 | | #elif defined(__LITTLE_ENDIAN) |
525 | | |
/*
 * On little-endian machines the in-memory word layout already matches
 * the byte format, so a plain copy suffices; then clear the unused
 * bits of the final byte.
 */
void bitmap_long_to_byte(uint8_t *bp, const unsigned long *lp, int nbits)
{
	int nbytes = (nbits + 7) / 8;

	memcpy(bp, lp, nbytes);
	clamp_last_byte(bp, nbits);
}
531 | | |
532 | | void bitmap_byte_to_long(unsigned long *lp, const uint8_t *bp, int nbits) |
533 | 0 | { |
534 | 0 | /* We may need to pad the final longword with zeroes. */ |
535 | 0 | if (nbits & (BITS_PER_LONG-1)) |
536 | 0 | lp[BITS_TO_LONGS(nbits)-1] = 0; |
537 | 0 | memcpy(lp, bp, (nbits+7)/8); |
538 | 0 | } |
539 | | |
540 | | #endif |