idr.c: diff between Rev 4103 and Rev 5056
@@ -16 +16 @@
  *
  * You call it to allocate an id (an int) and associate with that id a
  * pointer or whatever, we treat it as a (void *).  You can pass this
  * id to a user for him to pass back at a later time.  You then pass
  * that id to this code and it returns your pointer.
- *
- * You can release ids at any time.  When all ids are released, most of
- * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
- * don't need to go to the memory "store" during an id allocate, just
- * so you don't need to be too concerned about locking and conflicts
- * with the slab allocator.
  */
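Editor's note: the comment above describes the allocate/look-up/release contract of this API. The sketch below (not part of the diff) shows how a caller would typically use it, assuming the usual idr interface from linux/idr.h (idr_preload(), idr_alloc(), idr_find(), idr_remove()); the structure, lock, and starting id are hypothetical.

struct my_object {
        int id;
};

static DEFINE_IDR(my_idr);              /* hypothetical idr instance */
static DEFINE_SPINLOCK(my_lock);        /* protects allocations into my_idr */

static int my_object_register(struct my_object *obj)
{
        int id;

        idr_preload(GFP_KERNEL);        /* pre-allocate layers outside the lock */
        spin_lock(&my_lock);
        id = idr_alloc(&my_idr, obj, 1, 0, GFP_NOWAIT); /* ids start at 1 */
        spin_unlock(&my_lock);
        idr_preload_end();
        if (id < 0)
                return id;              /* -ENOMEM or -ENOSPC */
        obj->id = id;
        return 0;
}

static struct my_object *my_object_lookup(int id)
{
        return idr_find(&my_idr, id);   /* hands back the pointer stored above */
}

static void my_object_unregister(struct my_object *obj)
{
        spin_lock(&my_lock);
        idr_remove(&my_idr, obj->id);   /* the id may now be reused */
        spin_unlock(&my_lock);
}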
@@ -28 +22 @@
 
 #include
 #include
@@ -134 +128 @@
         kfree(layer);
 }
 
 static inline void free_layer(struct idr *idr, struct idr_layer *p)
 {
-        if (idr->hint && idr->hint == p)
+        if (idr->hint == p)
                 RCU_INIT_POINTER(idr->hint, NULL);
         idr_layer_rcu_free(&p->rcu_head);
 }
@@ -179 +173 @@
                 id = id >> IDR_BITS;
                 __set_bit((id & IDR_MASK), p->bitmap);
         }
 }
 
-int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
         while (idp->id_free_cnt < MAX_IDR_FREE) {
                 struct idr_layer *new;
                 new = kzalloc(sizeof(struct idr_layer), gfp_mask);
                 if (new == NULL)
                         return (0);
                 move_to_free_list(idp, new);
         }
         return 1;
 }
-EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
@@ -233 +226 @@
                         l++;
                         oid = id;
                         id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 
                         /* if already at the top layer, we need to grow */
-                        if (id >= 1 << (idp->layers * IDR_BITS)) {
+                        if (id > idr_max(idp->layers)) {
                                 *starting_id = id;
                                 return -EAGAIN;
                         }
                         p = pa[l];
@@ -357 +350 @@
         rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
         pa[0]->count++;
         idr_mark_full(pa, id);
 }
-
-int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
-{
-        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
-        int rv;
-
-        rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
-        if (rv < 0)
-                return rv == -ENOMEM ? -EAGAIN : rv;
-
-        idr_fill_slot(idp, ptr, rv, pa);
-        *id = rv;
-        return 0;
-}
-EXPORT_SYMBOL(__idr_get_new_above);
 
 
 /**
  * idr_preload - preload for idr_alloc()
@@ -548 +527 @@
         struct idr_layer *to_free;
 
         if (id < 0)
+                return;
+
+        if (id > idr_max(idp->layers)) {
+                idr_remove_warning(id);
                 return;
+        }
 
         sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
         if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
             idp->top->ary[0]) {
@@ -565 +549 @@
                 --idp->layers;
                 to_free->count = 0;
                 bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
                 free_layer(idp, to_free);
         }
-        while (idp->id_free_cnt >= MAX_IDR_FREE) {
-                p = get_from_free_list(idp);
-                /*
-                 * Note: we don't call the rcu callback here, since the only
-                 * layers that fall into the freelist are those that have been
-                 * preallocated.
-                 */
-                kfree(p);
-        }
-        return;
 }
 EXPORT_SYMBOL(idr_remove);
@@ -582 +556 @@
 
-void __idr_remove_all(struct idr *idp)
+static void __idr_remove_all(struct idr *idp)
 {
         int n, id, max;
         int bt_mask;
         struct idr_layer *p;
         struct idr_layer *pa[MAX_IDR_LEVEL + 1];
         struct idr_layer **paa = &pa[0];
 
         n = idp->layers * IDR_BITS;
-        p = idp->top;
+        *paa = idp->top;
         rcu_assign_pointer(idp->top, NULL);
         max = idr_max(idp->layers);
 
         id = 0;
         while (id >= 0 && id <= max) {
+                p = *paa;
                 while (n > IDR_BITS && p) {
                         n -= IDR_BITS;
-                        *paa++ = p;
                         p = p->ary[(id >> n) & IDR_MASK];
+                        *++paa = p;
                 }
 
                 bt_mask = id;
                 id += 1 << n;
                 /* Get the highest bit that the above add changed from 0->1. */
                 while (n < fls(id ^ bt_mask)) {
-                        if (p)
-                                free_layer(idp, p);
+                        if (*paa)
+                                free_layer(idp, *paa);
                         n += IDR_BITS;
-                        p = *--paa;
+                        --paa;
                 }
         }
         idp->layers = 0;
 }
-EXPORT_SYMBOL(__idr_remove_all);
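A small standalone sketch of the ascend step above (the "highest bit that the add changed from 0->1" comment), not part of the diff. It assumes IDR_BITS == 8 and stands in for kernel fls() with a GCC/Clang builtin so the arithmetic can be checked in isolation.

#include <assert.h>

/* kernel-style fls(): 1-based index of the most significant set bit, 0 for 0 */
static int fls_(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        /* Assuming IDR_BITS == 8: after finishing leaf ids 0xFF00..0xFFFF the
         * walk does id = 0xFF00 + (1 << 8) = 0x10000.  fls(0x10000 ^ 0xFF00)
         * is 17, so the "while (n < fls(...))" loop pops two layers
         * (n: 8 -> 16 -> 24): both the leaf and its parent are complete. */
        unsigned int bt_mask = 0xFF00;
        unsigned int id = bt_mask + (1u << 8);

        assert(fls_(id ^ bt_mask) == 17);
        return 0;
}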
@@ -690 +664 @@
         struct idr_layer *p;
         struct idr_layer *pa[MAX_IDR_LEVEL + 1];
         struct idr_layer **paa = &pa[0];
 
         n = idp->layers * IDR_BITS;
-        p = rcu_dereference_raw(idp->top);
+        *paa = rcu_dereference_raw(idp->top);
         max = idr_max(idp->layers);
 
         id = 0;
         while (id >= 0 && id <= max) {
+                p = *paa;
                 while (n > 0 && p) {
                         n -= IDR_BITS;
-                        *paa++ = p;
                         p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
+                        *++paa = p;
                 }
 
@@ -710 +685 @@
                 }
 
                 id += 1 << n;
                 while (n < fls(id)) {
                         n += IDR_BITS;
-                        p = *--paa;
+                        --paa;
                 }
         }
 
@@ -738 +713 @@
         struct idr_layer **paa = &pa[0];
         int id = *nextidp;
         int n, max;
 
         /* find first ent */
-        p = rcu_dereference_raw(idp->top);
+        p = *paa = rcu_dereference_raw(idp->top);
         if (!p)
                 return NULL;
         n = (p->layer + 1) * IDR_BITS;
         max = idr_max(p->layer + 1);
 
         while (id >= 0 && id <= max) {
+                p = *paa;
                 while (n > 0 && p) {
                         n -= IDR_BITS;
-                        *paa++ = p;
                         p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
+                        *++paa = p;
                 }
 
@@ -766 +742 @@
                  * beginning of the next layer using round_up().
                  */
                 id = round_up(id + 1, 1 << n);
                 while (n < fls(id)) {
                         n += IDR_BITS;
-                        p = *--paa;
+                        --paa;
                 }
         }
         return NULL;
 }
 EXPORT_SYMBOL(idr_get_next);
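Worked example (illustrative, not part of the diff) of the round_up() comment in the hunk above, assuming IDR_BITS == 8 so a leaf covers 256 ids. The macro below is a local stand-in equivalent to the kernel's round_up() for power-of-two alignment.

#include <assert.h>

#define round_up_(x, y) ((((x) - 1) | ((y) - 1)) + 1)   /* y must be a power of 2 */

int main(void)
{
        /* If the scan stands at id 300 inside the leaf [256, 511] and finds
         * nothing, round_up(301, 256) moves it to 512, the first id of the
         * next leaf.  A plain "id += 1 << n" would land on 556 and silently
         * skip ids 512..555. */
        assert(round_up_(301, 256) == 512);
        assert(300 + (1 << 8) == 556);
        return 0;
}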
@@ -796 +772 @@
         if (id < 0)
                 return ERR_PTR(-EINVAL);
 
         p = idp->top;
         if (!p)
-                return ERR_PTR(-EINVAL);
-
-        n = (p->layer+1) * IDR_BITS;
+                return ERR_PTR(-ENOENT);
 
-        if (id >= (1 << n))
-                return ERR_PTR(-EINVAL);
+        if (id > idr_max(p->layer + 1))
+                return ERR_PTR(-ENOENT);
 
-        n -= IDR_BITS;
+        n = p->layer * IDR_BITS;
         while ((n > 0) && p) {
@@ -840 +814 @@
         memset(idp, 0, sizeof(struct idr));
         spin_lock_init(&idp->lock);
 }
 EXPORT_SYMBOL(idr_init);
+
+static int idr_has_entry(int id, void *p, void *data)
+{
+        return 1;
+}
+
+bool idr_is_empty(struct idr *idp)
+{
+        return !idr_for_each(idp, idr_has_entry, NULL);
+}
+EXPORT_SYMBOL(idr_is_empty);
 
 /**
  * DOC: IDA description
@@ -1004 +988 @@
         int idr_id = id / IDA_BITMAP_BITS;
         int offset = id % IDA_BITMAP_BITS;
         int n;
         struct ida_bitmap *bitmap;
 
+        if (idr_id > idr_max(ida->idr.layers))
+                goto err;
+
         /* clear full bits while looking up the leaf idr_layer */
         while ((shift > 0) && p) {
                 n = (idr_id >> shift) & IDR_MASK;
                 __clear_bit(n, p->bitmap);
@@ -1019 +1006 @@
 
         n = idr_id & IDR_MASK;
         __clear_bit(n, p->bitmap);
 
         bitmap = (void *)p->ary[n];
-        if (!test_bit(offset, bitmap->bitmap))
+        if (!bitmap || !test_bit(offset, bitmap->bitmap))
                 goto err;
 
@@ -1242 +1229 @@
         res = (res + (res >> 4)) & 0x0F0F0F0F;
         res = res + (res >> 8);
         return (res + (res >> 16)) & 0x000000FF;
 }
+
+unsigned long hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+        return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+        __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+        res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+        res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+        res = res + (res >> 8);
+        res = res + (res >> 16);
+        return (res + (res >> 32)) & 0x00000000000000FFul;
+#endif
+}
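The hweight helpers above use the classic SWAR population count: each step folds neighbouring bit fields (pairs, then nibbles, then bytes) into wider partial sums. A quick standalone cross-check against the compiler builtin, illustrative only and not part of the file:

#include <assert.h>

int main(void)
{
        /* 0xF0F00003 has ten set bits (4 + 4 + 2), matching hweight32();
         * 0x8000000000000001 has two, matching hweight64(). */
        assert(__builtin_popcount(0xF0F00003u) == 10);
        assert(__builtin_popcountll(0x8000000000000001ull) == 2);
        return 0;
}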