/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kref.h - library routines for handling generic reference counted objects
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <[email protected]>
 * Copyright (C) 2004 IBM Corp.
 *
 * based on kobject.h which was:
 * Copyright (C) 2002-2003 Patrick Mochel <[email protected]>
 * Copyright (C) 2002-2003 Open Source Development Labs
 */

#ifndef _KREF_H_
#define _KREF_H_

#include <linux/spinlock.h>
#include <linux/refcount.h>
/**
 * struct kref - embeddable, saturating reference counter
 * @refcount: the underlying refcount_t holding the object's count.
 *
 * Embed a struct kref in an object to give it reference counting;
 * all operations delegate to the refcount_t API.
 */
struct kref {
	refcount_t refcount;
};

/* Static initializer for an embedded kref; @n is the initial count. */
#define KREF_INIT(n)	{ .refcount = REFCOUNT_INIT(n), }
  20. /**
  21. * kref_init - initialize object.
  22. * @kref: object in question.
  23. */
  24. static inline void kref_init(struct kref *kref)
  25. {
  26. refcount_set(&kref->refcount, 1);
  27. }
  28. static inline unsigned int kref_read(const struct kref *kref)
  29. {
  30. return refcount_read(&kref->refcount);
  31. }
  32. /**
  33. * kref_get - increment refcount for object.
  34. * @kref: object.
  35. */
  36. static inline void kref_get(struct kref *kref)
  37. {
  38. refcount_inc(&kref->refcount);
  39. }
  40. /**
  41. * kref_put - decrement refcount for object.
  42. * @kref: object.
  43. * @release: pointer to the function that will clean up the object when the
  44. * last reference to the object is released.
  45. * This pointer is required, and it is not acceptable to pass kfree
  46. * in as this function.
  47. *
  48. * Decrement the refcount, and if 0, call release().
  49. * Return 1 if the object was removed, otherwise return 0. Beware, if this
  50. * function returns 0, you still can not count on the kref from remaining in
  51. * memory. Only use the return value if you want to see if the kref is now
  52. * gone, not present.
  53. */
  54. static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
  55. {
  56. if (refcount_dec_and_test(&kref->refcount)) {
  57. release(kref);
  58. return 1;
  59. }
  60. return 0;
  61. }
  62. static inline int kref_put_mutex(struct kref *kref,
  63. void (*release)(struct kref *kref),
  64. struct mutex *lock)
  65. {
  66. if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
  67. release(kref);
  68. return 1;
  69. }
  70. return 0;
  71. }
  72. static inline int kref_put_lock(struct kref *kref,
  73. void (*release)(struct kref *kref),
  74. spinlock_t *lock)
  75. {
  76. if (refcount_dec_and_lock(&kref->refcount, lock)) {
  77. release(kref);
  78. return 1;
  79. }
  80. return 0;
  81. }
  82. /**
  83. * kref_get_unless_zero - Increment refcount for object unless it is zero.
  84. * @kref: object.
  85. *
  86. * Return non-zero if the increment succeeded. Otherwise return 0.
  87. *
  88. * This function is intended to simplify locking around refcounting for
  89. * objects that can be looked up from a lookup structure, and which are
  90. * removed from that lookup structure in the object destructor.
  91. * Operations on such objects require at least a read lock around
  92. * lookup + kref_get, and a write lock around kref_put + remove from lookup
  93. * structure. Furthermore, RCU implementations become extremely tricky.
  94. * With a lookup followed by a kref_get_unless_zero *with return value check*
  95. * locking in the kref_put path can be deferred to the actual removal from
  96. * the lookup structure and RCU lookups become trivial.
  97. */
  98. static inline int __must_check kref_get_unless_zero(struct kref *kref)
  99. {
  100. return refcount_inc_not_zero(&kref->refcount);
  101. }
#endif /* _KREF_H_ */