osdmap.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_OSDMAP_H
#define _FS_CEPH_OSDMAP_H

#include <linux/rbtree.h>
#include <linux/ceph/types.h>
#include <linux/ceph/decode.h>
#include <linux/crush/crush.h>

/*
 * The osd map describes the current membership of the osd cluster and
 * specifies the mapping of objects to placement groups and placement
 * groups to (sets of) osds.  That is, it completely specifies the
 * (desired) distribution of all data objects in the system at some
 * point in time.
 *
 * Each map version is identified by an epoch, which increases monotonically.
 *
 * The map can be updated either via an incremental map (diff) describing
 * the change between two successive epochs, or as a fully encoded map.
 */

struct ceph_pg {
        uint64_t pool;
        uint32_t seed;
};

#define CEPH_SPG_NOSHARD -1

struct ceph_spg {
        struct ceph_pg pgid;
        s8 shard;
};

int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs);
int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs);

#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id
                                                 together */
#define CEPH_POOL_FLAG_FULL       (1ULL << 1) /* pool is full */
#define CEPH_POOL_FLAG_FULL_QUOTA (1ULL << 10) /* pool ran out of quota,
                                                  will set FULL too */
#define CEPH_POOL_FLAG_NEARFULL   (1ULL << 11) /* pool is nearfull */

struct ceph_pg_pool_info {
        struct rb_node node;
        s64 id;
        u8 type; /* CEPH_POOL_TYPE_* */
        u8 size;
        u8 min_size;
        u8 crush_ruleset;
        u8 object_hash;
        u32 last_force_request_resend;
        u32 pg_num, pgp_num;
        int pg_num_mask, pgp_num_mask;
        s64 read_tier;
        s64 write_tier; /* wins for read+write ops */
        u64 flags; /* CEPH_POOL_FLAG_* */
        char *name;

        bool was_full; /* for handle_one_map() */
};

static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
{
        switch (pool->type) {
        case CEPH_POOL_TYPE_REP:
                return true;
        case CEPH_POOL_TYPE_EC:
                return false;
        default:
                BUG();
        }
}

struct ceph_object_locator {
        s64 pool;
        struct ceph_string *pool_ns;
};

static inline void ceph_oloc_init(struct ceph_object_locator *oloc)
{
        oloc->pool = -1;
        oloc->pool_ns = NULL;
}

static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
{
        return oloc->pool == -1;
}

void ceph_oloc_copy(struct ceph_object_locator *dest,
                    const struct ceph_object_locator *src);
void ceph_oloc_destroy(struct ceph_object_locator *oloc);

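/*
 * Example (sketch, with pool_id standing in for any valid pool id): a
 * locator names the pool an object lives in, plus an optional RADOS
 * namespace.
 *
 *        struct ceph_object_locator oloc;
 *
 *        ceph_oloc_init(&oloc);
 *        oloc.pool = pool_id;
 *        ...
 *        ceph_oloc_destroy(&oloc);
 */
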
/*
 * 51-char inline_name is long enough for all cephfs and all but one
 * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be
 * arbitrarily long (~PAGE_SIZE).  It's done once during rbd map; all
 * other rbd requests fit into inline_name.
 *
 * Makes ceph_object_id 64 bytes on 64-bit.
 */
#define CEPH_OID_INLINE_LEN 52

/*
 * Both inline and external buffers have space for a NUL-terminator,
 * which is carried around.  It's not required though - RADOS object
 * names don't have to be NUL-terminated and may contain NULs.
 */
struct ceph_object_id {
        char *name;
        char inline_name[CEPH_OID_INLINE_LEN];
        int name_len;
};

#define __CEPH_OID_INITIALIZER(oid) { .name = (oid).inline_name }

#define CEPH_DEFINE_OID_ONSTACK(oid)                            \
        struct ceph_object_id oid = __CEPH_OID_INITIALIZER(oid)

static inline void ceph_oid_init(struct ceph_object_id *oid)
{
        *oid = (struct ceph_object_id) __CEPH_OID_INITIALIZER(*oid);
}

static inline bool ceph_oid_empty(const struct ceph_object_id *oid)
{
        return oid->name == oid->inline_name && !oid->name_len;
}

void ceph_oid_copy(struct ceph_object_id *dest,
                   const struct ceph_object_id *src);
__printf(2, 3)
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...);
__printf(3, 4)
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
                     const char *fmt, ...);
void ceph_oid_destroy(struct ceph_object_id *oid);

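/*
 * Example (sketch): typical on-stack usage.  Names that fit are formatted
 * straight into inline_name; ceph_oid_aprintf() falls back to an
 * allocated buffer for longer ones.  The "rbd_header.%s" format and
 * image_id are illustrative only.
 *
 *        CEPH_DEFINE_OID_ONSTACK(oid);
 *        int ret;
 *
 *        ret = ceph_oid_aprintf(&oid, GFP_NOIO, "rbd_header.%s", image_id);
 *        if (ret)
 *                return ret;
 *        ...
 *        ceph_oid_destroy(&oid);
 */
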
struct workspace_manager {
        struct list_head idle_ws;
        spinlock_t ws_lock;
        /* Number of free workspaces */
        int free_ws;
        /* Total number of allocated workspaces */
        atomic_t total_ws;
        /* Waiters for a free workspace */
        wait_queue_head_t ws_wait;
};

struct ceph_pg_mapping {
        struct rb_node node;
        struct ceph_pg pgid;

        union {
                struct {
                        int len;
                        int osds[];
                } pg_temp, pg_upmap;
                struct {
                        int osd;
                } primary_temp;
                struct {
                        int len;
                        int from_to[][2];
                } pg_upmap_items;
        };
};

struct ceph_osdmap {
        struct ceph_fsid fsid;
        u32 epoch;
        struct ceph_timespec created, modified;

        u32 flags;         /* CEPH_OSDMAP_* */

        u32 max_osd;       /* size of osd_state, _offload, _addr arrays */
        u32 *osd_state;    /* CEPH_OSD_* */
        u32 *osd_weight;   /* 0 = failed, 0x10000 = 100% normal */
        struct ceph_entity_addr *osd_addr;

        struct rb_root pg_temp;
        struct rb_root primary_temp;

        /* remap (post-CRUSH, pre-up) */
        struct rb_root pg_upmap;       /* PG := raw set */
        struct rb_root pg_upmap_items; /* from -> to within raw set */

        u32 *osd_primary_affinity;

        struct rb_root pg_pools;
        u32 pool_max;

        /* the CRUSH map specifies the mapping of placement groups to
         * the list of osds that store+replicate them. */
        struct crush_map *crush;
        struct workspace_manager crush_wsm;
};

static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
{
        return osd >= 0 && osd < map->max_osd &&
               (map->osd_state[osd] & CEPH_OSD_EXISTS);
}

static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd)
{
        return ceph_osd_exists(map, osd) &&
               (map->osd_state[osd] & CEPH_OSD_UP);
}

static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
{
        return !ceph_osd_is_up(map, osd);
}

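/*
 * Example (sketch): the helpers above bounds-check the osd id themselves,
 * so a plain scan over map->max_osd is safe.
 *
 *        int osd, up = 0;
 *
 *        for (osd = 0; osd < map->max_osd; osd++)
 *                if (ceph_osd_is_up(map, osd))
 *                        up++;
 */
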
char *ceph_osdmap_state_str(char *str, int len, u32 state);
extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);

static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map,
                                                     int osd)
{
        if (osd >= map->max_osd)
                return NULL;
        return &map->osd_addr[osd];
}

#define CEPH_PGID_ENCODING_LEN (1 + 8 + 4 + 4)

static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
{
        __u8 version;

        if (!ceph_has_room(p, end, CEPH_PGID_ENCODING_LEN)) {
                pr_warn("incomplete pg encoding\n");
                return -EINVAL;
        }
        version = ceph_decode_8(p);
        if (version > 1) {
                pr_warn("do not understand pg encoding %d > 1\n",
                        (int)version);
                return -EINVAL;
        }

        pgid->pool = ceph_decode_64(p);
        pgid->seed = ceph_decode_32(p);
        *p += 4; /* skip deprecated preferred value */

        return 0;
}

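/*
 * Example (sketch, with p/end bracketing the undecoded remainder of a
 * received buffer):
 *
 *        struct ceph_pg pgid;
 *        int ret;
 *
 *        ret = ceph_decode_pgid(&p, end, &pgid);
 *        if (ret)
 *                return ret;
 */
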
struct ceph_osdmap *ceph_osdmap_alloc(void);
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end, bool msgr2);
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
                                             struct ceph_osdmap *map);
extern void ceph_osdmap_destroy(struct ceph_osdmap *map);

struct ceph_osds {
        int osds[CEPH_PG_MAX_SIZE];
        int size;
        int primary; /* id, NOT index */
};

static inline void ceph_osds_init(struct ceph_osds *set)
{
        set->size = 0;
        set->primary = -1;
}

void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src);

bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
                      u32 new_pg_num);
bool ceph_is_new_interval(const struct ceph_osds *old_acting,
                          const struct ceph_osds *new_acting,
                          const struct ceph_osds *old_up,
                          const struct ceph_osds *new_up,
                          int old_size,
                          int new_size,
                          int old_min_size,
                          int new_min_size,
                          u32 old_pg_num,
                          u32 new_pg_num,
                          bool old_sort_bitwise,
                          bool new_sort_bitwise,
                          bool old_recovery_deletes,
                          bool new_recovery_deletes,
                          const struct ceph_pg *pgid);
bool ceph_osds_changed(const struct ceph_osds *old_acting,
                       const struct ceph_osds *new_acting,
                       bool any_change);

void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
                                 const struct ceph_object_id *oid,
                                 const struct ceph_object_locator *oloc,
                                 struct ceph_pg *raw_pgid);
int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
                              const struct ceph_object_id *oid,
                              const struct ceph_object_locator *oloc,
                              struct ceph_pg *raw_pgid);

void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
                               struct ceph_pg_pool_info *pi,
                               const struct ceph_pg *raw_pgid,
                               struct ceph_osds *up,
                               struct ceph_osds *acting);
bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
                              struct ceph_pg_pool_info *pi,
                              const struct ceph_pg *raw_pgid,
                              struct ceph_spg *spgid);
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
                              const struct ceph_pg *raw_pgid);

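/*
 * Example (sketch): the object -> PG -> OSD pipeline described at the top
 * of this header, ending with the acting primary's id (negative if the
 * PG currently has no primary).
 *
 *        struct ceph_pg raw_pgid;
 *        int ret;
 *
 *        ret = ceph_object_locator_to_pg(osdmap, oid, oloc, &raw_pgid);
 *        if (ret)
 *                return ret;
 *        ret = ceph_pg_to_acting_primary(osdmap, &raw_pgid);
 */
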
struct crush_loc {
        char *cl_type_name;
        char *cl_name;
};

struct crush_loc_node {
        struct rb_node cl_node;
        struct crush_loc cl_loc; /* pointers into cl_data */
        char cl_data[];
};

int ceph_parse_crush_location(char *crush_location, struct rb_root *locs);
int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2);
void ceph_clear_crush_locs(struct rb_root *locs);
int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id,
                            struct rb_root *locs);

extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
                                                    u64 id);
extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);

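/*
 * Example (sketch, with pool_id caller-supplied): refusing writes to a
 * pool that is full or out of quota.  ceph_pg_pool_flags() returns 0 for
 * an unknown pool, so no prior lookup is needed.
 *
 *        if (ceph_pg_pool_flags(map, pool_id) &
 *            (CEPH_POOL_FLAG_FULL | CEPH_POOL_FLAG_FULL_QUOTA))
 *                return -ENOSPC;
 */
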
#endif