super.h 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _FS_CEPH_SUPER_H
  3. #define _FS_CEPH_SUPER_H
  4. #include <linux/ceph/ceph_debug.h>
  5. #include <asm/unaligned.h>
  6. #include <linux/backing-dev.h>
  7. #include <linux/completion.h>
  8. #include <linux/exportfs.h>
  9. #include <linux/fs.h>
  10. #include <linux/mempool.h>
  11. #include <linux/pagemap.h>
  12. #include <linux/wait.h>
  13. #include <linux/writeback.h>
  14. #include <linux/slab.h>
  15. #include <linux/posix_acl.h>
  16. #include <linux/refcount.h>
  17. #include <linux/security.h>
  18. #include <linux/netfs.h>
  19. #include <linux/fscache.h>
  20. #include <linux/hashtable.h>
  21. #include <linux/ceph/libceph.h>
  22. /* large granularity for statfs utilization stats to facilitate
  23. * large volume sizes on 32-bit machines. */
  24. #define CEPH_BLOCK_SHIFT 22 /* 4 MB */
  25. #define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
  26. #define CEPH_4K_BLOCK_SHIFT 12 /* 4 KB */
#define CEPH_MOUNT_OPT_CLEANRECOVER (1<<1) /* auto reconnect (clean mode) after blocklisted */
  28. #define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
  29. #define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
  30. #define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
  31. #define CEPH_MOUNT_OPT_INO32 (1<<8) /* 32 bit inos */
  32. #define CEPH_MOUNT_OPT_DCACHE (1<<9) /* use dcache for readdir etc */
  33. #define CEPH_MOUNT_OPT_FSCACHE (1<<10) /* use fscache */
  34. #define CEPH_MOUNT_OPT_NOPOOLPERM (1<<11) /* no pool permission check */
  35. #define CEPH_MOUNT_OPT_MOUNTWAIT (1<<12) /* mount waits if no mds is up */
  36. #define CEPH_MOUNT_OPT_NOQUOTADF (1<<13) /* no root dir quota in statfs */
  37. #define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */
  38. #define CEPH_MOUNT_OPT_ASYNC_DIROPS (1<<15) /* allow async directory ops */
  39. #define CEPH_MOUNT_OPT_NOPAGECACHE (1<<16) /* bypass pagecache altogether */
  40. #define CEPH_MOUNT_OPT_DEFAULT \
  41. (CEPH_MOUNT_OPT_DCACHE | \
  42. CEPH_MOUNT_OPT_NOCOPYFROM | \
  43. CEPH_MOUNT_OPT_ASYNC_DIROPS)
  44. #define ceph_set_mount_opt(fsc, opt) \
  45. (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt
  46. #define ceph_clear_mount_opt(fsc, opt) \
  47. (fsc)->mount_options->flags &= ~CEPH_MOUNT_OPT_##opt
  48. #define ceph_test_mount_opt(fsc, opt) \
  49. (!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))
  50. /* max size of osd read request, limited by libceph */
  51. #define CEPH_MAX_READ_SIZE CEPH_MSG_MAX_DATA_LEN
/* osd has a configurable limitation of max write size.
  53. * CEPH_MSG_MAX_DATA_LEN should be small enough. */
  54. #define CEPH_MAX_WRITE_SIZE CEPH_MSG_MAX_DATA_LEN
  55. #define CEPH_RASIZE_DEFAULT (8192*1024) /* max readahead */
  56. #define CEPH_MAX_READDIR_DEFAULT 1024
  57. #define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024)
  58. #define CEPH_SNAPDIRNAME_DEFAULT ".snap"
  59. /*
  60. * Delay telling the MDS we no longer want caps, in case we reopen
  61. * the file. Delay a minimum amount of time, even if we send a cap
 * message for some other reason.  Otherwise, take the opportunity to
  63. * update the mds to avoid sending another message later.
  64. */
  65. #define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
  66. #define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
/*
 * Parsed mount options for a cephfs superblock.
 *
 * The plain-data members at the top of the struct can be compared with
 * memcmp(); the pointer members below the marker comment must be compared
 * field-by-field (see compare_mount_options(), defined elsewhere).
 */
struct ceph_mount_options {
	unsigned int flags;		/* CEPH_MOUNT_OPT_* bitmask */

	unsigned int wsize;		/* max write size */
	unsigned int rsize;		/* max read size */
	unsigned int rasize;		/* max readahead */
	unsigned int congestion_kb;	/* max writeback in flight */
	unsigned int caps_wanted_delay_min, caps_wanted_delay_max;
	int caps_max;			/* cap count limit; NOTE(review): 0 presumably means unlimited — confirm */
	unsigned int max_readdir;	/* max readdir result (entries) */
	unsigned int max_readdir_bytes;	/* max readdir result (bytes) */
	bool new_dev_syntax;		/* mounted with the new device-string syntax */

	/*
	 * everything above this point can be memcmp'd; everything below
	 * is handled in compare_mount_options()
	 */

	char *snapdir_name;		/* default ".snap" */
	char *mds_namespace;		/* default NULL */
	char *server_path;		/* default NULL (means "/") */
	char *fscache_uniq;		/* default NULL */
	char *mon_addr;			/* monitor address string; NOTE(review): format not visible here */
};
  88. /* mount state */
/* mount state (stored in ceph_fs_client->mount_state) */
enum {
	CEPH_MOUNT_MOUNTING,
	CEPH_MOUNT_MOUNTED,
	CEPH_MOUNT_UNMOUNTING,
	CEPH_MOUNT_UNMOUNTED,
	CEPH_MOUNT_SHUTDOWN,
	CEPH_MOUNT_RECOVER,
	CEPH_MOUNT_FENCE_IO,
};
  98. #define CEPH_ASYNC_CREATE_CONFLICT_BITS 8
/*
 * Per-superblock cephfs client state.  Ties together the libceph client,
 * the MDS client, the parsed mount options and assorted workqueues and
 * debugfs handles for one mounted filesystem instance.
 */
struct ceph_fs_client {
	struct super_block *sb;

	struct list_head metric_wakeup;

	struct ceph_mount_options *mount_options;
	struct ceph_client *client;	/* libceph cluster client handle */

	int mount_state;		/* CEPH_MOUNT_* (enum above) */

	bool blocklisted;		/* set when this client has been blocklisted */

	bool have_copy_from2;		/* OSDs support the 'copy-from2' op */

	u32 filp_gen;			/* bumped to invalidate open files (compared
					   against ceph_file_info->filp_gen) */
	loff_t max_file_size;

	struct ceph_mds_client *mdsc;

	atomic_long_t writeback_count;	/* pages in flight to the OSDs */
	bool write_congested;

	struct workqueue_struct *inode_wq;	/* for ceph_inode_info->i_work */
	struct workqueue_struct *cap_wq;

	/* tracks in-flight async unlinks so colliding creates can wait */
	DECLARE_HASHTABLE(async_unlink_conflict, CEPH_ASYNC_CREATE_CONFLICT_BITS);
	spinlock_t async_unlink_conflict_lock;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_dentry_lru, *debugfs_caps;
	struct dentry *debugfs_congestion_kb;
	struct dentry *debugfs_bdi;
	struct dentry *debugfs_mdsc, *debugfs_mdsmap;
	struct dentry *debugfs_status;
	struct dentry *debugfs_mds_sessions;
	struct dentry *debugfs_metrics_dir;
#endif

#ifdef CONFIG_CEPH_FSCACHE
	struct fscache_volume *fscache;
#endif
};
  129. /*
  130. * File i/o capability. This tracks shared state with the metadata
  131. * server that allows us to cache or writeback attributes or to read
  132. * and write data. For any given inode, we should have one or more
  133. * capabilities, one issued by each metadata server, and our
  134. * cumulative access is the OR of all issued capabilities.
  135. *
  136. * Each cap is referenced by the inode's i_caps rbtree and by per-mds
  137. * session capability lists.
  138. */
/* one capability instance, as issued by a single MDS */
struct ceph_cap {
	struct ceph_inode_info *ci;	/* owning inode */
	struct rb_node ci_node;		/* per-ci cap tree */
	struct ceph_mds_session *session;
	struct list_head session_caps;	/* per-session caplist */
	u64 cap_id;			/* unique cap id (mds provided) */
	union {
		/* in-use caps */
		struct {
			int issued;		/* latest, from the mds */
			int implemented;	/* implemented superset of
						   issued (for revocation) */
			int mds;		/* mds index for this cap */
			int mds_wanted;		/* caps wanted from this mds */
		};
		/* caps to release */
		struct {
			u64 cap_ino;
			int queue_release;
		};
	};
	u32 seq, issue_seq, mseq;	/* NOTE(review): presumably mds-provided
					   cap/migration sequence numbers — confirm */
	u32 cap_gen;			/* active/stale cycle */
	unsigned long last_used;	/* jiffies of last use — TODO confirm units */
	struct list_head caps_item;
};
  165. #define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */
  166. #define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */
  167. #define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */
/* an in-progress flush of dirty cap state to the MDS */
struct ceph_cap_flush {
	u64 tid;		/* flush transaction id */
	int caps;		/* cap bits being flushed */
	bool wake;		/* wake up flush waiters when finish ? */
	bool is_capsnap;	/* true means capsnap */
	struct list_head g_list; // global
	struct list_head i_list; // per inode
};
  176. /*
  177. * Snapped cap state that is pending flush to mds. When a snapshot occurs,
  178. * we first complete any in-process sync writes and writeback any dirty
  179. * data before flushing the snapped state (tracked here) back to the MDS.
  180. */
struct ceph_cap_snap {
	refcount_t nref;		/* released via ceph_put_cap_snap() */
	struct list_head ci_item;	/* entry on ci->i_cap_snaps */

	struct ceph_cap_flush cap_flush;

	u64 follows;			/* snapid this state follows */
	int issued, dirty;		/* cap bits at snapshot time */
	struct ceph_snap_context *context;

	/* inode metadata frozen at snapshot time */
	umode_t mode;
	kuid_t uid;
	kgid_t gid;

	struct ceph_buffer *xattr_blob;	/* encoded xattrs; may be NULL */
	u64 xattr_version;

	u64 size;
	u64 change_attr;
	struct timespec64 mtime, atime, ctime, btime;
	u64 time_warp_seq;
	u64 truncate_size;
	u32 truncate_seq;
	int writing;			/* a sync write is still in progress */
	int dirty_pages;		/* dirty pages awaiting writeback */
	bool inline_data;
	bool need_flush;
};
  204. static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
  205. {
  206. if (refcount_dec_and_test(&capsnap->nref)) {
  207. if (capsnap->xattr_blob)
  208. ceph_buffer_put(capsnap->xattr_blob);
  209. kmem_cache_free(ceph_cap_snap_cachep, capsnap);
  210. }
  211. }
  212. /*
  213. * The frag tree describes how a directory is fragmented, potentially across
  214. * multiple metadata servers. It is also used to indicate points where
  215. * metadata authority is delegated, and whether/where metadata is replicated.
  216. *
  217. * A _leaf_ frag will be present in the i_fragtree IFF there is
  218. * delegation info. That is, if mds >= 0 || ndist > 0.
  219. */
  220. #define CEPH_MAX_DIRFRAG_REP 4
/* one node of a directory's fragment tree (see comment above) */
struct ceph_inode_frag {
	struct rb_node node;		/* entry in ci->i_fragtree */

	/* fragtree state */
	u32 frag;
	int split_by;			/* i.e. 2^(split_by) children */

	/* delegation and replication info */
	int mds;			/* -1 if same authority as parent */
	int ndist;			/* >0 if replicated */
	int dist[CEPH_MAX_DIRFRAG_REP];
};
  231. /*
  232. * We cache inode xattrs as an encoded blob until they are first used,
  233. * at which point we parse them into an rbtree.
  234. */
/* one parsed xattr, kept in the per-inode xattr rbtree */
struct ceph_inode_xattr {
	struct rb_node node;

	const char *name;
	int name_len;
	const char *val;
	int val_len;

	int dirty;			/* needs write-back to the mds */
	int should_free_name;		/* name was allocated (vs points into blob) */
	int should_free_val;		/* likewise for val */
};
  245. /*
  246. * Ceph dentry state
  247. */
/*
 * Ceph dentry state
 */
struct ceph_dentry_info {
	struct dentry *dentry;
	struct ceph_mds_session *lease_session;	/* mds session holding our lease */
	struct list_head lease_list;
	struct hlist_node hnode;	/* entry in async_unlink_conflict hash */
	unsigned long flags;		/* CEPH_DENTRY_* bits below */
	int lease_shared_gen;
	u32 lease_gen;
	u32 lease_seq;
	unsigned long lease_renew_after, lease_renew_from;	/* jiffies */
	unsigned long time;
	u64 offset;			/* readdir offset of this dentry */
};
  261. #define CEPH_DENTRY_REFERENCED (1 << 0)
  262. #define CEPH_DENTRY_LEASE_LIST (1 << 1)
  263. #define CEPH_DENTRY_SHRINK_LIST (1 << 2)
  264. #define CEPH_DENTRY_PRIMARY_LINK (1 << 3)
  265. #define CEPH_DENTRY_ASYNC_UNLINK_BIT (4)
  266. #define CEPH_DENTRY_ASYNC_UNLINK (1 << CEPH_DENTRY_ASYNC_UNLINK_BIT)
  267. #define CEPH_DENTRY_ASYNC_CREATE_BIT (5)
  268. #define CEPH_DENTRY_ASYNC_CREATE (1 << CEPH_DENTRY_ASYNC_CREATE_BIT)
struct ceph_inode_xattrs_info {
	/*
	 * (still encoded) xattr blob. we avoid the overhead of parsing
	 * this until someone actually calls getxattr, etc.
	 *
	 * blob->vec.iov_len == 4 implies there are no xattrs; blob ==
	 * NULL means we don't know.
	 */
	struct ceph_buffer *blob, *prealloc_blob;

	struct rb_root index;		/* parsed xattrs (struct ceph_inode_xattr) */
	bool dirty;			/* index differs from blob; needs flush */
	int count;			/* number of parsed entries */
	int names_size;			/* total bytes of all names in index */
	int vals_size;			/* total bytes of all values in index */
	u64 version, index_version;
};
  285. /*
  286. * Ceph inode.
  287. */
struct ceph_inode_info {
	struct netfs_inode netfs;	/* Netfslib context and vfs inode */
	struct ceph_vino i_vino;	/* ceph ino + snap */

	spinlock_t i_ceph_lock;		/* protects most fields below */

	u64 i_version;
	u64 i_inline_version;
	u32 i_time_warp_seq;

	unsigned long i_ceph_flags;	/* CEPH_I_* bits */
	atomic64_t i_release_count;
	atomic64_t i_ordered_count;
	atomic64_t i_complete_seq[2];	/* [0] vs i_release_count, [1] vs
					   i_ordered_count; see the
					   __ceph_dir_*_complete helpers */

	struct ceph_dir_layout i_dir_layout;
	struct ceph_file_layout i_layout;
	struct ceph_file_layout i_cached_layout;	// for async creates
	char *i_symlink;

	/* for dirs */
	struct timespec64 i_rctime;
	u64 i_rbytes, i_rfiles, i_rsubdirs, i_rsnaps;	/* recursive stats */
	u64 i_files, i_subdirs;

	/* quotas */
	u64 i_max_bytes, i_max_files;

	s32 i_dir_pin;

	struct rb_root i_fragtree;	/* struct ceph_inode_frag nodes */
	int i_fragtree_nsplits;
	struct mutex i_fragtree_mutex;

	struct ceph_inode_xattrs_info i_xattrs;

	/* capabilities.  protected _both_ by i_ceph_lock and cap->session's
	 * s_mutex. */
	struct rb_root i_caps;		/* cap list */
	struct ceph_cap *i_auth_cap;	/* authoritative cap, if any */
	unsigned i_dirty_caps, i_flushing_caps;	/* mask of dirtied fields */

	/*
	 * Link to the auth cap's session's s_cap_dirty list. s_cap_dirty
	 * is protected by the mdsc->cap_dirty_lock, but each individual item
	 * is also protected by the inode's i_ceph_lock. Walking s_cap_dirty
	 * requires the mdsc->cap_dirty_lock. List presence for an item can
	 * be tested under the i_ceph_lock. Changing anything requires both.
	 */
	struct list_head i_dirty_item;

	/*
	 * Link to session's s_cap_flushing list. Protected in a similar
	 * fashion to i_dirty_item, but also by the s_mutex for changes. The
	 * s_cap_flushing list can be walked while holding either the s_mutex
	 * or msdc->cap_dirty_lock. List presence can also be checked while
	 * holding the i_ceph_lock for this inode.
	 */
	struct list_head i_flushing_item;

	/* we need to track cap writeback on a per-cap-bit basis, to allow
	 * overlapping, pipelined cap flushes to the mds.  we can probably
	 * reduce the tid to 8 bits if we're concerned about inode size. */
	struct ceph_cap_flush *i_prealloc_cap_flush;
	struct list_head i_cap_flush_list;

	wait_queue_head_t i_cap_wq;	/* threads waiting on a capability */
	unsigned long i_hold_caps_max;	/* jiffies */
	struct list_head i_cap_delay_list; /* for delayed cap release to mds */
	struct ceph_cap_reservation i_cap_migration_resv;
	struct list_head i_cap_snaps;	/* snapped state pending flush to mds */
	struct ceph_snap_context *i_head_snapc; /* set if wr_buffer_head > 0 or
						   dirty|flushing caps */
	unsigned i_snap_caps;		/* cap bits for snapped files */

	unsigned long i_last_rd;
	unsigned long i_last_wr;
	int i_nr_by_mode[CEPH_FILE_MODE_BITS];	/* open file counts */

	struct mutex i_truncate_mutex;
	u32 i_truncate_seq;		/* last truncate to smaller size */
	u64 i_truncate_size;		/* and the size we last truncated down to */
	int i_truncate_pending;		/* still need to call vmtruncate */

	u64 i_max_size;			/* max file size authorized by mds */
	u64 i_reported_size;		/* (max_)size reported to or requested of mds */
	u64 i_wanted_max_size;		/* offset we'd like to write to */
	u64 i_requested_max_size;	/* max_size we've requested */

	/* held references to caps */
	int i_pin_ref;
	int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref, i_fx_ref;
	int i_wrbuffer_ref, i_wrbuffer_ref_head;
	atomic_t i_filelock_ref;
	atomic_t i_shared_gen;		/* increment each time we get FILE_SHARED */
	u32 i_rdcache_gen;		/* incremented each time we get FILE_CACHE. */
	u32 i_rdcache_revoking;		/* RDCACHE gen to async invalidate, if any */

	struct list_head i_unsafe_dirops;	/* uncommitted mds dir ops */
	struct list_head i_unsafe_iops;		/* uncommitted mds inode ops */
	spinlock_t i_unsafe_lock;

	union {
		struct ceph_snap_realm *i_snap_realm;	/* snap realm (if caps) */
		struct ceph_snapid_map *i_snapid_map;	/* snapid -> dev_t */
	};
	struct list_head i_snap_realm_item;
	struct list_head i_snap_flush_item;
	struct timespec64 i_btime;
	struct timespec64 i_snap_btime;

	struct work_struct i_work;	/* runs on fsc->inode_wq */
	unsigned long i_work_mask;	/* CEPH_I_WORK_* bits */
};
/* per-request private data attached to a netfs read request */
struct ceph_netfs_request_data {
	int caps;			/* cap references held for this request */

	/*
	 * Maximum size of a file readahead request.
	 * The fadvise could update the bdi's default ra_pages.
	 */
	unsigned int file_ra_pages;

	/* Set it if fadvise disables file readahead entirely */
	bool file_ra_disabled;
};
/* VFS inode -> cephfs inode (the vfs inode is embedded in netfs.inode) */
static inline struct ceph_inode_info *
ceph_inode(const struct inode *inode)
{
	return container_of(inode, struct ceph_inode_info, netfs.inode);
}

/* the fs client for an inode hangs off its superblock's s_fs_info */
static inline struct ceph_fs_client *
ceph_inode_to_client(const struct inode *inode)
{
	return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
}

static inline struct ceph_fs_client *
ceph_sb_to_client(const struct super_block *sb)
{
	return (struct ceph_fs_client *)sb->s_fs_info;
}

static inline struct ceph_mds_client *
ceph_sb_to_mdsc(const struct super_block *sb)
{
	return (struct ceph_mds_client *)ceph_sb_to_client(sb)->mdsc;
}

/* full ceph identity (ino + snap) of an inode */
static inline struct ceph_vino
ceph_vino(const struct inode *inode)
{
	return ceph_inode(inode)->i_vino;
}
  416. static inline u32 ceph_ino_to_ino32(u64 vino)
  417. {
  418. u32 ino = vino & 0xffffffff;
  419. ino ^= vino >> 32;
  420. if (!ino)
  421. ino = 2;
  422. return ino;
  423. }
  424. /*
  425. * Inode numbers in cephfs are 64 bits, but inode->i_ino is 32-bits on
  426. * some arches. We generally do not use this value inside the ceph driver, but
  427. * we do want to set it to something, so that generic vfs code has an
  428. * appropriate value for tracepoints and the like.
  429. */
static inline ino_t ceph_vino_to_ino_t(struct ceph_vino vino)
{
	/* squash to 32 bits only when ino_t itself is 32-bit */
	if (sizeof(ino_t) == sizeof(u32))
		return ceph_ino_to_ino32(vino.ino);
	return (ino_t)vino.ino;
}

/* for printf-style formatting */
#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap

/* the raw 64-bit ceph inode number */
static inline u64 ceph_ino(struct inode *inode)
{
	return ceph_inode(inode)->i_vino.ino;
}

/* the snapshot id component of the inode's identity */
static inline u64 ceph_snap(struct inode *inode)
{
	return ceph_inode(inode)->i_vino.snap;
}
  446. /**
  447. * ceph_present_ino - format an inode number for presentation to userland
  448. * @sb: superblock where the inode lives
  449. * @ino: inode number to (possibly) convert
  450. *
  451. * If the user mounted with the ino32 option, then the 64-bit value needs
  452. * to be converted to something that can fit inside 32 bits. Note that
  453. * internal kernel code never uses this value, so this is entirely for
  454. * userland consumption.
  455. */
static inline u64 ceph_present_ino(struct super_block *sb, u64 ino)
{
	/* ino32 mounts need the 64-bit ino folded down to 32 bits */
	if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)))
		return ceph_ino_to_ino32(ino);
	return ino;
}

/* userland-visible inode number for @inode (see ceph_present_ino) */
static inline u64 ceph_present_inode(struct inode *inode)
{
	return ceph_present_ino(inode->i_sb, ceph_ino(inode));
}

/*
 * ilookup5() comparison callback: match on the full (ino, snap) pair,
 * since the hash value alone only covers the low bits of the ino.
 */
static inline int ceph_ino_compare(struct inode *inode, void *data)
{
	struct ceph_vino *pvino = (struct ceph_vino *)data;
	struct ceph_inode_info *ci = ceph_inode(inode);
	return ci->i_vino.ino == pvino->ino &&
		ci->i_vino.snap == pvino->snap;
}
  473. /*
  474. * The MDS reserves a set of inodes for its own usage. These should never
  475. * be accessible by clients, and so the MDS has no reason to ever hand these
  476. * out. The range is CEPH_MDS_INO_MDSDIR_OFFSET..CEPH_INO_SYSTEM_BASE.
  477. *
  478. * These come from src/mds/mdstypes.h in the ceph sources.
  479. */
  480. #define CEPH_MAX_MDS 0x100
  481. #define CEPH_NUM_STRAY 10
  482. #define CEPH_MDS_INO_MDSDIR_OFFSET (1 * CEPH_MAX_MDS)
  483. #define CEPH_MDS_INO_LOG_OFFSET (2 * CEPH_MAX_MDS)
  484. #define CEPH_INO_SYSTEM_BASE ((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))
/*
 * Return true if @vino falls inside the MDS-reserved inode range
 * [CEPH_MDS_INO_MDSDIR_OFFSET, CEPH_INO_SYSTEM_BASE).  Accessing a
 * reserved ino above the mdsdir range is unexpected, so warn
 * (ratelimited) in that case.
 */
static inline bool ceph_vino_is_reserved(const struct ceph_vino vino)
{
	if (vino.ino >= CEPH_INO_SYSTEM_BASE ||
	    vino.ino < CEPH_MDS_INO_MDSDIR_OFFSET)
		return false;

	/* Don't warn on mdsdirs */
	WARN_RATELIMIT(vino.ino >= CEPH_MDS_INO_LOG_OFFSET,
			"Attempt to access reserved inode number 0x%llx",
			vino.ino);
	return true;
}

/*
 * Look up an already-instantiated inode by (ino, snap).  Returns NULL
 * for MDS-reserved inos, or if no matching inode is in the icache.
 */
static inline struct inode *ceph_find_inode(struct super_block *sb,
					    struct ceph_vino vino)
{
	if (ceph_vino_is_reserved(vino))
		return NULL;

	/*
	 * NB: The hashval will be run through the fs/inode.c hash function
	 * anyway, so there is no need to squash the inode number down to
	 * 32-bits first. Just use low-order bits on arches with 32-bit long.
	 */
	return ilookup5(sb, (unsigned long)vino.ino, ceph_ino_compare, &vino);
}
  508. /*
  509. * Ceph inode.
  510. */
  511. #define CEPH_I_DIR_ORDERED (1 << 0) /* dentries in dir are ordered */
  512. #define CEPH_I_FLUSH (1 << 2) /* do not delay flush of dirty metadata */
  513. #define CEPH_I_POOL_PERM (1 << 3) /* pool rd/wr bits are valid */
  514. #define CEPH_I_POOL_RD (1 << 4) /* can read from pool */
  515. #define CEPH_I_POOL_WR (1 << 5) /* can write to pool */
  516. #define CEPH_I_SEC_INITED (1 << 6) /* security initialized */
  517. #define CEPH_I_KICK_FLUSH (1 << 7) /* kick flushing caps */
#define CEPH_I_FLUSH_SNAPS (1 << 8) /* need flush snaps */
  519. #define CEPH_I_ERROR_WRITE (1 << 9) /* have seen write errors */
  520. #define CEPH_I_ERROR_FILELOCK (1 << 10) /* have seen file lock errors */
  521. #define CEPH_I_ODIRECT (1 << 11) /* inode in direct I/O mode */
  522. #define CEPH_ASYNC_CREATE_BIT (12) /* async create in flight for this */
  523. #define CEPH_I_ASYNC_CREATE (1 << CEPH_ASYNC_CREATE_BIT)
  524. #define CEPH_I_SHUTDOWN (1 << 13) /* inode is no longer usable */
  525. /*
  526. * Masks of ceph inode work.
  527. */
  528. #define CEPH_I_WORK_WRITEBACK 0
  529. #define CEPH_I_WORK_INVALIDATE_PAGES 1
  530. #define CEPH_I_WORK_VMTRUNCATE 2
  531. #define CEPH_I_WORK_CHECK_CAPS 3
  532. #define CEPH_I_WORK_FLUSH_SNAPS 4
  533. /*
  534. * We set the ERROR_WRITE bit when we start seeing write errors on an inode
  535. * and then clear it when they start succeeding. Note that we do a lockless
  536. * check first, and only take the lock if it looks like it needs to be changed.
  537. * The write submission code just takes this as a hint, so we're not too
  538. * worried if a few slip through in either direction.
  539. */
/* see the ERROR_WRITE comment above: lockless peek, lock only to change */
static inline void ceph_set_error_write(struct ceph_inode_info *ci)
{
	if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ERROR_WRITE)) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_ceph_flags |= CEPH_I_ERROR_WRITE;
		spin_unlock(&ci->i_ceph_lock);
	}
}

/* clear ERROR_WRITE once writes start succeeding again */
static inline void ceph_clear_error_write(struct ceph_inode_info *ci)
{
	if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ERROR_WRITE) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_ceph_flags &= ~CEPH_I_ERROR_WRITE;
		spin_unlock(&ci->i_ceph_lock);
	}
}
/*
 * Mark a directory's readdir cache complete by recording the current
 * release/ordered counters.  Caller holds i_ceph_lock (this is the
 * double-underscore variant).
 */
static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
					   long long release_count,
					   long long ordered_count)
{
	/*
	 * Makes sure operations that setup readdir cache (update page
	 * cache and i_size) are strongly ordered w.r.t. the following
	 * atomic64_set() operations.
	 */
	smp_mb();
	atomic64_set(&ci->i_complete_seq[0], release_count);
	atomic64_set(&ci->i_complete_seq[1], ordered_count);
}

/* any bump of i_release_count invalidates the "complete" state */
static inline void __ceph_dir_clear_complete(struct ceph_inode_info *ci)
{
	atomic64_inc(&ci->i_release_count);
}

/* any bump of i_ordered_count invalidates the "ordered" state */
static inline void __ceph_dir_clear_ordered(struct ceph_inode_info *ci)
{
	atomic64_inc(&ci->i_ordered_count);
}

/* dir contents complete: recorded seq still matches the release counter */
static inline bool __ceph_dir_is_complete(struct ceph_inode_info *ci)
{
	return atomic64_read(&ci->i_complete_seq[0]) ==
		atomic64_read(&ci->i_release_count);
}

/* complete AND dentries still in readdir order */
static inline bool __ceph_dir_is_complete_ordered(struct ceph_inode_info *ci)
{
	return atomic64_read(&ci->i_complete_seq[0]) ==
		atomic64_read(&ci->i_release_count) &&
	       atomic64_read(&ci->i_complete_seq[1]) ==
		atomic64_read(&ci->i_ordered_count);
}

static inline void ceph_dir_clear_complete(struct inode *inode)
{
	__ceph_dir_clear_complete(ceph_inode(inode));
}

static inline void ceph_dir_clear_ordered(struct inode *inode)
{
	__ceph_dir_clear_ordered(ceph_inode(inode));
}

static inline bool ceph_dir_is_complete_ordered(struct inode *inode)
{
	bool ret = __ceph_dir_is_complete_ordered(ceph_inode(inode));
	/* NOTE(review): presumably pairs with the smp_mb() in
	 * __ceph_dir_set_complete() — confirm */
	smp_rmb();
	return ret;
}
  603. /* find a specific frag @f */
  604. extern struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci,
  605. u32 f);
  606. /*
  607. * choose fragment for value @v. copy frag content to pfrag, if leaf
  608. * exists
  609. */
  610. extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
  611. struct ceph_inode_frag *pfrag,
  612. int *found);
/* per-dentry ceph state lives in d_fsdata */
static inline struct ceph_dentry_info *ceph_dentry(const struct dentry *dentry)
{
	return (struct ceph_dentry_info *)dentry->d_fsdata;
}
  617. /*
  618. * caps helpers
  619. */
/* does this inode hold at least one cap? caller holds i_ceph_lock */
static inline bool __ceph_is_any_real_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps);
}
  624. extern int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented);
  625. extern int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int t);
  626. extern int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
  627. int t);
  628. extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
  629. struct ceph_cap *cap);
/* locked wrapper around __ceph_caps_issued() */
static inline int ceph_caps_issued(struct ceph_inode_info *ci)
{
	int issued;
	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	spin_unlock(&ci->i_ceph_lock);
	return issued;
}

/* locked wrapper around __ceph_caps_issued_mask_metric() */
static inline int ceph_caps_issued_mask_metric(struct ceph_inode_info *ci,
					       int mask, int touch)
{
	int r;
	spin_lock(&ci->i_ceph_lock);
	r = __ceph_caps_issued_mask_metric(ci, mask, touch);
	spin_unlock(&ci->i_ceph_lock);
	return r;
}

/* cap bits that are dirty or already being flushed; caller holds i_ceph_lock */
static inline int __ceph_caps_dirty(struct ceph_inode_info *ci)
{
	return ci->i_dirty_caps | ci->i_flushing_caps;
}
  651. extern struct ceph_cap_flush *ceph_alloc_cap_flush(void);
  652. extern void ceph_free_cap_flush(struct ceph_cap_flush *cf);
  653. extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
  654. struct ceph_cap_flush **pcf);
  655. extern int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
  656. struct ceph_cap *ocap, int mask);
  657. extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
  658. extern int __ceph_caps_used(struct ceph_inode_info *ci);
/*
 * Any open file handles on this inode?  NOTE(review): assumes slot 0 of
 * i_nr_by_mode tracks the total open count — confirm against the fmode
 * accounting code.
 */
static inline bool __ceph_is_file_opened(struct ceph_inode_info *ci)
{
	return ci->i_nr_by_mode[0];
}
  663. extern int __ceph_caps_file_wanted(struct ceph_inode_info *ci);
  664. extern int __ceph_caps_wanted(struct ceph_inode_info *ci);
  665. /* what the mds thinks we want */
  666. extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check);
  667. extern void ceph_caps_init(struct ceph_mds_client *mdsc);
  668. extern void ceph_caps_finalize(struct ceph_mds_client *mdsc);
  669. extern void ceph_adjust_caps_max_min(struct ceph_mds_client *mdsc,
  670. struct ceph_mount_options *fsopt);
  671. extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
  672. struct ceph_cap_reservation *ctx, int need);
  673. extern void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
  674. struct ceph_cap_reservation *ctx);
  675. extern void ceph_reservation_status(struct ceph_fs_client *client,
  676. int *total, int *avail, int *used,
  677. int *reserved, int *min);
  678. extern void change_auth_cap_ses(struct ceph_inode_info *ci,
  679. struct ceph_mds_session *session);
  680. /*
  681. * we keep buffered readdir results attached to file->private_data
  682. */
  683. #define CEPH_F_SYNC 1
  684. #define CEPH_F_ATEND 2
/* per-open-file state, hung off file->private_data */
struct ceph_file_info {
	short fmode; /* initialized on open */
	short flags; /* CEPH_F_* */
	spinlock_t rw_contexts_lock;	/* protects rw_contexts */
	struct list_head rw_contexts;	/* active ceph_rw_context entries */
	u32 filp_gen;	/* generation at open — presumably matched against fsc; TODO confirm */
};
/* directory-specific open-file state; embeds ceph_file_info as first member */
struct ceph_dir_file_info {
	struct ceph_file_info file_info;
	/* readdir: position within the dir */
	u32 frag;	/* current dirfrag */
	struct ceph_mds_request *last_readdir;	/* last readdir request (buffered results) */
	/* readdir: position within a frag */
	unsigned next_offset; /* offset of next chunk (last_name's + 1) */
	char *last_name; /* last entry in previous chunk */
	long long dir_release_count;
	long long dir_ordered_count;
	int readdir_cache_idx;	/* index into readdir cache; negative presumably means unset — TODO confirm */
	/* used for -o dirstat read() on a directory */
	char *dir_info;
	int dir_info_len;
};
/* tracks caps held by one task for an in-flight read/write on a file */
struct ceph_rw_context {
	struct list_head list;		/* entry on ceph_file_info.rw_contexts */
	struct task_struct *thread;	/* task that registered this context */
	int caps;			/* caps held for the duration of the I/O */
};
/* Declare an on-stack ceph_rw_context for the current task holding _caps. */
#define CEPH_DEFINE_RW_CONTEXT(_name, _caps)	\
	struct ceph_rw_context _name = {	\
		.thread = current,		\
		.caps = _caps,			\
	}
/* Register @ctx (an in-progress read/write) on @cf's rw_contexts list. */
static inline void ceph_add_rw_context(struct ceph_file_info *cf,
				       struct ceph_rw_context *ctx)
{
	spin_lock(&cf->rw_contexts_lock);
	list_add(&ctx->list, &cf->rw_contexts);
	spin_unlock(&cf->rw_contexts_lock);
}
/* Unregister @ctx from @cf's rw_contexts list once the I/O completes. */
static inline void ceph_del_rw_context(struct ceph_file_info *cf,
				       struct ceph_rw_context *ctx)
{
	spin_lock(&cf->rw_contexts_lock);
	list_del(&ctx->list);
	spin_unlock(&cf->rw_contexts_lock);
}
  731. static inline struct ceph_rw_context*
  732. ceph_find_rw_context(struct ceph_file_info *cf)
  733. {
  734. struct ceph_rw_context *ctx, *found = NULL;
  735. spin_lock(&cf->rw_contexts_lock);
  736. list_for_each_entry(ctx, &cf->rw_contexts, list) {
  737. if (ctx->thread == current) {
  738. found = ctx;
  739. break;
  740. }
  741. }
  742. spin_unlock(&cf->rw_contexts_lock);
  743. return found;
  744. }
/* cursor state for filling/releasing the readdir dentry cache */
struct ceph_readdir_cache_control {
	struct page *page;		/* current page of cached dentry pointers */
	struct dentry **dentries;	/* kmapped view of @page — TODO confirm */
	int index;			/* next cache slot */
};
  750. /*
  751. * A "snap realm" describes a subset of the file hierarchy sharing
  752. * the same set of snapshots that apply to it. The realms themselves
  753. * are organized into a hierarchy, such that children inherit (some of)
  754. * the snapshots of their parents.
  755. *
  756. * All inodes within the realm that have capabilities are linked into a
  757. * per-realm list.
  758. */
struct ceph_snap_realm {
	u64 ino;		/* realm's inode number (realm key) */
	struct inode *inode;
	atomic_t nref;		/* refcount (ceph_get/put_snap_realm) */
	struct rb_node node;	/* presumably mdsc's realm rbtree — see ceph_lookup_snap_realm */
	u64 created, seq;
	u64 parent_ino;
	u64 parent_since;   /* snapid when our current parent became so */
	u64 *prior_parent_snaps;      /* snaps inherited from any parents we */
	u32 num_prior_parent_snaps;   /*  had prior to parent_since */
	u64 *snaps;                   /* snaps specific to this realm */
	u32 num_snaps;
	struct ceph_snap_realm *parent;
	struct list_head children;       /* list of child realms */
	struct list_head child_item;	 /* our entry on parent's children list */
	struct list_head empty_item;     /* if i have ref==0 */
	struct list_head dirty_item;     /* if realm needs new context */
	struct list_head rebuild_item;   /* rebuild snap realms _downward_ in hierarchy */
	/* the current set of snaps for this realm */
	struct ceph_snap_context *cached_context;
	struct list_head inodes_with_caps;	/* inodes in this realm that hold caps */
	spinlock_t inodes_with_caps_lock;	/* protects inodes_with_caps */
};
  782. static inline int default_congestion_kb(void)
  783. {
  784. int congestion_kb;
  785. /*
  786. * Copied from NFS
  787. *
  788. * congestion size, scale with available memory.
  789. *
  790. * 64MB: 8192k
  791. * 128MB: 11585k
  792. * 256MB: 16384k
  793. * 512MB: 23170k
  794. * 1GB: 32768k
  795. * 2GB: 46340k
  796. * 4GB: 65536k
  797. * 8GB: 92681k
  798. * 16GB: 131072k
  799. *
  800. * This allows larger machines to have larger/more transfers.
  801. * Limit the default to 256M
  802. */
  803. congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
  804. if (congestion_kb > 256*1024)
  805. congestion_kb = 256*1024;
  806. return congestion_kb;
  807. }
  808. /* super.c */
  809. extern int ceph_force_reconnect(struct super_block *sb);
  810. /* snap.c */
  811. struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
  812. u64 ino);
  813. extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
  814. struct ceph_snap_realm *realm);
  815. extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
  816. struct ceph_snap_realm *realm);
  817. extern int ceph_update_snap_trace(struct ceph_mds_client *m,
  818. void *p, void *e, bool deletion,
  819. struct ceph_snap_realm **realm_ret);
  820. void ceph_change_snap_realm(struct inode *inode, struct ceph_snap_realm *realm);
  821. extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
  822. struct ceph_mds_session *session,
  823. struct ceph_msg *msg);
  824. extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
  825. struct ceph_cap_snap *capsnap);
  826. extern void ceph_cleanup_global_and_empty_realms(struct ceph_mds_client *mdsc);
  827. extern struct ceph_snapid_map *ceph_get_snapid_map(struct ceph_mds_client *mdsc,
  828. u64 snap);
  829. extern void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
  830. struct ceph_snapid_map *sm);
  831. extern void ceph_trim_snapid_map(struct ceph_mds_client *mdsc);
  832. extern void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc);
  833. void ceph_umount_begin(struct super_block *sb);
  834. /*
  835. * a cap_snap is "pending" if it is still awaiting an in-progress
  836. * sync write (that may/may not still update size, mtime, etc.).
  837. */
/*
 * True if the most recent cap_snap on i_cap_snaps is still marked
 * "writing" (an in-progress sync write may yet change it).
 * NOTE(review): __ prefix suggests i_ceph_lock must be held — confirm.
 */
static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
{
	return !list_empty(&ci->i_cap_snaps) &&
	       list_last_entry(&ci->i_cap_snaps, struct ceph_cap_snap,
			       ci_item)->writing;
}
  844. /* inode.c */
  845. struct ceph_mds_reply_info_in;
  846. struct ceph_mds_reply_dirfrag;
  847. extern const struct inode_operations ceph_file_iops;
  848. extern struct inode *ceph_alloc_inode(struct super_block *sb);
  849. extern void ceph_evict_inode(struct inode *inode);
  850. extern void ceph_free_inode(struct inode *inode);
  851. extern struct inode *ceph_get_inode(struct super_block *sb,
  852. struct ceph_vino vino);
  853. extern struct inode *ceph_get_snapdir(struct inode *parent);
  854. extern int ceph_fill_file_size(struct inode *inode, int issued,
  855. u32 truncate_seq, u64 truncate_size, u64 size);
  856. extern void ceph_fill_file_time(struct inode *inode, int issued,
  857. u64 time_warp_seq, struct timespec64 *ctime,
  858. struct timespec64 *mtime,
  859. struct timespec64 *atime);
  860. extern int ceph_fill_inode(struct inode *inode, struct page *locked_page,
  861. struct ceph_mds_reply_info_in *iinfo,
  862. struct ceph_mds_reply_dirfrag *dirinfo,
  863. struct ceph_mds_session *session, int cap_fmode,
  864. struct ceph_cap_reservation *caps_reservation);
  865. extern int ceph_fill_trace(struct super_block *sb,
  866. struct ceph_mds_request *req);
  867. extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
  868. struct ceph_mds_session *session);
  869. extern int ceph_inode_holds_cap(struct inode *inode, int mask);
  870. extern bool ceph_inode_set_size(struct inode *inode, loff_t size);
  871. extern void __ceph_do_pending_vmtruncate(struct inode *inode);
  872. void ceph_queue_inode_work(struct inode *inode, int work_bit);
/* queue async vmtruncate work for this inode */
static inline void ceph_queue_vmtruncate(struct inode *inode)
{
	ceph_queue_inode_work(inode, CEPH_I_WORK_VMTRUNCATE);
}
/* queue async pagecache invalidation for this inode */
static inline void ceph_queue_invalidate(struct inode *inode)
{
	ceph_queue_inode_work(inode, CEPH_I_WORK_INVALIDATE_PAGES);
}
/* queue async writeback for this inode */
static inline void ceph_queue_writeback(struct inode *inode)
{
	ceph_queue_inode_work(inode, CEPH_I_WORK_WRITEBACK);
}
/* queue an async cap check for this inode */
static inline void ceph_queue_check_caps(struct inode *inode)
{
	ceph_queue_inode_work(inode, CEPH_I_WORK_CHECK_CAPS);
}
/* queue async snap flushing for this inode */
static inline void ceph_queue_flush_snaps(struct inode *inode)
{
	ceph_queue_inode_work(inode, CEPH_I_WORK_FLUSH_SNAPS);
}
  893. extern int ceph_try_to_choose_auth_mds(struct inode *inode, int mask);
  894. extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
  895. int mask, bool force);
/* Convenience wrapper around __ceph_do_getattr() with no locked page. */
static inline int ceph_do_getattr(struct inode *inode, int mask, bool force)
{
	return __ceph_do_getattr(inode, NULL, mask, force);
}
  900. extern int ceph_permission(struct user_namespace *mnt_userns,
  901. struct inode *inode, int mask);
  902. extern int __ceph_setattr(struct inode *inode, struct iattr *attr);
  903. extern int ceph_setattr(struct user_namespace *mnt_userns,
  904. struct dentry *dentry, struct iattr *attr);
  905. extern int ceph_getattr(struct user_namespace *mnt_userns,
  906. const struct path *path, struct kstat *stat,
  907. u32 request_mask, unsigned int flags);
  908. void ceph_inode_shutdown(struct inode *inode);
  909. static inline bool ceph_inode_is_shutdown(struct inode *inode)
  910. {
  911. unsigned long flags = READ_ONCE(ceph_inode(inode)->i_ceph_flags);
  912. struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
  913. int state = READ_ONCE(fsc->mount_state);
  914. return (flags & CEPH_I_SHUTDOWN) || state >= CEPH_MOUNT_SHUTDOWN;
  915. }
  916. /* xattr.c */
  917. int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
  918. int ceph_do_getvxattr(struct inode *inode, const char *name, void *value, size_t size);
  919. ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
  920. extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
  921. extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
  922. extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
  923. extern const struct xattr_handler *ceph_xattr_handlers[];
/* POSIX ACL / security-label state gathered before inode creation */
struct ceph_acl_sec_ctx {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	void *default_acl;	/* default ACL to apply to new inode */
	void *acl;		/* access ACL to apply to new inode */
#endif
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
	void *sec_ctx;		/* security label blob */
	u32 sec_ctxlen;		/* length of sec_ctx */
#endif
	struct ceph_pagelist *pagelist;	/* encoded xattrs sent with the request — TODO confirm */
};
  935. #ifdef CONFIG_SECURITY
  936. extern bool ceph_security_xattr_deadlock(struct inode *in);
  937. extern bool ceph_security_xattr_wanted(struct inode *in);
  938. #else
/* CONFIG_SECURITY=n stub: security xattrs can never deadlock */
static inline bool ceph_security_xattr_deadlock(struct inode *in)
{
	return false;
}
/* CONFIG_SECURITY=n stub: security xattrs are never wanted */
static inline bool ceph_security_xattr_wanted(struct inode *in)
{
	return false;
}
  947. #endif
  948. #ifdef CONFIG_CEPH_FS_SECURITY_LABEL
  949. extern int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
  950. struct ceph_acl_sec_ctx *ctx);
/* ask the security module to drop its cached security context for @inode */
static inline void ceph_security_invalidate_secctx(struct inode *inode)
{
	security_inode_invalidate_secctx(inode);
}
  955. #else
/* CONFIG_CEPH_FS_SECURITY_LABEL=n stub: nothing to initialize, succeed */
static inline int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
					    struct ceph_acl_sec_ctx *ctx)
{
	return 0;
}
/* CONFIG_CEPH_FS_SECURITY_LABEL=n stub: no cached secctx to invalidate */
static inline void ceph_security_invalidate_secctx(struct inode *inode)
{
}
  964. #endif
  965. void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx);
  966. /* acl.c */
  967. #ifdef CONFIG_CEPH_FS_POSIX_ACL
  968. struct posix_acl *ceph_get_acl(struct inode *, int, bool);
  969. int ceph_set_acl(struct user_namespace *mnt_userns,
  970. struct inode *inode, struct posix_acl *acl, int type);
  971. int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
  972. struct ceph_acl_sec_ctx *as_ctx);
  973. void ceph_init_inode_acls(struct inode *inode,
  974. struct ceph_acl_sec_ctx *as_ctx);
/* drop the VFS's cached ACLs for @inode */
static inline void ceph_forget_all_cached_acls(struct inode *inode)
{
	forget_all_cached_acls(inode);
}
  979. #else
  980. #define ceph_get_acl NULL
  981. #define ceph_set_acl NULL
/* CONFIG_CEPH_FS_POSIX_ACL=n stub: no ACLs to prepare, succeed */
static inline int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
				     struct ceph_acl_sec_ctx *as_ctx)
{
	return 0;
}
/* CONFIG_CEPH_FS_POSIX_ACL=n stub: no ACLs to install on the new inode */
static inline void ceph_init_inode_acls(struct inode *inode,
					struct ceph_acl_sec_ctx *as_ctx)
{
}
/* CONFIG_CEPH_FS_POSIX_ACL=n stub: nothing to update on chmod, succeed */
static inline int ceph_acl_chmod(struct dentry *dentry, struct inode *inode)
{
	return 0;
}
/* CONFIG_CEPH_FS_POSIX_ACL=n stub: no cached ACLs exist */
static inline void ceph_forget_all_cached_acls(struct inode *inode)
{
}
  998. #endif
  999. /* caps.c */
  1000. extern const char *ceph_cap_string(int c);
  1001. extern void ceph_handle_caps(struct ceph_mds_session *session,
  1002. struct ceph_msg *msg);
  1003. extern struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
  1004. struct ceph_cap_reservation *ctx);
  1005. extern void ceph_add_cap(struct inode *inode,
  1006. struct ceph_mds_session *session, u64 cap_id,
  1007. unsigned issued, unsigned wanted,
  1008. unsigned cap, unsigned seq, u64 realmino, int flags,
  1009. struct ceph_cap **new_cap);
  1010. extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
  1011. extern void ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
  1012. extern void __ceph_remove_caps(struct ceph_inode_info *ci);
  1013. extern void ceph_put_cap(struct ceph_mds_client *mdsc,
  1014. struct ceph_cap *cap);
  1015. extern int ceph_is_any_caps(struct inode *inode);
  1016. extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc);
  1017. extern int ceph_fsync(struct file *file, loff_t start, loff_t end,
  1018. int datasync);
  1019. extern void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
  1020. struct ceph_mds_session *session);
  1021. extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
  1022. struct ceph_mds_session *session);
  1023. void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
  1024. struct ceph_inode_info *ci);
  1025. extern struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci,
  1026. int mds);
  1027. extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
  1028. int mds);
  1029. extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
  1030. bool snap_rwsem_locked);
  1031. extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
  1032. extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
  1033. extern void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had);
  1034. extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
  1035. int had);
  1036. extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
  1037. struct ceph_snap_context *snapc);
  1038. extern void __ceph_remove_capsnap(struct inode *inode,
  1039. struct ceph_cap_snap *capsnap,
  1040. bool *wake_ci, bool *wake_mdsc);
  1041. extern void ceph_remove_capsnap(struct inode *inode,
  1042. struct ceph_cap_snap *capsnap,
  1043. bool *wake_ci, bool *wake_mdsc);
  1044. extern void ceph_flush_snaps(struct ceph_inode_info *ci,
  1045. struct ceph_mds_session **psession);
  1046. extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
  1047. extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
  1048. struct ceph_mds_session *session);
  1049. extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
  1050. extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
  1051. extern int ceph_drop_caps_for_unlink(struct inode *inode);
  1052. extern int ceph_encode_inode_release(void **p, struct inode *inode,
  1053. int mds, int drop, int unless, int force);
  1054. extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
  1055. struct inode *dir,
  1056. int mds, int drop, int unless);
  1057. extern int ceph_get_caps(struct file *filp, int need, int want,
  1058. loff_t endoff, int *got);
  1059. extern int ceph_try_get_caps(struct inode *inode,
  1060. int need, int want, bool nonblock, int *got);
  1061. /* for counting open files by mode */
  1062. extern void ceph_get_fmode(struct ceph_inode_info *ci, int mode, int count);
  1063. extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode, int count);
  1064. extern void __ceph_touch_fmode(struct ceph_inode_info *ci,
  1065. struct ceph_mds_client *mdsc, int fmode);
  1066. /* addr.c */
  1067. extern const struct address_space_operations ceph_aops;
  1068. extern const struct netfs_request_ops ceph_netfs_ops;
  1069. extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
  1070. extern int ceph_uninline_data(struct file *file);
  1071. extern int ceph_pool_perm_check(struct inode *inode, int need);
  1072. extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc);
  1073. int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate);
  1074. static inline bool ceph_has_inline_data(struct ceph_inode_info *ci)
  1075. {
  1076. if (ci->i_inline_version == CEPH_INLINE_NONE ||
  1077. ci->i_inline_version == 1) /* initial version, no data */
  1078. return false;
  1079. return true;
  1080. }
  1081. /* file.c */
  1082. extern const struct file_operations ceph_file_fops;
  1083. extern int ceph_renew_caps(struct inode *inode, int fmode);
  1084. extern int ceph_open(struct inode *inode, struct file *file);
  1085. extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
  1086. struct file *file, unsigned flags, umode_t mode);
  1087. extern int ceph_release(struct inode *inode, struct file *filp);
  1088. extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
  1089. char *data, size_t len);
  1090. /* dir.c */
  1091. extern const struct file_operations ceph_dir_fops;
  1092. extern const struct file_operations ceph_snapdir_fops;
  1093. extern const struct inode_operations ceph_dir_iops;
  1094. extern const struct inode_operations ceph_snapdir_iops;
  1095. extern const struct dentry_operations ceph_dentry_ops;
  1096. extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
  1097. extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
  1098. extern struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
  1099. struct dentry *dentry);
  1100. extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
  1101. struct dentry *dentry, int err);
  1102. extern void __ceph_dentry_lease_touch(struct ceph_dentry_info *di);
  1103. extern void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di);
  1104. extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
  1105. extern int ceph_trim_dentries(struct ceph_mds_client *mdsc);
  1106. extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn);
  1107. extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl);
  1108. /* ioctl.c */
  1109. extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
  1110. /* export.c */
  1111. extern const struct export_operations ceph_export_ops;
  1112. struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino);
  1113. /* locks.c */
  1114. extern __init void ceph_flock_init(void);
  1115. extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
  1116. extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
  1117. extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
  1118. extern int ceph_encode_locks_to_buffer(struct inode *inode,
  1119. struct ceph_filelock *flocks,
  1120. int num_fcntl_locks,
  1121. int num_flock_locks);
  1122. extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
  1123. struct ceph_pagelist *pagelist,
  1124. int num_fcntl_locks, int num_flock_locks);
  1125. /* debugfs.c */
  1126. extern void ceph_fs_debugfs_init(struct ceph_fs_client *client);
  1127. extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client);
  1128. /* quota.c */
/* selects which quota limit(s) a realm query should consider */
enum quota_get_realm {
	QUOTA_GET_MAX_FILES,	/* only the max_files limit */
	QUOTA_GET_MAX_BYTES,	/* only the max_bytes limit */
	QUOTA_GET_ANY		/* either limit counts */
};
  1134. static inline bool __ceph_has_quota(struct ceph_inode_info *ci,
  1135. enum quota_get_realm which)
  1136. {
  1137. bool has_quota = false;
  1138. switch (which) {
  1139. case QUOTA_GET_MAX_BYTES:
  1140. has_quota = !!ci->i_max_bytes;
  1141. break;
  1142. case QUOTA_GET_MAX_FILES:
  1143. has_quota = !!ci->i_max_files;
  1144. break;
  1145. default:
  1146. has_quota = !!(ci->i_max_files || ci->i_max_bytes);
  1147. }
  1148. return has_quota;
  1149. }
  1150. extern void ceph_adjust_quota_realms_count(struct inode *inode, bool inc);
  1151. static inline void __ceph_update_quota(struct ceph_inode_info *ci,
  1152. u64 max_bytes, u64 max_files)
  1153. {
  1154. bool had_quota, has_quota;
  1155. had_quota = __ceph_has_quota(ci, QUOTA_GET_ANY);
  1156. ci->i_max_bytes = max_bytes;
  1157. ci->i_max_files = max_files;
  1158. has_quota = __ceph_has_quota(ci, QUOTA_GET_ANY);
  1159. if (had_quota != has_quota)
  1160. ceph_adjust_quota_realms_count(&ci->netfs.inode, has_quota);
  1161. }
  1162. extern void ceph_handle_quota(struct ceph_mds_client *mdsc,
  1163. struct ceph_mds_session *session,
  1164. struct ceph_msg *msg);
  1165. extern bool ceph_quota_is_max_files_exceeded(struct inode *inode);
  1166. extern bool ceph_quota_is_same_realm(struct inode *old, struct inode *new);
  1167. extern bool ceph_quota_is_max_bytes_exceeded(struct inode *inode,
  1168. loff_t newlen);
  1169. extern bool ceph_quota_is_max_bytes_approaching(struct inode *inode,
  1170. loff_t newlen);
  1171. extern bool ceph_quota_update_statfs(struct ceph_fs_client *fsc,
  1172. struct kstatfs *buf);
  1173. extern void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc);
  1174. bool ceph_inc_mds_stopping_blocker(struct ceph_mds_client *mdsc,
  1175. struct ceph_mds_session *session);
  1176. void ceph_dec_mds_stopping_blocker(struct ceph_mds_client *mdsc);
  1177. #endif /* _FS_CEPH_SUPER_H */