// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 1999-2010 David Woodhouse <[email protected]>
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <linux/uaccess.h>

#include "mtdcore.h"

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information of various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};
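
/* Seek within the device; offsets are clamped to the range [0, mtd->size]. */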
static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;

	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}
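
/*
 * Character device minor numbers encode both the MTD device number and
 * the access mode: minor 2*N opens mtd(N) read-write, while minor 2*N+1
 * is the read-only node; hence the "devnum = minor >> 1" below.
 */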
static int mtdchar_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	pr_debug("MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);
	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	if (mtd->type == MTD_ABSENT) {
		ret = -ENODEV;
		goto out1;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		ret = -EACCES;
		goto out1;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		ret = -ENOMEM;
		goto out1;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;
	return 0;

out1:
	put_mtd_device(mtd);
	return ret;
} /* mtdchar_open */

/*====================================================================*/

static int mtdchar_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE))
		mtd_sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtdchar_close */

/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is under low-memory situations
 * or if memory is highly fragmented at the cost of reducing the
 * performance of the requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */

static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size) {
		if (*ppos < mtd->size)
			count = mtd->size - *ppos;
		else
			count = 0;
	}

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/*
		 * NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. For our userspace tools it is important to dump
		 * areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way must be
		 * aware of the fact that it deals with NAND.
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_read */

static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	pr_debug("MTD_write\n");

	if (*ppos >= mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}

		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_write */

/*======================================================================

	IOCTL calls for getting device parameters.

======================================================================*/
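
/*
 * Switch the file into one of the OTP access modes. A zero-length read
 * of the protection registers serves purely as a capability probe: if
 * it returns -EOPNOTSUPP, the device has no such OTP area.
 */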
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		mfi->mode = MTD_FILE_MODE_NORMAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
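
/*
 * Write out-of-band data for MEMWRITEOOB and friends. @start is split
 * into a page-aligned address and an offset within that page's OOB
 * area; transfers are capped at 4096 bytes per call.
 */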
static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
			    uint64_t start, uint32_t length, void __user *ptr,
			    uint32_t __user *retp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	uint32_t retlen;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}
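
/*
 * Read out-of-band data for MEMREADOOB and friends; the counterpart of
 * mtdchar_writeoob() above, with the same alignment and 4096-byte rules.
 */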
static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
			   uint64_t start, uint32_t length, void __user *ptr,
			   uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}

/*
 * Copies (and truncates, if necessary) OOB layout information to the
 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 * can describe any kind of OOB layout with almost zero overhead from a
 * memory usage point of view).
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		     eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}
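
/*
 * Same idea as shrink_ecclayout(), but filling the even older
 * nand_oobinfo structure used by the legacy MEMGETOOBSEL ioctl.
 */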
static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < 8; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}
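
/*
 * Handle the BLKPG ioctl: add or delete an MTD partition at runtime.
 * Requires CAP_SYS_ADMIN; partitions may only be added on the master
 * device, not on another partition.
 */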
static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
			       struct blkpg_ioctl_arg *arg)
{
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&p, arg->data, sizeof(p)))
		return -EFAULT;

	switch (arg->op) {
	case BLKPG_ADD_PARTITION:

		/* Only master mtd device must be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		/* Sanitize user input */
		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}
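
/*
 * Clamp ops->ooblen so that no more OOB bytes are requested than the
 * pages spanned by the data transfer can hold:
 * (number of pages touched) * (available OOB bytes per page).
 */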
static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
			      struct mtd_oob_ops *ops)
{
	uint32_t start_page, end_page;
	u32 oob_per_page;

	if (ops->len == 0 || ops->ooblen == 0)
		return;

	start_page = mtd_div_by_ws(start, mtd);
	end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
	oob_per_page = mtd_oobavail(mtd, ops);

	ops->ooblen = min_t(size_t, ops->ooblen,
			    (end_page - start_page + 1) * oob_per_page);
}
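
/*
 * Handle the MEMWRITE ioctl: a data+OOB write in one request. The user
 * buffers are copied through bounce buffers of at most one eraseblock,
 * so large requests are split over several mtd_write_oob() calls.
 */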
static noinline_for_stack int
mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_write_req req;
	const void __user *usr_data, *usr_oob;
	uint8_t *datbuf = NULL, *oobbuf = NULL;
	size_t datbuf_len, oobbuf_len;
	int ret = 0;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	if (!usr_data)
		req.len = 0;

	if (!usr_oob)
		req.ooblen = 0;

	req.len &= 0xffffffff;
	req.ooblen &= 0xffffffff;

	if (req.start + req.len > mtd->size)
		return -EINVAL;

	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
	if (datbuf_len > 0) {
		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
		if (!datbuf)
			return -ENOMEM;
	}

	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
	if (oobbuf_len > 0) {
		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
		if (!oobbuf) {
			kvfree(datbuf);
			return -ENOMEM;
		}
	}

	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
		struct mtd_oob_ops ops = {
			.mode = req.mode,
			.len = min_t(size_t, req.len, datbuf_len),
			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
			.datbuf = datbuf,
			.oobbuf = oobbuf,
		};

		/*
		 * Shorten non-page-aligned, eraseblock-sized writes so that
		 * the write ends on an eraseblock boundary. This is necessary
		 * for adjust_oob_length() to properly handle non-page-aligned
		 * writes.
		 */
		if (ops.len == mtd->erasesize)
			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);

		/*
		 * For writes which are not OOB-only, adjust the amount of OOB
		 * data written according to the number of data pages written.
		 * This is necessary to prevent OOB data from being skipped
		 * over in data+OOB writes requiring multiple mtd_write_oob()
		 * calls to be completed.
		 */
		adjust_oob_length(mtd, req.start, &ops);

		if (copy_from_user(datbuf, usr_data, ops.len) ||
		    copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
			ret = -EFAULT;
			break;
		}

		ret = mtd_write_oob(mtd, req.start, &ops);
		if (ret)
			break;

		req.start += ops.retlen;
		req.len -= ops.retlen;
		usr_data += ops.retlen;

		req.ooblen -= ops.oobretlen;
		usr_oob += ops.oobretlen;
	}

	kvfree(datbuf);
	kvfree(oobbuf);

	return ret;
}
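
/*
 * Handle the MEMREAD ioctl: the read-side counterpart of MEMWRITE,
 * which additionally accumulates ECC statistics across the individual
 * mtd_read_oob() calls and reports them back to userspace.
 */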
static noinline_for_stack int
mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_read_req req;
	void __user *usr_data, *usr_oob;
	uint8_t *datbuf = NULL, *oobbuf = NULL;
	size_t datbuf_len, oobbuf_len;
	size_t orig_len, orig_ooblen;
	int ret = 0;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	orig_len = req.len;
	orig_ooblen = req.ooblen;

	usr_data = (void __user *)(uintptr_t)req.usr_data;
	usr_oob = (void __user *)(uintptr_t)req.usr_oob;

	if (!master->_read_oob)
		return -EOPNOTSUPP;

	if (!usr_data)
		req.len = 0;

	if (!usr_oob)
		req.ooblen = 0;

	req.ecc_stats.uncorrectable_errors = 0;
	req.ecc_stats.corrected_bitflips = 0;
	req.ecc_stats.max_bitflips = 0;

	req.len &= 0xffffffff;
	req.ooblen &= 0xffffffff;

	if (req.start + req.len > mtd->size) {
		ret = -EINVAL;
		goto out;
	}

	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
	if (datbuf_len > 0) {
		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
		if (!datbuf) {
			ret = -ENOMEM;
			goto out;
		}
	}

	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
	if (oobbuf_len > 0) {
		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
		if (!oobbuf) {
			ret = -ENOMEM;
			goto out;
		}
	}

	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
		struct mtd_req_stats stats;
		struct mtd_oob_ops ops = {
			.mode = req.mode,
			.len = min_t(size_t, req.len, datbuf_len),
			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
			.datbuf = datbuf,
			.oobbuf = oobbuf,
			.stats = &stats,
		};

		/*
		 * Shorten non-page-aligned, eraseblock-sized reads so that the
		 * read ends on an eraseblock boundary. This is necessary in
		 * order to prevent OOB data for some pages from being
		 * duplicated in the output of non-page-aligned reads requiring
		 * multiple mtd_read_oob() calls to be completed.
		 */
		if (ops.len == mtd->erasesize)
			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);

		ret = mtd_read_oob(mtd, (loff_t)req.start, &ops);

		req.ecc_stats.uncorrectable_errors +=
			stats.uncorrectable_errors;
		req.ecc_stats.corrected_bitflips += stats.corrected_bitflips;
		req.ecc_stats.max_bitflips =
			max(req.ecc_stats.max_bitflips, stats.max_bitflips);

		if (ret && !mtd_is_bitflip_or_eccerr(ret))
			break;

		if (copy_to_user(usr_data, ops.datbuf, ops.retlen) ||
		    copy_to_user(usr_oob, ops.oobbuf, ops.oobretlen)) {
			ret = -EFAULT;
			break;
		}

		req.start += ops.retlen;
		req.len -= ops.retlen;
		usr_data += ops.retlen;

		req.ooblen -= ops.oobretlen;
		usr_oob += ops.oobretlen;
	}

	/*
	 * As multiple iterations of the above loop (and therefore multiple
	 * mtd_read_oob() calls) may be necessary to complete the read request,
	 * adjust the final return code to ensure it accounts for all detected
	 * ECC errors.
	 */
	if (!ret || mtd_is_bitflip(ret)) {
		if (req.ecc_stats.uncorrectable_errors > 0)
			ret = -EBADMSG;
		else if (req.ecc_stats.corrected_bitflips > 0)
			ret = -EUCLEAN;
	}

out:
	req.len = orig_len - req.len;
	req.ooblen = orig_ooblen - req.ooblen;

	if (copy_to_user(argp, &req, sizeof(req)))
		ret = -EFAULT;

	kvfree(datbuf);
	kvfree(oobbuf);

	return ret;
}
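
/*
 * Main ioctl dispatcher. A typical userspace interaction (illustrative
 * sketch only, error handling omitted) looks like:
 *
 *	struct mtd_info_user info;
 *	int fd = open("/dev/mtd0", O_RDONLY);
 *
 *	if (ioctl(fd, MEMGETINFO, &info) == 0)
 *		printf("size=%u erasesize=%u\n", info.size, info.erasesize);
 */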
static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	/*
	 * Check the file mode to require "dangerous" commands to have write
	 * permissions.
	 */
	switch (cmd) {
	/* "safe" commands */
	case MEMGETREGIONCOUNT:
	case MEMGETREGIONINFO:
	case MEMGETINFO:
	case MEMREADOOB:
	case MEMREADOOB64:
	case MEMREAD:
	case MEMISLOCKED:
	case MEMGETOOBSEL:
	case MEMGETBADBLOCK:
	case OTPSELECT:
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	case ECCGETLAYOUT:
	case ECCGETSTATS:
	case MTDFILEMODE:
	case BLKPG:
	case BLKRRPART:
		break;

	/* "dangerous" commands */
	case MEMERASE:
	case MEMERASE64:
	case MEMLOCK:
	case MEMUNLOCK:
	case MEMSETBADBLOCK:
	case MEMWRITEOOB:
	case MEMWRITEOOB64:
	case MEMWRITE:
	case OTPLOCK:
	case OTPERASE:
		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;
		break;

	default:
		return -ENOTTY;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= mtd->size;
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		/* The below field is obsolete */
		info.padding	= 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}

			ret = mtd_erase(mtd, erase);
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
		      (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMREAD:
	{
		ret = mtdchar_read_ioctl(mtd,
		      (struct mtd_read_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		ret = get_oobinfo(mtd, &oi);
		if (ret)
			return ret;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}

	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;

		if (!buf)
			return -ENOMEM;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (!ret) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = retlen / sizeof(struct otp_info);

				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, retlen);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	case OTPERASE:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (cmd == OTPLOCK)
			ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		else
			ret = mtd_erase_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			break;

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	case BLKPG:
	{
		struct blkpg_ioctl_arg __user *blk_arg = argp;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&a, blk_arg, sizeof(a)))
			ret = -EFAULT;
		else
			ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}
	}

	return ret;
} /* memory_ioctl */
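
/*
 * All ioctls are serialized against each other on the master device's
 * chrdev_lock mutex.
 */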
static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	mutex_lock(&master->master.chrdev_lock);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}
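
/*
 * 32-bit compatibility: struct mtd_oob_buf embeds a pointer, so its
 * layout differs between 32-bit and 64-bit userspace and the OOB
 * ioctls need explicit translation here. Pointer-free ioctls fall
 * through to mtdchar_ioctl() unchanged.
 */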
#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)

static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&master->master.chrdev_lock);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EPERM;
			break;
		}

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	case BLKPG:
	{
		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
		struct blkpg_compat_ioctl_arg __user *uarg = argp;
		struct blkpg_compat_ioctl_arg compat_arg;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
			ret = -EFAULT;
			break;
		}

		memset(&a, 0, sizeof(a));
		a.op = compat_arg.op;
		a.flags = compat_arg.flags;
		a.datalen = compat_arg.datalen;
		a.data = compat_ptr(compat_arg.data);

		ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	default:
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * Try to determine where a shared mapping can be made.
 * - only supported for NOMMU at the moment (on MMU kernels private
 *   mappings are copied rather than mapped directly to the device)
 */
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	if (addr != 0)
		return (unsigned long) -EINVAL;

	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}

static unsigned mtdchar_mmap_capabilities(struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;

	return mtd_mmap_capabilities(mfi->mtd);
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/* This is broken because it assumes the MTD device is map-based
	   and that mtd->priv is a valid struct map_info. It should be
	   replaced with something that uses the mtd_get_unmapped_area()
	   operation properly. */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtdchar_lseek,
	.read		= mtdchar_read,
	.write		= mtdchar_write,
	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtdchar_compat_ioctl,
#endif
	.open		= mtdchar_open,
	.release	= mtdchar_close,
	.mmap		= mtdchar_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtdchar_get_unmapped_area,
	.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};
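
/*
 * Register char major MTD_CHAR_MAJOR for the full minor range; each MTD
 * device uses two minors (even = read-write, odd = read-only).
 */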
int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_err("Can't allocate major number %d for MTD\n",
		       MTD_CHAR_MAJOR);
		return ret;
	}

	return ret;
}

void __exit cleanup_mtdchar(void)
{
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);