pci_clp.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright IBM Corp. 2012
  4. *
  5. * Author(s):
  6. * Jan Glauber <[email protected]>
  7. */
  8. #define KMSG_COMPONENT "zpci"
  9. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  10. #include <linux/compat.h>
  11. #include <linux/kernel.h>
  12. #include <linux/miscdevice.h>
  13. #include <linux/slab.h>
  14. #include <linux/err.h>
  15. #include <linux/delay.h>
  16. #include <linux/pci.h>
  17. #include <linux/uaccess.h>
  18. #include <asm/asm-extable.h>
  19. #include <asm/pci_debug.h>
  20. #include <asm/pci_clp.h>
  21. #include <asm/clp.h>
  22. #include <uapi/asm/clp.h>
  23. #include "pci_bus.h"
  24. bool zpci_unique_uid;
  25. void update_uid_checking(bool new)
  26. {
  27. if (zpci_unique_uid != new)
  28. zpci_dbg(3, "uid checking:%d\n", new);
  29. zpci_unique_uid = new;
  30. }
  31. static inline void zpci_err_clp(unsigned int rsp, int rc)
  32. {
  33. struct {
  34. unsigned int rsp;
  35. int rc;
  36. } __packed data = {rsp, rc};
  37. zpci_err_hex(&data, sizeof(data));
  38. }
/*
 * Call Logical Processor with c=1, lps=0 and command 1
 * to get the bit mask of installed logical processors
 */
static inline int clp_get_ilp(unsigned long *ilp)
{
	unsigned long mask;
	/* cc preset to 3: reported as failure if the insn faults and the
	 * exception table entry skips the ipm/srl sequence below. */
	int cc = 3;

	asm volatile (
		" .insn rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
		"0: ipm %[cc]\n"		/* extract condition code */
		" srl %[cc],28\n"		/* shift cc into low bits */
		"1:\n"
		EX_TABLE(0b, 1b)		/* on fault: jump past ipm, keep cc=3 */
		: [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
		: "cc");
	*ilp = mask;
	return cc;
}
/*
 * Call Logical Processor with c=0, the give constant lps and an lpcb request.
 */
static __always_inline int clp_req(void *data, unsigned int lps)
{
	/* View the request block as one opaque CLP_BLK_SIZE byte blob so the
	 * "+m" constraint covers the whole block for the compiler. */
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	u64 ignored;
	/* cc preset to 3: reported as failure if the insn faults. */
	int cc = 3;

	asm volatile (
		" .insn rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
		"0: ipm %[cc]\n"		/* extract condition code */
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)		/* on fault: skip ipm, keep cc=3 */
		: [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
		: [req] "a" (req), [lps] "i" (lps)
		: "cc");
	return cc;
}
  77. static void *clp_alloc_block(gfp_t gfp_mask)
  78. {
  79. return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
  80. }
  81. static void clp_free_block(void *ptr)
  82. {
  83. free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
  84. }
  85. static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
  86. struct clp_rsp_query_pci_grp *response)
  87. {
  88. zdev->tlb_refresh = response->refresh;
  89. zdev->dma_mask = response->dasm;
  90. zdev->msi_addr = response->msia;
  91. zdev->max_msi = response->noi;
  92. zdev->fmb_update = response->mui;
  93. zdev->version = response->version;
  94. zdev->maxstbl = response->maxstbl;
  95. zdev->dtsm = response->dtsm;
  96. switch (response->version) {
  97. case 1:
  98. zdev->max_bus_speed = PCIE_SPEED_5_0GT;
  99. break;
  100. default:
  101. zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
  102. break;
  103. }
  104. }
  105. static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
  106. {
  107. struct clp_req_rsp_query_pci_grp *rrb;
  108. int rc;
  109. rrb = clp_alloc_block(GFP_KERNEL);
  110. if (!rrb)
  111. return -ENOMEM;
  112. memset(rrb, 0, sizeof(*rrb));
  113. rrb->request.hdr.len = sizeof(rrb->request);
  114. rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
  115. rrb->response.hdr.len = sizeof(rrb->response);
  116. rrb->request.pfgid = pfgid;
  117. rc = clp_req(rrb, CLP_LPS_PCI);
  118. if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
  119. clp_store_query_pci_fngrp(zdev, &rrb->response);
  120. else {
  121. zpci_err("Q PCI FGRP:\n");
  122. zpci_err_clp(rrb->response.hdr.rsp, rc);
  123. rc = -EIO;
  124. }
  125. clp_free_block(rrb);
  126. return rc;
  127. }
  128. static int clp_store_query_pci_fn(struct zpci_dev *zdev,
  129. struct clp_rsp_query_pci *response)
  130. {
  131. int i;
  132. for (i = 0; i < PCI_STD_NUM_BARS; i++) {
  133. zdev->bars[i].val = le32_to_cpu(response->bar[i]);
  134. zdev->bars[i].size = response->bar_size[i];
  135. }
  136. zdev->start_dma = response->sdma;
  137. zdev->end_dma = response->edma;
  138. zdev->pchid = response->pchid;
  139. zdev->pfgid = response->pfgid;
  140. zdev->pft = response->pft;
  141. zdev->vfn = response->vfn;
  142. zdev->port = response->port;
  143. zdev->uid = response->uid;
  144. zdev->fmb_length = sizeof(u32) * response->fmb_len;
  145. zdev->rid_available = response->rid_avail;
  146. zdev->is_physfn = response->is_physfn;
  147. if (!s390_pci_no_rid && zdev->rid_available)
  148. zdev->devfn = response->rid & ZPCI_RID_MASK_DEVFN;
  149. memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
  150. if (response->util_str_avail) {
  151. memcpy(zdev->util_str, response->util_str,
  152. sizeof(zdev->util_str));
  153. zdev->util_str_avail = 1;
  154. }
  155. zdev->mio_capable = response->mio_addr_avail;
  156. for (i = 0; i < PCI_STD_NUM_BARS; i++) {
  157. if (!(response->mio.valid & (1 << (PCI_STD_NUM_BARS - i - 1))))
  158. continue;
  159. zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
  160. zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
  161. }
  162. return 0;
  163. }
  164. int clp_query_pci_fn(struct zpci_dev *zdev)
  165. {
  166. struct clp_req_rsp_query_pci *rrb;
  167. int rc;
  168. rrb = clp_alloc_block(GFP_KERNEL);
  169. if (!rrb)
  170. return -ENOMEM;
  171. memset(rrb, 0, sizeof(*rrb));
  172. rrb->request.hdr.len = sizeof(rrb->request);
  173. rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
  174. rrb->response.hdr.len = sizeof(rrb->response);
  175. rrb->request.fh = zdev->fh;
  176. rc = clp_req(rrb, CLP_LPS_PCI);
  177. if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
  178. rc = clp_store_query_pci_fn(zdev, &rrb->response);
  179. if (rc)
  180. goto out;
  181. rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
  182. } else {
  183. zpci_err("Q PCI FN:\n");
  184. zpci_err_clp(rrb->response.hdr.rsp, rc);
  185. rc = -EIO;
  186. }
  187. out:
  188. clp_free_block(rrb);
  189. return rc;
  190. }
  191. /**
  192. * clp_set_pci_fn() - Execute a command on a PCI function
  193. * @zdev: Function that will be affected
  194. * @fh: Out parameter for updated function handle
  195. * @nr_dma_as: DMA address space number
  196. * @command: The command code to execute
  197. *
  198. * Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
  199. * > 0 for non-success platform responses
  200. */
  201. static int clp_set_pci_fn(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as, u8 command)
  202. {
  203. struct clp_req_rsp_set_pci *rrb;
  204. int rc, retries = 100;
  205. u32 gisa = 0;
  206. *fh = 0;
  207. rrb = clp_alloc_block(GFP_KERNEL);
  208. if (!rrb)
  209. return -ENOMEM;
  210. if (command != CLP_SET_DISABLE_PCI_FN)
  211. gisa = zdev->gisa;
  212. do {
  213. memset(rrb, 0, sizeof(*rrb));
  214. rrb->request.hdr.len = sizeof(rrb->request);
  215. rrb->request.hdr.cmd = CLP_SET_PCI_FN;
  216. rrb->response.hdr.len = sizeof(rrb->response);
  217. rrb->request.fh = zdev->fh;
  218. rrb->request.oc = command;
  219. rrb->request.ndas = nr_dma_as;
  220. rrb->request.gisa = gisa;
  221. rc = clp_req(rrb, CLP_LPS_PCI);
  222. if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
  223. retries--;
  224. if (retries < 0)
  225. break;
  226. msleep(20);
  227. }
  228. } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
  229. if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
  230. *fh = rrb->response.fh;
  231. } else {
  232. zpci_err("Set PCI FN:\n");
  233. zpci_err_clp(rrb->response.hdr.rsp, rc);
  234. if (!rc)
  235. rc = rrb->response.hdr.rsp;
  236. }
  237. clp_free_block(rrb);
  238. return rc;
  239. }
  240. int clp_setup_writeback_mio(void)
  241. {
  242. struct clp_req_rsp_slpc_pci *rrb;
  243. u8 wb_bit_pos;
  244. int rc;
  245. rrb = clp_alloc_block(GFP_KERNEL);
  246. if (!rrb)
  247. return -ENOMEM;
  248. memset(rrb, 0, sizeof(*rrb));
  249. rrb->request.hdr.len = sizeof(rrb->request);
  250. rrb->request.hdr.cmd = CLP_SLPC;
  251. rrb->response.hdr.len = sizeof(rrb->response);
  252. rc = clp_req(rrb, CLP_LPS_PCI);
  253. if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
  254. if (rrb->response.vwb) {
  255. wb_bit_pos = rrb->response.mio_wb;
  256. set_bit_inv(wb_bit_pos, &mio_wb_bit_mask);
  257. zpci_dbg(3, "wb bit: %d\n", wb_bit_pos);
  258. } else {
  259. zpci_dbg(3, "wb bit: n.a.\n");
  260. }
  261. } else {
  262. zpci_err("SLPC PCI:\n");
  263. zpci_err_clp(rrb->response.hdr.rsp, rc);
  264. rc = -EIO;
  265. }
  266. clp_free_block(rrb);
  267. return rc;
  268. }
  269. int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as)
  270. {
  271. int rc;
  272. rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
  273. zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
  274. if (!rc && zpci_use_mio(zdev)) {
  275. rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_MIO);
  276. zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
  277. zdev->fid, *fh, rc);
  278. if (rc)
  279. clp_disable_fh(zdev, fh);
  280. }
  281. return rc;
  282. }
  283. int clp_disable_fh(struct zpci_dev *zdev, u32 *fh)
  284. {
  285. int rc;
  286. if (!zdev_enabled(zdev))
  287. return 0;
  288. rc = clp_set_pci_fn(zdev, fh, 0, CLP_SET_DISABLE_PCI_FN);
  289. zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
  290. return rc;
  291. }
  292. static int clp_list_pci_req(struct clp_req_rsp_list_pci *rrb,
  293. u64 *resume_token, int *nentries)
  294. {
  295. int rc;
  296. memset(rrb, 0, sizeof(*rrb));
  297. rrb->request.hdr.len = sizeof(rrb->request);
  298. rrb->request.hdr.cmd = CLP_LIST_PCI;
  299. /* store as many entries as possible */
  300. rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
  301. rrb->request.resume_token = *resume_token;
  302. /* Get PCI function handle list */
  303. rc = clp_req(rrb, CLP_LPS_PCI);
  304. if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
  305. zpci_err("List PCI FN:\n");
  306. zpci_err_clp(rrb->response.hdr.rsp, rc);
  307. return -EIO;
  308. }
  309. update_uid_checking(rrb->response.uid_checking);
  310. WARN_ON_ONCE(rrb->response.entry_size !=
  311. sizeof(struct clp_fh_list_entry));
  312. *nentries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
  313. rrb->response.entry_size;
  314. *resume_token = rrb->response.resume_token;
  315. return rc;
  316. }
  317. static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
  318. void (*cb)(struct clp_fh_list_entry *, void *))
  319. {
  320. u64 resume_token = 0;
  321. int nentries, i, rc;
  322. do {
  323. rc = clp_list_pci_req(rrb, &resume_token, &nentries);
  324. if (rc)
  325. return rc;
  326. for (i = 0; i < nentries; i++)
  327. cb(&rrb->response.fh_list[i], data);
  328. } while (resume_token);
  329. return rc;
  330. }
  331. static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
  332. struct clp_fh_list_entry *entry)
  333. {
  334. struct clp_fh_list_entry *fh_list;
  335. u64 resume_token = 0;
  336. int nentries, i, rc;
  337. do {
  338. rc = clp_list_pci_req(rrb, &resume_token, &nentries);
  339. if (rc)
  340. return rc;
  341. fh_list = rrb->response.fh_list;
  342. for (i = 0; i < nentries; i++) {
  343. if (fh_list[i].fid == fid) {
  344. *entry = fh_list[i];
  345. return 0;
  346. }
  347. }
  348. } while (resume_token);
  349. return -ENODEV;
  350. }
  351. static void __clp_add(struct clp_fh_list_entry *entry, void *data)
  352. {
  353. struct zpci_dev *zdev;
  354. if (!entry->vendor_id)
  355. return;
  356. zdev = get_zdev_by_fid(entry->fid);
  357. if (zdev) {
  358. zpci_zdev_put(zdev);
  359. return;
  360. }
  361. zpci_create_device(entry->fid, entry->fh, entry->config_state);
  362. }
  363. int clp_scan_pci_devices(void)
  364. {
  365. struct clp_req_rsp_list_pci *rrb;
  366. int rc;
  367. rrb = clp_alloc_block(GFP_KERNEL);
  368. if (!rrb)
  369. return -ENOMEM;
  370. rc = clp_list_pci(rrb, NULL, __clp_add);
  371. clp_free_block(rrb);
  372. return rc;
  373. }
  374. /*
  375. * Get the current function handle of the function matching @fid
  376. */
  377. int clp_refresh_fh(u32 fid, u32 *fh)
  378. {
  379. struct clp_req_rsp_list_pci *rrb;
  380. struct clp_fh_list_entry entry;
  381. int rc;
  382. rrb = clp_alloc_block(GFP_NOWAIT);
  383. if (!rrb)
  384. return -ENOMEM;
  385. rc = clp_find_pci(rrb, fid, &entry);
  386. if (!rc)
  387. *fh = entry.fh;
  388. clp_free_block(rrb);
  389. return rc;
  390. }
  391. int clp_get_state(u32 fid, enum zpci_state *state)
  392. {
  393. struct clp_req_rsp_list_pci *rrb;
  394. struct clp_fh_list_entry entry;
  395. int rc;
  396. rrb = clp_alloc_block(GFP_ATOMIC);
  397. if (!rrb)
  398. return -ENOMEM;
  399. rc = clp_find_pci(rrb, fid, &entry);
  400. if (!rc) {
  401. *state = entry.config_state;
  402. } else if (rc == -ENODEV) {
  403. *state = ZPCI_FN_STATE_RESERVED;
  404. rc = 0;
  405. }
  406. clp_free_block(rrb);
  407. return rc;
  408. }
  409. static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
  410. {
  411. unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
  412. if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
  413. lpcb->response.hdr.len > limit)
  414. return -EINVAL;
  415. return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
  416. }
  417. static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
  418. {
  419. switch (lpcb->cmd) {
  420. case 0x0001: /* store logical-processor characteristics */
  421. return clp_base_slpc(req, (void *) lpcb);
  422. default:
  423. return -EINVAL;
  424. }
  425. }
  426. static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc_pci *lpcb)
  427. {
  428. unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
  429. if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
  430. lpcb->response.hdr.len > limit)
  431. return -EINVAL;
  432. return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
  433. }
  434. static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
  435. {
  436. unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
  437. if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
  438. lpcb->response.hdr.len > limit)
  439. return -EINVAL;
  440. if (lpcb->request.reserved2 != 0)
  441. return -EINVAL;
  442. return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
  443. }
  444. static int clp_pci_query(struct clp_req *req,
  445. struct clp_req_rsp_query_pci *lpcb)
  446. {
  447. unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
  448. if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
  449. lpcb->response.hdr.len > limit)
  450. return -EINVAL;
  451. if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
  452. return -EINVAL;
  453. return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
  454. }
  455. static int clp_pci_query_grp(struct clp_req *req,
  456. struct clp_req_rsp_query_pci_grp *lpcb)
  457. {
  458. unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
  459. if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
  460. lpcb->response.hdr.len > limit)
  461. return -EINVAL;
  462. if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
  463. lpcb->request.reserved4 != 0)
  464. return -EINVAL;
  465. return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
  466. }
  467. static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
  468. {
  469. switch (lpcb->cmd) {
  470. case 0x0001: /* store logical-processor characteristics */
  471. return clp_pci_slpc(req, (void *) lpcb);
  472. case 0x0002: /* list PCI functions */
  473. return clp_pci_list(req, (void *) lpcb);
  474. case 0x0003: /* query PCI function */
  475. return clp_pci_query(req, (void *) lpcb);
  476. case 0x0004: /* query PCI function group */
  477. return clp_pci_query_grp(req, (void *) lpcb);
  478. default:
  479. return -EINVAL;
  480. }
  481. }
  482. static int clp_normal_command(struct clp_req *req)
  483. {
  484. struct clp_req_hdr *lpcb;
  485. void __user *uptr;
  486. int rc;
  487. rc = -EINVAL;
  488. if (req->lps != 0 && req->lps != 2)
  489. goto out;
  490. rc = -ENOMEM;
  491. lpcb = clp_alloc_block(GFP_KERNEL);
  492. if (!lpcb)
  493. goto out;
  494. rc = -EFAULT;
  495. uptr = (void __force __user *)(unsigned long) req->data_p;
  496. if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
  497. goto out_free;
  498. rc = -EINVAL;
  499. if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
  500. goto out_free;
  501. switch (req->lps) {
  502. case 0:
  503. rc = clp_base_command(req, lpcb);
  504. break;
  505. case 2:
  506. rc = clp_pci_command(req, lpcb);
  507. break;
  508. }
  509. if (rc)
  510. goto out_free;
  511. rc = -EFAULT;
  512. if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
  513. goto out_free;
  514. rc = 0;
  515. out_free:
  516. clp_free_block(lpcb);
  517. out:
  518. return rc;
  519. }
  520. static int clp_immediate_command(struct clp_req *req)
  521. {
  522. void __user *uptr;
  523. unsigned long ilp;
  524. int exists;
  525. if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
  526. return -EINVAL;
  527. uptr = (void __force __user *)(unsigned long) req->data_p;
  528. if (req->cmd == 0) {
  529. /* Command code 0: test for a specific processor */
  530. exists = test_bit_inv(req->lps, &ilp);
  531. return put_user(exists, (int __user *) uptr);
  532. }
  533. /* Command code 1: return bit mask of installed processors */
  534. return put_user(ilp, (unsigned long __user *) uptr);
  535. }
  536. static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
  537. unsigned long arg)
  538. {
  539. struct clp_req req;
  540. void __user *argp;
  541. if (cmd != CLP_SYNC)
  542. return -EINVAL;
  543. argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
  544. if (copy_from_user(&req, argp, sizeof(req)))
  545. return -EFAULT;
  546. if (req.r != 0)
  547. return -EINVAL;
  548. return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
  549. }
/* /dev/clp release: no per-open state to tear down. */
static int clp_misc_release(struct inode *inode, struct file *filp)
{
	return 0;
}
/* File operations for the /dev/clp misc device. */
static const struct file_operations clp_misc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.release = clp_misc_release,
	.unlocked_ioctl = clp_misc_ioctl,
	/* same handler for compat: clp_misc_ioctl resolves compat_ptr itself */
	.compat_ioctl = clp_misc_ioctl,
	.llseek = no_llseek,
};
/* Misc character device exposing CLP to user space as /dev/clp. */
static struct miscdevice clp_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "clp",
	.fops = &clp_misc_fops,
};
/* Register /dev/clp at device initcall time. */
static int __init clp_misc_init(void)
{
	return misc_register(&clp_misc_device);
}
device_initcall(clp_misc_init);