cpcmd.c 2.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * S390 version
  4. * Copyright IBM Corp. 1999, 2007
  5. * Author(s): Martin Schwidefsky ([email protected]),
  6. * Christian Borntraeger ([email protected]),
  7. */
  8. #define KMSG_COMPONENT "cpcmd"
  9. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  10. #include <linux/kernel.h>
  11. #include <linux/export.h>
  12. #include <linux/slab.h>
  13. #include <linux/spinlock.h>
  14. #include <linux/stddef.h>
  15. #include <linux/string.h>
  16. #include <linux/mm.h>
  17. #include <asm/diag.h>
  18. #include <asm/ebcdic.h>
  19. #include <asm/cpcmd.h>
  20. #include <asm/io.h>
/* Serializes all users of cpcmd_buf (taken by cpcmd(); __cpcmd() is unlocked). */
static DEFINE_SPINLOCK(cpcmd_lock);
/* Staging buffer for the EBCDIC CP command; __cpcmd() enforces <= 240 chars. */
static char cpcmd_buf[241];
/*
 * Issue DIAGNOSE 0x08 (CP command) without a response buffer.
 *
 * @cmdlen: length of the EBCDIC command already staged in cpcmd_buf
 *
 * Rx carries the real address of the command buffer, Ry the command
 * length.  CP updates Ry; the value left there is what __cpcmd() hands
 * back to its caller as the CP response code.
 */
static int diag8_noresponse(int cmdlen)
{
	asm volatile(
		" diag %[rx],%[ry],0x8\n"
		: [ry] "+&d" (cmdlen)
		: [rx] "d" (__pa(cpcmd_buf))
		: "cc");
	return cmdlen;
}
/*
 * Issue DIAGNOSE 0x08 (CP command) with a response buffer.
 *
 * @cmdlen:   length of the EBCDIC command staged in cpcmd_buf
 * @response: buffer (must be addressable by its physical address) that
 *            receives the EBCDIC response from CP
 * @rlen:     in: size of @response; out: response length (see below)
 *
 * Register pair Rx holds the command buffer address (even) and the
 * response buffer address (odd); Ry holds the command length with bit
 * 0x40000000 set to request response buffering (even) and the response
 * buffer size (odd).
 *
 * On return CP leaves a value in Ry.odd and sets the condition code.
 * NOTE(review): per the DIAGNOSE 8 interface, a nonzero cc indicates the
 * response did not fit, with Ry.odd holding the residual byte count —
 * hence the add on overflow versus the plain store; confirm against the
 * z/VM CP Programming Services documentation.
 *
 * Returns the value CP left in Ry.even, used as the CP response code.
 */
static int diag8_response(int cmdlen, char *response, int *rlen)
{
	union register_pair rx, ry;
	int cc;

	rx.even = __pa(cpcmd_buf);
	rx.odd = __pa(response);
	ry.even = cmdlen | 0x40000000L;
	ry.odd = *rlen;
	asm volatile(
		" diag %[rx],%[ry],0x8\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=&d" (cc), [ry] "+&d" (ry.pair)
		: [rx] "d" (rx.pair)
		: "cc");
	if (cc)
		*rlen += ry.odd;
	else
		*rlen = ry.odd;
	return ry.even;
}
  53. /*
  54. * __cpcmd has some restrictions over cpcmd
  55. * - __cpcmd is unlocked and therefore not SMP-safe
  56. */
  57. int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
  58. {
  59. int cmdlen;
  60. int rc;
  61. int response_len;
  62. cmdlen = strlen(cmd);
  63. BUG_ON(cmdlen > 240);
  64. memcpy(cpcmd_buf, cmd, cmdlen);
  65. ASCEBC(cpcmd_buf, cmdlen);
  66. diag_stat_inc(DIAG_STAT_X008);
  67. if (response) {
  68. memset(response, 0, rlen);
  69. response_len = rlen;
  70. rc = diag8_response(cmdlen, response, &rlen);
  71. EBCASC(response, response_len);
  72. } else {
  73. rc = diag8_noresponse(cmdlen);
  74. }
  75. if (response_code)
  76. *response_code = rc;
  77. return rlen;
  78. }
  79. EXPORT_SYMBOL(__cpcmd);
  80. int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
  81. {
  82. unsigned long flags;
  83. char *lowbuf;
  84. int len;
  85. if (is_vmalloc_or_module_addr(response)) {
  86. lowbuf = kmalloc(rlen, GFP_KERNEL);
  87. if (!lowbuf) {
  88. pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
  89. return -ENOMEM;
  90. }
  91. spin_lock_irqsave(&cpcmd_lock, flags);
  92. len = __cpcmd(cmd, lowbuf, rlen, response_code);
  93. spin_unlock_irqrestore(&cpcmd_lock, flags);
  94. memcpy(response, lowbuf, rlen);
  95. kfree(lowbuf);
  96. } else {
  97. spin_lock_irqsave(&cpcmd_lock, flags);
  98. len = __cpcmd(cmd, response, rlen, response_code);
  99. spin_unlock_irqrestore(&cpcmd_lock, flags);
  100. }
  101. return len;
  102. }
  103. EXPORT_SYMBOL(cpcmd);