  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * sysfs.c - ACPI sysfs interface to userspace.
  4. */
  5. #define pr_fmt(fmt) "ACPI: " fmt
  6. #include <linux/acpi.h>
  7. #include <linux/bitmap.h>
  8. #include <linux/init.h>
  9. #include <linux/kernel.h>
  10. #include <linux/moduleparam.h>
  11. #include "internal.h"
  12. #ifdef CONFIG_ACPI_DEBUG
  13. /*
  14. * ACPI debug sysfs I/F, including:
  15. * /sys/modules/acpi/parameters/debug_layer
  16. * /sys/modules/acpi/parameters/debug_level
  17. * /sys/modules/acpi/parameters/trace_method_name
  18. * /sys/modules/acpi/parameters/trace_state
  19. * /sys/modules/acpi/parameters/trace_debug_layer
  20. * /sys/modules/acpi/parameters/trace_debug_level
  21. */
  22. struct acpi_dlayer {
  23. const char *name;
  24. unsigned long value;
  25. };
  26. struct acpi_dlevel {
  27. const char *name;
  28. unsigned long value;
  29. };
  30. #define ACPI_DEBUG_INIT(v) { .name = #v, .value = v }
  31. static const struct acpi_dlayer acpi_debug_layers[] = {
  32. ACPI_DEBUG_INIT(ACPI_UTILITIES),
  33. ACPI_DEBUG_INIT(ACPI_HARDWARE),
  34. ACPI_DEBUG_INIT(ACPI_EVENTS),
  35. ACPI_DEBUG_INIT(ACPI_TABLES),
  36. ACPI_DEBUG_INIT(ACPI_NAMESPACE),
  37. ACPI_DEBUG_INIT(ACPI_PARSER),
  38. ACPI_DEBUG_INIT(ACPI_DISPATCHER),
  39. ACPI_DEBUG_INIT(ACPI_EXECUTER),
  40. ACPI_DEBUG_INIT(ACPI_RESOURCES),
  41. ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
  42. ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
  43. ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
  44. ACPI_DEBUG_INIT(ACPI_COMPILER),
  45. ACPI_DEBUG_INIT(ACPI_TOOLS),
  46. };
  47. static const struct acpi_dlevel acpi_debug_levels[] = {
  48. ACPI_DEBUG_INIT(ACPI_LV_INIT),
  49. ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
  50. ACPI_DEBUG_INIT(ACPI_LV_INFO),
  51. ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
  52. ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
  53. ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
  54. ACPI_DEBUG_INIT(ACPI_LV_PARSE),
  55. ACPI_DEBUG_INIT(ACPI_LV_LOAD),
  56. ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
  57. ACPI_DEBUG_INIT(ACPI_LV_EXEC),
  58. ACPI_DEBUG_INIT(ACPI_LV_NAMES),
  59. ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
  60. ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
  61. ACPI_DEBUG_INIT(ACPI_LV_TABLES),
  62. ACPI_DEBUG_INIT(ACPI_LV_VALUES),
  63. ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
  64. ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
  65. ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
  66. ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
  67. ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
  68. ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
  69. ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
  70. ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
  71. ACPI_DEBUG_INIT(ACPI_LV_THREADS),
  72. ACPI_DEBUG_INIT(ACPI_LV_IO),
  73. ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
  74. ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
  75. ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
  76. ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
  77. ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
  78. };
  79. static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
  80. {
  81. int result = 0;
  82. int i;
  83. result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
  84. for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
  85. result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
  86. acpi_debug_layers[i].name,
  87. acpi_debug_layers[i].value,
  88. (acpi_dbg_layer & acpi_debug_layers[i].value)
  89. ? '*' : ' ');
  90. }
  91. result +=
  92. sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
  93. ACPI_ALL_DRIVERS,
  94. (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
  95. ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
  96. == 0 ? ' ' : '-');
  97. result +=
  98. sprintf(buffer + result,
  99. "--\ndebug_layer = 0x%08X ( * = enabled)\n",
  100. acpi_dbg_layer);
  101. return result;
  102. }
  103. static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
  104. {
  105. int result = 0;
  106. int i;
  107. result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
  108. for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
  109. result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
  110. acpi_debug_levels[i].name,
  111. acpi_debug_levels[i].value,
  112. (acpi_dbg_level & acpi_debug_levels[i].value)
  113. ? '*' : ' ');
  114. }
  115. result +=
  116. sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
  117. acpi_dbg_level);
  118. return result;
  119. }
  120. static const struct kernel_param_ops param_ops_debug_layer = {
  121. .set = param_set_uint,
  122. .get = param_get_debug_layer,
  123. };
  124. static const struct kernel_param_ops param_ops_debug_level = {
  125. .set = param_set_uint,
  126. .get = param_get_debug_level,
  127. };
  128. module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
  129. module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
  130. static char trace_method_name[1024];
  131. static int param_set_trace_method_name(const char *val,
  132. const struct kernel_param *kp)
  133. {
  134. u32 saved_flags = 0;
  135. bool is_abs_path = true;
  136. if (*val != '\\')
  137. is_abs_path = false;
  138. if ((is_abs_path && strlen(val) > 1023) ||
  139. (!is_abs_path && strlen(val) > 1022)) {
  140. pr_err("%s: string parameter too long\n", kp->name);
  141. return -ENOSPC;
  142. }
  143. /*
  144. * It's not safe to update acpi_gbl_trace_method_name without
  145. * having the tracer stopped, so we save the original tracer
  146. * state and disable it.
  147. */
  148. saved_flags = acpi_gbl_trace_flags;
  149. (void)acpi_debug_trace(NULL,
  150. acpi_gbl_trace_dbg_level,
  151. acpi_gbl_trace_dbg_layer,
  152. 0);
  153. /* This is a hack. We can't kmalloc in early boot. */
  154. if (is_abs_path)
  155. strcpy(trace_method_name, val);
  156. else {
  157. trace_method_name[0] = '\\';
  158. strcpy(trace_method_name+1, val);
  159. }
  160. /* Restore the original tracer state */
  161. (void)acpi_debug_trace(trace_method_name,
  162. acpi_gbl_trace_dbg_level,
  163. acpi_gbl_trace_dbg_layer,
  164. saved_flags);
  165. return 0;
  166. }
  167. static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
  168. {
  169. return scnprintf(buffer, PAGE_SIZE, "%s\n", acpi_gbl_trace_method_name);
  170. }
  171. static const struct kernel_param_ops param_ops_trace_method = {
  172. .set = param_set_trace_method_name,
  173. .get = param_get_trace_method_name,
  174. };
  175. static const struct kernel_param_ops param_ops_trace_attrib = {
  176. .set = param_set_uint,
  177. .get = param_get_uint,
  178. };
  179. module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
  180. module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
  181. module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
  182. static int param_set_trace_state(const char *val,
  183. const struct kernel_param *kp)
  184. {
  185. acpi_status status;
  186. const char *method = trace_method_name;
  187. u32 flags = 0;
  188. /* So "xxx-once" comparison should go prior than "xxx" comparison */
  189. #define acpi_compare_param(val, key) \
  190. strncmp((val), (key), sizeof(key) - 1)
  191. if (!acpi_compare_param(val, "enable")) {
  192. method = NULL;
  193. flags = ACPI_TRACE_ENABLED;
  194. } else if (!acpi_compare_param(val, "disable"))
  195. method = NULL;
  196. else if (!acpi_compare_param(val, "method-once"))
  197. flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
  198. else if (!acpi_compare_param(val, "method"))
  199. flags = ACPI_TRACE_ENABLED;
  200. else if (!acpi_compare_param(val, "opcode-once"))
  201. flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
  202. else if (!acpi_compare_param(val, "opcode"))
  203. flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
  204. else
  205. return -EINVAL;
  206. status = acpi_debug_trace(method,
  207. acpi_gbl_trace_dbg_level,
  208. acpi_gbl_trace_dbg_layer,
  209. flags);
  210. if (ACPI_FAILURE(status))
  211. return -EBUSY;
  212. return 0;
  213. }
  214. static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
  215. {
  216. if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
  217. return sprintf(buffer, "disable\n");
  218. if (!acpi_gbl_trace_method_name)
  219. return sprintf(buffer, "enable\n");
  220. if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
  221. return sprintf(buffer, "method-once\n");
  222. else
  223. return sprintf(buffer, "method\n");
  224. }
  225. module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
  226. NULL, 0644);
  227. #endif /* CONFIG_ACPI_DEBUG */
  228. /* /sys/modules/acpi/parameters/aml_debug_output */
  229. module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
  230. byte, 0644);
  231. MODULE_PARM_DESC(aml_debug_output,
  232. "To enable/disable the ACPI Debug Object output.");
  233. /* /sys/module/acpi/parameters/acpica_version */
  234. static int param_get_acpica_version(char *buffer,
  235. const struct kernel_param *kp)
  236. {
  237. int result;
  238. result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);
  239. return result;
  240. }
  241. module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
  242. /*
  243. * ACPI table sysfs I/F:
  244. * /sys/firmware/acpi/tables/
  245. * /sys/firmware/acpi/tables/data/
  246. * /sys/firmware/acpi/tables/dynamic/
  247. */
  248. static LIST_HEAD(acpi_table_attr_list);
  249. static struct kobject *tables_kobj;
  250. static struct kobject *tables_data_kobj;
  251. static struct kobject *dynamic_tables_kobj;
  252. static struct kobject *hotplug_kobj;
  253. #define ACPI_MAX_TABLE_INSTANCES 999
  254. #define ACPI_INST_SIZE 4 /* including trailing 0 */
  255. struct acpi_table_attr {
  256. struct bin_attribute attr;
  257. char name[ACPI_NAMESEG_SIZE];
  258. int instance;
  259. char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
  260. struct list_head node;
  261. };
  262. struct acpi_data_attr {
  263. struct bin_attribute attr;
  264. u64 addr;
  265. };
  266. static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
  267. struct bin_attribute *bin_attr, char *buf,
  268. loff_t offset, size_t count)
  269. {
  270. struct acpi_table_attr *table_attr =
  271. container_of(bin_attr, struct acpi_table_attr, attr);
  272. struct acpi_table_header *table_header = NULL;
  273. acpi_status status;
  274. ssize_t rc;
  275. status = acpi_get_table(table_attr->name, table_attr->instance,
  276. &table_header);
  277. if (ACPI_FAILURE(status))
  278. return -ENODEV;
  279. rc = memory_read_from_buffer(buf, count, &offset, table_header,
  280. table_header->length);
  281. acpi_put_table(table_header);
  282. return rc;
  283. }
  284. static int acpi_table_attr_init(struct kobject *tables_obj,
  285. struct acpi_table_attr *table_attr,
  286. struct acpi_table_header *table_header)
  287. {
  288. struct acpi_table_header *header = NULL;
  289. struct acpi_table_attr *attr = NULL;
  290. char instance_str[ACPI_INST_SIZE];
  291. sysfs_attr_init(&table_attr->attr.attr);
  292. ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
  293. list_for_each_entry(attr, &acpi_table_attr_list, node) {
  294. if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
  295. if (table_attr->instance < attr->instance)
  296. table_attr->instance = attr->instance;
  297. }
  298. table_attr->instance++;
  299. if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
  300. pr_warn("%4.4s: too many table instances\n", table_attr->name);
  301. return -ERANGE;
  302. }
  303. ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
  304. table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
  305. if (table_attr->instance > 1 || (table_attr->instance == 1 &&
  306. !acpi_get_table
  307. (table_header->signature, 2, &header))) {
  308. snprintf(instance_str, sizeof(instance_str), "%u",
  309. table_attr->instance);
  310. strcat(table_attr->filename, instance_str);
  311. }
  312. table_attr->attr.size = table_header->length;
  313. table_attr->attr.read = acpi_table_show;
  314. table_attr->attr.attr.name = table_attr->filename;
  315. table_attr->attr.attr.mode = 0400;
  316. return sysfs_create_bin_file(tables_obj, &table_attr->attr);
  317. }
  318. acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
  319. {
  320. struct acpi_table_attr *table_attr;
  321. switch (event) {
  322. case ACPI_TABLE_EVENT_INSTALL:
  323. table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
  324. if (!table_attr)
  325. return AE_NO_MEMORY;
  326. if (acpi_table_attr_init(dynamic_tables_kobj,
  327. table_attr, table)) {
  328. kfree(table_attr);
  329. return AE_ERROR;
  330. }
  331. list_add_tail(&table_attr->node, &acpi_table_attr_list);
  332. break;
  333. case ACPI_TABLE_EVENT_LOAD:
  334. case ACPI_TABLE_EVENT_UNLOAD:
  335. case ACPI_TABLE_EVENT_UNINSTALL:
  336. /*
  337. * we do not need to do anything right now
  338. * because the table is not deleted from the
  339. * global table list when unloading it.
  340. */
  341. break;
  342. default:
  343. return AE_BAD_PARAMETER;
  344. }
  345. return AE_OK;
  346. }
  347. static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
  348. struct bin_attribute *bin_attr, char *buf,
  349. loff_t offset, size_t count)
  350. {
  351. struct acpi_data_attr *data_attr;
  352. void __iomem *base;
  353. ssize_t size;
  354. data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
  355. size = data_attr->attr.size;
  356. if (offset < 0)
  357. return -EINVAL;
  358. if (offset >= size)
  359. return 0;
  360. if (count > size - offset)
  361. count = size - offset;
  362. base = acpi_os_map_iomem(data_attr->addr, size);
  363. if (!base)
  364. return -ENOMEM;
  365. memcpy_fromio(buf, base + offset, count);
  366. acpi_os_unmap_iomem(base, size);
  367. return count;
  368. }
  369. static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
  370. {
  371. struct acpi_table_bert *bert = th;
  372. if (bert->header.length < sizeof(struct acpi_table_bert) ||
  373. bert->region_length < sizeof(struct acpi_hest_generic_status)) {
  374. kfree(data_attr);
  375. return -EINVAL;
  376. }
  377. data_attr->addr = bert->address;
  378. data_attr->attr.size = bert->region_length;
  379. data_attr->attr.attr.name = "BERT";
  380. return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
  381. }
  382. static struct acpi_data_obj {
  383. char *name;
  384. int (*fn)(void *, struct acpi_data_attr *);
  385. } acpi_data_objs[] = {
  386. { ACPI_SIG_BERT, acpi_bert_data_init },
  387. };
  388. #define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
  389. static int acpi_table_data_init(struct acpi_table_header *th)
  390. {
  391. struct acpi_data_attr *data_attr;
  392. int i;
  393. for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
  394. if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
  395. data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
  396. if (!data_attr)
  397. return -ENOMEM;
  398. sysfs_attr_init(&data_attr->attr.attr);
  399. data_attr->attr.read = acpi_data_show;
  400. data_attr->attr.attr.mode = 0400;
  401. return acpi_data_objs[i].fn(th, data_attr);
  402. }
  403. }
  404. return 0;
  405. }
  406. static int acpi_tables_sysfs_init(void)
  407. {
  408. struct acpi_table_attr *table_attr;
  409. struct acpi_table_header *table_header = NULL;
  410. int table_index;
  411. acpi_status status;
  412. int ret;
  413. tables_kobj = kobject_create_and_add("tables", acpi_kobj);
  414. if (!tables_kobj)
  415. goto err;
  416. tables_data_kobj = kobject_create_and_add("data", tables_kobj);
  417. if (!tables_data_kobj)
  418. goto err_tables_data;
  419. dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
  420. if (!dynamic_tables_kobj)
  421. goto err_dynamic_tables;
  422. for (table_index = 0;; table_index++) {
  423. status = acpi_get_table_by_index(table_index, &table_header);
  424. if (status == AE_BAD_PARAMETER)
  425. break;
  426. if (ACPI_FAILURE(status))
  427. continue;
  428. table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
  429. if (!table_attr)
  430. return -ENOMEM;
  431. ret = acpi_table_attr_init(tables_kobj,
  432. table_attr, table_header);
  433. if (ret) {
  434. kfree(table_attr);
  435. return ret;
  436. }
  437. list_add_tail(&table_attr->node, &acpi_table_attr_list);
  438. acpi_table_data_init(table_header);
  439. }
  440. kobject_uevent(tables_kobj, KOBJ_ADD);
  441. kobject_uevent(tables_data_kobj, KOBJ_ADD);
  442. kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
  443. return 0;
  444. err_dynamic_tables:
  445. kobject_put(tables_data_kobj);
  446. err_tables_data:
  447. kobject_put(tables_kobj);
  448. err:
  449. return -ENOMEM;
  450. }
  451. /*
  452. * Detailed ACPI IRQ counters:
  453. * /sys/firmware/acpi/interrupts/
  454. */
  455. u32 acpi_irq_handled;
  456. u32 acpi_irq_not_handled;
  457. #define COUNT_GPE 0
  458. #define COUNT_SCI 1 /* acpi_irq_handled */
  459. #define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */
  460. #define COUNT_ERROR 3 /* other */
  461. #define NUM_COUNTERS_EXTRA 4
  462. struct event_counter {
  463. u32 count;
  464. u32 flags;
  465. };
  466. static struct event_counter *all_counters;
  467. static u32 num_gpes;
  468. static u32 num_counters;
  469. static struct attribute **all_attrs;
  470. static u32 acpi_gpe_count;
  471. static struct attribute_group interrupt_stats_attr_group = {
  472. .name = "interrupts",
  473. };
  474. static struct kobj_attribute *counter_attrs;
  475. static void delete_gpe_attr_array(void)
  476. {
  477. struct event_counter *tmp = all_counters;
  478. all_counters = NULL;
  479. kfree(tmp);
  480. if (counter_attrs) {
  481. int i;
  482. for (i = 0; i < num_gpes; i++)
  483. kfree(counter_attrs[i].attr.name);
  484. kfree(counter_attrs);
  485. }
  486. kfree(all_attrs);
  487. }
  488. static void gpe_count(u32 gpe_number)
  489. {
  490. acpi_gpe_count++;
  491. if (!all_counters)
  492. return;
  493. if (gpe_number < num_gpes)
  494. all_counters[gpe_number].count++;
  495. else
  496. all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
  497. COUNT_ERROR].count++;
  498. }
  499. static void fixed_event_count(u32 event_number)
  500. {
  501. if (!all_counters)
  502. return;
  503. if (event_number < ACPI_NUM_FIXED_EVENTS)
  504. all_counters[num_gpes + event_number].count++;
  505. else
  506. all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
  507. COUNT_ERROR].count++;
  508. }
  509. static void acpi_global_event_handler(u32 event_type, acpi_handle device,
  510. u32 event_number, void *context)
  511. {
  512. if (event_type == ACPI_EVENT_TYPE_GPE) {
  513. gpe_count(event_number);
  514. pr_debug("GPE event 0x%02x\n", event_number);
  515. } else if (event_type == ACPI_EVENT_TYPE_FIXED) {
  516. fixed_event_count(event_number);
  517. pr_debug("Fixed event 0x%02x\n", event_number);
  518. } else {
  519. pr_debug("Other event 0x%02x\n", event_number);
  520. }
  521. }
  522. static int get_status(u32 index, acpi_event_status *ret,
  523. acpi_handle *handle)
  524. {
  525. acpi_status status;
  526. if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
  527. return -EINVAL;
  528. if (index < num_gpes) {
  529. status = acpi_get_gpe_device(index, handle);
  530. if (ACPI_FAILURE(status)) {
  531. pr_warn("Invalid GPE 0x%x", index);
  532. return -ENXIO;
  533. }
  534. status = acpi_get_gpe_status(*handle, index, ret);
  535. } else {
  536. status = acpi_get_event_status(index - num_gpes, ret);
  537. }
  538. if (ACPI_FAILURE(status))
  539. return -EIO;
  540. return 0;
  541. }
  542. static ssize_t counter_show(struct kobject *kobj,
  543. struct kobj_attribute *attr, char *buf)
  544. {
  545. int index = attr - counter_attrs;
  546. int size;
  547. acpi_handle handle;
  548. acpi_event_status status;
  549. int result = 0;
  550. all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
  551. acpi_irq_handled;
  552. all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
  553. acpi_irq_not_handled;
  554. all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
  555. acpi_gpe_count;
  556. size = sprintf(buf, "%8u", all_counters[index].count);
  557. /* "gpe_all" or "sci" */
  558. if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
  559. goto end;
  560. result = get_status(index, &status, &handle);
  561. if (result)
  562. goto end;
  563. if (status & ACPI_EVENT_FLAG_ENABLE_SET)
  564. size += sprintf(buf + size, " EN");
  565. else
  566. size += sprintf(buf + size, " ");
  567. if (status & ACPI_EVENT_FLAG_STATUS_SET)
  568. size += sprintf(buf + size, " STS");
  569. else
  570. size += sprintf(buf + size, " ");
  571. if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
  572. size += sprintf(buf + size, " invalid ");
  573. else if (status & ACPI_EVENT_FLAG_ENABLED)
  574. size += sprintf(buf + size, " enabled ");
  575. else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
  576. size += sprintf(buf + size, " wake_enabled");
  577. else
  578. size += sprintf(buf + size, " disabled ");
  579. if (status & ACPI_EVENT_FLAG_MASKED)
  580. size += sprintf(buf + size, " masked ");
  581. else
  582. size += sprintf(buf + size, " unmasked");
  583. end:
  584. size += sprintf(buf + size, "\n");
  585. return result ? result : size;
  586. }
  587. /*
  588. * counter_set() sets the specified counter.
  589. * setting the total "sci" file to any value clears all counters.
  590. * enable/disable/clear a gpe/fixed event in user space.
  591. */
  592. static ssize_t counter_set(struct kobject *kobj,
  593. struct kobj_attribute *attr, const char *buf,
  594. size_t size)
  595. {
  596. int index = attr - counter_attrs;
  597. acpi_event_status status;
  598. acpi_handle handle;
  599. int result = 0;
  600. unsigned long tmp;
  601. if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
  602. int i;
  603. for (i = 0; i < num_counters; ++i)
  604. all_counters[i].count = 0;
  605. acpi_gpe_count = 0;
  606. acpi_irq_handled = 0;
  607. acpi_irq_not_handled = 0;
  608. goto end;
  609. }
  610. /* show the event status for both GPEs and Fixed Events */
  611. result = get_status(index, &status, &handle);
  612. if (result)
  613. goto end;
  614. if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
  615. pr_warn("Can not change Invalid GPE/Fixed Event status\n");
  616. return -EINVAL;
  617. }
  618. if (index < num_gpes) {
  619. if (!strcmp(buf, "disable\n") &&
  620. (status & ACPI_EVENT_FLAG_ENABLED))
  621. result = acpi_disable_gpe(handle, index);
  622. else if (!strcmp(buf, "enable\n") &&
  623. !(status & ACPI_EVENT_FLAG_ENABLED))
  624. result = acpi_enable_gpe(handle, index);
  625. else if (!strcmp(buf, "clear\n") &&
  626. (status & ACPI_EVENT_FLAG_STATUS_SET))
  627. result = acpi_clear_gpe(handle, index);
  628. else if (!strcmp(buf, "mask\n"))
  629. result = acpi_mask_gpe(handle, index, TRUE);
  630. else if (!strcmp(buf, "unmask\n"))
  631. result = acpi_mask_gpe(handle, index, FALSE);
  632. else if (!kstrtoul(buf, 0, &tmp))
  633. all_counters[index].count = tmp;
  634. else
  635. result = -EINVAL;
  636. } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
  637. int event = index - num_gpes;
  638. if (!strcmp(buf, "disable\n") &&
  639. (status & ACPI_EVENT_FLAG_ENABLE_SET))
  640. result = acpi_disable_event(event, ACPI_NOT_ISR);
  641. else if (!strcmp(buf, "enable\n") &&
  642. !(status & ACPI_EVENT_FLAG_ENABLE_SET))
  643. result = acpi_enable_event(event, ACPI_NOT_ISR);
  644. else if (!strcmp(buf, "clear\n") &&
  645. (status & ACPI_EVENT_FLAG_STATUS_SET))
  646. result = acpi_clear_event(event);
  647. else if (!kstrtoul(buf, 0, &tmp))
  648. all_counters[index].count = tmp;
  649. else
  650. result = -EINVAL;
  651. } else
  652. all_counters[index].count = strtoul(buf, NULL, 0);
  653. if (ACPI_FAILURE(result))
  654. result = -EINVAL;
  655. end:
  656. return result ? result : size;
  657. }
  658. /*
  659. * A Quirk Mechanism for GPE Flooding Prevention:
  660. *
  661. * Quirks may be needed to prevent GPE flooding on a specific GPE. The
  662. * flooding typically cannot be detected and automatically prevented by
  663. * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
  664. * the AML tables. This normally indicates a feature gap in Linux, thus
  665. * instead of providing endless quirk tables, we provide a boot parameter
  666. * for those who want this quirk. For example, if the users want to prevent
  667. * the GPE flooding for GPE 00, they need to specify the following boot
  668. * parameter:
  669. * acpi_mask_gpe=0x00
  670. * Note, the parameter can be a list (see bitmap_parselist() for the details).
  671. * The masking status can be modified by the following runtime controlling
  672. * interface:
  673. * echo unmask > /sys/firmware/acpi/interrupts/gpe00
  674. */
  675. #define ACPI_MASKABLE_GPE_MAX 0x100
  676. static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
  677. static int __init acpi_gpe_set_masked_gpes(char *val)
  678. {
  679. int ret;
  680. u8 gpe;
  681. ret = kstrtou8(val, 0, &gpe);
  682. if (ret) {
  683. ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
  684. if (ret)
  685. return ret;
  686. } else
  687. set_bit(gpe, acpi_masked_gpes_map);
  688. return 1;
  689. }
  690. __setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
  691. void __init acpi_gpe_apply_masked_gpes(void)
  692. {
  693. acpi_handle handle;
  694. acpi_status status;
  695. u16 gpe;
  696. for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
  697. status = acpi_get_gpe_device(gpe, &handle);
  698. if (ACPI_SUCCESS(status)) {
  699. pr_info("Masking GPE 0x%x.\n", gpe);
  700. (void)acpi_mask_gpe(handle, gpe, TRUE);
  701. }
  702. }
  703. }
  704. void acpi_irq_stats_init(void)
  705. {
  706. acpi_status status;
  707. int i;
  708. if (all_counters)
  709. return;
  710. num_gpes = acpi_current_gpe_count;
  711. num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
  712. all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
  713. if (all_attrs == NULL)
  714. return;
  715. all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
  716. if (all_counters == NULL)
  717. goto fail;
  718. status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
  719. if (ACPI_FAILURE(status))
  720. goto fail;
  721. counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
  722. if (counter_attrs == NULL)
  723. goto fail;
  724. for (i = 0; i < num_counters; ++i) {
  725. char buffer[12];
  726. char *name;
  727. if (i < num_gpes)
  728. sprintf(buffer, "gpe%02X", i);
  729. else if (i == num_gpes + ACPI_EVENT_PMTIMER)
  730. sprintf(buffer, "ff_pmtimer");
  731. else if (i == num_gpes + ACPI_EVENT_GLOBAL)
  732. sprintf(buffer, "ff_gbl_lock");
  733. else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
  734. sprintf(buffer, "ff_pwr_btn");
  735. else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
  736. sprintf(buffer, "ff_slp_btn");
  737. else if (i == num_gpes + ACPI_EVENT_RTC)
  738. sprintf(buffer, "ff_rt_clk");
  739. else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
  740. sprintf(buffer, "gpe_all");
  741. else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
  742. sprintf(buffer, "sci");
  743. else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
  744. sprintf(buffer, "sci_not");
  745. else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
  746. sprintf(buffer, "error");
  747. else
  748. sprintf(buffer, "bug%02X", i);
  749. name = kstrdup(buffer, GFP_KERNEL);
  750. if (name == NULL)
  751. goto fail;
  752. sysfs_attr_init(&counter_attrs[i].attr);
  753. counter_attrs[i].attr.name = name;
  754. counter_attrs[i].attr.mode = 0644;
  755. counter_attrs[i].show = counter_show;
  756. counter_attrs[i].store = counter_set;
  757. all_attrs[i] = &counter_attrs[i].attr;
  758. }
  759. interrupt_stats_attr_group.attrs = all_attrs;
  760. if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
  761. return;
  762. fail:
  763. delete_gpe_attr_array();
  764. }
  765. static void __exit interrupt_stats_exit(void)
  766. {
  767. sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
  768. delete_gpe_attr_array();
  769. }
  770. static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
  771. {
  772. return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
  773. }
  774. static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
  775. static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
  776. {
  777. struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
  778. return sprintf(buf, "%d\n", hotplug->enabled);
  779. }
  780. static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
  781. const char *buf, size_t size)
  782. {
  783. struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
  784. unsigned int val;
  785. if (kstrtouint(buf, 10, &val) || val > 1)
  786. return -EINVAL;
  787. acpi_scan_hotplug_enabled(hotplug, val);
  788. return size;
  789. }
  790. static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
  791. static struct attribute *hotplug_profile_attrs[] = {
  792. &hotplug_enabled_attr.attr,
  793. NULL
  794. };
  795. ATTRIBUTE_GROUPS(hotplug_profile);
  796. static struct kobj_type acpi_hotplug_profile_ktype = {
  797. .sysfs_ops = &kobj_sysfs_ops,
  798. .default_groups = hotplug_profile_groups,
  799. };
  800. void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
  801. const char *name)
  802. {
  803. int error;
  804. if (!hotplug_kobj)
  805. goto err_out;
  806. error = kobject_init_and_add(&hotplug->kobj,
  807. &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
  808. if (error) {
  809. kobject_put(&hotplug->kobj);
  810. goto err_out;
  811. }
  812. kobject_uevent(&hotplug->kobj, KOBJ_ADD);
  813. return;
  814. err_out:
  815. pr_err("Unable to add hotplug profile '%s'\n", name);
  816. }
  817. static ssize_t force_remove_show(struct kobject *kobj,
  818. struct kobj_attribute *attr, char *buf)
  819. {
  820. return sprintf(buf, "%d\n", 0);
  821. }
  822. static ssize_t force_remove_store(struct kobject *kobj,
  823. struct kobj_attribute *attr,
  824. const char *buf, size_t size)
  825. {
  826. bool val;
  827. int ret;
  828. ret = strtobool(buf, &val);
  829. if (ret < 0)
  830. return ret;
  831. if (val) {
  832. pr_err("Enabling force_remove is not supported anymore. Please report to [email protected] if you depend on this functionality\n");
  833. return -EINVAL;
  834. }
  835. return size;
  836. }
  837. static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
  838. int __init acpi_sysfs_init(void)
  839. {
  840. int result;
  841. result = acpi_tables_sysfs_init();
  842. if (result)
  843. return result;
  844. hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
  845. if (!hotplug_kobj)
  846. return -ENOMEM;
  847. result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
  848. if (result)
  849. return result;
  850. result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
  851. return result;
  852. }