
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <[email protected]>
 *	David Mosberger-Tang <[email protected]>
 *
 * 6/10/99: Updated to bring in sync with x86 version to facilitate
 *	    support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <[email protected]> Implemented pci_irq_to_vector
 *	    PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <[email protected]>
 *	    Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>
#include <linux/pgtable.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/tlbflush.h>

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};

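/*
 * Find a free IRQ number in the dynamic range
 * (IA64_FIRST_DEVICE_VECTOR .. NR_IRQS - 1).  Returns -ENOSPC when every
 * entry is used or reserved.  Callers hold vector_lock.
 */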
static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

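/*
 * Find a device vector that is still unused on every CPU in @domain;
 * vector_table[] records, per vector, the CPUs it is already bound on.
 * Callers hold vector_lock.
 */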
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpumask_and(&mask, &domain, &vector_table[vector]);
		if (!cpumask_empty(&mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}

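/*
 * Record the irq <-> vector binding for the online CPUs in @domain:
 * update the per-cpu vector_irq[] reverse map, irq_cfg[irq] and
 * vector_table[vector].  Caller must hold vector_lock.
 */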
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu(cpu, &mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

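/*
 * Undo __bind_irq_vector(): clear the per-cpu reverse mappings and return
 * the vector to the pool for this domain.  Caller must hold vector_lock.
 */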
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#ifdef CONFIG_SMP

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return *cpumask_of(cpu);
	return CPU_MASK_ALL;
}

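/*
 * Prepare to migrate @irq to @cpu: allocate a fresh vector in the target
 * CPU's allocation domain and remember the old domain so that
 * irq_complete_move() and smp_irq_move_cleanup_interrupt() can release
 * the old vector once the interrupt fires on its new CPU.
 */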
static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpumask_test_cpu(cpu, &cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

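/*
 * Called while handling @irq after a move was prepared.  Once the
 * interrupt is seen outside the old domain, IPI every online CPU of the
 * old domain with IA64_IRQ_MOVE_VECTOR so it drops its stale mapping.
 */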
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
	for_each_cpu(i, &cleanup_mask)
		ia64_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}

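/*
 * Handler for IA64_IRQ_MOVE_VECTOR: runs on CPUs of the old domain and
 * releases every vector whose migration has completed on this CPU.
 */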
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;

		irq = __this_cpu_read(vector_irq[vector]);
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpumask_test_cpu(me, &cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__this_cpu_write(vector_irq[vector], -1);
		cpumask_clear_cpu(me, &vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
 unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

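/*
 * "vector=percpu" on the kernel command line selects per-cpu vector
 * domains: each vector is then allocated on a single CPU rather than
 * across all CPUs (and sets no_int_routing).
 */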
static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif

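/*
 * Tear down @irq but keep its number reserved (IRQ_RSVD) so that
 * find_unassigned_irq() will not hand it out again.
 */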
void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	irq_init_desc(irq);
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic irq allocation and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		irq_init_desc(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	irq_init_desc(irq);
	clear_irq_vector(irq);
}

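/*
 * Sketch of how an MSI-style user is expected to drive the two helpers
 * above (illustration only, not taken verbatim from a caller):
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	... program the device with irq_to_vector(irq) ...
 *	destroy_irq(irq);
 */
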
#ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec)		(vec == IA64_IPI_RESCHEDULE)
# define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
# define IS_RESCHEDULE(vec)		(0)
# define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif

/*
 * This is where the IVT branches when we get an external
 * interrupt.  It dispatches to the correct hardware IRQ handler via a
 * function pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here. This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irq_this_cpu(irq);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a cpu is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irq_this_cpu(irq);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call the normal interrupt dispatch path as if this
			 * interrupt had arrived through a real interrupt
			 * handler, passing NULL for pt_regs.  This could
			 * probably share code with ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif

#ifdef CONFIG_SMP
static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
	return IRQ_NONE;
}

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */
#endif

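/*
 * Bind a per-cpu interrupt identically on every CPU: the IRQ number is
 * the vector itself and the interrupt is handled via handle_percpu_irq.
 */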
void
register_percpu_irq(ia64_vector vec, irq_handler_t handler, unsigned long flags,
		    const char *name)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (handler)
		if (request_irq(irq, handler, flags, name, NULL))
			pr_err("Failed to request irq %u (%s)\n", irq, name);
	irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, handle_IPI, 0, "IPI");
	register_percpu_irq(IA64_IPI_RESCHEDULE, dummy_handler, 0, "resched");
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, dummy_handler, 0,
			    "tlb_flush");
#endif
}

void __init
init_IRQ (void)
{
	acpi_boot_init();
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL, 0, NULL);
#ifdef CONFIG_SMP
	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
				    smp_irq_move_cleanup_interrupt, 0,
				    "irq_move");
	}
#endif
}

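/*
 * ipi_data packs the delivery mode above the 8-bit vector; ipi_addr
 * selects the target CPU's 16-byte slot in the processor interrupt
 * block (physical CPU id, 8-bit ID + 8-bit EID, shifted left by 4),
 * with bit 3 carrying the redirect hint.
 */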
void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * cpu number is in 8bit ID and 8bit EID
	 */
	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}