// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux device driver for PCI based Prism54
 *
 * Copyright (c) 2006, Michael Wu <[email protected]>
 * Copyright (c) 2008, Christian Lamparter <[email protected]>
 *
 * Based on the islsm (softmac prism54) driver, which is:
 * Copyright 2004-2006 Jean-Baptiste Note <[email protected]>, et al.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <net/mac80211.h>

#include "p54.h"
#include "lmac.h"
#include "p54pci.h"

MODULE_AUTHOR("Michael Wu <[email protected]>");
MODULE_DESCRIPTION("Prism54 PCI wireless driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");

static const struct pci_device_id p54p_table[] = {
	/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3890) },
	/* 3COM 3CRWE154G72 Wireless LAN adapter */
	{ PCI_DEVICE(0x10b7, 0x6001) },
	/* Intersil PRISM Indigo Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3877) },
	/* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3886) },
	/* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
	{ PCI_DEVICE(0x1260, 0xffff) },
	{ },
};

MODULE_DEVICE_TABLE(pci, p54p_table);
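
/*
 * Reset the device, validate the parsed image (the PCI parts need an
 * LM86 firmware), copy it into device memory in 4 KiB chunks through
 * the direct-memory window, then restart the device so it boots the
 * uploaded image from RAM.
 */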
static int p54p_upload_firmware(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	__le32 reg;
	int err;
	__le32 *data;
	u32 remains, left, device_addr;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();

	/* wait for the firmware to reset properly */
	mdelay(10);

	err = p54_parse_firmware(dev, priv->firmware);
	if (err)
		return err;

	if (priv->common.fw_interface != FW_LM86) {
		dev_err(&priv->pdev->dev, "wrong firmware, please get an "
			"LM86(PCI) firmware and try again.\n");
		return -EINVAL;
	}

	data = (__le32 *) priv->firmware->data;
	remains = priv->firmware->size;
	device_addr = ISL38XX_DEV_FIRMWARE_ADDR;
	while (remains) {
		u32 i = 0;
		left = min((u32)0x1000, remains);
		P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr));
		P54P_READ(int_enable);

		device_addr += 0x1000;
		while (i < left) {
			P54P_WRITE(direct_mem_win[i], *data++);
			i += sizeof(u32);
		}

		remains -= left;
		P54P_READ(int_enable);
	}

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	/* wait for the firmware to boot properly */
	mdelay(100);

	return 0;
}
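
/*
 * Allocate and DMA-map fresh receive skbs for every empty descriptor in
 * the given RX ring, then publish the new host_idx so the device can
 * start filling them.
 */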
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf, u32 index)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	u32 limit, idx, i;

	idx = le32_to_cpu(ring_control->host_idx[ring_index]);
	limit = idx;
	limit -= index;
	limit = ring_limit - limit;

	i = idx % ring_limit;
	while (limit-- > 1) {
		struct p54p_desc *desc = &ring[i];

		if (!desc->host_addr) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = dev_alloc_skb(priv->common.rx_mtu + 32);
			if (!skb)
				break;

			mapping = dma_map_single(&priv->pdev->dev,
						 skb_tail_pointer(skb),
						 priv->common.rx_mtu + 32,
						 DMA_FROM_DEVICE);

			if (dma_mapping_error(&priv->pdev->dev, mapping)) {
				dev_kfree_skb_any(skb);
				dev_err(&priv->pdev->dev,
					"RX DMA Mapping error\n");
				break;
			}

			desc->host_addr = cpu_to_le32(mapping);
			desc->device_addr = 0;	// FIXME: necessary?
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
			desc->flags = 0;
			rx_buf[i] = skb;
		}

		i++;
		idx++;
		i %= ring_limit;
	}

	wmb();
	ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}
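
/*
 * Walk the RX descriptors the device has completed (up to device_idx),
 * hand each frame to p54_rx() and either detach the skb from the ring
 * or recycle it in place, then top the ring up again.
 */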
static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;

	while (i != idx) {
		u16 len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		desc = &ring[i];
		len = le16_to_cpu(desc->len);
		skb = rx_buf[i];

		if (!skb) {
			i++;
			i %= ring_limit;
			continue;
		}

		if (unlikely(len > priv->common.rx_mtu)) {
			if (net_ratelimit())
				dev_err(&priv->pdev->dev, "rx'd frame size "
					"exceeds length threshold.\n");

			len = priv->common.rx_mtu;
		}
		dma_addr = le32_to_cpu(desc->host_addr);
		dma_sync_single_for_cpu(&priv->pdev->dev, dma_addr,
					priv->common.rx_mtu + 32,
					DMA_FROM_DEVICE);
		skb_put(skb, len);

		if (p54_rx(dev, skb)) {
			dma_unmap_single(&priv->pdev->dev, dma_addr,
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
			rx_buf[i] = NULL;
			desc->host_addr = cpu_to_le32(0);
		} else {
			skb_trim(skb, 0);
			dma_sync_single_for_device(&priv->pdev->dev, dma_addr,
						   priv->common.rx_mtu + 32,
						   DMA_FROM_DEVICE);
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
		}

		i++;
		i %= ring_limit;
	}

	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}
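
/*
 * Reclaim TX descriptors the device has finished with: unmap the DMA
 * buffer, clear the descriptor and free the skb if the LMAC layer
 * marked it as disposable (FREE_AFTER_TX).
 */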
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **tx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	struct sk_buff *skb;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;

	while (i != idx) {
		desc = &ring[i];

		skb = tx_buf[i];
		tx_buf[i] = NULL;

		dma_unmap_single(&priv->pdev->dev,
				 le32_to_cpu(desc->host_addr),
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);

		desc->host_addr = 0;
		desc->device_addr = 0;
		desc->len = 0;
		desc->flags = 0;

		if (skb && FREE_AFTER_TX(skb))
			p54_free_skb(dev, skb);

		i++;
		i %= ring_limit;
	}
}
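
/*
 * Bottom half: service the management and data TX rings (3 and 1) and
 * the management and data RX rings (2 and 0), then notify the device
 * that the ring control block has been updated.
 */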
static void p54p_tasklet(struct tasklet_struct *t)
{
	struct p54p_priv *priv = from_tasklet(priv, t, tasklet);
	struct ieee80211_hw *dev = pci_get_drvdata(priv->pdev);
	struct p54p_ring_control *ring_control = priv->ring_control;

	p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
			   ARRAY_SIZE(ring_control->tx_mgmt),
			   priv->tx_buf_mgmt);

	p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
			   ARRAY_SIZE(ring_control->tx_data),
			   priv->tx_buf_data);

	p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
			   ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);

	p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data,
			   ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data);

	wmb();
	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
}
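
/*
 * Top half: acknowledge the interrupt, schedule the tasklet on ring
 * updates and complete boot_comp when the firmware signals INIT.
 * An all-ones read of int_ident is ignored.
 */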
static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *dev = dev_id;
	struct p54p_priv *priv = dev->priv;
	__le32 reg;

	reg = P54P_READ(int_ident);
	if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) {
		goto out;
	}
	P54P_WRITE(int_ack, reg);

	reg &= P54P_READ(int_enable);

	if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
		tasklet_schedule(&priv->tasklet);
	else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
		complete(&priv->boot_comp);

out:
	return reg ? IRQ_HANDLED : IRQ_NONE;
}
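
/*
 * Queue one frame on the data TX ring: map it for DMA, fill the next
 * descriptor under priv->lock, bump host_idx[1] and ring the device
 * doorbell.
 */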
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	unsigned long flags;
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	dma_addr_t mapping;
	u32 idx, i;
	__le32 device_addr;

	spin_lock_irqsave(&priv->lock, flags);
	idx = le32_to_cpu(ring_control->host_idx[1]);
	i = idx % ARRAY_SIZE(ring_control->tx_data);

	device_addr = ((struct p54_hdr *)skb->data)->req_id;
	mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, mapping)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		p54_free_skb(dev, skb);
		dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
		return;
	}
	priv->tx_buf_data[i] = skb;

	desc = &ring_control->tx_data[i];
	desc->host_addr = cpu_to_le32(mapping);
	desc->device_addr = device_addr;
	desc->len = cpu_to_le16(skb->len);
	desc->flags = 0;

	wmb();
	ring_control->host_idx[1] = cpu_to_le32(idx + 1);
	spin_unlock_irqrestore(&priv->lock, flags);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
}
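
/*
 * Quiesce the device: mask interrupts, free the IRQ, kill the tasklet,
 * reset the device, and release every skb and DMA mapping still held
 * by the four rings before clearing the ring control block.
 */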
static void p54p_stop(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	unsigned int i;
	struct p54p_desc *desc;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	free_irq(priv->pdev->irq, dev);

	tasklet_kill(&priv->tasklet);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
		desc = &ring_control->rx_data[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_buf_data[i]);
		priv->rx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
		desc = &ring_control->rx_mgmt[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_buf_mgmt[i]);
		priv->rx_buf_mgmt[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
		desc = &ring_control->tx_data[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 DMA_TO_DEVICE);

		p54_free_skb(dev, priv->tx_buf_data[i]);
		priv->tx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
		desc = &ring_control->tx_mgmt[i];
		if (desc->host_addr)
			dma_unmap_single(&priv->pdev->dev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 DMA_TO_DEVICE);

		p54_free_skb(dev, priv->tx_buf_mgmt[i]);
		priv->tx_buf_mgmt[i] = NULL;
	}

	memset(ring_control, 0, sizeof(*ring_control));
}
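
/*
 * Bring the device up: request the shared IRQ, upload the firmware,
 * pre-fill both RX rings, point the device at the DMA ring control
 * block and wait up to one second for the firmware's INIT interrupt.
 */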
static int p54p_open(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	int err;
	long timeout;

	init_completion(&priv->boot_comp);
	err = request_irq(priv->pdev->irq, p54p_interrupt,
			  IRQF_SHARED, "p54pci", dev);
	if (err) {
		dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
		return err;
	}

	memset(priv->ring_control, 0, sizeof(*priv->ring_control));
	err = p54p_upload_firmware(dev);
	if (err) {
		free_irq(priv->pdev->irq, dev);
		return err;
	}
	priv->rx_idx_data = priv->tx_idx_data = 0;
	priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;

	p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
		ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);

	p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
		ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);

	P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
	P54P_READ(ring_control_base);
	wmb();
	udelay(10);

	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
	P54P_READ(dev_int);

	timeout = wait_for_completion_interruptible_timeout(
			&priv->boot_comp, HZ);
	if (timeout <= 0) {
		wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
		p54p_stop(dev);
		return timeout ? -ERESTARTSYS : -ETIMEDOUT;
	}

	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
	wmb();
	udelay(10);

	return 0;
}
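
/*
 * Completion callback for request_firmware_nowait(): briefly boot the
 * device to read its EEPROM, register with mac80211 and, on any
 * failure, unbind the driver so p54p_remove() cleans everything up.
 */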
static void p54p_firmware_step2(const struct firmware *fw,
				void *context)
{
	struct p54p_priv *priv = context;
	struct ieee80211_hw *dev = priv->common.hw;
	struct pci_dev *pdev = priv->pdev;
	int err;

	if (!fw) {
		dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
		err = -ENOENT;
		goto out;
	}

	priv->firmware = fw;

	err = p54p_open(dev);
	if (err)
		goto out;
	err = p54_read_eeprom(dev);
	p54p_stop(dev);
	if (err)
		goto out;

	err = p54_register_common(dev, &pdev->dev);
	if (err)
		goto out;

out:
	complete(&priv->fw_loaded);

	if (err) {
		struct device *parent = pdev->dev.parent;

		if (parent)
			device_lock(parent);

		/*
		 * This will indirectly result in a call to p54p_remove.
		 * Hence, we don't need to bother with freeing any
		 * allocated resources at all.
		 */
		device_release_driver(&pdev->dev);

		if (parent)
			device_unlock(parent);
	}

	pci_dev_put(pdev);
}
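
/*
 * Enable and map the PCI device, allocate the coherent ring control
 * block and kick off the asynchronous firmware request; the rest of
 * the setup continues in p54p_firmware_step2().
 */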
static int p54p_probe(struct pci_dev *pdev,
		      const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	pci_dev_get(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		goto err_put;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "Too short PCI resources\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (!err)
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	init_completion(&priv->fw_loaded);
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	priv->ring_control = dma_alloc_coherent(&pdev->dev,
						sizeof(*priv->ring_control),
						&priv->ring_control_dma,
						GFP_KERNEL);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_setup(&priv->tasklet, p54p_tasklet);

	err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
				      &priv->pdev->dev, GFP_KERNEL,
				      priv, p54p_firmware_step2);
	if (!err)
		return 0;

	dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
			  priv->ring_control, priv->ring_control_dma);

err_iounmap:
	iounmap(priv->map);

err_free_dev:
	p54_free_common(dev);

err_free_reg:
	pci_release_regions(pdev);
err_disable_dev:
	pci_disable_device(pdev);
err_put:
	pci_dev_put(pdev);
	return err;
}
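
/*
 * Tear down in reverse order of p54p_probe(), but only after the
 * asynchronous firmware load has completed.
 */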
static void p54p_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	struct p54p_priv *priv;

	if (!dev)
		return;

	priv = dev->priv;
	wait_for_completion(&priv->fw_loaded);
	p54_unregister_common(dev);
	release_firmware(priv->firmware);
	dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
			  priv->ring_control, priv->ring_control_dma);
	iounmap(priv->map);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	p54_free_common(dev);
}
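
/*
 * System sleep support: save PCI state and drop to D3hot on suspend,
 * re-enable the device and return it to D0 on resume.
 */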
#ifdef CONFIG_PM_SLEEP
static int p54p_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	pci_disable_device(pdev);
	return 0;
}

static int p54p_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	int err;

	err = pci_reenable_device(pdev);
	if (err)
		return err;
	return pci_set_power_state(pdev, PCI_D0);
}

static SIMPLE_DEV_PM_OPS(p54pci_pm_ops, p54p_suspend, p54p_resume);
#define P54P_PM_OPS (&p54pci_pm_ops)
#else
#define P54P_PM_OPS (NULL)
#endif /* CONFIG_PM_SLEEP */

static struct pci_driver p54p_driver = {
	.name = "p54pci",
	.id_table = p54p_table,
	.probe = p54p_probe,
	.remove = p54p_remove,
	.driver.pm = P54P_PM_OPS,
};

module_pci_driver(p54p_driver);