- // SPDX-License-Identifier: GPL-2.0
- /*
- * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
- *
- */
- #include <linux/delay.h>
- #include <linux/device.h>
- #include <linux/dma-direction.h>
- #include <linux/dma-mapping.h>
- #include <linux/interrupt.h>
- #include <linux/list.h>
- #include <linux/mhi.h>
- #include <linux/module.h>
- #include <linux/skbuff.h>
- #include <linux/slab.h>
- #include "internal.h"
- int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
- void __iomem *base, u32 offset, u32 *out)
- {
- return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
- }
- int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
- void __iomem *base, u32 offset,
- u32 mask, u32 *out)
- {
- u32 tmp;
- int ret;
- ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
- if (ret)
- return ret;
- *out = (tmp & mask) >> __ffs(mask);
- return 0;
- }
- int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
- void __iomem *base, u32 offset,
- u32 mask, u32 val, u32 delayus)
- {
- int ret;
- u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
- while (retry--) {
- ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
- if (ret)
- return ret;
- if (out == val)
- return 0;
- fsleep(delayus);
- }
- return -ETIMEDOUT;
- }
- void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
- u32 offset, u32 val)
- {
- mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
- }
- int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
- void __iomem *base, u32 offset, u32 mask,
- u32 val)
- {
- int ret;
- u32 tmp;
- ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
- if (ret)
- return ret;
- tmp &= ~mask;
- tmp |= (val << __ffs(mask));
- mhi_write_reg(mhi_cntrl, base, offset, tmp);
- return 0;
- }
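- /*
- * Editor's sketch (illustrative only, not part of this file): both field
- * helpers above take the field value relative to the mask and shift by the
- * mask's lowest set bit. Assuming a hypothetical 4-bit field at bits 7:4
- * (mask 0xf0), writing the field value 0xc ORs (0xc << __ffs(0xf0)) == 0xc0
- * into the masked register:
- */
- static int __maybe_unused example_set_nibble(struct mhi_controller *mhi_cntrl,
- void __iomem *base, u32 offset)
- {
- return mhi_write_reg_field(mhi_cntrl, base, offset, 0xf0, 0xc);
- }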
- void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
- dma_addr_t db_val)
- {
- mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
- mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
- }
- void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
- struct db_cfg *db_cfg,
- void __iomem *db_addr,
- dma_addr_t db_val)
- {
- if (db_cfg->db_mode) {
- db_cfg->db_val = db_val;
- mhi_write_db(mhi_cntrl, db_addr, db_val);
- db_cfg->db_mode = 0;
- }
- }
- void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
- struct db_cfg *db_cfg,
- void __iomem *db_addr,
- dma_addr_t db_val)
- {
- db_cfg->db_val = db_val;
- mhi_write_db(mhi_cntrl, db_addr, db_val);
- }
- void mhi_ring_er_db(struct mhi_event *mhi_event)
- {
- struct mhi_ring *ring = &mhi_event->ring;
- mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
- ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
- }
- void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
- {
- dma_addr_t db;
- struct mhi_ring *ring = &mhi_cmd->ring;
- db = ring->iommu_base + (ring->wp - ring->base);
- *ring->ctxt_wp = cpu_to_le64(db);
- mhi_write_db(mhi_cntrl, ring->db_addr, db);
- }
- void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan)
- {
- struct mhi_ring *ring = &mhi_chan->tre_ring;
- dma_addr_t db;
- db = ring->iommu_base + (ring->wp - ring->base);
- /*
- * Writes to the new ring element must be visible to the hardware
- * before letting the hardware know there is a new element to fetch.
- */
- dma_wmb();
- *ring->ctxt_wp = cpu_to_le64(db);
- mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
- ring->db_addr, db);
- }
- enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
- {
- u32 exec;
- int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
- return (ret) ? MHI_EE_MAX : exec;
- }
- EXPORT_SYMBOL_GPL(mhi_get_exec_env);
- enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
- {
- u32 state;
- int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
- MHISTATUS_MHISTATE_MASK, &state);
- return ret ? MHI_STATE_MAX : state;
- }
- EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
- void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
- {
- if (mhi_cntrl->reset) {
- mhi_cntrl->reset(mhi_cntrl);
- return;
- }
- /* Generic MHI SoC reset */
- mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
- MHI_SOC_RESET_REQ);
- }
- EXPORT_SYMBOL_GPL(mhi_soc_reset);
- int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
- struct mhi_buf_info *buf_info)
- {
- buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
- buf_info->v_addr, buf_info->len,
- buf_info->dir);
- if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
- return -ENOMEM;
- return 0;
- }
- int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
- struct mhi_buf_info *buf_info)
- {
- void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
- &buf_info->p_addr, GFP_ATOMIC);
- if (!buf)
- return -ENOMEM;
- if (buf_info->dir == DMA_TO_DEVICE)
- memcpy(buf, buf_info->v_addr, buf_info->len);
- buf_info->bb_addr = buf;
- return 0;
- }
- void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
- struct mhi_buf_info *buf_info)
- {
- dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
- buf_info->dir);
- }
- void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
- struct mhi_buf_info *buf_info)
- {
- if (buf_info->dir == DMA_FROM_DEVICE)
- memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
- dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
- buf_info->bb_addr, buf_info->p_addr);
- }
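- /*
- * Editor's note (explanatory, not in the original): the *_use_bb variants
- * implement a bounce buffer for controllers that cannot DMA directly to
- * client memory. Data is copied into the coherent buffer at map time for
- * DMA_TO_DEVICE and copied back out at unmap time for DMA_FROM_DEVICE, so
- * callers see the same semantics as the no_bb variants.
- */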
- static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
- struct mhi_ring *ring)
- {
- int nr_el;
- if (ring->wp < ring->rp) {
- nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
- } else {
- nr_el = (ring->rp - ring->base) / ring->el_size;
- nr_el += ((ring->base + ring->len - ring->wp) /
- ring->el_size) - 1;
- }
- return nr_el;
- }
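- /*
- * Editor's note (worked example, not in the original): one element is kept
- * unused so that wp == rp unambiguously means "empty" rather than "full".
- * E.g. with base = 0x0, len = 64, el_size = 16 (4 elements), rp = 0x10 and
- * wp = 0x30: since wp > rp, nr_el = (0x10 - 0x0)/16 +
- * ((0x0 + 64 - 0x30)/16 - 1) = 1 + 0 = 1 free element.
- */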
- void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
- {
- return (addr - ring->iommu_base) + ring->base;
- }
- dma_addr_t mhi_to_physical(struct mhi_ring *ring, void *addr)
- {
- return (addr - ring->base) + ring->iommu_base;
- }
- static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
- struct mhi_ring *ring)
- {
- ring->wp += ring->el_size;
- if (ring->wp >= (ring->base + ring->len))
- ring->wp = ring->base;
- /* smp update */
- smp_wmb();
- }
- static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
- struct mhi_ring *ring)
- {
- ring->rp += ring->el_size;
- if (ring->rp >= (ring->base + ring->len))
- ring->rp = ring->base;
- /* smp update */
- smp_wmb();
- }
- int mhi_destroy_device(struct device *dev, void *data)
- {
- struct mhi_chan *ul_chan, *dl_chan;
- struct mhi_device *mhi_dev;
- struct mhi_controller *mhi_cntrl;
- enum mhi_ee_type ee = MHI_EE_MAX;
- if (dev->bus != &mhi_bus_type)
- return 0;
- mhi_dev = to_mhi_device(dev);
- mhi_cntrl = mhi_dev->mhi_cntrl;
- /* Only destroy virtual devices that are attached to the bus */
- if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
- return 0;
- ul_chan = mhi_dev->ul_chan;
- dl_chan = mhi_dev->dl_chan;
- /*
- * If an execution environment is specified, remove only those devices
- * that were created in it (based on each channel's ee_mask), since we
- * are moving on to a different execution environment.
- */
- if (data)
- ee = *(enum mhi_ee_type *)data;
- /*
- * For the suspend and resume case, this function will get called
- * without mhi_unregister_controller(). Hence, we need to drop the
- * references to mhi_dev created for ul and dl channels. We can
- * be sure that there will be no instances of mhi_dev left after
- * this.
- */
- if (ul_chan) {
- if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
- return 0;
- put_device(&ul_chan->mhi_dev->dev);
- }
- if (dl_chan) {
- if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
- return 0;
- put_device(&dl_chan->mhi_dev->dev);
- }
- MHI_VERB(dev, "destroy device for chan:%s\n",
- mhi_dev->name);
- /* Notify the client and remove the device from the MHI bus */
- device_del(dev);
- put_device(dev);
- return 0;
- }
- int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
- enum dma_data_direction dir)
- {
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
- mhi_dev->ul_chan : mhi_dev->dl_chan;
- struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
- }
- EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
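- /*
- * Editor's sketch (hypothetical client code, not part of this file): the
- * free descriptor count is typically used for flow control before queueing:
- */
- static bool __maybe_unused example_can_send(struct mhi_device *mhi_dev)
- {
- return mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) > 0;
- }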
- void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
- {
- struct mhi_driver *mhi_drv;
- if (!mhi_dev->dev.driver)
- return;
- mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
- if (mhi_drv->status_cb)
- mhi_drv->status_cb(mhi_dev, cb_reason);
- }
- EXPORT_SYMBOL_GPL(mhi_notify);
- /* Bind MHI channels to MHI devices */
- void mhi_create_devices(struct mhi_controller *mhi_cntrl)
- {
- struct mhi_chan *mhi_chan;
- struct mhi_device *mhi_dev;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- int i, ret;
- mhi_chan = mhi_cntrl->mhi_chan;
- for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
- if (!mhi_chan->configured || mhi_chan->mhi_dev ||
- !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
- continue;
- mhi_dev = mhi_alloc_device(mhi_cntrl);
- if (IS_ERR(mhi_dev))
- return;
- mhi_dev->dev_type = MHI_DEVICE_XFER;
- switch (mhi_chan->dir) {
- case DMA_TO_DEVICE:
- mhi_dev->ul_chan = mhi_chan;
- mhi_dev->ul_chan_id = mhi_chan->chan;
- mhi_dev->ul_event_id = mhi_chan->er_index;
- break;
- case DMA_NONE:
- case DMA_BIDIRECTIONAL:
- mhi_dev->ul_chan_id = mhi_chan->chan;
- mhi_dev->ul_event_id = mhi_chan->er_index;
- fallthrough;
- case DMA_FROM_DEVICE:
- /* We use dl_chan as offload channels */
- mhi_dev->dl_chan = mhi_chan;
- mhi_dev->dl_chan_id = mhi_chan->chan;
- mhi_dev->dl_event_id = mhi_chan->er_index;
- break;
- default:
- MHI_ERR(dev, "Direction not supported\n");
- put_device(&mhi_dev->dev);
- return;
- }
- get_device(&mhi_dev->dev);
- mhi_chan->mhi_dev = mhi_dev;
- /* Check if the next channel matches (UL/DL pair sharing one name) */
- if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
- if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
- i++;
- mhi_chan++;
- if (mhi_chan->dir == DMA_TO_DEVICE) {
- mhi_dev->ul_chan = mhi_chan;
- mhi_dev->ul_chan_id = mhi_chan->chan;
- mhi_dev->ul_event_id = mhi_chan->er_index;
- } else {
- mhi_dev->dl_chan = mhi_chan;
- mhi_dev->dl_chan_id = mhi_chan->chan;
- mhi_dev->dl_event_id = mhi_chan->er_index;
- }
- get_device(&mhi_dev->dev);
- mhi_chan->mhi_dev = mhi_dev;
- }
- }
- /* The channel name is the same for both UL and DL */
- mhi_dev->name = mhi_chan->name;
- dev_set_name(&mhi_dev->dev, "%s_%s",
- dev_name(&mhi_cntrl->mhi_dev->dev),
- mhi_dev->name);
- /* Init wakeup source if available */
- if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
- device_init_wakeup(&mhi_dev->dev, true);
- ret = device_add(&mhi_dev->dev);
- if (ret)
- put_device(&mhi_dev->dev);
- }
- }
- void mhi_process_sleeping_events(struct mhi_controller *mhi_cntrl)
- {
- struct mhi_event *mhi_event;
- struct mhi_event_ctxt *er_ctxt;
- struct mhi_ring *ev_ring;
- int i;
- mhi_event = mhi_cntrl->mhi_event;
- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
- if (mhi_event->offload_ev || mhi_event->priority !=
- MHI_ER_PRIORITY_HI_SLEEP)
- continue;
- er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
- ev_ring = &mhi_event->ring;
- /* Only proceed if event ring has pending events */
- if (ev_ring->rp == mhi_to_virtual(ev_ring, er_ctxt->rp))
- continue;
- queue_work(mhi_cntrl->hiprio_wq, &mhi_event->work);
- }
- }
- irqreturn_t mhi_irq_handler(int irq_number, void *priv)
- {
- struct mhi_event *mhi_event = priv;
- struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- struct mhi_event_ctxt *er_ctxt;
- struct mhi_ring *ev_ring = &mhi_event->ring;
- dma_addr_t ptr;
- void *dev_rp;
- /*
- * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq()
- * and by that time mhi_ctxt would have been freed. So check for the existence of
- * mhi_ctxt before handling the IRQs.
- */
- if (!mhi_cntrl->mhi_ctxt) {
- dev_dbg(&mhi_cntrl->mhi_dev->dev,
- "mhi_ctxt has been freed\n");
- return IRQ_HANDLED;
- }
- er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
- ptr = le64_to_cpu(er_ctxt->rp);
- if (!is_valid_ring_ptr(ev_ring, ptr)) {
- MHI_ERR(dev,
- "Event ring rp points outside of the event ring\n");
- return IRQ_HANDLED;
- }
- dev_rp = mhi_to_virtual(ev_ring, ptr);
- /* Only proceed if event ring has pending events */
- if (ev_ring->rp == dev_rp)
- return IRQ_HANDLED;
- /* For a client-managed event ring, notify the client of pending data */
- if (mhi_event->cl_manage) {
- struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
- struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
- if (mhi_dev)
- mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
- return IRQ_HANDLED;
- }
- switch (mhi_event->priority) {
- case MHI_ER_PRIORITY_HI_NOSLEEP:
- tasklet_hi_schedule(&mhi_event->task);
- break;
- case MHI_ER_PRIORITY_DEFAULT_NOSLEEP:
- tasklet_schedule(&mhi_event->task);
- break;
- case MHI_ER_PRIORITY_HI_SLEEP:
- queue_work(mhi_cntrl->hiprio_wq, &mhi_event->work);
- break;
- default:
- MHI_VERB(dev, "skip unknown priority event\n");
- break;
- }
- return IRQ_HANDLED;
- }
- irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
- {
- struct mhi_controller *mhi_cntrl = priv;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- enum mhi_state state;
- enum mhi_pm_state pm_state = 0;
- enum mhi_ee_type ee;
- write_lock_irq(&mhi_cntrl->pm_lock);
- if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
- write_unlock_irq(&mhi_cntrl->pm_lock);
- goto exit_intvec;
- }
- state = mhi_get_mhi_state(mhi_cntrl);
- ee = mhi_get_exec_env(mhi_cntrl);
- MHI_VERB(dev, "local ee: %s state: %s device ee: %s state: %s\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee),
- mhi_state_str(mhi_cntrl->dev_state),
- TO_MHI_EXEC_STR(ee), mhi_state_str(state));
- if (state == MHI_STATE_SYS_ERR) {
- MHI_VERB(dev, "System error detected\n");
- pm_state = mhi_tryset_pm_state(mhi_cntrl,
- MHI_PM_SYS_ERR_DETECT);
- }
- write_unlock_irq(&mhi_cntrl->pm_lock);
- if (pm_state != MHI_PM_SYS_ERR_DETECT)
- goto exit_intvec;
- switch (ee) {
- case MHI_EE_RDDM:
- /* proceed if power down is not already in progress */
- if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
- /* notify critical clients with early notifications */
- mhi_report_error(mhi_cntrl);
- mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
- mhi_cntrl->ee = ee;
- wake_up_all(&mhi_cntrl->state_event);
- }
- break;
- case MHI_EE_PBL:
- case MHI_EE_EDL:
- case MHI_EE_PTHRU:
- mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
- mhi_cntrl->ee = ee;
- wake_up_all(&mhi_cntrl->state_event);
- mhi_pm_sys_err_handler(mhi_cntrl);
- break;
- default:
- wake_up_all(&mhi_cntrl->state_event);
- mhi_pm_sys_err_handler(mhi_cntrl);
- break;
- }
- exit_intvec:
- return IRQ_HANDLED;
- }
- irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
- {
- struct mhi_controller *mhi_cntrl = dev;
- /* Wake up events waiting for state change */
- wake_up_all(&mhi_cntrl->state_event);
- return IRQ_WAKE_THREAD;
- }
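- /*
- * Editor's note (illustrative, mirrors how the MHI core registers these in
- * its init path; exact flags vary by controller): the hard handler above
- * only wakes state waiters and defers all real work to the threaded
- * handler, roughly as:
- *
- * ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
- * mhi_intvec_threaded_handler,
- * IRQF_SHARED | IRQF_NO_SUSPEND,
- * "bhi", mhi_cntrl);
- */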
- static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
- struct mhi_ring *ring)
- {
- /* Update the WP */
- ring->wp += ring->el_size;
- if (ring->wp >= (ring->base + ring->len))
- ring->wp = ring->base;
- *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));
- /* Update the RP */
- ring->rp += ring->el_size;
- if (ring->rp >= (ring->base + ring->len))
- ring->rp = ring->base;
- /* Update to all cores */
- smp_wmb();
- }
- static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
- struct mhi_ring_element *event,
- struct mhi_chan *mhi_chan)
- {
- struct mhi_ring *buf_ring, *tre_ring;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- struct mhi_result result;
- unsigned long flags = 0;
- u32 ev_code;
- ev_code = MHI_TRE_GET_EV_CODE(event);
- buf_ring = &mhi_chan->buf_ring;
- tre_ring = &mhi_chan->tre_ring;
- result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
- -EOVERFLOW : 0;
- /*
- * If this is a DB event, we need to grab the lock with preemption
- * disabled and as a writer, because we have to update the DB register
- * and another thread could be doing the same.
- */
- if (ev_code >= MHI_EV_CC_OOB)
- write_lock_irqsave(&mhi_chan->lock, flags);
- else
- read_lock_bh(&mhi_chan->lock);
- if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
- goto end_process_tx_event;
- switch (ev_code) {
- case MHI_EV_CC_OVERFLOW:
- case MHI_EV_CC_EOB:
- case MHI_EV_CC_EOT:
- {
- dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
- struct mhi_ring_element *local_rp, *ev_tre;
- void *dev_rp;
- struct mhi_buf_info *buf_info;
- u16 xfer_len;
- if (!is_valid_ring_ptr(tre_ring, ptr)) {
- MHI_ERR(dev,
- "Event element points outside of the tre ring\n");
- break;
- }
- /* Get the TRE this event points to */
- ev_tre = mhi_to_virtual(tre_ring, ptr);
- dev_rp = ev_tre + 1;
- if (dev_rp >= (tre_ring->base + tre_ring->len))
- dev_rp = tre_ring->base;
- result.dir = mhi_chan->dir;
- local_rp = tre_ring->rp;
- while (local_rp != dev_rp) {
- buf_info = buf_ring->rp;
- /* If it's the last TRE, get length from the event */
- if (local_rp == ev_tre)
- xfer_len = MHI_TRE_GET_EV_LEN(event);
- else
- xfer_len = buf_info->len;
- /* Unmap if it's not pre-mapped by client */
- if (likely(!buf_info->pre_mapped))
- mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
- result.buf_addr = buf_info->cb_buf;
- /* truncate to buf len if xfer_len is larger */
- result.bytes_xferd =
- min_t(u16, xfer_len, buf_info->len);
- mhi_del_ring_element(mhi_cntrl, buf_ring);
- mhi_del_ring_element(mhi_cntrl, tre_ring);
- local_rp = tre_ring->rp;
- read_unlock_bh(&mhi_chan->lock);
- /* notify client */
- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
- if (mhi_chan->dir == DMA_TO_DEVICE)
- atomic_dec(&mhi_cntrl->pending_pkts);
- /*
- * Recycle the buffer if it was pre-allocated; if recycling fails,
- * there is not much we can do apart from dropping the packet.
- */
- if (mhi_chan->pre_alloc) {
- if (mhi_queue_buf(mhi_chan->mhi_dev,
- mhi_chan->dir,
- buf_info->cb_buf,
- buf_info->len, MHI_EOT)) {
- MHI_ERR(dev,
- "Error recycling buffer for chan:%d\n",
- mhi_chan->chan);
- kfree(buf_info->cb_buf);
- }
- }
- read_lock_bh(&mhi_chan->lock);
- }
- break;
- } /* CC_EOT */
- case MHI_EV_CC_OOB:
- case MHI_EV_CC_DB_MODE:
- {
- unsigned long pm_lock_flags;
- mhi_chan->db_cfg.db_mode = 1;
- read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
- if (tre_ring->wp != tre_ring->rp &&
- MHI_DB_ACCESS_VALID(mhi_cntrl)) {
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- }
- read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
- break;
- }
- case MHI_EV_CC_BAD_TRE:
- default:
- MHI_ERR(dev, "Unknown event 0x%x\n", ev_code);
- break;
- } /* switch (ev_code) */
- end_process_tx_event:
- if (ev_code >= MHI_EV_CC_OOB)
- write_unlock_irqrestore(&mhi_chan->lock, flags);
- else
- read_unlock_bh(&mhi_chan->lock);
- return 0;
- }
- static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
- struct mhi_ring_element *event,
- struct mhi_chan *mhi_chan)
- {
- struct mhi_ring *buf_ring, *tre_ring;
- struct mhi_buf_info *buf_info;
- struct mhi_result result;
- int ev_code;
- u32 cookie; /* offset to local descriptor */
- u16 xfer_len;
- buf_ring = &mhi_chan->buf_ring;
- tre_ring = &mhi_chan->tre_ring;
- ev_code = MHI_TRE_GET_EV_CODE(event);
- cookie = MHI_TRE_GET_EV_COOKIE(event);
- xfer_len = MHI_TRE_GET_EV_LEN(event);
- /* Received an out-of-bounds cookie */
- WARN_ON(cookie >= buf_ring->len);
- buf_info = buf_ring->base + cookie;
- result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
- -EOVERFLOW : 0;
- /* truncate to buf len if xfer_len is larger */
- result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
- result.buf_addr = buf_info->cb_buf;
- result.dir = mhi_chan->dir;
- read_lock_bh(&mhi_chan->lock);
- if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
- goto end_process_rsc_event;
- WARN_ON(!buf_info->used);
- /* notify the client */
- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
- /*
- * Note: we arbitrarily increment RP even though the completion packet
- * we just processed might not correspond to it. This is safe because
- * the device is guaranteed to cache descriptors in the order it
- * receives them, so even if the completion event is for a later
- * descriptor, every descriptor in between can be reused.
- * Example:
- * The transfer ring has descriptors: A, B, C, D
- * The last descriptor the host queued is D (WP) and the first is A (RP).
- * The completion event we just serviced is for descriptor C.
- * Then we can safely queue descriptors to replace A, B, and C even
- * though the host has not received completions for A and B.
- */
- mhi_del_ring_element(mhi_cntrl, tre_ring);
- buf_info->used = false;
- end_process_rsc_event:
- read_unlock_bh(&mhi_chan->lock);
- return 0;
- }
- static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
- struct mhi_ring_element *tre)
- {
- dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
- struct mhi_ring *mhi_ring = &cmd_ring->ring;
- struct mhi_ring_element *cmd_pkt;
- struct mhi_chan *mhi_chan;
- u32 chan;
- if (!is_valid_ring_ptr(mhi_ring, ptr)) {
- MHI_ERR(dev,
- "Event element points outside of the cmd ring\n");
- return;
- }
- cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
- if (cmd_pkt != mhi_ring->rp)
- MHI_ERR(dev, "Out of order cmd completion: 0x%llx. Expected: 0x%llx\n",
- (u64)mhi_to_physical(mhi_ring, cmd_pkt),
- (u64)mhi_to_physical(mhi_ring, mhi_ring->rp));
- if (MHI_TRE_GET_CMD_TYPE(cmd_pkt) == MHI_CMD_SFR_CFG) {
- mhi_misc_cmd_completion(mhi_cntrl, MHI_CMD_SFR_CFG,
- MHI_TRE_GET_EV_CODE(tre));
- goto exit_cmd_completion;
- }
- chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
- if (chan < mhi_cntrl->max_chan &&
- mhi_cntrl->mhi_chan[chan].configured) {
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
- write_lock_bh(&mhi_chan->lock);
- mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
- complete(&mhi_chan->completion);
- write_unlock_bh(&mhi_chan->lock);
- } else {
- MHI_ERR(dev,
- "Completion packet for invalid channel ID: %d\n", chan);
- }
- exit_cmd_completion:
- mhi_del_ring_element(mhi_cntrl, mhi_ring);
- }
- int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
- struct mhi_event *mhi_event,
- u32 event_quota)
- {
- struct mhi_ring_element *dev_rp, *local_rp;
- struct mhi_ring *ev_ring = &mhi_event->ring;
- struct mhi_event_ctxt *er_ctxt =
- &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
- struct mhi_chan *mhi_chan;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- u32 chan;
- int count = 0;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
- /*
- * This is a quick check to avoid unnecessary event processing
- * in case MHI is already in error state, but it's still possible
- * to transition to error state while processing events
- */
- if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
- return -EIO;
- if (!is_valid_ring_ptr(ev_ring, ptr)) {
- MHI_ERR(dev,
- "Event ring rp points outside of the event ring\n");
- return -EIO;
- }
- dev_rp = mhi_to_virtual(ev_ring, ptr);
- local_rp = ev_ring->rp;
- while (dev_rp != local_rp) {
- enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
- MHI_VERB(dev, "RP:0x%llx Processing Event:0x%llx 0x%08x 0x%08x\n",
- (u64)mhi_to_physical(ev_ring, local_rp),
- local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
- switch (type) {
- case MHI_PKT_TYPE_BW_REQ_EVENT:
- {
- struct mhi_link_info *link_info;
- link_info = &mhi_cntrl->mhi_link_info;
- write_lock_irq(&mhi_cntrl->pm_lock);
- link_info->target_link_speed =
- MHI_TRE_GET_EV_LINKSPEED(local_rp);
- link_info->target_link_width =
- MHI_TRE_GET_EV_LINKWIDTH(local_rp);
- write_unlock_irq(&mhi_cntrl->pm_lock);
- MHI_VERB(dev, "Received BW_REQ event\n");
- mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
- break;
- }
- case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
- {
- enum mhi_state new_state;
- new_state = MHI_TRE_GET_EV_STATE(local_rp);
- MHI_VERB(dev, "State change event to state: %s\n",
- mhi_state_str(new_state));
- switch (new_state) {
- case MHI_STATE_M0:
- mhi_pm_m0_transition(mhi_cntrl);
- break;
- case MHI_STATE_M1:
- mhi_pm_m1_transition(mhi_cntrl);
- break;
- case MHI_STATE_M3:
- mhi_pm_m3_transition(mhi_cntrl);
- break;
- case MHI_STATE_SYS_ERR:
- {
- enum mhi_pm_state pm_state;
- MHI_VERB(dev, "System error detected\n");
- write_lock_irq(&mhi_cntrl->pm_lock);
- pm_state = mhi_tryset_pm_state(mhi_cntrl,
- MHI_PM_SYS_ERR_DETECT);
- write_unlock_irq(&mhi_cntrl->pm_lock);
- if (pm_state == MHI_PM_SYS_ERR_DETECT)
- mhi_pm_sys_err_handler(mhi_cntrl);
- break;
- }
- default:
- MHI_ERR(dev, "Invalid state: %s\n",
- mhi_state_str(new_state));
- }
- break;
- }
- case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
- mhi_process_cmd_completion(mhi_cntrl, local_rp);
- break;
- case MHI_PKT_TYPE_EE_EVENT:
- {
- enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
- enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
- MHI_VERB(dev, "Received EE event: %s\n",
- TO_MHI_EXEC_STR(event));
- switch (event) {
- case MHI_EE_SBL:
- st = DEV_ST_TRANSITION_SBL;
- break;
- case MHI_EE_WFW:
- case MHI_EE_AMSS:
- st = DEV_ST_TRANSITION_MISSION_MODE;
- break;
- case MHI_EE_FP:
- st = DEV_ST_TRANSITION_FP;
- break;
- case MHI_EE_RDDM:
- mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
- write_lock_irq(&mhi_cntrl->pm_lock);
- mhi_cntrl->ee = event;
- write_unlock_irq(&mhi_cntrl->pm_lock);
- wake_up_all(&mhi_cntrl->state_event);
- break;
- default:
- MHI_ERR(dev,
- "Unhandled EE event: 0x%x\n", type);
- }
- if (st != DEV_ST_TRANSITION_MAX)
- mhi_queue_state_transition(mhi_cntrl, st);
- break;
- }
- case MHI_PKT_TYPE_TX_EVENT:
- chan = MHI_TRE_GET_EV_CHID(local_rp);
- WARN_ON(chan >= mhi_cntrl->max_chan);
- /*
- * Only process the event ring elements whose channel
- * ID is within the maximum supported range.
- */
- if (chan < mhi_cntrl->max_chan) {
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
- if (!mhi_chan->configured)
- break;
- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
- }
- break;
- default:
- MHI_ERR(dev, "Unhandled event type: %d\n", type);
- break;
- }
- mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
- local_rp = ev_ring->rp;
- ptr = le64_to_cpu(er_ctxt->rp);
- if (!is_valid_ring_ptr(ev_ring, ptr)) {
- MHI_ERR(dev,
- "Event ring rp points outside of the event ring\n");
- return -EIO;
- }
- dev_rp = mhi_to_virtual(ev_ring, ptr);
- count++;
- }
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
- mhi_ring_er_db(mhi_event);
- read_unlock_bh(&mhi_cntrl->pm_lock);
- return count;
- }
- int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
- struct mhi_event *mhi_event,
- u32 event_quota)
- {
- struct mhi_ring_element *dev_rp, *local_rp;
- struct mhi_ring *ev_ring = &mhi_event->ring;
- struct mhi_event_ctxt *er_ctxt =
- &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- int count = 0;
- u32 chan;
- struct mhi_chan *mhi_chan;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
- if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
- return -EIO;
- if (!is_valid_ring_ptr(ev_ring, ptr)) {
- MHI_ERR(dev,
- "Event ring rp points outside of the event ring\n");
- return -EIO;
- }
- dev_rp = mhi_to_virtual(ev_ring, ptr);
- local_rp = ev_ring->rp;
- while (dev_rp != local_rp && event_quota > 0) {
- enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
- MHI_VERB(dev, "Processing Event:0x%llx 0x%08x 0x%08x\n",
- local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
- chan = MHI_TRE_GET_EV_CHID(local_rp);
- WARN_ON(chan >= mhi_cntrl->max_chan);
- /*
- * Only process the event ring elements whose channel
- * ID is within the maximum supported range.
- */
- if (chan < mhi_cntrl->max_chan &&
- mhi_cntrl->mhi_chan[chan].configured) {
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
- if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
- } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
- parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
- }
- }
- mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
- local_rp = ev_ring->rp;
- ptr = le64_to_cpu(er_ctxt->rp);
- if (!is_valid_ring_ptr(ev_ring, ptr)) {
- MHI_ERR(dev,
- "Event ring rp points outside of the event ring\n");
- return -EIO;
- }
- dev_rp = mhi_to_virtual(ev_ring, ptr);
- count++;
- }
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
- mhi_ring_er_db(mhi_event);
- read_unlock_bh(&mhi_cntrl->pm_lock);
- return count;
- }
- void mhi_ev_task(unsigned long data)
- {
- struct mhi_event *mhi_event = (struct mhi_event *)data;
- struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
- /* process all pending events */
- spin_lock_bh(&mhi_event->lock);
- mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
- spin_unlock_bh(&mhi_event->lock);
- }
- void mhi_ctrl_ev_task(unsigned long data)
- {
- struct mhi_event *mhi_event = (struct mhi_event *)data;
- struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- enum mhi_state state;
- enum mhi_pm_state pm_state = 0;
- int ret;
- /*
- * We can check the PM state without a lock here because there is no
- * way the PM state can change from "register access valid" to "no
- * access" while this thread is executing.
- */
- if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
- /*
- * We may have a pending event but are not allowed to process it,
- * since we are probably in a suspended state, so trigger a resume.
- */
- mhi_trigger_resume(mhi_cntrl);
- return;
- }
- /* Process ctrl events */
- ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
- /*
- * We received an IRQ but there are no events to process; the device
- * may have entered the SYS_ERR state. Check the state to confirm.
- */
- if (!ret) {
- write_lock_irq(&mhi_cntrl->pm_lock);
- state = mhi_get_mhi_state(mhi_cntrl);
- if (state == MHI_STATE_SYS_ERR) {
- MHI_VERB(dev, "System error detected\n");
- pm_state = mhi_tryset_pm_state(mhi_cntrl,
- MHI_PM_SYS_ERR_DETECT);
- }
- write_unlock_irq(&mhi_cntrl->pm_lock);
- if (pm_state == MHI_PM_SYS_ERR_DETECT)
- mhi_pm_sys_err_handler(mhi_cntrl);
- }
- }
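- /*
- * Editor's note (illustrative, mirrors the core's init path): both routines
- * above run as tasklets, bound per event ring roughly as:
- *
- * if (mhi_event->data_type == MHI_ER_CTRL)
- * tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, (ulong)mhi_event);
- * else
- * tasklet_init(&mhi_event->task, mhi_ev_task, (ulong)mhi_event);
- */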
- void mhi_process_ev_work(struct work_struct *work)
- {
- struct mhi_event *mhi_event = container_of(work, struct mhi_event,
- work);
- struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
- struct device *dev = mhi_cntrl->cntrl_dev;
- MHI_VERB(dev, "Enter with pm_state:%s MHI_STATE:%s ee:%s\n",
- to_mhi_pm_state_str(mhi_cntrl->pm_state),
- mhi_state_str(mhi_cntrl->dev_state),
- TO_MHI_EXEC_STR(mhi_cntrl->ee));
- if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
- return;
- mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
- }
- static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
- struct mhi_ring *ring)
- {
- void *tmp = ring->wp + ring->el_size;
- if (tmp >= (ring->base + ring->len))
- tmp = ring->base;
- return (tmp == ring->rp);
- }
- static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
- enum dma_data_direction dir, enum mhi_flags mflags)
- {
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
- mhi_dev->dl_chan;
- struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- unsigned long flags;
- int ret;
- if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
- return -EIO;
- ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
- if (unlikely(ret))
- return -EAGAIN;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
- if (unlikely(ret))
- return ret;
- /* Let controller mark last busy for runtime PM framework if needed */
- if (mhi_cntrl->runtime_last_busy)
- mhi_cntrl->runtime_last_busy(mhi_cntrl);
- read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
- /* trigger M3 exit if necessary */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
- /* Assert dev_wake (to exit/prevent M1/M2) */
- mhi_cntrl->wake_toggle(mhi_cntrl);
- if (mhi_chan->dir == DMA_TO_DEVICE)
- atomic_inc(&mhi_cntrl->pending_pkts);
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
- return ret;
- }
- int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct sk_buff *skb, size_t len, enum mhi_flags mflags)
- {
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
- mhi_dev->dl_chan;
- struct mhi_buf_info buf_info = { };
- buf_info.v_addr = skb->data;
- buf_info.cb_buf = skb;
- buf_info.len = len;
- if (unlikely(mhi_chan->pre_alloc))
- return -EINVAL;
- return mhi_queue(mhi_dev, &buf_info, dir, mflags);
- }
- EXPORT_SYMBOL_GPL(mhi_queue_skb);
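- /*
- * Editor's sketch (hypothetical client, not part of this file): a TX path
- * hands the skb to MHI and must not touch it again until ul_xfer_cb() fires
- * with result->buf_addr pointing back at the skb:
- */
- static int __maybe_unused example_xmit(struct mhi_device *mhi_dev,
- struct sk_buff *skb)
- {
- return mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
- }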
- int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
- {
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
- mhi_dev->dl_chan;
- struct mhi_buf_info buf_info = { };
- buf_info.p_addr = mhi_buf->dma_addr;
- buf_info.cb_buf = mhi_buf;
- buf_info.pre_mapped = true;
- buf_info.len = len;
- if (unlikely(mhi_chan->pre_alloc))
- return -EINVAL;
- return mhi_queue(mhi_dev, &buf_info, dir, mflags);
- }
- EXPORT_SYMBOL_GPL(mhi_queue_dma);
- int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
- struct mhi_buf_info *info, enum mhi_flags flags)
- {
- struct device *dev = &mhi_chan->mhi_dev->dev;
- struct mhi_ring *buf_ring, *tre_ring;
- struct mhi_ring_element *mhi_tre;
- struct mhi_buf_info *buf_info;
- int eot, eob, chain, bei;
- int ret;
- /* Protect accesses for reading and incrementing WP */
- write_lock_bh(&mhi_chan->lock);
- buf_ring = &mhi_chan->buf_ring;
- tre_ring = &mhi_chan->tre_ring;
- buf_info = buf_ring->wp;
- WARN_ON(buf_info->used);
- buf_info->pre_mapped = info->pre_mapped;
- if (info->pre_mapped)
- buf_info->p_addr = info->p_addr;
- else
- buf_info->v_addr = info->v_addr;
- buf_info->cb_buf = info->cb_buf;
- buf_info->wp = tre_ring->wp;
- buf_info->dir = mhi_chan->dir;
- buf_info->len = info->len;
- if (!info->pre_mapped) {
- ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret) {
- write_unlock_bh(&mhi_chan->lock);
- return ret;
- }
- }
- eob = !!(flags & MHI_EOB);
- eot = !!(flags & MHI_EOT);
- chain = !!(flags & MHI_CHAIN);
- bei = !!(mhi_chan->intmod);
- mhi_tre = tre_ring->wp;
- mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
- mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
- MHI_VERB_EXTRA(dev, "Chan: %d WP: 0x%llx TRE: 0x%llx 0x%08x 0x%08x\n",
- mhi_chan->chan, (u64)mhi_to_physical(tre_ring, mhi_tre),
- mhi_tre->ptr, mhi_tre->dword[0], mhi_tre->dword[1]);
- /* increment WP */
- mhi_add_ring_element(mhi_cntrl, tre_ring);
- mhi_add_ring_element(mhi_cntrl, buf_ring);
- write_unlock_bh(&mhi_chan->lock);
- return 0;
- }
- int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- void *buf, size_t len, enum mhi_flags mflags)
- {
- struct mhi_buf_info buf_info = { };
- buf_info.v_addr = buf;
- buf_info.cb_buf = buf;
- buf_info.len = len;
- return mhi_queue(mhi_dev, &buf_info, dir, mflags);
- }
- EXPORT_SYMBOL_GPL(mhi_queue_buf);
- bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
- {
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
- mhi_dev->ul_chan : mhi_dev->dl_chan;
- struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- return mhi_is_ring_full(mhi_cntrl, tre_ring);
- }
- EXPORT_SYMBOL_GPL(mhi_queue_is_full);
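- /*
- * Editor's sketch (hypothetical client): mhi_queue_is_full() pairs with the
- * -EAGAIN return of the queue APIs for flow control; a netdev client might
- * stop its TX queue when the TRE ring fills:
- *
- * if (mhi_queue_is_full(mhi_dev, DMA_TO_DEVICE))
- * netif_stop_queue(ndev);
- */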
- int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan,
- enum mhi_cmd_type cmd)
- {
- struct mhi_ring_element *cmd_tre = NULL;
- struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
- struct mhi_ring *ring = &mhi_cmd->ring;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- int chan = 0;
- if (mhi_chan)
- chan = mhi_chan->chan;
- spin_lock_bh(&mhi_cmd->lock);
- if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
- spin_unlock_bh(&mhi_cmd->lock);
- return -ENOMEM;
- }
- /* prepare the cmd tre */
- cmd_tre = ring->wp;
- switch (cmd) {
- case MHI_CMD_RESET_CHAN:
- cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
- cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
- cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
- break;
- case MHI_CMD_STOP_CHAN:
- cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
- cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
- cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
- break;
- case MHI_CMD_START_CHAN:
- cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
- cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
- cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
- break;
- case MHI_CMD_SFR_CFG:
- mhi_misc_cmd_configure(mhi_cntrl, MHI_CMD_SFR_CFG,
- &cmd_tre->ptr, &cmd_tre->dword[0],
- &cmd_tre->dword[1]);
- break;
- default:
- MHI_ERR(dev, "Command not supported\n");
- break;
- }
- /* queue to hardware */
- mhi_add_ring_element(mhi_cntrl, ring);
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
- mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
- read_unlock_bh(&mhi_cntrl->pm_lock);
- spin_unlock_bh(&mhi_cmd->lock);
- return 0;
- }
- static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan,
- enum mhi_ch_state_type to_state)
- {
- struct device *dev = &mhi_chan->mhi_dev->dev;
- enum mhi_cmd_type cmd = MHI_CMD_NOP;
- int ret;
- MHI_VERB(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
- TO_CH_STATE_TYPE_STR(to_state));
- switch (to_state) {
- case MHI_CH_STATE_TYPE_RESET:
- write_lock_irq(&mhi_chan->lock);
- if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
- mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
- mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
- write_unlock_irq(&mhi_chan->lock);
- return -EINVAL;
- }
- mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
- write_unlock_irq(&mhi_chan->lock);
- cmd = MHI_CMD_RESET_CHAN;
- break;
- case MHI_CH_STATE_TYPE_STOP:
- if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
- return -EINVAL;
- cmd = MHI_CMD_STOP_CHAN;
- break;
- case MHI_CH_STATE_TYPE_START:
- if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
- mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
- return -EINVAL;
- cmd = MHI_CMD_START_CHAN;
- break;
- default:
- MHI_ERR(dev, "%d: Channel state update to %s not allowed\n",
- mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
- return -EINVAL;
- }
- /* bring host and device out of suspended states */
- ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
- if (ret)
- return ret;
- mhi_cntrl->runtime_get(mhi_cntrl);
- reinit_completion(&mhi_chan->completion);
- ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
- if (ret) {
- MHI_ERR(dev, "%d: Failed to send %s channel command\n",
- mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
- goto exit_channel_update;
- }
- ret = wait_for_completion_timeout(&mhi_chan->completion,
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
- if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
- MHI_ERR(dev,
- "%d: Failed to receive %s channel command completion\n",
- mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
- ret = -EIO;
- goto exit_channel_update;
- }
- ret = 0;
- if (to_state != MHI_CH_STATE_TYPE_RESET) {
- write_lock_irq(&mhi_chan->lock);
- mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
- MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
- write_unlock_irq(&mhi_chan->lock);
- }
- MHI_VERB(dev, "%d: Channel state change to %s successful\n",
- mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
- exit_channel_update:
- mhi_cntrl->runtime_put(mhi_cntrl);
- mhi_device_put(mhi_cntrl->mhi_dev);
- return ret;
- }
- static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan)
- {
- int ret;
- struct device *dev = &mhi_chan->mhi_dev->dev;
- mutex_lock(&mhi_chan->mutex);
- if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
- MHI_VERB(dev, "Current EE: %s Required EE Mask: 0x%x\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
- goto exit_unprepare_channel;
- }
- /* no more event processing for this channel */
- ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
- MHI_CH_STATE_TYPE_RESET);
- if (ret)
- MHI_ERR(dev, "%d: Failed to reset channel, still resetting\n",
- mhi_chan->chan);
- exit_unprepare_channel:
- write_lock_irq(&mhi_chan->lock);
- mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
- write_unlock_irq(&mhi_chan->lock);
- if (!mhi_chan->offload_ch) {
- mhi_reset_chan(mhi_cntrl, mhi_chan);
- mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
- }
- MHI_VERB(dev, "%d: successfully reset\n", mhi_chan->chan);
- mutex_unlock(&mhi_chan->mutex);
- }
- int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan, unsigned int flags)
- {
- int ret = 0;
- struct device *dev = &mhi_chan->mhi_dev->dev;
- if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
- MHI_ERR(dev, "Current EE: %s Required EE Mask: 0x%x\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
- return -ENOTCONN;
- }
- mutex_lock(&mhi_chan->mutex);
- /* Check if the client manages the channel context for offload channels */
- if (!mhi_chan->offload_ch) {
- ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
- if (ret)
- goto error_init_chan;
- }
- ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
- MHI_CH_STATE_TYPE_START);
- if (ret)
- goto error_pm_state;
- if (mhi_chan->dir == DMA_FROM_DEVICE)
- mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
- /* Pre-allocate buffers for the xfer ring */
- if (mhi_chan->pre_alloc) {
- int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
- &mhi_chan->tre_ring);
- size_t len = mhi_cntrl->buffer_len;
- while (nr_el--) {
- void *buf;
- struct mhi_buf_info info = { };
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto error_pre_alloc;
- }
- /* Prepare transfer descriptors */
- info.v_addr = buf;
- info.cb_buf = buf;
- info.len = len;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
- if (ret) {
- kfree(buf);
- goto error_pre_alloc;
- }
- }
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
- read_lock_irq(&mhi_chan->lock);
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- read_unlock_irq(&mhi_chan->lock);
- }
- read_unlock_bh(&mhi_cntrl->pm_lock);
- }
- mutex_unlock(&mhi_chan->mutex);
- return 0;
- error_pm_state:
- if (!mhi_chan->offload_ch)
- mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
- error_init_chan:
- mutex_unlock(&mhi_chan->mutex);
- return ret;
- error_pre_alloc:
- mutex_unlock(&mhi_chan->mutex);
- mhi_unprepare_channel(mhi_cntrl, mhi_chan);
- return ret;
- }
- static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
- struct mhi_event *mhi_event,
- struct mhi_event_ctxt *er_ctxt,
- int chan)
- {
- struct mhi_ring_element *dev_rp, *local_rp;
- struct mhi_ring *ev_ring;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- unsigned long flags;
- dma_addr_t ptr;
- MHI_VERB(dev, "Marking all events for chan: %d as stale\n", chan);
- ev_ring = &mhi_event->ring;
- /* mark all pending events related to this channel as stale */
- spin_lock_irqsave(&mhi_event->lock, flags);
- ptr = le64_to_cpu(er_ctxt->rp);
- if (!is_valid_ring_ptr(ev_ring, ptr)) {
- MHI_ERR(dev,
- "Event ring rp points outside of the event ring\n");
- dev_rp = ev_ring->rp;
- } else {
- dev_rp = mhi_to_virtual(ev_ring, ptr);
- }
- local_rp = ev_ring->rp;
- while (dev_rp != local_rp) {
- if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
- chan == MHI_TRE_GET_EV_CHID(local_rp))
- local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
- MHI_PKT_TYPE_STALE_EVENT);
- local_rp++;
- if (local_rp == (ev_ring->base + ev_ring->len))
- local_rp = ev_ring->base;
- }
- MHI_VERB(dev, "Finished marking events as stale events\n");
- spin_unlock_irqrestore(&mhi_event->lock, flags);
- }
- static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan)
- {
- struct mhi_ring *buf_ring, *tre_ring;
- struct mhi_result result;
- /* Reset any pending buffers */
- buf_ring = &mhi_chan->buf_ring;
- tre_ring = &mhi_chan->tre_ring;
- result.transaction_status = -ENOTCONN;
- result.bytes_xferd = 0;
- while (tre_ring->rp != tre_ring->wp) {
- struct mhi_buf_info *buf_info = buf_ring->rp;
- if (mhi_chan->dir == DMA_TO_DEVICE)
- atomic_dec(&mhi_cntrl->pending_pkts);
- if (!buf_info->pre_mapped)
- mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
- mhi_del_ring_element(mhi_cntrl, buf_ring);
- mhi_del_ring_element(mhi_cntrl, tre_ring);
- if (mhi_chan->pre_alloc) {
- kfree(buf_info->cb_buf);
- } else {
- result.buf_addr = buf_info->cb_buf;
- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
- }
- }
- }
- void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
- {
- struct mhi_event *mhi_event;
- struct mhi_event_ctxt *er_ctxt;
- int chan = mhi_chan->chan;
- /* Nothing to reset, client doesn't queue buffers */
- if (mhi_chan->offload_ch)
- return;
- read_lock_bh(&mhi_cntrl->pm_lock);
- mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
- er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
- mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
- mhi_reset_data_chan(mhi_cntrl, mhi_chan);
- read_unlock_bh(&mhi_cntrl->pm_lock);
- }
- static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
- {
- int ret, dir;
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan;
- for (dir = 0; dir < 2; dir++) {
- mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
- if (!mhi_chan)
- continue;
- ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
- if (ret)
- goto error_open_chan;
- }
- return 0;
- error_open_chan:
- for (--dir; dir >= 0; dir--) {
- mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
- if (!mhi_chan)
- continue;
- mhi_unprepare_channel(mhi_cntrl, mhi_chan);
- }
- return ret;
- }
- int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
- {
- return __mhi_prepare_for_transfer(mhi_dev, 0);
- }
- EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
- int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
- {
- return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
- }
- EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
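- /*
- * Editor's sketch (hypothetical client probe, not part of this file): a
- * typical client starts both channels in probe, then either queues its own
- * RX buffers or uses the _autoqueue variant to have the core pre-fill the
- * DL ring. The 4096-byte buffer size here is arbitrary.
- */
- static int __maybe_unused example_probe(struct mhi_device *mhi_dev,
- const struct mhi_device_id *id)
- {
- void *buf;
- int ret = mhi_prepare_for_transfer(mhi_dev);
- if (ret)
- return ret;
- buf = kmalloc(4096, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- /* buf comes back through dl_xfer_cb() as result->buf_addr */
- ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, 4096, MHI_EOT);
- if (ret)
- kfree(buf);
- return ret;
- }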
- void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
- {
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan;
- int dir;
- /* Get out of suspended state */
- mhi_cntrl->runtime_get(mhi_cntrl);
- for (dir = 0; dir < 2; dir++) {
- mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
- if (!mhi_chan)
- continue;
- mhi_unprepare_channel(mhi_cntrl, mhi_chan);
- }
- /* Allow suspend */
- mhi_cntrl->runtime_put(mhi_cntrl);
- }
- EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
- static int mhi_update_transfer_state(struct mhi_device *mhi_dev,
- enum mhi_ch_state_type to_state)
- {
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan;
- int dir, ret;
- for (dir = 0; dir < 2; dir++) {
- mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
- if (!mhi_chan)
- continue;
- /*
- * Bail out if one of the channels fails, as the client will reset
- * both upon failure
- */
- mutex_lock(&mhi_chan->mutex);
- ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, to_state);
- if (ret) {
- mutex_unlock(&mhi_chan->mutex);
- return ret;
- }
- mutex_unlock(&mhi_chan->mutex);
- }
- return 0;
- }
- int mhi_stop_transfer(struct mhi_device *mhi_dev)
- {
- return mhi_update_transfer_state(mhi_dev, MHI_CH_STATE_TYPE_STOP);
- }
- EXPORT_SYMBOL(mhi_stop_transfer);
- int mhi_start_transfer(struct mhi_device *mhi_dev)
- {
- return mhi_update_transfer_state(mhi_dev, MHI_CH_STATE_TYPE_START);
- }
- EXPORT_SYMBOL(mhi_start_transfer);
- int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
- {
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
- struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
- int ret;
- spin_lock_bh(&mhi_event->lock);
- ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
- spin_unlock_bh(&mhi_event->lock);
- return ret;
- }
- EXPORT_SYMBOL_GPL(mhi_poll);
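- /*
- * Editor's sketch (hypothetical client, error handling elided): mhi_poll()
- * lets a client drain DL completions on its own schedule, e.g. from a NAPI
- * poll handler, instead of relying on event-ring interrupts:
- *
- * done = mhi_poll(mhi_dev, budget);
- * if (done < budget)
- * napi_complete_done(napi, done);
- */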