ARM: PL011: Fix DMA support
commit 58ac1b3799799069d53f5bf95c093f2fe8dd3cc5 upstream.
Since there is no guarantee that the memory returned by
dma_alloc_coherent() is associated with a 'struct page', using the
architecture specific phys_to_page() is wrong, but using
virt_to_page() would be as well.
Stop using sg lists altogether and just use the *_single() functions
instead. This also simplifies the code a bit since the scatterlists in
this driver always have only one entry anyway.
gc: Add a commit log from the initial thread:
https://lore.kernel.org/lkml/86db0fe5-930d-4cbb-bd7d-03367da38951@app.fastmail.com/
Use consistent names for dma buffers
Fixes: cb06ff102e ("ARM: PL011: Add support for Rx DMA buffer polling.")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
Cc: stable <stable@kernel.org>
Link: https://lore.kernel.org/r/20231122171503.235649-1-gregory.clement@bootlin.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit e4ed324746 (parent 713f6ff326), committed by Greg Kroah-Hartman
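In essence, the RX side of this change follows the pattern sketched below: the dma_addr_t returned by dma_alloc_coherent() is kept alongside the CPU pointer and handed straight to dmaengine_prep_slave_single(), so no scatterlist (and no phys_to_page()/virt_to_page() guess) is ever needed. This is a minimal illustrative sketch, not driver code; the demo_* names and the surrounding error handling are assumptions.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Hypothetical mirror of the new pl011_dmabuf: bus address + CPU address. */
struct demo_dmabuf {
	dma_addr_t	dma;	/* handed out by dma_alloc_coherent() */
	size_t		len;
	char		*buf;	/* CPU-visible mapping of the same memory */
};

static int demo_rx_start(struct dma_chan *chan, struct demo_dmabuf *db)
{
	struct dma_async_tx_descriptor *desc;

	db->len = PAGE_SIZE;
	db->buf = dma_alloc_coherent(chan->device->dev, db->len,
				     &db->dma, GFP_KERNEL);
	if (!db->buf)
		return -ENOMEM;

	/* Single-entry prep call: the dma_addr_t is used directly,
	 * no scatterlist and no struct page lookup. */
	desc = dmaengine_prep_slave_single(chan, db->dma, db->len,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}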
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -222,8 +222,9 @@ static struct vendor_data vendor_zte = {
 
 /* Deals with DMA transactions */
 
-struct pl011_sgbuf {
-	struct scatterlist sg;
-	char *buf;
+struct pl011_dmabuf {
+	dma_addr_t		dma;
+	size_t			len;
+	char			*buf;
 };
 
@@ -231,8 +232,8 @@ struct pl011_dmarx_data {
 	struct dma_chan		*chan;
 	struct completion	complete;
 	bool			use_buf_b;
-	struct pl011_sgbuf	sgbuf_a;
-	struct pl011_sgbuf	sgbuf_b;
+	struct pl011_dmabuf	dbuf_a;
+	struct pl011_dmabuf	dbuf_b;
 	dma_cookie_t		cookie;
 	bool			running;
 	struct timer_list	timer;
@@ -245,7 +246,8 @@ struct pl011_dmarx_data {
 
 struct pl011_dmatx_data {
 	struct dma_chan		*chan;
-	struct scatterlist	sg;
+	dma_addr_t		dma;
+	size_t			len;
 	char			*buf;
 	bool			queued;
 };
@@ -366,32 +368,24 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
 
 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
 
-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
 	enum dma_data_direction dir)
 {
-	dma_addr_t dma_addr;
-
-	sg->buf = dma_alloc_coherent(chan->device->dev,
-		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
-	if (!sg->buf)
+	db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
+				     &db->dma, GFP_KERNEL);
+	if (!db->buf)
 		return -ENOMEM;
-
-	sg_init_table(&sg->sg, 1);
-	sg_set_page(&sg->sg, phys_to_page(dma_addr),
-		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
-	sg_dma_address(&sg->sg) = dma_addr;
-	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
+	db->len = PL011_DMA_BUFFER_SIZE;
 
 	return 0;
 }
 
-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
 	enum dma_data_direction dir)
 {
-	if (sg->buf) {
+	if (db->buf) {
 		dma_free_coherent(chan->device->dev,
-			PL011_DMA_BUFFER_SIZE, sg->buf,
-			sg_dma_address(&sg->sg));
+				  PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
 	}
 }
 
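The pl011_dmabuf_free() introduced above follows the usual coherent-buffer teardown rule: dma_free_coherent() must receive the same device, size, CPU pointer, and dma_addr_t that dma_alloc_coherent() produced. A corresponding sketch, reusing the hypothetical demo_dmabuf from the earlier example:

static void demo_rx_free(struct dma_chan *chan, struct demo_dmabuf *db)
{
	if (db->buf) {
		/* Same dev/size/CPU-pointer/handle as the allocation. */
		dma_free_coherent(chan->device->dev, db->len, db->buf, db->dma);
		db->buf = NULL;
	}
}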
@@ -552,8 +546,8 @@ static void pl011_dma_tx_callback(void *data)
 
 	spin_lock_irqsave(&uap->port.lock, flags);
 	if (uap->dmatx.queued)
-		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
-			     DMA_TO_DEVICE);
+		dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
+				 dmatx->len, DMA_TO_DEVICE);
 
 	dmacr = uap->dmacr;
 	uap->dmacr = dmacr & ~UART011_TXDMAE;
@@ -639,18 +633,19 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
 			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
 	}
 
-	dmatx->sg.length = count;
-
-	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
+	dmatx->len = count;
+	dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
+				    DMA_TO_DEVICE);
+	if (dmatx->dma == DMA_MAPPING_ERROR) {
 		uap->dmatx.queued = false;
 		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
 		return -EBUSY;
 	}
 
-	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
+	desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
 					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
-		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
+		dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
 		uap->dmatx.queued = false;
 		/*
 		 * If DMA cannot be used right now, we complete this
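The TX refill above switches from a one-entry scatterlist to a per-transfer streaming mapping. Below is a minimal sketch of that map/check/unmap discipline, with hypothetical demo_* helpers; dma_mapping_error() is the generic form of the direct DMA_MAPPING_ERROR comparison used in the patch.

static int demo_tx_map(struct device *dev, void *buf, size_t len,
		       dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -EBUSY;		/* mapping failed, caller falls back to PIO */
	return 0;
}

static void demo_tx_unmap(struct device *dev, dma_addr_t dma, size_t len)
{
	/* Must match the mapping's size and direction exactly. */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}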
@@ -814,8 +809,8 @@ __acquires(&uap->port.lock)
 	dmaengine_terminate_async(uap->dmatx.chan);
 
 	if (uap->dmatx.queued) {
-		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
-			     DMA_TO_DEVICE);
+		dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
+				 uap->dmatx.len, DMA_TO_DEVICE);
 		uap->dmatx.queued = false;
 		uap->dmacr &= ~UART011_TXDMAE;
 		pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -829,15 +824,15 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
 	struct dma_chan *rxchan = uap->dmarx.chan;
 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
 	struct dma_async_tx_descriptor *desc;
-	struct pl011_sgbuf *sgbuf;
+	struct pl011_dmabuf *dbuf;
 
 	if (!rxchan)
 		return -EIO;
 
 	/* Start the RX DMA job */
-	sgbuf = uap->dmarx.use_buf_b ?
-		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+	dbuf = uap->dmarx.use_buf_b ?
+		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+	desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
 					DMA_DEV_TO_MEM,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	/*
@@ -877,8 +872,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 			       bool readfifo)
 {
 	struct tty_port *port = &uap->port.state->port;
-	struct pl011_sgbuf *sgbuf = use_buf_b ?
-		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	struct pl011_dmabuf *dbuf = use_buf_b ?
+		&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
 	int dma_count = 0;
 	u32 fifotaken = 0; /* only used for vdbg() */
 
@@ -887,7 +882,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 
 	if (uap->dmarx.poll_rate) {
 		/* The data can be taken by polling */
-		dmataken = sgbuf->sg.length - dmarx->last_residue;
+		dmataken = dbuf->len - dmarx->last_residue;
 		/* Recalculate the pending size */
 		if (pending >= dmataken)
 			pending -= dmataken;
@@ -901,7 +896,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 	 * Note that tty_insert_flip_buf() tries to take as many chars
 	 * as it can.
 	 */
-	dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+	dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
 			pending);
 
 	uap->port.icount.rx += dma_count;
@@ -912,7 +907,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 
 	/* Reset the last_residue for Rx DMA poll */
 	if (uap->dmarx.poll_rate)
-		dmarx->last_residue = sgbuf->sg.length;
+		dmarx->last_residue = dbuf->len;
 
 	/*
 	 * Only continue with trying to read the FIFO if all DMA chars have
@@ -949,8 +944,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
 {
 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
 	struct dma_chan *rxchan = dmarx->chan;
-	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
-		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
+	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+		&dmarx->dbuf_b : &dmarx->dbuf_a;
 	size_t pending;
 	struct dma_tx_state state;
 	enum dma_status dmastat;
@@ -972,7 +967,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
 	pl011_write(uap->dmacr, uap, REG_DMACR);
 	uap->dmarx.running = false;
 
-	pending = sgbuf->sg.length - state.residue;
+	pending = dbuf->len - state.residue;
 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
 	/* Then we terminate the transfer - we now know our residue */
 	dmaengine_terminate_all(rxchan);
@@ -999,8 +994,8 @@ static void pl011_dma_rx_callback(void *data)
 	struct pl011_dmarx_data *dmarx = &uap->dmarx;
 	struct dma_chan *rxchan = dmarx->chan;
 	bool lastbuf = dmarx->use_buf_b;
-	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
-		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
+	struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+		&dmarx->dbuf_b : &dmarx->dbuf_a;
 	size_t pending;
 	struct dma_tx_state state;
 	int ret;
@@ -1018,7 +1013,7 @@ static void pl011_dma_rx_callback(void *data)
 	 * the DMA irq handler. So we check the residue here.
 	 */
 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
-	pending = sgbuf->sg.length - state.residue;
+	pending = dbuf->len - state.residue;
 	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
 	/* Then we terminate the transfer - we now know our residue */
 	dmaengine_terminate_all(rxchan);
@@ -1070,16 +1065,16 @@ static void pl011_dma_rx_poll(struct timer_list *t)
 	unsigned long flags = 0;
 	unsigned int dmataken = 0;
 	unsigned int size = 0;
-	struct pl011_sgbuf *sgbuf;
+	struct pl011_dmabuf *dbuf;
 	int dma_count;
 	struct dma_tx_state state;
 
-	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+	dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
 	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
 	if (likely(state.residue < dmarx->last_residue)) {
-		dmataken = sgbuf->sg.length - dmarx->last_residue;
+		dmataken = dbuf->len - dmarx->last_residue;
 		size = dmarx->last_residue - state.residue;
-		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
 				size);
 		if (dma_count == size)
 			dmarx->last_residue = state.residue;
@@ -1126,7 +1121,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 		return;
 	}
 
-	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
+	uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
 
 	/* The DMA buffer is now the FIFO the TTY subsystem can use */
 	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
@@ -1136,7 +1131,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 		goto skip_rx;
 
 	/* Allocate and map DMA RX buffers */
-	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
 			       DMA_FROM_DEVICE);
 	if (ret) {
 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
@@ -1144,12 +1139,12 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 		goto skip_rx;
 	}
 
-	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+	ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
 			       DMA_FROM_DEVICE);
 	if (ret) {
 		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
 			"RX buffer B", ret);
-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
 				 DMA_FROM_DEVICE);
 		goto skip_rx;
 	}
@@ -1203,7 +1198,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 	/* In theory, this should already be done by pl011_dma_flush_buffer */
 	dmaengine_terminate_all(uap->dmatx.chan);
 	if (uap->dmatx.queued) {
-		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
-			     DMA_TO_DEVICE);
+		dma_unmap_single(uap->dmatx.chan->device->dev,
+				 uap->dmatx.dma, uap->dmatx.len,
+				 DMA_TO_DEVICE);
 		uap->dmatx.queued = false;
 	}
@@ -1215,8 +1211,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 	if (uap->using_rx_dma) {
 		dmaengine_terminate_all(uap->dmarx.chan);
 		/* Clean up the RX DMA */
-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
-		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
+		pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
 		if (uap->dmarx.poll_rate)
 			del_timer_sync(&uap->dmarx.timer);
 		uap->using_rx_dma = false;
|