Merge eded3ad80a ("ia64: fix an addr to taddr in huge_pte_offset()") into android12-5.10-lts

Steps on the way to 5.10.180

Change-Id: Ied555d8ab53844823d31e3221bd0fb155f0baeae
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -58,7 +58,7 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
 
 	pgd = pgd_offset(mm, taddr);
 	if (pgd_present(*pgd)) {
-		p4d = p4d_offset(pgd, addr);
+		p4d = p4d_offset(pgd, taddr);
 		if (p4d_present(*p4d)) {
 			pud = pud_offset(p4d, taddr);
 			if (pud_present(*pud)) {
@@ -173,7 +173,6 @@ handler: ;\
 	l.sw PT_GPR28(r1),r28 ;\
 	l.sw PT_GPR29(r1),r29 ;\
 	/* r30 already save */ ;\
-	/* l.sw PT_GPR30(r1),r30*/ ;\
 	l.sw PT_GPR31(r1),r31 ;\
 	TRACE_IRQS_OFF_ENTRY ;\
 	/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
@@ -211,9 +210,8 @@ handler: ;\
 	l.sw PT_GPR27(r1),r27 ;\
 	l.sw PT_GPR28(r1),r28 ;\
 	l.sw PT_GPR29(r1),r29 ;\
-	/* r31 already saved */ ;\
-	l.sw PT_GPR30(r1),r30 ;\
-	/* l.sw PT_GPR31(r1),r31 */ ;\
+	/* r30 already saved */ ;\
+	l.sw PT_GPR31(r1),r31 ;\
 	/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
 	l.addi r30,r0,-1 ;\
 	l.sw PT_ORIG_GPR11(r1),r30 ;\
@@ -33,9 +33,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
 			else
 				return rc;
 		}
-		if (clkspec.np == node && !clk_supplier)
+		if (clkspec.np == node && !clk_supplier) {
+			of_node_put(clkspec.np);
 			return 0;
+		}
 		pclk = of_clk_get_from_provider(&clkspec);
+		of_node_put(clkspec.np);
 		if (IS_ERR(pclk)) {
 			if (PTR_ERR(pclk) != -EPROBE_DEFER)
 				pr_warn("clk: couldn't get parent clock %d for %pOF\n",
@@ -48,10 +51,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
 		if (rc < 0)
 			goto err;
 		if (clkspec.np == node && !clk_supplier) {
+			of_node_put(clkspec.np);
 			rc = 0;
 			goto err;
 		}
 		clk = of_clk_get_from_provider(&clkspec);
+		of_node_put(clkspec.np);
 		if (IS_ERR(clk)) {
 			if (PTR_ERR(clk) != -EPROBE_DEFER)
 				pr_warn("clk: couldn't get assigned clock %d for %pOF\n",
@@ -93,10 +98,13 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
 			else
 				return rc;
 		}
-		if (clkspec.np == node && !clk_supplier)
+		if (clkspec.np == node && !clk_supplier) {
+			of_node_put(clkspec.np);
 			return 0;
+		}
 
 		clk = of_clk_get_from_provider(&clkspec);
+		of_node_put(clkspec.np);
 		if (IS_ERR(clk)) {
 			if (PTR_ERR(clk) != -EPROBE_DEFER)
 				pr_warn("clk: couldn't get clock %d for %pOF\n",
@@ -258,21 +258,25 @@ int __init davinci_timer_register(struct clk *clk,
 				resource_size(&timer_cfg->reg),
 				"davinci-timer")) {
 		pr_err("Unable to request memory region\n");
-		return -EBUSY;
+		rv = -EBUSY;
+		goto exit_clk_disable;
 	}
 
 	base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
 	if (!base) {
 		pr_err("Unable to map the register range\n");
-		return -ENOMEM;
+		rv = -ENOMEM;
+		goto exit_mem_region;
 	}
 
 	davinci_timer_init(base);
 	tick_rate = clk_get_rate(clk);
 
 	clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
-	if (!clockevent)
-		return -ENOMEM;
+	if (!clockevent) {
+		rv = -ENOMEM;
+		goto exit_iounmap_base;
+	}
 
 	clockevent->dev.name = "tim12";
 	clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
@@ -297,7 +301,7 @@ int __init davinci_timer_register(struct clk *clk,
 			 "clockevent/tim12", clockevent);
 	if (rv) {
 		pr_err("Unable to request the clockevent interrupt\n");
-		return rv;
+		goto exit_free_clockevent;
 	}
 
 	davinci_clocksource.dev.rating = 300;
@@ -324,13 +328,27 @@ int __init davinci_timer_register(struct clk *clk,
 	rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
 	if (rv) {
 		pr_err("Unable to register clocksource\n");
-		return rv;
+		goto exit_free_irq;
 	}
 
 	sched_clock_register(davinci_timer_read_sched_clock,
 			     DAVINCI_TIMER_CLKSRC_BITS, tick_rate);
 
 	return 0;
 
+exit_free_irq:
+	free_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
+		 clockevent);
+exit_free_clockevent:
+	kfree(clockevent);
+exit_iounmap_base:
+	iounmap(base);
+exit_mem_region:
+	release_mem_region(timer_cfg->reg.start,
+			   resource_size(&timer_cfg->reg));
+exit_clk_disable:
+	clk_disable_unprepare(clk);
+	return rv;
 }
 
 static int __init of_davinci_timer_register(struct device_node *np)
@@ -212,6 +212,7 @@ struct at_xdmac {
 	int			irq;
 	struct clk		*clk;
 	u32			save_gim;
+	u32			save_gs;
 	struct dma_pool		*at_xdmac_desc_pool;
 	struct at_xdmac_chan	chan[];
 };
@@ -1910,6 +1911,7 @@ static int atmel_xdmac_suspend(struct device *dev)
 		}
 	}
 	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+	atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
 
 	at_xdmac_off(atxdmac);
 	clk_disable_unprepare(atxdmac->clk);
@@ -1946,6 +1948,7 @@ static int atmel_xdmac_resume(struct device *dev)
 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
 			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
 			wmb();
-			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+			if (atxdmac->save_gs & atchan->mask)
+				at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
 		}
 	}
@@ -166,7 +166,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
 	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
 }
 
-static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+static int dw_edma_start_transfer(struct dw_edma_chan *chan)
 {
 	struct dw_edma_chunk *child;
 	struct dw_edma_desc *desc;
@@ -174,16 +174,16 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
 
 	vd = vchan_next_desc(&chan->vc);
 	if (!vd)
-		return;
+		return 0;
 
 	desc = vd2dw_edma_desc(vd);
 	if (!desc)
-		return;
+		return 0;
 
 	child = list_first_entry_or_null(&desc->chunk->list,
 					 struct dw_edma_chunk, list);
 	if (!child)
-		return;
+		return 0;
 
 	dw_edma_v0_core_start(child, !desc->xfer_sz);
 	desc->xfer_sz += child->ll_region.sz;
@@ -191,6 +191,8 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
 	list_del(&child->list);
 	kfree(child);
 	desc->chunks_alloc--;
+
+	return 1;
 }
 
 static int dw_edma_device_config(struct dma_chan *dchan,
@@ -274,9 +276,12 @@ static void dw_edma_device_issue_pending(struct dma_chan *dchan)
 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
 	unsigned long flags;
 
+	if (!chan->configured)
+		return;
+
 	spin_lock_irqsave(&chan->vc.lock, flags);
-	if (chan->configured && chan->request == EDMA_REQ_NONE &&
-	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
+	if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
+	    chan->status == EDMA_ST_IDLE) {
 		chan->status = EDMA_ST_BUSY;
 		dw_edma_start_transfer(chan);
 	}
@@ -497,14 +502,14 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
 		switch (chan->request) {
 		case EDMA_REQ_NONE:
 			desc = vd2dw_edma_desc(vd);
-			if (desc->chunks_alloc) {
-				chan->status = EDMA_ST_BUSY;
-				dw_edma_start_transfer(chan);
-			} else {
+			if (!desc->chunks_alloc) {
 				list_del(&vd->node);
 				vchan_cookie_complete(vd);
-				chan->status = EDMA_ST_IDLE;
 			}
+
+			/* Continue transferring if there are remaining chunks or issued requests.
+			 */
+			chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
 			break;
 
 		case EDMA_REQ_STOP:
@@ -756,7 +756,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
 	if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
-		ret = EPROBE_DEFER;
+		ret = -EPROBE_DEFER;
 		goto disable_reg_clk;
 	}
 	if (!IS_ERR(xor_dev->clk)) {
@@ -243,6 +243,13 @@ void rpi_firmware_put(struct rpi_firmware *fw)
 }
 EXPORT_SYMBOL_GPL(rpi_firmware_put);
 
+static void devm_rpi_firmware_put(void *data)
+{
+	struct rpi_firmware *fw = data;
+
+	rpi_firmware_put(fw);
+}
+
 static int rpi_firmware_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -338,6 +345,28 @@ err_put_device:
 }
 EXPORT_SYMBOL_GPL(rpi_firmware_get);
 
+/**
+ * devm_rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @firmware_node:    Pointer to the firmware Device Tree node.
+ *
+ * Returns NULL is the firmware device is not ready.
+ */
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+					   struct device_node *firmware_node)
+{
+	struct rpi_firmware *fw;
+
+	fw = rpi_firmware_get(firmware_node);
+	if (!fw)
+		return NULL;
+
+	if (devm_add_action_or_reset(dev, devm_rpi_firmware_put, fw))
+		return NULL;
+
+	return fw;
+}
+EXPORT_SYMBOL_GPL(devm_rpi_firmware_get);
+
 static const struct of_device_id rpi_firmware_of_match[] = {
 	{ .compatible = "raspberrypi,bcm2835-firmware", },
 	{},
@@ -2924,6 +2924,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
 		return -EINVAL;
 
+	trace_icm_send_rej(&cm_id_priv->id, reason);
+
 	switch (state) {
 	case IB_CM_REQ_SENT:
 	case IB_CM_MRA_REQ_RCVD:
@@ -2954,7 +2956,6 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 		return -EINVAL;
 	}
 
-	trace_icm_send_rej(&cm_id_priv->id, reason);
 	ret = ib_post_send_mad(msg, NULL);
 	if (ret) {
 		cm_free_msg(msg);
@@ -15,6 +15,7 @@
 #include "verbs.h"
 #include "trace_ibhdrs.h"
 #include "ipoib.h"
+#include "trace_tx.h"
 
 /* Add a convenience helper */
 #define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
@@ -63,12 +64,14 @@ static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
 
 static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
 {
+	trace_hfi1_txq_stop(txq);
 	if (atomic_inc_return(&txq->stops) == 1)
 		netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
 }
 
 static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
 {
+	trace_hfi1_txq_wake(txq);
 	if (atomic_dec_and_test(&txq->stops))
 		netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
 }
@@ -89,9 +92,11 @@ static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
 {
 	++txq->sent_txreqs;
 	if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
-	    !atomic_xchg(&txq->ring_full, 1))
+	    !atomic_xchg(&txq->ring_full, 1)) {
+		trace_hfi1_txq_full(txq);
 		hfi1_ipoib_stop_txq(txq);
+	}
 }
 
 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
 {
@@ -112,9 +117,11 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
 	 * to protect against ring overflow.
 	 */
 	if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
-	    atomic_xchg(&txq->ring_full, 0))
+	    atomic_xchg(&txq->ring_full, 0)) {
+		trace_hfi1_txq_xmit_unstopped(txq);
 		hfi1_ipoib_wake_txq(txq);
+	}
 }
 
 static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
 {
@@ -244,6 +251,7 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		ret = sdma_txadd_page(dd,
+				      NULL,
 				      txreq,
 				      skb_frag_page(frag),
 				      frag->bv_offset,
@@ -405,6 +413,7 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
 		sdma_select_engine_sc(priv->dd,
 				      txp->flow.tx_queue,
 				      txp->flow.sc5);
+		trace_hfi1_flow_switch(txp->txq);
 	}
 
 	return tx;
@@ -525,6 +534,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
 	if (txq->flow.as_int != txp->flow.as_int) {
 		int ret;
 
+		trace_hfi1_flow_flush(txq);
 		ret = hfi1_ipoib_flush_tx_list(dev, txq);
 		if (unlikely(ret)) {
 			if (ret == -EBUSY)
@@ -635,8 +645,10 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
 			/* came from non-list submit */
 			list_add_tail(&txreq->list, &txq->tx_list);
 		if (list_empty(&txq->wait.list)) {
-			if (!atomic_xchg(&txq->no_desc, 1))
+			if (!atomic_xchg(&txq->no_desc, 1)) {
+				trace_hfi1_txq_queued(txq);
 				hfi1_ipoib_stop_txq(txq);
+			}
 			iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
 		}
 
@@ -659,6 +671,7 @@ static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
 	struct hfi1_ipoib_txq *txq =
 		container_of(wait, struct hfi1_ipoib_txq, wait);
 
+	trace_hfi1_txq_wakeup(txq);
 	if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
 		iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
 }
@@ -167,11 +167,11 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
 	spin_lock_irqsave(&handler->lock, flags);
 	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
 	if (node) {
-		ret = -EINVAL;
+		ret = -EEXIST;
 		goto unlock;
 	}
 	__mmu_int_rb_insert(mnode, &handler->root);
-	list_add(&mnode->list, &handler->lru_list);
+	list_add_tail(&mnode->list, &handler->lru_list);
 
 	ret = handler->ops->insert(handler->ops_arg, mnode);
 	if (ret) {
@@ -184,6 +184,19 @@ unlock:
 	return ret;
 }
 
+/* Caller must hold handler lock */
+struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
+					  unsigned long addr, unsigned long len)
+{
+	struct mmu_rb_node *node;
+
+	trace_hfi1_mmu_rb_search(addr, len);
+	node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
+	if (node)
+		list_move_tail(&node->list, &handler->lru_list);
+	return node;
+}
+
 /* Caller must hold handler lock */
 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
 					   unsigned long addr,
@@ -208,32 +221,6 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
 	return node;
 }
 
-bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
-				     unsigned long addr, unsigned long len,
-				     struct mmu_rb_node **rb_node)
-{
-	struct mmu_rb_node *node;
-	unsigned long flags;
-	bool ret = false;
-
-	if (current->mm != handler->mn.mm)
-		return ret;
-
-	spin_lock_irqsave(&handler->lock, flags);
-	node = __mmu_rb_search(handler, addr, len);
-	if (node) {
-		if (node->addr == addr && node->len == len)
-			goto unlock;
-		__mmu_int_rb_remove(node, &handler->root);
-		list_del(&node->list); /* remove from LRU list */
-		ret = true;
-	}
-unlock:
-	spin_unlock_irqrestore(&handler->lock, flags);
-	*rb_node = node;
-	return ret;
-}
-
 void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
 {
 	struct mmu_rb_node *rbnode, *ptr;
@@ -247,8 +234,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
 	INIT_LIST_HEAD(&del_list);
 
 	spin_lock_irqsave(&handler->lock, flags);
-	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
-					 list) {
+	list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
 		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
 					&stop)) {
 			__mmu_int_rb_remove(rbnode, &handler->root);
@@ -260,36 +246,11 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
 	}
 	spin_unlock_irqrestore(&handler->lock, flags);
 
-	while (!list_empty(&del_list)) {
-		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
-		list_del(&rbnode->list);
+	list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
 		handler->ops->remove(handler->ops_arg, rbnode);
 	}
 }
 
-/*
- * It is up to the caller to ensure that this function does not race with the
- * mmu invalidate notifier which may be calling the users remove callback on
- * 'node'.
- */
-void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
-			struct mmu_rb_node *node)
-{
-	unsigned long flags;
-
-	if (current->mm != handler->mn.mm)
-		return;
-
-	/* Validity of handler and node pointers has been checked by caller. */
-	trace_hfi1_mmu_rb_remove(node->addr, node->len);
-	spin_lock_irqsave(&handler->lock, flags);
-	__mmu_int_rb_remove(node, &handler->root);
-	list_del(&node->list); /* remove from LRU list */
-	spin_unlock_irqrestore(&handler->lock, flags);
-
-	handler->ops->remove(handler->ops_arg, node);
-}
-
 static int mmu_notifier_range_start(struct mmu_notifier *mn,
 				    const struct mmu_notifier_range *range)
 {
@@ -93,10 +93,8 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
 int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
 		       struct mmu_rb_node *mnode);
 void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
-void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
-			struct mmu_rb_node *mnode);
-bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
-				     unsigned long addr, unsigned long len,
-				     struct mmu_rb_node **rb_node);
+struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
+					  unsigned long addr,
+					  unsigned long len);
 
 #endif /* _HFI1_MMU_RB_H */
@@ -1635,22 +1635,7 @@ static inline void sdma_unmap_desc(
 	struct hfi1_devdata *dd,
 	struct sdma_desc *descp)
 {
-	switch (sdma_mapping_type(descp)) {
-	case SDMA_MAP_SINGLE:
-		dma_unmap_single(
-			&dd->pcidev->dev,
-			sdma_mapping_addr(descp),
-			sdma_mapping_len(descp),
-			DMA_TO_DEVICE);
-		break;
-	case SDMA_MAP_PAGE:
-		dma_unmap_page(
-			&dd->pcidev->dev,
-			sdma_mapping_addr(descp),
-			sdma_mapping_len(descp),
-			DMA_TO_DEVICE);
-		break;
-	}
+	system_descriptor_complete(dd, descp);
 }
 
 /*
@@ -3170,7 +3155,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
 
 	/* Add descriptor for coalesce buffer */
 	tx->desc_limit = MAX_DESC;
-	return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
+	return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx,
 				 addr, tx->tlen);
 }
 
@@ -3210,10 +3195,12 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 			return rval;
 		}
 	}
+
 	/* finish the one just added */
 	make_tx_sdma_desc(
 		tx,
 		SDMA_MAP_NONE,
+		NULL,
 		dd->sdma_pad_phys,
 		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
 	_sdma_close_tx(dd, tx);
@@ -635,6 +635,7 @@ static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
 static inline void make_tx_sdma_desc(
 	struct sdma_txreq *tx,
 	int type,
+	void *pinning_ctx,
 	dma_addr_t addr,
 	size_t len)
 {
@@ -653,6 +654,7 @@ static inline void make_tx_sdma_desc(
 				<< SDMA_DESC0_PHY_ADDR_SHIFT) |
 			(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
 				<< SDMA_DESC0_BYTE_COUNT_SHIFT);
+	desc->pinning_ctx = pinning_ctx;
 }
 
 /* helper to extend txreq */
@@ -685,6 +687,7 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
 static inline int _sdma_txadd_daddr(
 	struct hfi1_devdata *dd,
 	int type,
+	void *pinning_ctx,
 	struct sdma_txreq *tx,
 	dma_addr_t addr,
 	u16 len)
@@ -694,6 +697,7 @@ static inline int _sdma_txadd_daddr(
 	make_tx_sdma_desc(
 		tx,
 		type,
+		pinning_ctx,
 		addr, len);
 	WARN_ON(len > tx->tlen);
 	tx->tlen -= len;
@@ -714,6 +718,7 @@ static inline int _sdma_txadd_daddr(
 /**
  * sdma_txadd_page() - add a page to the sdma_txreq
  * @dd: the device to use for mapping
+ * @pinning_ctx: context to be released at descriptor retirement
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
@@ -729,6 +734,7 @@ static inline int _sdma_txadd_daddr(
 */
 static inline int sdma_txadd_page(
 	struct hfi1_devdata *dd,
+	void *pinning_ctx,
 	struct sdma_txreq *tx,
 	struct page *page,
 	unsigned long offset,
@@ -756,8 +762,7 @@ static inline int sdma_txadd_page(
 		return -ENOSPC;
 	}
 
-	return _sdma_txadd_daddr(
-			dd, SDMA_MAP_PAGE, tx, addr, len);
+	return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, pinning_ctx, tx, addr, len);
 }
 
 /**
@@ -791,7 +796,8 @@ static inline int sdma_txadd_daddr(
 		return rval;
 	}
 
-	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
+	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, NULL, tx,
+				 addr, len);
 }
 
 /**
@@ -837,8 +843,7 @@ static inline int sdma_txadd_kvaddr(
 		return -ENOSPC;
 	}
 
-	return _sdma_txadd_daddr(
-			dd, SDMA_MAP_SINGLE, tx, addr, len);
+	return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx, addr, len);
 }
 
 struct iowait_work;
@@ -1090,4 +1095,5 @@ extern uint mod_num_sdma;
 
 void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
 
+void system_descriptor_complete(struct hfi1_devdata *dd, struct sdma_desc *descp);
 #endif
@@ -61,6 +61,7 @@
 struct sdma_desc {
 	/* private:  don't use directly */
 	u64 qw[2];
+	void *pinning_ctx;
 };
 
 /**
@@ -78,10 +78,6 @@ DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_search,
 	     TP_PROTO(unsigned long addr, unsigned long len),
 	     TP_ARGS(addr, len));
 
-DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_remove,
-	     TP_PROTO(unsigned long addr, unsigned long len),
-	     TP_ARGS(addr, len));
-
 DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_mem_invalidate,
 	     TP_PROTO(unsigned long addr, unsigned long len),
 	     TP_ARGS(addr, len));
@@ -53,6 +53,8 @@
 #include "hfi.h"
 #include "mad.h"
 #include "sdma.h"
+#include "ipoib.h"
+#include "user_sdma.h"
 
 const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
 
@@ -653,6 +655,80 @@ TRACE_EVENT(hfi1_sdma_user_completion,
 		      __entry->code)
 );
 
+TRACE_EVENT(hfi1_usdma_defer,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     struct sdma_engine *sde,
+		     struct iowait *wait),
+	    TP_ARGS(pq, sde, wait),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(struct sdma_engine *, sde)
+			     __field(struct iowait *, wait)
+			     __field(int, engine)
+			     __field(int, empty)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->sde = sde;
+			   __entry->wait = wait;
+			   __entry->engine = sde->this_idx;
+			   __entry->empty = list_empty(&__entry->wait->list);
+			   ),
+	    TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      (unsigned long long)__entry->sde,
+		      (unsigned long long)__entry->wait,
+		      __entry->engine,
+		      __entry->empty
+		      )
+);
+
+TRACE_EVENT(hfi1_usdma_activate,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     struct iowait *wait,
+		     int reason),
+	    TP_ARGS(pq, wait, reason),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(struct iowait *, wait)
+			     __field(int, reason)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->wait = wait;
+			   __entry->reason = reason;
+			   ),
+	    TP_printk("[%s] pq %llx wait %llx reason %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      (unsigned long long)__entry->wait,
+		      __entry->reason
+		      )
+);
+
+TRACE_EVENT(hfi1_usdma_we,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     int we_ret),
+	    TP_ARGS(pq, we_ret),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(int, state)
+			     __field(int, we_ret)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->state = pq->state;
+			   __entry->we_ret = we_ret;
+			   ),
+	    TP_printk("[%s] pq %llx state %d we_ret %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      __entry->state,
+		      __entry->we_ret
+		      )
+);
+
 const char *print_u32_array(struct trace_seq *, u32 *, int);
 #define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
 
@@ -858,6 +934,109 @@ DEFINE_EVENT(
 	TP_ARGS(qp, flag)
 );
 
+DECLARE_EVENT_CLASS(/* AIP  */
+	hfi1_ipoib_txq_template,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq),
+	TP_STRUCT__entry(/* entry */
+		DD_DEV_ENTRY(txq->priv->dd)
+		__field(struct hfi1_ipoib_txq *, txq)
+		__field(struct sdma_engine *, sde)
+		__field(ulong, head)
+		__field(ulong, tail)
+		__field(uint, used)
+		__field(uint, flow)
+		__field(int, stops)
+		__field(int, no_desc)
+		__field(u8, idx)
+		__field(u8, stopped)
+	),
+	TP_fast_assign(/* assign */
+		DD_DEV_ASSIGN(txq->priv->dd)
+		__entry->txq = txq;
+		__entry->sde = txq->sde;
+		__entry->head = txq->tx_ring.head;
+		__entry->tail = txq->tx_ring.tail;
+		__entry->idx = txq->q_idx;
+		__entry->used =
+			txq->sent_txreqs -
+			atomic64_read(&txq->complete_txreqs);
+		__entry->flow = txq->flow.as_int;
+		__entry->stops = atomic_read(&txq->stops);
+		__entry->no_desc = atomic_read(&txq->no_desc);
+		__entry->stopped =
+			__netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
+	),
+	TP_printk(/* print */
+		"[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
+		__get_str(dev),
+		(unsigned long long)__entry->txq,
+		__entry->idx,
+		(unsigned long long)__entry->sde,
+		__entry->head,
+		__entry->tail,
+		__entry->flow,
+		__entry->used,
+		__entry->stops,
+		__entry->no_desc,
+		__entry->stopped
+	)
+);
+
+DEFINE_EVENT(/* queue stop */
+	hfi1_ipoib_txq_template, hfi1_txq_stop,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queue wake */
+	hfi1_ipoib_txq_template, hfi1_txq_wake,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow flush */
+	hfi1_ipoib_txq_template, hfi1_flow_flush,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow switch */
+	hfi1_ipoib_txq_template, hfi1_flow_switch,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* wakeup */
+	hfi1_ipoib_txq_template, hfi1_txq_wakeup,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* full */
+	hfi1_ipoib_txq_template, hfi1_txq_full,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queued */
+	hfi1_ipoib_txq_template, hfi1_txq_queued,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_stopped */
+	hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_unstopped */
+	hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
 #endif /* __HFI1_TRACE_TX_H */
 
 #undef TRACE_INCLUDE_PATH
@@ -65,7 +65,6 @@
 
 #include "hfi.h"
 #include "sdma.h"
-#include "mmu_rb.h"
 #include "user_sdma.h"
 #include "verbs.h"  /* for the headers */
 #include "common.h" /* for struct hfi1_tid_info */
@@ -80,11 +79,7 @@ static unsigned initial_pkt_count = 8;
 static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
-static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
-static int pin_vector_pages(struct user_sdma_request *req,
-			    struct user_sdma_iovec *iovec);
-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
-			       unsigned start, unsigned npages);
+static void user_sdma_free_request(struct user_sdma_request *req);
 static int check_header_template(struct user_sdma_request *req,
 				 struct hfi1_pkt_header *hdr, u32 lrhlen,
 				 u32 datalen);
@@ -122,6 +117,11 @@ static struct mmu_rb_ops sdma_rb_ops = {
 	.invalidate = sdma_rb_invalidate
 };
 
+static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
+					   struct user_sdma_txreq *tx,
+					   struct user_sdma_iovec *iovec,
+					   u32 *pkt_remaining);
+
 static int defer_packet_queue(
 	struct sdma_engine *sde,
 	struct iowait_work *wait,
@@ -133,6 +133,7 @@ static int defer_packet_queue(
 		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
 
 	write_seqlock(&sde->waitlock);
+	trace_hfi1_usdma_defer(pq, sde, &pq->busy);
 	if (sdma_progress(sde, seq, txreq))
 		goto eagain;
 	/*
@@ -157,7 +158,8 @@ static void activate_packet_queue(struct iowait *wait, int reason)
 {
 	struct hfi1_user_sdma_pkt_q *pq =
 		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
-	pq->busy.lock = NULL;
+
+	trace_hfi1_usdma_activate(pq, wait, reason);
 	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
 	wake_up(&wait->wait_dma);
 };
@@ -451,6 +453,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 		ret = -EINVAL;
 		goto free_req;
 	}
+
 	/* Copy the header from the user buffer */
 	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
 			     sizeof(req->hdr));
@@ -525,9 +528,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 		memcpy(&req->iovs[i].iov,
 		       iovec + idx++,
 		       sizeof(req->iovs[i].iov));
-		ret = pin_vector_pages(req, &req->iovs[i]);
-		if (ret) {
-			req->data_iovs = i;
+		if (req->iovs[i].iov.iov_len == 0) {
+			ret = -EINVAL;
 			goto free_req;
 		}
 		req->data_len += req->iovs[i].iov.iov_len;
@@ -599,13 +601,17 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 	while (req->seqsubmitted != req->info.npkts) {
 		ret = user_sdma_send_pkts(req, pcount);
 		if (ret < 0) {
+			int we_ret;
+
 			if (ret != -EBUSY)
 				goto free_req;
-			if (wait_event_interruptible_timeout(
+			we_ret = wait_event_interruptible_timeout(
 				pq->busy.wait_dma,
 				pq->state == SDMA_PKT_Q_ACTIVE,
 				msecs_to_jiffies(
-					SDMA_IOWAIT_TIMEOUT)) <= 0)
+					SDMA_IOWAIT_TIMEOUT));
+			trace_hfi1_usdma_we(pq, we_ret);
+			if (we_ret <= 0)
 				flush_pq_iowait(pq);
 		}
 	}
@@ -621,7 +627,7 @@ free_req:
 		if (req->seqsubmitted)
 			wait_event(pq->busy.wait_dma,
 				   (req->seqcomp == req->seqsubmitted - 1));
-		user_sdma_free_request(req, true);
+		user_sdma_free_request(req);
 		pq_update(pq);
 		set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
 	}
@@ -733,48 +739,6 @@ static int user_sdma_txadd_ahg(struct user_sdma_request *req,
 	return ret;
 }
 
-static int user_sdma_txadd(struct user_sdma_request *req,
-			   struct user_sdma_txreq *tx,
-			   struct user_sdma_iovec *iovec, u32 datalen,
-			   u32 *queued_ptr, u32 *data_sent_ptr,
-			   u64 *iov_offset_ptr)
-{
-	int ret;
-	unsigned int pageidx, len;
-	unsigned long base, offset;
-	u64 iov_offset = *iov_offset_ptr;
-	u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
-
-	base = (unsigned long)iovec->iov.iov_base;
-	offset = offset_in_page(base + iovec->offset + iov_offset);
-	pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
-		   PAGE_SHIFT);
-	len = offset + req->info.fragsize > PAGE_SIZE ?
-		PAGE_SIZE - offset : req->info.fragsize;
-	len = min((datalen - queued), len);
-	ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
-			      offset, len);
-	if (ret) {
-		SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
-		return ret;
-	}
-	iov_offset += len;
-	queued += len;
-	data_sent += len;
-	if (unlikely(queued < datalen && pageidx == iovec->npages &&
-		     req->iov_idx < req->data_iovs - 1)) {
-		iovec->offset += iov_offset;
-		iovec = &req->iovs[++req->iov_idx];
-		iov_offset = 0;
-	}
-
-	*queued_ptr = queued;
-	*data_sent_ptr = data_sent;
-	*iov_offset_ptr = iov_offset;
-	return ret;
-}
-
 static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 {
 	int ret = 0;
@@ -806,8 +770,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 		maxpkts = req->info.npkts - req->seqnum;
 
 	while (npkts < maxpkts) {
-		u32 datalen = 0, queued = 0, data_sent = 0;
-		u64 iov_offset = 0;
+		u32 datalen = 0;
 
 		/*
 		 * Check whether any of the completions have come back
@@ -900,27 +863,17 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 				goto free_txreq;
 		}
 
-		/*
-		 * If the request contains any data vectors, add up to
-		 * fragsize bytes to the descriptor.
-		 */
-		while (queued < datalen &&
-		       (req->sent + data_sent) < req->data_len) {
-			ret = user_sdma_txadd(req, tx, iovec, datalen,
-					      &queued, &data_sent, &iov_offset);
-			if (ret)
-				goto free_txreq;
-		}
-		/*
-		 * The txreq was submitted successfully so we can update
-		 * the counters.
-		 */
 		req->koffset += datalen;
 		if (req_opcode(req->info.ctrl) == EXPECTED)
 			req->tidoffset += datalen;
-		req->sent += data_sent;
-		if (req->data_len)
-			iovec->offset += iov_offset;
+		req->sent += datalen;
+		while (datalen) {
+			ret = add_system_pages_to_sdma_packet(req, tx, iovec,
+							      &datalen);
+			if (ret)
+				goto free_txreq;
+			iovec = &req->iovs[req->iov_idx];
+		}
 		list_add_tail(&tx->txreq.list, &req->txps);
 		/*
 		 * It is important to increment this here as it is used to
@@ -957,133 +910,14 @@ free_tx:
 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
 {
 	struct evict_data evict_data;
+	struct mmu_rb_handler *handler = pq->handler;
 
 	evict_data.cleared = 0;
 	evict_data.target = npages;
-	hfi1_mmu_rb_evict(pq->handler, &evict_data);
+	hfi1_mmu_rb_evict(handler, &evict_data);
 	return evict_data.cleared;
 }
 
-static int pin_sdma_pages(struct user_sdma_request *req,
-			  struct user_sdma_iovec *iovec,
-			  struct sdma_mmu_node *node,
-			  int npages)
-{
-	int pinned, cleared;
-	struct page **pages;
-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
-
-	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
-	memcpy(pages, node->pages, node->npages * sizeof(*pages));
-
-	npages -= node->npages;
-retry:
-	if (!hfi1_can_pin_pages(pq->dd, current->mm,
-				atomic_read(&pq->n_locked), npages)) {
-		cleared = sdma_cache_evict(pq, npages);
-		if (cleared >= npages)
-			goto retry;
-	}
-	pinned = hfi1_acquire_user_pages(current->mm,
-					 ((unsigned long)iovec->iov.iov_base +
-					 (node->npages * PAGE_SIZE)), npages, 0,
-					 pages + node->npages);
-	if (pinned < 0) {
-		kfree(pages);
-		return pinned;
-	}
-	if (pinned != npages) {
-		unpin_vector_pages(current->mm, pages, node->npages, pinned);
-		return -EFAULT;
-	}
-	kfree(node->pages);
-	node->rb.len = iovec->iov.iov_len;
-	node->pages = pages;
-	atomic_add(pinned, &pq->n_locked);
-	return pinned;
-}
-
-static void unpin_sdma_pages(struct sdma_mmu_node *node)
-{
-	if (node->npages) {
-		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
-				   node->npages);
-		atomic_sub(node->npages, &node->pq->n_locked);
-	}
-}
-
-static int pin_vector_pages(struct user_sdma_request *req,
-			    struct user_sdma_iovec *iovec)
-{
-	int ret = 0, pinned, npages;
-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
-	struct sdma_mmu_node *node = NULL;
-	struct mmu_rb_node *rb_node;
-	struct iovec *iov;
-	bool extracted;
-
-	extracted =
-		hfi1_mmu_rb_remove_unless_exact(pq->handler,
-						(unsigned long)
-						iovec->iov.iov_base,
-						iovec->iov.iov_len, &rb_node);
-	if (rb_node) {
-		node = container_of(rb_node, struct sdma_mmu_node, rb);
-		if (!extracted) {
-			atomic_inc(&node->refcount);
-			iovec->pages = node->pages;
-			iovec->npages = node->npages;
-			iovec->node = node;
-			return 0;
-		}
-	}
-
-	if (!node) {
-		node = kzalloc(sizeof(*node), GFP_KERNEL);
-		if (!node)
-			return -ENOMEM;
-
-		node->rb.addr = (unsigned long)iovec->iov.iov_base;
-		node->pq = pq;
-		atomic_set(&node->refcount, 0);
-	}
-
-	iov = &iovec->iov;
-	npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
-	if (node->npages < npages) {
-		pinned = pin_sdma_pages(req, iovec, node, npages);
-		if (pinned < 0) {
-			ret = pinned;
-			goto bail;
-		}
-		node->npages += pinned;
-		npages = node->npages;
-	}
-	iovec->pages = node->pages;
-	iovec->npages = npages;
-	iovec->node = node;
-
-	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
-	if (ret) {
-		iovec->node = NULL;
-		goto bail;
-	}
-	return 0;
-bail:
-	unpin_sdma_pages(node);
-	kfree(node);
-	return ret;
-}
-
-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
-			       unsigned start, unsigned npages)
-{
-	hfi1_release_user_pages(mm, pages + start, npages, false);
-	kfree(pages);
-}
-
 static int check_header_template(struct user_sdma_request *req,
 				 struct hfi1_pkt_header *hdr, u32 lrhlen,
 				 u32 datalen)
@@ -1425,7 +1259,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
 	if (req->seqcomp != req->info.npkts - 1)
 		return;
 
-	user_sdma_free_request(req, false);
+	user_sdma_free_request(req);
 	set_comp_state(pq, cq, req->info.comp_idx, state, status);
 	pq_update(pq);
 }
@@ -1436,10 +1270,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
 		wake_up(&pq->wait);
 }
 
-static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
+static void user_sdma_free_request(struct user_sdma_request *req)
 {
-	int i;
-
 	if (!list_empty(&req->txps)) {
 		struct sdma_txreq *t, *p;
 
@@ -1452,21 +1284,6 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
 		}
 	}
 
-	for (i = 0; i < req->data_iovs; i++) {
-		struct sdma_mmu_node *node = req->iovs[i].node;
-
-		if (!node)
-			continue;
-
-		req->iovs[i].node = NULL;
-
-		if (unpin)
-			hfi1_mmu_rb_remove(req->pq->handler,
-					   &node->rb);
-		else
-			atomic_dec(&node->refcount);
-	}
-
 	kfree(req->tids);
 	clear_bit(req->info.comp_idx, req->pq->req_in_use);
 }
@@ -1484,6 +1301,368 @@ static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
 		  idx, state, ret);
 }
 
+static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+			       unsigned int start, unsigned int npages)
+{
+	hfi1_release_user_pages(mm, pages + start, npages, false);
+	kfree(pages);
+}
+
+static void free_system_node(struct sdma_mmu_node *node)
+{
+	if (node->npages) {
+		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+				   node->npages);
+		atomic_sub(node->npages, &node->pq->n_locked);
+	}
+	kfree(node);
+}
+
+static inline void acquire_node(struct sdma_mmu_node *node)
+{
+	atomic_inc(&node->refcount);
+	WARN_ON(atomic_read(&node->refcount) < 0);
+}
+
+static inline void release_node(struct mmu_rb_handler *handler,
+				struct sdma_mmu_node *node)
+{
+	atomic_dec(&node->refcount);
+	WARN_ON(atomic_read(&node->refcount) < 0);
+}
+
+static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
+					      unsigned long start,
+					      unsigned long end)
+{
+	struct mmu_rb_node *rb_node;
+	struct sdma_mmu_node *node;
+	unsigned long flags;
+
+	spin_lock_irqsave(&handler->lock, flags);
+	rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
+	if (!rb_node) {
+		spin_unlock_irqrestore(&handler->lock, flags);
+		return NULL;
+	}
+	node = container_of(rb_node, struct sdma_mmu_node, rb);
+	acquire_node(node);
+	spin_unlock_irqrestore(&handler->lock, flags);
+
+	return node;
+}
+
+static int pin_system_pages(struct user_sdma_request *req,
+			    uintptr_t start_address, size_t length,
+			    struct sdma_mmu_node *node, int npages)
+{
+	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+	int pinned, cleared;
+	struct page **pages;
+
+	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+retry:
+	if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
+				npages)) {
+		SDMA_DBG(req, "Evicting: nlocked %u npages %u",
+			 atomic_read(&pq->n_locked), npages);
+		cleared = sdma_cache_evict(pq, npages);
+		if (cleared >= npages)
+			goto retry;
+	}
+
+	SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
+		 start_address, node->npages, npages);
+	pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
+					 pages);
+
+	if (pinned < 0) {
+		kfree(pages);
+		SDMA_DBG(req, "pinned %d", pinned);
+		return pinned;
+	}
+	if (pinned != npages) {
+		unpin_vector_pages(current->mm, pages, node->npages, pinned);
+		SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
+		return -EFAULT;
+	}
+	node->rb.addr = start_address;
+	node->rb.len = length;
+	node->pages = pages;
+	node->npages = npages;
+	atomic_add(pinned, &pq->n_locked);
+	SDMA_DBG(req, "done. pinned %d", pinned);
+	return 0;
+}
+
+static int add_system_pinning(struct user_sdma_request *req,
+			      struct sdma_mmu_node **node_p,
+			      unsigned long start, unsigned long len)
+
+{
+	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+	struct sdma_mmu_node *node;
+	int ret;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->pq = pq;
+	ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
+	if (ret == 0) {
+		ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
+		if (ret)
+			free_system_node(node);
+		else
+			*node_p = node;
+
+		return ret;
+	}
+
+	kfree(node);
+	return ret;
+}
+
+static int get_system_cache_entry(struct user_sdma_request *req,
+				  struct sdma_mmu_node **node_p,
+				  size_t req_start, size_t req_len)
+{
+	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+	u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
+	u64 end = PFN_ALIGN(req_start + req_len);
+	struct mmu_rb_handler *handler = pq->handler;
+	int ret;
+
+	if ((end - start) == 0) {
+		SDMA_DBG(req,
+			 "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
+			 req_start, req_len, start, end);
+		return -EINVAL;
+	}
+
+	SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);
+
+	while (1) {
+		struct sdma_mmu_node *node =
+			find_system_node(handler, start, end);
+		u64 prepend_len = 0;
+
+		SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
+		if (!node) {
+			ret = add_system_pinning(req, node_p, start,
+						 end - start);
+			if (ret == -EEXIST) {
+				/*
+				 * Another execution context has inserted a
+				 * conficting entry first.
+				 */
+				continue;
+			}
+			return ret;
+		}
+
+		if (node->rb.addr <= start) {
+			/*
+			 * This entry covers at least part of the region. If it doesn't extend
+			 * to the end, then this will be called again for the next segment.
+			 */
+			*node_p = node;
+			return 0;
+		}
+
+		SDMA_DBG(req, "prepend: node->rb.addr %lx, node->refcount %d",
+			 node->rb.addr, atomic_read(&node->refcount));
+		prepend_len = node->rb.addr - start;
+
+		/*
+		 * This node will not be returned, instead a new node
+		 * will be. So release the reference.
+		 */
+		release_node(handler, node);
+
+		/* Prepend a node to cover the beginning of the allocation */
+		ret = add_system_pinning(req, node_p, start, prepend_len);
+		if (ret == -EEXIST) {
+			/* Another execution context has inserted a conficting entry first. */
+			continue;
+		}
+		return ret;
+	}
+}
+
+static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
+				      struct user_sdma_txreq *tx,
+				      struct sdma_mmu_node *cache_entry,
+				      size_t start,
+				      size_t from_this_cache_entry)
+{
+	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+	unsigned int page_offset;
+	unsigned int from_this_page;
+	size_t page_index;
+	void *ctx;
+	int ret;
+
+	/*
+	 * Because the cache may be more fragmented than the memory that is being accessed,
+	 * it's not strictly necessary to have a descriptor per cache entry.
+	 */
+
+	while (from_this_cache_entry) {
+		page_index = PFN_DOWN(start - cache_entry->rb.addr);
+
+		if (page_index >= cache_entry->npages) {
+			SDMA_DBG(req,
+				 "Request for page_index %zu >= cache_entry->npages %u",
+				 page_index, cache_entry->npages);
+			return -EINVAL;
+		}
+
+		page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
+		from_this_page = PAGE_SIZE - page_offset;
+
+		if (from_this_page < from_this_cache_entry) {
+			ctx = NULL;
+		} else {
+			/*
+			 * In the case they are equal the next line has no practical effect,
+			 * but it's better to do a register to register copy than a conditional
+			 * branch.
+			 */
+			from_this_page = from_this_cache_entry;
+			ctx = cache_entry;
+		}
+
+		ret = sdma_txadd_page(pq->dd, ctx, &tx->txreq,
+				      cache_entry->pages[page_index],
+				      page_offset, from_this_page);
+		if (ret) {
+			/*
+			 * When there's a failure, the entire request is freed by
+			 * user_sdma_send_pkts().
+			 */
+			SDMA_DBG(req,
+				 "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
+				 ret, page_index, page_offset, from_this_page);
+			return ret;
+		}
+		start += from_this_page;
+		from_this_cache_entry -= from_this_page;
+	}
+	return 0;
+}
+
+static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
+					   struct user_sdma_txreq *tx,
+					   struct user_sdma_iovec *iovec,
+					   size_t from_this_iovec)
+{
+	struct mmu_rb_handler *handler = req->pq->handler;
+
+	while (from_this_iovec > 0) {
+		struct sdma_mmu_node *cache_entry;
+		size_t from_this_cache_entry;
+		size_t start;
+		int ret;
+
+		start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
+		ret = get_system_cache_entry(req, &cache_entry, start,
+					     from_this_iovec);
+		if (ret) {
+			SDMA_DBG(req, "pin system segment failed %d", ret);
+			return ret;
+		}
+
+		from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
+		if (from_this_cache_entry > from_this_iovec)
+			from_this_cache_entry = from_this_iovec;
+
+		ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
+						 from_this_cache_entry);
+		if (ret) {
+			/*
+			 * We're guaranteed that there will be no descriptor
+			 * completion callback that releases this node
+			 * because only the last descriptor referencing it
+			 * has a context attached, and a failure means the
+			 * last descriptor was never added.
+			 */
+			release_node(handler, cache_entry);
+			SDMA_DBG(req, "add system segment failed %d", ret);
+			return ret;
+		}
+
+		iovec->offset += from_this_cache_entry;
+		from_this_iovec -= from_this_cache_entry;
+	}
+
+	return 0;
+}
+
+static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
+					   struct user_sdma_txreq *tx,
+					   struct user_sdma_iovec *iovec,
+					   u32 *pkt_data_remaining)
+{
+	size_t remaining_to_add = *pkt_data_remaining;
+	/*
+	 * Walk through iovec entries, ensure the associated pages
+	 * are pinned and mapped, add data to the packet until no more
+	 * data remains to be added.
+	 */
+	while (remaining_to_add > 0) {
+		struct user_sdma_iovec *cur_iovec;
+		size_t from_this_iovec;
+		int ret;
+
+		cur_iovec = iovec;
+		from_this_iovec = iovec->iov.iov_len - iovec->offset;
+
+		if (from_this_iovec > remaining_to_add) {
+			from_this_iovec = remaining_to_add;
+		} else {
+			/* The current iovec entry will be consumed by this pass. */
+			req->iov_idx++;
+			iovec++;
+		}
+
+		ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
+						      from_this_iovec);
+		if (ret)
+			return ret;
+
+		remaining_to_add -= from_this_iovec;
+	}
+	*pkt_data_remaining = remaining_to_add;
+
+	return 0;
+}
+
+void system_descriptor_complete(struct hfi1_devdata *dd,
+				struct sdma_desc *descp)
+{
+	switch (sdma_mapping_type(descp)) {
+	case SDMA_MAP_SINGLE:
+		dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
+				 sdma_mapping_len(descp), DMA_TO_DEVICE);
+		break;
+	case SDMA_MAP_PAGE:
+		dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
+			       sdma_mapping_len(descp), DMA_TO_DEVICE);
+		break;
+	}
+
+	if (descp->pinning_ctx) {
+		struct sdma_mmu_node *node = descp->pinning_ctx;
+
+		release_node(node->rb.handler, node);
+	}
+}
+
 static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
 			   unsigned long len)
 {
@@ -1530,8 +1709,7 @@ static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
 	struct sdma_mmu_node *node =
 		container_of(mnode, struct sdma_mmu_node, rb);
 
-	unpin_sdma_pages(node);
-	kfree(node);
+	free_system_node(node);
 }
 
 static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
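The hfi1 hunks above replace per-iovec page bookkeeping with a reference-counted pinning cache: every lookup takes a reference under the handler lock (acquire_node()), every completed or failed descriptor drops one (release_node()), and free_system_node() unpins only when the rb-tree side finally lets the entry go. A minimal userspace sketch of that acquire/release discipline, using C11 atomics; the type and function names here are invented for illustration and are not part of the driver:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical cache entry mirroring struct sdma_mmu_node's refcount. */
struct cache_node {
	atomic_int refcount;
};

static void node_acquire(struct cache_node *n)
{
	/* lookup side: in the driver this happens under the tree lock */
	atomic_fetch_add(&n->refcount, 1);
}

static void node_release(struct cache_node *n)
{
	/* completion/error side; underflow means release without acquire */
	if (atomic_fetch_sub(&n->refcount, 1) < 1)
		fprintf(stderr, "refcount underflow\n");	/* WARN_ON() analogue */
}

int main(void)
{
	struct cache_node n = { .refcount = 0 };

	node_acquire(&n);	/* like get_system_cache_entry() finding a hit */
	node_release(&n);	/* like system_descriptor_complete() */
	printf("refcount=%d\n", atomic_load(&n.refcount));
	return 0;
}

The WARN_ON() in the real release_node() serves the same purpose as the underflow check here: a negative count means a descriptor dropped a reference it never took, which with actual page pinning would be a use-after-unpin.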
@@ -53,6 +53,7 @@
 #include "common.h"
 #include "iowait.h"
 #include "user_exp_rcv.h"
+#include "mmu_rb.h"
 
 /* The maximum number of Data io vectors per message/request */
 #define MAX_VECTORS_PER_REQ 8
@@ -152,16 +153,11 @@ struct sdma_mmu_node {
 struct user_sdma_iovec {
 	struct list_head list;
 	struct iovec iov;
-	/* number of pages in this vector */
-	unsigned int npages;
-	/* array of pinned pages for this vector */
-	struct page **pages;
 	/*
 	 * offset into the virtual address space of the vector at
 	 * which we last left off.
 	 */
 	u64 offset;
-	struct sdma_mmu_node *node;
 };
 
 /* evict operation argument */
@@ -820,8 +820,8 @@ static int build_verbs_tx_desc(
 
 	/* add icrc, lt byte, and padding to flit */
 	if (extra_bytes)
-		ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
-				       sde->dd->sdma_pad_phys, extra_bytes);
+		ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys,
+				       extra_bytes);
 
 bail_txadd:
 	return ret;
@@ -106,6 +106,7 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
 
 		/* combine physically continuous fragments later? */
 		ret = sdma_txadd_page(sde->dd,
+				      NULL,
 				      &tx->txreq,
 				      skb_frag_page(frag),
 				      skb_frag_off(frag),
@@ -412,9 +412,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
 			    struct mlx4_ib_qp *qp,
 			    struct mlx4_ib_create_qp *ucmd)
 {
+	u32 cnt;
+
 	/* Sanity check SQ size before proceeding */
-	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
-	    ucmd->log_sq_stride >
+	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+	    cnt > dev->dev->caps.max_wqes)
+		return -EINVAL;
+	if (ucmd->log_sq_stride >
 		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
 	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
 		return -EINVAL;
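The mlx4 fix works because check_shl_overflow() makes the overflow itself the failure condition, instead of comparing an already-wrapped value against max_wqes. A standalone sketch of an equivalent guard for a 32-bit result; this is a plain-C stand-in for the kernel helper, and the max_wqes value is made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Rough userspace equivalent of check_shl_overflow() for a u32 result:
 * returns true when (a << shift) cannot be represented in 32 bits. */
static bool shl_overflows_u32(uint32_t a, unsigned int shift, uint32_t *res)
{
	if (shift >= 32 || a > (UINT32_MAX >> shift)) {
		*res = 0;
		return true;
	}
	*res = a << shift;
	return false;
}

int main(void)
{
	uint32_t cnt;

	/* a hostile log_sq_bb_count such as 40 must be rejected, not wrapped */
	if (shl_overflows_u32(1, 40, &cnt) || cnt > 16384 /* stand-in max_wqes */)
		puts("rejected: -EINVAL");
	else
		printf("ok, cnt=%u\n", cnt);
	return 0;
}

With the old open-coded 1 << ucmd->log_sq_bb_count, a large user-supplied log shifted past the width of the type, so the sanity check could pass for a queue size no hardware supports.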
@@ -595,7 +595,21 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
 			obj_id;
 
 	case MLX5_IB_OBJECT_DEVX_OBJ:
-		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
+	{
+		u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+		struct devx_obj *devx_uobj = uobj->object;
+
+		if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER &&
+		    devx_uobj->flow_counter_bulk_size) {
+			u64 end;
+
+			end = devx_uobj->obj_id +
+				devx_uobj->flow_counter_bulk_size;
+			return devx_uobj->obj_id <= obj_id && end > obj_id;
+		}
+
+		return devx_uobj->obj_id == obj_id;
+	}
 
 	default:
 		return false;
@@ -1416,10 +1430,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 		goto obj_free;
 
 	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
-		u8 bulk = MLX5_GET(alloc_flow_counter_in,
-				   cmd_in,
-				   flow_counter_bulk);
-		obj->flow_counter_bulk_size = 128UL * bulk;
+		u32 bulk = MLX5_GET(alloc_flow_counter_in,
+				    cmd_in,
+				    flow_counter_bulk_log_size);
+
+		if (bulk)
+			bulk = 1 << bulk;
+		else
+			bulk = 128UL * MLX5_GET(alloc_flow_counter_in,
+						cmd_in,
+						flow_counter_bulk);
+		obj->flow_counter_bulk_size = bulk;
 	}
 
 	uobj->object = obj;
@@ -4164,7 +4164,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		return -EINVAL;
 
 	if (attr->port_num == 0 ||
-	    attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
+	    attr->port_num > dev->num_ports) {
 		mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
 			    attr->port_num, dev->num_ports);
 		return -EINVAL;
@@ -458,9 +458,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
 
 	dev_dbg(&netdev->dev, "siw: event %lu\n", event);
 
-	if (dev_net(netdev) != &init_net)
-		return NOTIFY_OK;
-
 	base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
 	if (!base_dev)
 		return NOTIFY_OK;
@@ -549,6 +549,7 @@ static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
  */
 static int srpt_refresh_port(struct srpt_port *sport)
 {
+	struct ib_mad_agent *mad_agent;
 	struct ib_mad_reg_req reg_req;
 	struct ib_port_modify port_modify;
 	struct ib_port_attr port_attr;
@@ -593,24 +594,26 @@ static int srpt_refresh_port(struct srpt_port *sport)
 		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
 
-		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
-							 sport->port,
-							 IB_QPT_GSI,
-							 &reg_req, 0,
-							 srpt_mad_send_handler,
-							 srpt_mad_recv_handler,
-							 sport, 0);
-		if (IS_ERR(sport->mad_agent)) {
+		mad_agent = ib_register_mad_agent(sport->sdev->device,
+						  sport->port,
+						  IB_QPT_GSI,
+						  &reg_req, 0,
+						  srpt_mad_send_handler,
+						  srpt_mad_recv_handler,
+						  sport, 0);
+		if (IS_ERR(mad_agent)) {
 			pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
 			       dev_name(&sport->sdev->device->dev), sport->port,
-			       PTR_ERR(sport->mad_agent));
+			       PTR_ERR(mad_agent));
 			sport->mad_agent = NULL;
 			memset(&port_modify, 0, sizeof(port_modify));
 			port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
 			ib_modify_port(sport->sdev->device, sport->port, 0,
 				       &port_modify);
+			return 0;
 		}
+
+		sport->mad_agent = mad_agent;
 	}
 
 	return 0;
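The srpt change follows the publish-after-validate pattern: register into a local variable and store to the shared sport->mad_agent only after IS_ERR() has been ruled out, so no other context can ever observe an error pointer through the field. A compressed userspace sketch of that shape; the types and the NULL-for-error convention are illustrative, not the driver's:

#include <stdio.h>

struct agent { int port; };

struct port_state {
	struct agent *mad_agent;	/* shared: must only ever be valid or NULL */
};

/* Hypothetical registration that can fail (NULL stands in for ERR_PTR). */
static struct agent *register_agent(int port, struct agent *storage)
{
	if (port < 1)
		return NULL;
	storage->port = port;
	return storage;
}

static int refresh_port(struct port_state *sport, int port, struct agent *st)
{
	struct agent *agent = register_agent(port, st);	/* local first */

	if (!agent) {
		sport->mad_agent = NULL;	/* never publish the error value */
		return 0;			/* mirrors the hunk's early return */
	}
	sport->mad_agent = agent;		/* publish only once validated */
	return 0;
}

int main(void)
{
	struct port_state sport = { 0 };
	struct agent st;

	refresh_port(&sport, 0, &st);
	printf("agent after failed registration: %p\n", (void *)sport.mad_agent);
	return 0;
}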
@@ -134,7 +134,7 @@ static int rpi_ts_probe(struct platform_device *pdev)
 		return -ENOENT;
 	}
 
-	fw = rpi_firmware_get(fw_node);
+	fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
 	of_node_put(fw_node);
 	if (!fw)
 		return -EPROBE_DEFER;
@@ -160,7 +160,6 @@ static int rpi_ts_probe(struct platform_device *pdev)
 	touchbuf = (u32)ts->fw_regs_phys;
 	error = rpi_firmware_property(fw, RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF,
 				      &touchbuf, sizeof(touchbuf));
-
 	if (error || touchbuf != 0) {
 		dev_warn(dev, "Failed to set touchbuf, %d\n", error);
 		return error;
@@ -871,7 +871,7 @@ config LEDS_SPI_BYTE
 config LEDS_TI_LMU_COMMON
 	tristate "LED driver for TI LMU"
 	depends on LEDS_CLASS
-	depends on REGMAP
+	select REGMAP
 	help
 	  Say Y to enable the LED driver for TI LMU devices.
 	  This supports common features between the TI LM3532, LM3631, LM3632,
@@ -696,8 +696,9 @@ tca6507_led_dt_init(struct device *dev)
 		if (fwnode_property_read_string(child, "label", &led.name))
 			led.name = fwnode_get_name(child);
 
-		fwnode_property_read_string(child, "linux,default-trigger",
-					    &led.default_trigger);
+		if (fwnode_property_read_string(child, "linux,default-trigger",
+						&led.default_trigger))
+			led.default_trigger = NULL;
 
 		led.flags = 0;
 		if (fwnode_property_match_string(child, "compatible",
@@ -16,8 +16,8 @@
 #include <linux/platform_data/i2c-ocores.h>
 #include <linux/platform_device.h>
 
-#define TQMX86_IOBASE	0x160
-#define TQMX86_IOSIZE	0x3f
+#define TQMX86_IOBASE	0x180
+#define TQMX86_IOSIZE	0x20
 #define TQMX86_IOBASE_I2C	0x1a0
 #define TQMX86_IOSIZE_I2C	0xa
 #define TQMX86_IOBASE_WATCHDOG	0x18b
@@ -25,29 +25,33 @@
 #define TQMX86_IOBASE_GPIO	0x18d
 #define TQMX86_IOSIZE_GPIO	0x4
 
-#define TQMX86_REG_BOARD_ID		0x20
+#define TQMX86_REG_BOARD_ID		0x00
 #define TQMX86_REG_BOARD_ID_E38M	1
 #define TQMX86_REG_BOARD_ID_50UC	2
 #define TQMX86_REG_BOARD_ID_E38C	3
 #define TQMX86_REG_BOARD_ID_60EB	4
-#define TQMX86_REG_BOARD_ID_E39M	5
-#define TQMX86_REG_BOARD_ID_E39C	6
-#define TQMX86_REG_BOARD_ID_E39x	7
+#define TQMX86_REG_BOARD_ID_E39MS	5
+#define TQMX86_REG_BOARD_ID_E39C1	6
+#define TQMX86_REG_BOARD_ID_E39C2	7
 #define TQMX86_REG_BOARD_ID_70EB	8
 #define TQMX86_REG_BOARD_ID_80UC	9
-#define TQMX86_REG_BOARD_ID_90UC	10
-#define TQMX86_REG_BOARD_REV		0x21
-#define TQMX86_REG_IO_EXT_INT		0x26
+#define TQMX86_REG_BOARD_ID_110EB	11
+#define TQMX86_REG_BOARD_ID_E40M	12
+#define TQMX86_REG_BOARD_ID_E40S	13
+#define TQMX86_REG_BOARD_ID_E40C1	14
+#define TQMX86_REG_BOARD_ID_E40C2	15
+#define TQMX86_REG_BOARD_REV		0x01
+#define TQMX86_REG_IO_EXT_INT		0x06
 #define TQMX86_REG_IO_EXT_INT_NONE	0
 #define TQMX86_REG_IO_EXT_INT_7	1
 #define TQMX86_REG_IO_EXT_INT_9	2
 #define TQMX86_REG_IO_EXT_INT_12	3
 #define TQMX86_REG_IO_EXT_INT_MASK	0x3
 #define TQMX86_REG_IO_EXT_INT_GPIO_SHIFT	4
+#define TQMX86_REG_SAUC		0x17
 
-#define TQMX86_REG_I2C_DETECT	0x47
+#define TQMX86_REG_I2C_DETECT	0x1a7
 #define TQMX86_REG_I2C_DETECT_SOFT	0xa5
-#define TQMX86_REG_I2C_INT_EN	0x49
 
 static uint gpio_irq;
 module_param(gpio_irq, uint, 0);
@@ -107,7 +111,7 @@ static const struct mfd_cell tqmx86_devs[] = {
 	},
 };
 
-static const char *tqmx86_board_id_to_name(u8 board_id)
+static const char *tqmx86_board_id_to_name(u8 board_id, u8 sauc)
 {
 	switch (board_id) {
 	case TQMX86_REG_BOARD_ID_E38M:
@@ -118,18 +122,26 @@ static const char *tqmx86_board_id_to_name(u8 board_id)
 		return "TQMxE38C";
 	case TQMX86_REG_BOARD_ID_60EB:
 		return "TQMx60EB";
-	case TQMX86_REG_BOARD_ID_E39M:
-		return "TQMxE39M";
-	case TQMX86_REG_BOARD_ID_E39C:
-		return "TQMxE39C";
-	case TQMX86_REG_BOARD_ID_E39x:
-		return "TQMxE39x";
+	case TQMX86_REG_BOARD_ID_E39MS:
+		return (sauc == 0xff) ? "TQMxE39M" : "TQMxE39S";
+	case TQMX86_REG_BOARD_ID_E39C1:
+		return "TQMxE39C1";
+	case TQMX86_REG_BOARD_ID_E39C2:
+		return "TQMxE39C2";
 	case TQMX86_REG_BOARD_ID_70EB:
 		return "TQMx70EB";
 	case TQMX86_REG_BOARD_ID_80UC:
 		return "TQMx80UC";
-	case TQMX86_REG_BOARD_ID_90UC:
-		return "TQMx90UC";
+	case TQMX86_REG_BOARD_ID_110EB:
+		return "TQMx110EB";
+	case TQMX86_REG_BOARD_ID_E40M:
+		return "TQMxE40M";
+	case TQMX86_REG_BOARD_ID_E40S:
+		return "TQMxE40S";
+	case TQMX86_REG_BOARD_ID_E40C1:
+		return "TQMxE40C1";
+	case TQMX86_REG_BOARD_ID_E40C2:
+		return "TQMxE40C2";
 	default:
 		return "Unknown";
 	}
@@ -142,11 +154,15 @@ static int tqmx86_board_id_to_clk_rate(u8 board_id)
 	case TQMX86_REG_BOARD_ID_60EB:
 	case TQMX86_REG_BOARD_ID_70EB:
 	case TQMX86_REG_BOARD_ID_80UC:
-	case TQMX86_REG_BOARD_ID_90UC:
+	case TQMX86_REG_BOARD_ID_110EB:
+	case TQMX86_REG_BOARD_ID_E40M:
+	case TQMX86_REG_BOARD_ID_E40S:
+	case TQMX86_REG_BOARD_ID_E40C1:
+	case TQMX86_REG_BOARD_ID_E40C2:
 		return 24000;
-	case TQMX86_REG_BOARD_ID_E39M:
-	case TQMX86_REG_BOARD_ID_E39C:
-	case TQMX86_REG_BOARD_ID_E39x:
+	case TQMX86_REG_BOARD_ID_E39MS:
+	case TQMX86_REG_BOARD_ID_E39C1:
+	case TQMX86_REG_BOARD_ID_E39C2:
 		return 25000;
 	case TQMX86_REG_BOARD_ID_E38M:
 	case TQMX86_REG_BOARD_ID_E38C:
@@ -158,7 +174,7 @@ static int tqmx86_board_id_to_clk_rate(u8 board_id)
 
 static int tqmx86_probe(struct platform_device *pdev)
 {
-	u8 board_id, rev, i2c_det, io_ext_int_val;
+	u8 board_id, sauc, rev, i2c_det, io_ext_int_val;
 	struct device *dev = &pdev->dev;
 	u8 gpio_irq_cfg, readback;
 	const char *board_name;
@@ -188,14 +204,20 @@ static int tqmx86_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	board_id = ioread8(io_base + TQMX86_REG_BOARD_ID);
-	board_name = tqmx86_board_id_to_name(board_id);
+	sauc = ioread8(io_base + TQMX86_REG_SAUC);
+	board_name = tqmx86_board_id_to_name(board_id, sauc);
 	rev = ioread8(io_base + TQMX86_REG_BOARD_REV);
 
 	dev_info(dev,
 		 "Found %s - Board ID %d, PCB Revision %d, PLD Revision %d\n",
 		 board_name, board_id, rev >> 4, rev & 0xf);
 
-	i2c_det = ioread8(io_base + TQMX86_REG_I2C_DETECT);
+	/*
+	 * The I2C_DETECT register is in the range assigned to the I2C driver
+	 * later, so we don't extend TQMX86_IOSIZE. Use inb() for this one-off
+	 * access instead of ioport_map + unmap.
+	 */
+	i2c_det = inb(TQMX86_REG_I2C_DETECT);
 
 	if (gpio_irq_cfg) {
 		io_ext_int_val =
@@ -775,6 +775,7 @@ static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
 	usb2->base.lane = usb2->base.ops->map(&usb2->base);
 	if (IS_ERR(usb2->base.lane)) {
 		err = PTR_ERR(usb2->base.lane);
+		tegra_xusb_port_unregister(&usb2->base);
 		goto out;
 	}
 
@@ -841,6 +842,7 @@ static int tegra_xusb_add_ulpi_port(struct tegra_xusb_padctl *padctl,
 	ulpi->base.lane = ulpi->base.ops->map(&ulpi->base);
 	if (IS_ERR(ulpi->base.lane)) {
 		err = PTR_ERR(ulpi->base.lane);
+		tegra_xusb_port_unregister(&ulpi->base);
 		goto out;
 	}
 
@@ -138,6 +138,9 @@ static int read_channel(struct gab *adc_bat, enum power_supply_property psp,
 			result);
 	if (ret < 0)
 		pr_err("read channel error\n");
+	else
+		*result *= 1000;
+
 	return ret;
 }
 
@@ -74,6 +74,19 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 	u64 div, rate;
 	int err;
 
+	err = clk_prepare_enable(mdp->clk_main);
+	if (err < 0) {
+		dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
+		return err;
+	}
+
+	err = clk_prepare_enable(mdp->clk_mm);
+	if (err < 0) {
+		dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+		clk_disable_unprepare(mdp->clk_main);
+		return err;
+	}
+
 	/*
 	 * Find period, high_width and clk_div to suit duty_ns and period_ns.
 	 * Calculate proper div value to keep period value in the bound.
@@ -87,8 +100,11 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 	rate = clk_get_rate(mdp->clk_main);
 	clk_div = div_u64(rate * period_ns, NSEC_PER_SEC) >>
 			  PWM_PERIOD_BIT_WIDTH;
-	if (clk_div > PWM_CLKDIV_MAX)
+	if (clk_div > PWM_CLKDIV_MAX) {
+		clk_disable_unprepare(mdp->clk_mm);
+		clk_disable_unprepare(mdp->clk_main);
 		return -EINVAL;
+	}
 
 	div = NSEC_PER_SEC * (clk_div + 1);
 	period = div64_u64(rate * period_ns, div);
@@ -98,14 +114,17 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 	high_width = div64_u64(rate * duty_ns, div);
 	value = period | (high_width << PWM_HIGH_WIDTH_SHIFT);
 
-	err = clk_enable(mdp->clk_main);
-	if (err < 0)
-		return err;
-
-	err = clk_enable(mdp->clk_mm);
-	if (err < 0) {
-		clk_disable(mdp->clk_main);
-		return err;
+	if (mdp->data->bls_debug && !mdp->data->has_commit) {
+		/*
+		 * For MT2701, disable double buffer before writing register
+		 * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
+		 */
+		mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+					 mdp->data->bls_debug_mask,
+					 mdp->data->bls_debug_mask);
+		mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+					 mdp->data->con0_sel,
+					 mdp->data->con0_sel);
 	}
 
 	mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
@@ -124,8 +143,8 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 				 0x0);
 	}
 
-	clk_disable(mdp->clk_mm);
-	clk_disable(mdp->clk_main);
+	clk_disable_unprepare(mdp->clk_mm);
+	clk_disable_unprepare(mdp->clk_main);
 
 	return 0;
 }
@@ -135,13 +154,16 @@ static int mtk_disp_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 	struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
 	int err;
 
-	err = clk_enable(mdp->clk_main);
-	if (err < 0)
-		return err;
-
-	err = clk_enable(mdp->clk_mm);
+	err = clk_prepare_enable(mdp->clk_main);
 	if (err < 0) {
-		clk_disable(mdp->clk_main);
+		dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
+		return err;
+	}
+
+	err = clk_prepare_enable(mdp->clk_mm);
+	if (err < 0) {
+		dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+		clk_disable_unprepare(mdp->clk_main);
 		return err;
 	}
 
@@ -158,8 +180,8 @@ static void mtk_disp_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 	mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
 				 0x0);
 
-	clk_disable(mdp->clk_mm);
-	clk_disable(mdp->clk_main);
+	clk_disable_unprepare(mdp->clk_mm);
+	clk_disable_unprepare(mdp->clk_main);
 }
 
 static const struct pwm_ops mtk_disp_pwm_ops = {
@@ -194,14 +216,6 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
 	if (IS_ERR(mdp->clk_mm))
 		return PTR_ERR(mdp->clk_mm);
 
-	ret = clk_prepare(mdp->clk_main);
-	if (ret < 0)
-		return ret;
-
-	ret = clk_prepare(mdp->clk_mm);
-	if (ret < 0)
-		goto disable_clk_main;
-
 	mdp->chip.dev = &pdev->dev;
 	mdp->chip.ops = &mtk_disp_pwm_ops;
 	mdp->chip.base = -1;
@@ -209,44 +223,22 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
 
 	ret = pwmchip_add(&mdp->chip);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
-		goto disable_clk_mm;
+		dev_err(&pdev->dev, "pwmchip_add() failed: %pe\n", ERR_PTR(ret));
+		return ret;
 	}
 
 	platform_set_drvdata(pdev, mdp);
 
-	/*
-	 * For MT2701, disable double buffer before writing register
-	 * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
-	 */
-	if (!mdp->data->has_commit) {
-		mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
-					 mdp->data->bls_debug_mask,
-					 mdp->data->bls_debug_mask);
-		mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
-					 mdp->data->con0_sel,
-					 mdp->data->con0_sel);
-	}
-
 	return 0;
-
-disable_clk_mm:
-	clk_unprepare(mdp->clk_mm);
-disable_clk_main:
-	clk_unprepare(mdp->clk_main);
-	return ret;
 }
 
 static int mtk_disp_pwm_remove(struct platform_device *pdev)
 {
 	struct mtk_disp_pwm *mdp = platform_get_drvdata(pdev);
-	int ret;
 
-	ret = pwmchip_remove(&mdp->chip);
-	clk_unprepare(mdp->clk_mm);
-	clk_unprepare(mdp->clk_main);
+	pwmchip_remove(&mdp->chip);
 
-	return ret;
+	return 0;
}
 
 static const struct mtk_pwm_data mt2701_pwm_data = {
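All of the mtk-disp-pwm hunks converge on one rule: a sleepable path that needs a clock takes it with clk_prepare_enable() and every exit from that path drops exactly what it took with clk_disable_unprepare(), in reverse order, instead of splitting clk_prepare() into probe and clk_enable() into the fast path. A hedged kernel-style sketch of the two-clock version; the helper names are illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Take both clocks, undoing the first if the second fails. */
static int two_clocks_on(struct device *dev, struct clk *a, struct clk *b)
{
	int err;

	err = clk_prepare_enable(a);
	if (err < 0) {
		dev_err(dev, "Can't enable first clock: %pe\n", ERR_PTR(err));
		return err;
	}

	err = clk_prepare_enable(b);
	if (err < 0) {
		dev_err(dev, "Can't enable second clock: %pe\n", ERR_PTR(err));
		clk_disable_unprepare(a);	/* unwind in reverse order */
		return err;
	}

	return 0;
}

/* Release in the opposite order they were taken. */
static void two_clocks_off(struct clk *a, struct clk *b)
{
	clk_disable_unprepare(b);
	clk_disable_unprepare(a);
}

Keeping acquire and release inside the same function is also what lets the probe and remove paths above shrink: there is no longer any prepared-but-disabled state to unwind.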
@@ -23,7 +23,7 @@ static int meson_vrtc_read_time(struct device *dev, struct rtc_time *tm)
 	struct timespec64 time;
 
 	dev_dbg(dev, "%s\n", __func__);
-	ktime_get_raw_ts64(&time);
+	ktime_get_real_ts64(&time);
 	rtc_time64_to_tm(time.tv_sec, tm);
 
 	return 0;
@@ -96,7 +96,7 @@ static int __maybe_unused meson_vrtc_suspend(struct device *dev)
 	long alarm_secs;
 	struct timespec64 time;
 
-	ktime_get_raw_ts64(&time);
+	ktime_get_real_ts64(&time);
 	local_time = time.tv_sec;
 
 	dev_dbg(dev, "alarm_time = %lus, local_time=%lus\n",
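The swap matters because ktime_get_raw_ts64() reads a monotonic clock that counts from roughly boot and is never stepped by NTP or settimeofday(), while an RTC is expected to hold wall-clock time; ktime_get_real_ts64() is the in-kernel CLOCK_REALTIME reader. A userspace illustration of how far apart the two clock families sit (POSIX clock_gettime(), with CLOCK_MONOTONIC as a stand-in for the raw clock):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec real, mono;

	clock_gettime(CLOCK_REALTIME, &real);	/* wall clock: what an RTC stores */
	clock_gettime(CLOCK_MONOTONIC, &mono);	/* counts from ~boot, never stepped */

	printf("realtime : %lld s since the epoch\n", (long long)real.tv_sec);
	printf("monotonic: %lld s since boot-ish\n", (long long)mono.tv_sec);
	return 0;
}

A virtual RTC built on the raw clock would report a date of 1970 plus uptime, which is exactly the class of bug this hunk removes.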
@@ -25,6 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/rtc.h>
+#include <linux/rtc/rtc-omap.h>
 
 /*
 * The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
@@ -3000,7 +3000,7 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
 		return 0;
 	spin_lock_irq(&cqr->dq->lock);
 	req = (struct request *) cqr->callback_data;
-	blk_mq_requeue_request(req, false);
+	blk_mq_requeue_request(req, true);
 	spin_unlock_irq(&cqr->dq->lock);
 
 	return 0;
@@ -1026,7 +1026,12 @@ static int mtk_thermal_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	auxadc_base = of_iomap(auxadc, 0);
+	auxadc_base = devm_of_iomap(&pdev->dev, auxadc, 0, NULL);
+	if (IS_ERR(auxadc_base)) {
+		of_node_put(auxadc);
+		return PTR_ERR(auxadc_base);
+	}
+
 	auxadc_phys_base = of_get_phys_base(auxadc);
 
 	of_node_put(auxadc);
@@ -1042,7 +1047,12 @@ static int mtk_thermal_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	apmixed_base = of_iomap(apmixedsys, 0);
+	apmixed_base = devm_of_iomap(&pdev->dev, apmixedsys, 0, NULL);
+	if (IS_ERR(apmixed_base)) {
+		of_node_put(apmixedsys);
+		return PTR_ERR(apmixed_base);
+	}
+
 	apmixed_phys_base = of_get_phys_base(apmixedsys);
 
 	of_node_put(apmixedsys);
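The conversion leans on two properties of devm_of_iomap(): failures come back as ERR_PTR() values rather than NULL, and the mapping is torn down automatically when the probing device is released, so neither the error labels nor the remove path need an iounmap(). A hedged sketch of the calling convention; the helper and its caller here are illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Map index 0 of a referenced node with device-managed lifetime. */
static void __iomem *map_companion(struct device *dev, struct device_node *np)
{
	void __iomem *base = devm_of_iomap(dev, np, 0, NULL);

	if (IS_ERR(base))	/* ERR_PTR, not NULL, signals failure */
		return base;	/* caller propagates PTR_ERR(base) */

	/* no iounmap() anywhere: devres unmaps on driver detach */
	return base;
}

Note the hunks still call of_node_put() on every path: devres manages the mapping, not the node reference taken earlier by the lookup.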
@@ -218,6 +218,7 @@ static void afs_apply_status(struct afs_operation *op,
 			set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
 		}
 		change_size = true;
+		data_changed = true;
 	} else if (vnode->status.type == AFS_FTYPE_DIR) {
 		/* Expected directory change is handled elsewhere so
 		 * that we can locally edit the directory and save on a
@@ -3702,6 +3702,11 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
 	if (IS_ERR(sa))
 		return PTR_ERR(sa);
 
+	if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
 	if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
 		ret = mnt_want_write_file(file);
 		if (ret)
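This is the standard forward-compatibility guard for ioctl inputs: keep a mask of the flag bits the kernel understands and reject anything outside it with EOPNOTSUPP rather than silently ignoring it, so newer userspace can detect a missing feature. A standalone sketch of the check, with stand-in flag names:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define SCRUB_READONLY		(1ULL << 0)
#define SCRUB_SUPPORTED_FLAGS	(SCRUB_READONLY)	/* grows with new features */

static int check_scrub_flags(uint64_t flags)
{
	if (flags & ~SCRUB_SUPPORTED_FLAGS)
		return -EOPNOTSUPP;	/* unknown bit: refuse, don't ignore */
	return 0;
}

int main(void)
{
	printf("flags 0x1 -> %d\n", check_scrub_flags(0x1));	/* accepted: 0 */
	printf("flags 0x2 -> %d\n", check_scrub_flags(0x2));	/* rejected */
	return 0;
}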
@@ -5805,7 +5805,8 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
 	 * mapped - no physical clusters have been allocated, and the
 	 * file has no extents
 	 */
-	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
+	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
+	    ext4_has_inline_data(inode))
 		return 0;
 
 	/* search for the extent closest to the first block in the cluster */
@@ -67,6 +67,8 @@
 
 #define OPENOWNER_POOL_SIZE	8
 
+static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp);
+
 const nfs4_stateid zero_stateid = {
 	{ .data = { 0 } },
 	.type = NFS4_SPECIAL_STATEID_TYPE,
@@ -330,6 +332,8 @@ do_confirm:
 	status = nfs4_proc_create_session(clp, cred);
 	if (status != 0)
 		goto out;
+	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R))
+		nfs4_state_start_reclaim_reboot(clp);
 	nfs41_finish_session_reset(clp);
 	nfs_mark_client_ready(clp, NFS_CS_READY);
 out:
@@ -8331,7 +8331,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
 
-	u8         reserved_at_40[0x38];
+	u8         reserved_at_40[0x33];
+	u8         flow_counter_bulk_log_size[0x5];
 	u8         flow_counter_bulk[0x8];
 };
 
@@ -90,8 +90,7 @@ struct rpc_task {
 #endif
 	unsigned char		tk_priority : 2,/* Task priority */
 				tk_garb_retry : 2,
-				tk_cred_retry : 2,
-				tk_rebind_retry : 2;
+				tk_cred_retry : 2;
 };
 
 typedef void (*rpc_action)(struct rpc_task *);
@@ -142,6 +142,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
 			       void *data, size_t tag_size);
 void rpi_firmware_put(struct rpi_firmware *fw);
 struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+					   struct device_node *firmware_node);
 #else
 static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
 					void *data, size_t len)
@@ -160,6 +162,12 @@ static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware
 {
 	return NULL;
 }
+
+static inline struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+					struct device_node *firmware_node)
+{
+	return NULL;
+}
 #endif
 
 #endif /* __SOC_RASPBERRY_FIRMWARE_H__ */
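Together with the rpi-ts hunk earlier in this merge, the new devm_rpi_firmware_get() lets a consumer take a firmware reference that is dropped automatically when the device unbinds, and the stub keeps builds without the firmware driver compiling. A hedged sketch of a probe using it; the "firmware" phandle name and the surrounding driver are assumptions for illustration:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

static int example_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	struct rpi_firmware *fw;

	fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
	if (!fw_node)
		return -ENOENT;

	/* the reference is put for us when the device unbinds */
	fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	if (!fw)
		return -EPROBE_DEFER;	/* firmware driver not bound yet */

	return 0;
}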
@@ -181,6 +181,7 @@ struct btrfs_scrub_progress {
 };
 
 #define BTRFS_SCRUB_READONLY	1
+#define BTRFS_SCRUB_SUPPORTED_FLAGS	(BTRFS_SCRUB_READONLY)
 struct btrfs_ioctl_scrub_args {
 	__u64 devid;				/* in */
 	__u64 start;				/* in */
@@ -1967,9 +1967,6 @@ call_bind_status(struct rpc_task *task)
 			status = -EOPNOTSUPP;
 			break;
 		}
-		if (task->tk_rebind_retry == 0)
-			break;
-		task->tk_rebind_retry--;
 		rpc_delay(task, 3*HZ);
 		goto retry_timeout;
 	case -ENOBUFS:
@@ -796,7 +796,6 @@ rpc_init_task_statistics(struct rpc_task *task)
 	/* Initialize retry counters */
 	task->tk_garb_retry = 2;
 	task->tk_cred_retry = 2;
-	task->tk_rebind_retry = 2;
 
 	/* starting timestamp */
 	task->tk_start = ktime_get();
@@ -73,7 +73,7 @@ def print_cpu(hrtimer_bases, cpu, max_clock_bases):
     ts = cpus.per_cpu(tick_sched_ptr, cpu)
 
     text = "cpu: {}\n".format(cpu)
-    for i in xrange(max_clock_bases):
+    for i in range(max_clock_bases):
         text += "  clock {}:\n".format(i)
         text += print_base(cpu_base['clock_base'][i])
 
@@ -158,6 +158,8 @@ def pr_cpumask(mask):
     num_bytes = (nr_cpu_ids + 7) / 8
     buf = utils.read_memoryview(inf, bits, num_bytes).tobytes()
     buf = binascii.b2a_hex(buf)
+    if type(buf) is not str:
+        buf = buf.decode()
 
     chunks = []
     i = num_bytes
@@ -89,7 +89,10 @@ def get_target_endianness():
 
 
 def read_memoryview(inf, start, length):
-    return memoryview(inf.read_memory(start, length))
+    m = inf.read_memory(start, length)
+    if type(m) is memoryview:
+        return m
+    return memoryview(m)
 
 
 def read_u16(buffer, offset):