Merge branches 'ib-mfd-arm-leds-5.2', 'ib-mfd-gpio-input-leds-power-5.2', 'ib-mfd-pinctrl-5.2-2' and 'ib-mfd-regulator-5.2', tag 'ib-mfd-arm-net-5.2' into ibs-for-mfd-merged

Immutable branch between MFD, ARM and Net due for the 5.2 merge window
Committed by Lee Jones on 2019-05-14 08:09:23 +01:00
1796 changed files with 25796 additions and 15286 deletions

View File

@@ -29,11 +29,13 @@
 /* Specific functions used for Ring mode */
 /* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+                                           int bfsize)
 {
-        p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
-                        << ERDES1_BUFFER2_SIZE_SHIFT)
-                   & ERDES1_BUFFER2_SIZE_MASK);
+        if (bfsize == BUF_SIZE_16KiB)
+                p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+                                << ERDES1_BUFFER2_SIZE_SHIFT)
+                           & ERDES1_BUFFER2_SIZE_MASK);
         if (end)
                 p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 }
 /* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
 {
-        p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
-                        << RDES1_BUFFER2_SIZE_SHIFT)
-                   & RDES1_BUFFER2_SIZE_MASK);
+        if (bfsize >= BUF_SIZE_2KiB) {
+                int bfsize2;
+                bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+                p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+                            & RDES1_BUFFER2_SIZE_MASK);
+        }
         if (end)
                 p->des1 |= cpu_to_le32(RDES1_END_RING);

View File

@@ -15,7 +15,7 @@
  * Adopted from dwmac-sti.c
  */
-#include <linux/mfd/syscon.h>
+#include <linux/mfd/altera-sysmgr.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_net.h>
@@ -114,7 +114,8 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
         dwmac->interface = of_get_phy_mode(np);
-        sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
+        sys_mgr_base_addr =
+                altr_sysmgr_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
         if (IS_ERR(sys_mgr_base_addr)) {
                 dev_info(dev, "No sysmgr-syscon node found\n");
                 return PTR_ERR(sys_mgr_base_addr);

View File

@@ -333,6 +333,9 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
          */
         dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev,
                                                         "stm32_pwr_wakeup");
+        if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER)
+                return -EPROBE_DEFER;
         if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) {
                 err = device_init_wakeup(&pdev->dev, true);
                 if (err) {

View File

@@ -296,7 +296,7 @@ exit:
 }
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                   int mode, int end)
+                                   int mode, int end, int bfsize)
 {
         dwmac4_set_rx_owner(p, disable_rx_ic);
 }

View File

@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                  int mode, int end)
+                                  int mode, int end, int bfsize)
 {
         dwxgmac2_set_rx_owner(p, disable_rx_ic);
 }

View File

@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
         if (unlikely(rdes0 & RDES0_OWN))
                 return dma_own;
+        if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+                stats->rx_length_errors++;
+                return discard_frame;
+        }
         if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
                 if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
                         x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
          * It doesn't match with the information reported into the databook.
          * At any rate, we need to understand if the CSUM hw computation is ok
          * and report this info to the upper layers. */
-        ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
-                                 !!(rdes0 & RDES0_FRAME_TYPE),
-                                 !!(rdes0 & ERDES0_RX_MAC_ADDR));
+        if (likely(ret == good_frame))
+                ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+                                         !!(rdes0 & RDES0_FRAME_TYPE),
+                                         !!(rdes0 & ERDES0_RX_MAC_ADDR));
         if (unlikely(rdes0 & RDES0_DRIBBLING))
                 x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                  int mode, int end)
+                                  int mode, int end, int bfsize)
 {
+        int bfsize1;
         p->des0 |= cpu_to_le32(RDES0_OWN);
-        p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+        bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+        p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
         if (mode == STMMAC_CHAIN_MODE)
                 ehn_desc_rx_set_on_chain(p);
         else
-                ehn_desc_rx_set_on_ring(p, end);
+                ehn_desc_rx_set_on_ring(p, end, bfsize);
         if (disable_rx_ic)
                 p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);

View File

@@ -33,7 +33,7 @@ struct dma_extended_desc;
 struct stmmac_desc_ops {
         /* DMA RX descriptor ring initialization */
         void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
-                        int end);
+                        int end, int bfsize);
         /* DMA TX descriptor ring initialization */
         void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
         /* Invoked by the xmit function to prepare the tx descriptor */

View File

@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
                 return dma_own;
         if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
-                pr_warn("%s: Oversized frame spanned multiple buffers\n",
-                        __func__);
                 stats->rx_length_errors++;
                 return discard_frame;
         }
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
-                               int end)
+                               int end, int bfsize)
 {
+        int bfsize1;
         p->des0 |= cpu_to_le32(RDES0_OWN);
-        p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+        bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+        p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
         if (mode == STMMAC_CHAIN_MODE)
                 ndesc_rx_set_on_chain(p, end);
         else
-                ndesc_rx_set_on_ring(p, end);
+                ndesc_rx_set_on_ring(p, end, bfsize);
         if (disable_rx_ic)
                 p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);

View File

@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
                 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                 stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
-                                STMMAC_RING_MODE, 1, false, skb->len);
+                                STMMAC_RING_MODE, 0, false, skb->len);
                 tx_q->tx_skbuff[entry] = NULL;
                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
@@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
                 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
-                                STMMAC_RING_MODE, 1, true, skb->len);
+                                STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
+                                skb->len);
         } else {
                 des2 = dma_map_single(priv->device, skb->data,
                                       nopaged_len, DMA_TO_DEVICE);
@@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
                 tx_q->tx_skbuff_dma[entry].is_jumbo = true;
                 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                 stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
-                                STMMAC_RING_MODE, 1, true, skb->len);
+                                STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
+                                skb->len);
         }
         tx_q->cur_tx = entry;
@@ -111,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
 static void refill_desc3(void *priv_ptr, struct dma_desc *p)
 {
-        struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+        struct stmmac_rx_queue *rx_q = priv_ptr;
+        struct stmmac_priv *priv = rx_q->priv_data;
         /* Fill DES3 in case of RING mode */
-        if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+        if (priv->dma_buf_sz == BUF_SIZE_16KiB)
                 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
 }

View File

@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
                 if (priv->extend_desc)
                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
                                         priv->use_riwt, priv->mode,
-                                        (i == DMA_RX_SIZE - 1));
+                                        (i == DMA_RX_SIZE - 1),
+                                        priv->dma_buf_sz);
                 else
                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
                                         priv->use_riwt, priv->mode,
-                                        (i == DMA_RX_SIZE - 1));
+                                        (i == DMA_RX_SIZE - 1),
+                                        priv->dma_buf_sz);
 }
 /**
@@ -2614,8 +2616,6 @@ static int stmmac_open(struct net_device *dev)
         u32 chan;
         int ret;
-        stmmac_check_ether_addr(priv);
         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
             priv->hw->pcs != STMMAC_PCS_TBI &&
             priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -3216,14 +3216,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
                                 csum_insertion, priv->mode, 1, last_segment,
                                 skb->len);
-                /* The own bit must be the latest setting done when prepare the
-                 * descriptor and then barrier is needed to make sure that
-                 * all is coherent before granting the DMA engine.
-                 */
-                wmb();
         } else {
                 stmmac_set_tx_owner(priv, first);
         }
+        /* The own bit must be the latest setting done when prepare the
+         * descriptor and then barrier is needed to make sure that
+         * all is coherent before granting the DMA engine.
+         */
+        wmb();
         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
         stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -3350,9 +3352,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
         struct stmmac_channel *ch = &priv->channel[queue];
-        unsigned int entry = rx_q->cur_rx;
+        unsigned int next_entry = rx_q->cur_rx;
         int coe = priv->hw->rx_csum;
-        unsigned int next_entry;
         unsigned int count = 0;
         bool xmac;
@@ -3370,10 +3371,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
         }
         while (count < limit) {
-                int status;
+                int entry, status;
                 struct dma_desc *p;
                 struct dma_desc *np;
+                entry = next_entry;
                 if (priv->extend_desc)
                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
                 else
@@ -3429,11 +3432,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                          * ignored
                          */
                        if (frame_len > priv->dma_buf_sz) {
-                                netdev_err(priv->dev,
-                                           "len %d larger than size (%d)\n",
-                                           frame_len, priv->dma_buf_sz);
+                                if (net_ratelimit())
+                                        netdev_err(priv->dev,
+                                                   "len %d larger than size (%d)\n",
+                                                   frame_len, priv->dma_buf_sz);
                                 priv->dev->stats.rx_length_errors++;
-                                break;
+                                continue;
                         }
                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3468,7 +3472,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                                         dev_warn(priv->device,
                                                  "packet dropped\n");
                                 priv->dev->stats.rx_dropped++;
-                                break;
+                                continue;
                         }
                         dma_sync_single_for_cpu(priv->device,
@@ -3488,11 +3492,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                 } else {
                         skb = rx_q->rx_skbuff[entry];
                         if (unlikely(!skb)) {
-                                netdev_err(priv->dev,
-                                           "%s: Inconsistent Rx chain\n",
-                                           priv->dev->name);
+                                if (net_ratelimit())
+                                        netdev_err(priv->dev,
+                                                   "%s: Inconsistent Rx chain\n",
+                                                   priv->dev->name);
                                 priv->dev->stats.rx_dropped++;
-                                break;
+                                continue;
                         }
                         prefetch(skb->data - NET_IP_ALIGN);
                         rx_q->rx_skbuff[entry] = NULL;
@@ -3527,7 +3532,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                         priv->dev->stats.rx_packets++;
                         priv->dev->stats.rx_bytes += frame_len;
                 }
-                entry = next_entry;
         }
         stmmac_rx_refill(priv, queue);
@@ -4297,6 +4301,8 @@ int stmmac_dvr_probe(struct device *device,
         if (ret)
                 goto error_hw_init;
+        stmmac_check_ether_addr(priv);
         /* Configure real RX and TX queues */
         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);

View File

@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
                 },
                 .driver_data = (void *)&galileo_stmmac_dmi_data,
         },
+        /*
+         * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
+         * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
+         * has only one pci network device while other asset tags are
+         * for IOT2040 which has two.
+         */
         {
                 .matches = {
                         DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
@@ -170,8 +176,6 @@
         {
                 .matches = {
                         DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
-                        DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
-                                        "6ES7647-0AA00-1YA2"),
                 },
                 .driver_data = (void *)&iot2040_stmmac_dmi_data,
         },