Commit d0320f75 authored by Joe Perches, committed by David S. Miller

drivers:net: Remove dma_alloc_coherent OOM messages



I believe these error messages are already logged
on allocation failure by warn_alloc_failed and so
get a dump_stack on OOM.

Remove the unnecessary additional error logging.
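
For context: dma_alloc_coherent() without __GFP_NOWARN typically bottoms
out in the page allocator, whose failure path (warn_alloc_failed() at the
time) already prints a rate-limited "page allocation failure" message and
a dump_stack(). An illustrative sketch of that behavior (simplified, not
the verbatim mm code):

        if (!(gfp_mask & __GFP_NOWARN)) {
                pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
                        current->comm, order, gfp_mask);
                dump_stack();   /* the backtrace the removed messages duplicated */
        }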

Around these deletions:

o Alignment neatening.
o Remove unnecessary casts of dma_alloc_coherent.
o Hoist assigns from ifs.
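
Taken together, the changes follow the pattern sketched below (identifiers
are illustrative, not taken from any single driver in this patch):

        /* Before: cast of dma_alloc_coherent()'s void * return, assignment
         * buried inside the if, and a redundant OOM message. */
        if ((priv->ring = (struct ring_desc *)
                        dma_alloc_coherent(dev, size, &priv->ring_dma,
                                           GFP_KERNEL)) == NULL) {
                dev_err(dev, "could not allocate descriptor memory\n");
                return -ENOMEM;
        }

        /* After: no cast, assignment hoisted out of the if, continuation
         * lines aligned to the open parenthesis, no duplicate message. */
        priv->ring = dma_alloc_coherent(dev, size, &priv->ring_dma,
                                        GFP_KERNEL);
        if (!priv->ring)
                return -ENOMEM;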
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 68c45a2d
@@ -1464,14 +1464,10 @@ static int greth_of_probe(struct platform_device *ofdev)
 	}
 
 	/* Allocate TX descriptor ring in coherent memory */
-	greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
-								   1024,
-								   &greth->tx_bd_base_phys,
-								   GFP_KERNEL);
-
+	greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+					       &greth->tx_bd_base_phys,
+					       GFP_KERNEL);
 	if (!greth->tx_bd_base) {
-		if (netif_msg_probe(greth))
-			dev_err(&dev->dev, "could not allocate descriptor memory.\n");
 		err = -ENOMEM;
 		goto error3;
 	}
@@ -1479,14 +1475,10 @@ static int greth_of_probe(struct platform_device *ofdev)
 	memset(greth->tx_bd_base, 0, 1024);
 
 	/* Allocate RX descriptor ring in coherent memory */
-	greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
-								   1024,
-								   &greth->rx_bd_base_phys,
-								   GFP_KERNEL);
-
+	greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+					       &greth->rx_bd_base_phys,
+					       GFP_KERNEL);
 	if (!greth->rx_bd_base) {
-		if (netif_msg_probe(greth))
-			dev_err(greth->dev, "could not allocate descriptor memory.\n");
 		err = -ENOMEM;
 		goto error4;
 	}
......
@@ -1373,10 +1373,9 @@ static int sparc_lance_probe_one(struct platform_device *op,
 			dma_alloc_coherent(&op->dev,
 					   sizeof(struct lance_init_block),
 					   &lp->init_block_dvma, GFP_ATOMIC);
-		if (!lp->init_block_mem) {
-			printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
+		if (!lp->init_block_mem)
 			goto fail;
-		}
+
 		lp->pio_buffer = 0;
 		lp->init_ring = lance_init_ring_dvma;
 		lp->rx = lance_rx_dvma;
......
@@ -386,20 +386,16 @@ static int mace_open(struct net_device *dev)
 	/* Allocate the DMA ring buffers */
 
 	mp->tx_ring = dma_alloc_coherent(mp->device,
-				N_TX_RING * MACE_BUFF_SIZE,
-				&mp->tx_ring_phys, GFP_KERNEL);
-	if (mp->tx_ring == NULL) {
-		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
+					 N_TX_RING * MACE_BUFF_SIZE,
+					 &mp->tx_ring_phys, GFP_KERNEL);
+	if (mp->tx_ring == NULL)
 		goto out1;
-	}
 
 	mp->rx_ring = dma_alloc_coherent(mp->device,
-				N_RX_RING * MACE_BUFF_SIZE,
-				&mp->rx_ring_phys, GFP_KERNEL);
-	if (mp->rx_ring == NULL) {
-		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
+					 N_RX_RING * MACE_BUFF_SIZE,
+					 &mp->rx_ring_phys, GFP_KERNEL);
+	if (mp->rx_ring == NULL)
 		goto out2;
-	}
 
 	mace_dma_off(dev);
......
@@ -864,7 +864,6 @@ static int bcm_enet_open(struct net_device *dev)
 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
 	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
 	if (!p) {
-		dev_err(kdev, "cannot allocate rx ring %u\n", size);
 		ret = -ENOMEM;
 		goto out_freeirq_tx;
 	}
@@ -877,7 +876,6 @@ static int bcm_enet_open(struct net_device *dev)
 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
 	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
 	if (!p) {
-		dev_err(kdev, "cannot allocate tx ring\n");
 		ret = -ENOMEM;
 		goto out_free_rx_ring;
 	}
......
@@ -47,22 +47,19 @@ static int at91ether_start(struct net_device *dev)
 	int i;
 
 	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
-					MAX_RX_DESCR * sizeof(struct macb_dma_desc),
-					&lp->rx_ring_dma, GFP_KERNEL);
-	if (!lp->rx_ring) {
-		netdev_err(dev, "unable to alloc rx ring DMA buffer\n");
+					 (MAX_RX_DESCR *
+					  sizeof(struct macb_dma_desc)),
+					 &lp->rx_ring_dma, GFP_KERNEL);
+	if (!lp->rx_ring)
 		return -ENOMEM;
-	}
 
 	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
-					MAX_RX_DESCR * MAX_RBUFF_SZ,
-					&lp->rx_buffers_dma, GFP_KERNEL);
+					    MAX_RX_DESCR * MAX_RBUFF_SZ,
+					    &lp->rx_buffers_dma, GFP_KERNEL);
 	if (!lp->rx_buffers) {
-		netdev_err(dev, "unable to alloc rx data DMA buffer\n");
 		dma_free_coherent(&lp->pdev->dev,
-				MAX_RX_DESCR * sizeof(struct macb_dma_desc),
-				lp->rx_ring, lp->rx_ring_dma);
+				  MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+				  lp->rx_ring, lp->rx_ring_dma);
 		lp->rx_ring = NULL;
 		return -ENOMEM;
 	}
......
@@ -236,17 +236,14 @@ static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	private->rx_buffer = dma_alloc_coherent(d, 8192,
 						&private->rx_dma_handle,
 						GFP_KERNEL);
-	if (private->rx_buffer == NULL) {
-		pr_err("%s: no memory for rx buffer\n", __func__);
+	if (private->rx_buffer == NULL)
 		goto rx_buf_fail;
-	}
+
 	private->tx_buffer = dma_alloc_coherent(d, 8192,
 						&private->tx_dma_handle,
 						GFP_KERNEL);
-	if (private->tx_buffer == NULL) {
-		pr_err("%s: no memory for tx buffer\n", __func__);
+	if (private->tx_buffer == NULL)
 		goto tx_buf_fail;
-	}
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
......
@@ -2667,10 +2667,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
 				    &cmd.dma, GFP_KERNEL);
-	if (!cmd.va) {
-		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+	if (!cmd.va)
 		return -ENOMEM;
-	}
 
 	spin_lock_bh(&adapter->mcc_lock);
......
@@ -719,10 +719,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
 	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
 					   &ddrdma_cmd.dma, GFP_KERNEL);
-	if (!ddrdma_cmd.va) {
-		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+	if (!ddrdma_cmd.va)
 		return -ENOMEM;
-	}
 
 	for (i = 0; i < 2; i++) {
 		ret = be_cmd_ddr_dma_test(adapter, pattern[i],
@@ -845,11 +843,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
 					   &eeprom_cmd.dma, GFP_KERNEL);
-	if (!eeprom_cmd.va) {
-		dev_err(&adapter->pdev->dev,
-			"Memory allocation failure. Could not read eeprom\n");
+	if (!eeprom_cmd.va)
 		return -ENOMEM;
-	}
 
 	status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
......
@@ -3464,11 +3464,9 @@ static int lancer_fw_download(struct be_adapter *adapter,
 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
 				+ LANCER_FW_DOWNLOAD_CHUNK;
 	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
-						&flash_cmd.dma, GFP_KERNEL);
+					  &flash_cmd.dma, GFP_KERNEL);
 	if (!flash_cmd.va) {
 		status = -ENOMEM;
-		dev_err(&adapter->pdev->dev,
-			"Memory allocation failure while flashing\n");
 		goto lancer_fw_exit;
 	}
@@ -3570,8 +3568,6 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
 					  &flash_cmd.dma, GFP_KERNEL);
 	if (!flash_cmd.va) {
 		status = -ENOMEM;
-		dev_err(&adapter->pdev->dev,
-			"Memory allocation failure while flashing\n");
 		goto be_fw_exit;
 	}
......
@@ -1594,11 +1594,9 @@ static int fec_enet_init(struct net_device *ndev)
 	/* Allocate memory for buffer descriptors. */
 	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
-			GFP_KERNEL);
-	if (!cbd_base) {
-		printk("FEC: allocate descriptor memory failed?\n");
+				      GFP_KERNEL);
+	if (!cbd_base)
 		return -ENOMEM;
-	}
 
 	spin_lock_init(&fep->hw_lock);
......
@@ -245,14 +245,13 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 	/* Allocate memory for the buffer descriptors */
 	vaddr = dma_alloc_coherent(dev,
-			sizeof(struct txbd8) * priv->total_tx_ring_size +
-			sizeof(struct rxbd8) * priv->total_rx_ring_size,
-			&addr, GFP_KERNEL);
-	if (!vaddr) {
-		netif_err(priv, ifup, ndev,
-			  "Could not allocate buffer descriptors!\n");
+				   (priv->total_tx_ring_size *
+				    sizeof(struct txbd8)) +
+				   (priv->total_rx_ring_size *
+				    sizeof(struct rxbd8)),
+				   &addr, GFP_KERNEL);
+	if (!vaddr)
 		return -ENOMEM;
-	}
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		tx_queue = priv->tx_queue[i];
......
@@ -637,13 +637,9 @@ static int mal_probe(struct platform_device *ofdev)
 	bd_size = sizeof(struct mal_descriptor) *
 		(NUM_TX_BUFF * mal->num_tx_chans +
 		 NUM_RX_BUFF * mal->num_rx_chans);
-	mal->bd_virt =
-		dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
-				   GFP_KERNEL);
+	mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+					  GFP_KERNEL);
 	if (mal->bd_virt == NULL) {
-		printk(KERN_ERR
-		       "mal%d: out of memory allocating RX/TX descriptors!\n",
-		       index);
 		err = -ENOMEM;
 		goto fail_unmap;
 	}
......
@@ -556,11 +556,9 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
 						rxq_entries;
 	adapter->rx_queue.queue_addr =
-	    dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
-			       &adapter->rx_queue.queue_dma, GFP_KERNEL);
-
+		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
+				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
 	if (!adapter->rx_queue.queue_addr) {
-		netdev_err(netdev, "unable to allocate rx queue pages\n");
 		rc = -ENOMEM;
 		goto err_out;
 	}
......
@@ -1516,8 +1516,6 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 	if (!txdr->desc) {
 setup_tx_desc_die:
 		vfree(txdr->buffer_info);
-		e_err(probe, "Unable to allocate memory for the Tx descriptor "
-		      "ring\n");
 		return -ENOMEM;
 	}
@@ -1707,10 +1705,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
 					GFP_KERNEL);
 	if (!rxdr->desc) {
-		e_err(probe, "Unable to allocate memory for the Rx descriptor "
-		      "ring\n");
 setup_rx_desc_die:
 		vfree(rxdr->buffer_info);
 		return -ENOMEM;
@@ -1729,8 +1724,6 @@ setup_rx_desc_die:
 	if (!rxdr->desc) {
 		dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
 				  olddma);
-		e_err(probe, "Unable to allocate memory for the Rx "
-		      "descriptor ring\n");
 		goto setup_rx_desc_die;
 	}
......
@@ -720,8 +720,6 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
 					GFP_KERNEL);
 	if (!txdr->desc) {
 		vfree(txdr->buffer_info);
-		netif_err(adapter, probe, adapter->netdev,
-			  "Unable to allocate transmit descriptor memory\n");
 		return -ENOMEM;
 	}
 	memset(txdr->desc, 0, txdr->size);
@@ -807,8 +805,6 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 	if (!rxdr->desc) {
 		vfree(rxdr->buffer_info);
-		netif_err(adapter, probe, adapter->netdev,
-			  "Unable to allocate receive descriptors\n");
 		return -ENOMEM;
 	}
 	memset(rxdr->desc, 0, rxdr->size);
......
@@ -2423,9 +2423,6 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
 					   &rx_ring->dma, GFP_KERNEL);
 
 	if (!rx_ring->desc) {
-		hw_dbg(&adapter->hw,
-		       "Unable to allocate memory for "
-		       "the receive descriptor ring\n");
 		vfree(rx_ring->rx_buffer_info);
 		rx_ring->rx_buffer_info = NULL;
 		goto alloc_failed;
@@ -1969,13 +1969,8 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
 					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
 					&rxq->descs_phys, GFP_KERNEL);
-	if (rxq->descs == NULL) {
-		netdev_err(pp->dev,
-			   "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
-			   rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
-			   rxq->size);
+	if (rxq->descs == NULL)
 		return -ENOMEM;
-	}
 
 	BUG_ON(rxq->descs !=
 	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
@@ -2029,13 +2024,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
 					txq->size * MVNETA_DESC_ALIGNED_SIZE,
 					&txq->descs_phys, GFP_KERNEL);
-	if (txq->descs == NULL) {
-		netdev_err(pp->dev,
-			   "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
-			   txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
-			   txq->size);
+	if (txq->descs == NULL)
 		return -ENOMEM;
-	}
 
 	/* Make sure descriptor address is cache line size aligned */
 	BUG_ON(txq->descs !=
......
@@ -1024,11 +1024,9 @@ static int rxq_init(struct net_device *dev)
 	pep->rx_desc_area_size = size;
 	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
 						 &pep->rx_desc_dma, GFP_KERNEL);
-	if (!pep->p_rx_desc_area) {
-		printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
-		       dev->name, size);
+	if (!pep->p_rx_desc_area)
 		goto out;
-	}
 	memset((void *)pep->p_rx_desc_area, 0, size);
+
 	/* initialize the next_desc_ptr links in the Rx descriptors ring */
 	p_rx_desc = pep->p_rx_desc_area;
@@ -1087,11 +1085,8 @@ static int txq_init(struct net_device *dev)
 	pep->tx_desc_area_size = size;
 	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
 						 &pep->tx_desc_dma, GFP_KERNEL);
-	if (!pep->p_tx_desc_area) {
-		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
-		       dev->name, size);
+	if (!pep->p_tx_desc_area)
 		goto out;
-	}
 	memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
 	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
 	p_tx_desc = pep->p_tx_desc_area;
......
@@ -1837,10 +1837,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
 		priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
 						      &priv->mfunc.vhcr_dma,
 						      GFP_KERNEL);
-		if (!priv->mfunc.vhcr) {
-			mlx4_err(dev, "Couldn't allocate VHCR.\n");
+		if (!priv->mfunc.vhcr)
 			goto err_hcr;
-		}
 	}
 
 	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
......
@@ -175,13 +175,13 @@ static int sonic_probe1(struct net_device *dev)
 	/* Allocate the entire chunk of memory for the descriptors.
 	   Note that this cannot cross a 64K boundary. */
-	if ((lp->descriptors = dma_alloc_coherent(lp->device,
-				SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
-				&lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
-		printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
-		       dev_name(lp->device));
+	lp->descriptors = dma_alloc_coherent(lp->device,
+					     SIZEOF_SONIC_DESC *
+					     SONIC_BUS_SCALE(lp->dma_bitmode),
+					     &lp->descriptors_laddr,
+					     GFP_KERNEL);
+	if (lp->descriptors == NULL)
 		goto out;
-	}
 
 	/* Now set up the pointers to point to the appropriate places */
 	lp->cda = lp->descriptors;
......