[malloc] Rename malloc_dma() to malloc_phys()
The malloc_dma() function allocates memory with specified physical alignment, and is typically (though not exclusively) used to allocate memory for DMA. Rename to malloc_phys() to more closely match the functionality, and to create name space for functions that specifically allocate and map DMA-capable buffers.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
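As a concrete illustration of the renamed interface, below is a minimal sketch of a driver-style allocation using the new names. The ring length, alignment, and header path are assumptions for the example only, not part of this commit; the semantics (allocate a physically contiguous block with the requested physical alignment, and free it with the same size) are unchanged by the rename.

    #include <errno.h>
    #include <ipxe/malloc.h>   /* assumed header; declares malloc_phys() and free_phys() */

    #define EXAMPLE_RING_LEN   4096   /* hypothetical ring size */
    #define EXAMPLE_RING_ALIGN 4096   /* hypothetical alignment requirement */

    static void *example_ring;

    /* Allocate a descriptor ring with the required physical alignment */
    static int example_create_ring ( void ) {
            /* Previously: malloc_dma ( EXAMPLE_RING_LEN, EXAMPLE_RING_ALIGN ) */
            example_ring = malloc_phys ( EXAMPLE_RING_LEN, EXAMPLE_RING_ALIGN );
            if ( ! example_ring )
                    return -ENOMEM;
            return 0;
    }

    /* Free the descriptor ring; the length must match the allocation */
    static void example_destroy_ring ( void ) {
            /* Previously: free_dma ( example_ring, EXAMPLE_RING_LEN ) */
            free_phys ( example_ring, EXAMPLE_RING_LEN );
            example_ring = NULL;
    }

Note that several drivers in the diff below pass the block size itself as the alignment (for example igbvf and natsemi): a block whose alignment equals its size can never cross a boundary of that size, which is the guarantee those drivers rely on.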
@@ -249,7 +249,7 @@ static int a3c90x_setup_tx_ring(struct INF_3C90X *p)
 {
         DBGP("a3c90x_setup_tx_ring\n");
         p->tx_ring =
-            malloc_dma(TX_RING_SIZE * sizeof(struct TXD), TX_RING_ALIGN);
+            malloc_phys(TX_RING_SIZE * sizeof(struct TXD), TX_RING_ALIGN);

         if (!p->tx_ring) {
                 DBG("Could not allocate TX-ring\n");
@@ -304,7 +304,7 @@ static void a3c90x_free_tx_ring(struct INF_3C90X *p)
 {
         DBGP("a3c90x_free_tx_ring\n");

-        free_dma(p->tx_ring, TX_RING_SIZE * sizeof(struct TXD));
+        free_phys(p->tx_ring, TX_RING_SIZE * sizeof(struct TXD));
         p->tx_ring = NULL;
         /* io_buffers are free()ed by netdev_tx_complete[,_err]() */
 }
@@ -461,7 +461,7 @@ static int a3c90x_setup_rx_ring(struct INF_3C90X *p)
         DBGP("a3c90x_setup_rx_ring\n");

         p->rx_ring =
-            malloc_dma(RX_RING_SIZE * sizeof(struct RXD), RX_RING_ALIGN);
+            malloc_phys(RX_RING_SIZE * sizeof(struct RXD), RX_RING_ALIGN);

         if (!p->rx_ring) {
                 DBG("Could not allocate RX-ring\n");
@@ -491,7 +491,7 @@ static void a3c90x_free_rx_ring(struct INF_3C90X *p)
 {
         DBGP("a3c90x_free_rx_ring\n");

-        free_dma(p->rx_ring, RX_RING_SIZE * sizeof(struct RXD));
+        free_phys(p->rx_ring, RX_RING_SIZE * sizeof(struct RXD));
         p->rx_ring = NULL;
 }

@@ -877,7 +877,7 @@ ath5k_desc_alloc(struct ath5k_softc *sc)

         /* allocate descriptors */
         sc->desc_len = sizeof(struct ath5k_desc) * (ATH_TXBUF + ATH_RXBUF + 1);
-        sc->desc = malloc_dma(sc->desc_len, ATH5K_DESC_ALIGN);
+        sc->desc = malloc_phys(sc->desc_len, ATH5K_DESC_ALIGN);
         if (sc->desc == NULL) {
                 DBG("ath5k: can't allocate descriptors\n");
                 ret = -ENOMEM;
@@ -915,7 +915,7 @@ ath5k_desc_alloc(struct ath5k_softc *sc)
         return 0;

 err_free:
-        free_dma(sc->desc, sc->desc_len);
+        free_phys(sc->desc, sc->desc_len);
 err:
         sc->desc = NULL;
         return ret;
@@ -932,7 +932,7 @@ ath5k_desc_free(struct ath5k_softc *sc)
                 ath5k_rxbuf_free(sc, bf);

         /* Free memory associated with all descriptors */
-        free_dma(sc->desc, sc->desc_len);
+        free_phys(sc->desc, sc->desc_len);

         free(sc->bufptr);
         sc->bufptr = NULL;

@@ -223,7 +223,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
         }

         /* allocate descriptors */
-        dd->dd_desc = malloc_dma(dd->dd_desc_len, 16);
+        dd->dd_desc = malloc_phys(dd->dd_desc_len, 16);
         if (dd->dd_desc == NULL) {
                 error = -ENOMEM;
                 goto fail;
@@ -264,7 +264,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
         }
         return 0;
 fail2:
-        free_dma(dd->dd_desc, dd->dd_desc_len);
+        free_phys(dd->dd_desc, dd->dd_desc_len);
 fail:
         memset(dd, 0, sizeof(*dd));
         return error;
@@ -588,7 +588,7 @@ void ath_descdma_cleanup(struct ath_softc *sc __unused,
                          struct ath_descdma *dd,
                          struct list_head *head)
 {
-        free_dma(dd->dd_desc, dd->dd_desc_len);
+        free_phys(dd->dd_desc, dd->dd_desc_len);

         INIT_LIST_HEAD(head);
         free(dd->dd_bufptr);

@@ -370,7 +370,7 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
         atl1e_clean_rx_ring(adapter);

         if (adapter->ring_vir_addr) {
-                free_dma(adapter->ring_vir_addr, adapter->ring_size);
+                free_phys(adapter->ring_vir_addr, adapter->ring_size);
                 adapter->ring_vir_addr = NULL;
                 adapter->ring_dma = 0;
         }
@@ -405,7 +405,7 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
         /* real ring DMA buffer */

         size = adapter->ring_size;
-        adapter->ring_vir_addr = malloc_dma(adapter->ring_size, 32);
+        adapter->ring_vir_addr = malloc_phys(adapter->ring_size, 32);

         if (adapter->ring_vir_addr == NULL) {
                 DBG("atl1e: out of memory allocating %d bytes for %s ring\n",

@@ -436,7 +436,7 @@ static void b44_free_rx_ring(struct b44_private *bp)
                         free_iob(bp->rx_iobuf[i]);
                         bp->rx_iobuf[i] = NULL;
                 }
-                free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
+                free_phys(bp->rx, B44_RX_RING_LEN_BYTES);
                 bp->rx = NULL;
         }
 }
@@ -446,11 +446,11 @@ static int b44_init_rx_ring(struct b44_private *bp)
 {
         b44_free_rx_ring(bp);

-        bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
+        bp->rx = malloc_phys(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
         if (!bp->rx)
                 return -ENOMEM;
         if (!b44_address_ok(bp->rx)) {
-                free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
+                free_phys(bp->rx, B44_RX_RING_LEN_BYTES);
                 return -ENOTSUP;
         }

@@ -468,7 +468,7 @@ static int b44_init_rx_ring(struct b44_private *bp)
 static void b44_free_tx_ring(struct b44_private *bp)
 {
         if (bp->tx) {
-                free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
+                free_phys(bp->tx, B44_TX_RING_LEN_BYTES);
                 bp->tx = NULL;
         }
 }
@@ -478,11 +478,11 @@ static int b44_init_tx_ring(struct b44_private *bp)
 {
         b44_free_tx_ring(bp);

-        bp->tx = malloc_dma(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
+        bp->tx = malloc_phys(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
         if (!bp->tx)
                 return -ENOMEM;
         if (!b44_address_ok(bp->tx)) {
-                free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
+                free_phys(bp->tx, B44_TX_RING_LEN_BYTES);
                 return -ENOTSUP;
         }

@@ -495,39 +495,39 @@ void bnxt_free_mem ( struct bnxt *bp )
 {
         DBGP ( "%s\n", __func__ );
         if ( bp->nq.bd_virt ) {
-                free_dma ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
+                free_phys ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
                 bp->nq.bd_virt = NULL;
         }

         if ( bp->cq.bd_virt ) {
-                free_dma ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
+                free_phys ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
                 bp->cq.bd_virt = NULL;
         }

         if ( bp->rx.bd_virt ) {
-                free_dma ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
+                free_phys ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
                 bp->rx.bd_virt = NULL;
         }

         if ( bp->tx.bd_virt ) {
-                free_dma ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
+                free_phys ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
                 bp->tx.bd_virt = NULL;
         }

         if ( bp->hwrm_addr_dma ) {
-                free_dma ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
+                free_phys ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
                 bp->dma_addr_mapping = 0;
                 bp->hwrm_addr_dma = NULL;
         }

         if ( bp->hwrm_addr_resp ) {
-                free_dma ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
+                free_phys ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
                 bp->resp_addr_mapping = 0;
                 bp->hwrm_addr_resp = NULL;
         }

         if ( bp->hwrm_addr_req ) {
-                free_dma ( bp->hwrm_addr_req, REQ_BUFFER_SIZE );
+                free_phys ( bp->hwrm_addr_req, REQ_BUFFER_SIZE );
                 bp->req_addr_mapping = 0;
                 bp->hwrm_addr_req = NULL;
         }
@@ -537,14 +537,14 @@ void bnxt_free_mem ( struct bnxt *bp )
 int bnxt_alloc_mem ( struct bnxt *bp )
 {
         DBGP ( "%s\n", __func__ );
-        bp->hwrm_addr_req = malloc_dma ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
-        bp->hwrm_addr_resp = malloc_dma ( RESP_BUFFER_SIZE,
-                                          BNXT_DMA_ALIGNMENT );
-        bp->hwrm_addr_dma = malloc_dma ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
-        bp->tx.bd_virt = malloc_dma ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
-        bp->rx.bd_virt = malloc_dma ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
-        bp->cq.bd_virt = malloc_dma ( CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
-        bp->nq.bd_virt = malloc_dma ( NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+        bp->hwrm_addr_req = malloc_phys ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+        bp->hwrm_addr_resp = malloc_phys ( RESP_BUFFER_SIZE,
+                                           BNXT_DMA_ALIGNMENT );
+        bp->hwrm_addr_dma = malloc_phys ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+        bp->tx.bd_virt = malloc_phys ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
+        bp->rx.bd_virt = malloc_phys ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
+        bp->cq.bd_virt = malloc_phys ( CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+        bp->nq.bd_virt = malloc_phys ( NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
         test_if ( bp->hwrm_addr_req &&
                   bp->hwrm_addr_resp &&
                   bp->hwrm_addr_dma &&

@@ -93,7 +93,7 @@ FILE_LICENCE ( GPL2_OR_LATER );

 /*
  * Debugging levels:
- *     - DBG() is for any errors, i.e. failed alloc_iob(), malloc_dma(),
+ *     - DBG() is for any errors, i.e. failed alloc_iob(), malloc_phys(),
  *       TX overflow, corrupted packets, ...
  *     - DBG2() is for successful events, like packet received,
  *       packet transmitted, and other general notifications.
@@ -335,7 +335,7 @@ static int ifec_net_open ( struct net_device *netdev )
         ifec_mdio_setup ( netdev, options );

         /* Prepare MAC address w/ Individual Address Setup (ias) command.*/
-        ias = malloc_dma ( sizeof ( *ias ), CB_ALIGN );
+        ias = malloc_phys ( sizeof ( *ias ), CB_ALIGN );
         if ( !ias ) {
                 rc = -ENOMEM;
                 goto error;
@@ -345,7 +345,7 @@ static int ifec_net_open ( struct net_device *netdev )
         memcpy ( ias->ia, netdev->ll_addr, ETH_ALEN );

         /* Prepare operating parameters w/ a configure command. */
-        cfg = malloc_dma ( sizeof ( *cfg ), CB_ALIGN );
+        cfg = malloc_phys ( sizeof ( *cfg ), CB_ALIGN );
         if ( !cfg ) {
                 rc = -ENOMEM;
                 goto error;
@@ -367,8 +367,8 @@ static int ifec_net_open ( struct net_device *netdev )
                 DBG ( "Failed to initiate!\n" );
                 goto error;
         }
-        free_dma ( ias, sizeof ( *ias ) );
-        free_dma ( cfg, sizeof ( *cfg ) );
+        free_phys ( ias, sizeof ( *ias ) );
+        free_phys ( cfg, sizeof ( *cfg ) );
         DBG2 ( "cfg " );

         /* Enable rx by sending ring address to card */
@@ -381,8 +381,8 @@ static int ifec_net_open ( struct net_device *netdev )
         return 0;

 error:
-        free_dma ( cfg, sizeof ( *cfg ) );
-        free_dma ( ias, sizeof ( *ias ) );
+        free_phys ( cfg, sizeof ( *cfg ) );
+        free_phys ( ias, sizeof ( *ias ) );
         ifec_free ( netdev );
         ifec_reset ( netdev );
         return rc;
@@ -703,7 +703,7 @@ static void ifec_free ( struct net_device *netdev )
         }

         /* free TX ring buffer */
-        free_dma ( priv->tcbs, TX_RING_BYTES );
+        free_phys ( priv->tcbs, TX_RING_BYTES );

         priv->tcbs = NULL;
 }
@@ -1025,7 +1025,7 @@ static int ifec_tx_setup ( struct net_device *netdev )
         DBGP ( "ifec_tx_setup\n" );

         /* allocate tx ring */
-        priv->tcbs = malloc_dma ( TX_RING_BYTES, CB_ALIGN );
+        priv->tcbs = malloc_phys ( TX_RING_BYTES, CB_ALIGN );
         if ( !priv->tcbs ) {
                 DBG ( "TX-ring allocation failed\n" );
                 return -ENOMEM;

@@ -164,7 +164,7 @@ static int ena_create_admin ( struct ena_nic *ena ) {
         int rc;

         /* Allocate admin completion queue */
-        ena->acq.rsp = malloc_dma ( acq_len, acq_len );
+        ena->acq.rsp = malloc_phys ( acq_len, acq_len );
         if ( ! ena->acq.rsp ) {
                 rc = -ENOMEM;
                 goto err_alloc_acq;
@@ -172,7 +172,7 @@ static int ena_create_admin ( struct ena_nic *ena ) {
         memset ( ena->acq.rsp, 0, acq_len );

         /* Allocate admin queue */
-        ena->aq.req = malloc_dma ( aq_len, aq_len );
+        ena->aq.req = malloc_phys ( aq_len, aq_len );
         if ( ! ena->aq.req ) {
                 rc = -ENOMEM;
                 goto err_alloc_aq;
@@ -196,9 +196,9 @@ static int ena_create_admin ( struct ena_nic *ena ) {

         ena_clear_caps ( ena, ENA_AQ_CAPS );
         ena_clear_caps ( ena, ENA_ACQ_CAPS );
-        free_dma ( ena->aq.req, aq_len );
+        free_phys ( ena->aq.req, aq_len );
 err_alloc_aq:
-        free_dma ( ena->acq.rsp, acq_len );
+        free_phys ( ena->acq.rsp, acq_len );
 err_alloc_acq:
         return rc;
 }
@@ -218,8 +218,8 @@ static void ena_destroy_admin ( struct ena_nic *ena ) {
         wmb();

         /* Free queues */
-        free_dma ( ena->aq.req, aq_len );
-        free_dma ( ena->acq.rsp, acq_len );
+        free_phys ( ena->aq.req, aq_len );
+        free_phys ( ena->acq.rsp, acq_len );
         DBGC ( ena, "ENA %p AQ and ACQ destroyed\n", ena );
 }

@@ -338,7 +338,7 @@ static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq,
         int rc;

         /* Allocate submission queue entries */
-        sq->sqe.raw = malloc_dma ( sq->len, ENA_ALIGN );
+        sq->sqe.raw = malloc_phys ( sq->len, ENA_ALIGN );
         if ( ! sq->sqe.raw ) {
                 rc = -ENOMEM;
                 goto err_alloc;
@@ -375,7 +375,7 @@ static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq,
         return 0;

 err_admin:
-        free_dma ( sq->sqe.raw, sq->len );
+        free_phys ( sq->sqe.raw, sq->len );
 err_alloc:
         return rc;
 }
@@ -403,7 +403,7 @@ static int ena_destroy_sq ( struct ena_nic *ena, struct ena_sq *sq ) {
                 return rc;

         /* Free submission queue entries */
-        free_dma ( sq->sqe.raw, sq->len );
+        free_phys ( sq->sqe.raw, sq->len );

         DBGC ( ena, "ENA %p %s SQ%d destroyed\n",
                ena, ena_direction ( sq->direction ), sq->id );
@@ -423,7 +423,7 @@ static int ena_create_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
         int rc;

         /* Allocate completion queue entries */
-        cq->cqe.raw = malloc_dma ( cq->len, ENA_ALIGN );
+        cq->cqe.raw = malloc_phys ( cq->len, ENA_ALIGN );
         if ( ! cq->cqe.raw ) {
                 rc = -ENOMEM;
                 goto err_alloc;
@@ -461,7 +461,7 @@ static int ena_create_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
         return 0;

 err_admin:
-        free_dma ( cq->cqe.raw, cq->len );
+        free_phys ( cq->cqe.raw, cq->len );
 err_alloc:
         return rc;
 }
@@ -488,7 +488,7 @@ static int ena_destroy_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
                 return rc;

         /* Free completion queue entries */
-        free_dma ( cq->cqe.raw, cq->len );
+        free_phys ( cq->cqe.raw, cq->len );

         DBGC ( ena, "ENA %p CQ%d destroyed\n", ena, cq->id );
         return 0;

@@ -3025,7 +3025,7 @@ falcon_free_special_buffer ( void *p )
 {
         /* We don't bother cleaning up the buffer table entries -
          * we're hardly limited */
-        free_dma ( p, EFAB_BUF_ALIGN );
+        free_phys ( p, EFAB_BUF_ALIGN );
 }

 static void*
@@ -3038,7 +3038,7 @@ falcon_alloc_special_buffer ( struct efab_nic *efab, int bytes,
         unsigned long dma_addr;

         /* Allocate the buffer, aligned on a buffer address boundary */
-        buffer = malloc_dma ( bytes, EFAB_BUF_ALIGN );
+        buffer = malloc_phys ( bytes, EFAB_BUF_ALIGN );
         if ( ! buffer )
                 return NULL;

@@ -831,7 +831,7 @@ static int exanic_probe ( struct pci_device *pci ) {
         }

         /* Allocate transmit feedback region (shared between all ports) */
-        exanic->txf = malloc_dma ( EXANIC_TXF_LEN, EXANIC_ALIGN );
+        exanic->txf = malloc_phys ( EXANIC_TXF_LEN, EXANIC_ALIGN );
         if ( ! exanic->txf ) {
                 rc = -ENOMEM;
                 goto err_alloc_txf;
@@ -853,7 +853,7 @@ static int exanic_probe ( struct pci_device *pci ) {
         for ( i-- ; i >= 0 ; i-- )
                 exanic_remove_port ( exanic, i );
         exanic_reset ( exanic );
-        free_dma ( exanic->txf, EXANIC_TXF_LEN );
+        free_phys ( exanic->txf, EXANIC_TXF_LEN );
 err_alloc_txf:
         iounmap ( exanic->tx );
 err_ioremap_tx:
@@ -882,7 +882,7 @@ static void exanic_remove ( struct pci_device *pci ) {
         exanic_reset ( exanic );

         /* Free transmit feedback region */
-        free_dma ( exanic->txf, EXANIC_TXF_LEN );
+        free_phys ( exanic->txf, EXANIC_TXF_LEN );

         /* Unmap transmit region */
         iounmap ( exanic->tx );

@@ -267,7 +267,7 @@ nv_init_rings ( struct forcedeth_private *priv )

         /* Allocate ring for both TX and RX */
         priv->rx_ring =
-                malloc_dma ( sizeof(struct ring_desc) * RXTX_RING_SIZE, 32 );
+                malloc_phys ( sizeof(struct ring_desc) * RXTX_RING_SIZE, 32 );
         if ( ! priv->rx_ring )
                 goto err_malloc;
         priv->tx_ring = &priv->rx_ring[RX_RING_SIZE];
@@ -308,7 +308,7 @@ nv_free_rxtx_resources ( struct forcedeth_private *priv )

         DBGP ( "nv_free_rxtx_resources\n" );

-        free_dma ( priv->rx_ring, sizeof(struct ring_desc) * RXTX_RING_SIZE );
+        free_phys ( priv->rx_ring, sizeof(struct ring_desc) * RXTX_RING_SIZE );

         for ( i = 0; i < RX_RING_SIZE; i++ ) {
                 free_iob ( priv->rx_iobuf[i] );

@@ -343,7 +343,7 @@ static int icplus_create_ring ( struct icplus_nic *icp, struct icplus_ring *ring
         struct icplus_descriptor *next;

         /* Allocate descriptor ring */
-        ring->entry = malloc_dma ( len, ICP_ALIGN );
+        ring->entry = malloc_phys ( len, ICP_ALIGN );
         if ( ! ring->entry ) {
                 rc = -ENOMEM;
                 goto err_alloc;
@@ -369,7 +369,7 @@ static int icplus_create_ring ( struct icplus_nic *icp, struct icplus_ring *ring
                ( virt_to_bus ( ring->entry ) + len ) );
         return 0;

-        free_dma ( ring->entry, len );
+        free_phys ( ring->entry, len );
         ring->entry = NULL;
 err_alloc:
         return rc;
@@ -386,7 +386,7 @@ static void icplus_destroy_ring ( struct icplus_nic *icp __unused,
         size_t len = ( sizeof ( ring->entry[0] ) * ICP_NUM_DESC );

         /* Free descriptor ring */
-        free_dma ( ring->entry, len );
+        free_phys ( ring->entry, len );
         ring->entry = NULL;
 }

@@ -46,7 +46,7 @@ int igbvf_setup_tx_resources ( struct igbvf_adapter *adapter )

         /* Allocate transmit descriptor ring memory.
            It must not cross a 64K boundary because of hardware errata #23
-           so we use malloc_dma() requesting a 128 byte block that is
+           so we use malloc_phys() requesting a 128 byte block that is
            128 byte aligned. This should guarantee that the memory
            allocated will not cross a 64K boundary, because 128 is an
            even multiple of 65536 ( 65536 / 128 == 512 ), so all possible
@@ -55,7 +55,7 @@ int igbvf_setup_tx_resources ( struct igbvf_adapter *adapter )
         */

         adapter->tx_base =
-                malloc_dma ( adapter->tx_ring_size, adapter->tx_ring_size );
+                malloc_phys ( adapter->tx_ring_size, adapter->tx_ring_size );

         if ( ! adapter->tx_base ) {
                 return -ENOMEM;
@@ -78,7 +78,7 @@ void igbvf_free_tx_resources ( struct igbvf_adapter *adapter )
 {
         DBG ( "igbvf_free_tx_resources\n" );

-        free_dma ( adapter->tx_base, adapter->tx_ring_size );
+        free_phys ( adapter->tx_base, adapter->tx_ring_size );
 }

 /**
@@ -93,7 +93,7 @@ void igbvf_free_rx_resources ( struct igbvf_adapter *adapter )

         DBG ( "igbvf_free_rx_resources\n" );

-        free_dma ( adapter->rx_base, adapter->rx_ring_size );
+        free_phys ( adapter->rx_base, adapter->rx_ring_size );

         for ( i = 0; i < NUM_RX_DESC; i++ ) {
                 free_iob ( adapter->rx_iobuf[i] );
@@ -574,7 +574,7 @@ int igbvf_setup_rx_resources ( struct igbvf_adapter *adapter )
         */

         adapter->rx_base =
-                malloc_dma ( adapter->rx_ring_size, adapter->rx_ring_size );
+                malloc_phys ( adapter->rx_ring_size, adapter->rx_ring_size );

         if ( ! adapter->rx_base ) {
                 return -ENOMEM;

@@ -504,7 +504,7 @@ int intel_create_ring ( struct intel_nic *intel, struct intel_ring *ring ) {
          * prevent any possible page-crossing errors due to hardware
          * errata.
          */
-        ring->desc = malloc_dma ( ring->len, ring->len );
+        ring->desc = malloc_phys ( ring->len, ring->len );
         if ( ! ring->desc )
                 return -ENOMEM;

@@ -553,7 +553,7 @@ void intel_destroy_ring ( struct intel_nic *intel, struct intel_ring *ring ) {
         intel_reset_ring ( intel, ring->reg );

         /* Free descriptor ring */
-        free_dma ( ring->desc, ring->len );
+        free_phys ( ring->desc, ring->len );
         ring->desc = NULL;
         ring->prod = 0;
         ring->cons = 0;

@@ -195,7 +195,7 @@ static int intelxl_alloc_admin ( struct intelxl_nic *intelxl,
         size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );

         /* Allocate admin queue */
-        admin->buf = malloc_dma ( ( buf_len + len ), INTELXL_ALIGN );
+        admin->buf = malloc_phys ( ( buf_len + len ), INTELXL_ALIGN );
         if ( ! admin->buf )
                 return -ENOMEM;
         admin->desc = ( ( ( void * ) admin->buf ) + buf_len );
@@ -277,7 +277,7 @@ static void intelxl_free_admin ( struct intelxl_nic *intelxl __unused,
         size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );

         /* Free queue */
-        free_dma ( admin->buf, ( buf_len + len ) );
+        free_phys ( admin->buf, ( buf_len + len ) );
 }

 /**
@@ -926,7 +926,7 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
         int rc;

         /* Allocate descriptor ring */
-        ring->desc.raw = malloc_dma ( ring->len, INTELXL_ALIGN );
+        ring->desc.raw = malloc_phys ( ring->len, INTELXL_ALIGN );
         if ( ! ring->desc.raw ) {
                 rc = -ENOMEM;
                 goto err_alloc;
@@ -950,7 +950,7 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl,

         return 0;

-        free_dma ( ring->desc.raw, ring->len );
+        free_phys ( ring->desc.raw, ring->len );
 err_alloc:
         return rc;
 }
@@ -965,7 +965,7 @@ void intelxl_free_ring ( struct intelxl_nic *intelxl __unused,
                          struct intelxl_ring *ring ) {

         /* Free descriptor ring */
-        free_dma ( ring->desc.raw, ring->len );
+        free_phys ( ring->desc.raw, ring->len );
         ring->desc.raw = NULL;
 }

@@ -262,7 +262,7 @@ jme_free_tx_resources(struct jme_adapter *jme)
                        sizeof(struct io_buffer *) * jme->tx_ring_size);
                 free(txring->bufinf);
         }
-        free_dma(txring->desc, jme->tx_ring_size * TX_DESC_SIZE);
+        free_phys(txring->desc, jme->tx_ring_size * TX_DESC_SIZE);
         txring->desc = NULL;
         txring->dma = 0;
         txring->bufinf = NULL;
@@ -277,7 +277,7 @@ jme_alloc_tx_resources(struct jme_adapter *jme)
 {
         struct jme_ring *txring = &jme->txring;

-        txring->desc = malloc_dma(jme->tx_ring_size * TX_DESC_SIZE,
+        txring->desc = malloc_phys(jme->tx_ring_size * TX_DESC_SIZE,
                                   RING_DESC_ALIGN);
         if (!txring->desc) {
                 DBG("Can not allocate transmit ring descriptors.\n");
@@ -442,7 +442,7 @@ jme_free_rx_resources(struct jme_adapter *jme)
                 free(rxring->bufinf);
         }

-        free_dma(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE);
+        free_phys(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE);
         rxring->desc = NULL;
         rxring->dma = 0;
         rxring->bufinf = NULL;
@@ -458,7 +458,7 @@ jme_alloc_rx_resources(struct jme_adapter *jme)
         struct jme_ring *rxring = &jme->rxring;
         struct io_buffer **bufinf;

-        rxring->desc = malloc_dma(jme->rx_ring_size * RX_DESC_SIZE,
+        rxring->desc = malloc_phys(jme->rx_ring_size * RX_DESC_SIZE,
                                   RING_DESC_ALIGN);
         if (!rxring->desc) {
                 DBG("Can not allocate receive ring descriptors.\n");

@@ -66,7 +66,7 @@ FILE_LICENCE ( GPL2_ONLY );

 /*
  * Debugging levels:
- *     - DBG() is for any errors, i.e. failed alloc_iob(), malloc_dma(),
+ *     - DBG() is for any errors, i.e. failed alloc_iob(), malloc_phys(),
  *       TX overflow, corrupted packets, ...
  *     - DBG2() is for successful events, like packet received,
  *       packet transmitted, and other general notifications.
@@ -918,7 +918,7 @@ static void myri10ge_net_close ( struct net_device *netdev )

         /* Release DMAable memory. */

-        free_dma ( priv->dma, sizeof ( *priv->dma ) );
+        free_phys ( priv->dma, sizeof ( *priv->dma ) );

         /* Erase all state from the open. */

@@ -988,7 +988,7 @@ static int myri10ge_net_open ( struct net_device *netdev )

         /* Allocate cleared DMAable buffers. */

-        priv->dma = malloc_dma ( sizeof ( *priv->dma ) , 128 );
+        priv->dma = malloc_phys ( sizeof ( *priv->dma ) , 128 );
         if ( !priv->dma ) {
                 rc = -ENOMEM;
                 dbg = "DMA";
@@ -1152,7 +1152,7 @@ abort_with_receives_posted:
                 free_iob ( priv->receive_iob[priv->receives_posted] );
 abort_with_dma:
         /* Because the link is not up, we don't have to reset the NIC here. */
-        free_dma ( priv->dma, sizeof ( *priv->dma ) );
+        free_phys ( priv->dma, sizeof ( *priv->dma ) );
 abort_with_nothing:
         /* Erase all signs of the failed open. */
         memset ( priv, 0, sizeof ( *priv ) );

@@ -165,7 +165,7 @@ static int myson_create_ring ( struct myson_nic *myson,
         int rc;

         /* Allocate descriptor ring */
-        ring->desc = malloc_dma ( len, MYSON_RING_ALIGN );
+        ring->desc = malloc_phys ( len, MYSON_RING_ALIGN );
         if ( ! ring->desc ) {
                 rc = -ENOMEM;
                 goto err_alloc;
@@ -197,7 +197,7 @@ static int myson_create_ring ( struct myson_nic *myson,
         return 0;

 err_64bit:
-        free_dma ( ring->desc, len );
+        free_phys ( ring->desc, len );
         ring->desc = NULL;
 err_alloc:
         return rc;
@@ -217,7 +217,7 @@ static void myson_destroy_ring ( struct myson_nic *myson,
         writel ( 0, myson->regs + ring->reg );

         /* Free descriptor ring */
-        free_dma ( ring->desc, len );
+        free_phys ( ring->desc, len );
         ring->desc = NULL;
         ring->prod = 0;
         ring->cons = 0;

@@ -408,7 +408,7 @@ static int natsemi_create_ring ( struct natsemi_nic *natsemi,
          * ensure that it can't possibly cross the boundary of 32-bit
          * address space.
          */
-        ring->desc = malloc_dma ( len, len );
+        ring->desc = malloc_phys ( len, len );
         if ( ! ring->desc ) {
                 rc = -ENOMEM;
                 goto err_alloc;
@@ -454,7 +454,7 @@ static int natsemi_create_ring ( struct natsemi_nic *natsemi,
         return 0;

 err_64bit:
-        free_dma ( ring->desc, len );
+        free_phys ( ring->desc, len );
         ring->desc = NULL;
 err_alloc:
         return rc;
@@ -476,7 +476,7 @@ static void natsemi_destroy_ring ( struct natsemi_nic *natsemi,
         writel ( 0, natsemi->regs + ring->reg + 4 );

         /* Free descriptor ring */
-        free_dma ( ring->desc, len );
+        free_phys ( ring->desc, len );
         ring->desc = NULL;
         ring->prod = 0;
         ring->cons = 0;

@@ -338,7 +338,7 @@ static int netfront_create_ring ( struct netfront_nic *netfront,
         ring->id_cons = 0;

         /* Allocate and initialise shared ring */
-        ring->sring.raw = malloc_dma ( PAGE_SIZE, PAGE_SIZE );
+        ring->sring.raw = malloc_phys ( PAGE_SIZE, PAGE_SIZE );
         if ( ! ring->sring.raw ) {
                 rc = -ENOMEM;
                 goto err_alloc;
@@ -368,7 +368,7 @@ static int netfront_create_ring ( struct netfront_nic *netfront,
 err_write_num:
         xengrant_invalidate ( xen, ring->ref );
 err_permit_access:
-        free_dma ( ring->sring.raw, PAGE_SIZE );
+        free_phys ( ring->sring.raw, PAGE_SIZE );
 err_alloc:
         return rc;
 }
@@ -490,7 +490,7 @@ static void netfront_destroy_ring ( struct netfront_nic *netfront,
         xengrant_invalidate ( xen, ring->ref );

         /* Free page */
-        free_dma ( ring->sring.raw, PAGE_SIZE );
+        free_phys ( ring->sring.raw, PAGE_SIZE );
         ring->sring.raw = NULL;
 }

@@ -246,7 +246,7 @@ pcnet32_setup_rx_resources ( struct pcnet32_private *priv )
 {
         DBGP ( "pcnet32_setup_rx_resources\n" );

-        priv->rx_base = malloc_dma ( RX_RING_BYTES, RX_RING_ALIGN );
+        priv->rx_base = malloc_phys ( RX_RING_BYTES, RX_RING_ALIGN );

         DBG ( "priv->rx_base = %#08lx\n", virt_to_bus ( priv->rx_base ) );

@@ -270,7 +270,7 @@ pcnet32_free_rx_resources ( struct pcnet32_private *priv )

         DBGP ( "pcnet32_free_rx_resources\n" );

-        free_dma ( priv->rx_base, RX_RING_BYTES );
+        free_phys ( priv->rx_base, RX_RING_BYTES );

         for ( i = 0; i < RX_RING_SIZE; i++ ) {
                 free_iob ( priv->rx_iobuf[i] );
@@ -290,7 +290,7 @@ pcnet32_setup_tx_resources ( struct pcnet32_private *priv )
 {
         DBGP ( "pcnet32_setup_tx_resources\n" );

-        priv->tx_base = malloc_dma ( TX_RING_BYTES, TX_RING_ALIGN );
+        priv->tx_base = malloc_phys ( TX_RING_BYTES, TX_RING_ALIGN );

         if ( ! priv->tx_base ) {
                 return -ENOMEM;
@@ -312,7 +312,7 @@ pcnet32_free_tx_resources ( struct pcnet32_private *priv )
 {
         DBGP ( "pcnet32_free_tx_resources\n" );

-        free_dma ( priv->tx_base, TX_RING_BYTES );
+        free_phys ( priv->tx_base, TX_RING_BYTES );
 }

 static int

@@ -640,7 +640,7 @@ static int phantom_create_rx_ctx ( struct phantom_nic *phantom ) {
         int rc;

         /* Allocate context creation buffer */
-        buf = malloc_dma ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
+        buf = malloc_phys ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
         if ( ! buf ) {
                 rc = -ENOMEM;
                 goto out;
@@ -716,7 +716,7 @@ static int phantom_create_rx_ctx ( struct phantom_nic *phantom ) {
                phantom, phantom->sds_irq_mask_crb );

 out:
-        free_dma ( buf, sizeof ( *buf ) );
+        free_phys ( buf, sizeof ( *buf ) );
         return rc;
 }

@@ -765,7 +765,7 @@ static int phantom_create_tx_ctx ( struct phantom_nic *phantom ) {
         int rc;

         /* Allocate context creation buffer */
-        buf = malloc_dma ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
+        buf = malloc_phys ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
         if ( ! buf ) {
                 rc = -ENOMEM;
                 goto out;
@@ -821,7 +821,7 @@ static int phantom_create_tx_ctx ( struct phantom_nic *phantom ) {
                phantom, phantom->cds_producer_crb );

 out:
-        free_dma ( buf, sizeof ( *buf ) );
+        free_phys ( buf, sizeof ( *buf ) );
         return rc;
 }

@@ -1164,8 +1164,8 @@ static int phantom_open ( struct net_device *netdev ) {
         int rc;

         /* Allocate and zero descriptor rings */
-        phantom->desc = malloc_dma ( sizeof ( *(phantom->desc) ),
-                                     UNM_DMA_BUFFER_ALIGN );
+        phantom->desc = malloc_phys ( sizeof ( *(phantom->desc) ),
+                                      UNM_DMA_BUFFER_ALIGN );
         if ( ! phantom->desc ) {
                 rc = -ENOMEM;
                 goto err_alloc_desc;
@@ -1208,7 +1208,7 @@ static int phantom_open ( struct net_device *netdev ) {
 err_create_tx_ctx:
         phantom_destroy_rx_ctx ( phantom );
 err_create_rx_ctx:
-        free_dma ( phantom->desc, sizeof ( *(phantom->desc) ) );
+        free_phys ( phantom->desc, sizeof ( *(phantom->desc) ) );
         phantom->desc = NULL;
 err_alloc_desc:
         return rc;
@@ -1229,7 +1229,7 @@ static void phantom_close ( struct net_device *netdev ) {
         phantom_del_macaddr ( phantom, netdev->ll_broadcast );
         phantom_destroy_tx_ctx ( phantom );
         phantom_destroy_rx_ctx ( phantom );
-        free_dma ( phantom->desc, sizeof ( *(phantom->desc) ) );
+        free_phys ( phantom->desc, sizeof ( *(phantom->desc) ) );
         phantom->desc = NULL;

         /* Flush any uncompleted descriptors */

@@ -514,7 +514,7 @@ static int realtek_create_buffer ( struct realtek_nic *rtl ) {
                 return 0;

         /* Allocate buffer */
-        rtl->rx_buffer = malloc_dma ( len, RTL_RXBUF_ALIGN );
+        rtl->rx_buffer = malloc_phys ( len, RTL_RXBUF_ALIGN );
         if ( ! rtl->rx_buffer ) {
                 rc = -ENOMEM;
                 goto err_alloc;
@@ -539,7 +539,7 @@ static int realtek_create_buffer ( struct realtek_nic *rtl ) {
         return 0;

 err_64bit:
-        free_dma ( rtl->rx_buffer, len );
+        free_phys ( rtl->rx_buffer, len );
         rtl->rx_buffer = NULL;
 err_alloc:
         return rc;
@@ -561,7 +561,7 @@ static void realtek_destroy_buffer ( struct realtek_nic *rtl ) {
         writel ( 0, rtl->regs + RTL_RBSTART );

         /* Free buffer */
-        free_dma ( rtl->rx_buffer, len );
+        free_phys ( rtl->rx_buffer, len );
         rtl->rx_buffer = NULL;
         rtl->rx_offset = 0;
 }
@@ -582,7 +582,7 @@ static int realtek_create_ring ( struct realtek_nic *rtl,
                 return 0;

         /* Allocate descriptor ring */
-        ring->desc = malloc_dma ( ring->len, RTL_RING_ALIGN );
+        ring->desc = malloc_phys ( ring->len, RTL_RING_ALIGN );
         if ( ! ring->desc )
                 return -ENOMEM;

@@ -623,7 +623,7 @@ static void realtek_destroy_ring ( struct realtek_nic *rtl,
         writel ( 0, rtl->regs + ring->reg + 4 );

         /* Free descriptor ring */
-        free_dma ( ring->desc, ring->len );
+        free_phys ( ring->desc, ring->len );
         ring->desc = NULL;
 }

@@ -292,7 +292,7 @@ static int rhine_create_ring ( struct rhine_nic *rhn,
         unsigned int i;

         /* Allocate descriptors */
-        ring->desc = malloc_dma ( len, RHINE_RING_ALIGN );
+        ring->desc = malloc_phys ( len, RHINE_RING_ALIGN );
         if ( ! ring->desc )
                 return -ENOMEM;

@@ -328,7 +328,7 @@ static void rhine_destroy_ring ( struct rhine_nic *rhn,
         writel ( 0, rhn->regs + ring->reg );

         /* Free descriptor ring */
-        free_dma ( ring->desc, len );
+        free_phys ( ring->desc, len );
         ring->desc = NULL;
         ring->prod = 0;
         ring->cons = 0;

@@ -328,8 +328,8 @@ static int rtl818x_init_rx_ring(struct net80211_device *dev)
         struct rtl818x_rx_desc *entry;
         int i;

-        priv->rx_ring = malloc_dma(sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE,
-                                   RTL818X_RING_ALIGN);
+        priv->rx_ring = malloc_phys(sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE,
+                                    RTL818X_RING_ALIGN);
         priv->rx_ring_dma = virt_to_bus(priv->rx_ring);
         if (!priv->rx_ring) {
                 DBG("rtl818x %s: cannot allocate RX ring\n", dev->netdev->name);
@@ -364,7 +364,7 @@ static void rtl818x_free_rx_ring(struct net80211_device *dev)
                 priv->rx_buf[i] = NULL;
         }

-        free_dma(priv->rx_ring, sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE);
+        free_phys(priv->rx_ring, sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE);
         priv->rx_ring = NULL;
 }

@@ -373,8 +373,8 @@ static int rtl818x_init_tx_ring(struct net80211_device *dev)
         struct rtl818x_priv *priv = dev->priv;
         int i;

-        priv->tx_ring = malloc_dma(sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE,
-                                   RTL818X_RING_ALIGN);
+        priv->tx_ring = malloc_phys(sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE,
+                                    RTL818X_RING_ALIGN);
         priv->tx_ring_dma = virt_to_bus(priv->tx_ring);
         if (!priv->tx_ring) {
                 DBG("rtl818x %s: cannot allocate TX ring\n", dev->netdev->name);
@@ -402,7 +402,7 @@ static void rtl818x_free_tx_ring(struct net80211_device *dev)
                 priv->tx_buf[i] = NULL;
         }

-        free_dma(priv->tx_ring, sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE);
+        free_phys(priv->tx_ring, sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE);
         priv->tx_ring = NULL;
 }

@@ -37,7 +37,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

 void efx_hunt_free_special_buffer(void *buf, int bytes)
 {
-        free_dma(buf, bytes);
+        free_phys(buf, bytes);
 }

 static void *efx_hunt_alloc_special_buffer(int bytes,
@@ -50,7 +50,7 @@ static void *efx_hunt_alloc_special_buffer(int bytes,
          * buffer will be passed into an MC_CMD_INIT_*Q command to setup the
          * appropriate type of queue via MCDI.
          */
-        buffer = malloc_dma(bytes, EFX_BUF_ALIGN);
+        buffer = malloc_phys(bytes, EFX_BUF_ALIGN);
         if (!buffer)
                 return NULL;

@@ -552,7 +552,7 @@ static int sis190_open(struct net_device *dev)
         int rc;

         /* Allocate TX ring */
-        tp->TxDescRing = malloc_dma(TX_RING_BYTES, RING_ALIGNMENT);
+        tp->TxDescRing = malloc_phys(TX_RING_BYTES, RING_ALIGNMENT);
         if (!tp->TxDescRing) {
                 DBG("sis190: TX ring allocation failed\n");
                 rc = -ENOMEM;
@@ -561,7 +561,7 @@ static int sis190_open(struct net_device *dev)
         tp->tx_dma = cpu_to_le32(virt_to_bus(tp->TxDescRing));

         /* Allocate RX ring */
-        tp->RxDescRing = malloc_dma(RX_RING_BYTES, RING_ALIGNMENT);
+        tp->RxDescRing = malloc_phys(RX_RING_BYTES, RING_ALIGNMENT);
         if (!tp->RxDescRing) {
                 DBG("sis190: RX ring allocation failed\n");
                 rc = -ENOMEM;
@@ -600,8 +600,8 @@ static void sis190_free(struct net_device *dev)
         struct sis190_private *tp = netdev_priv(dev);
         int i;

-        free_dma(tp->TxDescRing, TX_RING_BYTES);
-        free_dma(tp->RxDescRing, RX_RING_BYTES);
+        free_phys(tp->TxDescRing, TX_RING_BYTES);
+        free_phys(tp->RxDescRing, RX_RING_BYTES);

         tp->TxDescRing = NULL;
         tp->RxDescRing = NULL;

@@ -1699,7 +1699,7 @@ void skge_free(struct net_device *dev)
         free(skge->tx_ring.start);
         skge->tx_ring.start = NULL;

-        free_dma(skge->mem, RING_SIZE);
+        free_phys(skge->mem, RING_SIZE);
         skge->mem = NULL;
         skge->dma = 0;
 }
@@ -1714,7 +1714,7 @@ static int skge_up(struct net_device *dev)

         DBG2(PFX "%s: enabling interface\n", dev->name);

-        skge->mem = malloc_dma(RING_SIZE, SKGE_RING_ALIGN);
+        skge->mem = malloc_phys(RING_SIZE, SKGE_RING_ALIGN);
         skge->dma = virt_to_bus(skge->mem);
         if (!skge->mem)
                 return -ENOMEM;

@@ -1112,10 +1112,10 @@ nomem:
 /* Free the le and ring buffers */
 static void sky2_free_rings(struct sky2_port *sky2)
 {
-        free_dma(sky2->rx_le, RX_LE_BYTES);
+        free_phys(sky2->rx_le, RX_LE_BYTES);
         free(sky2->rx_ring);

-        free_dma(sky2->tx_le, TX_RING_SIZE * sizeof(struct sky2_tx_le));
+        free_phys(sky2->tx_le, TX_RING_SIZE * sizeof(struct sky2_tx_le));
         free(sky2->tx_ring);

         sky2->tx_le = NULL;
@@ -1137,7 +1137,7 @@ static int sky2_up(struct net_device *dev)
         netdev_link_down(dev);

         /* must be power of 2 */
-        sky2->tx_le = malloc_dma(TX_RING_SIZE * sizeof(struct sky2_tx_le), TX_RING_ALIGN);
+        sky2->tx_le = malloc_phys(TX_RING_SIZE * sizeof(struct sky2_tx_le), TX_RING_ALIGN);
         sky2->tx_le_map = virt_to_bus(sky2->tx_le);
         if (!sky2->tx_le)
                 goto err_out;
@@ -1149,7 +1149,7 @@ static int sky2_up(struct net_device *dev)

         tx_init(sky2);

-        sky2->rx_le = malloc_dma(RX_LE_BYTES, RX_RING_ALIGN);
+        sky2->rx_le = malloc_phys(RX_LE_BYTES, RX_RING_ALIGN);
         sky2->rx_le_map = virt_to_bus(sky2->rx_le);
         if (!sky2->rx_le)
                 goto err_out;
@@ -2285,7 +2285,7 @@ static int sky2_probe(struct pci_device *pdev)
         }

         /* ring for status responses */
-        hw->st_le = malloc_dma(STATUS_LE_BYTES, STATUS_RING_ALIGN);
+        hw->st_le = malloc_phys(STATUS_LE_BYTES, STATUS_RING_ALIGN);
         if (!hw->st_le)
                 goto err_out_iounmap;
         hw->st_dma = virt_to_bus(hw->st_le);
@@ -2344,7 +2344,7 @@ err_out_free_netdev:
         netdev_put(dev);
 err_out_free_pci:
         sky2_write8(hw, B0_CTST, CS_RST_SET);
-        free_dma(hw->st_le, STATUS_LE_BYTES);
+        free_phys(hw->st_le, STATUS_LE_BYTES);
 err_out_iounmap:
         iounmap((void *)hw->regs);
 err_out_free_hw:
@@ -2373,7 +2373,7 @@ static void sky2_remove(struct pci_device *pdev)
         sky2_write8(hw, B0_CTST, CS_RST_SET);
         sky2_read8(hw, B0_CTST);

-        free_dma(hw->st_le, STATUS_LE_BYTES);
+        free_phys(hw->st_le, STATUS_LE_BYTES);

         for (i = hw->ports-1; i >= 0; --i) {
                 netdev_nullify(hw->dev[i]);

@@ -42,7 +42,7 @@ void tg3_rx_prodring_fini(struct tg3_rx_prodring_set *tpr)
 { DBGP("%s\n", __func__);

         if (tpr->rx_std) {
-                free_dma(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp));
+                free_phys(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp));
                 tpr->rx_std = NULL;
         }
 }
@@ -55,7 +55,7 @@ static void tg3_free_consistent(struct tg3 *tp)
 { DBGP("%s\n", __func__);

         if (tp->tx_ring) {
-                free_dma(tp->tx_ring, TG3_TX_RING_BYTES);
+                free_phys(tp->tx_ring, TG3_TX_RING_BYTES);
                 tp->tx_ring = NULL;
         }

@@ -63,7 +63,7 @@ static void tg3_free_consistent(struct tg3 *tp)
         tp->tx_buffers = NULL;

         if (tp->rx_rcb) {
-                free_dma(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp));
+                free_phys(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp));
                 tp->rx_rcb_mapping = 0;
                 tp->rx_rcb = NULL;
         }
@@ -71,7 +71,7 @@ static void tg3_free_consistent(struct tg3 *tp)
         tg3_rx_prodring_fini(&tp->prodring);

         if (tp->hw_status) {
-                free_dma(tp->hw_status, TG3_HW_STATUS_SIZE);
+                free_phys(tp->hw_status, TG3_HW_STATUS_SIZE);
                 tp->status_mapping = 0;
                 tp->hw_status = NULL;
         }
@@ -87,7 +87,7 @@ int tg3_alloc_consistent(struct tg3 *tp)
         struct tg3_hw_status *sblk;
         struct tg3_rx_prodring_set *tpr = &tp->prodring;

-        tp->hw_status = malloc_dma(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT);
+        tp->hw_status = malloc_phys(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT);
         if (!tp->hw_status) {
                 DBGC(tp->dev, "hw_status alloc failed\n");
                 goto err_out;
@@ -97,7 +97,7 @@ int tg3_alloc_consistent(struct tg3 *tp)
         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
         sblk = tp->hw_status;

-        tpr->rx_std = malloc_dma(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
+        tpr->rx_std = malloc_phys(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
         if (!tpr->rx_std) {
                 DBGC(tp->dev, "rx prodring alloc failed\n");
                 goto err_out;
@@ -109,7 +109,7 @@ int tg3_alloc_consistent(struct tg3 *tp)
         if (!tp->tx_buffers)
                 goto err_out;

-        tp->tx_ring = malloc_dma(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT);
+        tp->tx_ring = malloc_phys(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT);
         if (!tp->tx_ring)
                 goto err_out;
         tp->tx_desc_mapping = virt_to_bus(tp->tx_ring);
@@ -123,7 +123,7 @@ int tg3_alloc_consistent(struct tg3 *tp)

         tp->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;

-        tp->rx_rcb = malloc_dma(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
+        tp->rx_rcb = malloc_phys(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
         if (!tp->rx_rcb)
                 goto err_out;
         tp->rx_rcb_mapping = virt_to_bus(tp->rx_rcb);
@@ -541,7 +541,7 @@ static int tg3_test_dma(struct tg3 *tp)
         u32 *buf;
         int ret = 0;

-        buf = malloc_dma(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT);
+        buf = malloc_phys(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT);
         if (!buf) {
                 ret = -ENOMEM;
                 goto out_nofree;
@@ -708,7 +708,7 @@ static int tg3_test_dma(struct tg3 *tp)
         }

 out:
-        free_dma(buf, TEST_BUFFER_SIZE);
+        free_phys(buf, TEST_BUFFER_SIZE);
 out_nofree:
         return ret;
 }

@@ -320,7 +320,8 @@ static int velocity_alloc_rings ( struct velocity_nic *vlc ) {
         vlc->rx_prod = 0;
         vlc->rx_cons = 0;
         vlc->rx_commit = 0;
-        vlc->rx_ring = malloc_dma ( VELOCITY_RXDESC_SIZE, VELOCITY_RING_ALIGN );
+        vlc->rx_ring = malloc_phys ( VELOCITY_RXDESC_SIZE,
+                                     VELOCITY_RING_ALIGN );
         if ( ! vlc->rx_ring )
                 return -ENOMEM;

@@ -332,7 +333,8 @@ static int velocity_alloc_rings ( struct velocity_nic *vlc ) {
         /* Allocate TX descriptor ring */
         vlc->tx_prod = 0;
         vlc->tx_cons = 0;
-        vlc->tx_ring = malloc_dma ( VELOCITY_TXDESC_SIZE, VELOCITY_RING_ALIGN );
+        vlc->tx_ring = malloc_phys ( VELOCITY_TXDESC_SIZE,
+                                     VELOCITY_RING_ALIGN );
         if ( ! vlc->tx_ring ) {
                 rc = -ENOMEM;
                 goto err_tx_alloc;
@@ -356,7 +358,7 @@ static int velocity_alloc_rings ( struct velocity_nic *vlc ) {
         return 0;

 err_tx_alloc:
-        free_dma ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
+        free_phys ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
         return rc;
 }

@@ -482,7 +484,7 @@ static void velocity_close ( struct net_device *netdev ) {
         writew ( 0, vlc->regs + VELOCITY_RXDESCNUM );

         /* Destroy RX ring */
-        free_dma ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
+        free_phys ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
         vlc->rx_ring = NULL;
         vlc->rx_prod = 0;
         vlc->rx_cons = 0;
@@ -499,7 +501,7 @@ static void velocity_close ( struct net_device *netdev ) {
         writew ( 0, vlc->regs + VELOCITY_TXDESCNUM );

         /* Destroy TX ring */
-        free_dma ( vlc->tx_ring, VELOCITY_TXDESC_SIZE );
+        free_phys ( vlc->tx_ring, VELOCITY_TXDESC_SIZE );
         vlc->tx_ring = NULL;
         vlc->tx_prod = 0;
         vlc->tx_cons = 0;

@@ -465,7 +465,8 @@ static int vmxnet3_open ( struct net_device *netdev ) {
         int rc;

         /* Allocate DMA areas */
-        vmxnet->dma = malloc_dma ( sizeof ( *vmxnet->dma ), VMXNET3_DMA_ALIGN );
+        vmxnet->dma = malloc_phys ( sizeof ( *vmxnet->dma ),
+                                    VMXNET3_DMA_ALIGN );
         if ( ! vmxnet->dma ) {
                 DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n",
                        vmxnet );
@@ -542,7 +543,7 @@ static int vmxnet3_open ( struct net_device *netdev ) {
 err_activate:
         vmxnet3_flush_tx ( netdev );
         vmxnet3_flush_rx ( netdev );
-        free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
+        free_phys ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
 err_alloc_dma:
         return rc;
 }
@@ -559,7 +560,7 @@ static void vmxnet3_close ( struct net_device *netdev ) {
         vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
         vmxnet3_flush_tx ( netdev );
         vmxnet3_flush_rx ( netdev );
-        free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
+        free_phys ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
 }

 /** vmxnet3 net device operations */

@@ -624,10 +624,10 @@ __vxge_hw_ring_create(struct __vxge_hw_virtualpath *vpath,
         hldev = vpath->hldev;
         vp_id = vpath->vp_id;

-        ring->rxdl = malloc_dma(sizeof(struct __vxge_hw_ring_block),
+        ring->rxdl = malloc_phys(sizeof(struct __vxge_hw_ring_block),
                                 sizeof(struct __vxge_hw_ring_block));
         if (!ring->rxdl) {
-                vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
+                vxge_debug(VXGE_ERR, "%s:%d malloc_phys error\n",
                            __func__, __LINE__);
                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
                 goto exit;
@@ -667,7 +667,7 @@ enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_ring *ring)
         }

         if (ring->rxdl) {
-                free_dma(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
+                free_phys(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
                 ring->rxdl = NULL;
         }
         ring->rxd_offset = 0;
@@ -826,10 +826,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_virtualpath *vpath,
         fifo->tx_intr_num = (vpath->vp_id * VXGE_HW_MAX_INTR_PER_VP)
                             + VXGE_HW_VPATH_INTR_TX;

-        fifo->txdl = malloc_dma(sizeof(struct vxge_hw_fifo_txd)
+        fifo->txdl = malloc_phys(sizeof(struct vxge_hw_fifo_txd)
                                 * fifo->depth, fifo->depth);
         if (!fifo->txdl) {
-                vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
+                vxge_debug(VXGE_ERR, "%s:%d malloc_phys error\n",
                            __func__, __LINE__);
                 return VXGE_HW_ERR_OUT_OF_MEMORY;
         }
@@ -846,7 +846,7 @@ enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_fifo *fifo)
         vxge_trace();

         if (fifo->txdl)
-                free_dma(fifo->txdl,
+                free_phys(fifo->txdl,
                          sizeof(struct vxge_hw_fifo_txd) * fifo->depth);

         fifo->txdl = NULL;