Mirror of https://github.com/ipxe/ipxe, synced 2026-02-14 02:31:26 +03:00
[dma] Record DMA device as part of DMA mapping if needed
Allow dma_unmap() to be called by code other than the DMA device driver itself.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
This commit is contained in:
@@ -504,8 +504,8 @@ int intel_create_ring ( struct intel_nic *intel, struct intel_ring *ring ) {
|
||||
* prevent any possible page-crossing errors due to hardware
|
||||
* errata.
|
||||
*/
|
||||
ring->desc = dma_alloc ( intel->dma, ring->len, ring->len,
|
||||
&ring->map );
|
||||
ring->desc = dma_alloc ( intel->dma, &ring->map, ring->len,
|
||||
ring->len );
|
||||
if ( ! ring->desc )
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -554,7 +554,7 @@ void intel_destroy_ring ( struct intel_nic *intel, struct intel_ring *ring ) {
|
||||
intel_reset_ring ( intel, ring->reg );
|
||||
|
||||
/* Free descriptor ring */
|
||||
dma_free ( intel->dma, ring->desc, ring->len, &ring->map );
|
||||
dma_free ( &ring->map, ring->desc, ring->len );
|
||||
ring->desc = NULL;
|
||||
ring->prod = 0;
|
||||
ring->cons = 0;
|
||||
@@ -584,7 +584,7 @@ void intel_refill_rx ( struct intel_nic *intel ) {
|
||||
assert ( intel->rx.iobuf[rx_idx] == NULL );
|
||||
|
||||
/* Allocate I/O buffer */
|
||||
iobuf = dma_alloc_rx_iob ( intel->dma, INTEL_RX_MAX_LEN, map );
|
||||
iobuf = dma_alloc_rx_iob ( intel->dma, map, INTEL_RX_MAX_LEN );
|
||||
if ( ! iobuf ) {
|
||||
/* Wait for next refill */
|
||||
break;
|
||||
@@ -630,7 +630,7 @@ void intel_flush ( struct intel_nic *intel ) {
|
||||
/* Discard unused receive buffers */
|
||||
for ( i = 0 ; i < INTEL_NUM_RX_DESC ; i++ ) {
|
||||
if ( intel->rx.iobuf[i] ) {
|
||||
dma_unmap ( intel->dma, &intel->rx.map[i] );
|
||||
dma_unmap ( &intel->rx.map[i] );
|
||||
free_iob ( intel->rx.iobuf[i] );
|
||||
}
|
||||
intel->rx.iobuf[i] = NULL;
|
||||
@@ -639,7 +639,7 @@ void intel_flush ( struct intel_nic *intel ) {
|
||||
/* Unmap incomplete transmit buffers */
|
||||
for ( i = intel->tx.ring.cons ; i != intel->tx.ring.prod ; i++ ) {
|
||||
tx_idx = ( i % INTEL_NUM_TX_DESC );
|
||||
dma_unmap ( intel->dma, &intel->tx.map[tx_idx] );
|
||||
dma_unmap ( &intel->tx.map[tx_idx] );
|
||||
}
|
||||
}
|
||||
|
||||
@@ -773,7 +773,7 @@ int intel_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
|
||||
map = &intel->tx.map[tx_idx];
|
||||
|
||||
/* Map I/O buffer */
|
||||
if ( ( rc = dma_map_tx_iob ( intel->dma, iobuf, map ) ) != 0 )
|
||||
if ( ( rc = dma_map_tx_iob ( intel->dma, map, iobuf ) ) != 0 )
|
||||
return rc;
|
||||
|
||||
/* Update producer index */
|
||||
@@ -822,7 +822,7 @@ void intel_poll_tx ( struct net_device *netdev ) {
|
||||
DBGC2 ( intel, "INTEL %p TX %d complete\n", intel, tx_idx );
|
||||
|
||||
/* Unmap I/O buffer */
|
||||
dma_unmap ( intel->dma, &intel->tx.map[tx_idx] );
|
||||
dma_unmap ( &intel->tx.map[tx_idx] );
|
||||
|
||||
/* Complete TX descriptor */
|
||||
netdev_tx_complete_next ( netdev );
|
||||
@@ -854,7 +854,7 @@ void intel_poll_rx ( struct net_device *netdev ) {
|
||||
return;
|
||||
|
||||
/* Unmap I/O buffer */
|
||||
dma_unmap ( intel->dma, &intel->rx.map[rx_idx] );
|
||||
dma_unmap ( &intel->rx.map[rx_idx] );
|
||||
|
||||
/* Populate I/O buffer */
|
||||
iobuf = intel->rx.iobuf[rx_idx];
|
||||
|
||||
@@ -136,9 +136,9 @@ int intelxl_msix_enable ( struct intelxl_nic *intelxl,
|
||||
int rc;
|
||||
|
||||
/* Map dummy target location */
|
||||
if ( ( rc = dma_map ( intelxl->dma, virt_to_phys ( &intelxl->msix.msg ),
|
||||
sizeof ( intelxl->msix.msg ), DMA_RX,
|
||||
&intelxl->msix.map ) ) != 0 ) {
|
||||
if ( ( rc = dma_map ( intelxl->dma, &intelxl->msix.map,
|
||||
virt_to_phys ( &intelxl->msix.msg ),
|
||||
sizeof ( intelxl->msix.msg ), DMA_RX ) ) != 0 ) {
|
||||
DBGC ( intelxl, "INTELXL %p could not map MSI-X target: %s\n",
|
||||
intelxl, strerror ( rc ) );
|
||||
goto err_map;
|
||||
@@ -162,7 +162,7 @@ int intelxl_msix_enable ( struct intelxl_nic *intelxl,
|
||||
|
||||
pci_msix_disable ( pci, &intelxl->msix.cap );
|
||||
err_enable:
|
||||
dma_unmap ( intelxl->dma, &intelxl->msix.map );
|
||||
dma_unmap ( &intelxl->msix.map );
|
||||
err_map:
|
||||
return rc;
|
||||
}
|
||||
@@ -183,7 +183,7 @@ void intelxl_msix_disable ( struct intelxl_nic *intelxl,
|
||||
pci_msix_disable ( pci, &intelxl->msix.cap );
|
||||
|
||||
/* Unmap dummy target location */
|
||||
dma_unmap ( intelxl->dma, &intelxl->msix.map );
|
||||
dma_unmap ( &intelxl->msix.map );
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
@@ -215,8 +215,8 @@ static int intelxl_alloc_admin ( struct intelxl_nic *intelxl,
|
||||
size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
|
||||
|
||||
/* Allocate admin queue */
|
||||
admin->buf = dma_alloc ( intelxl->dma, ( buf_len + len ),
|
||||
INTELXL_ALIGN, &admin->map );
|
||||
admin->buf = dma_alloc ( intelxl->dma, &admin->map, ( buf_len + len ),
|
||||
INTELXL_ALIGN );
|
||||
if ( ! admin->buf )
|
||||
return -ENOMEM;
|
||||
admin->desc = ( ( ( void * ) admin->buf ) + buf_len );
|
||||
@@ -291,13 +291,13 @@ static void intelxl_disable_admin ( struct intelxl_nic *intelxl,
|
||||
* @v intelxl Intel device
|
||||
* @v admin Admin queue
|
||||
*/
|
||||
static void intelxl_free_admin ( struct intelxl_nic *intelxl,
|
||||
static void intelxl_free_admin ( struct intelxl_nic *intelxl __unused,
|
||||
struct intelxl_admin *admin ) {
|
||||
size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
|
||||
size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
|
||||
|
||||
/* Free queue */
|
||||
dma_free ( intelxl->dma, admin->buf, ( buf_len + len ), &admin->map );
|
||||
dma_free ( &admin->map, admin->buf, ( buf_len + len ) );
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -945,8 +945,8 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
|
||||
int rc;
|
||||
|
||||
/* Allocate descriptor ring */
|
||||
ring->desc.raw = dma_alloc ( intelxl->dma, ring->len, INTELXL_ALIGN,
|
||||
&ring->map );
|
||||
ring->desc.raw = dma_alloc ( intelxl->dma, &ring->map, ring->len,
|
||||
INTELXL_ALIGN );
|
||||
if ( ! ring->desc.raw ) {
|
||||
rc = -ENOMEM;
|
||||
goto err_alloc;
|
||||
@@ -969,7 +969,7 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
|
||||
|
||||
return 0;
|
||||
|
||||
dma_free ( intelxl->dma, ring->desc.raw, ring->len, &ring->map );
|
||||
dma_free ( &ring->map, ring->desc.raw, ring->len );
|
||||
err_alloc:
|
||||
return rc;
|
||||
}
|
||||
@@ -980,11 +980,11 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
|
||||
* @v intelxl Intel device
|
||||
* @v ring Descriptor ring
|
||||
*/
|
||||
void intelxl_free_ring ( struct intelxl_nic *intelxl,
|
||||
void intelxl_free_ring ( struct intelxl_nic *intelxl __unused,
|
||||
struct intelxl_ring *ring ) {
|
||||
|
||||
/* Free descriptor ring */
|
||||
dma_free ( intelxl->dma, ring->desc.raw, ring->len, &ring->map );
|
||||
dma_free ( &ring->map, ring->desc.raw, ring->len );
|
||||
ring->desc.raw = NULL;
|
||||
}
|
||||
|
||||
@@ -1322,7 +1322,7 @@ static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
|
||||
assert ( intelxl->rx.iobuf[rx_idx] == NULL );
|
||||
|
||||
/* Allocate I/O buffer */
|
||||
iobuf = dma_alloc_rx_iob ( intelxl->dma, intelxl->mfs, map );
|
||||
iobuf = dma_alloc_rx_iob ( intelxl->dma, map, intelxl->mfs );
|
||||
if ( ! iobuf ) {
|
||||
/* Wait for next refill */
|
||||
break;
|
||||
@@ -1365,7 +1365,7 @@ void intelxl_flush ( struct intelxl_nic *intelxl ) {
|
||||
/* Discard any unused receive buffers */
|
||||
for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) {
|
||||
if ( intelxl->rx.iobuf[i] ) {
|
||||
dma_unmap ( intelxl->dma, &intelxl->rx.map[i] );
|
||||
dma_unmap ( &intelxl->rx.map[i] );
|
||||
free_iob ( intelxl->rx.iobuf[i] );
|
||||
}
|
||||
intelxl->rx.iobuf[i] = NULL;
|
||||
@@ -1374,7 +1374,7 @@ void intelxl_flush ( struct intelxl_nic *intelxl ) {
|
||||
/* Unmap incomplete transmit buffers */
|
||||
for ( i = intelxl->tx.ring.cons ; i != intelxl->tx.ring.prod ; i++ ) {
|
||||
tx_idx = ( i % INTELXL_TX_NUM_DESC );
|
||||
dma_unmap ( intelxl->dma, &intelxl->tx.map[tx_idx] );
|
||||
dma_unmap ( &intelxl->tx.map[tx_idx] );
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1516,7 +1516,7 @@ int intelxl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
|
||||
map = &intelxl->tx.map[tx_idx];
|
||||
|
||||
/* Map I/O buffer */
|
||||
if ( ( rc = dma_map_tx_iob ( intelxl->dma, iobuf, map ) ) != 0 )
|
||||
if ( ( rc = dma_map_tx_iob ( intelxl->dma, map, iobuf ) ) != 0 )
|
||||
return rc;
|
||||
|
||||
/* Update producer index */
|
||||
@@ -1564,7 +1564,7 @@ static void intelxl_poll_tx ( struct net_device *netdev ) {
|
||||
intelxl, tx_idx );
|
||||
|
||||
/* Unmap I/O buffer */
|
||||
dma_unmap ( intelxl->dma, &intelxl->tx.map[tx_idx] );
|
||||
dma_unmap ( &intelxl->tx.map[tx_idx] );
|
||||
|
||||
/* Complete TX descriptor */
|
||||
netdev_tx_complete_next ( netdev );
|
||||
@@ -1597,7 +1597,7 @@ static void intelxl_poll_rx ( struct net_device *netdev ) {
|
||||
return;
|
||||
|
||||
/* Unmap I/O buffer */
|
||||
dma_unmap ( intelxl->dma, &intelxl->rx.map[rx_idx] );
|
||||
dma_unmap ( &intelxl->rx.map[rx_idx] );
|
||||
|
||||
/* Populate I/O buffer */
|
||||
iobuf = intelxl->rx.iobuf[rx_idx];
|
||||
|
||||
@@ -514,7 +514,8 @@ static int realtek_create_buffer ( struct realtek_nic *rtl ) {
|
||||
return 0;
|
||||
|
||||
/* Allocate buffer */
|
||||
rxbuf->data = dma_alloc ( rtl->dma, len, RTL_RXBUF_ALIGN, &rxbuf->map );
|
||||
rxbuf->data = dma_alloc ( rtl->dma, &rxbuf->map, len,
|
||||
RTL_RXBUF_ALIGN );
|
||||
if ( ! rxbuf->data )
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -545,7 +546,7 @@ static void realtek_destroy_buffer ( struct realtek_nic *rtl ) {
|
||||
writel ( 0, rtl->regs + RTL_RBSTART );
|
||||
|
||||
/* Free buffer */
|
||||
dma_free ( rtl->dma, rxbuf->data, len, &rxbuf->map );
|
||||
dma_free ( &rxbuf->map, rxbuf->data, len );
|
||||
rxbuf->data = NULL;
|
||||
rxbuf->offset = 0;
|
||||
}
|
||||
@@ -566,8 +567,8 @@ static int realtek_create_ring ( struct realtek_nic *rtl,
|
||||
return 0;
|
||||
|
||||
/* Allocate descriptor ring */
|
||||
ring->desc = dma_alloc ( rtl->dma, ring->len, RTL_RING_ALIGN,
|
||||
&ring->map );
|
||||
ring->desc = dma_alloc ( rtl->dma, &ring->map, ring->len,
|
||||
RTL_RING_ALIGN );
|
||||
if ( ! ring->desc )
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -608,7 +609,7 @@ static void realtek_destroy_ring ( struct realtek_nic *rtl,
|
||||
writel ( 0, rtl->regs + ring->reg + 4 );
|
||||
|
||||
/* Free descriptor ring */
|
||||
dma_free ( rtl->dma, ring->desc, ring->len, &ring->map );
|
||||
dma_free ( &ring->map, ring->desc, ring->len );
|
||||
ring->desc = NULL;
|
||||
}
|
||||
|
||||
@@ -638,7 +639,7 @@ static void realtek_refill_rx ( struct realtek_nic *rtl ) {
|
||||
assert ( rtl->rx.iobuf[rx_idx] == NULL );
|
||||
|
||||
/* Allocate I/O buffer */
|
||||
iobuf = dma_alloc_rx_iob ( rtl->dma, RTL_RX_MAX_LEN, map );
|
||||
iobuf = dma_alloc_rx_iob ( rtl->dma, map, RTL_RX_MAX_LEN );
|
||||
if ( ! iobuf ) {
|
||||
/* Wait for next refill */
|
||||
return;
|
||||
@@ -748,7 +749,7 @@ static void realtek_close ( struct net_device *netdev ) {
|
||||
/* Discard any unused receive buffers */
|
||||
for ( i = 0 ; i < RTL_NUM_RX_DESC ; i++ ) {
|
||||
if ( rtl->rx.iobuf[i] ) {
|
||||
dma_unmap ( rtl->dma, &rtl->rx.map[i] );
|
||||
dma_unmap ( &rtl->rx.map[i] );
|
||||
free_iob ( rtl->rx.iobuf[i] );
|
||||
}
|
||||
rtl->rx.iobuf[i] = NULL;
|
||||
@@ -756,7 +757,7 @@ static void realtek_close ( struct net_device *netdev ) {
|
||||
|
||||
/* Unmap any incomplete transmit buffers */
|
||||
for ( i = rtl->tx.ring.cons ; i != rtl->tx.ring.prod ; i++ )
|
||||
dma_unmap ( rtl->dma, &rtl->tx.map[ i % RTL_NUM_TX_DESC ] );
|
||||
dma_unmap ( &rtl->tx.map[ i % RTL_NUM_TX_DESC ] );
|
||||
|
||||
/* Destroy transmit descriptor ring */
|
||||
realtek_destroy_ring ( rtl, &rtl->tx.ring );
|
||||
@@ -796,7 +797,7 @@ static int realtek_transmit ( struct net_device *netdev,
|
||||
iob_pad ( iobuf, ETH_ZLEN );
|
||||
|
||||
/* Map I/O buffer */
|
||||
if ( ( rc = dma_map_tx_iob ( rtl->dma, iobuf, map ) ) != 0 )
|
||||
if ( ( rc = dma_map_tx_iob ( rtl->dma, map, iobuf ) ) != 0 )
|
||||
return rc;
|
||||
address = dma ( map, iobuf->data );
|
||||
|
||||
@@ -870,7 +871,7 @@ static void realtek_poll_tx ( struct net_device *netdev ) {
|
||||
DBGC2 ( rtl, "REALTEK %p TX %d complete\n", rtl, tx_idx );
|
||||
|
||||
/* Unmap I/O buffer */
|
||||
dma_unmap ( rtl->dma, &rtl->tx.map[tx_idx] );
|
||||
dma_unmap ( &rtl->tx.map[tx_idx] );
|
||||
|
||||
/* Complete TX descriptor */
|
||||
rtl->tx.ring.cons++;
|
||||
@@ -964,7 +965,7 @@ static void realtek_poll_rx ( struct net_device *netdev ) {
|
||||
return;
|
||||
|
||||
/* Unmap buffer */
|
||||
dma_unmap ( rtl->dma, &rtl->rx.map[rx_idx] );
|
||||
dma_unmap ( &rtl->rx.map[rx_idx] );
|
||||
|
||||
/* Populate I/O buffer */
|
||||
iobuf = rtl->rx.iobuf[rx_idx];
|
||||
|
||||
Reference in New Issue
Block a user