[malloc] Rename malloc_dma() to malloc_phys()
The malloc_dma() function allocates memory with specified physical alignment, and is typically (though not exclusively) used to allocate memory for DMA. Rename to malloc_phys() to more closely match the functionality, and to create name space for functions that specifically allocate and map DMA-capable buffers.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
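For context, a minimal usage sketch of the renamed pair (not part of this commit). The EXAMPLE_* constants and example_* functions are hypothetical, and the sketch assumes the usual <ipxe/malloc.h> and <string.h> headers:

/* Usage sketch: allocate and free a physically aligned descriptor
 * ring with the renamed API.  EXAMPLE_* and example_* names are
 * hypothetical, not part of this commit.
 */
#include <string.h>
#include <ipxe/malloc.h>

#define EXAMPLE_RING_SIZE 4096	/* hypothetical ring size */
#define EXAMPLE_RING_ALIGN 4096	/* hypothetical alignment requirement */

static void * example_create_ring ( void ) {
	void *ring;

	/* malloc_phys() returns memory with the requested physical
	 * alignment (or NULL), exactly as malloc_dma() did.
	 */
	ring = malloc_phys ( EXAMPLE_RING_SIZE, EXAMPLE_RING_ALIGN );
	if ( ! ring )
		return NULL;
	memset ( ring, 0, EXAMPLE_RING_SIZE );
	return ring;
}

static void example_destroy_ring ( void *ring ) {
	/* free_phys() must be passed the original allocation size */
	free_phys ( ring, EXAMPLE_RING_SIZE );
}

As the diff below shows, every caller passes the same size to free_phys() that it passed to malloc_phys().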
@@ -639,8 +639,8 @@ static int arbel_create_cq ( struct ib_device *ibdev,
 
 	/* Allocate completion queue itself */
 	arbel_cq->cqe_size = ( cq->num_cqes * sizeof ( arbel_cq->cqe[0] ) );
-	arbel_cq->cqe = malloc_dma ( arbel_cq->cqe_size,
-				     sizeof ( arbel_cq->cqe[0] ) );
+	arbel_cq->cqe = malloc_phys ( arbel_cq->cqe_size,
+				      sizeof ( arbel_cq->cqe[0] ) );
 	if ( ! arbel_cq->cqe ) {
 		rc = -ENOMEM;
 		goto err_cqe;
@@ -697,7 +697,7 @@ static int arbel_create_cq ( struct ib_device *ibdev,
 err_sw2hw_cq:
 	MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
 	MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );
-	free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
+	free_phys ( arbel_cq->cqe, arbel_cq->cqe_size );
 err_cqe:
 	free ( arbel_cq );
 err_arbel_cq:
@@ -737,7 +737,7 @@ static void arbel_destroy_cq ( struct ib_device *ibdev,
 	MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );
 
 	/* Free memory */
-	free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
+	free_phys ( arbel_cq->cqe, arbel_cq->cqe_size );
 	free ( arbel_cq );
 
 	/* Mark queue number as free */
@@ -873,8 +873,8 @@ static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
 	/* Allocate work queue */
 	arbel_send_wq->wqe_size = ( num_wqes *
 				    sizeof ( arbel_send_wq->wqe[0] ) );
-	arbel_send_wq->wqe = malloc_dma ( arbel_send_wq->wqe_size,
-					  sizeof ( arbel_send_wq->wqe[0] ) );
+	arbel_send_wq->wqe = malloc_phys ( arbel_send_wq->wqe_size,
+					   sizeof ( arbel_send_wq->wqe[0] ) );
 	if ( ! arbel_send_wq->wqe )
 		return -ENOMEM;
 	memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size );
@@ -914,8 +914,8 @@ static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
 	/* Allocate work queue */
 	arbel_recv_wq->wqe_size = ( num_wqes *
 				    sizeof ( arbel_recv_wq->wqe[0] ) );
-	arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
-					  sizeof ( arbel_recv_wq->wqe[0] ) );
+	arbel_recv_wq->wqe = malloc_phys ( arbel_recv_wq->wqe_size,
+					   sizeof ( arbel_recv_wq->wqe[0] ) );
 	if ( ! arbel_recv_wq->wqe ) {
 		rc = -ENOMEM;
 		goto err_alloc_wqe;
@@ -927,8 +927,8 @@ static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
 	     ( type == IB_QPT_UD ) ) {
 		arbel_recv_wq->grh_size = ( num_wqes *
 					    sizeof ( arbel_recv_wq->grh[0] ) );
-		arbel_recv_wq->grh = malloc_dma ( arbel_recv_wq->grh_size,
-						  sizeof ( void * ) );
+		arbel_recv_wq->grh = malloc_phys ( arbel_recv_wq->grh_size,
+						   sizeof ( void * ) );
 		if ( ! arbel_recv_wq->grh ) {
 			rc = -ENOMEM;
 			goto err_alloc_grh;
@@ -954,9 +954,9 @@ static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
 
 	return 0;
 
-	free_dma ( arbel_recv_wq->grh, arbel_recv_wq->grh_size );
+	free_phys ( arbel_recv_wq->grh, arbel_recv_wq->grh_size );
 err_alloc_grh:
-	free_dma ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size );
+	free_phys ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size );
 err_alloc_wqe:
 	return rc;
 }
@@ -1102,10 +1102,10 @@ static int arbel_create_qp ( struct ib_device *ibdev,
 	MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
 	MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
 err_unsupported_address_split:
-	free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
-	free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
+	free_phys ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
+	free_phys ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
 err_create_recv_wq:
-	free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
+	free_phys ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
 err_create_send_wq:
 	free ( arbel_qp );
 err_arbel_qp:
@@ -1231,9 +1231,9 @@ static void arbel_destroy_qp ( struct ib_device *ibdev,
 	MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
 
 	/* Free memory */
-	free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
-	free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
-	free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
+	free_phys ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
+	free_phys ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
+	free_phys ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
 	free ( arbel_qp );
 
 	/* Mark queue number as free */
@@ -1758,8 +1758,8 @@ static int arbel_create_eq ( struct arbel *arbel ) {
 	/* Allocate event queue itself */
 	arbel_eq->eqe_size =
 		( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) );
-	arbel_eq->eqe = malloc_dma ( arbel_eq->eqe_size,
-				     sizeof ( arbel_eq->eqe[0] ) );
+	arbel_eq->eqe = malloc_phys ( arbel_eq->eqe_size,
+				      sizeof ( arbel_eq->eqe[0] ) );
 	if ( ! arbel_eq->eqe ) {
 		rc = -ENOMEM;
 		goto err_eqe;
@@ -1806,7 +1806,7 @@ static int arbel_create_eq ( struct arbel *arbel ) {
 err_map_eq:
 	arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn, &eqctx );
 err_sw2hw_eq:
-	free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+	free_phys ( arbel_eq->eqe, arbel_eq->eqe_size );
 err_eqe:
 	memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
 	return rc;
@@ -1844,7 +1844,7 @@ static void arbel_destroy_eq ( struct arbel *arbel ) {
 	}
 
 	/* Free memory */
-	free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+	free_phys ( arbel_eq->eqe, arbel_eq->eqe_size );
 	memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
 }
 
@@ -2455,7 +2455,7 @@ static int arbel_alloc_icm ( struct arbel *arbel,
 	icm_phys = user_to_phys ( arbel->icm, 0 );
 
 	/* Allocate doorbell UAR */
-	arbel->db_rec = malloc_dma ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
+	arbel->db_rec = malloc_phys ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
 	if ( ! arbel->db_rec ) {
 		rc = -ENOMEM;
 		goto err_alloc_doorbell;
@@ -2513,7 +2513,7 @@ static int arbel_alloc_icm ( struct arbel *arbel,
 err_map_icm:
 	arbel_cmd_unmap_icm_aux ( arbel );
 err_map_icm_aux:
-	free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
+	free_phys ( arbel->db_rec, ARBEL_PAGE_SIZE );
 	arbel->db_rec = NULL;
 err_alloc_doorbell:
 err_alloc_icm:
@@ -2536,7 +2536,7 @@ static void arbel_free_icm ( struct arbel *arbel ) {
 	arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
 			      &unmap_icm );
 	arbel_cmd_unmap_icm_aux ( arbel );
-	free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
+	free_phys ( arbel->db_rec, ARBEL_PAGE_SIZE );
 	arbel->db_rec = NULL;
 }
 
@@ -2984,18 +2984,18 @@ static struct arbel * arbel_alloc ( void ) {
 		goto err_arbel;
 
 	/* Allocate space for mailboxes */
-	arbel->mailbox_in = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
+	arbel->mailbox_in = malloc_phys ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
 	if ( ! arbel->mailbox_in )
 		goto err_mailbox_in;
-	arbel->mailbox_out = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
+	arbel->mailbox_out = malloc_phys ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
 	if ( ! arbel->mailbox_out )
 		goto err_mailbox_out;
 
 	return arbel;
 
-	free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
+	free_phys ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
 err_mailbox_out:
-	free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
+	free_phys ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
 err_mailbox_in:
 	free ( arbel );
 err_arbel:
@@ -3011,8 +3011,8 @@ static void arbel_free ( struct arbel *arbel ) {
 
 	ufree ( arbel->icm );
 	ufree ( arbel->firmware_area );
-	free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
-	free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
+	free_phys ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
+	free_phys ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
 	free ( arbel );
 }
 
@@ -585,9 +585,9 @@ static inline int golan_set_access_reg ( struct golan *golan __attribute__ (( un
 
 static inline void golan_cmd_uninit ( struct golan *golan )
 {
-	free_dma(golan->mboxes.outbox, GOLAN_PAGE_SIZE);
-	free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
-	free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
+	free_phys(golan->mboxes.outbox, GOLAN_PAGE_SIZE);
+	free_phys(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
+	free_phys(golan->cmd.addr, GOLAN_PAGE_SIZE);
 }
 
 /**
@@ -602,17 +602,17 @@ static inline int golan_cmd_init ( struct golan *golan )
 	int rc = 0;
 	uint32_t addr_l_sz;
 
-	if (!(golan->cmd.addr = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+	if (!(golan->cmd.addr = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
 		rc = -ENOMEM;
-		goto malloc_dma_failed;
+		goto malloc_phys_failed;
 	}
-	if (!(golan->mboxes.inbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+	if (!(golan->mboxes.inbox = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
 		rc = -ENOMEM;
-		goto malloc_dma_inbox_failed;
+		goto malloc_phys_inbox_failed;
 	}
-	if (!(golan->mboxes.outbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+	if (!(golan->mboxes.outbox = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
 		rc = -ENOMEM;
-		goto malloc_dma_outbox_failed;
+		goto malloc_phys_outbox_failed;
 	}
 	addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));
 
@@ -629,11 +629,11 @@ static inline int golan_cmd_init ( struct golan *golan )
 	DBGC( golan , "%s Command interface was initialized\n", __FUNCTION__);
 	return 0;
 
-malloc_dma_outbox_failed:
-	free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
-malloc_dma_inbox_failed:
-	free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
-malloc_dma_failed:
+malloc_phys_outbox_failed:
+	free_phys(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
+malloc_phys_inbox_failed:
+	free_phys(golan->cmd.addr, GOLAN_PAGE_SIZE);
+malloc_phys_failed:
 	DBGC (golan ,"%s Failed to initialize command interface (rc = 0x%x)\n",
 	      __FUNCTION__, rc);
 	return rc;
@@ -743,7 +743,7 @@ static int golan_create_eq(struct golan *golan)
 
 	eq->cons_index = 0;
 	eq->size = GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
-	eq->eqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+	eq->eqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
 	if (!eq->eqes) {
 		rc = -ENOMEM;
 		goto err_create_eq_eqe_alloc;
@@ -781,7 +781,7 @@ static int golan_create_eq(struct golan *golan)
 	return 0;
 
 err_create_eq_cmd:
-	free_dma ( eq->eqes , GOLAN_PAGE_SIZE );
+	free_phys ( eq->eqes , GOLAN_PAGE_SIZE );
 err_create_eq_eqe_alloc:
 	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
 	return rc;
@@ -806,7 +806,7 @@ static void golan_destory_eq(struct golan *golan)
 	rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
 	GOLAN_PRINT_RC_AND_CMD_STATUS;
 
-	free_dma ( golan->eq.eqes , GOLAN_PAGE_SIZE );
+	free_phys ( golan->eq.eqes , GOLAN_PAGE_SIZE );
 	golan->eq.eqn = 0;
 
 	DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
@@ -962,14 +962,14 @@ static int golan_create_cq(struct ib_device *ibdev,
 		goto err_create_cq;
 	}
 	golan_cq->size = sizeof(golan_cq->cqes[0]) * cq->num_cqes;
-	golan_cq->doorbell_record = malloc_dma(GOLAN_CQ_DB_RECORD_SIZE,
+	golan_cq->doorbell_record = malloc_phys(GOLAN_CQ_DB_RECORD_SIZE,
 					       GOLAN_CQ_DB_RECORD_SIZE);
 	if (!golan_cq->doorbell_record) {
 		rc = -ENOMEM;
 		goto err_create_cq_db_alloc;
 	}
 
-	golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+	golan_cq->cqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
 	if (!golan_cq->cqes) {
 		rc = -ENOMEM;
 		goto err_create_cq_cqe_alloc;
@@ -1008,9 +1008,9 @@ static int golan_create_cq(struct ib_device *ibdev,
 	return 0;
 
 err_create_cq_cmd:
-	free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE );
+	free_phys( golan_cq->cqes , GOLAN_PAGE_SIZE );
 err_create_cq_cqe_alloc:
-	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
+	free_phys(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
 err_create_cq_db_alloc:
 	free ( golan_cq );
 err_create_cq:
@@ -1045,8 +1045,8 @@ static void golan_destroy_cq(struct ib_device *ibdev,
 	cq->cqn = 0;
 
 	ib_cq_set_drvdata(cq, NULL);
-	free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE );
-	free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
+	free_phys ( golan_cq->cqes , GOLAN_PAGE_SIZE );
+	free_phys(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
 	free(golan_cq);
 
 	DBGC (golan, "%s CQ number 0x%x was destroyed\n", __FUNCTION__, cqn);
@@ -1138,7 +1138,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
 	golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;
 
 	/* allocate dma memory for WQEs (1 page is enough) - should change it */
-	golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+	golan_qp->wqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
 	if (!golan_qp->wqes) {
 		rc = -ENOMEM;
 		goto err_create_qp_wqe_alloc;
@@ -1160,7 +1160,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
 		data++;
 	}
 
-	golan_qp->doorbell_record = malloc_dma(sizeof(struct golan_qp_db),
+	golan_qp->doorbell_record = malloc_phys(sizeof(struct golan_qp_db),
 					       sizeof(struct golan_qp_db));
 	if (!golan_qp->doorbell_record) {
 		rc = -ENOMEM;
@@ -1213,9 +1213,9 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
 	return 0;
 
 err_create_qp_cmd:
-	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
+	free_phys(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
err_create_qp_db_alloc:
-	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
+	free_phys ( golan_qp->wqes, GOLAN_PAGE_SIZE );
 err_create_qp_wqe_alloc:
 err_create_qp_sq_size:
 err_create_qp_sq_wqe_size:
@@ -1422,8 +1422,8 @@ static void golan_destroy_qp(struct ib_device *ibdev,
 	qp->qpn = 0;
 
 	ib_qp_set_drvdata(qp, NULL);
-	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
-	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
+	free_phys(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
+	free_phys ( golan_qp->wqes, GOLAN_PAGE_SIZE );
 	free(golan_qp);
 
 	DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
@@ -864,8 +864,8 @@ static int hermon_create_cq ( struct ib_device *ibdev,
 	}
 
 	/* Allocate doorbell */
-	hermon_cq->doorbell = malloc_dma ( sizeof ( hermon_cq->doorbell[0] ),
-					   sizeof ( hermon_cq->doorbell[0] ) );
+	hermon_cq->doorbell = malloc_phys ( sizeof ( hermon_cq->doorbell[0] ),
+					    sizeof ( hermon_cq->doorbell[0] ) );
 	if ( ! hermon_cq->doorbell ) {
 		rc = -ENOMEM;
 		goto err_doorbell;
@@ -874,8 +874,8 @@ static int hermon_create_cq ( struct ib_device *ibdev,
 
 	/* Allocate completion queue itself */
 	hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) );
-	hermon_cq->cqe = malloc_dma ( hermon_cq->cqe_size,
-				      sizeof ( hermon_cq->cqe[0] ) );
+	hermon_cq->cqe = malloc_phys ( hermon_cq->cqe_size,
+				       sizeof ( hermon_cq->cqe[0] ) );
 	if ( ! hermon_cq->cqe ) {
 		rc = -ENOMEM;
 		goto err_cqe;
@@ -925,9 +925,9 @@ static int hermon_create_cq ( struct ib_device *ibdev,
 err_sw2hw_cq:
 	hermon_free_mtt ( hermon, &hermon_cq->mtt );
 err_alloc_mtt:
-	free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
+	free_phys ( hermon_cq->cqe, hermon_cq->cqe_size );
 err_cqe:
-	free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
+	free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
 err_doorbell:
 	free ( hermon_cq );
 err_hermon_cq:
@@ -962,8 +962,8 @@ static void hermon_destroy_cq ( struct ib_device *ibdev,
 	hermon_free_mtt ( hermon, &hermon_cq->mtt );
 
 	/* Free memory */
-	free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
-	free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
+	free_phys ( hermon_cq->cqe, hermon_cq->cqe_size );
+	free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
 	free ( hermon_cq );
 
 	/* Mark queue number as free */
@@ -1128,8 +1128,8 @@ static int hermon_create_qp ( struct ib_device *ibdev,
 
 	/* Allocate doorbells */
 	hermon_qp->recv.doorbell =
-		malloc_dma ( sizeof ( hermon_qp->recv.doorbell[0] ),
-			     sizeof ( hermon_qp->recv.doorbell[0] ) );
+		malloc_phys ( sizeof ( hermon_qp->recv.doorbell[0] ),
+			      sizeof ( hermon_qp->recv.doorbell[0] ) );
 	if ( ! hermon_qp->recv.doorbell ) {
 		rc = -ENOMEM;
 		goto err_recv_doorbell;
@@ -1157,8 +1157,8 @@ static int hermon_create_qp ( struct ib_device *ibdev,
 	hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
 				hermon_qp->recv.wqe_size +
 				hermon_qp->recv.grh_size );
-	hermon_qp->wqe = malloc_dma ( hermon_qp->wqe_size,
-				      sizeof ( hermon_qp->send.wqe[0] ) );
+	hermon_qp->wqe = malloc_phys ( hermon_qp->wqe_size,
+				       sizeof ( hermon_qp->send.wqe[0] ) );
 	if ( ! hermon_qp->wqe ) {
 		rc = -ENOMEM;
 		goto err_alloc_wqe;
@@ -1248,10 +1248,10 @@ static int hermon_create_qp ( struct ib_device *ibdev,
 err_rst2init_qp:
 	hermon_free_mtt ( hermon, &hermon_qp->mtt );
 err_alloc_mtt:
-	free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
+	free_phys ( hermon_qp->wqe, hermon_qp->wqe_size );
 err_alloc_wqe:
-	free_dma ( hermon_qp->recv.doorbell,
-		   sizeof ( hermon_qp->recv.doorbell[0] ) );
+	free_phys ( hermon_qp->recv.doorbell,
+		    sizeof ( hermon_qp->recv.doorbell[0] ) );
 err_recv_doorbell:
 	free ( hermon_qp );
 err_hermon_qp:
@@ -1363,9 +1363,9 @@ static void hermon_destroy_qp ( struct ib_device *ibdev,
 	hermon_free_mtt ( hermon, &hermon_qp->mtt );
 
 	/* Free memory */
-	free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
-	free_dma ( hermon_qp->recv.doorbell,
-		   sizeof ( hermon_qp->recv.doorbell[0] ) );
+	free_phys ( hermon_qp->wqe, hermon_qp->wqe_size );
+	free_phys ( hermon_qp->recv.doorbell,
+		    sizeof ( hermon_qp->recv.doorbell[0] ) );
 	free ( hermon_qp );
 
 	/* Mark queue number as free */
@@ -1887,8 +1887,8 @@ static int hermon_create_eq ( struct hermon *hermon ) {
 	/* Allocate event queue itself */
 	hermon_eq->eqe_size =
 		( HERMON_NUM_EQES * sizeof ( hermon_eq->eqe[0] ) );
-	hermon_eq->eqe = malloc_dma ( hermon_eq->eqe_size,
-				      sizeof ( hermon_eq->eqe[0] ) );
+	hermon_eq->eqe = malloc_phys ( hermon_eq->eqe_size,
+				       sizeof ( hermon_eq->eqe[0] ) );
 	if ( ! hermon_eq->eqe ) {
 		rc = -ENOMEM;
 		goto err_eqe;
@@ -1946,7 +1946,7 @@ static int hermon_create_eq ( struct hermon *hermon ) {
 err_sw2hw_eq:
 	hermon_free_mtt ( hermon, &hermon_eq->mtt );
 err_alloc_mtt:
-	free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
+	free_phys ( hermon_eq->eqe, hermon_eq->eqe_size );
 err_eqe:
 	memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
 	return rc;
@@ -1986,7 +1986,7 @@ static void hermon_destroy_eq ( struct hermon *hermon ) {
 	hermon_free_mtt ( hermon, &hermon_eq->mtt );
 
 	/* Free memory */
-	free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
+	free_phys ( hermon_eq->eqe, hermon_eq->eqe_size );
 	memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
 }
 
@@ -3736,20 +3736,20 @@ static struct hermon * hermon_alloc ( void ) {
 		goto err_hermon;
 
 	/* Allocate space for mailboxes */
-	hermon->mailbox_in = malloc_dma ( HERMON_MBOX_SIZE,
-					  HERMON_MBOX_ALIGN );
+	hermon->mailbox_in = malloc_phys ( HERMON_MBOX_SIZE,
+					   HERMON_MBOX_ALIGN );
 	if ( ! hermon->mailbox_in )
 		goto err_mailbox_in;
-	hermon->mailbox_out = malloc_dma ( HERMON_MBOX_SIZE,
-					   HERMON_MBOX_ALIGN );
+	hermon->mailbox_out = malloc_phys ( HERMON_MBOX_SIZE,
+					    HERMON_MBOX_ALIGN );
 	if ( ! hermon->mailbox_out )
 		goto err_mailbox_out;
 
 	return hermon;
 
-	free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
+	free_phys ( hermon->mailbox_out, HERMON_MBOX_SIZE );
 err_mailbox_out:
-	free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
+	free_phys ( hermon->mailbox_in, HERMON_MBOX_SIZE );
 err_mailbox_in:
 	free ( hermon );
 err_hermon:
@@ -3765,8 +3765,8 @@ static void hermon_free ( struct hermon *hermon ) {
 
 	ufree ( hermon->icm );
 	ufree ( hermon->firmware_area );
-	free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
-	free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
+	free_phys ( hermon->mailbox_out, HERMON_MBOX_SIZE );
+	free_phys ( hermon->mailbox_in, HERMON_MBOX_SIZE );
 	free ( hermon );
 }
 
@@ -531,8 +531,8 @@ static int linda_init_send ( struct linda *linda ) {
 		linda->send_buf[i] = i;
 
 	/* Allocate space for the SendBufAvail array */
-	linda->sendbufavail = malloc_dma ( sizeof ( *linda->sendbufavail ),
-					   LINDA_SENDBUFAVAIL_ALIGN );
+	linda->sendbufavail = malloc_phys ( sizeof ( *linda->sendbufavail ),
+					    LINDA_SENDBUFAVAIL_ALIGN );
 	if ( ! linda->sendbufavail ) {
 		rc = -ENOMEM;
 		goto err_alloc_sendbufavail;
@@ -555,7 +555,7 @@ static int linda_init_send ( struct linda *linda ) {
 
 	return 0;
 
-	free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
+	free_phys ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
 err_alloc_sendbufavail:
 	return rc;
 }
@@ -576,7 +576,7 @@ static void linda_fini_send ( struct linda *linda ) {
 	/* Ensure hardware has seen this disable */
 	linda_readq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );
 
-	free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
+	free_phys ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
 }
 
 /***************************************************************************
@@ -613,8 +613,8 @@ static int linda_create_recv_wq ( struct linda *linda,
 	linda_wq->eager_cons = 0;
 
 	/* Allocate receive header buffer */
-	linda_wq->header = malloc_dma ( LINDA_RECV_HEADERS_SIZE,
-					LINDA_RECV_HEADERS_ALIGN );
+	linda_wq->header = malloc_phys ( LINDA_RECV_HEADERS_SIZE,
+					 LINDA_RECV_HEADERS_ALIGN );
 	if ( ! linda_wq->header ) {
 		rc = -ENOMEM;
 		goto err_alloc_header;
@@ -650,7 +650,7 @@ static int linda_create_recv_wq ( struct linda *linda,
 		       virt_to_bus ( &linda_wq->header_prod ) );
 	return 0;
 
-	free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
+	free_phys ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
 err_alloc_header:
 	return rc;
 }
@@ -679,7 +679,7 @@ static void linda_destroy_recv_wq ( struct linda *linda,
 	mb();
 
 	/* Free headers ring */
-	free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
+	free_phys ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
 
 	/* Free context */
 	linda_free_ctx ( linda, ctx );
@@ -61,7 +61,7 @@ mlx_memory_alloc_dma_priv(
 	)
 {
 	mlx_status status = MLX_SUCCESS;
-	*ptr = malloc_dma(size, align);
+	*ptr = malloc_phys(size, align);
 	if (*ptr == NULL) {
 		status = MLX_OUT_OF_RESOURCES;
 	} else {
@@ -78,7 +78,7 @@ mlx_memory_free_dma_priv(
 	)
 {
 	mlx_status status = MLX_SUCCESS;
-	free_dma(ptr, size);
+	free_phys(ptr, size);
 	return status;
 }
 mlx_status
@@ -669,8 +669,8 @@ static int qib7322_init_send ( struct qib7322 *qib7322 ) {
 	}
 
 	/* Allocate space for the SendBufAvail array */
-	qib7322->sendbufavail = malloc_dma ( sizeof ( *qib7322->sendbufavail ),
-					     QIB7322_SENDBUFAVAIL_ALIGN );
+	qib7322->sendbufavail = malloc_phys ( sizeof ( *qib7322->sendbufavail ),
+					      QIB7322_SENDBUFAVAIL_ALIGN );
 	if ( ! qib7322->sendbufavail ) {
 		rc = -ENOMEM;
 		goto err_alloc_sendbufavail;
@@ -697,7 +697,7 @@ static int qib7322_init_send ( struct qib7322 *qib7322 ) {
 
 	return 0;
 
-	free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
+	free_phys ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
 err_alloc_sendbufavail:
 	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
 err_create_send_bufs_vl15_port1:
@@ -724,7 +724,7 @@ static void qib7322_fini_send ( struct qib7322 *qib7322 ) {
 	/* Ensure hardware has seen this disable */
 	qib7322_readq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );
 
-	free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
+	free_phys ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
 	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
 	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port0 );
 	qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_small );
@@ -767,8 +767,8 @@ static int qib7322_create_recv_wq ( struct ib_device *ibdev,
 	qib7322_wq->eager_cons = 0;
 
 	/* Allocate receive header buffer */
-	qib7322_wq->header = malloc_dma ( QIB7322_RECV_HEADERS_SIZE,
-					  QIB7322_RECV_HEADERS_ALIGN );
+	qib7322_wq->header = malloc_phys ( QIB7322_RECV_HEADERS_SIZE,
+					   QIB7322_RECV_HEADERS_ALIGN );
 	if ( ! qib7322_wq->header ) {
 		rc = -ENOMEM;
 		goto err_alloc_header;
@@ -810,7 +810,7 @@ static int qib7322_create_recv_wq ( struct ib_device *ibdev,
 		       virt_to_bus ( &qib7322_wq->header_prod ) );
 	return 0;
 
-	free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
+	free_phys ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
 err_alloc_header:
 	return rc;
 }
@@ -846,7 +846,7 @@ static void qib7322_destroy_recv_wq ( struct ib_device *ibdev,
 	mb();
 
 	/* Free headers ring */
-	free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
+	free_phys ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
 }
 
 /**