[arbel] Allocate space for GRH on UD queue pairs

As with the previous commit (for Hermon), allocate a separate ring
buffer to hold received GRHs.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
This commit is contained in:
Michael Brown
2016-03-21 08:55:02 +00:00
parent e84c917f39
commit 57c63047e3
2 changed files with 55 additions and 16 deletions

View File

@@ -897,26 +897,44 @@ static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
* *
* @v arbel_recv_wq Receive work queue * @v arbel_recv_wq Receive work queue
* @v num_wqes Number of work queue entries * @v num_wqes Number of work queue entries
* @v type Queue pair type
* @ret rc Return status code * @ret rc Return status code
*/ */
static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq, static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
unsigned int num_wqes ) { unsigned int num_wqes,
enum ib_queue_pair_type type ) {
struct arbelprm_recv_wqe *wqe; struct arbelprm_recv_wqe *wqe;
struct arbelprm_recv_wqe *next_wqe; struct arbelprm_recv_wqe *next_wqe;
unsigned int wqe_idx_mask; unsigned int wqe_idx_mask;
size_t nds; size_t nds;
unsigned int i; unsigned int i;
unsigned int j; unsigned int j;
int rc;
/* Allocate work queue */ /* Allocate work queue */
arbel_recv_wq->wqe_size = ( num_wqes * arbel_recv_wq->wqe_size = ( num_wqes *
sizeof ( arbel_recv_wq->wqe[0] ) ); sizeof ( arbel_recv_wq->wqe[0] ) );
arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size, arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
sizeof ( arbel_recv_wq->wqe[0] ) ); sizeof ( arbel_recv_wq->wqe[0] ) );
if ( ! arbel_recv_wq->wqe ) if ( ! arbel_recv_wq->wqe ) {
return -ENOMEM; rc = -ENOMEM;
goto err_alloc_wqe;
}
memset ( arbel_recv_wq->wqe, 0, arbel_recv_wq->wqe_size ); memset ( arbel_recv_wq->wqe, 0, arbel_recv_wq->wqe_size );
/* Allocate GRH entries, if needed */
if ( ( type == IB_QPT_SMI ) || ( type == IB_QPT_GSI ) ||
( type == IB_QPT_UD ) ) {
arbel_recv_wq->grh_size = ( num_wqes *
sizeof ( arbel_recv_wq->grh[0] ) );
arbel_recv_wq->grh = malloc_dma ( arbel_recv_wq->grh_size,
sizeof ( void * ) );
if ( ! arbel_recv_wq->grh ) {
rc = -ENOMEM;
goto err_alloc_grh;
}
}
/* Link work queue entries */ /* Link work queue entries */
wqe_idx_mask = ( num_wqes - 1 ); wqe_idx_mask = ( num_wqes - 1 );
nds = ( ( offsetof ( typeof ( *wqe ), data ) + nds = ( ( offsetof ( typeof ( *wqe ), data ) +
@@ -935,6 +953,12 @@ static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
} }
return 0; return 0;
free_dma ( arbel_recv_wq->grh, arbel_recv_wq->grh_size );
err_alloc_grh:
free_dma ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size );
err_alloc_wqe:
return rc;
} }
/** /**
@@ -985,8 +1009,8 @@ static int arbel_create_qp ( struct ib_device *ibdev,
if ( ( rc = arbel_create_send_wq ( &arbel_qp->send, if ( ( rc = arbel_create_send_wq ( &arbel_qp->send,
qp->send.num_wqes ) ) != 0 ) qp->send.num_wqes ) ) != 0 )
goto err_create_send_wq; goto err_create_send_wq;
if ( ( rc = arbel_create_recv_wq ( &arbel_qp->recv, if ( ( rc = arbel_create_recv_wq ( &arbel_qp->recv, qp->recv.num_wqes,
qp->recv.num_wqes ) ) != 0 ) qp->type ) ) != 0 )
goto err_create_recv_wq; goto err_create_recv_wq;
/* Send and receive work queue entries must be within the same 4GB */ /* Send and receive work queue entries must be within the same 4GB */
@@ -1078,6 +1102,7 @@ static int arbel_create_qp ( struct ib_device *ibdev,
MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE ); MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE ); MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
err_unsupported_address_split: err_unsupported_address_split:
free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size ); free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
err_create_recv_wq: err_create_recv_wq:
free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size ); free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
@@ -1206,8 +1231,9 @@ static void arbel_destroy_qp ( struct ib_device *ibdev,
MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE ); MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
/* Free memory */ /* Free memory */
free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size ); free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size ); free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
free ( arbel_qp ); free ( arbel_qp );
/* Mark queue number as free */ /* Mark queue number as free */
@@ -1477,6 +1503,8 @@ static int arbel_post_recv ( struct ib_device *ibdev,
struct ib_work_queue *wq = &qp->recv; struct ib_work_queue *wq = &qp->recv;
struct arbel_recv_work_queue *arbel_recv_wq = &arbel_qp->recv; struct arbel_recv_work_queue *arbel_recv_wq = &arbel_qp->recv;
struct arbelprm_recv_wqe *wqe; struct arbelprm_recv_wqe *wqe;
struct arbelprm_wqe_segment_data_ptr *data;
struct ib_global_route_header *grh;
union arbelprm_doorbell_record *db_rec; union arbelprm_doorbell_record *db_rec;
unsigned int wqe_idx_mask; unsigned int wqe_idx_mask;
@@ -1491,12 +1519,19 @@ static int arbel_post_recv ( struct ib_device *ibdev,
wqe = &arbel_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv; wqe = &arbel_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;
/* Construct work queue entry */ /* Construct work queue entry */
MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) ); data = &wqe->data[0];
MLX_FILL_1 ( &wqe->data[0], 1, l_key, arbel->lkey ); if ( arbel_recv_wq->grh ) {
MLX_FILL_H ( &wqe->data[0], 2, grh = &arbel_recv_wq->grh[wq->next_idx & wqe_idx_mask];
local_address_h, virt_to_bus ( iobuf->data ) ); MLX_FILL_1 ( data, 0, byte_count, sizeof ( *grh ) );
MLX_FILL_1 ( &wqe->data[0], 3, MLX_FILL_1 ( data, 1, l_key, arbel->lkey );
local_address_l, virt_to_bus ( iobuf->data ) ); MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( grh ) );
MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( grh ) );
data++;
}
MLX_FILL_1 ( data, 0, byte_count, iob_tailroom ( iobuf ) );
MLX_FILL_1 ( data, 1, l_key, arbel->lkey );
MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( iobuf->data ) );
MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( iobuf->data ) );
/* Update doorbell record */ /* Update doorbell record */
barrier(); barrier();
@@ -1619,9 +1654,9 @@ static int arbel_complete ( struct ib_device *ibdev,
case IB_QPT_SMI: case IB_QPT_SMI:
case IB_QPT_GSI: case IB_QPT_GSI:
case IB_QPT_UD: case IB_QPT_UD:
assert ( iob_len ( iobuf ) >= sizeof ( *grh ) ); /* Locate corresponding GRH */
grh = iobuf->data; assert ( arbel_recv_wq->grh != NULL );
iob_pull ( iobuf, sizeof ( *grh ) ); grh = &arbel_recv_wq->grh[wqe_idx];
/* Construct address vector */ /* Construct address vector */
source = &recv_source; source = &recv_source;
memset ( source, 0, sizeof ( *source ) ); memset ( source, 0, sizeof ( *source ) );

View File

@@ -237,7 +237,7 @@ struct arbelprm_rc_send_wqe {
struct arbelprm_wqe_segment_data_ptr data[ARBEL_MAX_GATHER]; struct arbelprm_wqe_segment_data_ptr data[ARBEL_MAX_GATHER];
} __attribute__ (( packed )); } __attribute__ (( packed ));
#define ARBEL_MAX_SCATTER 1 #define ARBEL_MAX_SCATTER 2
struct arbelprm_recv_wqe { struct arbelprm_recv_wqe {
/* The autogenerated header is inconsistent between send and /* The autogenerated header is inconsistent between send and
@@ -369,6 +369,10 @@ struct arbel_recv_work_queue {
union arbel_recv_wqe *wqe; union arbel_recv_wqe *wqe;
/** Size of work queue */ /** Size of work queue */
size_t wqe_size; size_t wqe_size;
/** GRH buffers (if applicable) */
struct ib_global_route_header *grh;
/** Size of GRH buffers */
size_t grh_size;
}; };
/** Number of special queue pairs */ /** Number of special queue pairs */