1 /*
2 * Copyright (C) 2015 Mellanox Technologies Ltd.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 */
19
20 FILE_LICENCE ( GPL2_OR_LATER );
21
22 #include <stdio.h>
23 #include <unistd.h>
24 #include <errno.h>
25 #include <ipxe/pci.h>
26 #include <ipxe/malloc.h>
27 #include <ipxe/umalloc.h>
28 #include <ipxe/if_ether.h>
29 #include <ipxe/ethernet.h>
30 #include <ipxe/vlan.h>
31 #include <ipxe/io.h>
32 #include "flexboot_nodnic.h"
33 #include "mlx_utils/include/public/mlx_types.h"
34 #include "mlx_utils/include/public/mlx_utils.h"
35 #include "mlx_utils/include/public/mlx_bail.h"
36 #include "mlx_nodnic/include/mlx_cmd.h"
37 #include "mlx_utils/include/public/mlx_memory.h"
38 #include "mlx_utils/include/public/mlx_pci.h"
39 #include "mlx_nodnic/include/mlx_device.h"
40 #include "mlx_nodnic/include/mlx_port.h"
41 #include <byteswap.h>
42 #include <usr/ifmgmt.h>
43 #include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig.h"
44 #include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h"
45 #include "mlx_utils/include/public/mlx_pci_gw.h"
46 #include "mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h"
47 #include "mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h"
48
49 /***************************************************************************
50 *
51 * Completion queue operations
52 *
53 ***************************************************************************
54 */
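/**
 * Arm completion queue to generate an event on the next completion
 *
 * @v port		flexboot nodnic port
 * @ret rc		Return status code
 */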
55 static int flexboot_nodnic_arm_cq ( struct flexboot_nodnic_port *port ) {
56 #ifndef DEVICE_CX3
57 mlx_uint32 val32 = 0;
58 union arm_cq_uar cq_uar;
59
60 #define ARM_CQ_UAR_CQ_CI_MASK 0xffffff
61 #define ARM_CQ_UAR_CMDSN_MASK 3
62 #define ARM_CQ_UAR_CMDSN_OFFSET 28
63 #define ARM_CQ_UAR_CQ_CI_OFFSET 0x20
64 if ( port->port_priv.device->device_cap.support_bar_cq_ctrl ) {
65 cq_uar.dword[0] = cpu_to_be32((port->eth_cq->next_idx & ARM_CQ_UAR_CQ_CI_MASK) |
66 ((port->cmdsn++ & ARM_CQ_UAR_CMDSN_MASK) << ARM_CQ_UAR_CMDSN_OFFSET));
67 cq_uar.dword[1] = cpu_to_be32(port->eth_cq->cqn);
68 wmb();
69 writeq(cq_uar.qword, port->port_priv.device->uar.virt + ARM_CQ_UAR_CQ_CI_OFFSET);
70 port->port_priv.arm_cq_doorbell_record->dword[0] = cq_uar.dword[1];
71 port->port_priv.arm_cq_doorbell_record->dword[1] = cq_uar.dword[0];
72 } else {
73 val32 = ( port->eth_cq->next_idx & 0xffffff );
74 if ( nodnic_port_set ( & port->port_priv, nodnic_port_option_arm_cq, val32 ) ) {
75 MLX_DEBUG_ERROR( port->port_priv.device, "Failed to arm the CQ\n" );
76 return MLX_FAILED;
77 }
78 }
79 #else
80 mlx_utils *utils = port->port_priv.device->utils;
81 nodnic_port_data_flow_gw *ptr = port->port_priv.data_flow_gw;
82 mlx_uint32 data = 0;
83 mlx_uint32 val = 0;
84
85 if ( port->port_priv.device->device_cap.crspace_doorbells == 0 ) {
86 val = ( port->eth_cq->next_idx & 0xffff );
87 if ( nodnic_port_set ( & port->port_priv, nodnic_port_option_arm_cq, val ) ) {
88 MLX_DEBUG_ERROR( port->port_priv.device, "Failed to arm the CQ\n" );
89 return MLX_FAILED;
90 }
91 } else {
92 /* Arming the CQ with CQ CI should be with this format -
93 * 16 bit - CQ CI - same endianness as the FW (don't swap bytes)
94 * 15 bit - reserved
95 * 1 bit - arm CQ - must correct the endianness with the reserved above */
96 data = ( ( ( port->eth_cq->next_idx & 0xffff ) << 16 ) | 0x0080 );
97 /* Write the new index and update FW that new data was submitted */
98 mlx_pci_mem_write ( utils, MlxPciWidthUint32, 0,
99 ( mlx_uintn ) & ( ptr->armcq_cq_ci_dword ), 1, &data );
100 }
101 #endif
102 return 0;
103 }
104
105 /**
106 * Create completion queue
107 *
108 * @v ibdev Infiniband device
109 * @v cq Completion queue
110 * @ret rc Return status code
111 */
112 static int flexboot_nodnic_create_cq ( struct ib_device *ibdev ,
113 struct ib_completion_queue *cq ) {
114 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
115 struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
116 struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq;
117 mlx_status status = MLX_SUCCESS;
118 mlx_uint32 cqn;
119
120 flexboot_nodnic_cq = (struct flexboot_nodnic_completion_queue *)
121 zalloc(sizeof(*flexboot_nodnic_cq));
122 if ( flexboot_nodnic_cq == NULL ) {
123 status = MLX_OUT_OF_RESOURCES;
124 goto qp_alloc_err;
125 }
126
127 status = nodnic_port_create_cq(&port->port_priv,
128 cq->num_cqes *
129 flexboot_nodnic->callbacks->get_cqe_size(),
130 &flexboot_nodnic_cq->nodnic_completion_queue
131 );
132 MLX_FATAL_CHECK_STATUS(status, create_err,
133 "nodnic_port_create_cq failed");
134 flexboot_nodnic->callbacks->cqe_set_owner(
135 flexboot_nodnic_cq->nodnic_completion_queue->cq_virt,
136 cq->num_cqes);
137 if ( flexboot_nodnic->device_priv.device_cap.support_bar_cq_ctrl ) {
138 status = nodnic_port_query(&port->port_priv,
139 nodnic_port_option_cq_n_index,
140 (mlx_uint32 *)&cqn );
141 MLX_FATAL_CHECK_STATUS(status, read_cqn_err,
142 "failed to query cqn");
143 cq->cqn = cqn;
144 }
145
146 ib_cq_set_drvdata ( cq, flexboot_nodnic_cq );
147 return status;
148 read_cqn_err:
149 create_err:
150 free(flexboot_nodnic_cq);
151 qp_alloc_err:
152 return status;
153 }
154
155 /**
156 * Destroy completion queue
157 *
158 * @v ibdev Infiniband device
159 * @v cq Completion queue
160 */
161 static void flexboot_nodnic_destroy_cq ( struct ib_device *ibdev ,
162 struct ib_completion_queue *cq ) {
163 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
164 struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
165 struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq = ib_cq_get_drvdata ( cq );
166
167 nodnic_port_destroy_cq(&port->port_priv,
168 flexboot_nodnic_cq->nodnic_completion_queue);
169
170 free(flexboot_nodnic_cq);
171 }
172
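/**
 * Find the work queue attached to a completion queue by QPN and direction
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @v qpn		Queue pair number
 * @v is_send		Look for a send (TRUE) or receive (FALSE) work queue
 * @ret wq		Work queue, or NULL if not found
 */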
173 static
174 struct ib_work_queue * flexboot_nodnic_find_wq ( struct ib_device *ibdev ,
175 struct ib_completion_queue *cq,
176 unsigned long qpn, int is_send ) {
177 struct ib_work_queue *wq;
178 struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp;
179 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
180 struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
181 struct nodnic_ring *ring;
182 mlx_uint32 out_qpn;
183 list_for_each_entry ( wq, &cq->work_queues, list ) {
184 flexboot_nodnic_qp = ib_qp_get_drvdata ( wq->qp );
185 if( wq->is_send == is_send && wq->is_send == TRUE ) {
186 ring = &flexboot_nodnic_qp->nodnic_queue_pair->send.nodnic_ring;
187 } else if( wq->is_send == is_send && wq->is_send == FALSE ) {
188 ring = &flexboot_nodnic_qp->nodnic_queue_pair->receive.nodnic_ring;
189 } else {
190 continue;
191 }
192 nodnic_port_get_qpn(&port->port_priv, ring, &out_qpn);
193 if ( out_qpn == qpn )
194 return wq;
195 }
196 return NULL;
197 }
198
199 /**
200 * Handle completion
201 *
202 * @v ibdev Infiniband device
203 * @v cq Completion queue
204 * @v cqe Hardware completion queue entry
205 * @ret rc Return status code
206 */
207 static int flexboot_nodnic_complete ( struct ib_device *ibdev,
208 struct ib_completion_queue *cq,
209 struct cqe_data *cqe_data ) {
210 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
211 struct ib_work_queue *wq;
212 struct ib_queue_pair *qp;
213 struct io_buffer *iobuf;
214 struct ib_address_vector recv_dest;
215 struct ib_address_vector recv_source;
216 unsigned long qpn;
217 unsigned long wqe_idx;
218 unsigned long wqe_idx_mask;
219 size_t len;
220 int rc = 0;
221
222 /* Parse completion */
223 qpn = cqe_data->qpn;
224
225 if ( cqe_data->is_error == TRUE ) {
226 DBGC ( flexboot_nodnic, "flexboot_nodnic %p CQN %#lx syndrome %x vendor %x\n",
227 flexboot_nodnic, cq->cqn, cqe_data->syndrome,
228 cqe_data->vendor_err_syndrome );
229 rc = -EIO;
230 /* Don't return immediately; propagate error to completer */
231 }
232
233 /* Identify work queue */
234 wq = flexboot_nodnic_find_wq( ibdev, cq, qpn, cqe_data->is_send );
235 if ( wq == NULL ) {
236 DBGC ( flexboot_nodnic,
237 "flexboot_nodnic %p CQN %#lx unknown %s QPN %#lx\n",
238 flexboot_nodnic, cq->cqn,
239 ( cqe_data->is_send ? "send" : "recv" ), qpn );
240 return -EIO;
241 }
242 qp = wq->qp;
243
244 /* Identify work queue entry */
245 wqe_idx = cqe_data->wqe_counter;
246 wqe_idx_mask = ( wq->num_wqes - 1 );
247 DBGCP ( flexboot_nodnic,
248 "NODNIC %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
249 flexboot_nodnic, cq->cqn, qp->qpn,
250 ( cqe_data->is_send ? "send" : "recv" ),
251 wqe_idx );
252
253 /* Identify I/O buffer */
254 iobuf = wq->iobufs[wqe_idx & wqe_idx_mask];
255 if ( iobuf == NULL ) {
256 DBGC ( flexboot_nodnic,
257 "NODNIC %p CQN %#lx QPN %#lx empty %s WQE %#lx\n",
258 flexboot_nodnic, cq->cqn, qp->qpn,
259 ( cqe_data->is_send ? "send" : "recv" ), wqe_idx );
260 return -EIO;
261 }
262 wq->iobufs[wqe_idx & wqe_idx_mask] = NULL;
263
264 if ( cqe_data->is_send == TRUE ) {
265 /* Hand off to completion handler */
266 ib_complete_send ( ibdev, qp, iobuf, rc );
267 } else if ( rc != 0 ) {
268 /* Propagate error to receive completion handler */
269 ib_complete_recv ( ibdev, qp, NULL, NULL, iobuf, rc );
270 } else {
271 /* Set received length */
272 len = cqe_data->byte_cnt;
273 assert ( len <= iob_tailroom ( iobuf ) );
274 iob_put ( iobuf, len );
275 memset ( &recv_dest, 0, sizeof ( recv_dest ) );
276 recv_dest.qpn = qpn;
277 memset ( &recv_source, 0, sizeof ( recv_source ) );
278 switch ( qp->type ) {
279 case IB_QPT_SMI:
280 case IB_QPT_GSI:
281 case IB_QPT_UD:
282 case IB_QPT_RC:
283 break;
284 case IB_QPT_ETH:
285 break;
286 default:
287 assert ( 0 );
288 return -EINVAL;
289 }
290 /* Hand off to completion handler */
291 ib_complete_recv ( ibdev, qp, &recv_dest,
292 &recv_source, iobuf, rc );
293 }
294
295 return rc;
296 }
297 /**
298 * Poll completion queue
299 *
300 * @v ibdev Infiniband device
301  * @v cq	Completion queue
302 */
303 static void flexboot_nodnic_poll_cq ( struct ib_device *ibdev,
304 struct ib_completion_queue *cq) {
305 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
306 struct flexboot_nodnic_completion_queue *flexboot_nodnic_cq = ib_cq_get_drvdata ( cq );
307 void *cqe;
308 mlx_size cqe_size;
309 struct cqe_data cqe_data;
310 unsigned int cqe_idx_mask;
311 int rc;
312
313 cqe_size = flexboot_nodnic->callbacks->get_cqe_size();
314 while ( TRUE ) {
315 /* Look for completion entry */
316 cqe_idx_mask = ( cq->num_cqes - 1 );
317 cqe = ((uint8_t *)flexboot_nodnic_cq->nodnic_completion_queue->cq_virt) +
318 cqe_size * (cq->next_idx & cqe_idx_mask);
319
320 /* TODO: check fill_completion */
321 flexboot_nodnic->callbacks->fill_completion(cqe, &cqe_data);
322 if ( cqe_data.owner ^
323 ( ( cq->next_idx & cq->num_cqes ) ? 1 : 0 ) ) {
324 /* Entry still owned by hardware; end of poll */
325 break;
326 }
327 /* Handle completion */
328 rc = flexboot_nodnic_complete ( ibdev, cq, &cqe_data );
329 if ( rc != 0 ) {
330 DBGC ( flexboot_nodnic, "flexboot_nodnic %p CQN %#lx failed to complete: %s\n",
331 flexboot_nodnic, cq->cqn, strerror ( rc ) );
332 DBGC_HDA ( flexboot_nodnic, virt_to_phys ( cqe ),
333 				cqe, cqe_size );
334 }
335
336 /* Update completion queue's index */
337 cq->next_idx++;
338 }
339 }
340 /***************************************************************************
341 *
342 * Queue pair operations
343 *
344 ***************************************************************************
345 */
346
347
348 /**
349 * Create queue pair
350 *
351 * @v ibdev Infiniband device
352 * @v qp Queue pair
353 * @ret rc Return status code
354 */
355 static int flexboot_nodnic_create_qp ( struct ib_device *ibdev,
356 struct ib_queue_pair *qp ) {
357 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
358 struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
359 struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp;
360 mlx_status status = MLX_SUCCESS;
361
362 flexboot_nodnic_qp = (struct flexboot_nodnic_queue_pair *)zalloc(sizeof(*flexboot_nodnic_qp));
363 if ( flexboot_nodnic_qp == NULL ) {
364 status = MLX_OUT_OF_RESOURCES;
365 goto qp_alloc_err;
366 }
367
368 status = nodnic_port_create_qp(&port->port_priv, qp->type,
369 qp->send.num_wqes * sizeof(struct nodnic_send_wqbb),
370 qp->send.num_wqes,
371 qp->recv.num_wqes * sizeof(struct nodnic_recv_wqe),
372 qp->recv.num_wqes,
373 &flexboot_nodnic_qp->nodnic_queue_pair);
374 MLX_FATAL_CHECK_STATUS(status, create_err,
375 "nodnic_port_create_qp failed");
376 ib_qp_set_drvdata ( qp, flexboot_nodnic_qp );
377 return status;
378 create_err:
379 free(flexboot_nodnic_qp);
380 qp_alloc_err:
381 return status;
382 }
383
384 /**
385 * Modify queue pair
386 *
387 * @v ibdev Infiniband device
388 * @v qp Queue pair
389 * @ret rc Return status code
390 */
391 static int flexboot_nodnic_modify_qp ( struct ib_device *ibdev __unused,
392 struct ib_queue_pair *qp __unused) {
393 /*not needed*/
394 return 0;
395 }
396
397 /**
398 * Destroy queue pair
399 *
400 * @v ibdev Infiniband device
401 * @v qp Queue pair
402 */
403 static void flexboot_nodnic_destroy_qp ( struct ib_device *ibdev,
404 struct ib_queue_pair *qp ) {
405 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
406 struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
407 struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp );
408
409 nodnic_port_destroy_qp(&port->port_priv, qp->type,
410 flexboot_nodnic_qp->nodnic_queue_pair);
411
412 free(flexboot_nodnic_qp);
413 }
414
415 /***************************************************************************
416 *
417 * Work request operations
418 *
419 ***************************************************************************
420 */
421
422 /**
423 * Post send work queue entry
424 *
425 * @v ibdev Infiniband device
426 * @v qp Queue pair
427 * @v av Address vector
428 * @v iobuf I/O buffer
429 * @ret rc Return status code
430 */
431 static int flexboot_nodnic_post_send ( struct ib_device *ibdev,
432 struct ib_queue_pair *qp,
433 struct ib_address_vector *av,
434 struct io_buffer *iobuf) {
435
436 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
437 struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp );
438 struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
439 struct ib_work_queue *wq = &qp->send;
440 struct nodnic_send_wqbb *wqbb;
441 nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
442 struct nodnic_send_ring *send_ring = &nodnic_qp->send;
443 mlx_status status = MLX_SUCCESS;
444 unsigned int wqe_idx_mask;
445 unsigned long wqe_idx;
446
447 if ( ( port->port_priv.dma_state == FALSE ) ||
448 ( port->port_priv.port_state & NODNIC_PORT_DISABLING_DMA ) ) {
449 DBGC ( flexboot_nodnic, "flexboot_nodnic DMA disabled\n");
450 status = -ENETDOWN;
451 goto post_send_done;
452 }
453
454 /* Allocate work queue entry */
455 wqe_idx = wq->next_idx;
456 wqe_idx_mask = ( wq->num_wqes - 1 );
457 if ( wq->iobufs[wqe_idx & wqe_idx_mask] ) {
458 DBGC ( flexboot_nodnic, "flexboot_nodnic %p QPN %#lx send queue full\n",
459 flexboot_nodnic, qp->qpn );
460 status = -ENOBUFS;
461 goto post_send_done;
462 }
463 wqbb = &send_ring->wqe_virt[wqe_idx & wqe_idx_mask];
464 wq->iobufs[wqe_idx & wqe_idx_mask] = iobuf;
465
466 assert ( flexboot_nodnic->callbacks->
467 fill_send_wqe[qp->type] != NULL );
468 status = flexboot_nodnic->callbacks->
469 fill_send_wqe[qp->type] ( ibdev, qp, av, iobuf,
470 wqbb, wqe_idx );
471 if ( status != 0 ) {
472 DBGC ( flexboot_nodnic, "flexboot_nodnic %p QPN %#lx fill send wqe failed\n",
473 flexboot_nodnic, qp->qpn );
474 goto post_send_done;
475 }
476
477 wq->next_idx++;
478
479 status = port->port_priv.send_doorbell ( &port->port_priv,
480 &send_ring->nodnic_ring, ( mlx_uint16 ) wq->next_idx );
481 if ( flexboot_nodnic->callbacks->tx_uar_send_doorbell_fn ) {
482 flexboot_nodnic->callbacks->tx_uar_send_doorbell_fn ( ibdev, wqbb );
483 }
484 if ( status != 0 ) {
485 DBGC ( flexboot_nodnic, "flexboot_nodnic %p ring send doorbell failed\n", flexboot_nodnic );
486 }
487
488 post_send_done:
489 return status;
490 }
491
492 /**
493 * Post receive work queue entry
494 *
495 * @v ibdev Infiniband device
496 * @v qp Queue pair
497 * @v iobuf I/O buffer
498 * @ret rc Return status code
499 */
500 static int flexboot_nodnic_post_recv ( struct ib_device *ibdev,
501 struct ib_queue_pair *qp,
502 struct io_buffer *iobuf ) {
503 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
504 struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = ib_qp_get_drvdata ( qp );
505 struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
506 struct ib_work_queue *wq = &qp->recv;
507 nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
508 struct nodnic_recv_ring *recv_ring = &nodnic_qp->receive;
509 struct nodnic_recv_wqe *wqe;
510 unsigned int wqe_idx_mask;
511 mlx_status status = MLX_SUCCESS;
512
513 /* Allocate work queue entry */
514 wqe_idx_mask = ( wq->num_wqes - 1 );
515 if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
516 DBGC ( flexboot_nodnic,
517 "flexboot_nodnic %p QPN %#lx receive queue full\n",
518 flexboot_nodnic, qp->qpn );
519 status = -ENOBUFS;
520 goto post_recv_done;
521 }
522 wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
523 wqe = &((struct nodnic_recv_wqe*)recv_ring->wqe_virt)[wq->next_idx & wqe_idx_mask];
524
525 MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
526 MLX_FILL_1 ( &wqe->data[0], 1, l_key, flexboot_nodnic->device_priv.lkey );
527 MLX_FILL_H ( &wqe->data[0], 2,
528 local_address_h, virt_to_bus ( iobuf->data ) );
529 MLX_FILL_1 ( &wqe->data[0], 3,
530 local_address_l, virt_to_bus ( iobuf->data ) );
531
532 wq->next_idx++;
533
534 status = port->port_priv.recv_doorbell ( &port->port_priv,
535 &recv_ring->nodnic_ring, ( mlx_uint16 ) wq->next_idx );
536 if ( status != 0 ) {
537 DBGC ( flexboot_nodnic, "flexboot_nodnic %p ring receive doorbell failed\n", flexboot_nodnic );
538 }
539 post_recv_done:
540 return status;
541 }
542
543 /***************************************************************************
544 *
545 * Event queues
546 *
547 ***************************************************************************
548 */
549
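/**
 * Poll event queue
 *
 * The event queue is not polled directly; while the link is reported as
 * down, this polls the physical port state and signals a state change
 * when the port becomes active.
 *
 * @v ibdev		Infiniband device
 */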
550 static void flexboot_nodnic_poll_eq ( struct ib_device *ibdev ) {
551 struct flexboot_nodnic *flexboot_nodnic;
552 struct flexboot_nodnic_port *port;
553 struct net_device *netdev;
554 nodnic_port_state state = 0;
555 mlx_status status;
556
557 if ( ! ibdev ) {
558 DBG ( "%s: ibdev = NULL!!!\n", __FUNCTION__ );
559 return;
560 }
561
562 flexboot_nodnic = ib_get_drvdata ( ibdev );
563 port = &flexboot_nodnic->port[ibdev->port - 1];
564 netdev = port->netdev;
565
566 if ( ! netdev_is_open ( netdev ) ) {
567 DBG2( "%s: port %d is closed\n", __FUNCTION__, port->ibdev->port );
568 return;
569 }
570
571 	/* We do not poll the EQ; just poll the link state while the link is down */
572 if ( ! netdev_link_ok ( netdev ) ) {
573 status = nodnic_port_get_state ( &port->port_priv, &state );
574 MLX_FATAL_CHECK_STATUS(status, state_err, "nodnic_port_get_state failed");
575
576 if ( state == nodnic_port_state_active ) {
577 DBG( "%s: port %d physical link is up\n", __FUNCTION__,
578 port->ibdev->port );
579 port->type->state_change ( flexboot_nodnic, port, 1 );
580 }
581 }
582 state_err:
583 return;
584 }
585
586 /***************************************************************************
587 *
588 * Multicast group operations
589 *
590 ***************************************************************************
591 */
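/**
 * Attach to multicast group
 *
 * For Ethernet queue pairs the multicast GID carries a MAC address,
 * which is added to the port's MAC filter.
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 * @ret rc		Return status code
 */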
592 static int flexboot_nodnic_mcast_attach ( struct ib_device *ibdev,
593 struct ib_queue_pair *qp,
594 union ib_gid *gid) {
595 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
596 struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
597 mlx_mac_address mac;
598 mlx_status status = MLX_SUCCESS;
599
600 switch (qp->type) {
601 case IB_QPT_ETH:
602 memcpy(&mac, gid, sizeof(mac));
603 status = nodnic_port_add_mac_filter(&port->port_priv, mac);
604 MLX_CHECK_STATUS(flexboot_nodnic->device_priv, status, mac_err,
605 "nodnic_port_add_mac_filter failed");
606 break;
607 default:
608 break;
609 }
610 mac_err:
611 return status;
612 }
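
/**
 * Detach from multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 */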
613 static void flexboot_nodnic_mcast_detach ( struct ib_device *ibdev,
614 struct ib_queue_pair *qp,
615 union ib_gid *gid ) {
616 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
617 struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
618 mlx_mac_address mac;
619 mlx_status status = MLX_SUCCESS;
620
621 switch (qp->type) {
622 case IB_QPT_ETH:
623 memcpy(&mac, gid, sizeof(mac));
624 status = nodnic_port_remove_mac_filter(&port->port_priv, mac);
625 MLX_CHECK_STATUS(flexboot_nodnic->device_priv, status, mac_err,
626 "nodnic_port_remove_mac_filter failed");
627 break;
628 default:
629 break;
630 }
631 mac_err:
632 return;
633 }
634 /***************************************************************************
635 *
636 * Infiniband link-layer operations
637 *
638 ***************************************************************************
639 */
640
641 /**
642 * Initialise Infiniband link
643 *
644 * @v ibdev Infiniband device
645 * @ret rc Return status code
646 */
647 static int flexboot_nodnic_ib_open ( struct ib_device *ibdev __unused) {
648 int rc = 0;
649
650 /*TODO: add implementation*/
651 return rc;
652 }
653
654 /**
655 * Close Infiniband link
656 *
657 * @v ibdev Infiniband device
658 */
659 static void flexboot_nodnic_ib_close ( struct ib_device *ibdev __unused) {
660 /*TODO: add implementation*/
661 }
662
663 /**
664 * Inform embedded subnet management agent of a received MAD
665 *
666 * @v ibdev Infiniband device
667 * @v mad MAD
668 * @ret rc Return status code
669 */
670 static int flexboot_nodnic_inform_sma ( struct ib_device *ibdev __unused,
671 union ib_mad *mad __unused) {
672 /*TODO: add implementation*/
673 return 0;
674 }
675
676 /** flexboot_nodnic Infiniband operations */
677 static struct ib_device_operations flexboot_nodnic_ib_operations = {
678 .create_cq = flexboot_nodnic_create_cq,
679 .destroy_cq = flexboot_nodnic_destroy_cq,
680 .create_qp = flexboot_nodnic_create_qp,
681 .modify_qp = flexboot_nodnic_modify_qp,
682 .destroy_qp = flexboot_nodnic_destroy_qp,
683 .post_send = flexboot_nodnic_post_send,
684 .post_recv = flexboot_nodnic_post_recv,
685 .poll_cq = flexboot_nodnic_poll_cq,
686 .poll_eq = flexboot_nodnic_poll_eq,
687 .open = flexboot_nodnic_ib_open,
688 .close = flexboot_nodnic_ib_close,
689 .mcast_attach = flexboot_nodnic_mcast_attach,
690 .mcast_detach = flexboot_nodnic_mcast_detach,
691 .set_port_info = flexboot_nodnic_inform_sma,
692 .set_pkey_table = flexboot_nodnic_inform_sma,
693 };
694 /***************************************************************************
695 *
696 *
697 *
698 ***************************************************************************
699 */
700
701 #define FLEX_NODNIC_TX_POLL_TOUT 500000
702 #define FLEX_NODNIC_TX_POLL_USLEEP 10
703
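/**
 * Drain outstanding transmit completions
 *
 * Polls every completion queue until all send work queues are empty or
 * the polling timeout expires.
 *
 * @v port		flexboot nodnic port
 */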
704 static void flexboot_nodnic_complete_all_tx ( struct flexboot_nodnic_port *port ) {
705 struct ib_device *ibdev = port->ibdev;
706 struct ib_completion_queue *cq;
707 struct ib_work_queue *wq;
708 int keep_polling = 0;
709 int timeout = FLEX_NODNIC_TX_POLL_TOUT;
710
711 list_for_each_entry ( cq, &ibdev->cqs, list ) {
712 do {
713 ib_poll_cq ( ibdev, cq );
714 keep_polling = 0;
715 list_for_each_entry ( wq, &cq->work_queues, list ) {
716 if ( wq->is_send )
717 keep_polling += ( wq->fill > 0 );
718 }
719 udelay ( FLEX_NODNIC_TX_POLL_USLEEP );
720 } while ( keep_polling && ( timeout-- > 0 ) );
721 }
722 }
723
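/**
 * Disable DMA on a port
 *
 * Completes outstanding transmissions before asking the device to stop
 * DMA; does nothing if the port is not open.
 *
 * @v port		flexboot nodnic port
 */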
724 static void flexboot_nodnic_port_disable_dma ( struct flexboot_nodnic_port *port ) {
725 nodnic_port_priv *port_priv = & ( port->port_priv );
726 mlx_status status;
727
728 if ( ! ( port_priv->port_state & NODNIC_PORT_OPENED ) )
729 return;
730
731 port_priv->port_state |= NODNIC_PORT_DISABLING_DMA;
732 flexboot_nodnic_complete_all_tx ( port );
733 if ( ( status = nodnic_port_disable_dma ( port_priv ) ) ) {
734 MLX_DEBUG_WARN ( port, "Failed to disable DMA %d\n", status );
735 }
736
737 port_priv->port_state &= ~NODNIC_PORT_DISABLING_DMA;
738 }
739
740 /***************************************************************************
741 *
742 * Ethernet operations
743 *
744 ***************************************************************************
745 */
746
747 /** Number of flexboot_nodnic Ethernet send work queue entries */
748 #define FLEXBOOT_NODNIC_ETH_NUM_SEND_WQES 64
749
750 /** Number of flexboot_nodnic Ethernet receive work queue entries */
751 #define FLEXBOOT_NODNIC_ETH_NUM_RECV_WQES 64
752 /** flexboot nodnic Ethernet queue pair operations */
753 static struct ib_queue_pair_operations flexboot_nodnic_eth_qp_op = {
754 .alloc_iob = alloc_iob,
755 };
756
757 /**
758 * Transmit packet via flexboot_nodnic Ethernet device
759 *
760 * @v netdev Network device
761 * @v iobuf I/O buffer
762 * @ret rc Return status code
763 */
764 static int flexboot_nodnic_eth_transmit ( struct net_device *netdev,
765 struct io_buffer *iobuf) {
766 struct flexboot_nodnic_port *port = netdev->priv;
767 struct ib_device *ibdev = port->ibdev;
768 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
769 int rc;
770
771 rc = ib_post_send ( ibdev, port->eth_qp, NULL, iobuf);
772 /* Transmit packet */
773 if ( rc != 0) {
774 DBGC ( flexboot_nodnic, "NODNIC %p port %d could not transmit: %s\n",
775 flexboot_nodnic, ibdev->port, strerror ( rc ) );
776 return rc;
777 }
778
779 return 0;
780 }
781
782 /**
783 * Handle flexboot_nodnic Ethernet device send completion
784 *
785 * @v ibdev Infiniband device
786 * @v qp Queue pair
787 * @v iobuf I/O buffer
788 * @v rc Completion status code
789 */
790 static void flexboot_nodnic_eth_complete_send ( struct ib_device *ibdev __unused,
791 struct ib_queue_pair *qp,
792 struct io_buffer *iobuf,
793 int rc) {
794 struct net_device *netdev = ib_qp_get_ownerdata ( qp );
795
796 netdev_tx_complete_err ( netdev, iobuf, rc );
797 }
798
799 /**
800 * Handle flexboot_nodnic Ethernet device receive completion
801 *
802 * @v ibdev Infiniband device
803 * @v qp Queue pair
804 * @v av Address vector, or NULL
805 * @v iobuf I/O buffer
806 * @v rc Completion status code
807 */
808 static void flexboot_nodnic_eth_complete_recv ( struct ib_device *ibdev __unused,
809 struct ib_queue_pair *qp,
810 struct ib_address_vector *dest __unused,
811 struct ib_address_vector *source,
812 struct io_buffer *iobuf,
813 int rc) {
814 struct net_device *netdev = ib_qp_get_ownerdata ( qp );
815
816 if ( rc != 0 ) {
817 DBG ( "Received packet with error\n" );
818 netdev_rx_err ( netdev, iobuf, rc );
819 return;
820 }
821
822 if ( source == NULL ) {
823 DBG ( "Received packet without address vector\n" );
824 netdev_rx_err ( netdev, iobuf, -ENOTTY );
825 return;
826 }
827
828 netdev_rx ( netdev, iobuf );
829 }
830
831 /** flexboot_nodnic Ethernet device completion operations */
832 static struct ib_completion_queue_operations flexboot_nodnic_eth_cq_op = {
833 .complete_send = flexboot_nodnic_eth_complete_send,
834 .complete_recv = flexboot_nodnic_eth_complete_recv,
835 };
836
837 /**
838 * Poll flexboot_nodnic Ethernet device
839 *
840 * @v netdev Network device
841 */
842 static void flexboot_nodnic_eth_poll ( struct net_device *netdev) {
843 struct flexboot_nodnic_port *port = netdev->priv;
844 struct ib_device *ibdev = port->ibdev;
845
846 ib_poll_eq ( ibdev );
847 }
848
849 /**
850 * Open flexboot_nodnic Ethernet device
851 *
852 * @v netdev Network device
853 * @ret rc Return status code
854 */
855 static int flexboot_nodnic_eth_open ( struct net_device *netdev ) {
856 struct flexboot_nodnic_port *port = netdev->priv;
857 struct ib_device *ibdev = port->ibdev;
858 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
859 mlx_status status = MLX_SUCCESS;
860 struct ib_completion_queue *dummy_cq = NULL;
861 struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp = NULL;
862 mlx_uint64 cq_size = 0;
863 mlx_uint32 qpn = 0;
864 nodnic_port_state state = nodnic_port_state_down;
865 int rc;
866
867 if ( port->port_priv.port_state & NODNIC_PORT_OPENED ) {
868 DBGC ( flexboot_nodnic, "%s: port %d is already opened\n",
869 __FUNCTION__, port->ibdev->port );
870 return 0;
871 }
872
873 port->port_priv.port_state |= NODNIC_PORT_OPENED;
874
875 dummy_cq = zalloc ( sizeof ( struct ib_completion_queue ) );
876 if ( dummy_cq == NULL ) {
877 DBGC ( flexboot_nodnic, "%s: Failed to allocate dummy CQ\n", __FUNCTION__ );
878 status = MLX_OUT_OF_RESOURCES;
879 goto err_create_dummy_cq;
880 }
881 INIT_LIST_HEAD ( &dummy_cq->work_queues );
882
883 if ( ( rc = ib_create_qp ( ibdev, IB_QPT_ETH,
884 FLEXBOOT_NODNIC_ETH_NUM_SEND_WQES, dummy_cq,
885 FLEXBOOT_NODNIC_ETH_NUM_RECV_WQES, dummy_cq,
886 &flexboot_nodnic_eth_qp_op, netdev->name,
887 &port->eth_qp ) ) != 0 ) {
888 DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not create queue pair\n",
889 flexboot_nodnic, ibdev->port );
890 status = MLX_OUT_OF_RESOURCES;
891 goto err_create_qp;
892 }
893
894 ib_qp_set_ownerdata ( port->eth_qp, netdev );
895
896 status = nodnic_port_get_cq_size(&port->port_priv, &cq_size);
897 MLX_FATAL_CHECK_STATUS(status, get_cq_size_err,
898 "nodnic_port_get_cq_size failed");
899
900 if ( ( rc = ib_create_cq ( ibdev, cq_size, &flexboot_nodnic_eth_cq_op,
901 &port->eth_cq ) ) != 0 ) {
902 DBGC ( flexboot_nodnic,
903 "flexboot_nodnic %p port %d could not create completion queue\n",
904 flexboot_nodnic, ibdev->port );
905 status = MLX_OUT_OF_RESOURCES;
906 goto err_create_cq;
907 }
908 port->eth_qp->send.cq = port->eth_cq;
909 list_del(&port->eth_qp->send.list);
910 list_add ( &port->eth_qp->send.list, &port->eth_cq->work_queues );
911 port->eth_qp->recv.cq = port->eth_cq;
912 port->cmdsn = 0;
913 list_del(&port->eth_qp->recv.list);
914 list_add ( &port->eth_qp->recv.list, &port->eth_cq->work_queues );
915
916 status = nodnic_port_allocate_eq(&port->port_priv,
917 flexboot_nodnic->device_priv.device_cap.log_working_buffer_size);
918 MLX_FATAL_CHECK_STATUS(status, eq_alloc_err,
919 "nodnic_port_allocate_eq failed");
920
921 status = nodnic_port_init(&port->port_priv);
922 MLX_FATAL_CHECK_STATUS(status, init_err,
923 "nodnic_port_init failed");
924
925 /* update qp - qpn */
926 flexboot_nodnic_qp = ib_qp_get_drvdata ( port->eth_qp );
927 status = nodnic_port_get_qpn(&port->port_priv,
928 &flexboot_nodnic_qp->nodnic_queue_pair->send.nodnic_ring,
929 &qpn);
930 MLX_FATAL_CHECK_STATUS(status, qpn_err,
931 "nodnic_port_get_qpn failed");
932 port->eth_qp->qpn = qpn;
933
934 /* Fill receive rings */
935 ib_refill_recv ( ibdev, port->eth_qp );
936
937 status = nodnic_port_enable_dma(&port->port_priv);
938 MLX_FATAL_CHECK_STATUS(status, dma_err,
939 "nodnic_port_enable_dma failed");
940
941 if (flexboot_nodnic->device_priv.device_cap.support_promisc_filter) {
942 status = nodnic_port_set_promisc(&port->port_priv, TRUE);
943 MLX_FATAL_CHECK_STATUS(status, promisc_err,
944 "nodnic_port_set_promisc failed");
945 }
946
947 status = nodnic_port_get_state(&port->port_priv, &state);
948 MLX_FATAL_CHECK_STATUS(status, state_err,
949 "nodnic_port_get_state failed");
950
951 port->type->state_change (
952 flexboot_nodnic, port, state == nodnic_port_state_active );
953
954 DBGC ( flexboot_nodnic, "%s: port %d opened (link is %s)\n",
955 __FUNCTION__, port->ibdev->port,
956 ( ( state == nodnic_port_state_active ) ? "Up" : "Down" ) );
957
958 free(dummy_cq);
959 return 0;
960 state_err:
961 promisc_err:
962 dma_err:
963 qpn_err:
964 nodnic_port_close(&port->port_priv);
965 init_err:
966 nodnic_port_free_eq(&port->port_priv);
967 eq_alloc_err:
968 err_create_cq:
969 get_cq_size_err:
970 ib_destroy_qp(ibdev, port->eth_qp );
971 err_create_qp:
972 free(dummy_cq);
973 err_create_dummy_cq:
974 port->port_priv.port_state &= ~NODNIC_PORT_OPENED;
975 return status;
976 }
977
978 /**
979 * Close flexboot_nodnic Ethernet device
980 *
981 * @v netdev Network device
982 */
983 static void flexboot_nodnic_eth_close ( struct net_device *netdev) {
984 struct flexboot_nodnic_port *port = netdev->priv;
985 struct ib_device *ibdev = port->ibdev;
986 struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
987 mlx_status status = MLX_SUCCESS;
988
989 if ( ! ( port->port_priv.port_state & NODNIC_PORT_OPENED ) ) {
990 DBGC ( flexboot_nodnic, "%s: port %d is already closed\n",
991 __FUNCTION__, port->ibdev->port );
992 return;
993 }
994
995 if (flexboot_nodnic->device_priv.device_cap.support_promisc_filter) {
996 if ( ( status = nodnic_port_set_promisc( &port->port_priv, FALSE ) ) ) {
997 DBGC ( flexboot_nodnic,
998 "nodnic_port_set_promisc failed (status = %d)\n", status );
999 }
1000 }
1001
1002 flexboot_nodnic_port_disable_dma ( port );
1003
1004 port->port_priv.port_state &= ~NODNIC_PORT_OPENED;
1005
1006 port->type->state_change ( flexboot_nodnic, port, FALSE );
1007
1008 /* Close port */
1009 status = nodnic_port_close(&port->port_priv);
1010 if ( status != MLX_SUCCESS ) {
1011 DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not close port: %s\n",
1012 flexboot_nodnic, ibdev->port, strerror ( status ) );
1013 /* Nothing we can do about this */
1014 }
1015
1016 ib_destroy_qp ( ibdev, port->eth_qp );
1017 port->eth_qp = NULL;
1018 ib_destroy_cq ( ibdev, port->eth_cq );
1019 port->eth_cq = NULL;
1020
1021 nodnic_port_free_eq(&port->port_priv);
1022
1023 DBGC ( flexboot_nodnic, "%s: port %d closed\n", __FUNCTION__, port->ibdev->port );
1024 }
1025
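/**
 * Enable or disable flexboot_nodnic Ethernet device interrupts
 *
 * @v netdev		Network device
 * @v enable		Interrupts should be enabled
 */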
1026 void flexboot_nodnic_eth_irq ( struct net_device *netdev, int enable ) {
1027 struct flexboot_nodnic_port *port = netdev->priv;
1028
1029 if ( enable ) {
1030 if ( ( port->port_priv.port_state & NODNIC_PORT_OPENED ) &&
1031 ! ( port->port_priv.port_state & NODNIC_PORT_DISABLING_DMA ) ) {
1032 flexboot_nodnic_arm_cq ( port );
1033 } else {
1034 /* do nothing */
1035 }
1036 } else {
1037 nodnic_device_clear_int( port->port_priv.device );
1038 }
1039 }
1040
1041 /** flexboot_nodnic Ethernet network device operations */
1042 static struct net_device_operations flexboot_nodnic_eth_operations = {
1043 .open = flexboot_nodnic_eth_open,
1044 .close = flexboot_nodnic_eth_close,
1045 .transmit = flexboot_nodnic_eth_transmit,
1046 .poll = flexboot_nodnic_eth_poll,
1047 };
1048
1049 /**
1050 * Register flexboot_nodnic Ethernet device
1051 */
1052 static int flexboot_nodnic_register_netdev ( struct flexboot_nodnic *flexboot_nodnic,
1053 struct flexboot_nodnic_port *port) {
1054 mlx_status status = MLX_SUCCESS;
1055 struct net_device *netdev;
1056 struct ib_device *ibdev = port->ibdev;
1057 union {
1058 uint8_t bytes[8];
1059 uint32_t dwords[2];
1060 } mac;
1061
1062 	/* Allocate network device */
1063 netdev = alloc_etherdev ( 0 );
1064 if ( netdev == NULL ) {
1065 DBGC ( flexboot_nodnic, "flexboot_nodnic %p port %d could not allocate net device\n",
1066 flexboot_nodnic, ibdev->port );
1067 status = MLX_OUT_OF_RESOURCES;
1068 goto alloc_err;
1069 }
1070 port->netdev = netdev;
1071 netdev_init ( netdev, &flexboot_nodnic_eth_operations );
1072 netdev->dev = ibdev->dev;
1073 netdev->priv = port;
1074
1075 status = nodnic_port_query(&port->port_priv,
1076 nodnic_port_option_mac_high,
1077 &mac.dwords[0]);
1078 MLX_FATAL_CHECK_STATUS(status, mac_err,
1079 "failed to query mac high");
1080 status = nodnic_port_query(&port->port_priv,
1081 nodnic_port_option_mac_low,
1082 &mac.dwords[1]);
1083 MLX_FATAL_CHECK_STATUS(status, mac_err,
1084 "failed to query mac low");
1085 mac.dwords[0] = htonl(mac.dwords[0]);
1086 mac.dwords[1] = htonl(mac.dwords[1]);
1087 memcpy ( netdev->hw_addr,
1088 &mac.bytes[2], ETH_ALEN);
1089 /* Register network device */
1090 status = register_netdev ( netdev );
1091 if ( status != MLX_SUCCESS ) {
1092 DBGC ( flexboot_nodnic,
1093 "flexboot_nodnic %p port %d could not register network device: %s\n",
1094 flexboot_nodnic, ibdev->port, strerror ( status ) );
1095 goto reg_err;
1096 }
1097 return status;
1098 reg_err:
1099 mac_err:
1100 netdev_put ( netdev );
1101 alloc_err:
1102 return status;
1103 }
1104
1105 /**
1106 * Handle flexboot_nodnic Ethernet device port state change
1107 */
1108 static void flexboot_nodnic_state_change_netdev ( struct flexboot_nodnic *flexboot_nodnic __unused,
1109 struct flexboot_nodnic_port *port,
1110 int link_up ) {
1111 struct net_device *netdev = port->netdev;
1112
1113 if ( link_up )
1114 netdev_link_up ( netdev );
1115 else
1116 netdev_link_down ( netdev );
1117
1118 }
1119
1120 /**
1121 * Unregister flexboot_nodnic Ethernet device
1122 */
1123 static void flexboot_nodnic_unregister_netdev ( struct flexboot_nodnic *flexboot_nodnic __unused,
1124 struct flexboot_nodnic_port *port ) {
1125 struct net_device *netdev = port->netdev;
1126 unregister_netdev ( netdev );
1127 netdev_nullify ( netdev );
1128 netdev_put ( netdev );
1129 }
1130
1131 /** flexboot_nodnic Ethernet port type */
1132 static struct flexboot_nodnic_port_type flexboot_nodnic_port_type_eth = {
1133 .register_dev = flexboot_nodnic_register_netdev,
1134 .state_change = flexboot_nodnic_state_change_netdev,
1135 .unregister_dev = flexboot_nodnic_unregister_netdev,
1136 };
1137
1138 /***************************************************************************
1139 *
1140 * PCI interface helper functions
1141 *
1142 ***************************************************************************
1143 */
1144 static
1145 mlx_status
1146 flexboot_nodnic_allocate_infiniband_devices( struct flexboot_nodnic *flexboot_nodnic_priv ) {
1147 mlx_status status = MLX_SUCCESS;
1148 nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
1149 struct pci_device *pci = flexboot_nodnic_priv->pci;
1150 struct ib_device *ibdev = NULL;
1151 unsigned int i = 0;
1152
1153 /* Allocate Infiniband devices */
1154 for (; i < device_priv->device_cap.num_ports; i++) {
1155 if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
1156 continue;
1157 ibdev = alloc_ibdev(0);
1158 if (ibdev == NULL) {
1159 status = MLX_OUT_OF_RESOURCES;
1160 goto err_alloc_ibdev;
1161 }
1162 flexboot_nodnic_priv->port[i].ibdev = ibdev;
1163 ibdev->op = &flexboot_nodnic_ib_operations;
1164 ibdev->dev = &pci->dev;
1165 ibdev->port = ( FLEXBOOT_NODNIC_PORT_BASE + i);
1166 ib_set_drvdata(ibdev, flexboot_nodnic_priv);
1167 }
1168 return status;
1169 err_alloc_ibdev:
1170 for ( i-- ; ( signed int ) i >= 0 ; i-- )
1171 ibdev_put ( flexboot_nodnic_priv->port[i].ibdev );
1172 return status;
1173 }
1174
1175 static
1176 mlx_status
1177 flexboot_nodnic_thin_init_ports( struct flexboot_nodnic *flexboot_nodnic_priv ) {
1178 mlx_status status = MLX_SUCCESS;
1179 nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
1180 nodnic_port_priv *port_priv = NULL;
1181 unsigned int i = 0;
1182
1183 for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) {
1184 if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
1185 continue;
1186 port_priv = &flexboot_nodnic_priv->port[i].port_priv;
1187 status = nodnic_port_thin_init( device_priv, port_priv, i );
1188 MLX_FATAL_CHECK_STATUS(status, thin_init_err,
1189 "flexboot_nodnic_thin_init_ports failed");
1190 }
1191 thin_init_err:
1192 return status;
1193 }
1194
1195
1196 static
1197 mlx_status
1198 flexboot_nodnic_set_ports_type ( struct flexboot_nodnic *flexboot_nodnic_priv ) {
1199 mlx_status status = MLX_SUCCESS;
1200 nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
1201 nodnic_port_priv *port_priv = NULL;
1202 nodnic_port_type type = NODNIC_PORT_TYPE_UNKNOWN;
1203 unsigned int i = 0;
1204
1205 for ( i = 0 ; i < device_priv->device_cap.num_ports ; i++ ) {
1206 if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
1207 continue;
1208 port_priv = &flexboot_nodnic_priv->port[i].port_priv;
1209 status = nodnic_port_get_type(port_priv, &type);
1210 MLX_FATAL_CHECK_STATUS(status, type_err,
1211 "nodnic_port_get_type failed");
1212 switch ( type ) {
1213 case NODNIC_PORT_TYPE_ETH:
1214 DBGC ( flexboot_nodnic_priv, "Port %d type is Ethernet\n", i );
1215 flexboot_nodnic_priv->port[i].type = &flexboot_nodnic_port_type_eth;
1216 break;
1217 case NODNIC_PORT_TYPE_IB:
1218 DBGC ( flexboot_nodnic_priv, "Port %d type is Infiniband\n", i );
1219 status = MLX_UNSUPPORTED;
1220 goto type_err;
1221 default:
1222 DBGC ( flexboot_nodnic_priv, "Port %d type is unknown\n", i );
1223 status = MLX_UNSUPPORTED;
1224 goto type_err;
1225 }
1226 }
1227 type_err:
1228 return status;
1229 }
1230
1231 static
1232 mlx_status
1233 flexboot_nodnic_ports_register_dev( struct flexboot_nodnic *flexboot_nodnic_priv ) {
1234 mlx_status status = MLX_SUCCESS;
1235 nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
1236 struct flexboot_nodnic_port *port = NULL;
1237 unsigned int i = 0;
1238
1239 for (; i < device_priv->device_cap.num_ports; i++) {
1240 if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
1241 continue;
1242 port = &flexboot_nodnic_priv->port[i];
1243 status = port->type->register_dev ( flexboot_nodnic_priv, port );
1244 MLX_FATAL_CHECK_STATUS(status, reg_err,
1245 "port register_dev failed");
1246 }
1247 reg_err:
1248 return status;
1249 }
1250
1251 static
1252 mlx_status
1253 flexboot_nodnic_ports_unregister_dev ( struct flexboot_nodnic *flexboot_nodnic_priv ) {
1254 struct flexboot_nodnic_port *port;
1255 nodnic_device_priv *device_priv = &flexboot_nodnic_priv->device_priv;
1256 int i = (device_priv->device_cap.num_ports - 1);
1257
1258 for (; i >= 0; i--) {
1259 if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
1260 continue;
1261 port = &flexboot_nodnic_priv->port[i];
1262 port->type->unregister_dev(flexboot_nodnic_priv, port);
1263 ibdev_put(flexboot_nodnic_priv->port[i].ibdev);
1264 }
1265 return MLX_SUCCESS;
1266 }
1267
1268 /***************************************************************************
1269 *
1270 * flexboot nodnic interface
1271 *
1272 ***************************************************************************
1273 */
1274 __unused static void flexboot_nodnic_enable_dma ( struct flexboot_nodnic *nodnic ) {
1275 nodnic_port_priv *port_priv;
1276 mlx_status status;
1277 int i;
1278
1279 for ( i = 0; i < nodnic->device_priv.device_cap.num_ports; i++ ) {
1280 if ( ! ( nodnic->port_mask & ( i + 1 ) ) )
1281 continue;
1282 port_priv = & ( nodnic->port[i].port_priv );
1283 if ( ! ( port_priv->port_state & NODNIC_PORT_OPENED ) )
1284 continue;
1285
1286 if ( ( status = nodnic_port_enable_dma ( port_priv ) ) ) {
1287 MLX_DEBUG_WARN ( nodnic, "Failed to enable DMA %d\n", status );
1288 }
1289 }
1290 }
1291
1292 __unused static void flexboot_nodnic_disable_dma ( struct flexboot_nodnic *nodnic ) {
1293 int i;
1294
1295 for ( i = 0; i < nodnic->device_priv.device_cap.num_ports; i++ ) {
1296 if ( ! ( nodnic->port_mask & ( i + 1 ) ) )
1297 continue;
1298 flexboot_nodnic_port_disable_dma ( & ( nodnic->port[i] ) );
1299 }
1300 }
1301
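/**
 * Check whether a PCI device supports the NODNIC interface
 *
 * @v pci		PCI device
 * @ret is_supported	Non-zero if the NODNIC interface is supported
 */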
1302 int flexboot_nodnic_is_supported ( struct pci_device *pci ) {
1303 mlx_utils utils;
1304 mlx_pci_gw_buffer buffer;
1305 mlx_status status;
1306 int is_supported = 0;
1307
1308 DBG ( "%s: start\n", __FUNCTION__ );
1309
1310 memset ( &utils, 0, sizeof ( utils ) );
1311
1312 status = mlx_utils_init ( &utils, pci );
1313 MLX_CHECK_STATUS ( pci, status, utils_init_err, "mlx_utils_init failed" );
1314
1315 status = mlx_pci_gw_init ( &utils );
1316 MLX_CHECK_STATUS ( pci, status, pci_gw_init_err, "mlx_pci_gw_init failed" );
1317
1318 status = mlx_pci_gw_read ( &utils, PCI_GW_SPACE_NODNIC,
1319 NODNIC_NIC_INTERFACE_SUPPORTED_OFFSET, &buffer );
1320
1321 if ( status == MLX_SUCCESS ) {
1322 buffer >>= NODNIC_NIC_INTERFACE_SUPPORTED_BIT;
1323 is_supported = ( buffer & 0x1 );
1324 }
1325
1326 mlx_pci_gw_teardown( &utils );
1327
1328 pci_gw_init_err:
1329 mlx_utils_teardown(&utils);
1330 utils_init_err:
1331 DBG ( "%s: NODNIC is %s supported (status = %d)\n",
1332 __FUNCTION__, ( is_supported ? "": "not" ), status );
1333 return is_supported;
1334 }
1335
1336
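/**
 * Copy a MAC address out of its low/high register representation
 *
 * @v mac_addr		MAC address buffer to fill (ETH_ALEN bytes)
 * @v low_byte		Low 32 bits of the MAC address
 * @v high_byte		High 16 bits of the MAC address
 */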
1337 void flexboot_nodnic_copy_mac ( uint8_t mac_addr[], uint32_t low_byte,
1338 uint16_t high_byte ) {
1339 union mac_addr {
1340 struct {
1341 uint32_t low_byte;
1342 uint16_t high_byte;
1343 };
1344 uint8_t mac_addr[ETH_ALEN];
1345 } mac_addr_aux;
1346
1347 mac_addr_aux.high_byte = high_byte;
1348 mac_addr_aux.low_byte = low_byte;
1349
1350 mac_addr[0] = mac_addr_aux.mac_addr[5];
1351 mac_addr[1] = mac_addr_aux.mac_addr[4];
1352 mac_addr[2] = mac_addr_aux.mac_addr[3];
1353 mac_addr[3] = mac_addr_aux.mac_addr[2];
1354 mac_addr[4] = mac_addr_aux.mac_addr[1];
1355 mac_addr[5] = mac_addr_aux.mac_addr[0];
1356 }
1357
1358 static mlx_status flexboot_nodnic_get_factory_mac (
1359 struct flexboot_nodnic *flexboot_nodnic_priv, uint8_t port __unused ) {
1360 struct mlx_vmac_query_virt_mac virt_mac;
1361 mlx_status status;
1362
1363 memset ( & virt_mac, 0, sizeof ( virt_mac ) );
1364 status = mlx_vmac_query_virt_mac ( flexboot_nodnic_priv->device_priv.utils,
1365 &virt_mac );
1366 if ( ! status ) {
1367 DBGC ( flexboot_nodnic_priv, "NODNIC %p Failed to set the virtual MAC\n"
1368 ,flexboot_nodnic_priv );
1369 }
1370
1371 return status;
1372 }
1373
1374
1375 /**
1376 * Set port masking
1377 *
1378 * @v flexboot_nodnic nodnic device
1379 * @ret rc Return status code
1380 */
1381 static int flexboot_nodnic_set_port_masking ( struct flexboot_nodnic *flexboot_nodnic ) {
1382 unsigned int i;
1383 nodnic_device_priv *device_priv = &flexboot_nodnic->device_priv;
1384
1385 flexboot_nodnic->port_mask = 0;
1386 for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) {
1387 flexboot_nodnic->port_mask |= (i + 1);
1388 }
1389
1390 if ( ! flexboot_nodnic->port_mask ) {
1391 /* No port was enabled */
1392 DBGC ( flexboot_nodnic, "NODNIC %p No port was enabled for "
1393 "booting\n", flexboot_nodnic );
1394 return -ENETUNREACH;
1395 }
1396
1397 return 0;
1398 }
1399
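/**
 * Allocate and initialise the mlx_utils context (PCI access and command gateway)
 *
 * @v utils		mlx utils context to allocate and fill
 * @v pci		PCI device
 * @ret rc		Return status code
 */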
1400 int init_mlx_utils ( mlx_utils **utils, struct pci_device *pci ) {
1401 int rc = 0;
1402
1403 *utils = ( mlx_utils * ) zalloc ( sizeof ( mlx_utils ) );
1404 if ( *utils == NULL ) {
1405 DBGC ( utils, "%s: Failed to allocate utils\n", __FUNCTION__ );
1406 rc = -1;
1407 goto err_utils_alloc;
1408 }
1409 if ( mlx_utils_init ( *utils, pci ) ) {
1410 DBGC ( utils, "%s: mlx_utils_init failed\n", __FUNCTION__ );
1411 rc = -1;
1412 goto err_utils_init;
1413 }
1414 if ( mlx_pci_gw_init ( *utils ) ){
1415 DBGC ( utils, "%s: mlx_pci_gw_init failed\n", __FUNCTION__ );
1416 rc = -1;
1417 goto err_cmd_init;
1418 }
1419
1420 return 0;
1421
1422 mlx_pci_gw_teardown ( *utils );
1423 err_cmd_init:
1424 mlx_utils_teardown ( *utils );
1425 err_utils_init:
1426 free ( *utils );
1427 err_utils_alloc:
1428 *utils = NULL;
1429
1430 return rc;
1431 }
1432
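/**
 * Tear down and free the mlx_utils context
 *
 * @v utils		mlx utils context to free
 */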
1433 void free_mlx_utils ( mlx_utils **utils ) {
1434
1435 mlx_pci_gw_teardown ( *utils );
1436 mlx_utils_teardown ( *utils );
1437 free ( *utils );
1438 *utils = NULL;
1439 }
1440
1441 /**
1442 * Allocate and map the UAR used for TX doorbells
1443 *
1444 * @v flexboot_nodnic	flexboot nodnic device
1445 */
1446 static int flexboot_nodnic_alloc_uar ( struct flexboot_nodnic *flexboot_nodnic ) {
1447 mlx_status status = MLX_SUCCESS;
1448 struct pci_device *pci = flexboot_nodnic->pci;
1449 nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar;
1450
1451 if ( ! flexboot_nodnic->device_priv.device_cap.support_uar_tx_db ) {
1452 DBGC ( flexboot_nodnic, "%s: tx db using uar is not supported \n", __FUNCTION__ );
1453 return -ENOTSUP;
1454 }
1455 /* read uar offset then allocate */
1456 if ( ( status = nodnic_port_set_send_uar_offset ( &flexboot_nodnic->port[0].port_priv ) ) ) {
1457 DBGC ( flexboot_nodnic, "%s: nodnic_port_set_send_uar_offset failed,"
1458 "status = %d\n", __FUNCTION__, status );
1459 return -EINVAL;
1460 }
1461 uar->phys = ( pci_bar_start ( pci, FLEXBOOT_NODNIC_HCA_BAR ) + (mlx_uint32)uar->offset );
1462 uar->virt = ( void * )( ioremap ( uar->phys, FLEXBOOT_NODNIC_PAGE_SIZE ) );
1463
1464 return status;
1465 }
1466
1467 static int flexboot_nodnic_dealloc_uar ( struct flexboot_nodnic *flexboot_nodnic ) {
1468 nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar;
1469
1470 if ( uar->virt ) {
1471 iounmap( uar->virt );
1472 uar->virt = NULL;
1473 }
1474
1475 return MLX_SUCCESS;
1476 }
1477
1478
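/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @v callbacks		Device-specific callbacks
 * @v drv_priv		Driver private data (unused)
 * @ret rc		Return status code
 */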
1479 int flexboot_nodnic_probe ( struct pci_device *pci,
1480 struct flexboot_nodnic_callbacks *callbacks,
1481 void *drv_priv __unused ) {
1482 mlx_status status = MLX_SUCCESS;
1483 struct flexboot_nodnic *flexboot_nodnic_priv = NULL;
1484 nodnic_device_priv *device_priv = NULL;
1485 int i = 0;
1486
1487 if ( ( pci == NULL ) || ( callbacks == NULL ) ) {
1488 DBGC ( flexboot_nodnic_priv, "%s: Bad Parameter\n", __FUNCTION__ );
1489 return -EINVAL;
1490 }
1491
1492 flexboot_nodnic_priv = zalloc( sizeof ( *flexboot_nodnic_priv ) );
1493 if ( flexboot_nodnic_priv == NULL ) {
1494 DBGC ( flexboot_nodnic_priv, "%s: Failed to allocate priv data\n", __FUNCTION__ );
1495 status = MLX_OUT_OF_RESOURCES;
1496 goto device_err_alloc;
1497 }
1498
1499 /* Register settings
1500 * Note that pci->priv will be the device private data */
1501 flexboot_nodnic_priv->pci = pci;
1502 flexboot_nodnic_priv->callbacks = callbacks;
1503 pci_set_drvdata ( pci, flexboot_nodnic_priv );
1504
1505 device_priv = &flexboot_nodnic_priv->device_priv;
1506 /* init mlx utils */
1507 status = init_mlx_utils ( & device_priv->utils, pci );
1508 MLX_FATAL_CHECK_STATUS(status, err_utils_init,
1509 "init_mlx_utils failed");
1510
1511 /* init device */
1512 status = nodnic_device_init( device_priv );
1513 MLX_FATAL_CHECK_STATUS(status, device_init_err,
1514 "nodnic_device_init failed");
1515
1516 status = nodnic_device_get_cap( device_priv );
1517 MLX_FATAL_CHECK_STATUS(status, get_cap_err,
1518 "nodnic_device_get_cap failed");
1519
1520 if ( mlx_set_admin_mtu ( device_priv->utils, 1, EN_DEFAULT_ADMIN_MTU ) ) {
1521 MLX_DEBUG_ERROR( device_priv->utils, "Failed to set admin mtu\n" );
1522 }
1523
1524 status = flexboot_nodnic_set_port_masking ( flexboot_nodnic_priv );
1525 MLX_FATAL_CHECK_STATUS(status, err_set_masking,
1526 "flexboot_nodnic_set_port_masking failed");
1527
1528 status = flexboot_nodnic_allocate_infiniband_devices( flexboot_nodnic_priv );
1529 MLX_FATAL_CHECK_STATUS(status, err_alloc_ibdev,
1530 "flexboot_nodnic_allocate_infiniband_devices failed");
1531
1532 /* port init */
1533 status = flexboot_nodnic_thin_init_ports( flexboot_nodnic_priv );
1534 MLX_FATAL_CHECK_STATUS(status, err_thin_init_ports,
1535 "flexboot_nodnic_thin_init_ports failed");
1536
1537 if ( ( status = flexboot_nodnic_alloc_uar ( flexboot_nodnic_priv ) ) ) {
1538 DBGC(flexboot_nodnic_priv, "%s: flexboot_nodnic_alloc_uar failed"
1539 " ( status = %d )\n",__FUNCTION__, status );
1540 }
1541
1542 /* device reg */
1543 status = flexboot_nodnic_set_ports_type( flexboot_nodnic_priv );
1544 MLX_CHECK_STATUS( flexboot_nodnic_priv, status, err_set_ports_types,
1545 "flexboot_nodnic_set_ports_type failed");
1546
1547 status = flexboot_nodnic_ports_register_dev( flexboot_nodnic_priv );
1548 MLX_FATAL_CHECK_STATUS(status, reg_err,
1549 "flexboot_nodnic_ports_register_dev failed");
1550
1551 for ( i = 0; i < device_priv->device_cap.num_ports; i++ ) {
1552 if ( ! ( flexboot_nodnic_priv->port_mask & ( i + 1 ) ) )
1553 continue;
1554 flexboot_nodnic_get_factory_mac ( flexboot_nodnic_priv, i );
1555 }
1556
1557 /* Update ETH operations with IRQ function if supported */
1558 DBGC ( flexboot_nodnic_priv, "%s: %s IRQ function\n",
1559 __FUNCTION__, ( callbacks->irq ? "Valid" : "No" ) );
1560 flexboot_nodnic_eth_operations.irq = callbacks->irq;
1561 return 0;
1562
1563 flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
1564 reg_err:
1565 err_set_ports_types:
1566 flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv );
1567 err_thin_init_ports:
1568 err_alloc_ibdev:
1569 err_set_masking:
1570 get_cap_err:
1571 nodnic_device_teardown ( device_priv );
1572 device_init_err:
1573 free_mlx_utils ( & device_priv->utils );
1574 err_utils_init:
1575 free ( flexboot_nodnic_priv );
1576 device_err_alloc:
1577 return status;
1578 }
1579
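/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */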
1580 void flexboot_nodnic_remove ( struct pci_device *pci )
1581 {
1582 struct flexboot_nodnic *flexboot_nodnic_priv = pci_get_drvdata ( pci );
1583 nodnic_device_priv *device_priv = & ( flexboot_nodnic_priv->device_priv );
1584
1585 flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv );
1586 flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
1587 nodnic_device_teardown( device_priv );
1588 free_mlx_utils ( & device_priv->utils );
1589 free( flexboot_nodnic_priv );
1590 }