[intelxl] Use VLAN tag in receive descriptor if present
[ipxe.git] / src / drivers / net / intelxl.c
1 /*
2 * Copyright (C) 2018 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdint.h>
27 #include <string.h>
28 #include <stdio.h>
29 #include <unistd.h>
30 #include <errno.h>
31 #include <byteswap.h>
32 #include <ipxe/netdevice.h>
33 #include <ipxe/ethernet.h>
34 #include <ipxe/if_ether.h>
35 #include <ipxe/vlan.h>
36 #include <ipxe/iobuf.h>
37 #include <ipxe/malloc.h>
38 #include <ipxe/pci.h>
39 #include <ipxe/version.h>
40 #include "intelxl.h"
41
42 /** @file
43 *
44 * Intel 40 Gigabit Ethernet network card driver
45 *
46 */
47
48 /******************************************************************************
49 *
50 * Device reset
51 *
52 ******************************************************************************
53 */
54
55 /**
56 * Reset hardware
57 *
58 * @v intelxl Intel device
59 * @ret rc Return status code
60 */
61 static int intelxl_reset ( struct intelxl_nic *intelxl ) {
62 uint32_t pfgen_ctrl;
63
64 /* Perform a global software reset */
65 pfgen_ctrl = readl ( intelxl->regs + INTELXL_PFGEN_CTRL );
66 writel ( ( pfgen_ctrl | INTELXL_PFGEN_CTRL_PFSWR ),
67 intelxl->regs + INTELXL_PFGEN_CTRL );
68 mdelay ( INTELXL_RESET_DELAY_MS );
69
70 return 0;
71 }
72
73 /******************************************************************************
74 *
75 * MAC address
76 *
77 ******************************************************************************
78 */
79
80 /**
81 * Fetch initial MAC address and maximum frame size
82 *
83 * @v intelxl Intel device
84 * @v netdev Network device
85 * @ret rc Return status code
86 */
87 static int intelxl_fetch_mac ( struct intelxl_nic *intelxl,
88 struct net_device *netdev ) {
89 union intelxl_receive_address mac;
90 uint32_t prtgl_sal;
91 uint32_t prtgl_sah;
92 size_t mfs;
93
94 /* Read NVM-loaded address */
95 prtgl_sal = readl ( intelxl->regs + INTELXL_PRTGL_SAL );
96 prtgl_sah = readl ( intelxl->regs + INTELXL_PRTGL_SAH );
97 mac.reg.low = cpu_to_le32 ( prtgl_sal );
98 mac.reg.high = cpu_to_le32 ( prtgl_sah );
99
100 /* Check that address is valid */
101 if ( ! is_valid_ether_addr ( mac.raw ) ) {
102 DBGC ( intelxl, "INTELXL %p has invalid MAC address (%s)\n",
103 intelxl, eth_ntoa ( mac.raw ) );
104 return -ENOENT;
105 }
106
107 /* Copy MAC address */
108 DBGC ( intelxl, "INTELXL %p has autoloaded MAC address %s\n",
109 intelxl, eth_ntoa ( mac.raw ) );
110 memcpy ( netdev->hw_addr, mac.raw, ETH_ALEN );
111
112 /* Get maximum frame size */
113 mfs = INTELXL_PRTGL_SAH_MFS_GET ( prtgl_sah );
114 netdev->max_pkt_len = ( mfs - 4 /* CRC */ );
115
116 return 0;
117 }
118
119 /******************************************************************************
120 *
121 * Admin queue
122 *
123 ******************************************************************************
124 */
125
/**
 * Create admin queue
 *
 * Allocates the descriptor ring plus a single shared data buffer in
 * one DMA-coherent allocation, then programs and enables the queue.
 *
 * @v intelxl		Intel device
 * @v admin		Admin queue
 * @ret rc		Return status code
 */
static int intelxl_create_admin ( struct intelxl_nic *intelxl,
				  struct intelxl_admin *admin ) {
	size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
	void *admin_regs = ( intelxl->regs + admin->reg );
	physaddr_t address;

	/* Allocate admin queue (ring followed immediately by buffer) */
	admin->desc = malloc_dma ( ( len + sizeof ( *admin->buffer ) ),
				   INTELXL_ALIGN );
	if ( ! admin->desc )
		return -ENOMEM;
	admin->buffer = ( ( ( void * ) admin->desc ) + len );

	/* Initialise admin queue */
	memset ( admin->desc, 0, len );

	/* Reset head and tail registers */
	writel ( 0, admin_regs + INTELXL_ADMIN_HEAD );
	writel ( 0, admin_regs + INTELXL_ADMIN_TAIL );

	/* Reset queue index */
	admin->index = 0;

	/* Program queue base address, split across two 32-bit
	 * registers (high half is zero on 32-bit builds).
	 */
	address = virt_to_bus ( admin->desc );
	writel ( ( address & 0xffffffffUL ), admin_regs + INTELXL_ADMIN_BAL );
	if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) {
		writel ( ( ( ( uint64_t ) address ) >> 32 ),
			 admin_regs + INTELXL_ADMIN_BAH );
	} else {
		writel ( 0, admin_regs + INTELXL_ADMIN_BAH );
	}

	/* Program queue length and enable queue */
	writel ( ( INTELXL_ADMIN_LEN_LEN ( INTELXL_ADMIN_NUM_DESC ) |
		   INTELXL_ADMIN_LEN_ENABLE ),
		 admin_regs + INTELXL_ADMIN_LEN );

	DBGC ( intelxl, "INTELXL %p A%cQ is at [%08llx,%08llx) buf "
	       "[%08llx,%08llx)\n", intelxl,
	       ( ( admin->reg == INTELXL_ADMIN_CMD ) ? 'T' : 'R' ),
	       ( ( unsigned long long ) address ),
	       ( ( unsigned long long ) address + len ),
	       ( ( unsigned long long ) virt_to_bus ( admin->buffer ) ),
	       ( ( unsigned long long ) ( virt_to_bus ( admin->buffer ) +
					  sizeof ( admin->buffer[0] ) ) ) );
	return 0;
}
181
182 /**
183 * Destroy admin queue
184 *
185 * @v intelxl Intel device
186 * @v admin Admin queue
187 */
188 static void intelxl_destroy_admin ( struct intelxl_nic *intelxl,
189 struct intelxl_admin *admin ) {
190 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
191 void *admin_regs = ( intelxl->regs + admin->reg );
192
193 /* Disable queue */
194 writel ( 0, admin_regs + INTELXL_ADMIN_LEN );
195
196 /* Free queue */
197 free_dma ( admin->desc, ( len + sizeof ( *admin->buffer ) ) );
198 }
199
200 /**
201 * Issue admin queue command
202 *
203 * @v intelxl Intel device
204 * @v cmd Command descriptor
205 * @ret rc Return status code
206 */
207 static int intelxl_admin_command ( struct intelxl_nic *intelxl,
208 struct intelxl_admin_descriptor *cmd ) {
209 struct intelxl_admin *admin = &intelxl->command;
210 void *admin_regs = ( intelxl->regs + admin->reg );
211 struct intelxl_admin_descriptor *desc;
212 uint64_t buffer;
213 unsigned int index;
214 unsigned int tail;
215 unsigned int i;
216 int rc;
217
218 /* Get next queue entry */
219 index = admin->index++;
220 tail = ( admin->index % INTELXL_ADMIN_NUM_DESC );
221 desc = &admin->desc[index % INTELXL_ADMIN_NUM_DESC];
222
223 /* Clear must-be-zero flags */
224 cmd->flags &= ~cpu_to_le16 ( INTELXL_ADMIN_FL_DD |
225 INTELXL_ADMIN_FL_CMP |
226 INTELXL_ADMIN_FL_ERR );
227
228 /* Clear return value */
229 cmd->ret = 0;
230
231 /* Populate cookie */
232 cmd->cookie = cpu_to_le32 ( index );
233
234 /* Populate data buffer address if applicable */
235 if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
236 buffer = virt_to_bus ( admin->buffer );
237 cmd->params.buffer.high = cpu_to_le32 ( buffer >> 32 );
238 cmd->params.buffer.low = cpu_to_le32 ( buffer & 0xffffffffUL );
239 }
240
241 /* Copy command descriptor to queue entry */
242 memcpy ( desc, cmd, sizeof ( *desc ) );
243 DBGC2 ( intelxl, "INTELXL %p admin command %#x:\n", intelxl, index );
244 DBGC2_HDA ( intelxl, virt_to_phys ( desc ), desc, sizeof ( *desc ) );
245
246 /* Post command descriptor */
247 wmb();
248 writel ( tail, admin_regs + INTELXL_ADMIN_TAIL );
249
250 /* Wait for completion */
251 for ( i = 0 ; i < INTELXL_ADMIN_MAX_WAIT_MS ; i++ ) {
252
253 /* If response is not complete, delay 1ms and retry */
254 if ( ! ( desc->flags & INTELXL_ADMIN_FL_DD ) ) {
255 mdelay ( 1 );
256 continue;
257 }
258 DBGC2 ( intelxl, "INTELXL %p admin command %#x response:\n",
259 intelxl, index );
260 DBGC2_HDA ( intelxl, virt_to_phys ( desc ), desc,
261 sizeof ( *desc ) );
262
263 /* Check for cookie mismatch */
264 if ( desc->cookie != cmd->cookie ) {
265 DBGC ( intelxl, "INTELXL %p admin command %#x bad "
266 "cookie %#x\n", intelxl, index,
267 le32_to_cpu ( desc->cookie ) );
268 rc = -EPROTO;
269 goto err;
270 }
271
272 /* Check for errors */
273 if ( desc->ret != 0 ) {
274 DBGC ( intelxl, "INTELXL %p admin command %#x error "
275 "%d\n", intelxl, index,
276 le16_to_cpu ( desc->ret ) );
277 rc = -EIO;
278 goto err;
279 }
280
281 /* Copy response back to command descriptor */
282 memcpy ( cmd, desc, sizeof ( *cmd ) );
283
284 /* Success */
285 return 0;
286 }
287
288 rc = -ETIMEDOUT;
289 DBGC ( intelxl, "INTELXL %p timed out waiting for admin command %#x:\n",
290 intelxl, index );
291 err:
292 DBGC_HDA ( intelxl, virt_to_phys ( desc ), cmd, sizeof ( *cmd ) );
293 DBGC_HDA ( intelxl, virt_to_phys ( desc ), desc, sizeof ( *desc ) );
294 return rc;
295 }
296
297 /**
298 * Get firmware version
299 *
300 * @v intelxl Intel device
301 * @ret rc Return status code
302 */
303 static int intelxl_admin_version ( struct intelxl_nic *intelxl ) {
304 struct intelxl_admin_descriptor cmd;
305 struct intelxl_admin_version_params *version = &cmd.params.version;
306 unsigned int api;
307 int rc;
308
309 /* Populate descriptor */
310 memset ( &cmd, 0, sizeof ( cmd ) );
311 cmd.opcode = cpu_to_le16 ( INTELXL_ADMIN_VERSION );
312
313 /* Issue command */
314 if ( ( rc = intelxl_admin_command ( intelxl, &cmd ) ) != 0 )
315 return rc;
316 api = le16_to_cpu ( version->api.major );
317 DBGC ( intelxl, "INTELXL %p firmware v%d.%d API v%d.%d\n",
318 intelxl, le16_to_cpu ( version->firmware.major ),
319 le16_to_cpu ( version->firmware.minor ),
320 api, le16_to_cpu ( version->api.minor ) );
321
322 /* Check for API compatibility */
323 if ( api > INTELXL_ADMIN_API_MAJOR ) {
324 DBGC ( intelxl, "INTELXL %p unsupported API v%d\n",
325 intelxl, api );
326 return -ENOTSUP;
327 }
328
329 return 0;
330 }
331
332 /**
333 * Report driver version
334 *
335 * @v intelxl Intel device
336 * @ret rc Return status code
337 */
338 static int intelxl_admin_driver ( struct intelxl_nic *intelxl ) {
339 struct intelxl_admin_descriptor cmd;
340 struct intelxl_admin_driver_params *driver = &cmd.params.driver;
341 struct intelxl_admin_driver_buffer *buf =
342 &intelxl->command.buffer->driver;
343 int rc;
344
345 /* Populate descriptor */
346 memset ( &cmd, 0, sizeof ( cmd ) );
347 cmd.opcode = cpu_to_le16 ( INTELXL_ADMIN_DRIVER );
348 cmd.flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF );
349 cmd.len = cpu_to_le16 ( sizeof ( *buf ) );
350 driver->major = product_major_version;
351 driver->minor = product_minor_version;
352 snprintf ( buf->name, sizeof ( buf->name ), "%s",
353 ( product_name[0] ? product_name : product_short_name ) );
354
355 /* Issue command */
356 if ( ( rc = intelxl_admin_command ( intelxl, &cmd ) ) != 0 )
357 return rc;
358
359 return 0;
360 }
361
362 /**
363 * Shutdown admin queues
364 *
365 * @v intelxl Intel device
366 * @ret rc Return status code
367 */
368 static int intelxl_admin_shutdown ( struct intelxl_nic *intelxl ) {
369 struct intelxl_admin_descriptor cmd;
370 struct intelxl_admin_shutdown_params *shutdown = &cmd.params.shutdown;
371 int rc;
372
373 /* Populate descriptor */
374 memset ( &cmd, 0, sizeof ( cmd ) );
375 cmd.opcode = cpu_to_le16 ( INTELXL_ADMIN_SHUTDOWN );
376 shutdown->unloading = INTELXL_ADMIN_SHUTDOWN_UNLOADING;
377
378 /* Issue command */
379 if ( ( rc = intelxl_admin_command ( intelxl, &cmd ) ) != 0 )
380 return rc;
381
382 return 0;
383 }
384
385 /**
386 * Get switch configuration
387 *
388 * @v intelxl Intel device
389 * @ret rc Return status code
390 */
391 static int intelxl_admin_switch ( struct intelxl_nic *intelxl ) {
392 struct intelxl_admin_descriptor cmd;
393 struct intelxl_admin_switch_params *sw = &cmd.params.sw;
394 struct intelxl_admin_switch_buffer *buf = &intelxl->command.buffer->sw;
395 struct intelxl_admin_switch_config *cfg = &buf->cfg;
396 int rc;
397
398 /* Populate descriptor */
399 memset ( &cmd, 0, sizeof ( cmd ) );
400 cmd.opcode = cpu_to_le16 ( INTELXL_ADMIN_SWITCH );
401 cmd.flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
402 cmd.len = cpu_to_le16 ( sizeof ( *buf ) );
403
404 /* Get each configuration in turn */
405 do {
406 /* Issue command */
407 if ( ( rc = intelxl_admin_command ( intelxl, &cmd ) ) != 0 )
408 return rc;
409
410 /* Dump raw configuration */
411 DBGC2 ( intelxl, "INTELXL %p SEID %#04x:\n",
412 intelxl, le16_to_cpu ( cfg->seid ) );
413 DBGC2_HDA ( intelxl, 0, cfg, sizeof ( *cfg ) );
414
415 /* Parse response */
416 if ( cfg->type == INTELXL_ADMIN_SWITCH_TYPE_VSI ) {
417 intelxl->vsi = le16_to_cpu ( cfg->seid );
418 DBGC ( intelxl, "INTELXL %p VSI %#04x uplink %#04x "
419 "downlink %#04x conn %#02x\n", intelxl,
420 intelxl->vsi, le16_to_cpu ( cfg->uplink ),
421 le16_to_cpu ( cfg->downlink ), cfg->connection );
422 }
423
424 } while ( sw->next );
425
426 /* Check that we found a VSI */
427 if ( ! intelxl->vsi ) {
428 DBGC ( intelxl, "INTELXL %p has no VSI\n", intelxl );
429 return -ENOENT;
430 }
431
432 return 0;
433 }
434
435 /**
436 * Get VSI parameters
437 *
438 * @v intelxl Intel device
439 * @ret rc Return status code
440 */
441 static int intelxl_admin_vsi ( struct intelxl_nic *intelxl ) {
442 struct intelxl_admin_descriptor cmd;
443 struct intelxl_admin_vsi_params *vsi = &cmd.params.vsi;
444 struct intelxl_admin_vsi_buffer *buf = &intelxl->command.buffer->vsi;
445 int rc;
446
447 /* Populate descriptor */
448 memset ( &cmd, 0, sizeof ( cmd ) );
449 cmd.opcode = cpu_to_le16 ( INTELXL_ADMIN_VSI );
450 cmd.flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
451 cmd.len = cpu_to_le16 ( sizeof ( *buf ) );
452 vsi->vsi = cpu_to_le16 ( intelxl->vsi );
453
454 /* Issue command */
455 if ( ( rc = intelxl_admin_command ( intelxl, &cmd ) ) != 0 )
456 return rc;
457
458 /* Parse response */
459 intelxl->queue = le16_to_cpu ( buf->queue[0] );
460 intelxl->qset = le16_to_cpu ( buf->qset[0] );
461 DBGC ( intelxl, "INTELXL %p VSI %#04x queue %#04x qset %#04x\n",
462 intelxl, intelxl->vsi, intelxl->queue, intelxl->qset );
463
464 return 0;
465 }
466
467 /**
468 * Set VSI promiscuous modes
469 *
470 * @v intelxl Intel device
471 * @ret rc Return status code
472 */
473 static int intelxl_admin_promisc ( struct intelxl_nic *intelxl ) {
474 struct intelxl_admin_descriptor cmd;
475 struct intelxl_admin_promisc_params *promisc = &cmd.params.promisc;
476 uint16_t flags;
477 int rc;
478
479 /* Populate descriptor */
480 memset ( &cmd, 0, sizeof ( cmd ) );
481 cmd.opcode = cpu_to_le16 ( INTELXL_ADMIN_PROMISC );
482 flags = ( INTELXL_ADMIN_PROMISC_FL_UNICAST |
483 INTELXL_ADMIN_PROMISC_FL_MULTICAST |
484 INTELXL_ADMIN_PROMISC_FL_BROADCAST |
485 INTELXL_ADMIN_PROMISC_FL_VLAN );
486 promisc->flags = cpu_to_le16 ( flags );
487 promisc->valid = cpu_to_le16 ( flags );
488 promisc->vsi = cpu_to_le16 ( intelxl->vsi );
489
490 /* Issue command */
491 if ( ( rc = intelxl_admin_command ( intelxl, &cmd ) ) != 0 )
492 return rc;
493
494 return 0;
495 }
496
497 /**
498 * Restart autonegotiation
499 *
500 * @v intelxl Intel device
501 * @ret rc Return status code
502 */
503 static int intelxl_admin_autoneg ( struct intelxl_nic *intelxl ) {
504 struct intelxl_admin_descriptor cmd;
505 struct intelxl_admin_autoneg_params *autoneg = &cmd.params.autoneg;
506 int rc;
507
508 /* Populate descriptor */
509 memset ( &cmd, 0, sizeof ( cmd ) );
510 cmd.opcode = cpu_to_le16 ( INTELXL_ADMIN_AUTONEG );
511 autoneg->flags = ( INTELXL_ADMIN_AUTONEG_FL_RESTART |
512 INTELXL_ADMIN_AUTONEG_FL_ENABLE );
513
514 /* Issue command */
515 if ( ( rc = intelxl_admin_command ( intelxl, &cmd ) ) != 0 )
516 return rc;
517
518 return 0;
519 }
520
521 /**
522 * Get link status
523 *
524 * @v netdev Network device
525 * @ret rc Return status code
526 */
527 static int intelxl_admin_link ( struct net_device *netdev ) {
528 struct intelxl_nic *intelxl = netdev->priv;
529 struct intelxl_admin_descriptor cmd;
530 struct intelxl_admin_link_params *link = &cmd.params.link;
531 int rc;
532
533 /* Populate descriptor */
534 memset ( &cmd, 0, sizeof ( cmd ) );
535 cmd.opcode = cpu_to_le16 ( INTELXL_ADMIN_LINK );
536 link->notify = INTELXL_ADMIN_LINK_NOTIFY;
537
538 /* Issue command */
539 if ( ( rc = intelxl_admin_command ( intelxl, &cmd ) ) != 0 )
540 return rc;
541 DBGC ( intelxl, "INTELXL %p PHY %#02x speed %#02x status %#02x\n",
542 intelxl, link->phy, link->speed, link->status );
543
544 /* Update network device */
545 if ( link->status & INTELXL_ADMIN_LINK_UP ) {
546 netdev_link_up ( netdev );
547 } else {
548 netdev_link_down ( netdev );
549 }
550
551 return 0;
552 }
553
554 /**
555 * Refill admin event queue
556 *
557 * @v intelxl Intel device
558 */
559 static void intelxl_refill_admin ( struct intelxl_nic *intelxl ) {
560 struct intelxl_admin *admin = &intelxl->event;
561 void *admin_regs = ( intelxl->regs + admin->reg );
562 unsigned int tail;
563
564 /* Update tail pointer */
565 tail = ( ( admin->index + INTELXL_ADMIN_NUM_DESC - 1 ) %
566 INTELXL_ADMIN_NUM_DESC );
567 writel ( tail, admin_regs + INTELXL_ADMIN_TAIL );
568 }
569
570 /**
571 * Poll admin event queue
572 *
573 * @v netdev Network device
574 */
575 static void intelxl_poll_admin ( struct net_device *netdev ) {
576 struct intelxl_nic *intelxl = netdev->priv;
577 struct intelxl_admin *admin = &intelxl->event;
578 struct intelxl_admin_descriptor *desc;
579
580 /* Check for events */
581 while ( 1 ) {
582
583 /* Get next event descriptor */
584 desc = &admin->desc[admin->index % INTELXL_ADMIN_NUM_DESC];
585
586 /* Stop if descriptor is not yet completed */
587 if ( ! ( desc->flags & INTELXL_ADMIN_FL_DD ) )
588 return;
589 DBGC2 ( intelxl, "INTELXL %p admin event %#x:\n",
590 intelxl, admin->index );
591 DBGC2_HDA ( intelxl, virt_to_phys ( desc ), desc,
592 sizeof ( *desc ) );
593
594 /* Handle event */
595 switch ( desc->opcode ) {
596 case cpu_to_le16 ( INTELXL_ADMIN_LINK ):
597 intelxl_admin_link ( netdev );
598 break;
599 default:
600 DBGC ( intelxl, "INTELXL %p admin event %#x "
601 "unrecognised opcode %#04x\n", intelxl,
602 admin->index, le16_to_cpu ( desc->opcode ) );
603 break;
604 }
605
606 /* Clear event completion flag */
607 desc->flags = 0;
608 wmb();
609
610 /* Update index and refill queue */
611 admin->index++;
612 intelxl_refill_admin ( intelxl );
613 }
614 }
615
616 /**
617 * Open admin queues
618 *
619 * @v intelxl Intel device
620 * @ret rc Return status code
621 */
622 static int intelxl_open_admin ( struct intelxl_nic *intelxl ) {
623 int rc;
624
625 /* Create admin event queue */
626 if ( ( rc = intelxl_create_admin ( intelxl, &intelxl->event ) ) != 0 )
627 goto err_create_event;
628
629 /* Create admin command queue */
630 if ( ( rc = intelxl_create_admin ( intelxl, &intelxl->command ) ) != 0 )
631 goto err_create_command;
632
633 /* Post all descriptors to event queue */
634 intelxl_refill_admin ( intelxl );
635
636 /* Get firmware version */
637 if ( ( rc = intelxl_admin_version ( intelxl ) ) != 0 )
638 goto err_version;
639
640 /* Report driver version */
641 if ( ( rc = intelxl_admin_driver ( intelxl ) ) != 0 )
642 goto err_driver;
643
644 return 0;
645
646 err_driver:
647 err_version:
648 intelxl_destroy_admin ( intelxl, &intelxl->command );
649 err_create_command:
650 intelxl_destroy_admin ( intelxl, &intelxl->event );
651 err_create_event:
652 return rc;
653 }
654
655 /**
656 * Close admin queues
657 *
658 * @v intelxl Intel device
659 */
660 static void intelxl_close_admin ( struct intelxl_nic *intelxl ) {
661
662 /* Shut down admin queues */
663 intelxl_admin_shutdown ( intelxl );
664
665 /* Destroy admin command queue */
666 intelxl_destroy_admin ( intelxl, &intelxl->command );
667
668 /* Destroy admin event queue */
669 intelxl_destroy_admin ( intelxl, &intelxl->event );
670 }
671
672 /******************************************************************************
673 *
674 * Descriptor rings
675 *
676 ******************************************************************************
677 */
678
/**
 * Dump queue context (for debugging)
 *
 * Reads back the queue context via the PFCM_LANCTX registers and
 * hex-dumps it.  Compiled in but unused unless called from debug code.
 *
 * @v intelxl		Intel device
 * @v op		Context operation
 * @v len		Size of context
 */
static __attribute__ (( unused )) void
intelxl_context_dump ( struct intelxl_nic *intelxl, uint32_t op, size_t len ) {
	struct intelxl_context_line line;
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int index;
	unsigned int i;

	/* Do nothing unless debug output is enabled */
	if ( ! DBG_EXTRA )
		return;

	/* Dump context, one context line (sub-line) per iteration */
	DBGC2 ( intelxl, "INTELXL %p context %#08x:\n", intelxl, op );
	for ( index = 0 ; ( sizeof ( line ) * index ) < len ; index++ ) {

		/* Start a read operation for this sub-line */
		queue = ( intelxl->base + intelxl->queue );
		pfcm_lanctxctl =
			( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			  INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			  INTELXL_PFCM_LANCTXCTL_OP_CODE_READ | op );
		writel ( pfcm_lanctxctl,
			 intelxl->regs + INTELXL_PFCM_LANCTXCTL );

		/* Wait for operation to complete.  This is a
		 * best-effort debug path: on timeout we simply fall
		 * through and dump whatever the data registers hold.
		 */
		for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

			/* Check if operation is complete */
			pfcm_lanctxstat = readl ( intelxl->regs +
						  INTELXL_PFCM_LANCTXSTAT );
			if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
				break;

			/* Delay */
			mdelay ( 1 );
		}

		/* Read back context data words for this line */
		for ( i = 0 ; i < ( sizeof ( line ) /
				    sizeof ( line.raw[0] ) ) ; i++ ) {
			line.raw[i] = readl ( intelxl->regs +
					      INTELXL_PFCM_LANCTXDATA ( i ) );
		}
		DBGC2_HDA ( intelxl, ( sizeof ( line ) * index ),
			    &line, sizeof ( line ) );
	}
}
735
/**
 * Program queue context line
 *
 * @v intelxl		Intel device
 * @v line		Queue context line
 * @v index		Line number
 * @v op		Context operation
 * @ret rc		Return status code
 */
static int intelxl_context_line ( struct intelxl_nic *intelxl,
				  struct intelxl_context_line *line,
				  unsigned int index, uint32_t op ) {
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int i;

	/* Write context data words into the staging registers.
	 * NOTE(review): le32_to_cpu() here appears intended to
	 * cancel out the byte swap applied by writel() on big-endian
	 * hosts, since the context line is stored little-endian —
	 * confirm against iPXE's I/O API semantics.
	 */
	for ( i = 0; i < ( sizeof ( *line ) / sizeof ( line->raw[0] ) ); i++ ) {
		writel ( le32_to_cpu ( line->raw[i] ),
			 intelxl->regs + INTELXL_PFCM_LANCTXDATA ( i ) );
	}

	/* Start a write operation for this sub-line of the context */
	queue = ( intelxl->base + intelxl->queue );
	pfcm_lanctxctl = ( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			   INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			   INTELXL_PFCM_LANCTXCTL_OP_CODE_WRITE | op );
	writel ( pfcm_lanctxctl, intelxl->regs + INTELXL_PFCM_LANCTXCTL );

	/* Wait up to INTELXL_CTX_MAX_WAIT_MS for the DONE status */
	for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

		/* Check if operation is complete */
		pfcm_lanctxstat = readl ( intelxl->regs +
					  INTELXL_PFCM_LANCTXSTAT );
		if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( intelxl, "INTELXL %p timed out waiting for context: %#08x\n",
	       intelxl, pfcm_lanctxctl );
	return -ETIMEDOUT;
}
783
784 /**
785 * Program queue context
786 *
787 * @v intelxl Intel device
788 * @v line Queue context lines
789 * @v len Size of context
790 * @v op Context operation
791 * @ret rc Return status code
792 */
793 static int intelxl_context ( struct intelxl_nic *intelxl,
794 struct intelxl_context_line *line,
795 size_t len, uint32_t op ) {
796 unsigned int index;
797 int rc;
798
799 DBGC2 ( intelxl, "INTELXL %p context %#08x len %#zx:\n",
800 intelxl, op, len );
801 DBGC2_HDA ( intelxl, 0, line, len );
802
803 /* Program one line at a time */
804 for ( index = 0 ; ( sizeof ( *line ) * index ) < len ; index++ ) {
805 if ( ( rc = intelxl_context_line ( intelxl, line++, index,
806 op ) ) != 0 )
807 return rc;
808 }
809
810 return 0;
811 }
812
813 /**
814 * Program transmit queue context
815 *
816 * @v intelxl Intel device
817 * @v address Descriptor ring base address
818 * @ret rc Return status code
819 */
820 static int intelxl_context_tx ( struct intelxl_nic *intelxl,
821 physaddr_t address ) {
822 union {
823 struct intelxl_context_tx tx;
824 struct intelxl_context_line line;
825 } ctx;
826 int rc;
827
828 /* Initialise context */
829 memset ( &ctx, 0, sizeof ( ctx ) );
830 ctx.tx.flags = cpu_to_le16 ( INTELXL_CTX_TX_FL_NEW );
831 ctx.tx.base = cpu_to_le64 ( INTELXL_CTX_TX_BASE ( address ) );
832 ctx.tx.count =
833 cpu_to_le16 ( INTELXL_CTX_TX_COUNT ( INTELXL_TX_NUM_DESC ) );
834 ctx.tx.qset = INTELXL_CTX_TX_QSET ( intelxl->qset );
835
836 /* Program context */
837 if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
838 INTELXL_PFCM_LANCTXCTL_TYPE_TX ) ) != 0 )
839 return rc;
840
841 return 0;
842 }
843
844 /**
845 * Program receive queue context
846 *
847 * @v intelxl Intel device
848 * @v address Descriptor ring base address
849 * @ret rc Return status code
850 */
851 static int intelxl_context_rx ( struct intelxl_nic *intelxl,
852 physaddr_t address ) {
853 union {
854 struct intelxl_context_rx rx;
855 struct intelxl_context_line line;
856 } ctx;
857 uint64_t base_count;
858 int rc;
859
860 /* Initialise context */
861 memset ( &ctx, 0, sizeof ( ctx ) );
862 base_count = INTELXL_CTX_RX_BASE_COUNT ( address, INTELXL_RX_NUM_DESC );
863 ctx.rx.base_count = cpu_to_le64 ( base_count );
864 ctx.rx.len = cpu_to_le16 ( INTELXL_CTX_RX_LEN ( intelxl->mfs ) );
865 ctx.rx.flags = INTELXL_CTX_RX_FL_CRCSTRIP;
866 ctx.rx.mfs = cpu_to_le16 ( INTELXL_CTX_RX_MFS ( intelxl->mfs ) );
867
868 /* Program context */
869 if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
870 INTELXL_PFCM_LANCTXCTL_TYPE_RX ) ) != 0 )
871 return rc;
872
873 return 0;
874 }
875
876 /**
877 * Enable descriptor ring
878 *
879 * @v intelxl Intel device
880 * @v ring Descriptor ring
881 * @ret rc Return status code
882 */
883 static int intelxl_enable_ring ( struct intelxl_nic *intelxl,
884 struct intelxl_ring *ring ) {
885 void *ring_regs = ( intelxl->regs + ring->reg );
886 uint32_t qxx_ena;
887
888 /* Enable ring */
889 writel ( INTELXL_QXX_ENA_REQ, ( ring_regs + INTELXL_QXX_ENA ) );
890 udelay ( INTELXL_QUEUE_ENABLE_DELAY_US );
891 qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
892 if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) ) {
893 DBGC ( intelxl, "INTELXL %p ring %06x failed to enable: "
894 "%#08x\n", intelxl, ring->reg, qxx_ena );
895 return -EIO;
896 }
897
898 return 0;
899 }
900
901 /**
902 * Disable descriptor ring
903 *
904 * @v intelxl Intel device
905 * @v ring Descriptor ring
906 * @ret rc Return status code
907 */
908 static int intelxl_disable_ring ( struct intelxl_nic *intelxl,
909 struct intelxl_ring *ring ) {
910 void *ring_regs = ( intelxl->regs + ring->reg );
911 uint32_t qxx_ena;
912 unsigned int i;
913
914 /* Disable ring */
915 writel ( 0, ( ring_regs + INTELXL_QXX_ENA ) );
916
917 /* Wait for ring to be disabled */
918 for ( i = 0 ; i < INTELXL_QUEUE_DISABLE_MAX_WAIT_MS ; i++ ) {
919
920 /* Check if ring is disabled */
921 qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
922 if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) )
923 return 0;
924
925 /* Delay */
926 mdelay ( 1 );
927 }
928
929 DBGC ( intelxl, "INTELXL %p ring %06x timed out waiting for disable: "
930 "%#08x\n", intelxl, ring->reg, qxx_ena );
931 return -ETIMEDOUT;
932 }
933
/**
 * Create descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_create_ring ( struct intelxl_nic *intelxl,
				 struct intelxl_ring *ring ) {
	void *ring_regs = ( intelxl->regs + ring->reg );
	physaddr_t address;
	int rc;

	/* Allocate descriptor ring */
	ring->desc = malloc_dma ( ring->len, INTELXL_ALIGN );
	if ( ! ring->desc ) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	/* Initialise descriptor ring */
	memset ( ring->desc, 0, ring->len );

	/* Reset tail pointer */
	writel ( 0, ( ring_regs + INTELXL_QXX_TAIL ) );

	/* Program queue context via the ring's type-specific method
	 * (intelxl_context_tx or intelxl_context_rx).
	 */
	address = virt_to_bus ( ring->desc );
	if ( ( rc = ring->context ( intelxl, address ) ) != 0 )
		goto err_context;

	/* Enable ring */
	if ( ( rc = intelxl_enable_ring ( intelxl, ring ) ) != 0 )
		goto err_enable;

	/* Reset counters */
	ring->prod = 0;
	ring->cons = 0;

	DBGC ( intelxl, "INTELXL %p ring %06x is at [%08llx,%08llx)\n",
	       intelxl, ring->reg, ( ( unsigned long long ) address ),
	       ( ( unsigned long long ) address + ring->len ) );

	return 0;

	/* Unreachable on the success path: this call documents the
	 * teardown step corresponding to the err_enable label below.
	 */
	intelxl_disable_ring ( intelxl, ring );
 err_enable:
 err_context:
	free_dma ( ring->desc, ring->len );
 err_alloc:
	return rc;
}
986
987 /**
988 * Destroy descriptor ring
989 *
990 * @v intelxl Intel device
991 * @v ring Descriptor ring
992 */
993 static void intelxl_destroy_ring ( struct intelxl_nic *intelxl,
994 struct intelxl_ring *ring ) {
995 int rc;
996
997 /* Disable ring */
998 if ( ( rc = intelxl_disable_ring ( intelxl, ring ) ) != 0 ) {
999 /* Leak memory; there's nothing else we can do */
1000 return;
1001 }
1002
1003 /* Free descriptor ring */
1004 free_dma ( ring->desc, ring->len );
1005 ring->desc = NULL;
1006 }
1007
1008 /**
1009 * Refill receive descriptor ring
1010 *
1011 * @v intelxl Intel device
1012 */
1013 static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
1014 struct intelxl_rx_data_descriptor *rx;
1015 struct io_buffer *iobuf;
1016 unsigned int rx_idx;
1017 unsigned int rx_tail;
1018 physaddr_t address;
1019 unsigned int refilled = 0;
1020
1021 /* Refill ring */
1022 while ( ( intelxl->rx.prod - intelxl->rx.cons ) < INTELXL_RX_FILL ) {
1023
1024 /* Allocate I/O buffer */
1025 iobuf = alloc_iob ( intelxl->mfs );
1026 if ( ! iobuf ) {
1027 /* Wait for next refill */
1028 break;
1029 }
1030
1031 /* Get next receive descriptor */
1032 rx_idx = ( intelxl->rx.prod++ % INTELXL_RX_NUM_DESC );
1033 rx = &intelxl->rx.desc[rx_idx].rx;
1034
1035 /* Populate receive descriptor */
1036 address = virt_to_bus ( iobuf->data );
1037 rx->address = cpu_to_le64 ( address );
1038 rx->flags = 0;
1039
1040 /* Record I/O buffer */
1041 assert ( intelxl->rx_iobuf[rx_idx] == NULL );
1042 intelxl->rx_iobuf[rx_idx] = iobuf;
1043
1044 DBGC2 ( intelxl, "INTELXL %p RX %d is [%llx,%llx)\n", intelxl,
1045 rx_idx, ( ( unsigned long long ) address ),
1046 ( ( unsigned long long ) address + intelxl->mfs ) );
1047 refilled++;
1048 }
1049
1050 /* Push descriptors to card, if applicable */
1051 if ( refilled ) {
1052 wmb();
1053 rx_tail = ( intelxl->rx.prod % INTELXL_RX_NUM_DESC );
1054 writel ( rx_tail,
1055 ( intelxl->regs + intelxl->rx.reg + INTELXL_QXX_TAIL));
1056 }
1057 }
1058
1059 /******************************************************************************
1060 *
1061 * Network device interface
1062 *
1063 ******************************************************************************
1064 */
1065
/**
 * Open network device
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int intelxl_open ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	union intelxl_receive_address mac;
	unsigned int queue;
	uint32_t prtgl_sal;
	uint32_t prtgl_sah;
	int rc;

	/* Calculate maximum frame size: MTU plus Ethernet header and
	 * CRC, rounded up to the device alignment.
	 */
	intelxl->mfs = ( ( ETH_HLEN + netdev->mtu + 4 /* CRC */ +
			   INTELXL_ALIGN - 1 ) & ~( INTELXL_ALIGN - 1 ) );

	/* Program MAC address and maximum frame size */
	memset ( &mac, 0, sizeof ( mac ) );
	memcpy ( mac.raw, netdev->ll_addr, sizeof ( mac.raw ) );
	prtgl_sal = le32_to_cpu ( mac.reg.low );
	prtgl_sah = ( le32_to_cpu ( mac.reg.high ) |
		      INTELXL_PRTGL_SAH_MFS_SET ( intelxl->mfs ) );
	writel ( prtgl_sal, intelxl->regs + INTELXL_PRTGL_SAL );
	writel ( prtgl_sah, intelxl->regs + INTELXL_PRTGL_SAH );

	/* Associate transmit queue to PF */
	writel ( ( INTELXL_QXX_CTL_PFVF_Q_PF |
		   INTELXL_QXX_CTL_PFVF_PF_INDX ( intelxl->pf ) ),
		 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_CTL ) );

	/* Clear transmit pre queue disable */
	queue = ( intelxl->base + intelxl->queue );
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_CLEAR_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );

	/* Reset transmit queue head */
	writel ( 0, ( intelxl->regs + INTELXL_QTX_HEAD ( intelxl->queue ) ) );

	/* Create receive descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->rx ) ) != 0 )
		goto err_create_rx;

	/* Create transmit descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->tx ) ) != 0 )
		goto err_create_tx;

	/* Fill receive ring */
	intelxl_refill_rx ( intelxl );

	/* Restart autonegotiation (best-effort; return value ignored) */
	intelxl_admin_autoneg ( intelxl );

	/* Update link state (best-effort; return value ignored) */
	intelxl_admin_link ( netdev );

	return 0;

	/* Unreachable on the success path: these statements document
	 * the transmit pre-queue-disable teardown corresponding to
	 * the error labels below.
	 */
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
	udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
	intelxl_destroy_ring ( intelxl, &intelxl->tx );
 err_create_tx:
	intelxl_destroy_ring ( intelxl, &intelxl->rx );
 err_create_rx:
	return rc;
}
1136
1137 /**
1138 * Close network device
1139 *
1140 * @v netdev Network device
1141 */
1142 static void intelxl_close ( struct net_device *netdev ) {
1143 struct intelxl_nic *intelxl = netdev->priv;
1144 unsigned int queue;
1145 unsigned int i;
1146
1147 /* Dump contexts (for debugging) */
1148 intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_TX,
1149 sizeof ( struct intelxl_context_tx ) );
1150 intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_RX,
1151 sizeof ( struct intelxl_context_rx ) );
1152
1153 /* Pre-disable transmit queue */
1154 queue = ( intelxl->base + intelxl->queue );
1155 writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
1156 INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
1157 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
1158 udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
1159
1160 /* Destroy transmit descriptor ring */
1161 intelxl_destroy_ring ( intelxl, &intelxl->tx );
1162
1163 /* Destroy receive descriptor ring */
1164 intelxl_destroy_ring ( intelxl, &intelxl->rx );
1165
1166 /* Discard any unused receive buffers */
1167 for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) {
1168 if ( intelxl->rx_iobuf[i] )
1169 free_iob ( intelxl->rx_iobuf[i] );
1170 intelxl->rx_iobuf[i] = NULL;
1171 }
1172 }
1173
1174 /**
1175 * Transmit packet
1176 *
1177 * @v netdev Network device
1178 * @v iobuf I/O buffer
1179 * @ret rc Return status code
1180 */
1181 static int intelxl_transmit ( struct net_device *netdev,
1182 struct io_buffer *iobuf ) {
1183 struct intelxl_nic *intelxl = netdev->priv;
1184 struct intelxl_tx_data_descriptor *tx;
1185 unsigned int tx_idx;
1186 unsigned int tx_tail;
1187 physaddr_t address;
1188 size_t len;
1189
1190 /* Get next transmit descriptor */
1191 if ( ( intelxl->tx.prod - intelxl->tx.cons ) >= INTELXL_TX_FILL ) {
1192 DBGC ( intelxl, "INTELXL %p out of transmit descriptors\n",
1193 intelxl );
1194 return -ENOBUFS;
1195 }
1196 tx_idx = ( intelxl->tx.prod++ % INTELXL_TX_NUM_DESC );
1197 tx_tail = ( intelxl->tx.prod % INTELXL_TX_NUM_DESC );
1198 tx = &intelxl->tx.desc[tx_idx].tx;
1199
1200 /* Populate transmit descriptor */
1201 address = virt_to_bus ( iobuf->data );
1202 len = iob_len ( iobuf );
1203 tx->address = cpu_to_le64 ( address );
1204 tx->len = cpu_to_le32 ( INTELXL_TX_DATA_LEN ( len ) );
1205 tx->flags = cpu_to_le32 ( INTELXL_TX_DATA_DTYP | INTELXL_TX_DATA_EOP |
1206 INTELXL_TX_DATA_RS | INTELXL_TX_DATA_JFDI );
1207 wmb();
1208
1209 /* Notify card that there are packets ready to transmit */
1210 writel ( tx_tail,
1211 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_TAIL ) );
1212
1213 DBGC2 ( intelxl, "INTELXL %p TX %d is [%llx,%llx)\n", intelxl, tx_idx,
1214 ( ( unsigned long long ) address ),
1215 ( ( unsigned long long ) address + len ) );
1216 return 0;
1217 }
1218
1219 /**
1220 * Poll for completed packets
1221 *
1222 * @v netdev Network device
1223 */
1224 static void intelxl_poll_tx ( struct net_device *netdev ) {
1225 struct intelxl_nic *intelxl = netdev->priv;
1226 struct intelxl_tx_writeback_descriptor *tx_wb;
1227 unsigned int tx_idx;
1228
1229 /* Check for completed packets */
1230 while ( intelxl->tx.cons != intelxl->tx.prod ) {
1231
1232 /* Get next transmit descriptor */
1233 tx_idx = ( intelxl->tx.cons % INTELXL_TX_NUM_DESC );
1234 tx_wb = &intelxl->tx.desc[tx_idx].tx_wb;
1235
1236 /* Stop if descriptor is still in use */
1237 if ( ! ( tx_wb->flags & INTELXL_TX_WB_FL_DD ) )
1238 return;
1239 DBGC2 ( intelxl, "INTELXL %p TX %d complete\n",
1240 intelxl, tx_idx );
1241
1242 /* Complete TX descriptor */
1243 netdev_tx_complete_next ( netdev );
1244 intelxl->tx.cons++;
1245 }
1246 }
1247
1248 /**
1249 * Poll for received packets
1250 *
1251 * @v netdev Network device
1252 */
1253 static void intelxl_poll_rx ( struct net_device *netdev ) {
1254 struct intelxl_nic *intelxl = netdev->priv;
1255 struct intelxl_rx_writeback_descriptor *rx_wb;
1256 struct io_buffer *iobuf;
1257 unsigned int rx_idx;
1258 unsigned int tag;
1259 size_t len;
1260
1261 /* Check for received packets */
1262 while ( intelxl->rx.cons != intelxl->rx.prod ) {
1263
1264 /* Get next receive descriptor */
1265 rx_idx = ( intelxl->rx.cons % INTELXL_RX_NUM_DESC );
1266 rx_wb = &intelxl->rx.desc[rx_idx].rx_wb;
1267
1268 /* Stop if descriptor is still in use */
1269 if ( ! ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_DD ) ) )
1270 return;
1271
1272 /* Populate I/O buffer */
1273 iobuf = intelxl->rx_iobuf[rx_idx];
1274 intelxl->rx_iobuf[rx_idx] = NULL;
1275 len = INTELXL_RX_WB_LEN ( le32_to_cpu ( rx_wb->len ) );
1276 iob_put ( iobuf, len );
1277
1278 /* Find VLAN device, if applicable */
1279 if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_VLAN ) ) {
1280 tag = VLAN_TAG ( le16_to_cpu ( rx_wb->vlan ) );
1281 } else {
1282 tag = 0;
1283 }
1284
1285 /* Hand off to network stack */
1286 if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_RXE ) ) {
1287 DBGC ( intelxl, "INTELXL %p RX %d error (length %zd, "
1288 "flags %08x)\n", intelxl, rx_idx, len,
1289 le32_to_cpu ( rx_wb->flags ) );
1290 vlan_netdev_rx_err ( netdev, tag, iobuf, -EIO );
1291 } else {
1292 DBGC2 ( intelxl, "INTELXL %p RX %d complete (length "
1293 "%zd)\n", intelxl, rx_idx, len );
1294 vlan_netdev_rx ( netdev, tag, iobuf );
1295 }
1296 intelxl->rx.cons++;
1297 }
1298 }
1299
/**
 * Poll for completed and received packets
 *
 * Top-level poll routine: acknowledges any pending interrupt, then
 * services the transmit ring, receive ring, and admin event queue,
 * and finally refills the receive ring.
 *
 * @v netdev		Network device
 */
static void intelxl_poll ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;

	/* Acknowledge interrupts, if applicable.  NOTE(review):
	 * writing CLEARPBA+INTENA_MASK appears to clear the pending
	 * bit and rearm the interrupt — confirm against the XL710
	 * datasheet register description.
	 */
	if ( netdev_irq_enabled ( netdev ) ) {
		writel ( ( INTELXL_PFINT_DYN_CTL0_CLEARPBA |
			   INTELXL_PFINT_DYN_CTL0_INTENA_MASK ),
			 intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
	}

	/* Poll for completed packets */
	intelxl_poll_tx ( netdev );

	/* Poll for received packets */
	intelxl_poll_rx ( netdev );

	/* Poll for admin events */
	intelxl_poll_admin ( netdev );

	/* Refill RX ring (after polling, so freshly-consumed
	 * descriptors can be repopulated immediately)
	 */
	intelxl_refill_rx ( intelxl );
}
1327
1328 /**
1329 * Enable or disable interrupts
1330 *
1331 * @v netdev Network device
1332 * @v enable Interrupts should be enabled
1333 */
1334 static void intelxl_irq ( struct net_device *netdev, int enable ) {
1335 struct intelxl_nic *intelxl = netdev->priv;
1336
1337 if ( enable ) {
1338 writel ( INTELXL_PFINT_DYN_CTL0_INTENA,
1339 intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
1340 } else {
1341 writel ( 0, intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
1342 }
1343 }
1344
/** Network device operations
 *
 * Wires this driver's entry points into the iPXE netdevice core.
 */
static struct net_device_operations intelxl_operations = {
	.open		= intelxl_open,		/* bring up queues */
	.close		= intelxl_close,	/* tear down queues */
	.transmit	= intelxl_transmit,	/* enqueue one packet */
	.poll		= intelxl_poll,		/* service TX/RX/admin */
	.irq		= intelxl_irq,		/* (un)mask interrupts */
};
1353
1354 /******************************************************************************
1355 *
1356 * PCI interface
1357 *
1358 ******************************************************************************
1359 */
1360
/**
 * Probe PCI device
 *
 * Allocates the network device, maps the BAR, resets the NIC,
 * discovers the port/queue assignment via registers and admin
 * queue commands, configures interrupt causes, and registers the
 * device with the iPXE network stack.
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int intelxl_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct intelxl_nic *intelxl;
	uint32_t pfgen_portnum;
	uint32_t pflan_qalloc;
	int rc;

	/* Allocate and initialise net device */
	netdev = alloc_etherdev ( sizeof ( *intelxl ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev_init ( netdev, &intelxl_operations );
	intelxl = netdev->priv;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( intelxl, 0, sizeof ( *intelxl ) );
	/* PF index is derived from the PCI function number */
	intelxl->pf = PCI_FUNC ( pci->busdevfn );
	intelxl_init_admin ( &intelxl->command, INTELXL_ADMIN_CMD );
	intelxl_init_admin ( &intelxl->event, INTELXL_ADMIN_EVT );
	intelxl_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC,
			    intelxl_context_tx );
	intelxl_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC,
			    intelxl_context_rx );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	intelxl->regs = ioremap ( pci->membase, INTELXL_BAR_SIZE );
	if ( ! intelxl->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Reset the NIC */
	if ( ( rc = intelxl_reset ( intelxl ) ) != 0 )
		goto err_reset;

	/* Get port number and base queue number from registers */
	pfgen_portnum = readl ( intelxl->regs + INTELXL_PFGEN_PORTNUM );
	intelxl->port = INTELXL_PFGEN_PORTNUM_PORT_NUM ( pfgen_portnum );
	pflan_qalloc = readl ( intelxl->regs + INTELXL_PFLAN_QALLOC );
	intelxl->base = INTELXL_PFLAN_QALLOC_FIRSTQ ( pflan_qalloc );
	DBGC ( intelxl, "INTELXL %p PF %d using port %d queues [%#04x-%#04x]\n",
	       intelxl, intelxl->pf, intelxl->port, intelxl->base,
	       INTELXL_PFLAN_QALLOC_LASTQ ( pflan_qalloc ) );

	/* Fetch MAC address and maximum frame size */
	if ( ( rc = intelxl_fetch_mac ( intelxl, netdev ) ) != 0 )
		goto err_fetch_mac;

	/* Open admin queues */
	if ( ( rc = intelxl_open_admin ( intelxl ) ) != 0 )
		goto err_open_admin;

	/* Get switch configuration */
	if ( ( rc = intelxl_admin_switch ( intelxl ) ) != 0 )
		goto err_admin_switch;

	/* Get VSI configuration */
	if ( ( rc = intelxl_admin_vsi ( intelxl ) ) != 0 )
		goto err_admin_vsi;

	/* Configure switch for promiscuous mode */
	if ( ( rc = intelxl_admin_promisc ( intelxl ) ) != 0 )
		goto err_admin_promisc;

	/* Configure queue register addresses (intelxl->queue is
	 * established by the admin queue exchanges above)
	 */
	intelxl->tx.reg = INTELXL_QTX ( intelxl->queue );
	intelxl->rx.reg = INTELXL_QRX ( intelxl->queue );

	/* Configure interrupt causes: chain RX cause to TX cause,
	 * and enable only the admin queue cause in ICR0.
	 */
	writel ( ( INTELXL_QINT_TQCTL_NEXTQ_INDX_NONE |
		   INTELXL_QINT_TQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_TQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_QINT_RQCTL_NEXTQ_INDX ( intelxl->queue ) |
		   INTELXL_QINT_RQCTL_NEXTQ_TYPE_TX |
		   INTELXL_QINT_RQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_RQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_PFINT_LNKLST0_FIRSTQ_INDX ( intelxl->queue ) |
		   INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE_RX ),
		 intelxl->regs + INTELXL_PFINT_LNKLST0 );
	writel ( INTELXL_PFINT_ICR0_ENA_ADMINQ,
		 intelxl->regs + INTELXL_PFINT_ICR0_ENA );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	/* Set initial link state */
	intelxl_admin_link ( netdev );

	return 0;

	/* NOTE(review): the unregister_netdev() below is unreachable
	 * (nothing after register_netdev() can fail); kept per iPXE
	 * convention for symmetric unwind — confirm intentional.
	 */
	unregister_netdev ( netdev );
 err_register_netdev:
 err_admin_promisc:
 err_admin_vsi:
 err_admin_switch:
	intelxl_close_admin ( intelxl );
 err_open_admin:
 err_fetch_mac:
	intelxl_reset ( intelxl );
 err_reset:
	iounmap ( intelxl->regs );
 err_ioremap:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc:
	return rc;
}
1480
/**
 * Remove PCI device
 *
 * Tears down in reverse order of probe: unregister from the network
 * stack, close the admin queues, reset the NIC, then release the
 * register mapping and the device itself.
 *
 * @v pci		PCI device
 */
static void intelxl_remove ( struct pci_device *pci ) {
	struct net_device *netdev = pci_get_drvdata ( pci );
	struct intelxl_nic *intelxl = netdev->priv;

	/* Unregister network device */
	unregister_netdev ( netdev );

	/* Close admin queues */
	intelxl_close_admin ( intelxl );

	/* Reset the NIC (leaves the hardware quiescent for the next
	 * driver to claim it)
	 */
	intelxl_reset ( intelxl );

	/* Free network device */
	iounmap ( intelxl->regs );
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}
1504
/** PCI device IDs
 *
 * Supported devices from the Intel XL710/X710/XXV710/X722 40GbE
 * controller family (all vendor ID 0x8086).
 */
static struct pci_device_id intelxl_nics[] = {
	PCI_ROM ( 0x8086, 0x1572, "x710-sfp", "X710 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1574, "xl710-qemu", "Virtual XL710", 0 ),
	PCI_ROM ( 0x8086, 0x1580, "xl710-kx-b", "XL710 40GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1581, "xl710-kx-c", "XL710 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1583, "xl710-qda2", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1584, "xl710-qda1", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1585, "x710-qsfp", "X710 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1586, "x710-10gt", "X710 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x1587, "x710-kr2", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1588, "x710-kr2-a", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1589, "x710-10gt4", "X710 10GBASE-T4", 0 ),
	PCI_ROM ( 0x8086, 0x158a, "xxv710", "XXV710 25GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x158b, "xxv710-sfp28", "XXV710 25GbE SFP28", 0 ),
	PCI_ROM ( 0x8086, 0x37ce, "x722-kx", "X722 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x37cf, "x722-qsfp", "X722 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d0, "x722-sfp", "X722 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d1, "x722-1gt", "X722 1GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d2, "x722-10gt", "X722 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d3, "x722-sfp-i", "X722 10GbE SFP+", 0 ),
};
1527
/** PCI driver
 *
 * Registered with the iPXE PCI core via the __pci_driver linker
 * table annotation.
 */
struct pci_driver intelxl_driver __pci_driver = {
	.ids = intelxl_nics,
	.id_count = ( sizeof ( intelxl_nics ) / sizeof ( intelxl_nics[0] ) ),
	.probe = intelxl_probe,
	.remove = intelxl_remove,
};