[intelxl] Choose to operate in non-PXE mode
[ipxe.git] src/drivers/net/intelxl.c
1 /*
2 * Copyright (C) 2018 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdint.h>
27 #include <string.h>
28 #include <stdio.h>
29 #include <unistd.h>
30 #include <errno.h>
31 #include <byteswap.h>
32 #include <ipxe/netdevice.h>
33 #include <ipxe/ethernet.h>
34 #include <ipxe/if_ether.h>
35 #include <ipxe/vlan.h>
36 #include <ipxe/iobuf.h>
37 #include <ipxe/malloc.h>
38 #include <ipxe/pci.h>
39 #include <ipxe/version.h>
40 #include "intelxl.h"
41
42 /** @file
43 *
44 * Intel 40 Gigabit Ethernet network card driver
45 *
46 */
47
48 /******************************************************************************
49 *
50 * Device reset
51 *
52 ******************************************************************************
53 */
54
55 /**
56 * Reset hardware
57 *
58 * @v intelxl Intel device
59 * @ret rc Return status code
60 */
61 static int intelxl_reset ( struct intelxl_nic *intelxl ) {
62 uint32_t pfgen_ctrl;
63
64 /* Perform a global software reset */
65 pfgen_ctrl = readl ( intelxl->regs + INTELXL_PFGEN_CTRL );
66 writel ( ( pfgen_ctrl | INTELXL_PFGEN_CTRL_PFSWR ),
67 intelxl->regs + INTELXL_PFGEN_CTRL );
68 mdelay ( INTELXL_RESET_DELAY_MS );
69
70 return 0;
71 }
72
73 /******************************************************************************
74 *
75 * MAC address
76 *
77 ******************************************************************************
78 */
79
80 /**
81 * Fetch initial MAC address and maximum frame size
82 *
83 * @v intelxl Intel device
84 * @v netdev Network device
85 * @ret rc Return status code
86 */
87 static int intelxl_fetch_mac ( struct intelxl_nic *intelxl,
88 struct net_device *netdev ) {
89 union intelxl_receive_address mac;
90 uint32_t prtgl_sal;
91 uint32_t prtgl_sah;
92 size_t mfs;
93
94 /* Read NVM-loaded address */
95 prtgl_sal = readl ( intelxl->regs + INTELXL_PRTGL_SAL );
96 prtgl_sah = readl ( intelxl->regs + INTELXL_PRTGL_SAH );
97 mac.reg.low = cpu_to_le32 ( prtgl_sal );
98 mac.reg.high = cpu_to_le32 ( prtgl_sah );
99
100 /* Check that address is valid */
101 if ( ! is_valid_ether_addr ( mac.raw ) ) {
102 DBGC ( intelxl, "INTELXL %p has invalid MAC address (%s)\n",
103 intelxl, eth_ntoa ( mac.raw ) );
104 return -ENOENT;
105 }
106
107 /* Copy MAC address */
108 DBGC ( intelxl, "INTELXL %p has autoloaded MAC address %s\n",
109 intelxl, eth_ntoa ( mac.raw ) );
110 memcpy ( netdev->hw_addr, mac.raw, ETH_ALEN );
111
112 /* Get maximum frame size */
113 mfs = INTELXL_PRTGL_SAH_MFS_GET ( prtgl_sah );
114 netdev->max_pkt_len = ( mfs - 4 /* CRC */ );
115
116 return 0;
117 }
118
119 /******************************************************************************
120 *
121 * MSI-X interrupts
122 *
123 ******************************************************************************
124 */
125
126 /**
127 * Enable MSI-X dummy interrupt
128 *
129 * @v intelxl Intel device
130 * @v pci PCI device
131 * @ret rc Return status code
132 */
133 int intelxl_msix_enable ( struct intelxl_nic *intelxl,
134 struct pci_device *pci ) {
135 int rc;
136
137 /* Enable MSI-X capability */
138 if ( ( rc = pci_msix_enable ( pci, &intelxl->msix ) ) != 0 ) {
139 DBGC ( intelxl, "INTELXL %p could not enable MSI-X: %s\n",
140 intelxl, strerror ( rc ) );
141 return rc;
142 }
143
144 /* Configure interrupt zero to write to dummy location */
145 pci_msix_map ( &intelxl->msix, 0, virt_to_bus ( &intelxl->msg ), 0 );
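	/* The "dummy" interrupt is never seen by the CPU: the vector
	 * is pointed at a scratch location within the driver state so
	 * that the hardware has somewhere harmless to deliver it.
	 * Keeping an interrupt armed matters because receive
	 * descriptor write-back is otherwise deferred (see the
	 * comment at the end of intelxl_poll()).
	 */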
146
147 /* Enable dummy interrupt zero */
148 pci_msix_unmask ( &intelxl->msix, 0 );
149
150 return 0;
151 }
152
153 /**
154 * Disable MSI-X dummy interrupt
155 *
156 * @v intelxl Intel device
157 * @v pci PCI device
158 */
159 void intelxl_msix_disable ( struct intelxl_nic *intelxl,
160 struct pci_device *pci ) {
161
162 /* Disable dummy interrupt zero */
163 pci_msix_mask ( &intelxl->msix, 0 );
164
165 /* Disable MSI-X capability */
166 pci_msix_disable ( pci, &intelxl->msix );
167 }
168
169 /******************************************************************************
170 *
171 * Admin queue
172 *
173 ******************************************************************************
174 */
175
176 /** Admin queue register offsets */
177 static const struct intelxl_admin_offsets intelxl_admin_offsets = {
178 .bal = INTELXL_ADMIN_BAL,
179 .bah = INTELXL_ADMIN_BAH,
180 .len = INTELXL_ADMIN_LEN,
181 .head = INTELXL_ADMIN_HEAD,
182 .tail = INTELXL_ADMIN_TAIL,
183 };
184
185 /**
186 * Allocate admin queue
187 *
188 * @v intelxl Intel device
189 * @v admin Admin queue
190 * @ret rc Return status code
191 */
192 static int intelxl_alloc_admin ( struct intelxl_nic *intelxl,
193 struct intelxl_admin *admin ) {
194 size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
195 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
196
197 /* Allocate admin queue */
198 admin->buf = malloc_dma ( ( buf_len + len ), INTELXL_ALIGN );
199 if ( ! admin->buf )
200 return -ENOMEM;
201 admin->desc = ( ( ( void * ) admin->buf ) + buf_len );
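	/* The data buffers and the descriptor ring share a single DMA
	 * allocation: the buffers occupy the start and the descriptors
	 * are placed immediately after them.
	 */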
202
203 DBGC ( intelxl, "INTELXL %p A%cQ is at [%08llx,%08llx) buf "
204 "[%08llx,%08llx)\n", intelxl,
205 ( ( admin == &intelxl->command ) ? 'T' : 'R' ),
206 ( ( unsigned long long ) virt_to_bus ( admin->desc ) ),
207 ( ( unsigned long long ) ( virt_to_bus ( admin->desc ) + len ) ),
208 ( ( unsigned long long ) virt_to_bus ( admin->buf ) ),
209 ( ( unsigned long long ) ( virt_to_bus ( admin->buf ) +
210 buf_len ) ) );
211 return 0;
212 }
213
214 /**
215 * Enable admin queue
216 *
217 * @v intelxl Intel device
218 * @v admin Admin queue
219 */
220 static void intelxl_enable_admin ( struct intelxl_nic *intelxl,
221 struct intelxl_admin *admin ) {
222 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
223 const struct intelxl_admin_offsets *regs = admin->regs;
224 void *admin_regs = ( intelxl->regs + admin->base );
225 physaddr_t address;
226
227 /* Initialise admin queue */
228 memset ( admin->desc, 0, len );
229
230 /* Reset head and tail registers */
231 writel ( 0, admin_regs + regs->head );
232 writel ( 0, admin_regs + regs->tail );
233
234 /* Reset queue index */
235 admin->index = 0;
236
237 /* Program queue address */
238 address = virt_to_bus ( admin->desc );
239 writel ( ( address & 0xffffffffUL ), admin_regs + regs->bal );
240 if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) {
241 writel ( ( ( ( uint64_t ) address ) >> 32 ),
242 admin_regs + regs->bah );
243 } else {
244 writel ( 0, admin_regs + regs->bah );
245 }
246
247 /* Program queue length and enable queue */
248 writel ( ( INTELXL_ADMIN_LEN_LEN ( INTELXL_ADMIN_NUM_DESC ) |
249 INTELXL_ADMIN_LEN_ENABLE ),
250 admin_regs + regs->len );
251 }
252
253 /**
254 * Disable admin queue
255 *
256 * @v intelxl Intel device
257 * @v admin Admin queue
258 */
259 static void intelxl_disable_admin ( struct intelxl_nic *intelxl,
260 struct intelxl_admin *admin ) {
261 const struct intelxl_admin_offsets *regs = admin->regs;
262 void *admin_regs = ( intelxl->regs + admin->base );
263
264 /* Disable queue */
265 writel ( 0, admin_regs + regs->len );
266 }
267
268 /**
269 * Free admin queue
270 *
271 * @v intelxl Intel device
272 * @v admin Admin queue
273 */
274 static void intelxl_free_admin ( struct intelxl_nic *intelxl __unused,
275 struct intelxl_admin *admin ) {
276 size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
277 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
278
279 /* Free queue */
280 free_dma ( admin->buf, ( buf_len + len ) );
281 }
282
283 /**
284 * Get next admin command queue descriptor
285 *
286 * @v intelxl Intel device
287 * @ret cmd Command descriptor
288 */
289 struct intelxl_admin_descriptor *
290 intelxl_admin_command_descriptor ( struct intelxl_nic *intelxl ) {
291 struct intelxl_admin *admin = &intelxl->command;
292 struct intelxl_admin_descriptor *cmd;
293
294 /* Get and initialise next descriptor */
295 cmd = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
296 memset ( cmd, 0, sizeof ( *cmd ) );
297 return cmd;
298 }
299
300 /**
301 * Get next admin command queue data buffer
302 *
303 * @v intelxl Intel device
304 * @ret buf Data buffer
305 */
306 union intelxl_admin_buffer *
307 intelxl_admin_command_buffer ( struct intelxl_nic *intelxl ) {
308 struct intelxl_admin *admin = &intelxl->command;
309 union intelxl_admin_buffer *buf;
310
311 /* Get next data buffer */
312 buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];
313 memset ( buf, 0, sizeof ( *buf ) );
314 return buf;
315 }
316
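/* For illustration, the typical calling pattern for the two helpers
 * above (a minimal sketch, mirroring intelxl_admin_version() and the
 * other admin commands later in this file) is:
 *
 *	cmd = intelxl_admin_command_descriptor ( intelxl );
 *	cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VERSION );
 *	if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
 *		return rc;
 *
 * The descriptor (and, where used, the data buffer) returned by these
 * helpers is the one that the next call to intelxl_admin_command()
 * will post to the hardware.
 */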
317 /**
318 * Initialise admin event queue descriptor
319 *
320 * @v intelxl Intel device
321 * @v index Event queue index
322 */
323 static void intelxl_admin_event_init ( struct intelxl_nic *intelxl,
324 unsigned int index ) {
325 struct intelxl_admin *admin = &intelxl->event;
326 struct intelxl_admin_descriptor *evt;
327 union intelxl_admin_buffer *buf;
328 uint64_t address;
329
330 /* Initialise descriptor */
331 evt = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
332 buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
333 address = virt_to_bus ( buf );
334 evt->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
335 evt->len = cpu_to_le16 ( sizeof ( *buf ) );
336 evt->params.buffer.high = cpu_to_le32 ( address >> 32 );
337 evt->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
338 }
339
340 /**
341 * Issue admin queue command
342 *
343 * @v intelxl Intel device
344 * @ret rc Return status code
345 */
346 int intelxl_admin_command ( struct intelxl_nic *intelxl ) {
347 struct intelxl_admin *admin = &intelxl->command;
348 const struct intelxl_admin_offsets *regs = admin->regs;
349 void *admin_regs = ( intelxl->regs + admin->base );
350 struct intelxl_admin_descriptor *cmd;
351 union intelxl_admin_buffer *buf;
352 uint64_t address;
353 uint32_t cookie;
354 unsigned int index;
355 unsigned int tail;
356 unsigned int i;
357 int rc;
358
359 /* Get next queue entry */
360 index = admin->index++;
361 tail = ( admin->index % INTELXL_ADMIN_NUM_DESC );
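	/* Note that admin->index has already been incremented above,
	 * so "tail" refers to the entry following the descriptor being
	 * issued; writing this value to the tail register (below)
	 * hands the descriptor at "index" over to the firmware.
	 */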
362 cmd = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
363 buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
364 DBGC2 ( intelxl, "INTELXL %p admin command %#x opcode %#04x",
365 intelxl, index, le16_to_cpu ( cmd->opcode ) );
366 if ( cmd->vopcode )
367 DBGC2 ( intelxl, "/%#08x", le32_to_cpu ( cmd->vopcode ) );
368 DBGC2 ( intelxl, ":\n" );
369
370 /* Sanity checks */
371 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_DD ) ) );
372 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_CMP ) ) );
373 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_ERR ) ) );
374 assert ( cmd->ret == 0 );
375
376 /* Populate data buffer address if applicable */
377 if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
378 address = virt_to_bus ( buf );
379 cmd->params.buffer.high = cpu_to_le32 ( address >> 32 );
380 cmd->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
381 }
382
383 /* Populate cookie, if not being (ab)used for VF opcode */
384 if ( ! cmd->vopcode )
385 cmd->cookie = cpu_to_le32 ( index );
386
387 /* Record cookie */
388 cookie = cmd->cookie;
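	/* The firmware overwrites the descriptor in place when it
	 * completes the command, so the cookie must be recorded now in
	 * order to verify the response against it later.
	 */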
389
390 /* Post command descriptor */
391 DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
392 if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
393 DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
394 le16_to_cpu ( cmd->len ) );
395 }
396 wmb();
397 writel ( tail, admin_regs + regs->tail );
398
399 /* Wait for completion */
400 for ( i = 0 ; i < INTELXL_ADMIN_MAX_WAIT_MS ; i++ ) {
401
402 /* If response is not complete, delay 1ms and retry */
403 if ( ! ( cmd->flags & INTELXL_ADMIN_FL_DD ) ) {
404 mdelay ( 1 );
405 continue;
406 }
407 DBGC2 ( intelxl, "INTELXL %p admin command %#x response:\n",
408 intelxl, index );
409 DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd,
410 sizeof ( *cmd ) );
411
412 /* Check for cookie mismatch */
413 if ( cmd->cookie != cookie ) {
414 DBGC ( intelxl, "INTELXL %p admin command %#x bad "
415 "cookie %#x\n", intelxl, index,
416 le32_to_cpu ( cmd->cookie ) );
417 rc = -EPROTO;
418 goto err;
419 }
420
421 /* Check for errors */
422 if ( cmd->ret != 0 ) {
423 DBGC ( intelxl, "INTELXL %p admin command %#x error "
424 "%d\n", intelxl, index,
425 le16_to_cpu ( cmd->ret ) );
426 rc = -EIO;
427 goto err;
428 }
429
430 /* Success */
431 return 0;
432 }
433
434 rc = -ETIMEDOUT;
435 DBGC ( intelxl, "INTELXL %p timed out waiting for admin command %#x:\n",
436 intelxl, index );
437 err:
438 DBGC_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
439 return rc;
440 }
441
442 /**
443 * Get firmware version
444 *
445 * @v intelxl Intel device
446 * @ret rc Return status code
447 */
448 static int intelxl_admin_version ( struct intelxl_nic *intelxl ) {
449 struct intelxl_admin_descriptor *cmd;
450 struct intelxl_admin_version_params *version;
451 unsigned int api;
452 int rc;
453
454 /* Populate descriptor */
455 cmd = intelxl_admin_command_descriptor ( intelxl );
456 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VERSION );
457 version = &cmd->params.version;
458
459 /* Issue command */
460 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
461 return rc;
462 api = le16_to_cpu ( version->api.major );
463 DBGC ( intelxl, "INTELXL %p firmware v%d.%d API v%d.%d\n",
464 intelxl, le16_to_cpu ( version->firmware.major ),
465 le16_to_cpu ( version->firmware.minor ),
466 api, le16_to_cpu ( version->api.minor ) );
467
468 /* Check for API compatibility */
469 if ( api > INTELXL_ADMIN_API_MAJOR ) {
470 DBGC ( intelxl, "INTELXL %p unsupported API v%d\n",
471 intelxl, api );
472 return -ENOTSUP;
473 }
474
475 return 0;
476 }
477
478 /**
479 * Report driver version
480 *
481 * @v intelxl Intel device
482 * @ret rc Return status code
483 */
484 static int intelxl_admin_driver ( struct intelxl_nic *intelxl ) {
485 struct intelxl_admin_descriptor *cmd;
486 struct intelxl_admin_driver_params *driver;
487 union intelxl_admin_buffer *buf;
488 int rc;
489
490 /* Populate descriptor */
491 cmd = intelxl_admin_command_descriptor ( intelxl );
492 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_DRIVER );
493 cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF );
494 cmd->len = cpu_to_le16 ( sizeof ( buf->driver ) );
495 driver = &cmd->params.driver;
496 driver->major = product_major_version;
497 driver->minor = product_minor_version;
498 buf = intelxl_admin_command_buffer ( intelxl );
499 snprintf ( buf->driver.name, sizeof ( buf->driver.name ), "%s",
500 ( product_name[0] ? product_name : product_short_name ) );
501
502 /* Issue command */
503 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
504 return rc;
505
506 return 0;
507 }
508
509 /**
510 * Shutdown admin queues
511 *
512 * @v intelxl Intel device
513 * @ret rc Return status code
514 */
515 static int intelxl_admin_shutdown ( struct intelxl_nic *intelxl ) {
516 struct intelxl_admin_descriptor *cmd;
517 struct intelxl_admin_shutdown_params *shutdown;
518 int rc;
519
520 /* Populate descriptor */
521 cmd = intelxl_admin_command_descriptor ( intelxl );
522 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SHUTDOWN );
523 shutdown = &cmd->params.shutdown;
524 shutdown->unloading = INTELXL_ADMIN_SHUTDOWN_UNLOADING;
525
526 /* Issue command */
527 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
528 return rc;
529
530 return 0;
531 }
532
533 /**
534 * Clear PXE mode
535 *
536 * @v intelxl Intel device
537 * @ret rc Return status code
538 */
539 static int intelxl_admin_clear_pxe ( struct intelxl_nic *intelxl ) {
540 struct intelxl_admin_descriptor *cmd;
541 struct intelxl_admin_clear_pxe_params *pxe;
542 uint32_t gllan_rctl_0;
543 int rc;
544
545 /* Do nothing if device is already out of PXE mode */
546 gllan_rctl_0 = readl ( intelxl->regs + INTELXL_GLLAN_RCTL_0 );
547 if ( ! ( gllan_rctl_0 & INTELXL_GLLAN_RCTL_0_PXE_MODE ) ) {
548 DBGC2 ( intelxl, "INTELXL %p already in non-PXE mode\n",
549 intelxl );
550 return 0;
551 }
552
553 /* Populate descriptor */
554 cmd = intelxl_admin_command_descriptor ( intelxl );
555 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_CLEAR_PXE );
556 pxe = &cmd->params.pxe;
557 pxe->magic = INTELXL_ADMIN_CLEAR_PXE_MAGIC;
558
559 /* Issue command */
560 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
561 return rc;
562
563 return 0;
564 }
565
566 /**
567 * Get switch configuration
568 *
569 * @v intelxl Intel device
570 * @ret rc Return status code
571 */
572 static int intelxl_admin_switch ( struct intelxl_nic *intelxl ) {
573 struct intelxl_admin_descriptor *cmd;
574 struct intelxl_admin_switch_params *sw;
575 union intelxl_admin_buffer *buf;
576 int rc;
577
578 /* Populate descriptor */
579 cmd = intelxl_admin_command_descriptor ( intelxl );
580 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SWITCH );
581 cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
582 cmd->len = cpu_to_le16 ( sizeof ( buf->sw ) );
583 sw = &cmd->params.sw;
584 buf = intelxl_admin_command_buffer ( intelxl );
585
586 /* Get each configuration in turn */
587 do {
588 /* Issue command */
589 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
590 return rc;
591
592 /* Dump raw configuration */
593 DBGC2 ( intelxl, "INTELXL %p SEID %#04x:\n",
594 intelxl, le16_to_cpu ( buf->sw.cfg.seid ) );
595 DBGC2_HDA ( intelxl, 0, &buf->sw.cfg, sizeof ( buf->sw.cfg ) );
596
597 /* Parse response */
598 if ( buf->sw.cfg.type == INTELXL_ADMIN_SWITCH_TYPE_VSI ) {
599 intelxl->vsi = le16_to_cpu ( buf->sw.cfg.seid );
600 DBGC ( intelxl, "INTELXL %p VSI %#04x uplink %#04x "
601 "downlink %#04x conn %#02x\n", intelxl,
602 intelxl->vsi, le16_to_cpu ( buf->sw.cfg.uplink ),
603 le16_to_cpu ( buf->sw.cfg.downlink ),
604 buf->sw.cfg.connection );
605 }
606
607 } while ( sw->next );
608
609 /* Check that we found a VSI */
610 if ( ! intelxl->vsi ) {
611 DBGC ( intelxl, "INTELXL %p has no VSI\n", intelxl );
612 return -ENOENT;
613 }
614
615 return 0;
616 }
617
618 /**
619 * Get VSI parameters
620 *
621 * @v intelxl Intel device
622 * @ret rc Return status code
623 */
624 static int intelxl_admin_vsi ( struct intelxl_nic *intelxl ) {
625 struct intelxl_admin_descriptor *cmd;
626 struct intelxl_admin_vsi_params *vsi;
627 union intelxl_admin_buffer *buf;
628 int rc;
629
630 /* Populate descriptor */
631 cmd = intelxl_admin_command_descriptor ( intelxl );
632 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VSI );
633 cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
634 cmd->len = cpu_to_le16 ( sizeof ( buf->vsi ) );
635 vsi = &cmd->params.vsi;
636 vsi->vsi = cpu_to_le16 ( intelxl->vsi );
637 buf = intelxl_admin_command_buffer ( intelxl );
638
639 /* Issue command */
640 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
641 return rc;
642
643 /* Parse response */
644 intelxl->queue = le16_to_cpu ( buf->vsi.queue[0] );
645 intelxl->qset = le16_to_cpu ( buf->vsi.qset[0] );
646 DBGC ( intelxl, "INTELXL %p VSI %#04x queue %#04x qset %#04x\n",
647 intelxl, intelxl->vsi, intelxl->queue, intelxl->qset );
648
649 return 0;
650 }
651
652 /**
653 * Set VSI promiscuous modes
654 *
655 * @v intelxl Intel device
656 * @ret rc Return status code
657 */
658 static int intelxl_admin_promisc ( struct intelxl_nic *intelxl ) {
659 struct intelxl_admin_descriptor *cmd;
660 struct intelxl_admin_promisc_params *promisc;
661 uint16_t flags;
662 int rc;
663
664 /* Populate descriptor */
665 cmd = intelxl_admin_command_descriptor ( intelxl );
666 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_PROMISC );
667 flags = ( INTELXL_ADMIN_PROMISC_FL_UNICAST |
668 INTELXL_ADMIN_PROMISC_FL_MULTICAST |
669 INTELXL_ADMIN_PROMISC_FL_BROADCAST |
670 INTELXL_ADMIN_PROMISC_FL_VLAN );
671 promisc = &cmd->params.promisc;
672 promisc->flags = cpu_to_le16 ( flags );
673 promisc->valid = cpu_to_le16 ( flags );
674 promisc->vsi = cpu_to_le16 ( intelxl->vsi );
675
676 /* Issue command */
677 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
678 return rc;
679
680 return 0;
681 }
682
683 /**
684 * Restart autonegotiation
685 *
686 * @v intelxl Intel device
687 * @ret rc Return status code
688 */
689 static int intelxl_admin_autoneg ( struct intelxl_nic *intelxl ) {
690 struct intelxl_admin_descriptor *cmd;
691 struct intelxl_admin_autoneg_params *autoneg;
692 int rc;
693
694 /* Populate descriptor */
695 cmd = intelxl_admin_command_descriptor ( intelxl );
696 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_AUTONEG );
697 autoneg = &cmd->params.autoneg;
698 autoneg->flags = ( INTELXL_ADMIN_AUTONEG_FL_RESTART |
699 INTELXL_ADMIN_AUTONEG_FL_ENABLE );
700
701 /* Issue command */
702 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
703 return rc;
704
705 return 0;
706 }
707
708 /**
709 * Get link status
710 *
711 * @v netdev Network device
712 * @ret rc Return status code
713 */
714 static int intelxl_admin_link ( struct net_device *netdev ) {
715 struct intelxl_nic *intelxl = netdev->priv;
716 struct intelxl_admin_descriptor *cmd;
717 struct intelxl_admin_link_params *link;
718 int rc;
719
720 /* Populate descriptor */
721 cmd = intelxl_admin_command_descriptor ( intelxl );
722 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_LINK );
723 link = &cmd->params.link;
724 link->notify = INTELXL_ADMIN_LINK_NOTIFY;
725
726 /* Issue command */
727 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
728 return rc;
729 DBGC ( intelxl, "INTELXL %p PHY %#02x speed %#02x status %#02x\n",
730 intelxl, link->phy, link->speed, link->status );
731
732 /* Update network device */
733 if ( link->status & INTELXL_ADMIN_LINK_UP ) {
734 netdev_link_up ( netdev );
735 } else {
736 netdev_link_down ( netdev );
737 }
738
739 return 0;
740 }
741
742 /**
743 * Handle virtual function event (when VF driver is not present)
744 *
745 * @v netdev Network device
746 * @v evt Admin queue event descriptor
747 * @v buf Admin queue event data buffer
748 */
749 __weak void
750 intelxlvf_admin_event ( struct net_device *netdev __unused,
751 struct intelxl_admin_descriptor *evt __unused,
752 union intelxl_admin_buffer *buf __unused ) {
753
754 /* Nothing to do */
755 }
756
757 /**
758 * Refill admin event queue
759 *
760 * @v intelxl Intel device
761 */
762 static void intelxl_refill_admin ( struct intelxl_nic *intelxl ) {
763 struct intelxl_admin *admin = &intelxl->event;
764 const struct intelxl_admin_offsets *regs = admin->regs;
765 void *admin_regs = ( intelxl->regs + admin->base );
766 unsigned int tail;
767
768 /* Update tail pointer */
769 tail = ( ( admin->index + INTELXL_ADMIN_NUM_DESC - 1 ) %
770 INTELXL_ADMIN_NUM_DESC );
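	/* This leaves the tail pointing one descriptor behind the next
	 * expected completion (admin->index), i.e. all of the other
	 * descriptors in the ring remain available to the firmware for
	 * posting events.
	 */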
771 wmb();
772 writel ( tail, admin_regs + regs->tail );
773 }
774
775 /**
776 * Poll admin event queue
777 *
778 * @v netdev Network device
779 */
780 void intelxl_poll_admin ( struct net_device *netdev ) {
781 struct intelxl_nic *intelxl = netdev->priv;
782 struct intelxl_admin *admin = &intelxl->event;
783 struct intelxl_admin_descriptor *evt;
784 union intelxl_admin_buffer *buf;
785
786 /* Check for events */
787 while ( 1 ) {
788
789 /* Get next event descriptor and data buffer */
790 evt = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
791 buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];
792
793 /* Stop if descriptor is not yet completed */
794 if ( ! ( evt->flags & INTELXL_ADMIN_FL_DD ) )
795 return;
796 DBGC2 ( intelxl, "INTELXL %p admin event %#x:\n",
797 intelxl, admin->index );
798 DBGC2_HDA ( intelxl, virt_to_phys ( evt ), evt,
799 sizeof ( *evt ) );
800 if ( evt->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
801 DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
802 le16_to_cpu ( evt->len ) );
803 }
804
805 /* Handle event */
806 switch ( evt->opcode ) {
807 case cpu_to_le16 ( INTELXL_ADMIN_LINK ):
808 intelxl_admin_link ( netdev );
809 break;
810 case cpu_to_le16 ( INTELXL_ADMIN_SEND_TO_VF ):
811 intelxlvf_admin_event ( netdev, evt, buf );
812 break;
813 default:
814 DBGC ( intelxl, "INTELXL %p admin event %#x "
815 "unrecognised opcode %#04x\n", intelxl,
816 admin->index, le16_to_cpu ( evt->opcode ) );
817 break;
818 }
819
820 /* Reset descriptor and refill queue */
821 intelxl_admin_event_init ( intelxl, admin->index );
822 admin->index++;
823 intelxl_refill_admin ( intelxl );
824 }
825 }
826
827 /**
828 * Open admin queues
829 *
830 * @v intelxl Intel device
831 * @ret rc Return status code
832 */
833 int intelxl_open_admin ( struct intelxl_nic *intelxl ) {
834 int rc;
835
836 /* Allocate admin event queue */
837 if ( ( rc = intelxl_alloc_admin ( intelxl, &intelxl->event ) ) != 0 )
838 goto err_alloc_event;
839
840 /* Allocate admin command queue */
841 if ( ( rc = intelxl_alloc_admin ( intelxl, &intelxl->command ) ) != 0 )
842 goto err_alloc_command;
843
844 /* (Re)open admin queues */
845 intelxl_reopen_admin ( intelxl );
846
847 /* Get firmware version */
848 if ( ( rc = intelxl_admin_version ( intelxl ) ) != 0 )
849 goto err_version;
850
851 /* Report driver version */
852 if ( ( rc = intelxl_admin_driver ( intelxl ) ) != 0 )
853 goto err_driver;
854
855 return 0;
856
857 err_driver:
858 err_version:
859 intelxl_disable_admin ( intelxl, &intelxl->command );
860 intelxl_disable_admin ( intelxl, &intelxl->event );
861 intelxl_free_admin ( intelxl, &intelxl->command );
862 err_alloc_command:
863 intelxl_free_admin ( intelxl, &intelxl->event );
864 err_alloc_event:
865 return rc;
866 }
867
868 /**
869 * Reopen admin queues (after virtual function reset)
870 *
871 * @v intelxl Intel device
872 */
873 void intelxl_reopen_admin ( struct intelxl_nic *intelxl ) {
874 unsigned int i;
875
876 /* Enable admin event queue */
877 intelxl_enable_admin ( intelxl, &intelxl->event );
878
879 /* Enable admin command queue */
880 intelxl_enable_admin ( intelxl, &intelxl->command );
881
882 /* Initialise all admin event queue descriptors */
883 for ( i = 0 ; i < INTELXL_ADMIN_NUM_DESC ; i++ )
884 intelxl_admin_event_init ( intelxl, i );
885
886 /* Post all descriptors to event queue */
887 intelxl_refill_admin ( intelxl );
888 }
889
890 /**
891 * Close admin queues
892 *
893 * @v intelxl Intel device
894 */
895 void intelxl_close_admin ( struct intelxl_nic *intelxl ) {
896
897 /* Shut down admin queues */
898 intelxl_admin_shutdown ( intelxl );
899
900 /* Disable admin queues */
901 intelxl_disable_admin ( intelxl, &intelxl->command );
902 intelxl_disable_admin ( intelxl, &intelxl->event );
903
904 /* Free admin queues */
905 intelxl_free_admin ( intelxl, &intelxl->command );
906 intelxl_free_admin ( intelxl, &intelxl->event );
907 }
908
909 /******************************************************************************
910 *
911 * Descriptor rings
912 *
913 ******************************************************************************
914 */
915
916 /**
917 * Allocate descriptor ring
918 *
919 * @v intelxl Intel device
920 * @v ring Descriptor ring
921 * @ret rc Return status code
922 */
923 int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
924 struct intelxl_ring *ring ) {
925 physaddr_t address;
926 int rc;
927
928 /* Allocate descriptor ring */
929 ring->desc.raw = malloc_dma ( ring->len, INTELXL_ALIGN );
930 if ( ! ring->desc.raw ) {
931 rc = -ENOMEM;
932 goto err_alloc;
933 }
934 address = virt_to_bus ( ring->desc.raw );
935
936 /* Initialise descriptor ring */
937 memset ( ring->desc.raw, 0, ring->len );
938
939 /* Reset tail pointer */
940 writel ( 0, ( intelxl->regs + ring->tail ) );
941
942 /* Reset counters */
943 ring->prod = 0;
944 ring->cons = 0;
945
946 DBGC ( intelxl, "INTELXL %p ring %06x is at [%08llx,%08llx)\n",
947 intelxl, ( ring->reg + ring->tail ),
948 ( ( unsigned long long ) address ),
949 ( ( unsigned long long ) address + ring->len ) );
950
951 return 0;
952
953 free_dma ( ring->desc.raw, ring->len );
954 err_alloc:
955 return rc;
956 }
957
958 /**
959 * Free descriptor ring
960 *
961 * @v intelxl Intel device
962 * @v ring Descriptor ring
963 */
964 void intelxl_free_ring ( struct intelxl_nic *intelxl __unused,
965 struct intelxl_ring *ring ) {
966
967 /* Free descriptor ring */
968 free_dma ( ring->desc.raw, ring->len );
969 ring->desc.raw = NULL;
970 }
971
972 /**
973 * Dump queue context (for debugging)
974 *
975 * @v intelxl Intel device
976 * @v op Context operation
977 * @v len Size of context
978 */
979 static __attribute__ (( unused )) void
980 intelxl_context_dump ( struct intelxl_nic *intelxl, uint32_t op, size_t len ) {
981 struct intelxl_context_line line;
982 uint32_t pfcm_lanctxctl;
983 uint32_t pfcm_lanctxstat;
984 unsigned int queue;
985 unsigned int index;
986 unsigned int i;
987
988 /* Do nothing unless debug output is enabled */
989 if ( ! DBG_EXTRA )
990 return;
991
992 /* Dump context */
993 DBGC2 ( intelxl, "INTELXL %p context %#08x:\n", intelxl, op );
994 for ( index = 0 ; ( sizeof ( line ) * index ) < len ; index++ ) {
995
996 /* Start context operation */
997 queue = ( intelxl->base + intelxl->queue );
998 pfcm_lanctxctl =
999 ( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
1000 INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
1001 INTELXL_PFCM_LANCTXCTL_OP_CODE_READ | op );
1002 writel ( pfcm_lanctxctl,
1003 intelxl->regs + INTELXL_PFCM_LANCTXCTL );
1004
1005 /* Wait for operation to complete */
1006 for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {
1007
1008 /* Check if operation is complete */
1009 pfcm_lanctxstat = readl ( intelxl->regs +
1010 INTELXL_PFCM_LANCTXSTAT );
1011 if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
1012 break;
1013
1014 /* Delay */
1015 mdelay ( 1 );
1016 }
1017
1018 /* Read context data */
1019 for ( i = 0 ; i < ( sizeof ( line ) /
1020 sizeof ( line.raw[0] ) ) ; i++ ) {
1021 line.raw[i] = readl ( intelxl->regs +
1022 INTELXL_PFCM_LANCTXDATA ( i ) );
1023 }
1024 DBGC2_HDA ( intelxl, ( sizeof ( line ) * index ),
1025 &line, sizeof ( line ) );
1026 }
1027 }
1028
1029 /**
1030 * Program queue context line
1031 *
1032 * @v intelxl Intel device
1033 * @v line Queue context line
1034 * @v index Line number
1035 * @v op Context operation
1036 * @ret rc Return status code
1037 */
1038 static int intelxl_context_line ( struct intelxl_nic *intelxl,
1039 struct intelxl_context_line *line,
1040 unsigned int index, uint32_t op ) {
1041 uint32_t pfcm_lanctxctl;
1042 uint32_t pfcm_lanctxstat;
1043 unsigned int queue;
1044 unsigned int i;
1045
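	/* The queue context is accessed indirectly: each context line
	 * is staged in the PFCM_LANCTXDATA registers and then
	 * transferred by a single write to PFCM_LANCTXCTL.
	 */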
1046 /* Write context data */
1047 for ( i = 0; i < ( sizeof ( *line ) / sizeof ( line->raw[0] ) ); i++ ) {
1048 writel ( le32_to_cpu ( line->raw[i] ),
1049 intelxl->regs + INTELXL_PFCM_LANCTXDATA ( i ) );
1050 }
1051
1052 /* Start context operation */
1053 queue = ( intelxl->base + intelxl->queue );
1054 pfcm_lanctxctl = ( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
1055 INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
1056 INTELXL_PFCM_LANCTXCTL_OP_CODE_WRITE | op );
1057 writel ( pfcm_lanctxctl, intelxl->regs + INTELXL_PFCM_LANCTXCTL );
1058
1059 /* Wait for operation to complete */
1060 for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {
1061
1062 /* Check if operation is complete */
1063 pfcm_lanctxstat = readl ( intelxl->regs +
1064 INTELXL_PFCM_LANCTXSTAT );
1065 if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
1066 return 0;
1067
1068 /* Delay */
1069 mdelay ( 1 );
1070 }
1071
1072 DBGC ( intelxl, "INTELXL %p timed out waiting for context: %#08x\n",
1073 intelxl, pfcm_lanctxctl );
1074 return -ETIMEDOUT;
1075 }
1076
1077 /**
1078 * Program queue context
1079 *
1080 * @v intelxl Intel device
1081 * @v line Queue context lines
1082 * @v len Size of context
1083 * @v op Context operation
1084 * @ret rc Return status code
1085 */
1086 static int intelxl_context ( struct intelxl_nic *intelxl,
1087 struct intelxl_context_line *line,
1088 size_t len, uint32_t op ) {
1089 unsigned int index;
1090 int rc;
1091
1092 DBGC2 ( intelxl, "INTELXL %p context %#08x len %#zx:\n",
1093 intelxl, op, len );
1094 DBGC2_HDA ( intelxl, 0, line, len );
1095
1096 /* Program one line at a time */
1097 for ( index = 0 ; ( sizeof ( *line ) * index ) < len ; index++ ) {
1098 if ( ( rc = intelxl_context_line ( intelxl, line++, index,
1099 op ) ) != 0 )
1100 return rc;
1101 }
1102
1103 return 0;
1104 }
1105
1106 /**
1107 * Program transmit queue context
1108 *
1109 * @v intelxl Intel device
1110 * @v address Descriptor ring base address
1111 * @ret rc Return status code
1112 */
1113 static int intelxl_context_tx ( struct intelxl_nic *intelxl,
1114 physaddr_t address ) {
1115 union {
1116 struct intelxl_context_tx tx;
1117 struct intelxl_context_line line;
1118 } ctx;
1119 int rc;
1120
1121 /* Initialise context */
1122 memset ( &ctx, 0, sizeof ( ctx ) );
1123 ctx.tx.flags = cpu_to_le16 ( INTELXL_CTX_TX_FL_NEW );
1124 ctx.tx.base = cpu_to_le64 ( INTELXL_CTX_TX_BASE ( address ) );
1125 ctx.tx.count =
1126 cpu_to_le16 ( INTELXL_CTX_TX_COUNT ( INTELXL_TX_NUM_DESC ) );
1127 ctx.tx.qset = INTELXL_CTX_TX_QSET ( intelxl->qset );
1128
1129 /* Program context */
1130 if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
1131 INTELXL_PFCM_LANCTXCTL_TYPE_TX ) ) != 0 )
1132 return rc;
1133
1134 return 0;
1135 }
1136
1137 /**
1138 * Program receive queue context
1139 *
1140 * @v intelxl Intel device
1141 * @v address Descriptor ring base address
1142 * @ret rc Return status code
1143 */
1144 static int intelxl_context_rx ( struct intelxl_nic *intelxl,
1145 physaddr_t address ) {
1146 union {
1147 struct intelxl_context_rx rx;
1148 struct intelxl_context_line line;
1149 } ctx;
1150 uint64_t base_count;
1151 int rc;
1152
1153 /* Initialise context */
1154 memset ( &ctx, 0, sizeof ( ctx ) );
1155 base_count = INTELXL_CTX_RX_BASE_COUNT ( address, INTELXL_RX_NUM_DESC );
1156 ctx.rx.base_count = cpu_to_le64 ( base_count );
1157 ctx.rx.len = cpu_to_le16 ( INTELXL_CTX_RX_LEN ( intelxl->mfs ) );
1158 ctx.rx.flags = ( INTELXL_CTX_RX_FL_DSIZE | INTELXL_CTX_RX_FL_CRCSTRIP );
1159 ctx.rx.mfs = cpu_to_le16 ( INTELXL_CTX_RX_MFS ( intelxl->mfs ) );
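	/* The CRCSTRIP flag (set above) causes the hardware to strip
	 * the Ethernet CRC, which is consistent with
	 * intelxl_fetch_mac() excluding the 4-byte CRC from
	 * max_pkt_len.
	 */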
1160
1161 /* Program context */
1162 if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
1163 INTELXL_PFCM_LANCTXCTL_TYPE_RX ) ) != 0 )
1164 return rc;
1165
1166 return 0;
1167 }
1168
1169 /**
1170 * Enable descriptor ring
1171 *
1172 * @v intelxl Intel device
1173 * @v ring Descriptor ring
1174 * @ret rc Return status code
1175 */
1176 static int intelxl_enable_ring ( struct intelxl_nic *intelxl,
1177 struct intelxl_ring *ring ) {
1178 void *ring_regs = ( intelxl->regs + ring->reg );
1179 uint32_t qxx_ena;
1180
1181 /* Enable ring */
1182 writel ( INTELXL_QXX_ENA_REQ, ( ring_regs + INTELXL_QXX_ENA ) );
1183 udelay ( INTELXL_QUEUE_ENABLE_DELAY_US );
1184 qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
1185 if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) ) {
1186 DBGC ( intelxl, "INTELXL %p ring %06x failed to enable: "
1187 "%#08x\n", intelxl, ring->reg, qxx_ena );
1188 return -EIO;
1189 }
1190
1191 return 0;
1192 }
1193
1194 /**
1195 * Disable descriptor ring
1196 *
1197 * @v intelxl Intel device
1198 * @v ring Descriptor ring
1199 * @ret rc Return status code
1200 */
1201 static int intelxl_disable_ring ( struct intelxl_nic *intelxl,
1202 struct intelxl_ring *ring ) {
1203 void *ring_regs = ( intelxl->regs + ring->reg );
1204 uint32_t qxx_ena;
1205 unsigned int i;
1206
1207 /* Disable ring */
1208 writel ( 0, ( ring_regs + INTELXL_QXX_ENA ) );
1209
1210 /* Wait for ring to be disabled */
1211 for ( i = 0 ; i < INTELXL_QUEUE_DISABLE_MAX_WAIT_MS ; i++ ) {
1212
1213 /* Check if ring is disabled */
1214 qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
1215 if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) )
1216 return 0;
1217
1218 /* Delay */
1219 mdelay ( 1 );
1220 }
1221
1222 DBGC ( intelxl, "INTELXL %p ring %06x timed out waiting for disable: "
1223 "%#08x\n", intelxl, ring->reg, qxx_ena );
1224 return -ETIMEDOUT;
1225 }
1226
1227 /**
1228 * Create descriptor ring
1229 *
1230 * @v intelxl Intel device
1231 * @v ring Descriptor ring
1232 * @ret rc Return status code
1233 */
1234 static int intelxl_create_ring ( struct intelxl_nic *intelxl,
1235 struct intelxl_ring *ring ) {
1236 physaddr_t address;
1237 int rc;
1238
1239 /* Allocate descriptor ring */
1240 if ( ( rc = intelxl_alloc_ring ( intelxl, ring ) ) != 0 )
1241 goto err_alloc;
1242
1243 /* Program queue context */
1244 address = virt_to_bus ( ring->desc.raw );
1245 if ( ( rc = ring->context ( intelxl, address ) ) != 0 )
1246 goto err_context;
1247
1248 /* Enable ring */
1249 if ( ( rc = intelxl_enable_ring ( intelxl, ring ) ) != 0 )
1250 goto err_enable;
1251
1252 return 0;
1253
1254 intelxl_disable_ring ( intelxl, ring );
1255 err_enable:
1256 err_context:
1257 intelxl_free_ring ( intelxl, ring );
1258 err_alloc:
1259 return rc;
1260 }
1261
1262 /**
1263 * Destroy descriptor ring
1264 *
1265 * @v intelxl Intel device
1266 * @v ring Descriptor ring
1267 */
1268 static void intelxl_destroy_ring ( struct intelxl_nic *intelxl,
1269 struct intelxl_ring *ring ) {
1270 int rc;
1271
1272 /* Disable ring */
1273 if ( ( rc = intelxl_disable_ring ( intelxl, ring ) ) != 0 ) {
1274 /* Leak memory; there's nothing else we can do */
1275 return;
1276 }
1277
1278 /* Free descriptor ring */
1279 intelxl_free_ring ( intelxl, ring );
1280 }
1281
1282 /**
1283 * Refill receive descriptor ring
1284 *
1285 * @v intelxl Intel device
1286 */
1287 static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
1288 struct intelxl_rx_data_descriptor *rx;
1289 struct io_buffer *iobuf;
1290 unsigned int rx_idx;
1291 unsigned int rx_tail;
1292 physaddr_t address;
1293 unsigned int refilled = 0;
1294
1295 /* Refill ring */
1296 while ( ( intelxl->rx.prod - intelxl->rx.cons ) < INTELXL_RX_FILL ) {
1297
1298 /* Allocate I/O buffer */
1299 iobuf = alloc_iob ( intelxl->mfs );
1300 if ( ! iobuf ) {
1301 /* Wait for next refill */
1302 break;
1303 }
1304
1305 /* Get next receive descriptor */
1306 rx_idx = ( intelxl->rx.prod++ % INTELXL_RX_NUM_DESC );
1307 rx = &intelxl->rx.desc.rx[rx_idx].data;
1308
1309 /* Populate receive descriptor */
1310 address = virt_to_bus ( iobuf->data );
1311 rx->address = cpu_to_le64 ( address );
1312 rx->flags = 0;
1313
1314 /* Record I/O buffer */
1315 assert ( intelxl->rx_iobuf[rx_idx] == NULL );
1316 intelxl->rx_iobuf[rx_idx] = iobuf;
1317
1318 DBGC2 ( intelxl, "INTELXL %p RX %d is [%llx,%llx)\n", intelxl,
1319 rx_idx, ( ( unsigned long long ) address ),
1320 ( ( unsigned long long ) address + intelxl->mfs ) );
1321 refilled++;
1322 }
1323
1324 /* Push descriptors to card, if applicable */
1325 if ( refilled ) {
1326 wmb();
1327 rx_tail = ( intelxl->rx.prod % INTELXL_RX_NUM_DESC );
1328 writel ( rx_tail, ( intelxl->regs + intelxl->rx.tail ) );
1329 }
1330 }
1331
1332 /**
1333 * Discard unused receive I/O buffers
1334 *
1335 * @v intelxl Intel device
1336 */
1337 void intelxl_empty_rx ( struct intelxl_nic *intelxl ) {
1338 unsigned int i;
1339
1340 /* Discard any unused receive buffers */
1341 for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) {
1342 if ( intelxl->rx_iobuf[i] )
1343 free_iob ( intelxl->rx_iobuf[i] );
1344 intelxl->rx_iobuf[i] = NULL;
1345 }
1346 }
1347
1348 /******************************************************************************
1349 *
1350 * Network device interface
1351 *
1352 ******************************************************************************
1353 */
1354
1355 /**
1356 * Open network device
1357 *
1358 * @v netdev Network device
1359 * @ret rc Return status code
1360 */
1361 static int intelxl_open ( struct net_device *netdev ) {
1362 struct intelxl_nic *intelxl = netdev->priv;
1363 union intelxl_receive_address mac;
1364 unsigned int queue;
1365 uint32_t prtgl_sal;
1366 uint32_t prtgl_sah;
1367 int rc;
1368
1369 /* Calculate maximum frame size */
1370 intelxl->mfs = ( ( ETH_HLEN + netdev->mtu + 4 /* CRC */ +
1371 INTELXL_ALIGN - 1 ) & ~( INTELXL_ALIGN - 1 ) );
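	/* The maximum frame size covers the Ethernet header, the MTU
	 * and the 4-byte CRC, rounded up to an INTELXL_ALIGN boundary.
	 * It is programmed into the port registers below and is also
	 * used as the receive buffer size in intelxl_refill_rx().
	 */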
1372
1373 /* Program MAC address and maximum frame size */
1374 memset ( &mac, 0, sizeof ( mac ) );
1375 memcpy ( mac.raw, netdev->ll_addr, sizeof ( mac.raw ) );
1376 prtgl_sal = le32_to_cpu ( mac.reg.low );
1377 prtgl_sah = ( le32_to_cpu ( mac.reg.high ) |
1378 INTELXL_PRTGL_SAH_MFS_SET ( intelxl->mfs ) );
1379 writel ( prtgl_sal, intelxl->regs + INTELXL_PRTGL_SAL );
1380 writel ( prtgl_sah, intelxl->regs + INTELXL_PRTGL_SAH );
1381
1382 /* Associate transmit queue to PF */
1383 writel ( ( INTELXL_QXX_CTL_PFVF_Q_PF |
1384 INTELXL_QXX_CTL_PFVF_PF_INDX ( intelxl->pf ) ),
1385 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_CTL ) );
1386
1387 /* Clear transmit pre queue disable */
1388 queue = ( intelxl->base + intelxl->queue );
1389 writel ( ( INTELXL_GLLAN_TXPRE_QDIS_CLEAR_QDIS |
1390 INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
1391 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
1392
1393 /* Reset transmit queue head */
1394 writel ( 0, ( intelxl->regs + INTELXL_QTX_HEAD ( intelxl->queue ) ) );
1395
1396 /* Create receive descriptor ring */
1397 if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->rx ) ) != 0 )
1398 goto err_create_rx;
1399
1400 /* Create transmit descriptor ring */
1401 if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->tx ) ) != 0 )
1402 goto err_create_tx;
1403
1404 /* Fill receive ring */
1405 intelxl_refill_rx ( intelxl );
1406
1407 /* Restart autonegotiation */
1408 intelxl_admin_autoneg ( intelxl );
1409
1410 /* Update link state */
1411 intelxl_admin_link ( netdev );
1412
1413 return 0;
1414
1415 writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
1416 INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
1417 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
1418 udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
1419 intelxl_destroy_ring ( intelxl, &intelxl->tx );
1420 err_create_tx:
1421 intelxl_destroy_ring ( intelxl, &intelxl->rx );
1422 err_create_rx:
1423 return rc;
1424 }
1425
1426 /**
1427 * Close network device
1428 *
1429 * @v netdev Network device
1430 */
1431 static void intelxl_close ( struct net_device *netdev ) {
1432 struct intelxl_nic *intelxl = netdev->priv;
1433 unsigned int queue;
1434
1435 /* Dump contexts (for debugging) */
1436 intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_TX,
1437 sizeof ( struct intelxl_context_tx ) );
1438 intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_RX,
1439 sizeof ( struct intelxl_context_rx ) );
1440
1441 /* Pre-disable transmit queue */
1442 queue = ( intelxl->base + intelxl->queue );
1443 writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
1444 INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
1445 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
1446 udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
1447
1448 /* Destroy transmit descriptor ring */
1449 intelxl_destroy_ring ( intelxl, &intelxl->tx );
1450
1451 /* Destroy receive descriptor ring */
1452 intelxl_destroy_ring ( intelxl, &intelxl->rx );
1453
1454 /* Discard any unused receive buffers */
1455 intelxl_empty_rx ( intelxl );
1456 }
1457
1458 /**
1459 * Transmit packet
1460 *
1461 * @v netdev Network device
1462 * @v iobuf I/O buffer
1463 * @ret rc Return status code
1464 */
1465 int intelxl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
1466 struct intelxl_nic *intelxl = netdev->priv;
1467 struct intelxl_tx_data_descriptor *tx;
1468 unsigned int tx_idx;
1469 unsigned int tx_tail;
1470 physaddr_t address;
1471 size_t len;
1472
1473 /* Get next transmit descriptor */
1474 if ( ( intelxl->tx.prod - intelxl->tx.cons ) >= INTELXL_TX_FILL ) {
1475 DBGC ( intelxl, "INTELXL %p out of transmit descriptors\n",
1476 intelxl );
1477 return -ENOBUFS;
1478 }
1479 tx_idx = ( intelxl->tx.prod++ % INTELXL_TX_NUM_DESC );
1480 tx_tail = ( intelxl->tx.prod % INTELXL_TX_NUM_DESC );
1481 tx = &intelxl->tx.desc.tx[tx_idx].data;
1482
1483 /* Populate transmit descriptor */
1484 address = virt_to_bus ( iobuf->data );
1485 len = iob_len ( iobuf );
1486 tx->address = cpu_to_le64 ( address );
1487 tx->len = cpu_to_le32 ( INTELXL_TX_DATA_LEN ( len ) );
1488 tx->flags = cpu_to_le32 ( INTELXL_TX_DATA_DTYP | INTELXL_TX_DATA_EOP |
1489 INTELXL_TX_DATA_RS | INTELXL_TX_DATA_JFDI );
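	/* The RS flag requests a completion write-back from the
	 * hardware; intelxl_poll_tx() relies on the resulting
	 * descriptor-done (DD) flag to detect completed transmissions.
	 */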
1490 wmb();
1491
1492 /* Notify card that there are packets ready to transmit */
1493 writel ( tx_tail, ( intelxl->regs + intelxl->tx.tail ) );
1494
1495 DBGC2 ( intelxl, "INTELXL %p TX %d is [%llx,%llx)\n", intelxl, tx_idx,
1496 ( ( unsigned long long ) address ),
1497 ( ( unsigned long long ) address + len ) );
1498 return 0;
1499 }
1500
1501 /**
1502 * Poll for completed packets
1503 *
1504 * @v netdev Network device
1505 */
1506 static void intelxl_poll_tx ( struct net_device *netdev ) {
1507 struct intelxl_nic *intelxl = netdev->priv;
1508 struct intelxl_tx_writeback_descriptor *tx_wb;
1509 unsigned int tx_idx;
1510
1511 /* Check for completed packets */
1512 while ( intelxl->tx.cons != intelxl->tx.prod ) {
1513
1514 /* Get next transmit descriptor */
1515 tx_idx = ( intelxl->tx.cons % INTELXL_TX_NUM_DESC );
1516 tx_wb = &intelxl->tx.desc.tx[tx_idx].wb;
1517
1518 /* Stop if descriptor is still in use */
1519 if ( ! ( tx_wb->flags & INTELXL_TX_WB_FL_DD ) )
1520 return;
1521 DBGC2 ( intelxl, "INTELXL %p TX %d complete\n",
1522 intelxl, tx_idx );
1523
1524 /* Complete TX descriptor */
1525 netdev_tx_complete_next ( netdev );
1526 intelxl->tx.cons++;
1527 }
1528 }
1529
1530 /**
1531 * Poll for received packets
1532 *
1533 * @v netdev Network device
1534 */
1535 static void intelxl_poll_rx ( struct net_device *netdev ) {
1536 struct intelxl_nic *intelxl = netdev->priv;
1537 struct intelxl_rx_writeback_descriptor *rx_wb;
1538 struct io_buffer *iobuf;
1539 unsigned int rx_idx;
1540 unsigned int tag;
1541 size_t len;
1542
1543 /* Check for received packets */
1544 while ( intelxl->rx.cons != intelxl->rx.prod ) {
1545
1546 /* Get next receive descriptor */
1547 rx_idx = ( intelxl->rx.cons % INTELXL_RX_NUM_DESC );
1548 rx_wb = &intelxl->rx.desc.rx[rx_idx].wb;
1549
1550 /* Stop if descriptor is still in use */
1551 if ( ! ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_DD ) ) )
1552 return;
1553
1554 /* Populate I/O buffer */
1555 iobuf = intelxl->rx_iobuf[rx_idx];
1556 intelxl->rx_iobuf[rx_idx] = NULL;
1557 len = INTELXL_RX_WB_LEN ( le32_to_cpu ( rx_wb->len ) );
1558 iob_put ( iobuf, len );
1559
1560 /* Find VLAN device, if applicable */
1561 if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_VLAN ) ) {
1562 tag = VLAN_TAG ( le16_to_cpu ( rx_wb->vlan ) );
1563 } else {
1564 tag = 0;
1565 }
1566
1567 /* Hand off to network stack */
1568 if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_RXE ) ) {
1569 DBGC ( intelxl, "INTELXL %p RX %d error (length %zd, "
1570 "flags %08x)\n", intelxl, rx_idx, len,
1571 le32_to_cpu ( rx_wb->flags ) );
1572 vlan_netdev_rx_err ( netdev, tag, iobuf, -EIO );
1573 } else {
1574 DBGC2 ( intelxl, "INTELXL %p RX %d complete (length "
1575 "%zd)\n", intelxl, rx_idx, len );
1576 vlan_netdev_rx ( netdev, tag, iobuf );
1577 }
1578 intelxl->rx.cons++;
1579 }
1580 }
1581
1582 /**
1583 * Poll for completed and received packets
1584 *
1585 * @v netdev Network device
1586 */
1587 void intelxl_poll ( struct net_device *netdev ) {
1588 struct intelxl_nic *intelxl = netdev->priv;
1589
1590 /* Poll for completed packets */
1591 intelxl_poll_tx ( netdev );
1592
1593 /* Poll for received packets */
1594 intelxl_poll_rx ( netdev );
1595
1596 /* Poll for admin events */
1597 intelxl_poll_admin ( netdev );
1598
1599 /* Refill RX ring */
1600 intelxl_refill_rx ( intelxl );
1601
1602 /* Rearm interrupt, since otherwise receive descriptors will
1603 * be written back only after a complete cacheline (four
1604 * packets) has been received.
1605 *
1606 * There is unfortunately no efficient way to determine
1607 * whether or not rearming the interrupt is necessary. If we
1608 * are running inside a hypervisor (e.g. using a VF or PF as a
1609 * passed-through PCI device), then the MSI-X write is
1610 * redirected by the hypervisor to the real host APIC and the
1611 * host ISR then raises an interrupt within the guest. We
1612 * therefore cannot poll the nominal MSI-X target location to
1613 * watch for the value being written. We could read from the
1614 * INT_DYN_CTL register, but this is even less efficient than
1615 * just unconditionally rearming the interrupt.
1616 */
1617 writel ( INTELXL_INT_DYN_CTL_INTENA, intelxl->regs + intelxl->intr );
1618 }
1619
1620 /** Network device operations */
1621 static struct net_device_operations intelxl_operations = {
1622 .open = intelxl_open,
1623 .close = intelxl_close,
1624 .transmit = intelxl_transmit,
1625 .poll = intelxl_poll,
1626 };
1627
1628 /******************************************************************************
1629 *
1630 * PCI interface
1631 *
1632 ******************************************************************************
1633 */
1634
1635 /**
1636 * Probe PCI device
1637 *
1638 * @v pci PCI device
1639 * @ret rc Return status code
1640 */
1641 static int intelxl_probe ( struct pci_device *pci ) {
1642 struct net_device *netdev;
1643 struct intelxl_nic *intelxl;
1644 uint32_t pfgen_portnum;
1645 uint32_t pflan_qalloc;
1646 int rc;
1647
1648 /* Allocate and initialise net device */
1649 netdev = alloc_etherdev ( sizeof ( *intelxl ) );
1650 if ( ! netdev ) {
1651 rc = -ENOMEM;
1652 goto err_alloc;
1653 }
1654 netdev_init ( netdev, &intelxl_operations );
1655 intelxl = netdev->priv;
1656 pci_set_drvdata ( pci, netdev );
1657 netdev->dev = &pci->dev;
1658 memset ( intelxl, 0, sizeof ( *intelxl ) );
1659 intelxl->pf = PCI_FUNC ( pci->busdevfn );
1660 intelxl->intr = INTELXL_PFINT_DYN_CTL0;
1661 intelxl_init_admin ( &intelxl->command, INTELXL_ADMIN_CMD,
1662 &intelxl_admin_offsets );
1663 intelxl_init_admin ( &intelxl->event, INTELXL_ADMIN_EVT,
1664 &intelxl_admin_offsets );
1665 intelxl_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC,
1666 sizeof ( intelxl->tx.desc.tx[0] ),
1667 intelxl_context_tx );
1668 intelxl_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC,
1669 sizeof ( intelxl->rx.desc.rx[0] ),
1670 intelxl_context_rx );
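	/* Note: the admin queues and descriptor rings are only
	 * described at this point; the actual DMA allocations happen
	 * later, in intelxl_open_admin() and in intelxl_open() via
	 * intelxl_create_ring().
	 */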
1671
1672 /* Fix up PCI device */
1673 adjust_pci_device ( pci );
1674
1675 /* Map registers */
1676 intelxl->regs = ioremap ( pci->membase, INTELXL_BAR_SIZE );
1677 if ( ! intelxl->regs ) {
1678 rc = -ENODEV;
1679 goto err_ioremap;
1680 }
1681
1682 /* Reset the NIC */
1683 if ( ( rc = intelxl_reset ( intelxl ) ) != 0 )
1684 goto err_reset;
1685
1686 /* Get port number and base queue number */
1687 pfgen_portnum = readl ( intelxl->regs + INTELXL_PFGEN_PORTNUM );
1688 intelxl->port = INTELXL_PFGEN_PORTNUM_PORT_NUM ( pfgen_portnum );
1689 pflan_qalloc = readl ( intelxl->regs + INTELXL_PFLAN_QALLOC );
1690 intelxl->base = INTELXL_PFLAN_QALLOC_FIRSTQ ( pflan_qalloc );
1691 DBGC ( intelxl, "INTELXL %p PF %d using port %d queues [%#04x-%#04x]\n",
1692 intelxl, intelxl->pf, intelxl->port, intelxl->base,
1693 INTELXL_PFLAN_QALLOC_LASTQ ( pflan_qalloc ) );
1694
1695 /* Fetch MAC address and maximum frame size */
1696 if ( ( rc = intelxl_fetch_mac ( intelxl, netdev ) ) != 0 )
1697 goto err_fetch_mac;
1698
1699 /* Enable MSI-X dummy interrupt */
1700 if ( ( rc = intelxl_msix_enable ( intelxl, pci ) ) != 0 )
1701 goto err_msix;
1702
1703 /* Open admin queues */
1704 if ( ( rc = intelxl_open_admin ( intelxl ) ) != 0 )
1705 goto err_open_admin;
1706
1707 /* Clear PXE mode */
1708 if ( ( rc = intelxl_admin_clear_pxe ( intelxl ) ) != 0 )
1709 goto err_admin_clear_pxe;
1710
1711 /* Get switch configuration */
1712 if ( ( rc = intelxl_admin_switch ( intelxl ) ) != 0 )
1713 goto err_admin_switch;
1714
1715 /* Get VSI configuration */
1716 if ( ( rc = intelxl_admin_vsi ( intelxl ) ) != 0 )
1717 goto err_admin_vsi;
1718
1719 /* Configure switch for promiscuous mode */
1720 if ( ( rc = intelxl_admin_promisc ( intelxl ) ) != 0 )
1721 goto err_admin_promisc;
1722
1723 /* Configure queue register addresses */
1724 intelxl->tx.reg = INTELXL_QTX ( intelxl->queue );
1725 intelxl->tx.tail = ( intelxl->tx.reg + INTELXL_QXX_TAIL );
1726 intelxl->rx.reg = INTELXL_QRX ( intelxl->queue );
1727 intelxl->rx.tail = ( intelxl->rx.reg + INTELXL_QXX_TAIL );
1728
1729 /* Configure interrupt causes */
1730 writel ( ( INTELXL_QINT_TQCTL_NEXTQ_INDX_NONE |
1731 INTELXL_QINT_TQCTL_CAUSE_ENA ),
1732 intelxl->regs + INTELXL_QINT_TQCTL ( intelxl->queue ) );
1733 writel ( ( INTELXL_QINT_RQCTL_NEXTQ_INDX ( intelxl->queue ) |
1734 INTELXL_QINT_RQCTL_NEXTQ_TYPE_TX |
1735 INTELXL_QINT_RQCTL_CAUSE_ENA ),
1736 intelxl->regs + INTELXL_QINT_RQCTL ( intelxl->queue ) );
1737 writel ( ( INTELXL_PFINT_LNKLST0_FIRSTQ_INDX ( intelxl->queue ) |
1738 INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE_RX ),
1739 intelxl->regs + INTELXL_PFINT_LNKLST0 );
1740 writel ( INTELXL_PFINT_ICR0_ENA_ADMINQ,
1741 intelxl->regs + INTELXL_PFINT_ICR0_ENA );
1742
1743 /* Register network device */
1744 if ( ( rc = register_netdev ( netdev ) ) != 0 )
1745 goto err_register_netdev;
1746
1747 /* Set initial link state */
1748 intelxl_admin_link ( netdev );
1749
1750 return 0;
1751
1752 unregister_netdev ( netdev );
1753 err_register_netdev:
1754 err_admin_promisc:
1755 err_admin_vsi:
1756 err_admin_switch:
1757 err_admin_clear_pxe:
1758 intelxl_close_admin ( intelxl );
1759 err_open_admin:
1760 intelxl_msix_disable ( intelxl, pci );
1761 err_msix:
1762 err_fetch_mac:
1763 intelxl_reset ( intelxl );
1764 err_reset:
1765 iounmap ( intelxl->regs );
1766 err_ioremap:
1767 netdev_nullify ( netdev );
1768 netdev_put ( netdev );
1769 err_alloc:
1770 return rc;
1771 }
1772
1773 /**
1774 * Remove PCI device
1775 *
1776 * @v pci PCI device
1777 */
1778 static void intelxl_remove ( struct pci_device *pci ) {
1779 struct net_device *netdev = pci_get_drvdata ( pci );
1780 struct intelxl_nic *intelxl = netdev->priv;
1781
1782 /* Unregister network device */
1783 unregister_netdev ( netdev );
1784
1785 /* Close admin queues */
1786 intelxl_close_admin ( intelxl );
1787
1788 /* Disable MSI-X dummy interrupt */
1789 intelxl_msix_disable ( intelxl, pci );
1790
1791 /* Reset the NIC */
1792 intelxl_reset ( intelxl );
1793
1794 /* Free network device */
1795 iounmap ( intelxl->regs );
1796 netdev_nullify ( netdev );
1797 netdev_put ( netdev );
1798 }
1799
1800 /** PCI device IDs */
1801 static struct pci_device_id intelxl_nics[] = {
1802 PCI_ROM ( 0x8086, 0x1572, "x710-sfp", "X710 10GbE SFP+", 0 ),
1803 PCI_ROM ( 0x8086, 0x1574, "xl710-qemu", "Virtual XL710", 0 ),
1804 PCI_ROM ( 0x8086, 0x1580, "xl710-kx-b", "XL710 40GbE backplane", 0 ),
1805 PCI_ROM ( 0x8086, 0x1581, "xl710-kx-c", "XL710 10GbE backplane", 0 ),
1806 PCI_ROM ( 0x8086, 0x1583, "xl710-qda2", "XL710 40GbE QSFP+", 0 ),
1807 PCI_ROM ( 0x8086, 0x1584, "xl710-qda1", "XL710 40GbE QSFP+", 0 ),
1808 PCI_ROM ( 0x8086, 0x1585, "x710-qsfp", "X710 10GbE QSFP+", 0 ),
1809 PCI_ROM ( 0x8086, 0x1586, "x710-10gt", "X710 10GBASE-T", 0 ),
1810 PCI_ROM ( 0x8086, 0x1587, "x710-kr2", "XL710 20GbE backplane", 0 ),
1811 PCI_ROM ( 0x8086, 0x1588, "x710-kr2-a", "XL710 20GbE backplane", 0 ),
1812 PCI_ROM ( 0x8086, 0x1589, "x710-10gt4", "X710 10GBASE-T4", 0 ),
1813 PCI_ROM ( 0x8086, 0x158a, "xxv710", "XXV710 25GbE backplane", 0 ),
1814 PCI_ROM ( 0x8086, 0x158b, "xxv710-sfp28", "XXV710 25GbE SFP28", 0 ),
1815 PCI_ROM ( 0x8086, 0x37ce, "x722-kx", "X722 10GbE backplane", 0 ),
1816 PCI_ROM ( 0x8086, 0x37cf, "x722-qsfp", "X722 10GbE QSFP+", 0 ),
1817 PCI_ROM ( 0x8086, 0x37d0, "x722-sfp", "X722 10GbE SFP+", 0 ),
1818 PCI_ROM ( 0x8086, 0x37d1, "x722-1gt", "X722 1GBASE-T", 0 ),
1819 PCI_ROM ( 0x8086, 0x37d2, "x722-10gt", "X722 10GBASE-T", 0 ),
1820 PCI_ROM ( 0x8086, 0x37d3, "x722-sfp-i", "X722 10GbE SFP+", 0 ),
1821 };
1822
1823 /** PCI driver */
1824 struct pci_driver intelxl_driver __pci_driver = {
1825 .ids = intelxl_nics,
1826 .id_count = ( sizeof ( intelxl_nics ) / sizeof ( intelxl_nics[0] ) ),
1827 .probe = intelxl_probe,
1828 .remove = intelxl_remove,
1829 };